
apache-spark - Extracting Hive table partitions in Spark - java


Is there any way in Spark to extract only the partition column names? The workaround I am using is to run "show extended table like table_name" through a HiveContext.
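For context, the workaround mentioned above looks roughly like the sketch below. This assumes a Spark 1.x HiveContext; the table name is a placeholder, and whether the statement is passed through to Hive unchanged depends on the Spark and Hive versions in use:

import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.hive.HiveContext;

public class ShowExtendedTableWorkaround {
    public static void main(String[] args) {
        SparkContext sc = new SparkContext(new SparkConf().setAppName("partition-columns").setMaster("local[*]"));
        HiveContext hiveContext = new HiveContext(sc);
        // Each returned row is a chunk of formatted text; the partition columns still have to be
        // parsed out of it, which is why the answer below queries the metastore directly instead.
        List<Row> rows = hiveContext.sql("SHOW TABLE EXTENDED LIKE 'table_name'").collectAsList();
        for (Row row : rows) {
            System.out.println(row);
        }
        sc.stop();
    }
}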

Best Answer

You can use the HiveMetaStoreClient class to query the HiveMetaStore directly.

This class is also widely used by popular tools that interact with the HiveMetaStore, for example Apache Drill.

org.apache.hadoop.hive.metastore.api.Partition getPartition(String db_name, String tbl_name, List<String> part_vals)

org.apache.hadoop.hive.metastore.api.Partition getPartition(String db, String tableName, String partName)

Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName, String tableName, List<String> partNames, List<String> colNames)
Get partition column statistics for the given dbName, tableName, multiple partitions, and column names.

List<Partition> getPartitionsByNames(String db_name, String tbl_name, List<String> part_names)
Get partitions by a list of partition names.

In addition, there are the listing methods:

List<String> listPartitionNames(String db_name, String tbl_name, List<String> part_vals, short max_parts)

List<String> listPartitionNames(String dbName, String tblName, short max)

List<Partition> listPartitions(String db_name, String tbl_name, List<String> part_vals, short max_parts)

List<Partition> listPartitions(String db_name, String tbl_name, short max_parts)
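As a minimal illustration of the listing methods above (a sketch, not production code; the metastore URI thrift://host:port and the db_name/table_name arguments are placeholders):

import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;

public class ListPartitionNamesExample {
    public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://host:port"); // placeholder address

        HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
        // Partition names come back as strings such as "year=2016/month=09".
        List<String> names = client.listPartitionNames("db_name", "table_name", (short) 100);
        for (String name : names) {
            System.out.println(name);
        }
        client.close();
    }
}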

Example code snippet 1:

import org.apache.hadoop.hive.conf.HiveConf;

// test program
public class Test {
    public static void main(String[] args) {
        HiveConf hiveConf = new HiveConf();
        hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
        hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://host:port");

        HiveMetaStoreConnector hiveMetaStoreConnector = new HiveMetaStoreConnector(hiveConf);
        if (hiveMetaStoreConnector != null) {
            // the argument is a database name; partitions of all its tables are printed
            System.out.print(hiveMetaStoreConnector.getAllPartitionInfo("db_name"));
        }
    }
}


// define a class like this

import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;
import org.joda.time.DateTime;

import java.util.Arrays;
import java.util.List;

public class HiveMetaStoreConnector {
    private HiveConf hiveConf;
    HiveMetaStoreClient hiveMetaStoreClient;

    public HiveMetaStoreConnector(String msAddr, String msPort) {
        try {
            hiveConf = new HiveConf();
            hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, msAddr + ":" + msPort);
            hiveMetaStoreClient = new HiveMetaStoreClient(hiveConf);
        } catch (MetaException e) {
            e.printStackTrace();
            System.err.println("Constructor error");
            System.err.println(e.toString());
            System.exit(-100);
        }
    }

    public HiveMetaStoreConnector(HiveConf hiveConf) {
        try {
            this.hiveConf = hiveConf;
            hiveMetaStoreClient = new HiveMetaStoreClient(hiveConf);
        } catch (MetaException e) {
            e.printStackTrace();
            System.err.println("Constructor error");
            System.err.println(e.toString());
            System.exit(-100);
        }
    }

    // Collect partition information for every table in the given database.
    public String getAllPartitionInfo(String dbName) {
        List<String> res = Lists.newArrayList();
        try {
            List<String> tableList = hiveMetaStoreClient.getAllTables(dbName);
            for (String tableName : tableList) {
                res.addAll(getTablePartitionInformation(dbName, tableName));
            }
        } catch (MetaException e) {
            e.printStackTrace();
            System.out.println("getAllPartitionInfo error");
            System.out.println(e.toString());
            System.exit(-100);
        }

        return Joiner.on("\n").join(res);
    }

    // One line per partition: table name, partition values, creation time.
    public List<String> getTablePartitionInformation(String dbName, String tableName) {
        List<String> partitionsInfo = Lists.newArrayList();
        try {
            // Partition names such as "year=2016/month=09" are also available, though not used below.
            List<String> partitionNames = hiveMetaStoreClient.listPartitionNames(dbName, tableName, (short) 10000);
            List<Partition> partitions = hiveMetaStoreClient.listPartitions(dbName, tableName, (short) 10000);
            for (Partition partition : partitions) {
                StringBuffer sb = new StringBuffer();
                sb.append(tableName);
                sb.append("\t");
                List<String> partitionValues = partition.getValues();
                // Pad to four values so every output row has the same number of columns.
                if (partitionValues.size() < 4) {
                    int size = partitionValues.size();
                    for (int j = 0; j < 4 - size; j++) {
                        partitionValues.add("null");
                    }
                }
                sb.append(Joiner.on("\t").join(partitionValues));
                sb.append("\t");
                // getCreateTime() is in seconds since the epoch.
                DateTime createDate = new DateTime((long) partition.getCreateTime() * 1000);
                sb.append(createDate.toString("yyyy-MM-dd HH:mm:ss"));
                partitionsInfo.add(sb.toString());
            }
        } catch (TException e) {
            e.printStackTrace();
            return Arrays.asList("error for request on " + tableName);
        }

        return partitionsInfo;
    }

    // Collect column information for every table in the given database.
    public String getAllTableStatistic(String dbName) {
        List<String> res = Lists.newArrayList();
        try {
            List<String> tableList = hiveMetaStoreClient.getAllTables(dbName);
            for (String tableName : tableList) {
                res.addAll(getTableColumnsInformation(dbName, tableName));
            }
        } catch (MetaException e) {
            e.printStackTrace();
            System.out.println("getAllTableStatistic error");
            System.out.println(e.toString());
            System.exit(-100);
        }

        return Joiner.on("\n").join(res);
    }

    // One line per column: table name, column index, name, type, comment.
    public List<String> getTableColumnsInformation(String dbName, String tableName) {
        try {
            List<FieldSchema> fields = hiveMetaStoreClient.getFields(dbName, tableName);
            List<String> infs = Lists.newArrayList();
            int cnt = 0;
            for (FieldSchema fs : fields) {
                StringBuffer sb = new StringBuffer();
                sb.append(tableName);
                sb.append("\t");
                sb.append(cnt);
                sb.append("\t");
                cnt++;
                sb.append(fs.getName());
                sb.append("\t");
                sb.append(fs.getType());
                sb.append("\t");
                sb.append(fs.getComment());
                infs.add(sb.toString());
            }

            return infs;

        } catch (TException e) {
            e.printStackTrace();
            System.out.println("getTableColumnsInformation error");
            System.out.println(e.toString());
            System.exit(-100);
            return null;
        }
    }
}
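If only the partition column names are needed (which is what the question asks for), the same HiveMetaStoreClient can return them through the table's partition keys. A minimal sketch, with the metastore URI and the db_name/table_name arguments as placeholders:

import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;

public class PartitionColumnNames {
    public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://host:port"); // placeholder address

        HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
        // getPartitionKeys() holds the table's partition columns (name, type, comment).
        List<FieldSchema> partitionKeys = client.getTable("db_name", "table_name").getPartitionKeys();
        for (FieldSchema key : partitionKeys) {
            System.out.println(key.getName() + "\t" + key.getType());
        }
        client.close();
    }
}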

Example code snippet 2 (source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hive.hcatalog.common.HCatUtil;
import org.apache.thrift.TException;

import java.io.IOException;
import java.util.HashMap;

public class HiveMetaStoreClientTest {
    public static void main(String[] args) {
        HiveConf hiveConf = null;
        HiveMetaStoreClient hiveMetaStoreClient = null;
        String dbName = null;

        try {
            hiveConf = HCatUtil.getHiveConf(new Configuration());
            hiveMetaStoreClient = new HiveMetaStoreClient(hiveConf);

            dbName = args[0];

            getDatabase(hiveMetaStoreClient, dbName);
        } catch (MetaException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } catch (NoSuchObjectException e) {
            e.printStackTrace();
            System.out.println("===============");
            System.out.println("database " + args[0] + " does not exist");
            System.out.println("===============");
            // The database is missing, so create it and read it back.
            createDatabase(hiveMetaStoreClient, dbName);
            try {
                getDatabase(hiveMetaStoreClient, dbName);
            } catch (TException e1) {
                e1.printStackTrace();
                System.out.println("failed to read the database after creating it");
            }
        } catch (TException e) {
            e.printStackTrace();
        }
    }

    // Print the location, owner, and parameters of an existing database.
    public static Database getDatabase(HiveMetaStoreClient hiveMetaStoreClient, String dbName) throws TException {
        Database database = hiveMetaStoreClient.getDatabase(dbName);

        System.out.println(database.getLocationUri());
        System.out.println(database.getOwnerName());

        for (String key : database.getParameters().keySet()) {
            System.out.println(key + " = " + database.getParameters().get(key));
        }
        return database;
    }

    // Create a database with a default location and an empty parameter map.
    public static Database createDatabase(HiveMetaStoreClient hiveMetaStoreClient, String dbName) {
        HashMap<String, String> map = new HashMap<String, String>();
        Database database = new Database(dbName, "desc", null, map);
        try {
            hiveMetaStoreClient.createDatabase(database);
        } catch (TException e) {
            e.printStackTrace();
            System.out.println("some error");
        }
        return database;
    }
}
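A short usage note for the second snippet: the Hive metastore client and HCatalog classes (typically the hive-metastore and hive-hcatalog-core jars plus their dependencies) need to be on the classpath, along with a hive-site.xml whose hive.metastore.uris points at the running metastore; the database name to look up is passed as the first program argument (args[0]).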

Regarding "apache-spark - Extracting Hive table partitions in Spark - java", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/39807098/
