use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project presto by prestodb.
the class ThriftMetastoreUtil method getSerdeInfo.
private static SerDeInfo getSerdeInfo(org.apache.hadoop.hive.metastore.api.Table table) {
    StorageDescriptor storageDescriptor = table.getSd();
    if (storageDescriptor == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table does not contain a storage descriptor: " + table);
    }
    SerDeInfo serdeInfo = storageDescriptor.getSerdeInfo();
    if (serdeInfo == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table storage descriptor is missing SerDe info");
    }
    return serdeInfo;
}
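The SerDeInfo returned here tells a reader which serde class and parameters to use when deserializing rows. A minimal sketch of pulling those fields out of a metastore Table with the same thrift-generated API (the class name SerdeInspector and its null-handling policy are illustrative, not part of Presto):

import java.util.Collections;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

public final class SerdeInspector {

    private SerdeInspector() {
    }

    // Returns the serde library class name, or null if the table has no storage descriptor or serde info.
    public static String getSerializationLib(Table table) {
        StorageDescriptor sd = table.getSd();
        if (sd == null || sd.getSerdeInfo() == null) {
            return null;
        }
        return sd.getSerdeInfo().getSerializationLib();
    }

    // Returns the serde parameters, or an empty map if none are set.
    public static Map<String, String> getSerdeParameters(Table table) {
        StorageDescriptor sd = table.getSd();
        SerDeInfo serdeInfo = sd == null ? null : sd.getSerdeInfo();
        if (serdeInfo == null || serdeInfo.getParameters() == null) {
            return Collections.emptyMap();
        }
        return serdeInfo.getParameters();
    }
}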
use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project storm by apache.
the class HiveSetupUtil method addPartition.
private static void addPartition(IMetaStoreClient client, Table tbl, List<String> partValues) throws IOException, TException {
    Partition part = new Partition();
    part.setDbName(tbl.getDbName());
    part.setTableName(tbl.getTableName());
    StorageDescriptor sd = new StorageDescriptor(tbl.getSd());
    sd.setLocation(sd.getLocation() + Path.SEPARATOR + makePartPath(tbl.getPartitionKeys(), partValues));
    part.setSd(sd);
    part.setValues(partValues);
    client.add_partition(part);
}
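The copy constructor new StorageDescriptor(tbl.getSd()) clones the table's descriptor so only the location needs to change for the new partition. The partition location conventionally follows Hive's key=value directory layout; a hedged sketch of a helper equivalent to the makePartPath call above (the actual helper in the storm test util may build the path differently):

import java.util.List;
import java.util.StringJoiner;

import org.apache.hadoop.hive.metastore.api.FieldSchema;

public final class PartitionPaths {

    private PartitionPaths() {
    }

    // Builds a Hive-style partition path such as "dt=2024-01-01/country=US".
    public static String makePartPath(List<FieldSchema> partitionKeys, List<String> partValues) {
        if (partitionKeys.size() != partValues.size()) {
            throw new IllegalArgumentException("partition keys and values must have the same size");
        }
        StringJoiner joiner = new StringJoiner("/");
        for (int i = 0; i < partitionKeys.size(); i++) {
            joiner.add(partitionKeys.get(i).getName() + "=" + partValues.get(i));
        }
        return joiner.toString();
    }
}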
use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project storm by apache.
the class HiveSetupUtil method createDbAndTable.
public static void createDbAndTable(HiveConf conf, String databaseName, String tableName, List<String> partVals, String[] colNames, String[] colTypes, String[] partNames, String dbLocation) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
        Database db = new Database();
        db.setName(databaseName);
        db.setLocationUri(dbLocation);
        client.createDatabase(db);
        Table tbl = new Table();
        tbl.setDbName(databaseName);
        tbl.setTableName(tableName);
        tbl.setTableType(TableType.MANAGED_TABLE.toString());
        StorageDescriptor sd = new StorageDescriptor();
        sd.setCols(getTableColumns(colNames, colTypes));
        sd.setNumBuckets(1);
        sd.setLocation(dbLocation + Path.SEPARATOR + tableName);
        if (partNames != null && partNames.length != 0) {
            tbl.setPartitionKeys(getPartitionKeys(partNames));
        }
        tbl.setSd(sd);
        sd.setBucketCols(new ArrayList<String>(2));
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName(tbl.getTableName());
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
        sd.getSerdeInfo().setSerializationLib(OrcSerde.class.getName());
        sd.setInputFormat(OrcInputFormat.class.getName());
        sd.setOutputFormat(OrcOutputFormat.class.getName());
        Map<String, String> tableParams = new HashMap<String, String>();
        tbl.setParameters(tableParams);
        client.createTable(tbl);
        try {
            if (partVals != null && partVals.size() > 0) {
                addPartition(client, tbl, partVals);
            }
        } catch (AlreadyExistsException e) {
            // the partition already exists; ignore
        }
    } finally {
        client.close();
    }
}
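A hypothetical call to the helper above, creating an ORC-backed table with a single string partition column; the database name, column names, partition value, and location are made up for illustration:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;

public class HiveSetupExample {

    public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // One value per partition column declared below; the partition is pre-created via addPartition.
        List<String> partitionValues = Arrays.asList("2024-01-01");
        HiveSetupUtil.createDbAndTable(
                conf,
                "testdb",                         // database name
                "user_events",                    // table name
                partitionValues,                  // partition values
                new String[] { "id", "msg" },     // column names
                new String[] { "int", "string" }, // column types
                new String[] { "dt" },            // partition column names
                "/tmp/warehouse/testdb");         // database location
    }
}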
use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project flink by apache.
the class HiveTableUtil method instantiateHiveTable.
public static Table instantiateHiveTable(ObjectPath tablePath, CatalogBaseTable table, HiveConf hiveConf, boolean managedTable) {
    final boolean isView = table instanceof CatalogView;
    // let Hive set default parameters for us, e.g. serialization.format
    Table hiveTable = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(tablePath.getDatabaseName(), tablePath.getObjectName());
    hiveTable.setCreateTime((int) (System.currentTimeMillis() / 1000));
    Map<String, String> properties = new HashMap<>(table.getOptions());
    if (managedTable) {
        properties.put(CONNECTOR.key(), ManagedTableFactory.DEFAULT_IDENTIFIER);
    }
    // Table comment
    if (table.getComment() != null) {
        properties.put(HiveCatalogConfig.COMMENT, table.getComment());
    }
    boolean isHiveTable = HiveCatalog.isHiveTable(properties);
    // Hive table's StorageDescriptor
    StorageDescriptor sd = hiveTable.getSd();
    HiveTableUtil.setDefaultStorageFormat(sd, hiveConf);
    // view schemas are stored as table properties instead, so views created by Flink
    // cannot be used in Hive, which is fine
    // because hive cannot understand the expanded query anyway
    if (isHiveTable && !isView) {
        HiveTableUtil.initiateTableFromProperties(hiveTable, properties, hiveConf);
        List<FieldSchema> allColumns = HiveTableUtil.createHiveColumns(table.getSchema());
        // Table columns and partition keys
        if (table instanceof CatalogTable) {
            CatalogTable catalogTable = (CatalogTable) table;
            if (catalogTable.isPartitioned()) {
                int partitionKeySize = catalogTable.getPartitionKeys().size();
                List<FieldSchema> regularColumns = allColumns.subList(0, allColumns.size() - partitionKeySize);
                List<FieldSchema> partitionColumns = allColumns.subList(allColumns.size() - partitionKeySize, allColumns.size());
                sd.setCols(regularColumns);
                hiveTable.setPartitionKeys(partitionColumns);
            } else {
                sd.setCols(allColumns);
                hiveTable.setPartitionKeys(new ArrayList<>());
            }
        } else {
            sd.setCols(allColumns);
        }
        // Table properties
        hiveTable.getParameters().putAll(properties);
    } else {
        DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
        tableSchemaProps.putTableSchema(Schema.SCHEMA, table.getSchema());
        if (table instanceof CatalogTable) {
            tableSchemaProps.putPartitionKeys(((CatalogTable) table).getPartitionKeys());
        }
        properties.putAll(tableSchemaProps.asMap());
        properties = maskFlinkProperties(properties);
        // mark the table as generic when there are no connector properties, e.g.
        // when creating views which don't have connector properties
        if (isView || (!properties.containsKey(FLINK_PROPERTY_PREFIX + CONNECTOR.key()) && !properties.containsKey(FLINK_PROPERTY_PREFIX + CONNECTOR_TYPE))) {
            properties.put(IS_GENERIC, "true");
        }
        hiveTable.setParameters(properties);
    }
    if (isView) {
        // TODO: [FLINK-12398] Support partitioned view in catalog API
        hiveTable.setPartitionKeys(new ArrayList<>());
        CatalogView view = (CatalogView) table;
        hiveTable.setViewOriginalText(view.getOriginalQuery());
        hiveTable.setViewExpandedText(view.getExpandedQuery());
        hiveTable.setTableType(TableType.VIRTUAL_VIEW.name());
    }
    return hiveTable;
}
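The column split above relies on the convention that partition keys are the trailing fields of the Flink schema: the first allColumns.size() - partitionKeySize fields go into the StorageDescriptor, and the rest become Hive partition keys. A small standalone sketch of that split using only the thrift-generated FieldSchema type (the column names are invented):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;

public class ColumnSplitExample {

    public static void main(String[] args) {
        // Flink schema order: regular columns first, partition keys last.
        List<FieldSchema> allColumns = Arrays.asList(
                new FieldSchema("id", "bigint", null),
                new FieldSchema("msg", "string", null),
                new FieldSchema("dt", "string", null)); // the single partition key
        int partitionKeySize = 1;

        // regularColumns holds "id" and "msg"; partitionColumns holds "dt".
        List<FieldSchema> regularColumns = allColumns.subList(0, allColumns.size() - partitionKeySize);
        List<FieldSchema> partitionColumns = allColumns.subList(allColumns.size() - partitionKeySize, allColumns.size());

        System.out.println("sd.setCols -> " + regularColumns);
        System.out.println("setPartitionKeys -> " + partitionColumns);
    }
}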
use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project flink by apache.
the class PartitionMonitorTest method commitPartitionWithGivenCreateTime.
private void commitPartitionWithGivenCreateTime(List<String> partitionValues, Integer createTime) {
    StorageDescriptor sd = new StorageDescriptor();
    sd.setLocation("/tmp/test");
    Partition partition = new Partition(partitionValues, "testDb", "testTable", createTime, createTime, sd, null);
    partition.setValues(partitionValues);
    testPartitionWithOffset.add(partition);
}
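For reference, the seven-argument Partition constructor used above takes (values, dbName, tableName, createTime, lastAccessTime, sd, parameters); a standalone sketch with made-up values:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

public class PartitionExample {

    public static void main(String[] args) {
        StorageDescriptor sd = new StorageDescriptor();
        sd.setLocation("/tmp/test/dt=2024-01-01"); // made-up partition location

        int nowSeconds = (int) (System.currentTimeMillis() / 1000);
        List<String> values = Arrays.asList("2024-01-01");

        // Partition(values, dbName, tableName, createTime, lastAccessTime, sd, parameters)
        Partition partition = new Partition(values, "testDb", "testTable", nowSeconds, nowSeconds, sd, null);
        System.out.println(partition.getValues() + " @ " + partition.getSd().getLocation());
    }
}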