use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project flink by apache.
the class HivePartitionUtils method toHiveTablePartition.
public static HiveTablePartition toHiveTablePartition(List<String> partitionKeys, Properties tableProps, Partition partition) {
    StorageDescriptor sd = partition.getSd();
    Map<String, String> partitionSpec = new HashMap<>();
    for (int i = 0; i < partitionKeys.size(); i++) {
        String partitionColName = partitionKeys.get(i);
        String partitionValue = partition.getValues().get(i);
        partitionSpec.put(partitionColName, partitionValue);
    }
    return new HiveTablePartition(sd, partitionSpec, tableProps);
}
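A minimal usage sketch, assuming a hypothetical IMetaStoreClient instance (metastoreClient) and placeholder database, table, and partition-key names that are not part of the Flink source; exception handling and the usual java.util imports are omitted:

    // Build a HiveTablePartition for every partition the metastore reports for the table.
    List<String> partitionKeys = Arrays.asList("dt", "country");   // placeholder partition columns
    Properties tableProps = new Properties();
    List<HiveTablePartition> hivePartitions = new ArrayList<>();
    for (Partition partition : metastoreClient.listPartitions("mydb", "mytable", (short) -1)) {
        hivePartitions.add(HivePartitionUtils.toHiveTablePartition(partitionKeys, tableProps, partition));
    }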
use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project brisk by riptano.
the class SchemaManagerServiceTest method testCreateOnConfigWithMetaData.
@Test
public void testCreateOnConfigWithMetaData() throws Exception {
    KsDef ksDef = setupOtherKeyspace(configuration, "ConfigCreatedKeyspaceMetaData", true);
    cassandraClientHolder.getClient().system_add_keyspace(ksDef);
    configuration.setBoolean("cassandra.autoCreateHiveSchema", true);
    schemaManagerService.createKeyspaceSchemasIfNeeded();
    List<KsDef> keyspaces = schemaManagerService.findUnmappedKeyspaces();
    for (KsDef ks : keyspaces) {
        if (StringUtils.equals(ks.name, "ConfigCreatedKeyspaceMetaData")) {
            fail("keyspace not created by configuration");
        }
    }
    Table table = cassandraHiveMetaStore.getTable("ConfigCreatedKeyspaceMetaData", "OtherCf1");
    assertNotNull(table);
    StorageDescriptor sd = table.getSd();
    assertEquals(5, sd.getColsSize());
    for (Iterator<FieldSchema> iterator = sd.getColsIterator(); iterator.hasNext(); ) {
        FieldSchema fs = iterator.next();
        if (StringUtils.equals(fs.getName(), "col_name_utf8"))
            assertEquals("string", fs.getType());
        if (StringUtils.equals(fs.getName(), "col_name_bytes"))
            assertEquals("string", fs.getType());
        if (StringUtils.equals(fs.getName(), "col_name_timeuuid"))
            assertEquals("string", fs.getType());
if (StringUtils.equals(fs.getName(), "col_name_long"))
assertEquals("int", fs.getType());
if (StringUtils.equals(fs.getName(), "col_name_int"))
assertEquals("bigint", fs.getType());
    }
}
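For reference, a small generic sketch (not part of the brisk test itself) that prints every column recorded in a table's StorageDescriptor, using only standard metastore API calls:

    StorageDescriptor sd = table.getSd();
    for (FieldSchema fs : sd.getCols()) {
        System.out.println(fs.getName() + " : " + fs.getType());
    }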
use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
the class HiveTableOperations method storageDescriptor.
private StorageDescriptor storageDescriptor(TableMetadata metadata, boolean hiveEngineEnabled) {
    final StorageDescriptor storageDescriptor = new StorageDescriptor();
    storageDescriptor.setCols(HiveSchemaUtil.convert(metadata.schema()));
    storageDescriptor.setLocation(metadata.location());
    SerDeInfo serDeInfo = new SerDeInfo();
    serDeInfo.setParameters(Maps.newHashMap());
    if (hiveEngineEnabled) {
        storageDescriptor.setInputFormat("org.apache.iceberg.mr.hive.HiveIcebergInputFormat");
        storageDescriptor.setOutputFormat("org.apache.iceberg.mr.hive.HiveIcebergOutputFormat");
        serDeInfo.setSerializationLib("org.apache.iceberg.mr.hive.HiveIcebergSerDe");
    } else {
        storageDescriptor.setOutputFormat("org.apache.hadoop.mapred.FileOutputFormat");
        storageDescriptor.setInputFormat("org.apache.hadoop.mapred.FileInputFormat");
        serDeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
    }
    storageDescriptor.setSerdeInfo(serDeInfo);
    return storageDescriptor;
}
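A hedged sketch of how such a descriptor is typically attached to a metastore Table before it is created or altered; the database and table names below are placeholders and the call sequence is illustrative, not taken from HiveTableOperations itself:

    org.apache.hadoop.hive.metastore.api.Table tbl = new org.apache.hadoop.hive.metastore.api.Table();
    tbl.setDbName("iceberg_db");        // placeholder database name
    tbl.setTableName("iceberg_tbl");    // placeholder table name
    tbl.setTableType(TableType.EXTERNAL_TABLE.name());
    // metadata is an existing TableMetadata instance for the Iceberg table.
    tbl.setSd(storageDescriptor(metadata, true));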
use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
the class HiveIcebergMetaHook method assertTableCanBeMigrated.
/**
* Checks if the table can be migrated to iceberg format. An eligible table is:
* - external
* - not temporary
* - not acid
* - uses one of supported file formats
* @param hmsTable the table which should be migrated to iceberg, if eligible
* @throws MetaException if the table is not eligible for migration due to violating one of the conditions above
*/
private void assertTableCanBeMigrated(org.apache.hadoop.hive.metastore.api.Table hmsTable) throws MetaException {
    StorageDescriptor sd = hmsTable.getSd();
    boolean hasCorrectTableType = MetaStoreUtils.isExternalTable(hmsTable) && !hmsTable.isTemporary() && !AcidUtils.isTransactionalTable(hmsTable);
    if (!hasCorrectTableType) {
        throw new MetaException("Converting non-external, temporary or transactional hive table to iceberg table is not allowed.");
    }
    boolean hasCorrectFileFormat = MIGRATION_ALLOWED_SOURCE_FORMATS.stream().anyMatch(f -> sd.getInputFormat().toLowerCase().contains(f));
    if (!hasCorrectFileFormat) {
        throw new MetaException("Cannot convert hive table to iceberg with input format: " + sd.getInputFormat());
    }
}
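A minimal sketch of an HMS table that should satisfy these checks, assuming the allowed source formats include a Parquet input format (the concrete format list lives in MIGRATION_ALLOWED_SOURCE_FORMATS, which is not shown in this snippet); the setup below is illustrative only:

    org.apache.hadoop.hive.metastore.api.Table hmsTable = new org.apache.hadoop.hive.metastore.api.Table();
    hmsTable.setTableType(TableType.EXTERNAL_TABLE.name());
    Map<String, String> params = new HashMap<>();
    params.put("EXTERNAL", "TRUE");          // external-table marker typically checked by MetaStoreUtils.isExternalTable
    hmsTable.setParameters(params);          // no "transactional" parameter, so the table is not ACID
    hmsTable.setTemporary(false);            // not a temporary table
    StorageDescriptor sd = new StorageDescriptor();
    sd.setInputFormat("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat");
    hmsTable.setSd(sd);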
use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
the class HiveTableTest method createHiveTable.
private org.apache.hadoop.hive.metastore.api.Table createHiveTable(String hiveTableName) throws IOException {
    Map<String, String> parameters = Maps.newHashMap();
    parameters.put(serdeConstants.SERIALIZATION_CLASS, "org.apache.hadoop.hive.serde2.thrift.test.IntString");
    parameters.put(serdeConstants.SERIALIZATION_FORMAT, "org.apache.thrift.protocol.TBinaryProtocol");
    SerDeInfo serDeInfo = new SerDeInfo(null, "org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer", parameters);
    // StorageDescriptor has an empty list of fields - SerDe will report them.
    StorageDescriptor sd = new StorageDescriptor(Lists.newArrayList(), tempFolder.newFolder().getAbsolutePath(),
        "org.apache.hadoop.mapred.TextInputFormat", "org.apache.hadoop.mapred.TextOutputFormat",
        false, -1, serDeInfo, Lists.newArrayList(), Lists.newArrayList(), Maps.newHashMap());
    org.apache.hadoop.hive.metastore.api.Table hiveTable = new org.apache.hadoop.hive.metastore.api.Table(
        hiveTableName, DB_NAME, "test_owner", 0, 0, 0, sd, Lists.newArrayList(), Maps.newHashMap(),
        "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
    return hiveTable;
}
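An illustrative follow-up (assumed test usage, not from HiveTableTest itself): registering the constructed table through a metastore client so that later Hive or Iceberg calls can see it. Here metastoreClient is a hypothetical IMetaStoreClient instance and the table name is a placeholder:

    org.apache.hadoop.hive.metastore.api.Table hiveTable = createHiveTable("hive_source_table");
    metastoreClient.createTable(hiveTable);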