Example usage of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project, taken from the method testAlterViewParititon of the class TestHiveMetaStore.
@Test
public void testAlterViewParititon() throws Throwable {
  // Verifies that a partition added to a VIRTUAL_VIEW table can have its
  // parameter map altered via alter_partition and that the change is
  // visible on a subsequent getPartition call.
  String dbName = "compdb";
  String tblName = "comptbl";
  String viewName = "compView";

  // Start from a clean slate: drop any leftovers from earlier runs.
  client.dropTable(dbName, tblName);
  silentDropDatabase(dbName);

  Database db = new Database();
  db.setName(dbName);
  db.setDescription("Alter Partition Test database");
  client.createDatabase(db);

  // Base table the view will select from.
  Table tbl = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tblName)
      .addCol("name", ColumnType.STRING_TYPE_NAME)
      .addCol("income", ColumnType.INT_TYPE_NAME)
      .build();
  client.createTable(tbl);

  if (isThriftClient) {
    // the createTable() above does not update the location in the 'tbl'
    // object when the client is a thrift client and the code below relies
    // on the location being present in the 'tbl' object - so get the table
    // from the metastore
    tbl = client.getTable(dbName, tblName);
  }

  // View schema: 'income' as a data column, 'name' as the partition column.
  ArrayList<FieldSchema> viewCols = new ArrayList<>(1);
  viewCols.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
  ArrayList<FieldSchema> viewPartitionCols = new ArrayList<>(1);
  viewPartitionCols.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));

  Table view = new Table();
  view.setDbName(dbName);
  view.setTableName(viewName);
  view.setTableType(TableType.VIRTUAL_VIEW.name());
  view.setPartitionKeys(viewPartitionCols);
  view.setViewOriginalText("SELECT income, name FROM " + tblName);
  view.setViewExpandedText("SELECT `" + tblName + "`.`income`, `" + tblName
      + "`.`name` FROM `" + dbName + "`.`" + tblName + "`");
  view.setRewriteEnabled(false);

  // Views still need a StorageDescriptor with a (mostly empty) SerDeInfo
  // for the metastore to accept them.
  StorageDescriptor viewSd = new StorageDescriptor();
  view.setSd(viewSd);
  viewSd.setCols(viewCols);
  viewSd.setCompressed(false);
  viewSd.setParameters(new HashMap<>());
  viewSd.setSerdeInfo(new SerDeInfo());
  viewSd.getSerdeInfo().setParameters(new HashMap<>());

  client.createTable(view);

  if (isThriftClient) {
    // the createTable() above does not update the location in the 'tbl'
    // object when the client is a thrift client and the code below relies
    // on the location being present in the 'tbl' object - so get the table
    // from the metastore
    view = client.getTable(dbName, viewName);
  }

  // Add a single partition (name='abc') to the view.
  List<String> vals = new ArrayList<>(1);
  vals.add("abc");
  Partition part = new Partition();
  part.setDbName(dbName);
  part.setTableName(viewName);
  part.setValues(vals);
  part.setParameters(new HashMap<>());
  client.add_partition(part);

  // Fetch it back, mutate its parameters, and push the change with alter_partition.
  Partition part2 = client.getPartition(dbName, viewName, part.getValues());
  part2.getParameters().put("a", "b");
  client.alter_partition(dbName, viewName, part2, null);

  Partition part3 = client.getPartition(dbName, viewName, part.getValues());
  // FIX: JUnit's assertEquals takes (message, expected, actual); the original
  // call passed the actual value before the expected constant, which produced
  // a misleading "expected:<b> but was:<...>" style message on failure.
  assertEquals("couldn't view alter partition", "b", part3.getParameters().get("a"));

  // Clean up everything this test created.
  client.dropTable(dbName, viewName);
  client.dropTable(dbName, tblName);
  client.dropDatabase(dbName);
}
Example usage of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project, taken from the method duplicateSerde of the class TestHiveMetaStoreSchemaMethods.
@Test(expected = AlreadyExistsException.class)
public void duplicateSerde() throws TException {
  // Registering the same serde name twice must be rejected: the second
  // addSerDe call is expected to throw AlreadyExistsException.
  SerDeInfo serde = new SerDeInfo(uniqueSerdeName(), "x", Collections.emptyMap());
  client.addSerDe(serde);
  client.addSerDe(serde);
}
Example usage of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project, taken from the method mapSerdeToSchemaVersion of the class TestHiveMetaStoreSchemaMethods.
@Test
public void mapSerdeToSchemaVersion() throws TException {
  // Exercises attaching a serde to a schema version in the default database:
  // first mapping a serde onto a version created without one, then remapping
  // a version that was created with a serde already set.
  ISchema schema = new ISchemaBuilder()
      .setSchemaType(SchemaType.AVRO)
      .setName(uniqueSchemaName())
      .build();
  client.createISchema(schema);

  // Version 1: created with no serde, then mapped explicitly.
  SchemaVersion version = new SchemaVersionBuilder()
      .versionOf(schema)
      .setVersion(1)
      .addCol("x", ColumnType.BOOLEAN_TYPE_NAME)
      .build();
  client.addSchemaVersion(version);
  SerDeInfo firstSerde = new SerDeInfo(uniqueSerdeName(), "lib", Collections.emptyMap());
  client.addSerDe(firstSerde);
  client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, schema.getName(),
      version.getVersion(), firstSerde.getName());
  version = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schema.getName(),
      version.getVersion());
  Assert.assertEquals(firstSerde.getName(), version.getSerDe().getName());

  // Version 2: created WITH a serde, then remapped to a different one.
  String initialSerdeName = uniqueSerdeName();
  version = new SchemaVersionBuilder()
      .versionOf(schema)
      .setVersion(2)
      .addCol("x", ColumnType.BOOLEAN_TYPE_NAME)
      .setSerdeName(initialSerdeName)
      .setSerdeLib("x")
      .build();
  client.addSchemaVersion(version);
  version = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schema.getName(), 2);
  Assert.assertEquals(initialSerdeName, version.getSerDe().getName());

  SerDeInfo replacementSerde = new SerDeInfo(uniqueSerdeName(), "y", Collections.emptyMap());
  client.addSerDe(replacementSerde);
  client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, schema.getName(), 2,
      replacementSerde.getName());
  version = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schema.getName(), 2);
  Assert.assertEquals(replacementSerde.getName(), version.getSerDe().getName());
}
Example usage of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project, taken from the method mapSerdeToSchemaVersionOtherDb of the class TestHiveMetaStoreSchemaMethods.
@Test
public void mapSerdeToSchemaVersionOtherDb() throws TException {
  // Same scenario as mapSerdeToSchemaVersion, but the schema lives in a
  // non-default database to verify serde mapping is db-scoped correctly.
  String dbName = "map_other_db";
  Database db = new DatabaseBuilder().setName(dbName).build();
  client.createDatabase(db);

  ISchema schema = new ISchemaBuilder()
      .setSchemaType(SchemaType.AVRO)
      .setDbName(dbName)
      .setName(uniqueSchemaName())
      .build();
  client.createISchema(schema);

  // Version 1: created with no serde, then mapped explicitly.
  SchemaVersion version = new SchemaVersionBuilder()
      .versionOf(schema)
      .setVersion(1)
      .addCol("x", ColumnType.BOOLEAN_TYPE_NAME)
      .build();
  client.addSchemaVersion(version);
  SerDeInfo firstSerde = new SerDeInfo(uniqueSerdeName(), "lib", Collections.emptyMap());
  client.addSerDe(firstSerde);
  client.mapSchemaVersionToSerde(dbName, schema.getName(), version.getVersion(),
      firstSerde.getName());
  version = client.getSchemaVersion(dbName, schema.getName(), version.getVersion());
  Assert.assertEquals(firstSerde.getName(), version.getSerDe().getName());

  // Version 2: created WITH a serde, then remapped to a different one.
  String initialSerdeName = uniqueSerdeName();
  version = new SchemaVersionBuilder()
      .versionOf(schema)
      .setVersion(2)
      .addCol("x", ColumnType.BOOLEAN_TYPE_NAME)
      .setSerdeName(initialSerdeName)
      .setSerdeLib("x")
      .build();
  client.addSchemaVersion(version);
  version = client.getSchemaVersion(dbName, schema.getName(), 2);
  Assert.assertEquals(initialSerdeName, version.getSerDe().getName());

  SerDeInfo replacementSerde = new SerDeInfo(uniqueSerdeName(), "y", Collections.emptyMap());
  client.addSerDe(replacementSerde);
  client.mapSchemaVersionToSerde(dbName, schema.getName(), 2, replacementSerde.getName());
  version = client.getSchemaVersion(dbName, schema.getName(), 2);
  Assert.assertEquals(replacementSerde.getName(), version.getSerDe().getName());
}
Example usage of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project, taken from the method addSerde of the class TestHiveMetaStoreSchemaMethods.
@Test
public void addSerde() throws TException {
  // Round-trips a fully-populated SerDeInfo through addSerDe/getSerDe and
  // checks every field survives the trip.
  String name = uniqueSerdeName();
  SerDeInfo created = new SerDeInfo(name, "serdeLib", Collections.singletonMap("a", "b"));
  created.setSerializerClass("serializer");
  created.setDeserializerClass("deserializer");
  created.setDescription("description");
  created.setSerdeType(SerdeType.SCHEMA_REGISTRY);
  client.addSerDe(created);

  // Fetch it back and verify field-by-field.
  SerDeInfo fetched = client.getSerDe(name);
  Assert.assertEquals(name, fetched.getName());
  Assert.assertEquals("serdeLib", fetched.getSerializationLib());
  Assert.assertEquals(1, fetched.getParametersSize());
  Assert.assertEquals("b", fetched.getParameters().get("a"));
  Assert.assertEquals("serializer", fetched.getSerializerClass());
  Assert.assertEquals("deserializer", fetched.getDeserializerClass());
  Assert.assertEquals("description", fetched.getDescription());
  Assert.assertEquals(SerdeType.SCHEMA_REGISTRY, fetched.getSerdeType());
}
Aggregations