Use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache:
class TestHCatClient, method testTableSchemaPropagation.
/**
 * Test for detecting schema-changes for an HCatalog table, across 2 different HCat instances.
 * A table is created with the same schema on 2 HCat instances. The table-schema is modified on the source HCat
 * instance (columns, I/O formats, SerDe definitions, etc.). The table metadata is compared between source
 * and target, the changes are detected and propagated to target.
 * @throws Exception on any unexpected failure; JUnit reports it with the full stack trace.
 */
@Test
public void testTableSchemaPropagation() throws Exception {
  // No catch-all here: the method already declares "throws Exception". The previous
  // catch + assertTrue(msg, false) pattern reduced any failure to just the exception
  // message, hiding the stack trace from JUnit's report.
  startReplicationTargetMetaStoreIfRequired();
  HCatClient sourceMetaStore = HCatClient.create(new Configuration(hcatConf));
  final String dbName = "myDb";
  final String tableName = "myTable";
  sourceMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
  sourceMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build());
  List<HCatFieldSchema> columnSchema = Arrays.asList(
      new HCatFieldSchema("foo", Type.INT, ""),
      new HCatFieldSchema("bar", Type.STRING, ""));
  List<HCatFieldSchema> partitionSchema = Arrays.asList(
      new HCatFieldSchema("dt", Type.STRING, ""),
      new HCatFieldSchema("grid", Type.STRING, ""));
  HCatTable sourceTable = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema);
  sourceMetaStore.createTable(HCatCreateTableDesc.create(sourceTable).build());
  // Verify that the sourceTable was created successfully.
  sourceTable = sourceMetaStore.getTable(dbName, tableName);
  assertNotNull("Table couldn't be queried for. ", sourceTable);
  // Serialize the table definition, then deserialize it using the target HCatClient instance.
  String tableStringRep = sourceMetaStore.serializeTable(sourceTable);
  HCatClient targetMetaStore = HCatClient.create(new Configuration(replicationTargetHCatConf));
  targetMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
  targetMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build());
  HCatTable targetTable = targetMetaStore.deserializeTable(tableStringRep);
  assertEquals("Table after deserialization should have been identical to sourceTable.",
      HCatTable.NO_DIFF, sourceTable.diff(targetTable));
  // Create the table on target, then re-fetch and confirm it is still identical to sourceTable.
  targetMetaStore.createTable(HCatCreateTableDesc.create(targetTable).build());
  targetTable = targetMetaStore.getTable(dbName, tableName);
  assertEquals("Table after deserialization should have been identical to sourceTable.",
      HCatTable.NO_DIFF, sourceTable.diff(targetTable));
  // Modify sourceTable: add a column, switch file format (which also changes the
  // SerDe and file I/O formats), and set SerDe/table parameters.
  List<HCatFieldSchema> newColumnSchema = new ArrayList<HCatFieldSchema>(columnSchema);
  newColumnSchema.add(new HCatFieldSchema("goo_new", Type.DOUBLE, ""));
  Map<String, String> tableParams = new HashMap<String, String>(1);
  tableParams.put("orc.compress", "ZLIB");
  sourceTable.cols(newColumnSchema)
      .fileFormat("orcfile")
      .tblProps(tableParams)
      .serdeParam(serdeConstants.FIELD_DELIM, Character.toString('\001'));
  sourceMetaStore.updateTableSchema(dbName, tableName, sourceTable);
  sourceTable = sourceMetaStore.getTable(dbName, tableName);
  // Diff against the table on target: every modified attribute must be detected.
  EnumSet<HCatTable.TableAttribute> diff = targetTable.diff(sourceTable);
  assertTrue("Couldn't find change in column-schema.", diff.contains(HCatTable.TableAttribute.COLUMNS));
  assertTrue("Couldn't find change in InputFormat.", diff.contains(HCatTable.TableAttribute.INPUT_FORMAT));
  assertTrue("Couldn't find change in OutputFormat.", diff.contains(HCatTable.TableAttribute.OUTPUT_FORMAT));
  assertTrue("Couldn't find change in SerDe.", diff.contains(HCatTable.TableAttribute.SERDE));
  assertTrue("Couldn't find change in SerDe parameters.", diff.contains(HCatTable.TableAttribute.SERDE_PROPERTIES));
  assertTrue("Couldn't find change in Table parameters.", diff.contains(HCatTable.TableAttribute.TABLE_PROPERTIES));
  // Replicate the changes to the replicated-table and confirm both sides converge.
  targetMetaStore.updateTableSchema(dbName, tableName, targetTable.resolve(sourceTable, diff));
  targetTable = targetMetaStore.getTable(dbName, tableName);
  assertEquals("After propagating schema changes, source and target tables should have been equivalent.",
      HCatTable.NO_DIFF, targetTable.diff(sourceTable));
  // Release both metastore connections (previously leaked).
  sourceMetaStore.close();
  targetMetaStore.close();
}
Use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache:
class TestHCatClient, method testGetMessageBusTopicName.
/**
 * Verifies that a table created with the {@code HCatConstants.HCAT_MSGBUS_TOPIC_NAME}
 * table-property reports that same topic name via {@link HCatClient#getMessageBusTopicName}.
 * @throws Exception on any unexpected failure; JUnit reports it with the full stack trace.
 */
@Test
public void testGetMessageBusTopicName() throws Exception {
  HCatClient client = HCatClient.create(new Configuration(hcatConf));
  // try/finally guarantees the client is closed even when an assertion or metastore
  // call fails; previously close() was skipped on failure and the connection leaked.
  try {
    String dbName = "testGetMessageBusTopicName_DBName";
    String tableName = "testGetMessageBusTopicName_TableName";
    client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
    client.createDatabase(HCatCreateDBDesc.create(dbName).build());
    String messageBusTopicName = "MY.topic.name";
    Map<String, String> tableProperties = new HashMap<String, String>(1);
    tableProperties.put(HCatConstants.HCAT_MSGBUS_TOPIC_NAME, messageBusTopicName);
    client.createTable(
        HCatCreateTableDesc.create(dbName, tableName,
            Arrays.asList(new HCatFieldSchema("foo", Type.STRING, "")))
            .tblProps(tableProperties).build());
    assertEquals("MessageBus topic-name doesn't match!",
        messageBusTopicName, client.getMessageBusTopicName(dbName, tableName));
    client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
  } finally {
    client.close();
  }
}
Use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache:
class TestHCatClient, method testRenameTable.
/**
 * Verifies that renaming a table makes it unreachable under its old name and
 * reachable under the new one.
 * @throws Exception on any unexpected failure.
 */
@Test
public void testRenameTable() throws Exception {
  HCatClient client = HCatClient.create(new Configuration(hcatConf));
  String tableName = "temptable";
  String newName = "mytable";
  // Clean up both names so re-runs start from a known state.
  client.dropTable(null, tableName, true);
  client.dropTable(null, newName, true);
  ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
  cols.add(new HCatFieldSchema("id", Type.INT, "id columns"));
  cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
  HCatCreateTableDesc tableDesc = HCatCreateTableDesc.create(null, tableName, cols).fileFormat("rcfile").build();
  client.createTable(tableDesc);
  client.renameTable(null, tableName, newName);
  try {
    client.getTable(null, tableName);
    // BUG FIX: previously the test passed silently if getTable() on the old name
    // succeeded; the rename must make the old name unresolvable.
    assertTrue("Fetching the table under its old name should have failed after rename.", false);
  } catch (HCatException exp) {
    assertTrue("Unexpected exception message: " + exp.getMessage(),
        exp.getMessage().contains("NoSuchObjectException while fetching table"));
  }
  HCatTable newTable = client.getTable(null, newName);
  assertNotNull(newTable);
  assertTrue(newTable.getTableName().equals(newName));
  client.close();
}
Use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache:
class TestHCatClient, method testTransportFailure.
/**
 * Verifies that a transport-level failure (provoked here by an over-long table name)
 * surfaces as an {@link HCatException}, and that a fresh client can be created and
 * used normally afterwards.
 * @throws Exception on any unexpected failure.
 */
@Test
public void testTransportFailure() throws Exception {
  HCatClient client = HCatClient.create(new Configuration(hcatConf));
  boolean isExceptionCaught = false;
  // Table creation with a long table name causes ConnectionFailureException.
  final String tableName = "Temptable" + new BigInteger(200, new Random()).toString(2);
  ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
  cols.add(new HCatFieldSchema("id", Type.INT, "id columns"));
  cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
  try {
    HCatCreateTableDesc tableDesc = HCatCreateTableDesc.create(null, tableName, cols).fileFormat("rcfile").build();
    client.createTable(tableDesc);
  } catch (Exception exp) {
    isExceptionCaught = true;
    assertEquals("Unexpected exception type.", HCatException.class, exp.getClass());
    // The connection was closed by the failure, so create a new one and prove it works.
    client = HCatClient.create(new Configuration(hcatConf));
    String newName = "goodTable";
    client.dropTable(null, newName, true);
    HCatCreateTableDesc tableDesc2 = HCatCreateTableDesc.create(null, newName, cols).fileFormat("rcfile").build();
    client.createTable(tableDesc2);
    HCatTable newTable = client.getTable(null, newName);
    assertNotNull(newTable);
    assertTrue(newTable.getTableName().equalsIgnoreCase(newName));
  } finally {
    client.close();
    // Asserted in finally on purpose: the test must fail if createTable() on the
    // over-long name unexpectedly succeeded and no exception was ever thrown.
    assertTrue("The expected exception was never thrown.", isExceptionCaught);
  }
}
Use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache:
class TestHCatClient, method testPartitionSchema.
/**
 * Verifies that the partition-column schema specified at table-creation time is
 * returned intact by {@link HCatTable#getPartCols}.
 * @throws Exception on any unexpected failure; JUnit reports it with the full stack trace.
 */
@Test
public void testPartitionSchema() throws Exception {
  // No catch-all: the old catch + assertTrue(msg, false) pattern hid the failure's
  // stack trace from JUnit even though the method declares "throws Exception".
  HCatClient client = HCatClient.create(new Configuration(hcatConf));
  try {
    final String dbName = "myDb";
    final String tableName = "myTable";
    client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
    client.createDatabase(HCatCreateDBDesc.create(dbName).build());
    List<HCatFieldSchema> columnSchema = Arrays.asList(
        new HCatFieldSchema("foo", Type.INT, ""),
        new HCatFieldSchema("bar", Type.STRING, ""));
    List<HCatFieldSchema> partitionSchema = Arrays.asList(
        new HCatFieldSchema("dt", Type.STRING, ""),
        new HCatFieldSchema("grid", Type.STRING, ""));
    client.createTable(
        HCatCreateTableDesc.create(dbName, tableName, columnSchema).partCols(partitionSchema).build());
    HCatTable table = client.getTable(dbName, tableName);
    List<HCatFieldSchema> partitionColumns = table.getPartCols();
    assertArrayEquals("Didn't get expected partition-schema back from the HCatTable.",
        partitionSchema.toArray(), partitionColumns.toArray());
    client.dropDatabase(dbName, false, HCatClient.DropDBMode.CASCADE);
  } finally {
    // Release the metastore connection even on failure (previously never closed).
    client.close();
  }
}
Aggregations