Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.
The class TestCommands, method testDropPartitionCommand.
@Test
public void testDropPartitionCommand() throws HCatException, MetaException {
  String dbName = "cmd_testdb";
  String tableName = "cmd_testtable";
  int evid = 789;
  List<HCatFieldSchema> pcols = HCatSchemaUtils.getHCatSchema("b:string").getFields();
  List<HCatFieldSchema> cols = HCatSchemaUtils.getHCatSchema("a:int").getFields();
  Map<String, String> ptnDesc = new HashMap<String, String>();
  ptnDesc.put("b", "test");
  Command testReplicatedDropPtnCmd = new DropPartitionCommand(dbName, tableName, ptnDesc, true, evid);
  assertEquals(evid, testReplicatedDropPtnCmd.getEventId());
  assertEquals(1, testReplicatedDropPtnCmd.get().size());
  assertEquals(true, testReplicatedDropPtnCmd.isRetriable());
  assertEquals(false, testReplicatedDropPtnCmd.isUndoable());
  CommandTestUtils.testCommandSerialization(testReplicatedDropPtnCmd);
  Command testNormalDropPtnCmd = new DropPartitionCommand(dbName, tableName, ptnDesc, false, evid);
  assertEquals(evid, testNormalDropPtnCmd.getEventId());
  assertEquals(1, testNormalDropPtnCmd.get().size());
  assertEquals(true, testNormalDropPtnCmd.isRetriable());
  assertEquals(false, testNormalDropPtnCmd.isUndoable());
  CommandTestUtils.testCommandSerialization(testNormalDropPtnCmd);
  client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
  client.createDatabase(HCatCreateDBDesc.create(dbName).ifNotExists(false).build());
  Map<String, String> props = new HashMap<String, String>();
  props.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid + 5));
  HCatTable table = (new HCatTable(dbName, tableName)).tblProps(props).cols(cols).partCols(pcols);
  client.createTable(HCatCreateTableDesc.create(table).build());
  HCatTable tableCreated = client.getTable(dbName, tableName);
  assertNotNull(tableCreated);
  HCatPartition ptnToAdd = (new HCatPartition(tableCreated, ptnDesc,
      TestHCatClient.makePartLocation(tableCreated, ptnDesc))).parameters(props);
  client.addPartition(HCatAddPartitionDesc.create(ptnToAdd).build());
  HCatPartition p1 = client.getPartition(dbName, tableName, ptnDesc);
  assertNotNull(p1);
  // Test replicated drop, should not drop, because evid < repl.state.id
  LOG.info("About to run :" + testReplicatedDropPtnCmd.get().get(0));
  driver.run(testReplicatedDropPtnCmd.get().get(0));
  HCatPartition p2 = client.getPartition(dbName, tableName, ptnDesc);
  assertNotNull(p2);
  // Test normal drop, should drop unconditionally.
  LOG.info("About to run :" + testNormalDropPtnCmd.get().get(0));
  driver.run(testNormalDropPtnCmd.get().get(0));
  Exception onfe = null;
  try {
    HCatPartition p_del = client.getPartition(dbName, tableName, ptnDesc);
  } catch (Exception e) {
    onfe = e;
  }
  assertNotNull(onfe);
  assertTrue(onfe instanceof ObjectNotFoundException);
  Map<String, String> props2 = new HashMap<String, String>();
  props2.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid - 5));
  HCatPartition ptnToAdd2 = (new HCatPartition(tableCreated, ptnDesc,
      TestHCatClient.makePartLocation(tableCreated, ptnDesc))).parameters(props2);
  client.addPartition(HCatAddPartitionDesc.create(ptnToAdd2).build());
  HCatPartition p3 = client.getPartition(dbName, tableName, ptnDesc);
  assertNotNull(p3);
  // Test replicated drop, should drop this time, since repl.state.id < evid.
  LOG.info("About to run :" + testReplicatedDropPtnCmd.get().get(0));
  driver.run(testReplicatedDropPtnCmd.get().get(0));
  Exception onfe2 = null;
  try {
    HCatPartition p_del = client.getPartition(dbName, tableName, ptnDesc);
  } catch (Exception e) {
    onfe2 = e;
  }
  assertNotNull(onfe2);
  assertTrue(onfe2 instanceof ObjectNotFoundException);
}
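The conditional behavior exercised above hinges on comparing the command's event id with the repl.last.id value stored in the table or partition properties (ReplicationUtils.REPL_STATE_ID). A minimal sketch of that guard, assuming simplified key handling; names other than the property key are illustrative, not the actual Hive implementation:

// Hedged sketch of the replicated-drop guard this test exercises.
import java.util.Map;

class ReplDropGuardSketch {
  // A replicated drop carrying eventId should apply only if the recorded
  // replication state is older than the event, i.e. replStateId < eventId.
  static boolean shouldApplyReplicatedDrop(long eventId, Map<String, String> params) {
    String replState = params.get("repl.last.id"); // assumed value behind ReplicationUtils.REPL_STATE_ID
    if (replState == null) {
      return true; // no replication state recorded: apply the drop
    }
    return Long.parseLong(replState) < eventId;
  }
}

In the test, the partition first carries repl.last.id = evid + 5, so the replicated drop is a no-op; after the partition is re-added with evid - 5, the same command drops it, while the non-replicated command drops unconditionally.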
Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.
The class NotificationListener, method onCreateTable.
@Override
public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
  // Subscribers can be notified of a newly created table by listening on a
  // topic named after the db, with message selector "HCAT_EVENT = HCAT_ADD_TABLE".
  if (tableEvent.getStatus()) {
    Table tbl = tableEvent.getTable();
    IHMSHandler handler = tableEvent.getIHMSHandler();
    Configuration conf = handler.getConf();
    Table newTbl;
    try {
      newTbl = handler.get_table_core(tbl.getDbName(), tbl.getTableName()).deepCopy();
      newTbl.getParameters().put(HCatConstants.HCAT_MSGBUS_TOPIC_NAME,
          getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase() + "."
              + newTbl.getTableName().toLowerCase());
      handler.alter_table(newTbl.getDbName(), newTbl.getTableName(), newTbl);
    } catch (TException e) {
      MetaException me = new MetaException(e.toString());
      me.initCause(e);
      throw me;
    }
    String topicName = getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase();
    send(messageFactory.buildCreateTableMessage(newTbl), topicName);
  }
}
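On the consuming side, a client would subscribe to the topic written into HCAT_MSGBUS_TOPIC_NAME and filter on the HCAT_EVENT message property. A hedged JMS sketch; the broker (ActiveMQ), its URL, and the topic name are assumptions, and only the selector string comes from the comment above:

// Hedged sketch: JMS subscriber for HCatalog create-table notifications.
import javax.jms.*;
import org.apache.activemq.ActiveMQConnectionFactory;

public class CreateTableSubscriber {
  public static void main(String[] args) throws JMSException {
    ConnectionFactory factory = new ActiveMQConnectionFactory("tcp://localhost:61616"); // assumed broker
    Connection conn = factory.createConnection();
    conn.start();
    Session session = conn.createSession(false, Session.AUTO_ACKNOWLEDGE);
    // Topic named <prefix>.<dbname>, mirroring getTopicPrefix(conf) + "." + dbName
    Topic topic = session.createTopic("hcat.cmd_testdb");
    // Deliver only add-table events, per the selector described above
    MessageConsumer consumer = session.createConsumer(topic, "HCAT_EVENT = 'HCAT_ADD_TABLE'");
    consumer.setMessageListener(msg -> {
      try {
        System.out.println("create-table message: " + ((TextMessage) msg).getText());
      } catch (JMSException e) {
        e.printStackTrace();
      }
    });
    // In real use, block here to keep receiving messages.
  }
}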
Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.
The class TestObjectStore, method testDirectSqlErrorMetrics.
@Test
public void testDirectSqlErrorMetrics() throws Exception {
  Configuration conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true);
  Metrics.initialize(conf);
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES,
      "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, "
          + "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter");
  // Recall setup so that we get an object store with the metrics initialized.
  setUp();
  Counter directSqlErrors =
      Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS);
  objectStore.new GetDbHelper("foo", true, true) {
    @Override
    protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
      return null;
    }

    @Override
    protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx)
        throws MetaException, NoSuchObjectException {
      return null;
    }
  }.run(false);
  Assert.assertEquals(0, directSqlErrors.getCount());
  objectStore.new GetDbHelper("foo", true, true) {
    @Override
    protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
      throw new RuntimeException();
    }

    @Override
    protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx)
        throws MetaException, NoSuchObjectException {
      return null;
    }
  }.run(false);
  Assert.assertEquals(1, directSqlErrors.getCount());
}
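The anonymous GetDbHelper subclasses above exercise ObjectStore's two-path lookup: try a direct-SQL implementation first and fall back to JDO, counting direct-SQL failures. A standalone sketch of that pattern, with illustrative names (not the actual GetHelper internals):

// Hedged sketch of the directSQL-first, JDO-fallback pattern with an error counter.
import java.util.concurrent.atomic.AtomicLong;

abstract class TwoPathHelperSketch<T> {
  static final AtomicLong DIRECT_SQL_ERRORS = new AtomicLong(); // stands in for the Codahale counter

  protected abstract T getSqlResult();  // fast path: hand-written SQL
  protected abstract T getJdoResult();  // slow path: JDO/DataNucleus

  T run() {
    try {
      return getSqlResult();
    } catch (RuntimeException e) {
      // Matches the test: a failing SQL path bumps the counter once,
      // then the JDO path is consulted.
      DIRECT_SQL_ERRORS.incrementAndGet();
      return getJdoResult();
    }
  }
}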
Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.
The class TestAddPartitions, method testAddPartitionsOneInvalid.
@Test
public void testAddPartitionsOneInvalid() throws Exception {
  createTable();
  String tableLocation = metaStore.getWarehouseRoot() + "/" + TABLE_NAME;
  Partition partition1 = buildPartition(DB_NAME, TABLE_NAME, "2016", tableLocation + "/year=2016");
  Partition partition2 = buildPartition(DB_NAME, TABLE_NAME, "2017", tableLocation + "/year=2017");
  Partition partition3 = buildPartition(Lists.newArrayList("2015", "march"), getYearAndMonthPartCols(), 1);
  partition3.getSd().setLocation(tableLocation + "/year=2015/month=march");
  List<Partition> partitions = new ArrayList<>();
  partitions.add(partition1);
  partitions.add(partition2);
  partitions.add(partition3);
  try {
    client.add_partitions(partitions);
    Assert.fail("MetaException should have been thrown.");
  } catch (MetaException e) {
    // Expected exception
  }
  List<Partition> parts = client.listPartitions(DB_NAME, TABLE_NAME, MAX);
  Assert.assertNotNull(parts);
  Assert.assertTrue(parts.isEmpty());
  // TODO: This does not work correctly. None of the partitions is created, but the folders
  // for the first two are. This is because in HiveMetaStore.add_partitions_core, the first
  // two partitions have already been submitted to the thread pool by the time the exception
  // occurs on the third one. When the exception occurs, control reaches the finally block,
  // but the map tracking created folders may still be empty (depending on the progress of
  // the worker threads), so the folders are not deleted.
  // Assert.assertFalse(metaStore.isPathExists(new Path(tableLocation + "/year=2016")));
  // Assert.assertFalse(metaStore.isPathExists(new Path(tableLocation + "/year=2017")));
  Assert.assertFalse(metaStore.isPathExists(new Path(tableLocation + "/year=2015/month=march")));
}
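The race described in the TODO can be pictured with a simplified model of add_partitions_core: folder creation is handed to a thread pool, and the rollback in the finally block only sees folders whose workers have already registered them. A hedged sketch, loosely modeled on that flow; all names are illustrative:

// Hedged sketch of the cleanup race from the TODO above.
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AddPartitionsRaceSketch {
  private final ExecutorService pool = Executors.newFixedThreadPool(2);
  private final Map<String, String> createdDirs = new ConcurrentHashMap<>();

  void addPartitions(List<String> partNames) {
    boolean success = false;
    try {
      for (String name : partNames) {
        if (name.contains("invalid")) {
          throw new IllegalArgumentException("bad partition: " + name);
        }
        // Folder creation runs asynchronously; the map entry appears only
        // once the worker has actually created the directory.
        pool.submit(() -> createdDirs.put(name, mkdir(name)));
      }
      success = true;
    } finally {
      if (!success) {
        // Race: workers for earlier partitions may not have registered their
        // directories yet, so this rollback can miss them and folders leak.
        createdDirs.values().forEach(AddPartitionsRaceSketch::rmdir);
      }
    }
  }

  private static String mkdir(String name) { return "/warehouse/" + name; }
  private static void rmdir(String dir) { /* delete the directory */ }
}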
Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.
The class TestExchangePartitions, method testExchangePartitionsCustomPartLocation.
@Test
public void testExchangePartitionsCustomPartLocation() throws Exception {
  Table source = createTable(DB_NAME, "test_source_table", getYearMonthAndDayPartCols(), null);
  Table dest = createTable(DB_NAME, "test_dest_table", getYearMonthAndDayPartCols(), null);
  Partition[] parts = new Partition[2];
  parts[0] = createPartition(source, Lists.newArrayList("2019", "march", "15"),
      source.getSd().getLocation() + "/2019m15");
  parts[1] = createPartition(source, Lists.newArrayList("2019", "march", "22"),
      source.getSd().getLocation() + "/2019m22");
  Map<String, String> partitionSpecs = getPartitionSpec(parts[1]);
  try {
    client.exchange_partitions(partitionSpecs, source.getDbName(), source.getTableName(),
        dest.getDbName(), dest.getTableName());
    Assert.fail("MetaException should have been thrown.");
  } catch (MetaException e) {
    // Expected: a FileNotFoundException occurs when the partitions have a custom location.
  }
  checkRemainingPartitions(source, dest, Lists.newArrayList(parts[0], parts[1]));
  List<Partition> destTablePartitions =
      client.listPartitions(dest.getDbName(), dest.getTableName(), (short) -1);
  Assert.assertTrue(destTablePartitions.isEmpty());
}
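For reference, the partitionSpecs map produced by getPartitionSpec(parts[1]) maps each partition column to its value. A hedged illustration of the equivalent literal map, reusing client, source, and dest from the test above:

// Illustrative equivalent of getPartitionSpec(parts[1]) for the
// year/month/day partition columns used in this test.
Map<String, String> partitionSpecs = new HashMap<>();
partitionSpecs.put("year", "2019");
partitionSpecs.put("month", "march");
partitionSpecs.put("day", "22");
// exchange_partitions moves the matching partition from source to dest; here it
// fails with MetaException because parts[1] lives in the custom location
// .../2019m22 rather than a default .../year=2019/month=march/day=22 path.
client.exchange_partitions(partitionSpecs, source.getDbName(), source.getTableName(),
    dest.getDbName(), dest.getTableName());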