use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class HCatClientHMSImpl method listPartitionsByFilter.
@Override
public List<HCatPartition> listPartitionsByFilter(String dbName, String tblName, String filter) throws HCatException {
  List<HCatPartition> hcatPtns = new ArrayList<HCatPartition>();
  try {
    HCatTable table = getTable(dbName, tblName);
    List<Partition> hivePtns = hmsClient.listPartitionsByFilter(table.getDbName(), table.getTableName(), filter, (short) -1);
    for (Partition ptn : hivePtns) {
      hcatPtns.add(new HCatPartition(table, ptn));
    }
  } catch (MetaException e) {
    throw new HCatException("MetaException while fetching partitions.", e);
  } catch (NoSuchObjectException e) {
    throw new ObjectNotFoundException("NoSuchObjectException while fetching partitions.", e);
  } catch (TException e) {
    throw new ConnectionFailureException("TException while fetching partitions.", e);
  }
  return hcatPtns;
}
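Below is a minimal caller-side sketch of how the translated exception surfaces to HCatClient users, assuming the org.apache.hive.hcatalog.api client API; the database name, table name, and partition filter are placeholders, not taken from the snippet above.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.api.HCatPartition;
import org.apache.hive.hcatalog.api.ObjectNotFoundException;
import org.apache.hive.hcatalog.common.HCatException;

public class ListPartitionsByFilterExample {
  public static void main(String[] args) throws HCatException {
    HCatClient client = HCatClient.create(new Configuration());
    try {
      // A NoSuchObjectException from the metastore reaches the caller as ObjectNotFoundException.
      List<HCatPartition> partitions =
          client.listPartitionsByFilter("default", "web_logs", "dt > '2024-01-01'");
      for (HCatPartition partition : partitions) {
        System.out.println(partition);
      }
    } catch (ObjectNotFoundException e) {
      System.err.println("Table or partition not found: " + e.getMessage());
    } finally {
      client.close();
    }
  }
}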
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class HCatClientHMSImpl method getPartitions.
@Override
public List<HCatPartition> getPartitions(String dbName, String tblName) throws HCatException {
  List<HCatPartition> hcatPtns = new ArrayList<HCatPartition>();
  try {
    HCatTable hcatTable = getTable(dbName, tblName);
    List<Partition> hivePtns = hmsClient.listPartitions(checkDB(dbName), tblName, (short) -1);
    for (Partition ptn : hivePtns) {
      hcatPtns.add(new HCatPartition(hcatTable, ptn));
    }
  } catch (NoSuchObjectException e) {
    throw new ObjectNotFoundException("NoSuchObjectException while retrieving partition.", e);
  } catch (MetaException e) {
    throw new HCatException("MetaException while retrieving partition.", e);
  } catch (TException e) {
    throw new ConnectionFailureException("TException while retrieving partition.", e);
  }
  return hcatPtns;
}
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class TestReplicationScenarios method testDrops.
@Test
public void testDrops() throws IOException {
  String testName = "drops";
  LOG.info("Testing " + testName);
  String dbName = testName + "_" + tid;
  run("CREATE DATABASE " + dbName);
  run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
  run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE");
  run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE");
  run("CREATE TABLE " + dbName + ".ptned3(a string) partitioned by (b int) STORED AS TEXTFILE");
  String[] unptn_data = new String[] { "eleven", "twelve" };
  String[] ptn_data_1 = new String[] { "thirteen", "fourteen", "fifteen" };
  String[] ptn_data_2 = new String[] { "fifteen", "sixteen", "seventeen" };
  String[] empty = new String[] {};
  String unptn_locn = new Path(TEST_PATH, testName + "_unptn").toUri().getPath();
  String ptn_locn_1 = new Path(TEST_PATH, testName + "_ptn1").toUri().getPath();
  String ptn_locn_2 = new Path(TEST_PATH, testName + "_ptn2").toUri().getPath();
  createTestDataFile(unptn_locn, unptn_data);
  createTestDataFile(ptn_locn_1, ptn_data_1);
  createTestDataFile(ptn_locn_2, ptn_data_2);
  run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned");
  verifySetup("SELECT * from " + dbName + ".unptned", unptn_data);
  run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='1')");
  verifySetup("SELECT a from " + dbName + ".ptned WHERE b='1'", ptn_data_1);
  run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='2')");
  verifySetup("SELECT a from " + dbName + ".ptned WHERE b='2'", ptn_data_2);
  run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='1')");
  verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='1'", ptn_data_1);
  run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='2')");
  verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='2'", ptn_data_2);
  run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned3 PARTITION(b=1)");
verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b=1", ptn_data_1);
run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned3 PARTITION(b=2)");
verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b=2", ptn_data_2);
  // At this point, we've set up all the tables and ptns we're going to test drops across
  // Replicate it first, and then we'll drop it on the source.
  advanceDumpDir();
  run("REPL DUMP " + dbName);
  String replDumpLocn = getResult(0, 0);
  String replDumpId = getResult(0, 1, true);
  run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
  printOutput();
  run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
  verifySetup("REPL STATUS " + dbName + "_dupe", new String[] { replDumpId });
  verifySetup("SELECT * from " + dbName + "_dupe.unptned", unptn_data);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='1'", ptn_data_1);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'", ptn_data_2);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='1'", ptn_data_1);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='2'", ptn_data_2);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=1", ptn_data_1);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=2", ptn_data_2);
  // All tables good on destination, drop on source.
  run("DROP TABLE " + dbName + ".unptned");
  run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b='2')");
  run("DROP TABLE " + dbName + ".ptned2");
  run("ALTER TABLE " + dbName + ".ptned3 DROP PARTITION (b=1)");
  verifySetup("SELECT a from " + dbName + ".ptned WHERE b='2'", empty);
  verifySetup("SELECT a from " + dbName + ".ptned", ptn_data_1);
  verifySetup("SELECT a from " + dbName + ".ptned3 WHERE b=1", empty);
  verifySetup("SELECT a from " + dbName + ".ptned3", ptn_data_2);
  // replicate the incremental drops
  advanceDumpDir();
  run("REPL DUMP " + dbName + " FROM " + replDumpId);
  String postDropReplDumpLocn = getResult(0, 0);
  String postDropReplDumpId = getResult(0, 1, true);
  LOG.info("Dumped to {} with id {}->{}", postDropReplDumpLocn, replDumpId, postDropReplDumpId);
  run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'");
  printOutput();
  run("REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'");
  // Verify that the drops were replicated. Depending on what we're testing, a dropped
  // table or partition shows up as an object that no longer exists (and thus a
  // NoSuchObjectException), as a null return, or as a SELECT * that comes back empty.
  Exception e = null;
  try {
    Table tbl = metaStoreClient.getTable(dbName + "_dupe", "unptned");
    assertNull(tbl);
  } catch (TException te) {
    e = te;
  }
  assertNotNull(e);
  assertEquals(NoSuchObjectException.class, e.getClass());
  verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'", empty);
  verifyRun("SELECT a from " + dbName + "_dupe.ptned", ptn_data_1);
  verifyRun("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=1", empty);
  verifyRun("SELECT a from " + dbName + "_dupe.ptned3", ptn_data_2);
  Exception e2 = null;
  try {
    Table tbl = metaStoreClient.getTable(dbName + "_dupe", "ptned2");
    assertNull(tbl);
  } catch (TException te) {
    e2 = te;
  }
  assertNotNull(e2);
  assertEquals(NoSuchObjectException.class, e2.getClass());
}
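The two table-absence checks above can be factored into a small helper. This is a minimal sketch, assuming an IMetaStoreClient handle like the test's metaStoreClient; the class and method names are illustrative, not part of the Hive test code.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.thrift.TException;

final class ReplAssertions {

  /** Returns true only if the metastore reports the table as missing. */
  static boolean tableIsAbsent(IMetaStoreClient client, String db, String tbl) throws TException {
    try {
      client.getTable(db, tbl);
      return false;
    } catch (NoSuchObjectException expected) {
      // The drop was replicated: the destination metastore no longer knows the table.
      return true;
    }
  }

  private ReplAssertions() {
  }
}

With such a helper, each try/catch block in the test could collapse into a single assertion such as assertTrue(tableIsAbsent(metaStoreClient, dbName + "_dupe", "unptned")).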
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project metacat by Netflix.
the class HiveConnectorTableService method create.
/**
 * Create a table.
 *
 * @param requestContext The request context
 * @param tableInfo      The resource metadata
 */
@Override
public void create(@Nonnull @NonNull final ConnectorContext requestContext, @Nonnull @NonNull final TableInfo tableInfo) {
  final QualifiedName tableName = tableInfo.getName();
  try {
    final Table table = hiveMetacatConverters.fromTableInfo(tableInfo);
    updateTable(requestContext, table, tableInfo);
    metacatHiveClient.createTable(table);
  } catch (AlreadyExistsException exception) {
    throw new TableAlreadyExistsException(tableName, exception);
  } catch (MetaException exception) {
    throw new InvalidMetaException(tableName, exception);
  } catch (NoSuchObjectException | InvalidObjectException exception) {
    throw new DatabaseNotFoundException(QualifiedName.ofDatabase(tableName.getCatalogName(), tableName.getDatabaseName()), exception);
  } catch (TException exception) {
    throw new ConnectorException(String.format("Failed create hive table %s", tableName), exception);
  }
}
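The method above is an instance of the exception-translation idiom: each Thrift-level metastore failure is mapped to a connector-level exception so callers never depend on Thrift types. Below is a minimal, generic sketch of that idiom, assuming only the standard Hive metastore exception classes; the Metacat-specific exception types are stood in for by plain RuntimeExceptions, and MetastoreCalls is a hypothetical helper name.

import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.thrift.TException;

final class MetastoreCalls {

  /** A checked metastore call with no return value. */
  interface ThriftCall {
    void run() throws TException;
  }

  /** Runs a metastore call, rethrowing Thrift failures as unchecked exceptions. */
  static void translate(String what, ThriftCall call) {
    try {
      call.run();
    } catch (AlreadyExistsException e) {
      throw new RuntimeException(what + ": object already exists", e);
    } catch (NoSuchObjectException | InvalidObjectException e) {
      throw new RuntimeException(what + ": parent object not found", e);
    } catch (MetaException e) {
      throw new RuntimeException(what + ": invalid metadata", e);
    } catch (TException e) {
      throw new RuntimeException(what + ": metastore call failed", e);
    }
  }

  private MetastoreCalls() {
  }
}

A call site would then read, for example, MetastoreCalls.translate("create " + tableName, () -> metacatHiveClient.createTable(table)).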
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project metacat by Netflix.
the class HiveConnectorPartitionService method getPartitionKeys.
/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext, final QualifiedName tableName, final PartitionListRequest partitionsRequest) {
  final String filterExpression = partitionsRequest.getFilter();
  final List<String> partitionIds = partitionsRequest.getPartitionNames();
  List<String> names = Lists.newArrayList();
  final Pageable pageable = partitionsRequest.getPageable();
  try {
    if (filterExpression != null || (partitionIds != null && !partitionIds.isEmpty())) {
      final Table table = metacatHiveClient.getTableByName(tableName.getDatabaseName(), tableName.getTableName());
      for (Partition partition : getPartitions(tableName, filterExpression, partitionIds, partitionsRequest.getSort(), pageable)) {
        names.add(getNameOfPartition(table, partition));
      }
    } else {
      names = metacatHiveClient.getPartitionNames(tableName.getDatabaseName(), tableName.getTableName());
      return ConnectorUtils.paginate(names, pageable);
    }
  } catch (NoSuchObjectException exception) {
    throw new TableNotFoundException(tableName, exception);
  } catch (MetaException | InvalidObjectException e) {
    throw new InvalidMetaException("Invalid metadata for " + tableName, e);
  } catch (TException e) {
    throw new ConnectorException(String.format("Failed get partitions keys for hive table %s", tableName), e);
  }
  return names;
}
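A minimal sketch of the same lookup at the Thrift-client level, assuming an IMetaStoreClient handle; the class name and the way the table is resolved first are illustrative. Fetching the table before listing names makes a missing table surface as a NoSuchObjectException, which the connector above maps to TableNotFoundException.

import java.util.List;

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.thrift.TException;

final class PartitionKeysExample {

  /** Lists all partition names of a table, failing fast if the table is missing. */
  static List<String> partitionNames(IMetaStoreClient client, String db, String tbl) throws TException {
    try {
      // getTable throws NoSuchObjectException when the table does not exist.
      client.getTable(db, tbl);
      // -1 asks the metastore for all partition names rather than a bounded page.
      return client.listPartitionNames(db, tbl, (short) -1);
    } catch (NoSuchObjectException e) {
      throw new IllegalStateException("Table " + db + "." + tbl + " does not exist", e);
    }
  }

  private PartitionKeysExample() {
  }
}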