Use of org.apache.hadoop.hive.metastore.api.UnknownTableException in project hive (by apache).
From the class ObjectStore, method markPartitionForEvent:
@Override
public Table markPartitionForEvent(String dbName, String tblName, Map<String, String> partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException {
  LOG.debug("Begin executing markPartitionForEvent");
  boolean committed = false;
  Table table = null;
  try {
    openTransaction();
    // Validate the db/table pair before recording the event; a missing
    // table is reported to the caller as UnknownTableException.
    table = getTable(dbName, tblName);
    if (table == null) {
      throw new UnknownTableException("Table: " + tblName + " is not found.");
    }
    // Persist a marker row for this (db, table, partition, event-type) tuple.
    MPartitionEvent event = new MPartitionEvent(dbName, tblName, getPartitionStr(table, partName), evtType.getValue());
    pm.makePersistent(event);
    committed = commitTransaction();
    LOG.debug("Done executing markPartitionForEvent");
  } finally {
    // Roll back on any failure path (exception thrown or commit refused).
    if (!committed) {
      rollbackTransaction();
    }
  }
  return table;
}
Use of org.apache.hadoop.hive.metastore.api.UnknownTableException in project hive (by apache).
From the class ObjectStore, method isPartitionMarkedForEvent:
/**
 * Checks whether a partition of the given table has been marked with the
 * given event type (i.e. whether a matching MPartitionEvent row exists).
 *
 * @param dbName  database containing the table
 * @param tblName table whose partition is queried
 * @param partName partition key/value spec; validated against the table's
 *                 partition columns by {@code getPartitionStr}
 * @param evtType event type to look for
 * @return true iff at least one matching partition event is recorded
 * @throws UnknownTableException if the table does not exist
 */
@Override
public boolean isPartitionMarkedForEvent(String dbName, String tblName, Map<String, String> partName, PartitionEventType evtType) throws UnknownTableException, MetaException, InvalidPartitionException, UnknownPartitionException {
  boolean success = false;
  Query query = null;
  try {
    LOG.debug("Begin Executing isPartitionMarkedForEvent");
    openTransaction();
    query = pm.newQuery(MPartitionEvent.class, "dbName == t1 && tblName == t2 && partName == t3 && eventType == t4");
    query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4");
    // Make sure dbName and tblName are valid.
    Table tbl = getTable(dbName, tblName);
    if (null == tbl) {
      throw new UnknownTableException("Table: " + tblName + " is not found.");
    }
    // JDO query results are untyped; the cast is safe because the query is
    // declared over MPartitionEvent.class.
    @SuppressWarnings("unchecked")
    Collection<MPartitionEvent> partEvents = (Collection<MPartitionEvent>) query.executeWithArray(dbName, tblName, getPartitionStr(tbl, partName), evtType.getValue());
    pm.retrieveAll(partEvents);
    success = commitTransaction();
    LOG.debug("Done executing isPartitionMarkedForEvent");
    // Fixed: redundant "? true : false" ternary replaced with the boolean
    // expression itself; behavior is unchanged.
    return partEvents != null && !partEvents.isEmpty();
  } finally {
    if (!success) {
      rollbackTransaction();
    }
    // Always release JDO query resources, success or failure.
    if (query != null) {
      query.closeAll();
    }
  }
}
Use of org.apache.hadoop.hive.metastore.api.UnknownTableException in project hive (by apache).
From the class TestMarkPartition, method testMarkingPartitionSet:
public void testMarkingPartitionSet() throws CommandNeedRetryException, MetaException, TException, NoSuchObjectException, UnknownDBException, UnknownTableException, InvalidPartitionException, UnknownPartitionException, InterruptedException {
  HiveMetaStoreClient client = new HiveMetaStoreClient(hiveConf);
  driver = new Driver(hiveConf);

  // Build a fresh database with one partitioned table and one partition.
  driver.run("drop database if exists hive2215 cascade");
  driver.run("create database hive2215");
  driver.run("use hive2215");
  driver.run("drop table if exists tmptbl");
  driver.run("create table tmptbl (a string) partitioned by (b string)");
  driver.run("alter table tmptbl add partition (b='2011')");

  Map<String, String> partSpec = new HashMap<String, String>();
  partSpec.put("b", "'2011'");

  // Marking the partition should make it visible, then expire after a while.
  client.markPartitionForEvent("hive2215", "tmptbl", partSpec, PartitionEventType.LOAD_DONE);
  assert client.isPartitionMarkedForEvent("hive2215", "tmptbl", partSpec, PartitionEventType.LOAD_DONE);
  Thread.sleep(10000);
  assert !client.isPartitionMarkedForEvent("hive2215", "tmptbl", partSpec, PartitionEventType.LOAD_DONE);

  // A never-marked partition value reports false.
  partSpec.put("b", "'2012'");
  assert !client.isPartitionMarkedForEvent("hive2215", "tmptbl", partSpec, PartitionEventType.LOAD_DONE);

  // Both operations must reject an unknown table.
  try {
    client.markPartitionForEvent("hive2215", "tmptbl2", partSpec, PartitionEventType.LOAD_DONE);
    assert false;
  } catch (Exception e) {
    assert e instanceof UnknownTableException;
  }
  try {
    client.isPartitionMarkedForEvent("hive2215", "tmptbl2", partSpec, PartitionEventType.LOAD_DONE);
    assert false;
  } catch (Exception e) {
    assert e instanceof UnknownTableException;
  }

  // A key that is not a partition column ("a") must be rejected.
  partSpec.put("a", "'2012'");
  try {
    client.isPartitionMarkedForEvent("hive2215", "tmptbl", partSpec, PartitionEventType.LOAD_DONE);
    assert false;
  } catch (Exception e) {
    assert e instanceof InvalidPartitionException;
  }
}
Use of org.apache.hadoop.hive.metastore.api.UnknownTableException in project drill (by apache).
From the class DrillHiveMetaStoreClient, method getHiveReadEntryHelper:
/**
 * Helper method which gets table metadata and its partitions from the Hive
 * metastore. Each metastore call is retried once after a reconnect if it
 * fails with a transport-level {@link TException}; definitive metastore
 * answers ({@link MetaException}, {@link NoSuchObjectException}) are
 * rethrown immediately since a retry cannot change them.
 *
 * @param mClient   metastore client to query (may be reconnected in place)
 * @param dbName    database name
 * @param tableName table name
 * @return a read entry wrapping the table and its partitions (partition
 *         list is {@code null} when the table has none)
 * @throws UnknownTableException if the metastore returns no such table
 * @throws TException on unrecoverable metastore communication failure
 */
protected static HiveReadEntry getHiveReadEntryHelper(final IMetaStoreClient mClient, final String dbName, final String tableName) throws TException {
  Table table = null;
  try {
    table = mClient.getTable(dbName, tableName);
  } catch (MetaException | NoSuchObjectException e) {
    throw e;
  } catch (TException e) {
    logger.warn("Failure while attempting to get hive table. Retries once. ", e);
    closeAndReconnect(mClient);
    table = mClient.getTable(dbName, tableName);
  }
  if (table == null) {
    throw new UnknownTableException(String.format("Unable to find table '%s'.", tableName));
  }

  List<Partition> partitions;
  try {
    partitions = mClient.listPartitions(dbName, tableName, (short) -1);
  } catch (MetaException | NoSuchObjectException e) {
    // Catch order unified with the getTable retry above for consistency.
    throw e;
  } catch (TException e) {
    logger.warn("Failure while attempting to get hive partitions. Retries once. ", e);
    closeAndReconnect(mClient);
    partitions = mClient.listPartitions(dbName, tableName, (short) -1);
  }

  List<HiveTableWrapper.HivePartitionWrapper> hivePartitionWrappers = Lists.newArrayList();
  HiveTableWithColumnCache hiveTable = new HiveTableWithColumnCache(table, new ColumnListsCache(table));
  for (Partition partition : partitions) {
    hivePartitionWrappers.add(createPartitionWithSpecColumns(hiveTable, partition));
  }
  if (hivePartitionWrappers.isEmpty()) {
    hivePartitionWrappers = null;
  }
  return new HiveReadEntry(new HiveTableWrapper(hiveTable), hivePartitionWrappers);
}

/**
 * Closes the current metastore connection (best effort — a close failure is
 * logged, not propagated) and reconnects the client. Extracted to remove the
 * duplicated retry plumbing in {@link #getHiveReadEntryHelper}.
 */
private static void closeAndReconnect(final IMetaStoreClient mClient) throws TException {
  try {
    mClient.close();
  } catch (Exception ex) {
    logger.warn("Failure while attempting to close existing hive metastore connection. May leak connection.", ex);
  }
  mClient.reconnect();
}
Aggregations