Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class HCatClientHMSImpl, method dropPartitions.
@Override
public void dropPartitions(String dbName, String tableName, Map<String, String> partitionSpec, boolean ifExists, boolean deleteData) throws HCatException {
  LOG.info("HCatClient dropPartitions(db=" + dbName + ",table=" + tableName + ", partitionSpec: [" + partitionSpec + "]).");
  try {
    dbName = checkDB(dbName);
    Table table = hmsClient.getTable(dbName, tableName);
    if (hiveConfig.getBoolVar(HiveConf.ConfVars.METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS)) {
      try {
        dropPartitionsUsingExpressions(table, partitionSpec, ifExists, deleteData);
      } catch (SemanticException parseFailure) {
        LOG.warn("Could not push down partition-specification to back-end, for dropPartitions(). Resorting to iteration.", parseFailure);
        dropPartitionsIteratively(dbName, tableName, partitionSpec, ifExists, deleteData);
      }
    } else {
      // Not using expressions.
      dropPartitionsIteratively(dbName, tableName, partitionSpec, ifExists, deleteData);
    }
  } catch (NoSuchObjectException e) {
    throw new ObjectNotFoundException("NoSuchObjectException while dropping partition. " + "Either db(" + dbName + ") or table(" + tableName + ") missing.", e);
  } catch (MetaException e) {
    throw new HCatException("MetaException while dropping partition.", e);
  } catch (TException e) {
    throw new ConnectionFailureException("TException while dropping partition.", e);
  }
}
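For context, a minimal caller-side sketch of invoking this drop through the public HCatClient API might look like the following. The configuration, database, table, and partition-key names here are illustrative assumptions, not values taken from the implementation above.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.hcatalog.api.HCatClient;

public class DropPartitionsExample {
  public static void main(String[] args) throws Exception {
    // Assumes hive-site.xml (with the metastore URI) is available on the classpath.
    HCatClient client = HCatClient.create(new HiveConf());
    try {
      // Hypothetical database/table/partition names, used purely for illustration.
      Map<String, String> partitionSpec = new HashMap<String, String>();
      partitionSpec.put("dt", "2024-01-01");
      // ifExists = true: do not fail if the partition has already been dropped.
      client.dropPartitions("default", "web_logs", partitionSpec, true);
    } finally {
      client.close();
    }
  }
}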
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class HCatClientHMSImpl, method listPartitionsByFilter.
@Override
public List<HCatPartition> listPartitionsByFilter(String dbName, String tblName, String filter) throws HCatException {
  List<HCatPartition> hcatPtns = new ArrayList<HCatPartition>();
  try {
    HCatTable table = getTable(dbName, tblName);
    List<Partition> hivePtns = hmsClient.listPartitionsByFilter(table.getDbName(), table.getTableName(), filter, (short) -1);
    for (Partition ptn : hivePtns) {
      hcatPtns.add(new HCatPartition(table, ptn));
    }
  } catch (MetaException e) {
    throw new HCatException("MetaException while fetching partitions.", e);
  } catch (NoSuchObjectException e) {
    throw new ObjectNotFoundException("NoSuchObjectException while fetching partitions.", e);
  } catch (TException e) {
    throw new ConnectionFailureException("TException while fetching partitions.", e);
  }
  return hcatPtns;
}
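As a usage note, the filter string is passed through to the metastore's partition-filter expression syntax. A minimal, illustrative sketch of calling the method through the public HCatClient API follows; the database, table, partition key, and filter value are assumptions for the example only.

import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.api.HCatPartition;

public class ListPartitionsByFilterExample {
  public static void main(String[] args) throws Exception {
    HCatClient client = HCatClient.create(new HiveConf());
    try {
      // Hypothetical table and filter; the filter uses the metastore's
      // partition-filter grammar (e.g. string equality on a partition key).
      List<HCatPartition> partitions =
          client.listPartitionsByFilter("default", "web_logs", "dt = \"2024-01-01\"");
      for (HCatPartition partition : partitions) {
        System.out.println(partition.getValues() + " -> " + partition.getLocation());
      }
    } finally {
      client.close();
    }
  }
}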
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class HCatClientHMSImpl, method getPartitions.
@Override
public List<HCatPartition> getPartitions(String dbName, String tblName) throws HCatException {
  List<HCatPartition> hcatPtns = new ArrayList<HCatPartition>();
  try {
    HCatTable hcatTable = getTable(dbName, tblName);
    List<Partition> hivePtns = hmsClient.listPartitions(checkDB(dbName), tblName, (short) -1);
    for (Partition ptn : hivePtns) {
      hcatPtns.add(new HCatPartition(hcatTable, ptn));
    }
  } catch (NoSuchObjectException e) {
    throw new ObjectNotFoundException("NoSuchObjectException while retrieving partition.", e);
  } catch (MetaException e) {
    throw new HCatException("MetaException while retrieving partition.", e);
  } catch (TException e) {
    throw new ConnectionFailureException("TException while retrieving partition.", e);
  }
  return hcatPtns;
}
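Because the implementation wraps NoSuchObjectException in ObjectNotFoundException, a caller can distinguish "missing database or table" from other failures. A short, illustrative sketch (the db/table names are assumptions) might be:

import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.api.HCatPartition;
import org.apache.hive.hcatalog.api.ObjectNotFoundException;

public class GetPartitionsExample {
  public static void main(String[] args) throws Exception {
    HCatClient client = HCatClient.create(new HiveConf());
    try {
      // Hypothetical database and table names.
      List<HCatPartition> partitions = client.getPartitions("default", "web_logs");
      System.out.println("Partition count: " + partitions.size());
    } catch (ObjectNotFoundException e) {
      // Raised by the implementation above when the db or table is missing.
      System.err.println("Database or table does not exist: " + e.getMessage());
    } finally {
      client.close();
    }
  }
}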
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class TestReplicationScenarios, method testDrops.
@Test
public void testDrops() throws IOException {
  String testName = "drops";
  LOG.info("Testing " + testName);
  String dbName = testName + "_" + tid;
  run("CREATE DATABASE " + dbName);
  run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
  run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE");
  run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE");
  run("CREATE TABLE " + dbName + ".ptned3(a string) partitioned by (b int) STORED AS TEXTFILE");
  String[] unptn_data = new String[] { "eleven", "twelve" };
  String[] ptn_data_1 = new String[] { "thirteen", "fourteen", "fifteen" };
  String[] ptn_data_2 = new String[] { "fifteen", "sixteen", "seventeen" };
  String[] empty = new String[] {};
  String unptn_locn = new Path(TEST_PATH, testName + "_unptn").toUri().getPath();
  String ptn_locn_1 = new Path(TEST_PATH, testName + "_ptn1").toUri().getPath();
  String ptn_locn_2 = new Path(TEST_PATH, testName + "_ptn2").toUri().getPath();
  createTestDataFile(unptn_locn, unptn_data);
  createTestDataFile(ptn_locn_1, ptn_data_1);
  createTestDataFile(ptn_locn_2, ptn_data_2);
  run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned");
  verifySetup("SELECT * from " + dbName + ".unptned", unptn_data);
  run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='1')");
  verifySetup("SELECT a from " + dbName + ".ptned WHERE b='1'", ptn_data_1);
  run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='2')");
  verifySetup("SELECT a from " + dbName + ".ptned WHERE b='2'", ptn_data_2);
  run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='1')");
  verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='1'", ptn_data_1);
  run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='2')");
  verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='2'", ptn_data_2);
  run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned3 PARTITION(b=1)");
  verifySetup("SELECT a from " + dbName + ".ptned3 WHERE b=1", ptn_data_1);
  run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned3 PARTITION(b=2)");
  verifySetup("SELECT a from " + dbName + ".ptned3 WHERE b=2", ptn_data_2);
  // At this point, we've set up all the tables and ptns we're going to test drops across.
  // Replicate it first, and then we'll drop it on the source.
  advanceDumpDir();
  run("REPL DUMP " + dbName);
  String replDumpLocn = getResult(0, 0);
  String replDumpId = getResult(0, 1, true);
  run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
  printOutput();
  run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
  verifySetup("REPL STATUS " + dbName + "_dupe", new String[] { replDumpId });
  verifySetup("SELECT * from " + dbName + "_dupe.unptned", unptn_data);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='1'", ptn_data_1);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'", ptn_data_2);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='1'", ptn_data_1);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='2'", ptn_data_2);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=1", ptn_data_1);
  verifySetup("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=2", ptn_data_2);
  // All tables good on destination, drop on source.
  run("DROP TABLE " + dbName + ".unptned");
  run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b='2')");
  run("DROP TABLE " + dbName + ".ptned2");
  run("ALTER TABLE " + dbName + ".ptned3 DROP PARTITION (b=1)");
  verifySetup("SELECT a from " + dbName + ".ptned WHERE b='2'", empty);
  verifySetup("SELECT a from " + dbName + ".ptned", ptn_data_1);
  verifySetup("SELECT a from " + dbName + ".ptned3 WHERE b=1", empty);
  verifySetup("SELECT a from " + dbName + ".ptned3", ptn_data_2);
  // Replicate the incremental drops.
  advanceDumpDir();
  run("REPL DUMP " + dbName + " FROM " + replDumpId);
  String postDropReplDumpLocn = getResult(0, 0);
  String postDropReplDumpId = getResult(0, 1, true);
  LOG.info("Dumped to {} with id {}->{}", postDropReplDumpLocn, replDumpId, postDropReplDumpId);
  run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'");
  printOutput();
  run("REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'");
  // Verify that the drops were replicated. Depending on what we're testing, this shows up
  // either as tables/ptns not existing (and thus throwing a NoSuchObjectException), as
  // getTable() returning null, or as SELECT * returning an empty result.
  Exception e = null;
  try {
    Table tbl = metaStoreClient.getTable(dbName + "_dupe", "unptned");
    assertNull(tbl);
  } catch (TException te) {
    e = te;
  }
  assertNotNull(e);
  assertEquals(NoSuchObjectException.class, e.getClass());
  verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'", empty);
  verifyRun("SELECT a from " + dbName + "_dupe.ptned", ptn_data_1);
  verifyRun("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=1", empty);
  verifyRun("SELECT a from " + dbName + "_dupe.ptned3", ptn_data_2);
  Exception e2 = null;
  try {
    Table tbl = metaStoreClient.getTable(dbName + "_dupe", "ptned2");
    assertNull(tbl);
  } catch (TException te) {
    e2 = te;
  }
  assertNotNull(e2);
  assertEquals(NoSuchObjectException.class, e2.getClass());
}
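The existence checks in this test hinge on metaStoreClient.getTable() throwing NoSuchObjectException once the table has been dropped on the replica. A small helper capturing that pattern might look like the sketch below; the class and method names are hypothetical, not part of the Hive test code.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.thrift.TException;

public final class TableChecks {
  private TableChecks() {
  }

  // Returns true only if getTable() fails specifically with NoSuchObjectException,
  // mirroring the assertion pattern used in the test above.
  public static boolean isTableDropped(IMetaStoreClient client, String db, String table)
      throws TException {
    try {
      client.getTable(db, table);
      return false;
    } catch (NoSuchObjectException expected) {
      return true;
    }
  }
}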
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class ObjectStore, method dropPartitionCommon.
/**
 * Drop an MPartition and cascade deletes (e.g., delete partition privilege grants,
 * drop the storage descriptor cleanly, etc.)
 * @param part - the MPartition to drop
 * @return whether the transaction committed successfully
 * @throws InvalidInputException
 * @throws InvalidObjectException
 * @throws MetaException
 * @throws NoSuchObjectException
 */
private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
  boolean success = false;
  try {
    openTransaction();
    if (part != null) {
      List<MFieldSchema> schemas = part.getTable().getPartitionKeys();
      List<String> colNames = new ArrayList<String>();
      for (MFieldSchema col : schemas) {
        colNames.add(col.getName());
      }
      String partName = FileUtils.makePartName(colNames, part.getValues());
      List<MPartitionPrivilege> partGrants = listPartitionGrants(part.getTable().getDatabase().getName(), part.getTable().getTableName(), Lists.newArrayList(partName));
      if (partGrants != null && partGrants.size() > 0) {
        pm.deletePersistentAll(partGrants);
      }
      List<MPartitionColumnPrivilege> partColumnGrants = listPartitionAllColumnGrants(part.getTable().getDatabase().getName(), part.getTable().getTableName(), Lists.newArrayList(partName));
      if (partColumnGrants != null && partColumnGrants.size() > 0) {
        pm.deletePersistentAll(partColumnGrants);
      }
      String dbName = part.getTable().getDatabase().getName();
      String tableName = part.getTable().getTableName();
      // Delete partition-level column stats if they exist.
      try {
        deletePartitionColumnStatistics(dbName, tableName, partName, part.getValues(), null);
      } catch (NoSuchObjectException e) {
        LOG.info("No column statistics records found to delete");
      }
      preDropStorageDescriptor(part.getSd());
      pm.deletePersistent(part);
    }
    success = commitTransaction();
  } finally {
    if (!success) {
      rollbackTransaction();
    }
  }
  return success;
}
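The method follows the ObjectStore idiom of bracketing every mutation with openTransaction()/commitTransaction() and rolling back in the finally block if the commit never ran. Below is a generic, self-contained sketch of that bracketing; it is not Hive code, and the Store interface and method names are placeholders for illustration only.

public class TransactionBracketSketch {

  // Placeholder for a store with RawStore-style transaction methods.
  interface Store {
    void openTransaction();
    boolean commitTransaction();
    void rollbackTransaction();
  }

  // Runs 'work' inside a transaction; rolls back unless the commit succeeded,
  // mirroring the success/finally pattern in dropPartitionCommon() above.
  static boolean runInTransaction(Store store, Runnable work) {
    boolean success = false;
    try {
      store.openTransaction();
      work.run();
      success = store.commitTransaction();
    } finally {
      if (!success) {
        store.rollbackTransaction();
      }
    }
    return success;
  }
}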