Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in the Apache Hive project.
In class TestStreaming, method testAddPartition:
/**
 * Verifies that opening a streaming connection with the create-partition flag
 * set to true creates the target partition in the metastore.
 */
@Test
public void testAddPartition() throws Exception {
  List<String> newPartVals = new ArrayList<String>(2);
  newPartVals.add(PART1_CONTINENT);
  newPartVals.add("Nepal");
  HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, dbName, tblName, newPartVals);

  // Ensure partition is absent before the connection is made.
  try {
    msClient.getPartition(endPt.database, endPt.table, endPt.partitionVals);
    // Use Assert.fail rather than assertTrue(msg, false) to state intent directly.
    Assert.fail("Partition already exists");
  } catch (NoSuchObjectException e) {
    // expected: the partition must not exist yet
  }

  // Create partition: newConnection(true, ...) is expected to add the missing
  // partition as a side effect of opening the connection.
  Assert.assertNotNull(endPt.newConnection(true, "UT_" + Thread.currentThread().getName()));

  // Ensure partition is now present.
  Partition p = msClient.getPartition(endPt.database, endPt.table, endPt.partitionVals);
  Assert.assertNotNull("Did not find added partition", p);
}
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in the Apache Hive project.
In class MutatorClient, method checkTable:
/**
 * Validates the table backing the given {@link AcidTable}: it must exist, and a
 * SINK table must additionally be transactional and bucketed. On success the
 * fetched metastore Table is cached on the AcidTable.
 *
 * @param metaStoreClient client used to look up the table
 * @param acidTable descriptor of the table to validate
 * @throws ConnectionException if the table is missing, fails a SINK check, or
 *         the metastore cannot be reached
 */
private void checkTable(IMetaStoreClient metaStoreClient, AcidTable acidTable) throws ConnectionException {
  try {
    LOG.debug("Checking table {}.", acidTable.getQualifiedName());
    Table table = metaStoreClient.getTable(acidTable.getDatabaseName(), acidTable.getTableName());
    if (acidTable.getTableType() == TableType.SINK) {
      // Streaming mutations require an ACID (transactional) table.
      Map<String, String> tableParameters = table.getParameters();
      boolean transactional = Boolean.parseBoolean(tableParameters.get(TRANSACTIONAL_PARAM_KEY));
      if (!transactional) {
        throw new ConnectionException("Cannot stream to table that is not transactional: '" + acidTable.getQualifiedName() + "'.");
      }
      // A positive bucket count is required; unbucketed tables are rejected.
      int bucketCount = table.getSd().getNumBuckets();
      LOG.debug("Table {} has {} buckets.", acidTable.getQualifiedName(), bucketCount);
      if (bucketCount <= 0) {
        throw new ConnectionException("Cannot stream to table that has not been bucketed: '" + acidTable.getQualifiedName() + "'.");
      }
      String format = table.getSd().getOutputFormat();
      LOG.debug("Table {} has {} OutputFormat.", acidTable.getQualifiedName(), format);
      acidTable.setTable(table);
    }
  } catch (NoSuchObjectException e) {
    throw new ConnectionException("Invalid table '" + acidTable.getQualifiedName() + "'", e);
  } catch (TException e) {
    throw new ConnectionException("Error communicating with the meta store", e);
  }
  LOG.debug("Table {} OK.", acidTable.getQualifiedName());
}
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in the Apache Hive project.
In class RetryingHMSHandler, method invokeInternal:
/**
 * Invokes the wrapped HMSHandler method reflectively, retrying on transient
 * JDO/datastore failures. Retries are bounded by HMSHANDLERATTEMPTS with
 * HMSHANDLERINTERVAL between attempts; each invocation runs under a Deadline
 * timer sized by METASTORE_CLIENT_SOCKET_TIMEOUT. Exceptions wrapped by
 * reflection are unwrapped so callers see the real cause.
 *
 * @param proxy  the dynamic proxy instance (unused in the invocation itself)
 * @param method the HMSHandler method to call
 * @param args   arguments to pass through to the method
 * @return the method's result plus the number of retries that were needed
 * @throws Throwable the unwrapped cause for non-retryable failures, or a
 *         MetaException wrapping the last error once the retry limit is hit
 */
public Result invokeInternal(final Object proxy, final Method method, final Object[] args) throws Throwable {
boolean gotNewConnectUrl = false;
boolean reloadConf = HiveConf.getBoolVar(origConf, HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF);
long retryInterval = HiveConf.getTimeVar(origConf, HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS);
int retryLimit = HiveConf.getIntVar(origConf, HiveConf.ConfVars.HMSHANDLERATTEMPTS);
long timeout = HiveConf.getTimeVar(origConf, HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
Deadline.registerIfNot(timeout);
if (reloadConf) {
// Forced reload: refresh the connection URL before the first attempt.
MetaStoreInit.updateConnectionURL(origConf, getActiveConf(), null, metaStoreInitData);
}
int retryCount = 0;
Throwable caughtException = null;
while (true) {
try {
if (reloadConf || gotNewConnectUrl) {
// Push the (possibly updated) config into the handler before invoking.
baseHandler.setConf(getActiveConf());
}
Object object = null;
boolean isStarted = Deadline.startTimer(method.getName());
try {
object = method.invoke(baseHandler, args);
} finally {
// Only stop the timer if this call actually started it.
if (isStarted) {
Deadline.stopTimer();
}
}
// Success: return the result along with how many retries it took.
return new Result(object, retryCount);
} catch (javax.jdo.JDOException e) {
// Direct JDO failure: treated as transient, fall through to retry logic.
caughtException = e;
} catch (UndeclaredThrowableException e) {
if (e.getCause() != null) {
if (e.getCause() instanceof javax.jdo.JDOException) {
// Due to reflection, the jdo exception is wrapped in
// invocationTargetException
caughtException = e.getCause();
} else if (e.getCause() instanceof MetaException && e.getCause().getCause() != null && e.getCause().getCause() instanceof javax.jdo.JDOException) {
// The JDOException may be wrapped further in a MetaException
caughtException = e.getCause().getCause();
} else {
// Not a retryable wrapper: surface the real cause to the caller.
LOG.error(ExceptionUtils.getStackTrace(e.getCause()));
throw e.getCause();
}
} else {
// No cause available; rethrow the wrapper itself.
LOG.error(ExceptionUtils.getStackTrace(e));
throw e;
}
} catch (InvocationTargetException e) {
if (e.getCause() instanceof javax.jdo.JDOException) {
// Due to reflection, the jdo exception is wrapped in
// invocationTargetException
caughtException = e.getCause();
} else if (e.getCause() instanceof NoSuchObjectException || e.getTargetException().getCause() instanceof NoSuchObjectException) {
String methodName = method.getName();
// Missing-object results from plain lookups (get_database/get_table/
// get_partition/get_function) are routine, so skip the noisy stack trace
// for those; log it for every other method.
if (!methodName.startsWith("get_database") && !methodName.startsWith("get_table") && !methodName.startsWith("get_partition") && !methodName.startsWith("get_function")) {
LOG.error(ExceptionUtils.getStackTrace(e.getCause()));
}
throw e.getCause();
} else if (e.getCause() instanceof MetaException && e.getCause().getCause() != null) {
if (e.getCause().getCause() instanceof javax.jdo.JDOException || e.getCause().getCause() instanceof NucleusException) {
// The JDOException or the Nucleus Exception may be wrapped further in a MetaException
caughtException = e.getCause().getCause();
} else if (e.getCause().getCause() instanceof DeadlineException) {
// The Deadline Exception needs no retry and be thrown immediately.
Deadline.clear();
LOG.error("Error happens in method " + method.getName() + ": " + ExceptionUtils.getStackTrace(e.getCause()));
throw e.getCause();
} else {
LOG.error(ExceptionUtils.getStackTrace(e.getCause()));
throw e.getCause();
}
} else {
// Any other invocation failure is not retryable; rethrow its cause.
LOG.error(ExceptionUtils.getStackTrace(e.getCause()));
throw e.getCause();
}
}
// Reaching here means a retryable exception was recorded above.
if (retryCount >= retryLimit) {
// Out of attempts: wrap the last failure in a MetaException, keeping it as cause.
LOG.error("HMSHandler Fatal error: " + ExceptionUtils.getStackTrace(caughtException));
MetaException me = new MetaException(caughtException.getMessage());
me.initCause(caughtException);
throw me;
}
assert (retryInterval >= 0);
retryCount++;
LOG.error(String.format("Retrying HMSHandler after %d ms (attempt %d of %d)", retryInterval, retryCount, retryLimit) + " with error: " + ExceptionUtils.getStackTrace(caughtException));
Thread.sleep(retryInterval);
// If we have a connection error, the JDO connection URL hook might
// provide us with a new URL to access the datastore.
String lastUrl = MetaStoreInit.getConnectionURL(getActiveConf());
gotNewConnectUrl = MetaStoreInit.updateConnectionURL(origConf, getActiveConf(), lastUrl, metaStoreInitData);
}
}
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in the Apache Hive project.
In class ObjectStore, method getDatabase:
/**
 * Looks up a database by name, translating any failure into a
 * NoSuchObjectException as required by the interface signature.
 *
 * @param name the database name to fetch
 * @return the Database object, never null
 * @throws NoSuchObjectException if the database is missing or the lookup failed
 */
@Override
public Database getDatabase(String name) throws NoSuchObjectException {
  Database database = null;
  MetaException metaEx = null;
  try {
    database = getDatabaseInternal(name);
  } catch (MetaException e) {
    // The signature restricts us to NSOE, and NSOE is a flat exception, so the
    // MetaException cannot be attached as a cause. Hold on to it here: it is
    // very likely just an NSOE in disguise, so below we log it and fold its
    // message into the NSOE we throw instead of losing the information.
    metaEx = e;
  }
  if (database != null) {
    return database;
  }
  LOG.warn("Failed to get database " + name + ", returning NoSuchObjectException", metaEx);
  throw new NoSuchObjectException(name + (metaEx == null ? "" : (": " + metaEx.getMessage())));
}
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in the Apache Hive project.
In class ObjectStore, method getMDatabase:
/**
 * Fetches the MDatabase model object for the given (normalized) database name
 * via a unique JDO query run inside its own transaction.
 *
 * @param name the database name; normalized before querying
 * @return the matching MDatabase, never null
 * @throws NoSuchObjectException if no database with that name exists
 */
@SuppressWarnings("nls")
private MDatabase getMDatabase(String name) throws NoSuchObjectException {
  MDatabase database = null;
  boolean success = false;
  Query query = null;
  try {
    openTransaction();
    name = HiveStringUtils.normalizeIdentifier(name);
    // Unique query: execute() returns the single matching MDatabase or null.
    query = pm.newQuery(MDatabase.class, "name == dbname");
    query.declareParameters("java.lang.String dbname");
    query.setUnique(true);
    database = (MDatabase) query.execute(name);
    // Load the object's fields before the transaction/query are closed.
    pm.retrieve(database);
    success = commitTransaction();
  } finally {
    // Roll back if the commit did not happen, then always release the query.
    if (!success) {
      rollbackTransaction();
    }
    if (query != null) {
      query.closeAll();
    }
  }
  if (database == null) {
    throw new NoSuchObjectException("There is no database named " + name);
  }
  return database;
}
Aggregations