Use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.
The class SQLOperation, method runQuery.
private void runQuery() throws HiveSQLException {
  try {
    OperationState opState = getState();
    // Operation may have been cancelled by another thread
    if (opState.isTerminal()) {
      log.info("Not running the query. Operation is already in terminal state: " + opState + ", perhaps cancelled due to query timeout or by another thread.");
      return;
    }
    // In Hive server mode, we are not able to retry in the FetchTask
    // case, when calling fetch queries since execute() has returned.
    // For now, we disable the test attempts.
    driver.run();
  } catch (Throwable e) {
    /**
     * If the operation was cancelled by another thread, or the execution timed out, Driver#run
     * may return a non-zero response code. We will simply return if the operation state is
     * CANCELED, TIMEDOUT, CLOSED or FINISHED, otherwise throw an exception.
     */
    if (getState().isTerminal()) {
      log.warn("Ignore exception in terminal state: {}", getState(), e);
      return;
    }
    setState(OperationState.ERROR);
    if (e instanceof CommandProcessorException) {
      throw toSQLException("Error while compiling statement", (CommandProcessorException) e);
    } else if (e instanceof HiveSQLException) {
      throw (HiveSQLException) e;
    } else if (e instanceof OutOfMemoryError) {
      throw (OutOfMemoryError) e;
    } else {
      throw new HiveSQLException("Error running query", e);
    }
  }
  setState(OperationState.FINISHED);
}
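The toSQLException helper that runQuery calls is not shown on this page; it converts the driver-level CommandProcessorException into the HiveSQLException surfaced to Thrift clients. Below is a hypothetical sketch of such a mapping, using only the accessors visible in these snippets (getMessage() and getResponseCode()) and the HiveSQLException(String, Throwable) constructor used above; it is not the real SQLOperation implementation.

import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
import org.apache.hive.service.cli.HiveSQLException;

// Hypothetical helper, NOT the actual SQLOperation#toSQLException implementation.
final class CommandErrorMapper {
  static HiveSQLException toSQLException(String phase, CommandProcessorException e) {
    // Keep the driver's numeric response code in the message so callers can
    // tell compile-time failures apart from runtime ones.
    return new HiveSQLException(phase + " (driver response code " + e.getResponseCode() + "): " + e.getMessage(), e);
  }
}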
Use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.
The class SQLOperation, method prepare.
/**
* Compile the query and extract metadata
*
* @throws HiveSQLException
*/
private void prepare(QueryState queryState) throws HiveSQLException {
  setState(OperationState.RUNNING);
  try {
    driver = DriverFactory.newDriver(queryState, queryInfo);
    // queryTimeout == 0 means no timeout
    if (queryTimeout > 0L) {
      timeoutExecutor = Executors.newSingleThreadScheduledExecutor();
      timeoutExecutor.schedule(() -> {
        try {
          final String queryId = queryState.getQueryId();
          log.info("Query timed out after: {} seconds. Cancelling the execution now: {}", queryTimeout, queryId);
          SQLOperation.this.cancel(OperationState.TIMEDOUT);
        } catch (HiveSQLException e) {
          log.error("Error cancelling the query after timeout: {} seconds", queryTimeout, e);
        }
        return null;
      }, queryTimeout, TimeUnit.SECONDS);
    }
    queryInfo.setQueryDisplay(driver.getQueryDisplay());
    if (operationLog != null) {
      queryInfo.setOperationLogLocation(operationLog.toString());
    }
    // Set the operation handle information in Driver so that Thrift API users can use the
    // operation handle they receive to look up query information in YARN ATS. The id is also
    // used in logging, so remove the Base64 padding for better display.
    String guid64 = Base64.getUrlEncoder().withoutPadding().encodeToString(getHandle().getHandleIdentifier().toTHandleIdentifier().getGuid());
    driver.setOperationId(guid64);
    // In Hive server mode, we are not able to retry in the FetchTask
    // case, when calling fetch queries since execute() has returned.
    // For now, we disable the test attempts.
    driver.compileAndRespond(statement);
    if (queryState.getQueryTag() != null && queryState.getQueryId() != null) {
      parentSession.updateQueryTag(queryState.getQueryId(), queryState.getQueryTag());
    }
    setHasResultSet(driver.hasResultSet());
  } catch (CommandProcessorException e) {
    setState(OperationState.ERROR);
    throw toSQLException("Error while compiling statement", e);
  } catch (Throwable e) {
    setState(OperationState.ERROR);
    if (e instanceof OutOfMemoryError) {
      throw (OutOfMemoryError) e;
    }
    throw new HiveSQLException("Error running query", e);
  }
}
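The timeout branch in prepare boils down to arming a one-shot cancellation task on a single-threaded scheduler. The standalone sketch below strips out the Hive specifics (QueryTimeoutDemo, armTimeout and the cancelQuery callback are illustrative names, not Hive APIs); SQLOperation itself cancels via SQLOperation.this.cancel(OperationState.TIMEDOUT) rather than a plain Runnable.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Standalone illustration of the one-shot timeout pattern used in prepare().
class QueryTimeoutDemo {
  private final ScheduledExecutorService timeoutExecutor = Executors.newSingleThreadScheduledExecutor();

  // Arm a cancellation task, mirroring the queryTimeout > 0 branch above.
  void armTimeout(final String queryId, long timeoutSeconds, final Runnable cancelQuery) {
    if (timeoutSeconds <= 0) {
      return; // queryTimeout == 0 means no timeout
    }
    timeoutExecutor.schedule(() -> {
      System.out.println("Query timed out, cancelling: " + queryId);
      cancelQuery.run();
    }, timeoutSeconds, TimeUnit.SECONDS);
  }

  // Call when the query finishes before the deadline so the pending task never fires.
  void disarm() {
    timeoutExecutor.shutdownNow();
  }
}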
Use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.
The class TestReplicationScenariosAcrossInstances, method testIncrementalDumpEmptyDumpDirectory.
@Test
public void testIncrementalDumpEmptyDumpDirectory() throws Throwable {
  WarehouseInstance.Tuple tuple = primary.dump(primaryDbName);
  replica.load(replicatedDbName, primaryDbName).status(replicatedDbName).verifyResult(tuple.lastReplicationId);
  tuple = primary.dump(primaryDbName, Collections.emptyList());
  replica.load(replicatedDbName, primaryDbName).status(replicatedDbName).verifyResult(tuple.lastReplicationId);
  // create events for some other database and then dump the primaryDbName to dump an empty directory.
  String testDbName = primaryDbName + "_test";
  tuple = primary.run("create database " + testDbName)
      .run("create table " + testDbName + ".tbl (fld int)")
      .dump(primaryDbName, Collections.emptyList());
  // Incremental load to existing database with empty dump directory should set the repl id to the last event at src.
  replica.load(replicatedDbName, primaryDbName).status(replicatedDbName).verifyResult(tuple.lastReplicationId);
  // Bootstrap load from an empty dump directory should return empty load directory error.
  tuple = primary.dump("someJunkDB", Collections.emptyList());
  try {
    replica.runCommand("REPL LOAD someJunkDB into someJunkDB");
    assert false;
  } catch (CommandProcessorException e) {
    assertTrue(e.getMessage().toLowerCase().contains("semanticException no data to load in path".toLowerCase()));
  }
  primary.run("drop database if exists " + testDbName + " cascade");
}
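Both replication test classes repeat the same try / assert false / catch idiom to verify that a command fails with an expected CommandProcessorException. A possible JUnit 4 helper capturing that idiom is sketched below; expectCommandFailure is illustrative and not part of the Hive test suite, and it assumes IDriver#run(String) throws CommandProcessorException, as the calls in these tests do. Returning the caught exception lets callers make further assertions, for example comparing getResponseCode() against an ErrorMsg code as testDumpWithTableDirMissing does below.

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.processors.CommandProcessorException;

// Illustrative helper, not part of the Hive test suite.
final class ReplAssertions {
  static CommandProcessorException expectCommandFailure(IDriver driver, String command,
      String expectedMessageFragment) throws Exception {
    try {
      driver.run(command);
    } catch (CommandProcessorException e) {
      assertTrue("Unexpected error message: " + e.getMessage(),
          e.getMessage().toLowerCase().contains(expectedMessageFragment.toLowerCase()));
      return e;
    }
    fail("Expected CommandProcessorException for: " + command);
    return null; // unreachable: fail() always throws
  }
}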
Use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.
The class TestReplicationScenarios, method testIncrementalReplWithEventsMissing.
@Test
public void testIncrementalReplWithEventsMissing() throws IOException, TException {
  String testName = "incrementalReplWithEventsMissing";
  String dbName = createDB(testName, driver);
  String replDbName = dbName + "_dupe";
  Tuple bootstrapDump = bootstrapLoadAndVerify(dbName, replDbName);
  String replDumpId = bootstrapDump.lastReplId;
  // CREATE_TABLE - INSERT - TRUNCATE - INSERT - The result is just one record.
  String[] unptn_data = new String[] { "eleven" };
  run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver);
  run("INSERT INTO TABLE " + dbName + ".unptned values('ten')", driver);
  run("TRUNCATE TABLE " + dbName + ".unptned", driver);
  run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver);
  // Inject a behaviour where some events are missing from the notification_log table.
  // This ensures the incremental dump doesn't get all events for replication.
  BehaviourInjection<NotificationEventResponse, NotificationEventResponse> eventIdSkipper =
      new BehaviourInjection<NotificationEventResponse, NotificationEventResponse>() {

    @Nullable
    @Override
    public NotificationEventResponse apply(@Nullable NotificationEventResponse eventIdList) {
      if (null != eventIdList) {
        List<NotificationEvent> eventIds = eventIdList.getEvents();
        List<NotificationEvent> outEventIds = new ArrayList<NotificationEvent>();
        for (int i = 0; i < eventIds.size(); i++) {
          NotificationEvent event = eventIds.get(i);
          // Skip all the INSERT events
          if (event.getDbName().equalsIgnoreCase(dbName) && event.getEventType().equalsIgnoreCase("INSERT")) {
            injectionPathCalled = true;
            continue;
          }
          outEventIds.add(event);
        }
        // Return the new list
        return new NotificationEventResponse(outEventIds);
      } else {
        return null;
      }
    }
  };
  InjectableBehaviourObjectStore.setGetNextNotificationBehaviour(eventIdSkipper);
  try {
    advanceDumpDir();
    try {
      driver.run("REPL DUMP " + dbName);
      assert false;
    } catch (CommandProcessorException e) {
      assertTrue(e.getCauseMessage().equals(ErrorMsg.REPL_EVENTS_MISSING_IN_METASTORE.getMsg()));
    }
    eventIdSkipper.assertInjectionsPerformed(true, false);
  } finally {
    // reset the behaviour
    InjectableBehaviourObjectStore.resetGetNextNotificationBehaviour();
  }
}
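The injected behaviour above is essentially a filter over the notification events returned by the metastore: drop every INSERT event of the test database and pass everything else through. The Hive-free sketch below shows that core filtering logic in isolation; EventRecord is a made-up stand-in for NotificationEvent, not a Hive class.

import java.util.ArrayList;
import java.util.List;

// Made-up stand-in for NotificationEvent, carrying only the fields the filter inspects.
class EventRecord {
  final String dbName;
  final String eventType;

  EventRecord(String dbName, String eventType) {
    this.dbName = dbName;
    this.eventType = eventType;
  }
}

class EventFilterDemo {
  // Mirrors eventIdSkipper: skip INSERT events of the given database, keep everything else,
  // simulating events that are missing from the NOTIFICATION_LOG table.
  static List<EventRecord> skipInserts(List<EventRecord> events, String dbName) {
    List<EventRecord> out = new ArrayList<>();
    for (EventRecord event : events) {
      if (event.dbName.equalsIgnoreCase(dbName) && event.eventType.equalsIgnoreCase("INSERT")) {
        continue;
      }
      out.add(event);
    }
    return out;
  }
}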
Use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.
The class TestReplicationScenarios, method testDumpWithTableDirMissing.
@Test
public void testDumpWithTableDirMissing() throws IOException {
  String dbName = createDB(testName.getMethodName(), driver);
  run("CREATE TABLE " + dbName + ".normal(a int)", driver);
  run("INSERT INTO " + dbName + ".normal values (1)", driver);
  Database db = null;
  Path path = null;
  try {
    db = metaStoreClient.getDatabase(dbName);
    path = new Path(db.getManagedLocationUri());
  } catch (Exception e) {
    path = new Path(System.getProperty("test.warehouse.dir", "/tmp/warehouse/managed"));
    path = new Path(path, dbName.toLowerCase() + ".db");
  }
  path = new Path(path, "normal");
  FileSystem fs = path.getFileSystem(hconf);
  fs.delete(path);
  advanceDumpDir();
  try {
    driver.run("REPL DUMP " + dbName);
    assert false;
  } catch (CommandProcessorException e) {
    Assert.assertEquals(e.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode());
  }
  run("DROP TABLE " + dbName + ".normal", driver);
  run("drop database " + dbName, true, driver);
}