Use of org.apache.drill.exec.rpc.RpcException in project drill by axbaretto.
The class TestConvertFunctions, method testBigIntVarCharReturnTripConvertLogical_ScalarReplaceON.
// TODO(DRILL-2326) temporary until we fix the scalar replacement bug for this case
@Test
// Because this test sometimes fails, sometimes succeeds
@Ignore
public void testBigIntVarCharReturnTripConvertLogical_ScalarReplaceON() throws Exception {
  final OptionValue srOption = QueryTestUtil.setupScalarReplacementOption(bits[0], ScalarReplacementOption.ON);
  boolean caughtException = false;
  try {
    // this used to fail (with a JUnit assertion) until we fix the SR bug
    // Something in DRILL-5116 seemed to fix this problem, so the test now
    // succeeds - sometimes.
    testBigIntVarCharReturnTripConvertLogical();
  } catch (RpcException e) {
    caughtException = true;
  } finally {
    QueryTestUtil.restoreScalarReplacementOption(bits[0], srOption.string_val);
  }
  // Yes: sometimes this works, sometimes it does not...
  assertTrue(!caughtException || caughtException);
}
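
The pattern the test follows (flip an option, run the query, tolerate an RpcException, restore the option in a finally block) can also be expressed against the plain DrillClient API. The sketch below is an illustration under stated assumptions, not code from the project: `some.option` is a placeholder rather than a real Drill option name, and the ALTER SESSION statements are assumed to be accepted for that option.

import java.util.List;

import org.apache.drill.exec.client.DrillClient;
import org.apache.drill.exec.proto.UserBitShared.QueryType;
import org.apache.drill.exec.rpc.RpcException;
import org.apache.drill.exec.rpc.user.QueryDataBatch;

public class OptionToggleSketch {
  // Runs sql with a session option switched on, tolerating an RpcException from the
  // query itself, and always resets the option afterwards. `some.option` is a
  // placeholder, not a real Drill option name.
  static boolean runToleratingRpcException(DrillClient client, String sql) throws RpcException {
    client.runQuery(QueryType.SQL, "ALTER SESSION SET `some.option` = true")
        .forEach(QueryDataBatch::release);
    boolean caught = false;
    try {
      List<QueryDataBatch> batches = client.runQuery(QueryType.SQL, sql);
      for (QueryDataBatch batch : batches) {
        batch.release();                 // free the buffers backing each result batch
      }
    } catch (RpcException e) {
      caught = true;                     // tolerated: the query may legitimately fail here
    } finally {
      client.runQuery(QueryType.SQL, "ALTER SESSION RESET `some.option`")
          .forEach(QueryDataBatch::release);
    }
    return caught;
  }
}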
Use of org.apache.drill.exec.rpc.RpcException in project drill by axbaretto.
The class ConnectTriesPropertyTestClusterBits, method testInvalidConnectTriesValue.
@Test
public void testInvalidConnectTriesValue() throws Exception {
  Properties props = new Properties();
  props.setProperty("tries", "abc");
  // Test with Cluster Coordinator connection
  DrillClient client = new DrillClient(drillConfig, remoteServiceSet.getCoordinator());
  try {
    client.connect(props);
    fail();
  } catch (RpcException ex) {
    assertTrue(ex instanceof InvalidConnectionInfoException);
    client.close();
  }
}
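
A caller of the client API can make the same distinction the test asserts: InvalidConnectionInfoException is the RpcException subclass thrown when a connection property such as "tries" cannot be parsed. The snippet below is a minimal sketch under that assumption; the endpoint string, the two-argument connect(String, Properties) overload, and the package locations in the imports are assumptions rather than taken from the snippet above.

import java.util.Properties;

import org.apache.drill.exec.client.DrillClient;
import org.apache.drill.exec.client.InvalidConnectionInfoException;
import org.apache.drill.exec.rpc.RpcException;

public class ConnectSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("tries", "5");          // must parse as a positive integer
    try (DrillClient client = new DrillClient()) {
      // "drillbit=localhost:31010" is a placeholder endpoint.
      client.connect("drillbit=localhost:31010", props);
      // ... run queries ...
    } catch (InvalidConnectionInfoException e) {
      // Rejected connection properties (for example, a non-numeric "tries" value).
      System.err.println("Bad connection properties: " + e.getMessage());
    } catch (RpcException e) {
      System.err.println("Could not connect: " + e.getMessage());
    }
  }
}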
Use of org.apache.drill.exec.rpc.RpcException in project drill by axbaretto.
The class UserServerRequestHandler, method handle.
@Override
public void handle(BitToUserConnection connection, int rpcType, ByteBuf pBody, ByteBuf dBody, ResponseSender responseSender) throws RpcException {
  switch (rpcType) {
    case RpcType.RUN_QUERY_VALUE:
      logger.debug("Received query to run. Returning query handle.");
      try {
        final RunQuery query = RunQuery.PARSER.parseFrom(new ByteBufInputStream(pBody));
        final QueryId queryId = worker.submitWork(connection, query);
        responseSender.send(new Response(RpcType.QUERY_HANDLE, queryId));
        break;
      } catch (InvalidProtocolBufferException e) {
        throw new RpcException("Failure while decoding RunQuery body.", e);
      }
    case RpcType.CANCEL_QUERY_VALUE:
      try {
        final QueryId queryId = QueryId.PARSER.parseFrom(new ByteBufInputStream(pBody));
        final Ack ack = worker.cancelQuery(queryId);
        responseSender.send(new Response(RpcType.ACK, ack));
        break;
      } catch (InvalidProtocolBufferException e) {
        throw new RpcException("Failure while decoding QueryId body.", e);
      }
    case RpcType.RESUME_PAUSED_QUERY_VALUE:
      try {
        final QueryId queryId = QueryId.PARSER.parseFrom(new ByteBufInputStream(pBody));
        final Ack ack = worker.resumeQuery(queryId);
        responseSender.send(new Response(RpcType.ACK, ack));
        break;
      } catch (final InvalidProtocolBufferException e) {
        throw new RpcException("Failure while decoding QueryId body.", e);
      }
    case RpcType.GET_QUERY_PLAN_FRAGMENTS_VALUE:
      try {
        final GetQueryPlanFragments req = GetQueryPlanFragments.PARSER.parseFrom(new ByteBufInputStream(pBody));
        responseSender.send(new Response(RpcType.QUERY_PLAN_FRAGMENTS, worker.getQueryPlan(connection, req)));
        break;
      } catch (final InvalidProtocolBufferException e) {
        throw new RpcException("Failure while decoding GetQueryPlanFragments body.", e);
      }
    case RpcType.GET_CATALOGS_VALUE:
      try {
        final GetCatalogsReq req = GetCatalogsReq.PARSER.parseFrom(new ByteBufInputStream(pBody));
        worker.submitCatalogMetadataWork(connection.getSession(), req, responseSender);
        break;
      } catch (final InvalidProtocolBufferException e) {
        throw new RpcException("Failure while decoding GetCatalogsReq body.", e);
      }
    case RpcType.GET_SCHEMAS_VALUE:
      try {
        final GetSchemasReq req = GetSchemasReq.PARSER.parseFrom(new ByteBufInputStream(pBody));
        worker.submitSchemasMetadataWork(connection.getSession(), req, responseSender);
        break;
      } catch (final InvalidProtocolBufferException e) {
        throw new RpcException("Failure while decoding GetSchemasReq body.", e);
      }
    case RpcType.GET_TABLES_VALUE:
      try {
        final GetTablesReq req = GetTablesReq.PARSER.parseFrom(new ByteBufInputStream(pBody));
        worker.submitTablesMetadataWork(connection.getSession(), req, responseSender);
        break;
      } catch (final InvalidProtocolBufferException e) {
        throw new RpcException("Failure while decoding GetTablesReq body.", e);
      }
    case RpcType.GET_COLUMNS_VALUE:
      try {
        final GetColumnsReq req = GetColumnsReq.PARSER.parseFrom(new ByteBufInputStream(pBody));
        worker.submitColumnsMetadataWork(connection.getSession(), req, responseSender);
        break;
      } catch (final InvalidProtocolBufferException e) {
        throw new RpcException("Failure while decoding GetColumnsReq body.", e);
      }
    case RpcType.CREATE_PREPARED_STATEMENT_VALUE:
      try {
        final CreatePreparedStatementReq req = CreatePreparedStatementReq.PARSER.parseFrom(new ByteBufInputStream(pBody));
        worker.submitPreparedStatementWork(connection, req, responseSender);
        break;
      } catch (final InvalidProtocolBufferException e) {
        throw new RpcException("Failure while decoding CreatePreparedStatementReq body.", e);
      }
    case RpcType.GET_SERVER_META_VALUE:
      try {
        final GetServerMetaReq req = GetServerMetaReq.PARSER.parseFrom(new ByteBufInputStream(pBody));
        worker.submitServerMetadataWork(connection.getSession(), req, responseSender);
        break;
      } catch (final InvalidProtocolBufferException e) {
        throw new RpcException("Failure while decoding GetServerMetaReq body.", e);
      }
    default:
      throw new UnsupportedOperationException(String.format("UserServerRequestHandler received rpc of unknown type. Type was %d.", rpcType));
  }
}
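
Every branch above repeats the same decode-or-wrap step: parse the protobuf request from the Netty buffer and convert a failed parse into an RpcException whose message names the request type. A hypothetical helper (not part of Drill) that captures just that step might look like this:

import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Parser;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;

import org.apache.drill.exec.rpc.RpcException;

final class ProtobufBodies {
  private ProtobufBodies() {}

  // Parses a protobuf message of type T from the request body, wrapping any
  // InvalidProtocolBufferException in an RpcException that names the message type.
  static <T> T decode(ByteBuf body, Parser<T> parser, String messageName) throws RpcException {
    try {
      return parser.parseFrom(new ByteBufInputStream(body));
    } catch (InvalidProtocolBufferException e) {
      throw new RpcException("Failure while decoding " + messageName + " body.", e);
    }
  }
}

With such a helper, a case could read, for example: final RunQuery query = ProtobufBodies.decode(pBody, RunQuery.PARSER, "RunQuery");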
Use of org.apache.drill.exec.rpc.RpcException in project drill by axbaretto.
The class QueryResultHandler, method resultArrived.
/**
 * Maps the internal low-level API protocol to the {@link UserResultsListener}-level API protocol.
 * Handles the query-state result message (COMPLETED, CANCELED, or FAILED) for a query.
 */
public void resultArrived(ByteBuf pBody) throws RpcException {
  final QueryResult queryResult = RpcBus.get(pBody, QueryResult.PARSER);
  final QueryId queryId = queryResult.getQueryId();
  final QueryState queryState = queryResult.getQueryState();
  if (logger.isDebugEnabled()) {
    logger.debug("resultArrived: queryState: {}, queryId = {}", queryState, QueryIdHelper.getQueryId(queryId));
  }
  assert queryResult.hasQueryState() : "received query result without QueryState";
  final boolean isFailureResult = QueryState.FAILED == queryState;
  // CANCELED queries are handled the same way as COMPLETED
  final boolean isTerminalResult;
  switch (queryState) {
    case FAILED:
    case CANCELED:
    case COMPLETED:
      isTerminalResult = true;
      break;
    default:
      logger.error("Unexpected/unhandled QueryState {} (for query {})", queryState, queryId);
      isTerminalResult = false;
      break;
  }
  assert isFailureResult || queryResult.getErrorCount() == 0 : "Error count for the query batch is non-zero but QueryState != FAILED";
  UserResultsListener resultsListener = newUserResultsListener(queryId);
  try {
    if (isFailureResult) {
      // Failure case--pass on via submissionFailed(...).
      resultsListener.submissionFailed(new UserRemoteException(queryResult.getError(0)));
      // Note: Listener is removed in finally below.
    } else if (isTerminalResult) {
      try {
        resultsListener.queryCompleted(queryState);
      } catch (Exception e) {
        resultsListener.submissionFailed(UserException.systemError(e).build(logger));
      }
    } else {
      logger.warn("queryState {} was ignored", queryState);
    }
  } finally {
    if (isTerminalResult) {
      // Remove the listener unless it is a BufferingResultsListener whose output
      // listener has not been attached yet.
      if (!(resultsListener instanceof BufferingResultsListener) || ((BufferingResultsListener) resultsListener).output != null) {
        queryIdToResultsListenersMap.remove(queryId, resultsListener);
      }
    }
  }
}
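
The listener that resultArrived() drives implements the UserResultsListener contract: submissionFailed() for failures, queryCompleted() for terminal states, and dataArrived() for data batches. Below is a minimal sketch of such a listener; the package locations in the imports are assumed from the usual Drill layout rather than taken from the snippet above.

import org.apache.drill.common.exceptions.UserException;
import org.apache.drill.exec.proto.UserBitShared.QueryId;
import org.apache.drill.exec.proto.UserBitShared.QueryResult.QueryState;
import org.apache.drill.exec.rpc.ConnectionThrottle;
import org.apache.drill.exec.rpc.user.QueryDataBatch;
import org.apache.drill.exec.rpc.user.UserResultsListener;

// Logs query lifecycle events and releases data batches as they arrive.
class LoggingResultsListener implements UserResultsListener {
  @Override
  public void queryIdArrived(QueryId queryId) {
    System.out.println("query id arrived: " + queryId);
  }

  @Override
  public void submissionFailed(UserException ex) {
    System.err.println("query failed: " + ex.getMessage());
  }

  @Override
  public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
    result.release();   // always release the buffers backing a batch
  }

  @Override
  public void queryCompleted(QueryState state) {
    System.out.println("query finished in state " + state);
  }
}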
Use of org.apache.drill.exec.rpc.RpcException in project drill by axbaretto.
The class UserClient, method prepareSaslHandshake.
@Override
protected void prepareSaslHandshake(final RpcConnectionHandler<UserToBitConnection> connectionHandler, List<String> serverAuthMechanisms) {
  try {
    final Map<String, String> saslProperties = properties.stringPropertiesAsMap();
    // Set the correct QOP property and strength based on whether the server needs encryption.
    // If chunk mode is enabled, negotiate a buffer size equal to wrapChunkSize;
    // if chunk mode is disabled, negotiate a buffer size of MAX_WRAPPED_SIZE.
    saslProperties.putAll(SaslProperties.getSaslProperties(connection.isEncryptionEnabled(), connection.getMaxWrappedSize()));
    final AuthenticatorFactory factory = getAuthenticatorFactory(properties, serverAuthMechanisms);
    final String mechanismName = factory.getSimpleName();
    logger.trace("Will try to authenticate to server using {} mechanism with encryption context {}", mechanismName, connection.getEncryptionCtxtString());
    // Update the thread context class loader to the current class loader.
    // See DRILL-6063 for a detailed description.
    final ClassLoader oldThreadCtxtCL = Thread.currentThread().getContextClassLoader();
    final ClassLoader newThreadCtxtCL = this.getClass().getClassLoader();
    Thread.currentThread().setContextClassLoader(newThreadCtxtCL);
    final UserGroupInformation ugi = factory.createAndLoginUser(saslProperties);
    // Reset the thread context class loader to the original one.
    Thread.currentThread().setContextClassLoader(oldThreadCtxtCL);
    startSaslHandshake(connectionHandler, saslProperties, ugi, factory, RpcType.SASL_MESSAGE);
  } catch (final IOException e) {
    logger.error("Failed while doing setup for starting SASL handshake for connection {}", connection.getName(), e);
    final Exception ex = new RpcException(String.format("Failed to initiate authentication for connection %s", connection.getName()), e);
    connectionHandler.connectionFailed(RpcConnectionHandler.FailureType.AUTHENTICATION, ex);
  }
}
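
Note that the class-loader swap in the middle of the method is reset only on the success path; if createAndLoginUser() throws, the thread keeps the replaced loader. A generic, hypothetical utility (not part of Drill) that performs the same swap with a try/finally would look like this:

import java.util.concurrent.Callable;

final class ContextClassLoaders {
  private ContextClassLoaders() {}

  // Runs the task with the given context class loader and always restores the
  // original loader, even if the task throws.
  static <T> T callWith(ClassLoader loader, Callable<T> task) throws Exception {
    final Thread current = Thread.currentThread();
    final ClassLoader original = current.getContextClassLoader();
    current.setContextClassLoader(loader);
    try {
      return task.call();
    } finally {
      current.setContextClassLoader(original);
    }
  }
}

With such a helper, the login step above could be written as: ugi = ContextClassLoaders.callWith(getClass().getClassLoader(), () -> factory.createAndLoginUser(saslProperties));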