Use of org.apache.hyracks.api.dataset.ResultSetId in project asterixdb by apache.
The class QueryTranslator, method compileAndExecute.
@Override
public void compileAndExecute(IHyracksClientConnection hcc, IHyracksDataset hdc, ResultDelivery resultDelivery,
        ResultMetadata outMetadata, Stats stats, String clientContextId, IStatementExecutorContext ctx)
        throws Exception {
    int resultSetIdCounter = 0;
    FileSplit outputFile = null;
    IAWriterFactory writerFactory = PrinterBasedWriterFactory.INSTANCE;
    IResultSerializerFactoryProvider resultSerializerFactoryProvider = ResultSerializerFactoryProvider.INSTANCE;
    Map<String, String> config = new HashMap<>();
    /* Since the system runs a large number of threads, when an HTTP request doesn't return it becomes difficult
     * to find the thread running that request and determine where it has stopped. Setting the thread name makes
     * that easier.
     */
    String threadName = Thread.currentThread().getName();
    Thread.currentThread().setName(QueryTranslator.class.getSimpleName());
    try {
        for (Statement stmt : statements) {
            if (sessionConfig.is(SessionConfig.FORMAT_HTML)) {
                sessionOutput.out().println(ApiServlet.HTML_STATEMENT_SEPARATOR);
            }
            validateOperation(appCtx, activeDataverse, stmt);
            // Rewrite the statement's AST.
            rewriteStatement(stmt);
            MetadataProvider metadataProvider = new MetadataProvider(appCtx, activeDataverse, componentProvider);
            metadataProvider.setWriterFactory(writerFactory);
            metadataProvider.setResultSerializerFactoryProvider(resultSerializerFactoryProvider);
            metadataProvider.setOutputFile(outputFile);
            metadataProvider.setConfig(config);
            switch (stmt.getKind()) {
                case Statement.Kind.SET:
                    handleSetStatement(stmt, config);
                    break;
                case Statement.Kind.DATAVERSE_DECL:
                    activeDataverse = handleUseDataverseStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CREATE_DATAVERSE:
                    handleCreateDataverseStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DATASET_DECL:
                    handleCreateDatasetStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.CREATE_INDEX:
                    handleCreateIndexStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.TYPE_DECL:
                    handleCreateTypeStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.NODEGROUP_DECL:
                    handleCreateNodeGroupStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DATAVERSE_DROP:
                    handleDataverseDropStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.DATASET_DROP:
                    handleDatasetDropStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.INDEX_DROP:
                    handleIndexDropStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.TYPE_DROP:
                    handleTypeDropStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.NODEGROUP_DROP:
                    handleNodegroupDropStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CREATE_FUNCTION:
                    handleCreateFunctionStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.FUNCTION_DROP:
                    handleFunctionDropStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.LOAD:
                    handleLoadStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.INSERT:
                case Statement.Kind.UPSERT:
                    if (((InsertStatement) stmt).getReturnExpression() != null) {
                        metadataProvider.setResultSetId(new ResultSetId(resultSetIdCounter++));
                        metadataProvider.setResultAsyncMode(
                                resultDelivery == ResultDelivery.ASYNC || resultDelivery == ResultDelivery.DEFERRED);
                    }
                    handleInsertUpsertStatement(metadataProvider, stmt, hcc, hdc, resultDelivery, outMetadata, stats,
                            false, clientContextId, ctx);
                    break;
                case Statement.Kind.DELETE:
                    handleDeleteStatement(metadataProvider, stmt, hcc, false);
                    break;
                case Statement.Kind.CREATE_FEED:
                    handleCreateFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DROP_FEED:
                    handleDropFeedStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.DROP_FEED_POLICY:
                    handleDropFeedPolicyStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CONNECT_FEED:
                    handleConnectFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DISCONNECT_FEED:
                    handleDisconnectFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.START_FEED:
                    handleStartFeedStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.STOP_FEED:
                    handleStopFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CREATE_FEED_POLICY:
                    handleCreateFeedPolicyStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.QUERY:
                    metadataProvider.setResultSetId(new ResultSetId(resultSetIdCounter++));
                    metadataProvider.setResultAsyncMode(
                            resultDelivery == ResultDelivery.ASYNC || resultDelivery == ResultDelivery.DEFERRED);
                    handleQuery(metadataProvider, (Query) stmt, hcc, hdc, resultDelivery, outMetadata, stats,
                            clientContextId, ctx);
                    break;
                case Statement.Kind.COMPACT:
                    handleCompactStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.EXTERNAL_DATASET_REFRESH:
                    handleExternalDatasetRefreshStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.WRITE:
                    Pair<IAWriterFactory, FileSplit> result = handleWriteStatement(stmt);
                    writerFactory = (result.first != null) ? result.first : writerFactory;
                    outputFile = result.second;
                    break;
                case Statement.Kind.RUN:
                    handleRunStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.FUNCTION_DECL:
                    // No-op.
                    break;
                case Statement.Kind.EXTENSION:
                    ((IExtensionStatement) stmt).handle(this, metadataProvider, hcc, hdc, resultDelivery, stats,
                            resultSetIdCounter);
                    break;
                default:
                    throw new CompilationException("Unknown function");
            }
        }
    } finally {
        Thread.currentThread().setName(threadName);
    }
}
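The pattern worth pulling out of the long switch above: every result-producing statement (QUERY, and INSERT/UPSERT with a returning expression) draws a fresh ResultSetId from the per-request resultSetIdCounter, and async mode is enabled exactly when delivery is ASYNC or DEFERRED, so the client can fetch the result later instead of streaming it back inline. A minimal sketch of that pattern in isolation; the helper is hypothetical and the import paths are best-effort assumptions, not taken from this file:

import org.apache.asterix.metadata.declared.MetadataProvider;
import org.apache.asterix.translator.IStatementExecutor.ResultDelivery;
import org.apache.hyracks.api.dataset.ResultSetId;

final class ResultSetIdAllocation {
    private ResultSetIdAllocation() {
    }

    // Hypothetical helper mirroring the QUERY and INSERT/UPSERT-with-RETURNING branches above.
    static int assignResultSet(MetadataProvider metadataProvider, ResultDelivery resultDelivery,
            int resultSetIdCounter) {
        // Each result-producing statement in the request gets its own ResultSetId.
        metadataProvider.setResultSetId(new ResultSetId(resultSetIdCounter++));
        // ASYNC and DEFERRED delivery park the result for later retrieval by the client.
        metadataProvider.setResultAsyncMode(
                resultDelivery == ResultDelivery.ASYNC || resultDelivery == ResultDelivery.DEFERRED);
        return resultSetIdCounter;
    }
}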
Use of org.apache.hyracks.api.dataset.ResultSetId in project asterixdb by apache.
The class MetadataProvider, method getResultHandleRuntime.
@Override
public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> getResultHandleRuntime(IDataSink sink,
        int[] printColumns, IPrinterFactory[] printerFactories, RecordDescriptor inputDesc, boolean ordered,
        JobSpecification spec) throws AlgebricksException {
    ResultSetDataSink rsds = (ResultSetDataSink) sink;
    ResultSetSinkId rssId = rsds.getId();
    ResultSetId rsId = rssId.getResultSetId();
    ResultWriterOperatorDescriptor resultWriter = null;
    try {
        IResultSerializerFactory resultSerializedAppenderFactory = resultSerializerFactoryProvider
                .getAqlResultSerializerFactoryProvider(printColumns, printerFactories, getWriterFactory());
        resultWriter = new ResultWriterOperatorDescriptor(spec, rsId, ordered, getResultAsyncMode(),
                resultSerializedAppenderFactory);
    } catch (IOException e) {
        throw new AlgebricksException(e);
    }
    return new Pair<>(resultWriter, null);
}
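The ResultSetId baked into the ResultWriterOperatorDescriptor here is the handle a client later uses, together with the JobId, to locate the written result partition. A hedged sketch of that consumer side, assuming the Hyracks dataset client API of the same vintage (HyracksDataset, FrameManager, and the reader loop are standard hyracks types, but treat the exact constructors as assumptions rather than verified signatures):

// Sketch only: read back the result set written by a ResultWriterOperatorDescriptor.
// Assumes org.apache.hyracks.client.dataset.HyracksDataset and the dataset API in org.apache.hyracks.api.dataset.
private static void readResultSet(IHyracksClientConnection hcc, JobId jobId, int frameSize) throws Exception {
    IHyracksDataset hdc = new HyracksDataset(hcc, frameSize, 1);
    // The ResultSetId must match the one registered on the job; 0 here is illustrative.
    IHyracksDatasetReader reader = hdc.createReader(jobId, new ResultSetId(0));
    VSizeFrame frame = new VSizeFrame(new FrameManager(frameSize));
    while (reader.read(frame) > 0) {
        // Consume the frame's tuples, e.g. with a ResultFrameTupleAccessor.
    }
}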
Use of org.apache.hyracks.api.dataset.ResultSetId in project asterixdb by apache.
The class ReplicateOperatorTest, method test.
@Test
public void test() throws Exception {
    final int outputArity = 2;
    JobSpecification spec = new JobSpecification();
    String inputFileName = "data" + File.separator + "nc1" + File.separator + "words.txt";
    File[] outputFile = new File[outputArity];
    for (int i = 0; i < outputArity; i++) {
        outputFile[i] = File.createTempFile("replicateop", null);
        outputFile[i].deleteOnExit();
    }
    FileSplit[] inputSplits = new FileSplit[] { new ManagedFileSplit(NC1_ID, inputFileName) };
    String[] locations = new String[] { NC1_ID };
    DelimitedDataTupleParserFactory stringParser = new DelimitedDataTupleParserFactory(
            new IValueParserFactory[] { UTF8StringParserFactory.INSTANCE }, '
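The snippet is cut off by the source page before it reaches the ResultSetId usage it was indexed for. In hyracks integration tests of this era, the wiring usually continues along the lines below: each replicated output branch gets a result writer keyed by its own ResultSetId registered on the job. This is a plausible reconstruction under that assumption, not the verbatim rest of ReplicateOperatorTest:

// Register a result set on the job and attach a result writer to one of the replicated branches.
ResultSetId rsId = new ResultSetId(1);
spec.addResultSetId(rsId);
IOperatorDescriptor resultWriter = new ResultWriterOperatorDescriptor(spec, rsId, true, false,
        ResultSerializerFactoryProvider.INSTANCE.getResultSerializerFactoryProvider());
PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, resultWriter, locations);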