Usage example of org.apache.hyracks.algebricks.data.IAWriterFactory in the Apache AsterixDB project.
From class QueryTranslator, method compileAndExecute:
/**
 * Compiles and executes each parsed statement in {@code statements} in order.
 * <p>
 * Per-statement flow: validate against the active dataverse, rewrite the AST,
 * build a fresh {@link MetadataProvider} seeded with the current writer factory,
 * result serializer provider, output file and SET-statement config, then dispatch
 * on the statement kind. Several cases mutate state that carries over to LATER
 * statements in the same request: SET updates {@code config}, DATAVERSE_DECL
 * updates {@code activeDataverse}, and WRITE swaps {@code writerFactory} /
 * {@code outputFile} — so statement order matters.
 *
 * @param hcc             Hyracks client connection used to run compiled jobs
 * @param hdc             dataset interface for reading back query results
 * @param resultDelivery  how results are returned (IMMEDIATE / DEFERRED / ASYNC)
 * @param outMetadata     out-parameter populated with result-set metadata
 * @param stats           out-parameter accumulating execution statistics
 * @param clientContextId identifier used to cancel/track the client request
 * @param ctx             executor context shared across statement handlers
 * @throws Exception propagated from validation, compilation or execution
 */
@Override
public void compileAndExecute(IHyracksClientConnection hcc, IHyracksDataset hdc, ResultDelivery resultDelivery, ResultMetadata outMetadata, Stats stats, String clientContextId, IStatementExecutorContext ctx) throws Exception {
// Each result-producing statement (QUERY, INSERT/UPSERT with RETURNING) gets its own id.
int resultSetIdCounter = 0;
// null until a WRITE statement supplies a target file.
FileSplit outputFile = null;
// Default printer-based writer; a WRITE statement may replace it for later statements.
IAWriterFactory writerFactory = PrinterBasedWriterFactory.INSTANCE;
IResultSerializerFactoryProvider resultSerializerFactoryProvider = ResultSerializerFactoryProvider.INSTANCE;
// Accumulates key/value pairs from SET statements, visible to all following statements.
Map<String, String> config = new HashMap<>();
/* Since the system runs a large number of threads, when HTTP requests don't return, it becomes difficult to
 * find the thread running the request to determine where it has stopped.
 * Setting the thread name helps make that easier
 */
String threadName = Thread.currentThread().getName();
Thread.currentThread().setName(QueryTranslator.class.getSimpleName());
try {
for (Statement stmt : statements) {
// In HTML mode, visually separate the output of consecutive statements.
if (sessionConfig.is(SessionConfig.FORMAT_HTML)) {
sessionOutput.out().println(ApiServlet.HTML_STATEMENT_SEPARATOR);
}
validateOperation(appCtx, activeDataverse, stmt);
// Rewrite the statement's AST.
rewriteStatement(stmt);
// A fresh provider per statement, seeded with the request-scoped state above.
MetadataProvider metadataProvider = new MetadataProvider(appCtx, activeDataverse, componentProvider);
metadataProvider.setWriterFactory(writerFactory);
metadataProvider.setResultSerializerFactoryProvider(resultSerializerFactoryProvider);
metadataProvider.setOutputFile(outputFile);
metadataProvider.setConfig(config);
switch(stmt.getKind()) {
case Statement.Kind.SET:
handleSetStatement(stmt, config);
break;
case Statement.Kind.DATAVERSE_DECL:
// Changes the active dataverse for all subsequent statements in this request.
activeDataverse = handleUseDataverseStatement(metadataProvider, stmt);
break;
case Statement.Kind.CREATE_DATAVERSE:
handleCreateDataverseStatement(metadataProvider, stmt);
break;
case Statement.Kind.DATASET_DECL:
handleCreateDatasetStatement(metadataProvider, stmt, hcc);
break;
case Statement.Kind.CREATE_INDEX:
handleCreateIndexStatement(metadataProvider, stmt, hcc);
break;
case Statement.Kind.TYPE_DECL:
handleCreateTypeStatement(metadataProvider, stmt);
break;
case Statement.Kind.NODEGROUP_DECL:
handleCreateNodeGroupStatement(metadataProvider, stmt);
break;
case Statement.Kind.DATAVERSE_DROP:
handleDataverseDropStatement(metadataProvider, stmt, hcc);
break;
case Statement.Kind.DATASET_DROP:
handleDatasetDropStatement(metadataProvider, stmt, hcc);
break;
case Statement.Kind.INDEX_DROP:
handleIndexDropStatement(metadataProvider, stmt, hcc);
break;
case Statement.Kind.TYPE_DROP:
handleTypeDropStatement(metadataProvider, stmt);
break;
case Statement.Kind.NODEGROUP_DROP:
handleNodegroupDropStatement(metadataProvider, stmt);
break;
case Statement.Kind.CREATE_FUNCTION:
handleCreateFunctionStatement(metadataProvider, stmt);
break;
case Statement.Kind.FUNCTION_DROP:
handleFunctionDropStatement(metadataProvider, stmt);
break;
case Statement.Kind.LOAD:
handleLoadStatement(metadataProvider, stmt, hcc);
break;
case Statement.Kind.INSERT:
case Statement.Kind.UPSERT:
// An INSERT/UPSERT with a RETURNING expression produces a result set like a query.
if (((InsertStatement) stmt).getReturnExpression() != null) {
metadataProvider.setResultSetId(new ResultSetId(resultSetIdCounter++));
metadataProvider.setResultAsyncMode(resultDelivery == ResultDelivery.ASYNC || resultDelivery == ResultDelivery.DEFERRED);
}
handleInsertUpsertStatement(metadataProvider, stmt, hcc, hdc, resultDelivery, outMetadata, stats, false, clientContextId, ctx);
break;
case Statement.Kind.DELETE:
handleDeleteStatement(metadataProvider, stmt, hcc, false);
break;
case Statement.Kind.CREATE_FEED:
handleCreateFeedStatement(metadataProvider, stmt);
break;
case Statement.Kind.DROP_FEED:
handleDropFeedStatement(metadataProvider, stmt, hcc);
break;
case Statement.Kind.DROP_FEED_POLICY:
handleDropFeedPolicyStatement(metadataProvider, stmt);
break;
case Statement.Kind.CONNECT_FEED:
handleConnectFeedStatement(metadataProvider, stmt);
break;
case Statement.Kind.DISCONNECT_FEED:
handleDisconnectFeedStatement(metadataProvider, stmt);
break;
case Statement.Kind.START_FEED:
handleStartFeedStatement(metadataProvider, stmt, hcc);
break;
case Statement.Kind.STOP_FEED:
handleStopFeedStatement(metadataProvider, stmt);
break;
case Statement.Kind.CREATE_FEED_POLICY:
handleCreateFeedPolicyStatement(metadataProvider, stmt);
break;
case Statement.Kind.QUERY:
metadataProvider.setResultSetId(new ResultSetId(resultSetIdCounter++));
metadataProvider.setResultAsyncMode(resultDelivery == ResultDelivery.ASYNC || resultDelivery == ResultDelivery.DEFERRED);
handleQuery(metadataProvider, (Query) stmt, hcc, hdc, resultDelivery, outMetadata, stats, clientContextId, ctx);
break;
case Statement.Kind.COMPACT:
handleCompactStatement(metadataProvider, stmt, hcc);
break;
case Statement.Kind.EXTERNAL_DATASET_REFRESH:
handleExternalDatasetRefreshStatement(metadataProvider, stmt, hcc);
break;
case Statement.Kind.WRITE:
// WRITE updates the writer factory (if one was specified) and the output file
// for all subsequent statements in this request.
Pair<IAWriterFactory, FileSplit> result = handleWriteStatement(stmt);
writerFactory = (result.first != null) ? result.first : writerFactory;
outputFile = result.second;
break;
case Statement.Kind.RUN:
handleRunStatement(metadataProvider, stmt, hcc);
break;
case Statement.Kind.FUNCTION_DECL:
// No op
break;
case Statement.Kind.EXTENSION:
// Extension statements dispatch to their own handler implementation.
((IExtensionStatement) stmt).handle(this, metadataProvider, hcc, hdc, resultDelivery, stats, resultSetIdCounter);
break;
default:
// NOTE(review): message looks misleading — this is an unknown statement kind,
// not an unknown function; consider rewording.
throw new CompilationException("Unknown function");
}
}
} finally {
// Always restore the original thread name, even on failure.
Thread.currentThread().setName(threadName);
}
}
Usage example of org.apache.hyracks.algebricks.data.IAWriterFactory in the Apache AsterixDB project.
From class QueryTranslator, method handleWriteStatement:
/**
 * Translates a WRITE statement into the pair of state carried forward by
 * {@code compileAndExecute}: an optional custom writer factory and the output file.
 *
 * @param stmt the WRITE statement (must be a {@link WriteStatement})
 * @return a pair of (writer factory or {@code null} when none was named,
 *         output file split on the statement's target NC)
 * @throws ClassNotFoundException if the named writer class cannot be loaded
 * @throws InstantiationException if the writer class cannot be instantiated
 * @throws IllegalAccessException if the writer's no-arg constructor is inaccessible
 */
protected Pair<IAWriterFactory, FileSplit> handleWriteStatement(Statement stmt) throws InstantiationException, IllegalAccessException, ClassNotFoundException {
    WriteStatement writeStmt = (WriteStatement) stmt;
    // Output location: the statement's file path, pinned to the named NC.
    FileSplit targetFile =
            new UnmanagedFileSplit(writeStmt.getNcName().getValue(), new File(writeStmt.getFileName()).getPath());
    // Reflectively instantiate the custom writer when one was named; null
    // tells the caller to keep its current writer factory.
    String writerClassName = writeStmt.getWriterClassName();
    IAWriterFactory customWriter =
            (writerClassName == null) ? null : (IAWriterFactory) Class.forName(writerClassName).newInstance();
    return new Pair<>(customWriter, targetFile);
}
Aggregations