Use of com.facebook.presto.execution.scheduler.TableWriteInfo in project presto by prestodb.
Class TestMemoryRevokingScheduler, method createTestingOperatorContexts:
private TestOperatorContext createTestingOperatorContexts(SqlTask sqlTask, String operatorName)
{
    // update task to update underlying taskHolderReference with taskExecution + create a new taskContext
    sqlTask.updateTask(
            TEST_SESSION,
            Optional.of(PLAN_FRAGMENT),
            ImmutableList.of(new TaskSource(TABLE_SCAN_NODE_ID, ImmutableSet.of(SPLIT), false)),
            createInitialEmptyOutputBuffers(PARTITIONED).withBuffer(OUT, 0).withNoMoreBufferIds(),
            Optional.of(new TableWriteInfo(Optional.empty(), Optional.empty(), Optional.empty())));
    // use implicitly created task context from updateTask. It should be the only task in this QueryContext's tasks
    TaskContext taskContext = sqlTask.getQueryContext().getTaskContextByTaskId(sqlTask.getTaskId());
    PipelineContext pipelineContext = taskContext.addPipelineContext(0, false, false, false);
    DriverContext driverContext = pipelineContext.addDriverContext();
    TestOperatorContext testOperatorContext = new TestOperatorContext(
            1,
            new PlanNodeId("na"),
            "na",
            driverContext,
            singleThreadedExecutor,
            driverContext.getDriverMemoryContext().newMemoryTrackingContext(),
            operatorName);
    driverContext.addOperatorContext(testOperatorContext);
    return testOperatorContext;
}
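All of the usages on this page construct TableWriteInfo with three empty Optionals, i.e. a task that carries no table-write, analyze, or delete metadata. A minimal sketch of a test helper that centralizes this construction is shown below; the class and method names are hypothetical and not part of Presto, only the TableWriteInfo constructor call is taken from the snippets on this page.

import java.util.Optional;

import com.facebook.presto.execution.scheduler.TableWriteInfo;

// Hypothetical test utility (not part of Presto): builds the "empty" TableWriteInfo
// that the tests on this page pass to updateTask, createRemoteTask and createSqlStageExecution.
final class TableWriteInfoTestUtil
{
    private TableWriteInfoTestUtil() {}

    static TableWriteInfo emptyTableWriteInfo()
    {
        return new TableWriteInfo(Optional.empty(), Optional.empty(), Optional.empty());
    }
}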
Use of com.facebook.presto.execution.scheduler.TableWriteInfo in project presto by prestodb.
Class MockRemoteTaskFactory, method createTableScanTask:
public MockRemoteTask createTableScanTask(TaskId taskId, InternalNode newNode, List<Split> splits, NodeTaskMap.NodeStatsTracker nodeStatsTracker)
{
    VariableReferenceExpression variable = new VariableReferenceExpression(Optional.empty(), "column", VARCHAR);
    PlanNodeId sourceId = new PlanNodeId("sourceId");
    PlanFragment testFragment = new PlanFragment(
            new PlanFragmentId(0),
            new TableScanNode(
                    Optional.empty(),
                    sourceId,
                    new TableHandle(new ConnectorId("test"), new TestingTableHandle(), TestingTransactionHandle.create(), Optional.of(TestingHandle.INSTANCE)),
                    ImmutableList.of(variable),
                    ImmutableMap.of(variable, new TestingColumnHandle("column")),
                    TupleDomain.all(),
                    TupleDomain.all()),
            ImmutableSet.of(variable),
            SOURCE_DISTRIBUTION,
            ImmutableList.of(sourceId),
            new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), ImmutableList.of(variable)),
            StageExecutionDescriptor.ungroupedExecution(),
            false,
            StatsAndCosts.empty(),
            Optional.empty());
    ImmutableMultimap.Builder<PlanNodeId, Split> initialSplits = ImmutableMultimap.builder();
    for (Split sourceSplit : splits) {
        initialSplits.put(sourceId, sourceSplit);
    }
    return createRemoteTask(
            TEST_SESSION, taskId, newNode, testFragment, initialSplits.build(),
            createInitialEmptyOutputBuffers(BROADCAST), nodeStatsTracker, true,
            new TableWriteInfo(Optional.empty(), Optional.empty(), Optional.empty()));
}
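As an aside, the per-split loop above can also be expressed with Guava's ImmutableMultimap.Builder.putAll, which accepts a key and an Iterable of values; a sketch of the equivalent code:

// Equivalent to the for-loop above: map every split to the single table scan source node.
ImmutableMultimap.Builder<PlanNodeId, Split> initialSplits = ImmutableMultimap.builder();
initialSplits.putAll(sourceId, splits);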
Use of com.facebook.presto.execution.scheduler.TableWriteInfo in project presto by prestodb.
Class TestSqlTask, method testSimpleQuery:
@Test
public void testSimpleQuery()
        throws Exception
{
    SqlTask sqlTask = createInitialTask();
    TaskInfo taskInfo = sqlTask.updateTask(
            TEST_SESSION,
            Optional.of(PLAN_FRAGMENT),
            ImmutableList.of(new TaskSource(TABLE_SCAN_NODE_ID, ImmutableSet.of(SPLIT), true)),
            createInitialEmptyOutputBuffers(PARTITIONED).withBuffer(OUT, 0).withNoMoreBufferIds(),
            Optional.of(new TableWriteInfo(Optional.empty(), Optional.empty(), Optional.empty())));
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.RUNNING);
    taskInfo = sqlTask.getTaskInfo();
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.RUNNING);
    BufferResult results = sqlTask.getTaskResults(OUT, 0, new DataSize(1, MEGABYTE)).get();
    assertEquals(results.isBufferComplete(), false);
    assertEquals(results.getSerializedPages().size(), 1);
    assertEquals(results.getSerializedPages().get(0).getPositionCount(), 1);
    for (boolean moreResults = true; moreResults; moreResults = !results.isBufferComplete()) {
        results = sqlTask.getTaskResults(OUT, results.getToken() + results.getSerializedPages().size(), new DataSize(1, MEGABYTE)).get();
    }
    assertEquals(results.getSerializedPages().size(), 0);
    // complete the task by calling abort on it
    TaskInfo info = sqlTask.abortTaskResults(OUT);
    assertEquals(info.getOutputBuffers().getState(), BufferState.FINISHED);
    taskInfo = sqlTask.getTaskInfo(taskInfo.getTaskStatus().getState()).get(1, SECONDS);
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.FINISHED);
    taskInfo = sqlTask.getTaskInfo();
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.FINISHED);
}
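The loop in testSimpleQuery drains the output buffer by requesting the next token, i.e. the current token plus the number of pages just received, until isBufferComplete() returns true; the final request comes back empty and buffer-complete. Below is a hedged sketch of that drain extracted into a helper; the method name drainBuffer is hypothetical and it assumes access to the test's OUT buffer id constant.

// Hypothetical helper mirroring the drain loop above: always issues at least one
// follow-up request, advancing the token past the pages already received, and stops
// once the buffer reports completion.
private static BufferResult drainBuffer(SqlTask sqlTask, BufferResult first)
        throws Exception
{
    BufferResult results = first;
    do {
        long nextToken = results.getToken() + results.getSerializedPages().size();
        results = sqlTask.getTaskResults(OUT, nextToken, new DataSize(1, MEGABYTE)).get();
    }
    while (!results.isBufferComplete());
    return results;
}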
Use of com.facebook.presto.execution.scheduler.TableWriteInfo in project presto by prestodb.
Class TestSqlTask, method testAbort:
@Test
public void testAbort()
        throws Exception
{
    SqlTask sqlTask = createInitialTask();
    TaskInfo taskInfo = sqlTask.updateTask(
            TEST_SESSION,
            Optional.of(PLAN_FRAGMENT),
            ImmutableList.of(new TaskSource(TABLE_SCAN_NODE_ID, ImmutableSet.of(SPLIT), true)),
            createInitialEmptyOutputBuffers(PARTITIONED).withBuffer(OUT, 0).withNoMoreBufferIds(),
            Optional.of(new TableWriteInfo(Optional.empty(), Optional.empty(), Optional.empty())));
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.RUNNING);
    taskInfo = sqlTask.getTaskInfo();
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.RUNNING);
    sqlTask.abortTaskResults(OUT);
    taskInfo = sqlTask.getTaskInfo(taskInfo.getTaskStatus().getState()).get(1, SECONDS);
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.FINISHED);
    taskInfo = sqlTask.getTaskInfo();
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.FINISHED);
}
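Both SqlTask tests wait for the FINISHED state through the stateful form of getTaskInfo: passing the last observed TaskState returns a future that, as used here, is expected to complete once the task has moved on from that state. A hedged sketch of that wait as a hypothetical helper (assertStateChange is not a Presto method):

// Hypothetical helper: long-poll getTaskInfo with the caller's last observed state,
// then assert that the task reached the expected state within one second.
private static void assertStateChange(SqlTask sqlTask, TaskState lastObservedState, TaskState expectedState)
        throws Exception
{
    TaskInfo taskInfo = sqlTask.getTaskInfo(lastObservedState).get(1, SECONDS);
    assertEquals(taskInfo.getTaskStatus().getState(), expectedState);
}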
Use of com.facebook.presto.execution.scheduler.TableWriteInfo in project presto by prestodb.
Class TestSqlStageExecution, method testFinalStageInfoInternal:
private void testFinalStageInfoInternal()
        throws Exception
{
    NodeTaskMap nodeTaskMap = new NodeTaskMap(new FinalizerService());
    StageId stageId = new StageId(new QueryId("query"), 0);
    SqlStageExecution stage = createSqlStageExecution(
            new StageExecutionId(stageId, 0),
            createExchangePlanFragment(),
            new MockRemoteTaskFactory(executor, scheduledExecutor),
            TEST_SESSION,
            true,
            nodeTaskMap,
            executor,
            new NoOpFailureDetector(),
            new SplitSchedulerStats(),
            new TableWriteInfo(Optional.empty(), Optional.empty(), Optional.empty()));
    stage.setOutputBuffers(createInitialEmptyOutputBuffers(ARBITRARY));
    // add listener that fetches stage info when the final status is available
    SettableFuture<StageExecutionInfo> finalStageInfo = SettableFuture.create();
    stage.addFinalStageInfoListener(finalStageInfo::set);
    // in a background thread add a ton of tasks
    CountDownLatch latch = new CountDownLatch(1000);
    Future<?> addTasksTask = executor.submit(() -> {
        try {
            for (int i = 0; i < 1_000_000; i++) {
                if (Thread.interrupted()) {
                    return;
                }
                InternalNode node = new InternalNode("source" + i, URI.create("http://10.0.0." + (i / 10_000) + ":" + (i % 10_000)), NodeVersion.UNKNOWN, false);
                stage.scheduleTask(node, i);
                latch.countDown();
            }
        }
        finally {
            while (latch.getCount() > 0) {
                latch.countDown();
            }
        }
    });
    // wait for some tasks to be created, and then abort the query
    latch.await(1, MINUTES);
    assertFalse(stage.getStageExecutionInfo().getTasks().isEmpty());
    stage.abort();
    // once the final stage info is available, verify that it is complete
    StageExecutionInfo stageInfo = finalStageInfo.get(1, MINUTES);
    assertFalse(stageInfo.getTasks().isEmpty());
    assertTrue(stageInfo.isFinal());
    assertSame(stage.getStageExecutionInfo(), stageInfo);
    // cancel the background thread adding tasks
    addTasksTask.cancel(true);
}
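The finally block in the background task drains the CountDownLatch so that latch.await(1, MINUTES) does not sit out the full timeout if the producer loop exits early, for example after an interrupt. A small sketch of the same idea as a standalone utility, using only java.util.concurrent (the class name Latches is hypothetical):

import java.util.concurrent.CountDownLatch;

final class Latches
{
    private Latches() {}

    // Mirrors the finally block above: release every remaining permit so that any
    // thread blocked in await() is guaranteed to wake up promptly.
    static void releaseAll(CountDownLatch latch)
    {
        while (latch.getCount() > 0) {
            latch.countDown();
        }
    }
}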