Use of io.prestosql.operator.TaskContext in project hetu-core by openlookeng.
Example from the class TestSpatialJoinOperator, method testYield.
@Test
public void testYield() {
    // create a filter function that yields for every probe match
    // verify we will yield #match times totally
    TaskContext taskContext = createTaskContext();
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    // force a yield for every match
    AtomicInteger filterFunctionCalls = new AtomicInteger();
    InternalJoinFilterFunction filterFunction = new TestInternalJoinFilterFunction(
            (leftPosition, leftPage, rightPosition, rightPage) -> {
                filterFunctionCalls.incrementAndGet();
                driverContext.getYieldSignal().forceYieldForTesting();
                return true;
            });
    RowPagesBuilder buildPages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR))
            .row(POLYGON_A, "A")
            .pageBreak()
            .row(POLYGON_B, "B");
    PagesSpatialIndexFactory pagesSpatialIndexFactory = buildIndex(driverContext, (build, probe, r) -> build.contains(probe), Optional.empty(), Optional.of(filterFunction), buildPages);
    // 10 points in polygon A (x0...x9)
    // 10 points in polygons A and B (y0...y9)
    // 10 points in polygon B (z0...z9)
    // 40 total matches
    RowPagesBuilder probePages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR));
    for (int i = 0; i < 10; i++) {
        probePages.row(stPoint(1 + 0.1 * i, 1 + 0.1 * i), "x" + i);
    }
    for (int i = 0; i < 10; i++) {
        probePages.row(stPoint(4.5 + 0.01 * i, 4.5 + 0.01 * i), "y" + i);
    }
    for (int i = 0; i < 10; i++) {
        probePages.row(stPoint(6 + 0.1 * i, 6 + 0.1 * i), "z" + i);
    }
    List<Page> probeInput = probePages.build();
    OperatorFactory joinOperatorFactory = new SpatialJoinOperatorFactory(2, new PlanNodeId("test"), INNER, probePages.getTypes(), Ints.asList(1), 0, Optional.empty(), pagesSpatialIndexFactory);
    Operator operator = joinOperatorFactory.createOperator(driverContext);
    assertTrue(operator.needsInput());
    operator.addInput(probeInput.get(0));
    operator.finish();
    // we will yield 40 times due to filterFunction
    for (int i = 0; i < 40; i++) {
        driverContext.getYieldSignal().setWithDelay(5 * SECONDS.toNanos(1), driverContext.getYieldExecutor());
        assertNull(operator.getOutput());
        assertEquals(filterFunctionCalls.get(), i + 1, "Expected join to stop processing (yield) after calling filter function once");
        driverContext.getYieldSignal().reset();
    }
    // delayed yield is not going to prevent operator from producing a page now (yield won't be forced because filter function won't be called anymore)
    driverContext.getYieldSignal().setWithDelay(5 * SECONDS.toNanos(1), driverContext.getYieldExecutor());
    Page output = operator.getOutput();
    assertNotNull(output);
    // make sure we have 40 matches
    assertEquals(output.getPositionCount(), 40);
}
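The createTaskContext() helper called at the top of this test is not part of the excerpt. In the Presto/hetu-core test suites such helpers usually delegate to io.prestosql.testing.TestingTaskContext; the following is a minimal sketch under that assumption, with the executor fields and the TEST_SESSION constant assumed from the standard test utilities rather than taken from this class:

private final ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("test-executor-%s"));
private final ScheduledExecutorService scheduledExecutor = newScheduledThreadPool(2, daemonThreadsNamed("test-scheduledExecutor-%s"));

private TaskContext createTaskContext() {
    // Build a TaskContext suitable for operator tests using the shared testing helper.
    return TestingTaskContext.createTaskContext(executor, scheduledExecutor, TEST_SESSION);
}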
Use of io.prestosql.operator.TaskContext in project hetu-core by openlookeng.
Example from the class TestSpatialJoinOperator, method testDuplicateProbeFactory.
@Test(dataProvider = "testDuplicateProbeFactoryDataProvider")
public void testDuplicateProbeFactory(boolean createSecondaryOperators) throws Exception {
    TaskContext taskContext = createTaskContext();
    PipelineContext pipelineContext = taskContext.addPipelineContext(0, true, true, false);
    DriverContext probeDriver = pipelineContext.addDriverContext();
    DriverContext buildDriver = pipelineContext.addDriverContext();
    RowPagesBuilder buildPages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR, DOUBLE))
            .row(stPoint(0, 0), "0_0", 1.5);
    PagesSpatialIndexFactory pagesSpatialIndexFactory = buildIndex(buildDriver, (build, probe, r) -> build.distance(probe) <= r.getAsDouble(), Optional.of(2), Optional.empty(), buildPages);
    RowPagesBuilder probePages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR))
            .row(stPoint(0, 1), "0_1");
    OperatorFactory firstFactory = new SpatialJoinOperatorFactory(2, new PlanNodeId("test"), INNER, probePages.getTypes(), Ints.asList(1), 0, Optional.empty(), pagesSpatialIndexFactory);
    for (int i = 0; i < 3; i++) {
        DriverContext secondDriver = pipelineContext.addDriverContext();
        OperatorFactory secondFactory = firstFactory.duplicate();
        if (createSecondaryOperators) {
            try (Operator secondOperator = secondFactory.createOperator(secondDriver)) {
                assertEquals(toPages(secondOperator, emptyIterator()), ImmutableList.of());
            }
        }
        secondFactory.noMoreOperators();
    }
    MaterializedResult expected = resultBuilder(taskContext.getSession(), ImmutableList.of(VARCHAR, VARCHAR))
            .row("0_1", "0_0")
            .build();
    assertOperatorEquals(firstFactory, probeDriver, probePages.build(), expected);
}
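The data provider named in the @Test annotation is not shown above. A minimal TestNG provider covering both values of the createSecondaryOperators flag could look like the sketch below (the exact body in hetu-core may differ):

@DataProvider(name = "testDuplicateProbeFactoryDataProvider")
public Object[][] testDuplicateProbeFactoryDataProvider() {
    // run the test once creating secondary operators and once skipping them
    return new Object[][] {
            {true},
            {false},
    };
}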
Use of io.prestosql.operator.TaskContext in project hetu-core by openlookeng.
Example from the class SqlTaskExecutionFactory, method create.
public SqlTaskExecution create(String taskInstanceId, Session session, QueryContext queryContext, TaskStateMachine taskStateMachine, OutputBuffer outputBuffer, PlanFragment fragment, List<TaskSource> sources, OptionalInt totalPartitions, Optional<PlanNodeId> consumer, Map<String, CommonTableExecutionContext> cteCtx) {
    TaskContext taskContext = queryContext.addTaskContext(taskInstanceId, taskStateMachine, session, perOperatorCpuTimerEnabled, cpuTimerEnabled, totalPartitions, consumer, new PagesSerdeFactory(metadata.getFunctionAndTypeManager().getBlockEncodingSerde(), isExchangeCompressionEnabled(session)));
    LocalExecutionPlan localExecutionPlan;
    try (SetThreadName ignored = new SetThreadName("Task-%s", taskStateMachine.getTaskId())) {
        try {
            localExecutionPlan = planner.plan(taskContext, fragment.getRoot(), TypeProvider.copyOf(fragment.getSymbols()), fragment.getPartitioningScheme(), fragment.getStageExecutionDescriptor(), fragment.getPartitionedSources(), outputBuffer, fragment.getFeederCTEId(), fragment.getFeederCTEParentId(), cteCtx);
        } catch (Throwable e) {
            // planning failed
            taskStateMachine.failed(e);
            throwIfUnchecked(e);
            throw new RuntimeException(e);
        }
    }
    return createSqlTaskExecution(taskStateMachine, taskContext, outputBuffer, sources, localExecutionPlan, taskExecutor, taskNotificationExecutor, splitMonitor);
}
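The catch block relies on Guava's Throwables.throwIfUnchecked: the failure is recorded on the task state machine, unchecked throwables are rethrown unchanged, and checked ones are wrapped. A small self-contained illustration of that rethrow idiom (the class and method names here are illustrative, not from hetu-core):

import static com.google.common.base.Throwables.throwIfUnchecked;

public final class RethrowExample {
    // Rethrows RuntimeExceptions and Errors as-is; wraps checked throwables in a RuntimeException.
    static RuntimeException propagate(Throwable e) {
        throwIfUnchecked(e);
        return new RuntimeException(e);
    }

    public static void main(String[] args) {
        try {
            throw propagate(new java.io.IOException("planning failed"));
        } catch (RuntimeException wrapped) {
            System.out.println(wrapped.getCause()); // the checked exception arrives as the cause
        }
    }
}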
Use of io.prestosql.operator.TaskContext in project hetu-core by openlookeng.
Example from the class SqlTask, method createTaskStatus.
private TaskStatus createTaskStatus(TaskHolder taskHolder) {
    // Always return a new TaskInfo with a larger version number;
    // otherwise a client will not accept the update
    long versionNumber = nextTaskInfoVersion.getAndIncrement();
    TaskState state = taskStateMachine.getState();
    List<ExecutionFailureInfo> failures = ImmutableList.of();
    if (state == FAILED) {
        failures = toFailures(taskStateMachine.getFailureCauses());
    }
    int queuedPartitionedDrivers = 0;
    int runningPartitionedDrivers = 0;
    DataSize physicalWrittenDataSize = new DataSize(0, BYTE);
    DataSize userMemoryReservation = new DataSize(0, BYTE);
    DataSize systemMemoryReservation = new DataSize(0, BYTE);
    DataSize revocableMemoryReservation = new DataSize(0, BYTE);
    // TODO: add a mechanism to avoid sending the whole completedDriverGroups set over the wire for every task status reply
    Set<Lifespan> completedDriverGroups = ImmutableSet.of();
    long fullGcCount = 0;
    Duration fullGcTime = new Duration(0, MILLISECONDS);
    Map<Long, SnapshotInfo> snapshotCaptureResult = ImmutableMap.of();
    Optional<RestoreResult> snapshotRestoreResult = Optional.empty();
    TaskInfo finalTaskInfo = taskHolder.getFinalTaskInfo();
    if (finalTaskInfo != null) {
        TaskStats taskStats = finalTaskInfo.getStats();
        queuedPartitionedDrivers = taskStats.getQueuedPartitionedDrivers();
        runningPartitionedDrivers = taskStats.getRunningPartitionedDrivers();
        physicalWrittenDataSize = taskStats.getPhysicalWrittenDataSize();
        userMemoryReservation = taskStats.getUserMemoryReservation();
        systemMemoryReservation = taskStats.getSystemMemoryReservation();
        revocableMemoryReservation = taskStats.getRevocableMemoryReservation();
        fullGcCount = taskStats.getFullGcCount();
        fullGcTime = taskStats.getFullGcTime();
        if (isSnapshotEnabled) {
            // Add snapshot result
            snapshotCaptureResult = finalTaskInfo.getTaskStatus().getSnapshotCaptureResult();
            snapshotRestoreResult = finalTaskInfo.getTaskStatus().getSnapshotRestoreResult();
        }
    } else if (taskHolder.getTaskExecution() != null) {
        long physicalWrittenBytes = 0;
        TaskContext taskContext = taskHolder.getTaskExecution().getTaskContext();
        for (PipelineContext pipelineContext : taskContext.getPipelineContexts()) {
            PipelineStatus pipelineStatus = pipelineContext.getPipelineStatus();
            queuedPartitionedDrivers += pipelineStatus.getQueuedPartitionedDrivers();
            runningPartitionedDrivers += pipelineStatus.getRunningPartitionedDrivers();
            physicalWrittenBytes += pipelineContext.getPhysicalWrittenDataSize();
        }
        physicalWrittenDataSize = succinctBytes(physicalWrittenBytes);
        userMemoryReservation = taskContext.getMemoryReservation();
        systemMemoryReservation = taskContext.getSystemMemoryReservation();
        revocableMemoryReservation = taskContext.getRevocableMemoryReservation();
        completedDriverGroups = taskContext.getCompletedDriverGroups();
        fullGcCount = taskContext.getFullGcCount();
        fullGcTime = taskContext.getFullGcTime();
        if (isSnapshotEnabled) {
            // Add snapshot result
            TaskSnapshotManager snapshotManager = taskHolder.taskExecution.getTaskContext().getSnapshotManager();
            snapshotCaptureResult = snapshotManager.getSnapshotCaptureResult();
            snapshotRestoreResult = Optional.ofNullable(snapshotManager.getSnapshotRestoreResult());
        }
    }
    return new TaskStatus(taskStateMachine.getTaskId(), confirmationInstanceId, versionNumber, state, location, nodeId, completedDriverGroups, failures, queuedPartitionedDrivers, runningPartitionedDrivers, isOutputBufferOverutilized(), physicalWrittenDataSize, userMemoryReservation, systemMemoryReservation, revocableMemoryReservation, fullGcCount, fullGcTime, snapshotCaptureResult, snapshotRestoreResult);
}
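The comment at the start of the method is the key invariant: each TaskStatus carries a strictly increasing version number, and a client keeps only the newest status it has seen. A hedged sketch of that client-side rule (the helper below is hypothetical; it only assumes TaskStatus exposes its version via getVersion()):

// Hypothetical helper illustrating why the version must always increase:
// an update is accepted only if it is strictly newer than the cached status.
static TaskStatus acceptIfNewer(TaskStatus current, TaskStatus update) {
    return update.getVersion() > current.getVersion() ? update : current;
}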
Use of io.prestosql.operator.TaskContext in project hetu-core by openlookeng.
Example from the class TestMultiInputSnapshotState, method testStaticConstructor.
@Test
public void testStaticConstructor() {
    ScheduledExecutorService scheduler = newScheduledThreadPool(4);
    TaskContext taskContext = createTaskContext(scheduler, scheduler, TEST_SNAPSHOT_SESSION);
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    OperatorContext operatorContext = driverContext.addOperatorContext(1, new PlanNodeId("planNodeId"), "test");
    MultiInputSnapshotState inputSnapshotState = MultiInputSnapshotState.forOperator(mock(MultiInputRestorable.class), operatorContext);
    processPage(inputSnapshotState, source1, regularPage);
    inputSnapshotState = MultiInputSnapshotState.forTaskComponent(mock(MultiInputRestorable.class), taskContext, TestMultiInputSnapshotState::createSnapshotStateId);
    processPage(inputSnapshotState, source1, regularPage);
}
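One detail this excerpt omits is executor cleanup: the scheduler created with newScheduledThreadPool is a local variable, so the simplest way to avoid leaking its threads is a try/finally around the test body (a sketch, not part of the original test):

ScheduledExecutorService scheduler = newScheduledThreadPool(4);
try {
    // ... test body as above ...
} finally {
    // release the four scheduler threads created for this test
    scheduler.shutdownNow();
}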