Use of io.trino.operator.TaskContext in project trino by trinodb.
From class SqlTask, method createTaskStatus:
private TaskStatus createTaskStatus(TaskHolder taskHolder) {
    // Obtain task status version before building actual TaskStatus object.
    // This way any task updates won't be lost since all updates happen
    // before version number is increased.
    long versionNumber = taskStatusVersion.get();
    TaskState state = taskStateMachine.getState();
    List<ExecutionFailureInfo> failures = ImmutableList.of();
    if (state == FAILED) {
        failures = toFailures(taskStateMachine.getFailureCauses());
    }
    int queuedPartitionedDrivers = 0;
    long queuedPartitionedSplitsWeight = 0L;
    int runningPartitionedDrivers = 0;
    long runningPartitionedSplitsWeight = 0L;
    DataSize physicalWrittenDataSize = DataSize.ofBytes(0);
    DataSize userMemoryReservation = DataSize.ofBytes(0);
    DataSize revocableMemoryReservation = DataSize.ofBytes(0);
    // TODO: add a mechanism to avoid sending the whole completedDriverGroups set over the wire for every task status reply
    Set<Lifespan> completedDriverGroups = ImmutableSet.of();
    long fullGcCount = 0;
    Duration fullGcTime = new Duration(0, MILLISECONDS);
    long dynamicFiltersVersion = INITIAL_DYNAMIC_FILTERS_VERSION;
    if (taskHolder.getFinalTaskInfo() != null) {
        TaskInfo taskInfo = taskHolder.getFinalTaskInfo();
        TaskStats taskStats = taskInfo.getStats();
        queuedPartitionedDrivers = taskStats.getQueuedPartitionedDrivers();
        queuedPartitionedSplitsWeight = taskStats.getQueuedPartitionedSplitsWeight();
        runningPartitionedDrivers = taskStats.getRunningPartitionedDrivers();
        runningPartitionedSplitsWeight = taskStats.getRunningPartitionedSplitsWeight();
        physicalWrittenDataSize = taskStats.getPhysicalWrittenDataSize();
        userMemoryReservation = taskStats.getUserMemoryReservation();
        revocableMemoryReservation = taskStats.getRevocableMemoryReservation();
        fullGcCount = taskStats.getFullGcCount();
        fullGcTime = taskStats.getFullGcTime();
    } else if (taskHolder.getTaskExecution() != null) {
        long physicalWrittenBytes = 0;
        TaskContext taskContext = taskHolder.getTaskExecution().getTaskContext();
        for (PipelineContext pipelineContext : taskContext.getPipelineContexts()) {
            PipelineStatus pipelineStatus = pipelineContext.getPipelineStatus();
            queuedPartitionedDrivers += pipelineStatus.getQueuedPartitionedDrivers();
            queuedPartitionedSplitsWeight += pipelineStatus.getQueuedPartitionedSplitsWeight();
            runningPartitionedDrivers += pipelineStatus.getRunningPartitionedDrivers();
            runningPartitionedSplitsWeight += pipelineStatus.getRunningPartitionedSplitsWeight();
            physicalWrittenBytes += pipelineContext.getPhysicalWrittenDataSize();
        }
        physicalWrittenDataSize = succinctBytes(physicalWrittenBytes);
        userMemoryReservation = taskContext.getMemoryReservation();
        revocableMemoryReservation = taskContext.getRevocableMemoryReservation();
        completedDriverGroups = taskContext.getCompletedDriverGroups();
        fullGcCount = taskContext.getFullGcCount();
        fullGcTime = taskContext.getFullGcTime();
        dynamicFiltersVersion = taskContext.getDynamicFiltersVersion();
    }
    return new TaskStatus(
            taskStateMachine.getTaskId(),
            taskInstanceId,
            versionNumber,
            state,
            location,
            nodeId,
            completedDriverGroups,
            failures,
            queuedPartitionedDrivers,
            runningPartitionedDrivers,
            isOutputBufferOverutilized(),
            physicalWrittenDataSize,
            userMemoryReservation,
            revocableMemoryReservation,
            fullGcCount,
            fullGcTime,
            dynamicFiltersVersion,
            queuedPartitionedSplitsWeight,
            runningPartitionedSplitsWeight);
}
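The ordering here is deliberate: the version is read before the mutable task state, so an update that races with this method can only make the returned status look older, never newer, and the caller will simply poll again. A minimal self-contained sketch of the same snapshot idiom (the class, record, and field names below are illustrative, not from Trino):

import java.util.concurrent.atomic.AtomicLong;

// Illustrative sketch only; names are not Trino's.
public class VersionedStatusExample {
    record Snapshot(long version, String state) {}

    private final AtomicLong version = new AtomicLong();
    private volatile String state = "RUNNING";

    // Writers change the state first and bump the version afterwards.
    public void update(String newState) {
        state = newState;
        version.incrementAndGet();
    }

    // Readers take the version before reading the state. If a writer races
    // in between, the snapshot simply carries the older version number, so
    // a status poller treats it as stale and fetches again; no update is lost.
    public Snapshot snapshot() {
        long observedVersion = version.get();
        return new Snapshot(observedVersion, state);
    }
}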
Use of io.trino.operator.TaskContext in project trino by trinodb.
From class SqlTaskExecutionFactory, method create:
public SqlTaskExecution create(Session session, QueryContext queryContext, TaskStateMachine taskStateMachine, OutputBuffer outputBuffer, PlanFragment fragment, Runnable notifyStatusChanged) {
    TaskContext taskContext = queryContext.addTaskContext(
            taskStateMachine,
            session,
            notifyStatusChanged,
            perOperatorCpuTimerEnabled,
            cpuTimerEnabled);
    LocalExecutionPlan localExecutionPlan;
    try (SetThreadName ignored = new SetThreadName("Task-%s", taskStateMachine.getTaskId())) {
        try {
            localExecutionPlan = planner.plan(
                    taskContext,
                    fragment.getRoot(),
                    TypeProvider.copyOf(fragment.getSymbols()),
                    fragment.getPartitioningScheme(),
                    fragment.getStageExecutionDescriptor(),
                    fragment.getPartitionedSources(),
                    outputBuffer);
        } catch (Throwable e) {
            // planning failed
            taskStateMachine.failed(e);
            throwIfUnchecked(e);
            throw new RuntimeException(e);
        }
    }
    return createSqlTaskExecution(
            taskStateMachine,
            taskContext,
            outputBuffer,
            localExecutionPlan,
            taskExecutor,
            taskNotificationExecutor,
            splitMonitor);
}
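SetThreadName here is an AutoCloseable that renames the current thread for the duration of the try block and restores the previous name on close, so stack traces and thread dumps taken during planning carry the task id. A sketch of the same scoped-rename idiom, as a hypothetical stand-in rather than the Airlift class Trino actually uses:

// Minimal stand-in showing the scoped thread-rename pattern; not the
// real io.airlift.concurrent.SetThreadName implementation.
final class ScopedThreadName implements AutoCloseable {
    private final String originalName;

    ScopedThreadName(String format, Object... args) {
        this.originalName = Thread.currentThread().getName();
        Thread.currentThread().setName(String.format(format, args));
    }

    @Override
    public void close() {
        // Restore the previous name even if the guarded block throws.
        Thread.currentThread().setName(originalName);
    }
}

// Usage, mirroring the factory method above:
// try (ScopedThreadName ignored = new ScopedThreadName("Task-%s", taskId)) {
//     ... plan the task; stack traces now carry the task id ...
// }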
Use of io.trino.operator.TaskContext in project trino by trinodb.
From class MemoryRevokingScheduler, method requestRevoking:
private void requestRevoking(List<SqlTask> sqlTasks, long remainingBytesToRevoke) {
    VoidTraversingQueryContextVisitor<AtomicLong> visitor = new VoidTraversingQueryContextVisitor<>() {
        @Override
        public Void visitPipelineContext(PipelineContext pipelineContext, AtomicLong remainingBytesToRevoke) {
            if (remainingBytesToRevoke.get() <= 0) {
                // exit immediately if no work needs to be done
                return null;
            }
            return super.visitPipelineContext(pipelineContext, remainingBytesToRevoke);
        }

        @Override
        public Void visitOperatorContext(OperatorContext operatorContext, AtomicLong remainingBytesToRevoke) {
            if (remainingBytesToRevoke.get() > 0) {
                long revokedBytes = operatorContext.requestMemoryRevoking();
                if (revokedBytes > 0) {
                    remainingBytesToRevoke.addAndGet(-revokedBytes);
                    log.debug("requested revoking %s; remaining %s", revokedBytes, remainingBytesToRevoke.get());
                }
            }
            return null;
        }
    };
    AtomicLong remainingBytesToRevokeAtomic = new AtomicLong(remainingBytesToRevoke);
    for (SqlTask task : sqlTasks) {
        Optional<TaskContext> taskContext = task.getTaskContext();
        if (taskContext.isPresent()) {
            taskContext.get().accept(visitor, remainingBytesToRevokeAtomic);
            if (remainingBytesToRevokeAtomic.get() <= 0) {
                // No further revoking required
                return;
            }
        }
    }
}
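The visitor carries the remaining byte budget in a shared AtomicLong so the traversal over task, pipeline, and operator contexts can short-circuit as soon as enough memory has been revoked. A pared-down sketch of this budgeted traversal over a generic tree (Node and tryRevoke are illustrative, not Trino types):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative only: a simplified version of the budgeted traversal in
// requestRevoking. Node stands in for the pipeline/operator context tree.
class RevokingTraversalExample {
    interface Node {
        long tryRevoke();          // bytes actually revoked at this node
        List<Node> children();
    }

    // Visit nodes depth-first, decrementing the shared budget; stop as
    // soon as the budget is exhausted, exactly like the visitor above.
    static void revoke(Node node, AtomicLong remainingBytes) {
        if (remainingBytes.get() <= 0) {
            return; // exit immediately if no work needs to be done
        }
        long revoked = node.tryRevoke();
        if (revoked > 0) {
            remainingBytes.addAndGet(-revoked);
        }
        for (Node child : node.children()) {
            if (remainingBytes.get() <= 0) {
                return;
            }
            revoke(child, remainingBytes);
        }
    }
}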
Use of io.trino.operator.TaskContext in project trino by trinodb.
From class TestSpatialJoinOperator, method testEmptyBuild:
@Test
public void testEmptyBuild() {
    TaskContext taskContext = createTaskContext();
    RowPagesBuilder buildPages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR));
    RowPagesBuilder probePages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR))
            .row(POINT_X, "x")
            .row(null, "null")
            .row(POINT_Y, "y")
            .pageBreak()
            .row(POINT_Z, "z")
            .pageBreak()
            .row(POINT_W, "w");
    MaterializedResult expected = resultBuilder(taskContext.getSession(), ImmutableList.of(VARCHAR, VARCHAR)).build();
    assertSpatialJoin(taskContext, INNER, buildPages, probePages, expected);
}
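With an empty build side, an INNER spatial join can emit no rows no matter what the probe produces, so the expected MaterializedResult is built with no rows. The createTaskContext() helper is not shown in this excerpt; Trino operator tests commonly define it along these lines (a sketch only; the executor fields and TEST_SESSION are assumed fixtures of the test class):

// Plausible definition of the helper used above, following the common
// Trino test pattern; executor, scheduledExecutor, and TEST_SESSION are
// assumed to be fields of the test class, not shown in this excerpt.
private TaskContext createTaskContext() {
    return TestingTaskContext.createTaskContext(executor, scheduledExecutor, TEST_SESSION);
}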
Use of io.trino.operator.TaskContext in project trino by trinodb.
From class TestSpatialJoinOperator, method testDuplicateProbeFactory:
@Test(dataProvider = "testDuplicateProbeFactoryDataProvider")
public void testDuplicateProbeFactory(boolean createSecondaryOperators) throws Exception {
    TaskContext taskContext = createTaskContext();
    PipelineContext pipelineContext = taskContext.addPipelineContext(0, true, true, false);
    DriverContext probeDriver = pipelineContext.addDriverContext();
    DriverContext buildDriver = pipelineContext.addDriverContext();
    RowPagesBuilder buildPages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR, DOUBLE))
            .row(stPoint(0, 0), "0_0", 1.5);
    PagesSpatialIndexFactory pagesSpatialIndexFactory = buildIndex(
            buildDriver,
            (build, probe, r) -> build.distance(probe) <= r.getAsDouble(),
            Optional.of(2),
            Optional.empty(),
            buildPages);
    RowPagesBuilder probePages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR))
            .row(stPoint(0, 1), "0_1");
    OperatorFactory firstFactory = new SpatialJoinOperatorFactory(
            2,
            new PlanNodeId("test"),
            INNER,
            probePages.getTypes(),
            Ints.asList(1),
            0,
            Optional.empty(),
            pagesSpatialIndexFactory);
    for (int i = 0; i < 3; i++) {
        DriverContext secondDriver = pipelineContext.addDriverContext();
        OperatorFactory secondFactory = firstFactory.duplicate();
        if (createSecondaryOperators) {
            try (Operator secondOperator = secondFactory.createOperator(secondDriver)) {
                assertEquals(toPages(secondOperator, emptyIterator()), ImmutableList.of());
            }
        }
        secondFactory.noMoreOperators();
    }
    MaterializedResult expected = resultBuilder(taskContext.getSession(), ImmutableList.of(VARCHAR, VARCHAR))
            .row("0_1", "0_0")
            .build();
    assertOperatorEquals(firstFactory, probeDriver, probePages.build(), expected);
}
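TestNG runs this method once per row returned by the named data provider, exercising both the branch that opens secondary operators and the branch that only duplicates the factory. The provider is not shown in this excerpt; it is presumably defined along these lines (a sketch, not copied from the test class):

// Presumed TestNG data provider matching the name referenced above;
// yields both values of createSecondaryOperators.
@DataProvider(name = "testDuplicateProbeFactoryDataProvider")
public Object[][] testDuplicateProbeFactoryDataProvider() {
    return new Object[][] {
            {true},
            {false},
    };
}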