use of io.prestosql.execution.Lifespan in project hetu-core by openlookeng.
the class TestHttpRemoteTask method testRegular.
@Test(timeOut = 30000)
public void testRegular() throws Exception {
    AtomicLong lastActivityNanos = new AtomicLong(System.nanoTime());
    TestingTaskResource testingTaskResource = new TestingTaskResource(lastActivityNanos, FailureScenario.NO_FAILURE);
    HttpRemoteTaskFactory httpRemoteTaskFactory = createHttpRemoteTaskFactory(testingTaskResource);
    RemoteTask remoteTask = createRemoteTask(httpRemoteTaskFactory);

    testingTaskResource.setInitialTaskInfo(remoteTask.getTaskInfo());
    remoteTask.start();

    // schedule a single split, pinned to driver group 3
    Lifespan lifespan = Lifespan.driverGroup(3);
    remoteTask.addSplits(ImmutableMultimap.of(TABLE_SCAN_NODE_ID, new Split(new CatalogName("test"), TestingSplit.createLocalSplit(), lifespan)));
    poll(() -> testingTaskResource.getTaskSource(TABLE_SCAN_NODE_ID) != null);
    poll(() -> testingTaskResource.getTaskSource(TABLE_SCAN_NODE_ID).getSplits().size() == 1);

    // signal no-more-splits for that lifespan, then for the whole plan node
    remoteTask.noMoreSplits(TABLE_SCAN_NODE_ID, lifespan);
    poll(() -> testingTaskResource.getTaskSource(TABLE_SCAN_NODE_ID).getNoMoreSplitsForLifespan().size() == 1);

    remoteTask.noMoreSplits(TABLE_SCAN_NODE_ID);
    poll(() -> testingTaskResource.getTaskSource(TABLE_SCAN_NODE_ID).isNoMoreSplits());

    // cancel and wait for the task to reach a done state
    remoteTask.cancel();
    poll(() -> remoteTask.getTaskStatus().getState().isDone());
    poll(() -> remoteTask.getTaskInfo().getTaskStatus().getState().isDone());

    httpRemoteTaskFactory.stop();
}
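The test pins both the split and the later noMoreSplits call to the same driver-group lifespan. As a point of reference, here is a minimal sketch of how Lifespan values are created and compared, assuming the standard taskWide()/driverGroup() factory methods of io.prestosql.execution.Lifespan (the LifespanSketch class name is illustrative only):
import io.prestosql.execution.Lifespan;

public class LifespanSketch {
    public static void main(String[] args) {
        // Task-wide lifespan: splits are not tied to any driver group.
        Lifespan taskWide = Lifespan.taskWide();
        // Grouped lifespan: splits belong to driver group 3, as in the test above.
        Lifespan group3 = Lifespan.driverGroup(3);

        System.out.println(taskWide.isTaskWide());                  // true
        System.out.println(group3.isTaskWide());                    // false
        // Lifespan is a value object, so equal driver groups compare equal
        System.out.println(group3.equals(Lifespan.driverGroup(3))); // true
    }
}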
use of io.prestosql.execution.Lifespan in project hetu-core by openlookeng.
the class MarkerSplitSource method getNextBatchImpl.
private ListenableFuture<SplitBatch> getNextBatchImpl(ConnectorPartitionHandle partitionHandle, Lifespan lifespan, int maxSize) {
    checkArgument(maxSize > 0, "Cannot fetch a batch of zero size");

    if (resumingSnapshotId.isPresent()) {
        // Resuming from a snapshot: send a resume marker before any further splits
        sentInitialMarker = true;
        boolean lastBatch = sourceExhausted && bufferPosition == splitBuffer.size();
        SplitBatch batch = recordSnapshot(lifespan, true, resumingSnapshotId.getAsLong(), lastBatch);
        resumingSnapshotId = OptionalLong.empty();
        return Futures.immediateFuture(batch);
    }

    if (!sentInitialMarker) {
        sentInitialMarker = true;
        // Send initial empty marker, to trigger creation of tasks. This marker is ignored by SqlTaskExecution.
        Split marker = new Split(getCatalogName(), MarkerSplit.snapshotSplit(getCatalogName(), 0), lifespan);
        SplitBatch batch = new SplitBatch(Collections.singletonList(marker), false);
        return Futures.immediateFuture(batch);
    }

    if (sourceExhausted && bufferPosition == splitBuffer.size()) {
        if (!unionSources.isEmpty() && !remainingUnionSources.contains(this)) {
            boolean lastBatch = remainingUnionSources.isEmpty();
            OptionalLong snapshotId = announcer.shouldGenerateMarker(this);
            if (snapshotId.isPresent() && (!lastMarkerForUnion.isPresent() || snapshotId.getAsLong() <= lastMarkerForUnion.getAsLong())) {
                SplitBatch batch = recordSnapshot(lifespan, false, snapshotId.getAsLong(), lastBatch);
                return Futures.immediateFuture(batch);
            }
            if (lastBatch) {
                sentFinalMarker = true;
                deactivate();
            }
            SplitBatch batch = new SplitBatch(ImmutableList.of(), lastBatch);
            return Futures.immediateFuture(batch);
        }
        // Force send last-batch marker
        long sid = announcer.forceGenerateMarker(this);
        SplitBatch batch = recordSnapshot(lifespan, false, sid, true);
        return Futures.immediateFuture(batch);
    }

    OptionalLong snapshotId = announcer.shouldGenerateMarker(this);
    if (snapshotId.isPresent()) {
        SplitBatch batch = recordSnapshot(lifespan, false, snapshotId.getAsLong(), false);
        return Futures.immediateFuture(batch);
    }

    if (!remainingDependencies.isEmpty()) {
        // Dependencies have not all finished; emit an empty batch so only markers are produced for now,
        // which will pass through join operators, and can be completed.
        return Futures.immediateFuture(new SplitBatch(Collections.emptyList(), false));
    }

    // Get next batch of "data" splits, then determine if marker should be added.
    ListenableFuture<SplitBatch> result = prepareNextBatch(partitionHandle, lifespan, maxSize);
    result = Futures.transform(result, batch -> {
        if (batch != null) {
            List<Split> splits = batch.getSplits();
            incrementSplitCount(splits.size());
            if (batch.isLastBatch()) {
                if (splits.size() == 0) {
                    // Force generate a marker for last batch. Marker can't be mixed with data splits.
                    long sid = announcer.forceGenerateMarker(this);
                    batch = recordSnapshot(lifespan, false, sid, true);
                } else {
                    // Don't send last-batch signal yet. Next call will generate a marker with last-batch.
                    batch = new SplitBatch(splits, false);
                }
            }
        }
        return batch;
    }, directExecutor());
    return result;
}
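getNextBatchImpl backs the per-lifespan getNextBatch call that the scheduler drives. Below is a hedged sketch of how a caller might drain one lifespan from such a source; drainLifespan is a hypothetical helper, and the getNextBatch(partitionHandle, lifespan, maxSize) signature is assumed to match the SplitSource interface this class implements:
// Hedged sketch: drain one lifespan from a SplitSource until its last batch.
// splitSource and partitionHandle are assumed to be supplied by the scheduler.
static void drainLifespan(SplitSource splitSource, ConnectorPartitionHandle partitionHandle, Lifespan lifespan) throws Exception {
    boolean lastBatch = false;
    while (!lastBatch) {
        SplitBatch batch = splitSource.getNextBatch(partitionHandle, lifespan, 1000).get();
        for (Split split : batch.getSplits()) {
            // marker splits and data splits arrive through the same API,
            // but getNextBatchImpl never mixes a marker with data splits in one batch
            System.out.println(split);
        }
        lastBatch = batch.isLastBatch();
    }
}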
use of io.prestosql.execution.Lifespan in project hetu-core by openlookeng.
the class HttpRemoteTask method processTaskUpdate.
private synchronized void processTaskUpdate(TaskInfo newValue, List<TaskSource> sources) {
    updateTaskInfo(newValue);

    // remove acknowledged splits, which frees memory
    for (TaskSource source : sources) {
        PlanNodeId planNodeId = source.getPlanNodeId();
        int removed = 0;
        for (ScheduledSplit split : source.getSplits()) {
            if (pendingSplits.remove(planNodeId, split)) {
                removed++;
            }
        }
        if (source.isNoMoreSplits()) {
            noMoreSplits.put(planNodeId, false);
        }
        for (Lifespan lifespan : source.getNoMoreSplitsForLifespan()) {
            pendingNoMoreSplitsForLifespan.remove(planNodeId, lifespan);
        }
        if (planFragment.isPartitionedSources(planNodeId)) {
            pendingSourceSplitCount -= removed;
        }
    }
    updateSplitQueueSpace();
    partitionedSplitCountTracker.setPartitionedSplitCount(getPartitionedSplitCount());
}
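The per-lifespan bookkeeping here is plain Guava multimap usage: noMoreSplits(planNodeId, lifespan) records a pending signal, and the acknowledged TaskSource clears it in this method. A small standalone sketch of that pattern, with names mirroring the snippet (this is illustration only, not the actual HttpRemoteTask fields):
// Sketch of the per-lifespan bookkeeping pattern used above (standalone, using Guava multimaps).
Multimap<PlanNodeId, Lifespan> pendingNoMoreSplitsForLifespan = HashMultimap.create();
PlanNodeId scanNode = new PlanNodeId("0");

// noMoreSplits(planNodeId, lifespan) keeps the signal pending until the worker acknowledges it ...
pendingNoMoreSplitsForLifespan.put(scanNode, Lifespan.driverGroup(3));

// ... and processTaskUpdate drops it once the acknowledged TaskSource echoes the lifespan back.
for (Lifespan acknowledged : ImmutableSet.of(Lifespan.driverGroup(3))) {
    pendingNoMoreSplitsForLifespan.remove(scanNode, acknowledged);
}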
use of io.prestosql.execution.Lifespan in project hetu-core by openlookeng.
the class HttpRemoteTask method getSource.
private synchronized TaskSource getSource(PlanNodeId planNodeId) {
    Set<ScheduledSplit> splits = ImmutableSet.copyOf(pendingSplits.get(planNodeId));
    boolean pendingNoMoreSplits = Boolean.TRUE.equals(this.noMoreSplits.get(planNodeId));
    boolean tmpNoMoreSplits = this.noMoreSplits.containsKey(planNodeId);
    Set<Lifespan> noMoreSplitsForLifespan = pendingNoMoreSplitsForLifespan.get(planNodeId);

    TaskSource element = null;
    if (!splits.isEmpty() || !noMoreSplitsForLifespan.isEmpty() || pendingNoMoreSplits) {
        element = new TaskSource(planNodeId, splits, noMoreSplitsForLifespan, tmpNoMoreSplits);
    }
    return element;
}
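For reference, the TaskSource assembled here is what TestHttpRemoteTask polls for at the top of this page. A short sketch of constructing and reading one, using only the constructor and accessors that already appear in the snippets above (the PlanNodeId "0" is a placeholder):
// Sketch: build and inspect a TaskSource with a per-lifespan no-more-splits signal.
TaskSource source = new TaskSource(
        new PlanNodeId("0"),                      // plan node the splits belong to
        ImmutableSet.of(),                        // pending splits (none in this sketch)
        ImmutableSet.of(Lifespan.driverGroup(3)), // lifespans that will receive no more splits
        false);                                   // overall no-more-splits flag

System.out.println(source.getPlanNodeId());
System.out.println(source.getSplits().size());
System.out.println(source.getNoMoreSplitsForLifespan());
System.out.println(source.isNoMoreSplits());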
use of io.prestosql.execution.Lifespan in project hetu-core by openlookeng.
the class TestIndexCacheRemoval method setupBeforeClass.
@BeforeClass
public void setupBeforeClass() {
    PropertyService.setProperty(HetuConstant.FILTER_ENABLED, true);
    PropertyService.setProperty(HetuConstant.INDEXSTORE_FILESYSTEM_PROFILE, "local-config-default");
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_MAX_MEMORY, (long) (new DataSize(numberOfIndexTypes * 2, KILOBYTE).getValue(KILOBYTE)));
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_TTL, new Duration(10, TimeUnit.MINUTES));
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_LOADING_DELAY, new Duration(loadDelay, TimeUnit.MILLISECONDS));
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_LOADING_THREADS, 2L);
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_SOFT_REFERENCE, false);

    CatalogName catalogName = new CatalogName(catalog);
    connectorSplit = mock(ConnectorSplit.class);
    Lifespan lifespan = mock(Lifespan.class);
    split = new Split(catalogName, connectorSplit, lifespan);
    when(connectorSplit.getFilePath()).thenReturn(testPath);
    when(connectorSplit.getLastModifiedTime()).thenReturn(testLastModifiedTime);
}
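Because Lifespan is a small value object, a test like this could also use a real instance rather than a Mockito mock. A hedged alternative, reusing the catalogName and connectorSplit set up above:
// Alternative sketch: pass a real Lifespan instead of a mock when building the Split.
Split taskWideSplit = new Split(catalogName, connectorSplit, Lifespan.taskWide());
Split groupedSplit = new Split(catalogName, connectorSplit, Lifespan.driverGroup(0));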