Use of io.prestosql.metadata.Split in project hetu-core by openlookeng.
The class MarkerSplitSource, method recordSnapshot:
private SplitBatch recordSnapshot(Lifespan lifespan, boolean resuming, long snapshotId, boolean lastBatch)
{
    boolean localLastBatch = updateUnionSources(snapshotId, lastBatch);
    if (!resuming) {
        if (!firstSnapshot.isPresent()) {
            firstSnapshot = OptionalLong.of(snapshotId);
        }
        // Record how many splits had been buffered when this snapshot was taken
        snapshotBufferPositions.put(snapshotId, bufferPosition);
    }
    LOG.debug("Generating snapshot %d (resuming=%b) after %d splits for source: %s (%s)", snapshotId, resuming, bufferPosition, source.getCatalogName(), source.toString());
    // Wrap the marker in a regular Split so it flows through split scheduling like any data split
    MarkerSplit split = resuming ? MarkerSplit.resumeSplit(getCatalogName(), snapshotId) : MarkerSplit.snapshotSplit(getCatalogName(), snapshotId);
    Split marker = new Split(getCatalogName(), split, lifespan);
    SplitBatch batch = new SplitBatch(Collections.singletonList(marker), localLastBatch);
    if (localLastBatch) {
        sentFinalMarker = true;
        deactivate();
    }
    return batch;
}
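The marker is wrapped in an ordinary Split, so a downstream consumer has to unwrap the connector split to tell markers apart from data splits. A minimal sketch of that check, assuming MarkerSplit lives in the snapshot package and exposes a getSnapshotId() accessor (both assumptions beyond what the snippet shows):

import io.prestosql.metadata.Split;
import io.prestosql.snapshot.MarkerSplit;

// Sketch only: unwrap the connector split to decide whether this is a snapshot
// marker or a regular data split. getSnapshotId() is an assumed accessor.
static boolean isSnapshotMarker(Split split)
{
    if (split.getConnectorSplit() instanceof MarkerSplit) {
        MarkerSplit marker = (MarkerSplit) split.getConnectorSplit();
        System.out.printf("received marker for snapshot %d%n", marker.getSnapshotId());
        return true;
    }
    return false;
}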
Use of io.prestosql.metadata.Split in project hetu-core by openlookeng.
The class HttpRemoteTask, method addSplits:
@Override
public synchronized void addSplits(Multimap<PlanNodeId, Split> splitsBySource)
{
    requireNonNull(splitsBySource, "splitsBySource is null");
    // only add pending split if not done
    if (getTaskStatus().getState().isDone()) {
        return;
    }
    boolean tmpNeedsUpdate = false;
    for (Entry<PlanNodeId, Collection<Split>> entry : splitsBySource.asMap().entrySet()) {
        PlanNodeId sourceId = entry.getKey();
        Collection<Split> splits = entry.getValue();
        checkState(!noMoreSplits.containsKey(sourceId), "noMoreSplits has already been set for %s", sourceId);
        int added = 0;
        for (Split split : splits) {
            if (pendingSplits.put(sourceId, new ScheduledSplit(nextSplitId.getAndIncrement(), sourceId, split))) {
                added++;
            }
        }
        if (planFragment.isPartitionedSources(sourceId)) {
            pendingSourceSplitCount += added;
            partitionedSplitCountTracker.setPartitionedSplitCount(getPartitionedSplitCount());
        }
        tmpNeedsUpdate = true;
    }
    updateSplitQueueSpace();
    if (tmpNeedsUpdate) {
        this.needsUpdate.set(true);
        scheduleUpdate();
    }
}
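For reference, a minimal caller-side sketch of feeding splits to a task through this method. The remoteTask and split variables and the plan node id value are placeholders, and the import path for PlanNodeId follows hetu-core's SPI layout as an assumption:

import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Multimap;
import io.prestosql.metadata.Split;
import io.prestosql.spi.plan.PlanNodeId;

// Sketch only: group splits by the plan node that consumes them and hand them to the task.
Multimap<PlanNodeId, Split> splitsBySource = ImmutableMultimap.<PlanNodeId, Split>builder()
        .put(new PlanNodeId("tableScan"), split)
        .build();
remoteTask.addSplits(splitsBySource); // silently ignored if the task is already done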
Use of io.prestosql.metadata.Split in project hetu-core by openlookeng.
The class BenchmarkScanFilterAndProjectOperator, method benchmarkColumnOriented:
@Benchmark
public List<Page> benchmarkColumnOriented(Context context)
{
    DriverContext driverContext = context.createTaskContext().addPipelineContext(0, true, true, false).addDriverContext();
    SourceOperator operator = (SourceOperator) context.getOperatorFactory().createOperator(driverContext);
    ImmutableList.Builder<Page> outputPages = ImmutableList.builder();
    operator.addSplit(new Split(new CatalogName("test"), createLocalSplit(), Lifespan.taskWide()));
    operator.noMoreSplits();
    for (int loops = 0; !operator.isFinished() && loops < 1_000_000; loops++) {
        Page outputPage = operator.getOutput();
        if (outputPage != null) {
            outputPages.add(outputPage);
        }
    }
    return outputPages.build();
}
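To run this benchmark on its own, the usual JMH entry point works; a sketch with placeholder warmup and measurement counts:

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

// Sketch only: launch just this benchmark class through the standard JMH runner.
public static void main(String[] args) throws RunnerException
{
    Options options = new OptionsBuilder()
            .include(BenchmarkScanFilterAndProjectOperator.class.getSimpleName())
            .warmupIterations(5)
            .measurementIterations(10)
            .forks(1)
            .build();
    new Runner(options).run();
}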
Use of io.prestosql.metadata.Split in project hetu-core by openlookeng.
The class TestIndexCacheRemoval, method setupBeforeClass:
@BeforeClass
public void setupBeforeClass()
{
    PropertyService.setProperty(HetuConstant.FILTER_ENABLED, true);
    PropertyService.setProperty(HetuConstant.INDEXSTORE_FILESYSTEM_PROFILE, "local-config-default");
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_MAX_MEMORY, (long) (new DataSize(numberOfIndexTypes * 2, KILOBYTE).getValue(KILOBYTE)));
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_TTL, new Duration(10, TimeUnit.MINUTES));
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_LOADING_DELAY, new Duration(loadDelay, TimeUnit.MILLISECONDS));
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_LOADING_THREADS, 2L);
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_SOFT_REFERENCE, false);
    CatalogName catalogName = new CatalogName(catalog);
    connectorSplit = mock(ConnectorSplit.class);
    Lifespan lifespan = mock(Lifespan.class);
    split = new Split(catalogName, connectorSplit, lifespan);
    when(connectorSplit.getFilePath()).thenReturn(testPath);
    when(connectorSplit.getLastModifiedTime()).thenReturn(testLastModifiedTime);
}
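A test method can then exercise the mocked connector split through the generic Split wrapper. A minimal sketch of such an assertion (the test name is hypothetical; the fields come from the setup above):

import org.testng.annotations.Test;
import static org.testng.Assert.assertEquals;

// Sketch only: the wrapped connector split should expose the stubbed path and
// modification time that the index cache keys on.
@Test
public void testSplitExposesStubbedMetadata()
{
    assertEquals(split.getConnectorSplit().getFilePath(), testPath);
    assertEquals(split.getConnectorSplit().getLastModifiedTime(), testLastModifiedTime);
}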
Use of io.prestosql.metadata.Split in project hetu-core by openlookeng.
The class TestSplitFiltering, method testGetFilteredSplit:
/**
 * This test will not actually filter any splits, since the indexes will not be found;
 * instead, it just tests the flow. The actual filtering is tested in other classes.
 */
@Test
public void testGetFilteredSplit()
{
    PropertyService.setProperty(HetuConstant.FILTER_ENABLED, true);
    PropertyService.setProperty(HetuConstant.INDEXSTORE_URI, "/tmp/hetu/indices");
    PropertyService.setProperty(HetuConstant.INDEXSTORE_FILESYSTEM_PROFILE, "local-config-default");
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_TTL, new Duration(10, TimeUnit.MINUTES));
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_LOADING_DELAY, new Duration(5000, TimeUnit.MILLISECONDS));
    PropertyService.setProperty(HetuConstant.FILTER_CACHE_LOADING_THREADS, 2L);
    RowExpression expression = PlanBuilder.comparison(OperatorType.EQUAL, new VariableReferenceExpression("a", VarcharType.VARCHAR), new ConstantExpression(utf8Slice("test_value"), VarcharType.VARCHAR));
    SqlStageExecution stage = TestUtil.getTestStage(expression);
    List<Split> mockSplits = new ArrayList<>();
    MockSplit mock = new MockSplit("hdfs://hacluster/AppData/BIProd/DWD/EVT/bogus_table/000000_0", 0, 10, 0);
    MockSplit mock1 = new MockSplit("hdfs://hacluster/AppData/BIProd/DWD/EVT/bogus_table/000000_1", 0, 10, 0);
    MockSplit mock2 = new MockSplit("hdfs://hacluster/AppData/BIProd/DWD/EVT/bogus_table/000001_0", 0, 10, 0);
    MockSplit mock3 = new MockSplit("hdfs://hacluster/AppData/BIProd/DWD/EVT/bogus_table/000000_4", 0, 10, 0);
    mockSplits.add(new Split(new CatalogName("bogus_catalog"), mock, Lifespan.taskWide()));
    mockSplits.add(new Split(new CatalogName("bogus_catalog"), mock1, Lifespan.taskWide()));
    mockSplits.add(new Split(new CatalogName("bogus_catalog"), mock2, Lifespan.taskWide()));
    mockSplits.add(new Split(new CatalogName("bogus_catalog"), mock3, Lifespan.taskWide()));
    SplitSource.SplitBatch nextSplits = new SplitSource.SplitBatch(mockSplits, true);
    HeuristicIndexerManager indexerManager = new HeuristicIndexerManager(new FileSystemClientManager(), new HetuMetaStoreManager());
    Pair<Optional<RowExpression>, Map<Symbol, ColumnHandle>> pair = SplitFiltering.getExpression(stage);
    List<Split> filteredSplits = SplitFiltering.getFilteredSplit(pair.getFirst(), SplitFiltering.getFullyQualifiedName(stage), pair.getSecond(), nextSplits, indexerManager);
    assertNotNull(filteredSplits);
    assertEquals(filteredSplits.size(), 4);
}
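Since no index exists for the bogus table, every split should pass through untouched. A hedged sketch of an extra check that the original file paths survive filtering, relying only on the Split#getConnectorSplit() and ConnectorSplit#getFilePath() accessors used elsewhere in these snippets:

import static org.testng.Assert.assertTrue;

// Sketch only: each filtered split should still carry one of the original file paths.
for (Split filtered : filteredSplits) {
    String path = filtered.getConnectorSplit().getFilePath();
    assertTrue(mockSplits.stream()
            .anyMatch(s -> s.getConnectorSplit().getFilePath().equals(path)));
}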