Example use of io.trino.plugin.thrift.api.TrinoThriftSplitBatch in project trino by trinodb:
the getSplitsSync method of the class ThriftTpchService.
/**
 * Returns the next batch of "normal" splits for the given table, resuming from the
 * continuation token when one is present.
 *
 * @param schemaTableName table to produce splits for
 * @param maxSplitCount upper bound on the number of splits in this batch
 * @param nextToken continuation token from the previous batch; a null inner token means start over
 * @return a batch of splits plus a new continuation token, or a null token when all parts are sent
 */
private static TrinoThriftSplitBatch getSplitsSync(TrinoThriftSchemaTableName schemaTableName, int maxSplitCount, TrinoThriftNullableToken nextToken) {
    int totalParts = DEFAULT_NUMBER_OF_SPLITS;
    // resume from the part recorded in the token, or from the beginning when there is none
    int partsSent = nextToken.getToken() == null ? 0 : Ints.fromByteArray(nextToken.getToken().getId());
    int batchSize = min(maxSplitCount, totalParts - partsSent);
    List<TrinoThriftSplit> batch = new ArrayList<>(batchSize);
    while (batch.size() < batchSize) {
        // part numbers handed to normalSplit are 1-based
        partsSent++;
        SplitInfo info = normalSplit(schemaTableName.getSchemaName(), schemaTableName.getTableName(), partsSent, totalParts);
        batch.add(new TrinoThriftSplit(new TrinoThriftId(SPLIT_INFO_CODEC.toJsonBytes(info)), ImmutableList.of()));
    }
    // hand out a continuation token only if parts remain
    TrinoThriftId token = partsSent < totalParts ? new TrinoThriftId(Ints.toByteArray(partsSent)) : null;
    return new TrinoThriftSplitBatch(batch, token);
}
Example use of io.trino.plugin.thrift.api.TrinoThriftSplitBatch in project trino by trinodb:
the loadAllSplits method of the class ThriftIndexPageSource.
/**
 * Drives the asynchronous fetching of splits until every batch has been received.
 *
 * @return true once all splits are collected into {@code splits}; false while a
 *         request is still outstanding (the caller should poll again later)
 */
private boolean loadAllSplits() {
    if (haveSplits) {
        // already finished on an earlier call
        return true;
    }
    if (splitFuture == null) {
        // nothing in flight yet: issue the first request (null token = start)
        splitFuture = sendSplitRequest(null);
        statusFuture = toCompletableFuture(nonCancellationPropagating(splitFuture));
    }
    if (!splitFuture.isDone()) {
        // still waiting on the server
        return false;
    }
    // the current request has completed; collect its splits
    TrinoThriftSplitBatch batch = getFutureValue(splitFuture);
    splits.addAll(batch.getSplits());
    TrinoThriftId continuation = batch.getNextToken();
    if (continuation == null) {
        // server signalled there is nothing more to fetch
        splitFuture = null;
        statusFuture = null;
        haveSplits = true;
        return true;
    }
    // more batches remain; issue the follow-up request with the continuation token
    splitFuture = sendSplitRequest(continuation);
    statusFuture = toCompletableFuture(nonCancellationPropagating(splitFuture));
    return false;
}
Example use of io.trino.plugin.thrift.api.TrinoThriftSplitBatch in project trino by trinodb:
the sendSplitRequest method of the class ThriftIndexPageSource.
/**
 * Issues an asynchronous getIndexSplits call and accumulates the elapsed wall time
 * into {@code readTimeNanos} once the response (or failure) arrives.
 *
 * @param nextToken continuation token from the previous batch, or null for the first request
 * @return future completing with the next batch of splits; thrift failures are
 *         translated by {@code catchingThriftException}
 */
private ListenableFuture<TrinoThriftSplitBatch> sendSplitRequest(@Nullable TrinoThriftId nextToken) {
    long startNanos = System.nanoTime();
    ListenableFuture<TrinoThriftSplitBatch> result = catchingThriftException(
            client.get(thriftHeaders).getIndexSplits(
                    schemaTableName,
                    lookupColumnNames,
                    outputColumnNames,
                    keys,
                    outputConstraint,
                    MAX_SPLIT_COUNT,
                    new TrinoThriftNullableToken(nextToken)));
    // record time spent regardless of success or failure
    result.addListener(() -> readTimeNanos.addAndGet(System.nanoTime() - startNanos), directExecutor());
    return result;
}
Example use of io.trino.plugin.thrift.api.TrinoThriftSplitBatch in project trino by trinodb:
the getIndexSplitsSync method of the class ThriftIndexedTpchService.
/**
 * Partitions the lookup keys into at most {@code NUMBER_OF_INDEX_SPLITS} index splits.
 * Continuation tokens are not supported: all splits are returned in a single batch.
 *
 * @param schemaTableName table being looked up
 * @param indexColumnNames columns the keys are matched against
 * @param keys page of lookup key rows to distribute across splits
 * @param maxSplitCount must be at least {@code NUMBER_OF_INDEX_SPLITS}
 * @param nextToken must carry a null inner token (no continuation supported)
 * @return a single batch containing every split and a null continuation token
 */
@Override
protected TrinoThriftSplitBatch getIndexSplitsSync(TrinoThriftSchemaTableName schemaTableName, List<String> indexColumnNames, TrinoThriftPageResult keys, int maxSplitCount, TrinoThriftNullableToken nextToken) throws TrinoThriftServiceException {
    checkArgument(NUMBER_OF_INDEX_SPLITS <= maxSplitCount, "maxSplitCount for lookup splits is too low");
    checkArgument(nextToken.getToken() == null, "no continuation is supported for lookup splits");
    int keyCount = keys.getRowCount();
    int keysPerPart = keyCount / NUMBER_OF_INDEX_SPLITS;
    List<TrinoThriftSplit> result = new ArrayList<>(NUMBER_OF_INDEX_SPLITS);
    for (int part = 0; part < NUMBER_OF_INDEX_SPLITS; part++) {
        int from = keysPerPart * part;
        boolean lastPart = part + 1 == NUMBER_OF_INDEX_SPLITS;
        // the last part absorbs the remainder left over by integer division
        int to = lastPart ? keyCount : keysPerPart * (part + 1);
        if (from == to) {
            // empty range (fewer keys than parts) — nothing to index here
            continue;
        }
        SplitInfo info = indexSplit(schemaTableName.getSchemaName(), schemaTableName.getTableName(), indexColumnNames, thriftPageToList(keys, from, to));
        result.add(new TrinoThriftSplit(new TrinoThriftId(SPLIT_INFO_CODEC.toJsonBytes(info)), ImmutableList.of()));
    }
    return new TrinoThriftSplitBatch(result, null);
}
Aggregations