Use of io.trino.spi.connector.RecordCursor in project trino by trinodb.
The class HivePageSourceProvider, method createHivePageSource:
public static Optional<ConnectorPageSource> createHivePageSource(
        Set<HivePageSourceFactory> pageSourceFactories,
        Set<HiveRecordCursorProvider> cursorProviders,
        Configuration configuration,
        ConnectorSession session,
        Path path,
        OptionalInt bucketNumber,
        long start,
        long length,
        long estimatedFileSize,
        Properties schema,
        TupleDomain<HiveColumnHandle> effectivePredicate,
        List<HiveColumnHandle> columns,
        TypeManager typeManager,
        Optional<BucketConversion> bucketConversion,
        Optional<BucketValidation> bucketValidation,
        boolean s3SelectPushdownEnabled,
        Optional<AcidInfo> acidInfo,
        boolean originalFile,
        AcidTransaction transaction,
        List<ColumnMapping> columnMappings)
{
    if (effectivePredicate.isNone()) {
        return Optional.of(new EmptyPageSource());
    }

    List<ColumnMapping> regularAndInterimColumnMappings = ColumnMapping.extractRegularAndInterimColumnMappings(columnMappings);
    Optional<BucketAdaptation> bucketAdaptation = createBucketAdaptation(bucketConversion, bucketNumber, regularAndInterimColumnMappings);
    Optional<BucketValidator> bucketValidator = createBucketValidator(path, bucketValidation, bucketNumber, regularAndInterimColumnMappings);

    for (HivePageSourceFactory pageSourceFactory : pageSourceFactories) {
        List<HiveColumnHandle> desiredColumns = toColumnHandles(regularAndInterimColumnMappings, true, typeManager);
        Optional<ReaderPageSource> readerWithProjections = pageSourceFactory.createPageSource(configuration, session, path, start, length, estimatedFileSize, schema, desiredColumns, effectivePredicate, acidInfo, bucketNumber, originalFile, transaction);
        if (readerWithProjections.isPresent()) {
            ConnectorPageSource pageSource = readerWithProjections.get().get();

            Optional<ReaderColumns> readerProjections = readerWithProjections.get().getReaderColumns();
            Optional<ReaderProjectionsAdapter> adapter = Optional.empty();
            if (readerProjections.isPresent()) {
                adapter = Optional.of(hiveProjectionsAdapter(desiredColumns, readerProjections.get()));
            }

            return Optional.of(new HivePageSource(columnMappings, bucketAdaptation, bucketValidator, adapter, typeManager, pageSource));
        }
    }

    for (HiveRecordCursorProvider provider : cursorProviders) {
        // GenericHiveRecordCursor will automatically do the coercion without HiveCoercionRecordCursor
        boolean doCoercion = !(provider instanceof GenericHiveRecordCursorProvider);

        List<HiveColumnHandle> desiredColumns = toColumnHandles(regularAndInterimColumnMappings, doCoercion, typeManager);
        Optional<ReaderRecordCursorWithProjections> readerWithProjections = provider.createRecordCursor(configuration, session, path, start, length, estimatedFileSize, schema, desiredColumns, effectivePredicate, typeManager, s3SelectPushdownEnabled);
        if (readerWithProjections.isPresent()) {
            RecordCursor delegate = readerWithProjections.get().getRecordCursor();
            Optional<ReaderColumns> projections = readerWithProjections.get().getProjectedReaderColumns();

            if (projections.isPresent()) {
                ReaderProjectionsAdapter projectionsAdapter = hiveProjectionsAdapter(desiredColumns, projections.get());
                delegate = new HiveReaderProjectionsAdaptingRecordCursor(delegate, projectionsAdapter);
            }

            checkArgument(acidInfo.isEmpty(), "Acid is not supported");

            if (bucketAdaptation.isPresent()) {
                delegate = new HiveBucketAdapterRecordCursor(
                        bucketAdaptation.get().getBucketColumnIndices(),
                        bucketAdaptation.get().getBucketColumnHiveTypes(),
                        bucketAdaptation.get().getBucketingVersion(),
                        bucketAdaptation.get().getTableBucketCount(),
                        bucketAdaptation.get().getPartitionBucketCount(),
                        bucketAdaptation.get().getBucketToKeep(),
                        typeManager,
                        delegate);
            }

            // Need to wrap RcText and RcBinary into a wrapper, which will do the coercion for mismatch columns
            if (doCoercion) {
                delegate = new HiveCoercionRecordCursor(regularAndInterimColumnMappings, typeManager, delegate);
            }

            // bucket adaptation already validates that data is in the right bucket
            if (bucketAdaptation.isEmpty() && bucketValidator.isPresent()) {
                delegate = bucketValidator.get().wrapRecordCursor(delegate, typeManager);
            }

            HiveRecordCursor hiveRecordCursor = new HiveRecordCursor(columnMappings, delegate);
            List<Type> columnTypes = columns.stream().map(HiveColumnHandle::getType).collect(toList());

            return Optional.of(new RecordPageSource(columnTypes, hiveRecordCursor));
        }
    }

    return Optional.empty();
}
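The second loop above is the cursor fallback: when no page source factory can handle the file, the record cursor is wrapped so the engine still consumes it through the ConnectorPageSource interface. Below is a minimal, self-contained sketch of that same wrapping (not taken from the Trino source), using the SPI's InMemoryRecordSet as a stand-in data source; the class name, column types, and rows are invented for illustration.

import io.trino.spi.connector.ConnectorPageSource;
import io.trino.spi.connector.InMemoryRecordSet;
import io.trino.spi.connector.RecordPageSource;
import io.trino.spi.connector.RecordSet;
import io.trino.spi.type.Type;

import java.util.List;

import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.spi.type.VarcharType.VARCHAR;

public class RecordCursorAsPageSource
{
    public static ConnectorPageSource pageSourceFromCursor()
    {
        List<Type> types = List.of(BIGINT, VARCHAR);
        // InMemoryRecordSet is a simple SPI-provided RecordSet; cursor() returns a RecordCursor over its rows
        RecordSet recordSet = InMemoryRecordSet.builder(types)
                .addRow(1L, "alice")
                .addRow(2L, "bob")
                .build();
        // RecordPageSource drains the cursor position by position and materializes the values into Pages
        return new RecordPageSource(types, recordSet.cursor());
    }
}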
Use of io.trino.spi.connector.RecordCursor in project trino by trinodb.
The class TestShardMetadataRecordCursor, method testSimple:
@Test
public void testSimple()
{
    ShardManager shardManager = createShardManager(dbi);

    // Add shards to the table
    long tableId = 1;
    OptionalInt bucketNumber = OptionalInt.empty();
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    UUID uuid3 = UUID.randomUUID();
    ShardInfo shardInfo1 = new ShardInfo(uuid1, bucketNumber, ImmutableSet.of("node1"), ImmutableList.of(), 1, 10, 100, 0x1234);
    ShardInfo shardInfo2 = new ShardInfo(uuid2, bucketNumber, ImmutableSet.of("node2"), ImmutableList.of(), 2, 20, 200, 0xCAFEBABEDEADBEEFL);
    ShardInfo shardInfo3 = new ShardInfo(uuid3, bucketNumber, ImmutableSet.of("node3"), ImmutableList.of(), 3, 30, 300, 0xFEDCBA0987654321L);
    List<ShardInfo> shards = ImmutableList.of(shardInfo1, shardInfo2, shardInfo3);

    long transactionId = shardManager.beginTransaction();
    shardManager.commitShards(transactionId, tableId, ImmutableList.of(new ColumnInfo(1, BIGINT), new ColumnInfo(2, DATE)), shards, Optional.empty(), 0);

    Slice schema = utf8Slice(DEFAULT_TEST_ORDERS.getSchemaName());
    Slice table = utf8Slice(DEFAULT_TEST_ORDERS.getTableName());

    DateTime date1 = DateTime.parse("2015-01-01T00:00");
    DateTime date2 = DateTime.parse("2015-01-02T00:00");
    TupleDomain<Integer> tupleDomain = TupleDomain.withColumnDomains(ImmutableMap.<Integer, Domain>builder()
            .put(0, Domain.singleValue(createVarcharType(10), schema))
            .put(1, Domain.create(ValueSet.ofRanges(lessThanOrEqual(createVarcharType(10), table)), true))
            .put(8, Domain.create(ValueSet.ofRanges(lessThanOrEqual(BIGINT, date1.getMillis()), greaterThan(BIGINT, date2.getMillis())), true))
            .put(9, Domain.create(ValueSet.ofRanges(lessThanOrEqual(BIGINT, date1.getMillis()), greaterThan(BIGINT, date2.getMillis())), true))
            .buildOrThrow());

    List<MaterializedRow> actual;
    try (RecordCursor cursor = new ShardMetadataSystemTable(dbi).cursor(null, SESSION, tupleDomain)) {
        actual = getMaterializedResults(cursor, SHARD_METADATA.getColumns());
    }
    assertEquals(actual.size(), 3);

    List<MaterializedRow> expected = ImmutableList.of(
            new MaterializedRow(DEFAULT_PRECISION, schema, table, utf8Slice(uuid1.toString()), null, 100L, 10L, 1L, utf8Slice("0000000000001234"), null, null, null, null),
            new MaterializedRow(DEFAULT_PRECISION, schema, table, utf8Slice(uuid2.toString()), null, 200L, 20L, 2L, utf8Slice("cafebabedeadbeef"), null, null, null, null),
            new MaterializedRow(DEFAULT_PRECISION, schema, table, utf8Slice(uuid3.toString()), null, 300L, 30L, 3L, utf8Slice("fedcba0987654321"), null, null, null, null));

    assertEquals(actual, expected);
}
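getMaterializedResults is a helper of this test class. The sketch below is only a hedged approximation of what such a helper has to do, namely drain every position from the RecordCursor and read each field according to its column's Java type; the method name materializeRows is invented and the real helper may differ in detail.

// Hypothetical approximation of a cursor-draining helper; not the actual getMaterializedResults.
private static List<MaterializedRow> materializeRows(RecordCursor cursor, List<ColumnMetadata> columns)
{
    ImmutableList.Builder<MaterializedRow> rows = ImmutableList.builder();
    while (cursor.advanceNextPosition()) {
        Object[] values = new Object[columns.size()];
        for (int i = 0; i < columns.size(); i++) {
            if (cursor.isNull(i)) {
                continue; // leave the slot null
            }
            Class<?> javaType = columns.get(i).getType().getJavaType();
            if (javaType == boolean.class) {
                values[i] = cursor.getBoolean(i);
            }
            else if (javaType == long.class) {
                values[i] = cursor.getLong(i);
            }
            else if (javaType == double.class) {
                values[i] = cursor.getDouble(i);
            }
            else if (javaType == Slice.class) {
                values[i] = cursor.getSlice(i);
            }
            else {
                values[i] = cursor.getObject(i);
            }
        }
        rows.add(new MaterializedRow(DEFAULT_PRECISION, values));
    }
    return rows.build();
}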
Use of io.trino.spi.connector.RecordCursor in project trino by trinodb.
The class TrinoThriftBlock, method convertColumnToBlock:
private static Block convertColumnToBlock(RecordSet recordSet, int columnIndex, int positions)
{
    Type type = recordSet.getColumnTypes().get(columnIndex);
    BlockBuilder output = type.createBlockBuilder(null, positions);
    Class<?> javaType = type.getJavaType();
    RecordCursor cursor = recordSet.cursor();
    for (int position = 0; position < positions; position++) {
        checkState(cursor.advanceNextPosition(), "cursor has less values than expected");
        if (cursor.isNull(columnIndex)) {
            output.appendNull();
        }
        else {
            if (javaType == boolean.class) {
                type.writeBoolean(output, cursor.getBoolean(columnIndex));
            }
            else if (javaType == long.class) {
                type.writeLong(output, cursor.getLong(columnIndex));
            }
            else if (javaType == double.class) {
                type.writeDouble(output, cursor.getDouble(columnIndex));
            }
            else if (javaType == Slice.class) {
                Slice slice = cursor.getSlice(columnIndex);
                type.writeSlice(output, slice, 0, slice.length());
            }
            else {
                type.writeObject(output, cursor.getObject(columnIndex));
            }
        }
    }
    checkState(!cursor.advanceNextPosition(), "cursor has more values than expected");
    return output.build();
}
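Note that every call opens a fresh cursor via recordSet.cursor() and walks all positions, so converting several columns re-scans the record set once per column. A hypothetical caller is sketched below; recordSet and positions are placeholders supplied by the caller, and the real method is private to TrinoThriftBlock, so this assumes it is invoked from the same class or an equivalent helper.

// Hypothetical sketch: convert every column of a record set, one full scan per column.
List<Block> blocks = new ArrayList<>();
for (int column = 0; column < recordSet.getColumnTypes().size(); column++) {
    blocks.add(convertColumnToBlock(recordSet, column, positions));
}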
Use of io.trino.spi.connector.RecordCursor in project trino by trinodb.
The class TrinoThriftTypeUtils, method fromLongBasedColumn:
public static TrinoThriftBlock fromLongBasedColumn(RecordSet recordSet, int columnIndex, int positions, BiFunction<boolean[], long[], TrinoThriftBlock> result)
{
    if (positions == 0) {
        return result.apply(null, null);
    }
    boolean[] nulls = null;
    long[] longs = null;
    RecordCursor cursor = recordSet.cursor();
    for (int position = 0; position < positions; position++) {
        checkState(cursor.advanceNextPosition(), "cursor has less values than expected");
        if (cursor.isNull(columnIndex)) {
            if (nulls == null) {
                nulls = new boolean[positions];
            }
            nulls[position] = true;
        }
        else {
            if (longs == null) {
                longs = new long[positions];
            }
            longs[position] = cursor.getLong(columnIndex);
        }
    }
    checkState(!cursor.advanceNextPosition(), "cursor has more values than expected");
    return result.apply(nulls, longs);
}
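A hypothetical call site, in the style of the connector's long-backed thrift types: the BiFunction receives the lazily allocated nulls and longs arrays (either may be null if no value of that kind was seen) and packages them into a TrinoThriftBlock. The bigintData factory and TrinoThriftBigint constructor are assumptions about the trino-thrift connector rather than verbatim quotes, and recordSet, columnIndex, and totalRecords are placeholders supplied by the caller.

// Hypothetical sketch: adapt a BIGINT column of a record set into a thrift block.
TrinoThriftBlock block = fromLongBasedColumn(
        recordSet,
        columnIndex,
        totalRecords,
        (nulls, longs) -> bigintData(new TrinoThriftBigint(nulls, longs)));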
Use of io.trino.spi.connector.RecordCursor in project trino by trinodb.
The class TpchIndexedData, method indexTable:
private static IndexedTable indexTable(RecordSet recordSet, List<String> outputColumns, List<String> keyColumns)
{
    List<Integer> keyPositions = keyColumns.stream()
            .map(columnName -> {
                int position = outputColumns.indexOf(columnName);
                checkState(position != -1);
                return position;
            })
            .collect(toImmutableList());

    ImmutableListMultimap.Builder<MaterializedTuple, MaterializedTuple> indexedValuesBuilder = ImmutableListMultimap.builder();

    List<Type> outputTypes = recordSet.getColumnTypes();
    List<Type> keyTypes = extractPositionValues(outputTypes, keyPositions);

    RecordCursor cursor = recordSet.cursor();
    while (cursor.advanceNextPosition()) {
        List<Object> values = extractValues(cursor, outputTypes);
        List<Object> keyValues = extractPositionValues(values, keyPositions);
        indexedValuesBuilder.put(new MaterializedTuple(keyValues), new MaterializedTuple(values));
    }

    return new IndexedTable(keyColumns, keyTypes, outputColumns, outputTypes, indexedValuesBuilder.build());
}
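extractValues and extractPositionValues are small helpers of TpchIndexedData. Below is a hedged sketch of what the position-picking side plausibly looks like (the name extractPositions is invented), followed by a hypothetical call that indexes a tiny in-memory record set; the column names and rows are made up, and the real indexTable is private to TpchIndexedData.

// Hypothetical approximation of the position-picking helper used above.
private static <T> List<T> extractPositions(List<T> values, List<Integer> positions)
{
    return positions.stream()
            .map(values::get)
            .collect(toImmutableList());
}

// Hypothetical usage: index two columns of an in-memory record set by the first column.
IndexedTable table = indexTable(
        InMemoryRecordSet.builder(List.of(BIGINT, VARCHAR))
                .addRow(1L, "ALICE")
                .addRow(2L, "BOB")
                .build(),
        List.of("custkey", "name"),
        List.of("custkey"));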