Use of io.prestosql.spi.type.BigintType.BIGINT in project hetu-core by openlookeng.
The class TestPushAggregationThroughOuterJoin, method testDoesNotFireWhenNotDistinct.
@Test
public void testDoesNotFireWhenNotDistinct() {
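// The rule only fires when the outer side of the join is known to be distinct on the join key;
// a plain VALUES source gives no such guarantee, so the plan must be left unchanged.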
tester().assertThat(new PushAggregationThroughOuterJoin()).on(p -> p.aggregation(ab -> ab.source(p.join(JoinNode.Type.LEFT, p.values(ImmutableList.of(p.symbol("COL1")), ImmutableList.of(constantExpressions(BIGINT, 10L), constantExpressions(BIGINT, 11L))), p.values(new Symbol("COL2")), ImmutableList.of(new JoinNode.EquiJoinClause(new Symbol("COL1"), new Symbol("COL2"))), ImmutableList.of(new Symbol("COL1"), new Symbol("COL2")), Optional.empty(), Optional.empty(), Optional.empty())).addAggregation(new Symbol("AVG"), PlanBuilder.expression("avg(COL2)"), ImmutableList.of(DOUBLE)).singleGroupingSet(new Symbol("COL1")))).doesNotFire();
// https://github.com/prestodb/presto/issues/10592
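// Here the outer side groups on (COL1, unused) and then projects COL1, so COL1 alone is still not
// guaranteed distinct; the rule must not fire for this shape either.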
tester().assertThat(new PushAggregationThroughOuterJoin())
        .on(p -> p.aggregation(ab -> ab
                .source(p.join(
                        JoinNode.Type.LEFT,
                        p.project(
                                Assignments.builder().put(p.symbol("COL1", BIGINT), p.variable("COL1")).build(),
                                p.aggregation(builder -> builder
                                        .singleGroupingSet(p.symbol("COL1"), p.symbol("unused"))
                                        .source(p.values(
                                                ImmutableList.of(p.symbol("COL1"), p.symbol("unused")),
                                                ImmutableList.of(constantExpressions(BIGINT, 10L, 1L), constantExpressions(BIGINT, 10L, 2L)))))),
                        p.values(p.symbol("COL2")),
                        ImmutableList.of(new JoinNode.EquiJoinClause(p.symbol("COL1"), p.symbol("COL2"))),
                        ImmutableList.of(p.symbol("COL1"), p.symbol("COL2")),
                        Optional.empty(),
                        Optional.empty(),
                        Optional.empty()))
                .addAggregation(p.symbol("AVG", DOUBLE), PlanBuilder.expression("avg(COL2)"), ImmutableList.of(DOUBLE))
                .singleGroupingSet(p.symbol("COL1"))))
        .doesNotFire();
}
Use of io.prestosql.spi.type.BigintType.BIGINT in project hetu-core by openlookeng.
The class OrcPageSourceFactory, method createOrcPageSource.
public static OrcPageSource createOrcPageSource(
        HdfsEnvironment hdfsEnvironment, String sessionUser, Configuration configuration,
        Path path, long start, long length, long fileSize,
        List<HiveColumnHandle> columns, boolean useOrcColumnNames, boolean isFullAcid,
        TupleDomain<HiveColumnHandle> effectivePredicate, DateTimeZone legacyFileTimeZone, TypeManager typeManager,
        DataSize maxMergeDistance, DataSize maxBufferSize, DataSize streamBufferSize,
        DataSize tinyStripeThreshold, DataSize maxReadBlockSize, boolean lazyReadSmallRanges,
        boolean orcBloomFiltersEnabled, FileFormatDataSourceStats stats, Optional<DynamicFilterSupplier> dynamicFilters,
        Optional<DeleteDeltaLocations> deleteDeltaLocations, Optional<Long> startRowOffsetOfFile, Optional<List<IndexMetadata>> indexes,
        SplitMetadata splitMetadata, OrcCacheStore orcCacheStore, OrcCacheProperties orcCacheProperties,
        int domainCompactionThreshold, boolean pageMetadataEnabled, long dataSourceLastModifiedTime) {
for (HiveColumnHandle column : columns) {
checkArgument(column.getColumnType() == HiveColumnHandle.ColumnType.REGULAR || column.getHiveColumnIndex() == HiveColumnHandle.ROW_ID__COLUMN_INDEX, "column type must be regular: %s", column);
}
checkArgument(!effectivePredicate.isNone());
OrcDataSource orcDataSource;
try {
// Always create a lazy Stream. HDFS stream opened only when required.
FSDataInputStream inputStream = new FSDataInputStream(new LazyFSInputStream(() -> {
FileSystem fileSystem = hdfsEnvironment.getFileSystem(sessionUser, path, configuration);
return hdfsEnvironment.doAs(sessionUser, () -> fileSystem.open(path));
}));
orcDataSource = new HdfsOrcDataSource(new OrcDataSourceId(path.toString()), fileSize, maxMergeDistance, maxBufferSize, streamBufferSize, lazyReadSmallRanges, inputStream, stats, dataSourceLastModifiedTime);
} catch (Exception e) {
if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") || e instanceof FileNotFoundException) {
throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
}
throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, splitError(e, path, start, length), e);
}
AggregatedMemoryContext systemMemoryUsage = newSimpleAggregatedMemoryContext();
try {
OrcDataSource readerLocalDataSource = OrcReader.wrapWithCacheIfTiny(orcDataSource, tinyStripeThreshold);
OrcFileTail fileTail;
if (orcCacheProperties.isFileTailCacheEnabled()) {
try {
OrcDataSourceIdWithTimeStamp orcDataSourceIdWithTimeStamp = new OrcDataSourceIdWithTimeStamp(readerLocalDataSource.getId(), readerLocalDataSource.getLastModifiedTime());
fileTail = orcCacheStore.getFileTailCache().get(new OrcFileTailCacheKey(orcDataSourceIdWithTimeStamp), () -> OrcPageSourceFactory.createFileTail(orcDataSource));
} catch (UncheckedExecutionException | ExecutionException executionException) {
handleCacheLoadException(executionException);
log.debug(executionException.getCause(), "Error while caching the Orc file tail. Falling back to default flow");
fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
}
} else {
fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
}
OrcReader reader = new OrcReader(readerLocalDataSource, fileTail, maxMergeDistance, tinyStripeThreshold, maxReadBlockSize);
List<OrcColumn> fileColumns = reader.getRootColumn().getNestedColumns();
List<OrcColumn> fileReadColumns = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
List<Type> fileReadTypes = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
ImmutableList<String> acidColumnNames = null;
List<ColumnAdaptation> columnAdaptations = new ArrayList<>(columns.size());
// Only Hive ACID files will begin with bucket_
boolean fileNameContainsBucket = path.getName().contains("bucket");
if (isFullAcid && fileNameContainsBucket) {
// Skip the acid schema check in case of non-ACID files
acidColumnNames = ImmutableList.<String>builder().add(ACID_COLUMN_ORIGINAL_TRANSACTION, ACID_COLUMN_BUCKET, ACID_COLUMN_ROW_ID, ACID_COLUMN_CURRENT_TRANSACTION, ACID_COLUMN_OPERATION).build();
verifyAcidSchema(reader, path);
Map<String, OrcColumn> acidColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
if (AcidUtils.isDeleteDelta(path.getParent())) {
// Avoid reading column data from delete_delta files.
// Control reaches here during a minor VACUUM, where all delete_delta files are merged together.
fileColumns = ImmutableList.of();
} else {
fileColumns = ensureColumnNameConsistency(acidColumnsByName.get(ACID_COLUMN_ROW_STRUCT).getNestedColumns(), columns);
}
fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ORIGINAL_TRANSACTION.toLowerCase(ENGLISH)));
fileReadTypes.add(BIGINT);
fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_BUCKET.toLowerCase(ENGLISH)));
fileReadTypes.add(INTEGER);
fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ROW_ID.toLowerCase(ENGLISH)));
fileReadTypes.add(BIGINT);
fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_CURRENT_TRANSACTION.toLowerCase(ENGLISH)));
fileReadTypes.add(BIGINT);
fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_OPERATION.toLowerCase(ENGLISH)));
fileReadTypes.add(INTEGER);
}
Map<String, OrcColumn> fileColumnsByName = ImmutableMap.of();
if (useOrcColumnNames || isFullAcid) {
verifyFileHasColumnNames(fileColumns, path);
// Convert column names read from ORC files to lower case to be consistent with those stored in Hive Metastore
fileColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
}
TupleDomainOrcPredicateBuilder predicateBuilder = TupleDomainOrcPredicate.builder().setBloomFiltersEnabled(orcBloomFiltersEnabled);
Map<HiveColumnHandle, Domain> effectivePredicateDomains = effectivePredicate.getDomains().orElseThrow(() -> new IllegalArgumentException("Effective predicate is none"));
for (HiveColumnHandle column : columns) {
OrcColumn orcColumn = null;
if (useOrcColumnNames || isFullAcid) {
orcColumn = fileColumnsByName.get(column.getName());
} else if (column.getHiveColumnIndex() >= 0 && column.getHiveColumnIndex() < fileColumns.size()) {
orcColumn = fileColumns.get(column.getHiveColumnIndex());
}
Type readType = typeManager.getType(column.getTypeSignature());
if (orcColumn != null) {
int sourceIndex = fileReadColumns.size();
columnAdaptations.add(ColumnAdaptation.sourceColumn(sourceIndex));
fileReadColumns.add(orcColumn);
fileReadTypes.add(readType);
Domain domain = effectivePredicateDomains.get(column);
if (domain != null) {
predicateBuilder.addColumn(orcColumn.getColumnId(), domain);
}
} else if (isFullAcid && readType instanceof RowType && column.getName().equalsIgnoreCase(HiveColumnHandle.UPDATE_ROW_ID_COLUMN_NAME)) {
HiveType hiveType = column.getHiveType();
StructTypeInfo structTypeInfo = (StructTypeInfo) hiveType.getTypeInfo();
ImmutableList.Builder<ColumnAdaptation> builder = new ImmutableList.Builder<>();
ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
List<ColumnAdaptation> adaptations = fieldNames.stream().map(acidColumnNames::indexOf).map(c -> ColumnAdaptation.sourceColumn(c, false)).collect(Collectors.toList());
columnAdaptations.add(ColumnAdaptation.structColumn(structTypeInfo, adaptations));
} else {
columnAdaptations.add(ColumnAdaptation.nullColumn(readType));
}
}
Map<String, Domain> domains = effectivePredicate.getDomains().get().entrySet().stream().collect(toMap(e -> e.getKey().getName(), Map.Entry::getValue));
OrcRecordReader recordReader = reader.createRecordReader(fileReadColumns, fileReadTypes, predicateBuilder.build(), start, length, legacyFileTimeZone, systemMemoryUsage, INITIAL_BATCH_SIZE, exception -> handleException(orcDataSource.getId(), exception), indexes, splitMetadata, domains, orcCacheStore, orcCacheProperties, pageMetadataEnabled);
OrcDeletedRows deletedRows = new OrcDeletedRows(path.getName(), deleteDeltaLocations, new OrcDeleteDeltaPageSourceFactory(sessionUser, configuration, hdfsEnvironment, maxMergeDistance, maxBufferSize, streamBufferSize, maxReadBlockSize, tinyStripeThreshold, lazyReadSmallRanges, orcBloomFiltersEnabled, stats), sessionUser, configuration, hdfsEnvironment, startRowOffsetOfFile);
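// Indexes whose IDs appear in EAGER_LOAD_INDEX_ID are loaded eagerly by the page source.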
boolean eagerload = false;
if (indexes.isPresent()) {
eagerload = indexes.get().stream().anyMatch(indexMetadata -> EAGER_LOAD_INDEX_ID.contains(indexMetadata.getIndex().getId()));
}
return new OrcPageSource(recordReader, columnAdaptations, orcDataSource, deletedRows, eagerload, systemMemoryUsage, stats);
} catch (Exception e) {
try {
orcDataSource.close();
} catch (IOException ignored) {
}
if (e instanceof PrestoException) {
throw (PrestoException) e;
}
String message = splitError(e, path, start, length);
if (e instanceof BlockMissingException) {
throw new PrestoException(HIVE_MISSING_DATA, message, e);
}
throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
}
}
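The "lazy stream" used above defers opening the HDFS file until the first byte is actually read. A minimal, self-contained sketch of the same idea follows; the LazilyOpenedInputStream and StreamOpener names are illustrative stand-ins, not the actual LazyFSInputStream API.
import java.io.IOException;
import java.io.InputStream;

// Sketch of a lazily opened stream: nothing is opened until the first read.
public class LazilyOpenedInputStream extends InputStream {
    @FunctionalInterface
    public interface StreamOpener {
        InputStream open() throws IOException;
    }

    private final StreamOpener opener;
    private InputStream delegate;

    public LazilyOpenedInputStream(StreamOpener opener) {
        this.opener = opener;
    }

    private InputStream delegate() throws IOException {
        if (delegate == null) {
            delegate = opener.open(); // the expensive open happens here, on first use
        }
        return delegate;
    }

    @Override
    public int read() throws IOException {
        return delegate().read();
    }

    @Override
    public int read(byte[] buffer, int offset, int length) throws IOException {
        return delegate().read(buffer, offset, length);
    }

    @Override
    public void close() throws IOException {
        if (delegate != null) {
            delegate.close();
        }
    }
}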
Use of io.prestosql.spi.type.BigintType.BIGINT in project hetu-core by openlookeng.
The class TestBTreeIndex, method testLessThan.
@Test
public void testLessThan() throws IOException, IndexLookUpException {
BTreeIndex index = new BTreeIndex();
for (int i = 0; i < 100; i++) {
List<Pair> pairs = new ArrayList<>();
Long key = Long.valueOf(100 + i);
String value = "value" + i;
pairs.add(new Pair(key, value));
Pair pair = new Pair("dummyCol", pairs);
index.addKeyValues(Collections.singletonList(pair));
}
File file = getFile();
index.serialize(new FileOutputStream(file));
BTreeIndex readIndex = new BTreeIndex();
readIndex.deserialize(new FileInputStream(file));
RowExpression comparisonExpression = simplePredicate(OperatorType.LESS_THAN, "dummyCol", BIGINT, 120L);
Iterator<String> result = readIndex.lookUp(comparisonExpression);
assertNotNull(result, "Result shouldn't be null");
assertTrue(result.hasNext());
Object[] arr = IntStream.iterate(0, n -> n + 1).limit(20).mapToObj(i -> "value" + i).toArray();
Arrays.sort(arr);
for (int i = 0; i < 20; i++) {
assertEquals(arr[i], result.next());
}
assertFalse(result.hasNext());
index.close();
}
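A companion lookup in the other direction can be sketched with the same helpers. This is not part of the original test class, and it assumes BTreeIndex also handles OperatorType.GREATER_THAN_OR_EQUAL through simplePredicate and lookUp, which is not verified here.
// Hedged companion check: same setup as testLessThan, opposite comparison.
@Test
public void testGreaterThanOrEqualSketch() throws IOException, IndexLookUpException {
    BTreeIndex index = new BTreeIndex();
    for (int i = 0; i < 100; i++) {
        List<Pair> pairs = new ArrayList<>();
        pairs.add(new Pair(Long.valueOf(100 + i), "value" + i));
        index.addKeyValues(Collections.singletonList(new Pair("dummyCol", pairs)));
    }
    File file = getFile();
    index.serialize(new FileOutputStream(file));
    BTreeIndex readIndex = new BTreeIndex();
    readIndex.deserialize(new FileInputStream(file));
    // Keys 190..199 satisfy the predicate, so ten values (value90..value99) are expected.
    RowExpression predicate = simplePredicate(OperatorType.GREATER_THAN_OR_EQUAL, "dummyCol", BIGINT, 190L);
    Iterator<String> result = readIndex.lookUp(predicate);
    assertNotNull(result, "Result shouldn't be null");
    int count = 0;
    while (result.hasNext()) {
        result.next();
        count++;
    }
    assertEquals(count, 10);
    index.close();
}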
Use of io.prestosql.spi.type.BigintType.BIGINT in project hetu-core by openlookeng.
The class OrcTester, method preprocessWriteValueHive.
private static Object preprocessWriteValueHive(Type type, Object value) {
if (value == null) {
return null;
}
if (type.equals(BOOLEAN)) {
return value;
}
if (type.equals(TINYINT)) {
return ((Number) value).byteValue();
}
if (type.equals(SMALLINT)) {
return ((Number) value).shortValue();
}
if (type.equals(INTEGER)) {
return ((Number) value).intValue();
}
if (type.equals(BIGINT)) {
return ((Number) value).longValue();
}
if (type.equals(REAL)) {
return ((Number) value).floatValue();
}
if (type.equals(DOUBLE)) {
return ((Number) value).doubleValue();
}
if (type instanceof VarcharType) {
return value;
}
if (type instanceof CharType) {
return new HiveChar((String) value, ((CharType) type).getLength());
}
if (type.equals(VARBINARY)) {
return ((SqlVarbinary) value).getBytes();
}
if (type.equals(DATE)) {
return Date.ofEpochDay(((SqlDate) value).getDays());
}
if (type.equals(TIMESTAMP)) {
return Timestamp.ofEpochMilli(((SqlTimestamp) value).getMillis());
}
if (type instanceof DecimalType) {
return HiveDecimal.create(((SqlDecimal) value).toBigDecimal());
}
if (type.getTypeSignature().getBase().equals(StandardTypes.ARRAY)) {
Type elementType = type.getTypeParameters().get(0);
return ((List<?>) value).stream().map(element -> preprocessWriteValueHive(elementType, element)).collect(toList());
}
if (type.getTypeSignature().getBase().equals(StandardTypes.MAP)) {
Type keyType = type.getTypeParameters().get(0);
Type valueType = type.getTypeParameters().get(1);
Map<Object, Object> newMap = new HashMap<>();
for (Entry<?, ?> entry : ((Map<?, ?>) value).entrySet()) {
newMap.put(preprocessWriteValueHive(keyType, entry.getKey()), preprocessWriteValueHive(valueType, entry.getValue()));
}
return newMap;
}
if (type.getTypeSignature().getBase().equals(StandardTypes.ROW)) {
List<?> fieldValues = (List<?>) value;
List<Type> fieldTypes = type.getTypeParameters();
List<Object> newStruct = new ArrayList<>();
for (int fieldId = 0; fieldId < fieldValues.size(); fieldId++) {
newStruct.add(preprocessWriteValueHive(fieldTypes.get(fieldId), fieldValues.get(fieldId)));
}
return newStruct;
}
throw new IllegalArgumentException("unsupported type: " + type);
}
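As a concrete illustration of the conversions above, a small hedged sketch follows. It is written as if it lived inside OrcTester (preprocessWriteValueHive is private), and it assumes io.prestosql.spi.type.ArrayType and Guava's ImmutableList are importable there; the method name is made up for illustration.
// Hedged illustration only: shows what the conversions above produce for two representative types.
private static void illustratePreprocessWriteValueHive() {
    // BIGINT: any Number is converted to a Long.
    assert preprocessWriteValueHive(BIGINT, 42).equals(42L);
    // ARRAY(BIGINT): each element is converted recursively, producing a List<Long>.
    assert preprocessWriteValueHive(new ArrayType(BIGINT), ImmutableList.of(1, 2, 3))
            .equals(ImmutableList.of(1L, 2L, 3L));
    // MAP and ROW types recurse the same way over their keys/values and fields.
}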
Use of io.prestosql.spi.type.BigintType.BIGINT in project boostkit-bigdata by kunpengcompute.
The class OrcPageSourceFactory, method createOrcPageSource.
public static OrcPageSource createOrcPageSource(
        HdfsEnvironment hdfsEnvironment, String sessionUser, Configuration configuration,
        Path path, long start, long length, long fileSize,
        List<HiveColumnHandle> columns, boolean useOrcColumnNames, boolean isFullAcid,
        TupleDomain<HiveColumnHandle> effectivePredicate, DateTimeZone legacyFileTimeZone, TypeManager typeManager,
        DataSize maxMergeDistance, DataSize maxBufferSize, DataSize streamBufferSize,
        DataSize tinyStripeThreshold, DataSize maxReadBlockSize, boolean lazyReadSmallRanges,
        boolean orcBloomFiltersEnabled, FileFormatDataSourceStats stats, Optional<DynamicFilterSupplier> dynamicFilters,
        Optional<DeleteDeltaLocations> deleteDeltaLocations, Optional<Long> startRowOffsetOfFile, Optional<List<IndexMetadata>> indexes,
        SplitMetadata splitMetadata, OrcCacheStore orcCacheStore, OrcCacheProperties orcCacheProperties,
        int domainCompactionThreshold, boolean pageMetadataEnabled, long dataSourceLastModifiedTime) {
for (HiveColumnHandle column : columns) {
checkArgument(column.getColumnType() == HiveColumnHandle.ColumnType.REGULAR || column.getHiveColumnIndex() == HiveColumnHandle.ROW_ID__COLUMN_INDEX, "column type must be regular: %s", column);
}
checkArgument(!effectivePredicate.isNone());
OrcDataSource orcDataSource;
try {
// Always create a lazy Stream. HDFS stream opened only when required.
FSDataInputStream inputStream = new FSDataInputStream(new LazyFSInputStream(() -> {
FileSystem fileSystem = hdfsEnvironment.getFileSystem(sessionUser, path, configuration);
return hdfsEnvironment.doAs(sessionUser, () -> fileSystem.open(path));
}));
orcDataSource = new HdfsOrcDataSource(new OrcDataSourceId(path.toString()), fileSize, maxMergeDistance, maxBufferSize, streamBufferSize, lazyReadSmallRanges, inputStream, stats, dataSourceLastModifiedTime);
} catch (Exception e) {
if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") || e instanceof FileNotFoundException) {
throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
}
throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, splitError(e, path, start, length), e);
}
AggregatedMemoryContext systemMemoryUsage = newSimpleAggregatedMemoryContext();
try {
OrcDataSource readerLocalDataSource = OrcReader.wrapWithCacheIfTiny(orcDataSource, tinyStripeThreshold);
OrcFileTail fileTail;
if (orcCacheProperties.isFileTailCacheEnabled()) {
try {
OrcDataSourceIdWithTimeStamp orcDataSourceIdWithTimeStamp = new OrcDataSourceIdWithTimeStamp(readerLocalDataSource.getId(), readerLocalDataSource.getLastModifiedTime());
fileTail = orcCacheStore.getFileTailCache().get(new OrcFileTailCacheKey(orcDataSourceIdWithTimeStamp), () -> OrcPageSourceFactory.createFileTail(orcDataSource));
} catch (UncheckedExecutionException | ExecutionException executionException) {
handleCacheLoadException(executionException);
log.debug(executionException.getCause(), "Error while caching the Orc file tail. Falling back to default flow");
fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
}
} else {
fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
}
OrcReader reader = new OrcReader(readerLocalDataSource, fileTail, maxMergeDistance, tinyStripeThreshold, maxReadBlockSize);
List<OrcColumn> fileColumns = reader.getRootColumn().getNestedColumns();
List<OrcColumn> fileReadColumns = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
List<Type> fileReadTypes = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
ImmutableList<String> acidColumnNames = null;
List<ColumnAdaptation> columnAdaptations = new ArrayList<>(columns.size());
// Only Hive ACID files will begin with bucket_
boolean fileNameContainsBucket = path.getName().contains("bucket");
if (isFullAcid && fileNameContainsBucket) {
// Skip the acid schema check in case of non-ACID files
acidColumnNames = ImmutableList.<String>builder().add(ACID_COLUMN_ORIGINAL_TRANSACTION, ACID_COLUMN_BUCKET, ACID_COLUMN_ROW_ID, ACID_COLUMN_CURRENT_TRANSACTION, ACID_COLUMN_OPERATION).build();
verifyAcidSchema(reader, path);
Map<String, OrcColumn> acidColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
if (AcidUtils.isDeleteDelta(path.getParent())) {
// Avoid reading column data from delete_delta files.
// Control reaches here during a minor VACUUM, where all delete_delta files are merged together.
fileColumns = ImmutableList.of();
} else {
fileColumns = ensureColumnNameConsistency(acidColumnsByName.get(ACID_COLUMN_ROW_STRUCT).getNestedColumns(), columns);
}
fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ORIGINAL_TRANSACTION.toLowerCase(ENGLISH)));
fileReadTypes.add(BIGINT);
fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_BUCKET.toLowerCase(ENGLISH)));
fileReadTypes.add(INTEGER);
fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ROW_ID.toLowerCase(ENGLISH)));
fileReadTypes.add(BIGINT);
fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_CURRENT_TRANSACTION.toLowerCase(ENGLISH)));
fileReadTypes.add(BIGINT);
fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_OPERATION.toLowerCase(ENGLISH)));
fileReadTypes.add(INTEGER);
}
Map<String, OrcColumn> fileColumnsByName = ImmutableMap.of();
if (useOrcColumnNames || isFullAcid) {
verifyFileHasColumnNames(fileColumns, path);
// Convert column names read from ORC files to lower case to be consistent with those stored in Hive Metastore
fileColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
}
TupleDomainOrcPredicateBuilder predicateBuilder = TupleDomainOrcPredicate.builder().setBloomFiltersEnabled(orcBloomFiltersEnabled);
Map<HiveColumnHandle, Domain> effectivePredicateDomains = effectivePredicate.getDomains().orElseThrow(() -> new IllegalArgumentException("Effective predicate is none"));
for (HiveColumnHandle column : columns) {
OrcColumn orcColumn = null;
if (useOrcColumnNames || isFullAcid) {
orcColumn = fileColumnsByName.get(column.getName());
} else if (column.getHiveColumnIndex() >= 0 && column.getHiveColumnIndex() < fileColumns.size()) {
orcColumn = fileColumns.get(column.getHiveColumnIndex());
}
Type readType = typeManager.getType(column.getTypeSignature());
if (orcColumn != null) {
int sourceIndex = fileReadColumns.size();
columnAdaptations.add(ColumnAdaptation.sourceColumn(sourceIndex));
fileReadColumns.add(orcColumn);
fileReadTypes.add(readType);
Domain domain = effectivePredicateDomains.get(column);
if (domain != null) {
predicateBuilder.addColumn(orcColumn.getColumnId(), domain);
}
} else if (isFullAcid && readType instanceof RowType && column.getName().equalsIgnoreCase(HiveColumnHandle.UPDATE_ROW_ID_COLUMN_NAME)) {
HiveType hiveType = column.getHiveType();
StructTypeInfo structTypeInfo = (StructTypeInfo) hiveType.getTypeInfo();
ImmutableList.Builder<ColumnAdaptation> builder = new ImmutableList.Builder<>();
ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
List<ColumnAdaptation> adaptations = fieldNames.stream().map(acidColumnNames::indexOf).map(c -> ColumnAdaptation.sourceColumn(c, false)).collect(Collectors.toList());
columnAdaptations.add(ColumnAdaptation.structColumn(structTypeInfo, adaptations));
} else {
columnAdaptations.add(ColumnAdaptation.nullColumn(readType));
}
}
Map<String, Domain> domains = effectivePredicate.getDomains().get().entrySet().stream().collect(toMap(e -> e.getKey().getName(), Map.Entry::getValue));
OrcRecordReader recordReader = reader.createRecordReader(fileReadColumns, fileReadTypes, predicateBuilder.build(), start, length, legacyFileTimeZone, systemMemoryUsage, INITIAL_BATCH_SIZE, exception -> handleException(orcDataSource.getId(), exception), indexes, splitMetadata, domains, orcCacheStore, orcCacheProperties, pageMetadataEnabled);
OrcDeletedRows deletedRows = new OrcDeletedRows(path.getName(), deleteDeltaLocations, new OrcDeleteDeltaPageSourceFactory(sessionUser, configuration, hdfsEnvironment, maxMergeDistance, maxBufferSize, streamBufferSize, maxReadBlockSize, tinyStripeThreshold, lazyReadSmallRanges, orcBloomFiltersEnabled, stats), sessionUser, configuration, hdfsEnvironment, startRowOffsetOfFile);
boolean eagerload = false;
if (indexes.isPresent()) {
eagerload = indexes.get().stream().anyMatch(indexMetadata -> EAGER_LOAD_INDEX_ID.contains(indexMetadata.getIndex().getId()));
}
return new OrcPageSource(recordReader, columnAdaptations, orcDataSource, deletedRows, eagerload, systemMemoryUsage, stats);
} catch (Exception e) {
try {
orcDataSource.close();
} catch (IOException ignored) {
}
if (e instanceof PrestoException) {
throw (PrestoException) e;
}
String message = splitError(e, path, start, length);
if (e instanceof BlockMissingException) {
throw new PrestoException(HIVE_MISSING_DATA, message, e);
}
throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
}
}
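The file-tail lookup in this method follows a familiar Guava caching pattern: ask the cache to load the value, and fall back to a direct read if the loader fails. A minimal sketch of that pattern is shown below; the class name, key/value types, and readFileTailFromStorage helper are illustrative stand-ins, not the Hetu cache API.
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.util.concurrent.UncheckedExecutionException;

import java.util.concurrent.ExecutionException;

public final class CacheWithFallbackExample {
    private static final Cache<String, String> FILE_TAIL_CACHE = CacheBuilder.newBuilder()
            .maximumSize(1_000)
            .build();

    // Try the cache first; if loading fails, fall back to the uncached path,
    // mirroring the UncheckedExecutionException | ExecutionException handling above.
    static String readFileTail(String fileId) {
        try {
            return FILE_TAIL_CACHE.get(fileId, () -> readFileTailFromStorage(fileId));
        } catch (UncheckedExecutionException | ExecutionException e) {
            return readFileTailFromStorage(fileId);
        }
    }

    // Stand-in for the expensive read (OrcPageSourceFactory.createFileTail in the snippet above).
    private static String readFileTailFromStorage(String fileId) {
        return "tail-of-" + fileId;
    }

    public static void main(String[] args) {
        System.out.println(readFileTail("/warehouse/orders/bucket_00000"));
    }
}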