Use of io.prestosql.spi.type.IntegerType.INTEGER in project boostkit-bigdata by kunpengcompute.
From the class TestMergingPageIterator, method testMerging:
@Test
public void testMerging()
{
    List<Type> types = ImmutableList.of(INTEGER, INTEGER);
    List<Integer> sortIndexes = ImmutableList.of(1);
    List<SortOrder> sortOrders = ImmutableList.of(SortOrder.ASC_NULLS_FIRST);

    List<List<Page>> pageLists = new ArrayList<>();
    PageBuilder pageBuilder = new PageBuilder(types);
    for (int i = 0; i < 10; i++) {
        Iterator<Integer> values = IntStream.range(0, 1000)
                .map(ignored -> ThreadLocalRandom.current().nextInt(100_000))
                .mapToObj(n -> ((n % 100) == 0) ? null : n)
                .sorted(nullsFirst(naturalOrder()))
                .iterator();
        List<Page> pages = new ArrayList<>();
        for (int j = 0; j < 10; j++) {
            for (int k = 0; k < 100; k++) {
                Integer n = values.next();
                pageBuilder.declarePosition();
                if (n == null) {
                    pageBuilder.getBlockBuilder(0).appendNull();
                    pageBuilder.getBlockBuilder(1).appendNull();
                }
                else {
                    INTEGER.writeLong(pageBuilder.getBlockBuilder(0), n);
                    INTEGER.writeLong(pageBuilder.getBlockBuilder(1), n * 22L);
                }
            }
            pages.add(pageBuilder.build());
            pageBuilder.reset();
        }
        pageLists.add(pages);
        assertFalse(values.hasNext());
    }
    List<Iterator<Page>> pages = pageLists.stream()
            .map(List::iterator)
            .collect(toList());

    Iterator<Page> iterator = new MergingPageIterator(pages, types, sortIndexes, sortOrders);

    List<Long> values = new ArrayList<>();
    while (iterator.hasNext()) {
        Page page = iterator.next();
        for (int i = 0; i < page.getPositionCount(); i++) {
            if (page.getBlock(0).isNull(i)) {
                assertTrue(page.getBlock(1).isNull(i));
                values.add(null);
            }
            else {
                long x = INTEGER.getLong(page.getBlock(0), i);
                long y = INTEGER.getLong(page.getBlock(1), i);
                assertEquals(y, x * 22);
                values.add(x);
            }
        }
    }

    assertThat(values).isSortedAccordingTo(nullsFirst(naturalOrder()));
}
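
The test above relies on a simple write/read round trip for INTEGER values. As a minimal, illustrative sketch (not part of the project, assuming the same imports as the test), the core pattern in isolation is:

// Minimal sketch: round-trip a few INTEGER values through a single Page.
PageBuilder builder = new PageBuilder(ImmutableList.of(INTEGER));
for (int value = 0; value < 5; value++) {
    builder.declarePosition();
    INTEGER.writeLong(builder.getBlockBuilder(0), value);
}
Page page = builder.build();
for (int position = 0; position < page.getPositionCount(); position++) {
    // INTEGER values are read back through the long accessor
    long value = INTEGER.getLong(page.getBlock(0), position);
    System.out.println(value);
}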
Use of io.prestosql.spi.type.IntegerType.INTEGER in project hetu-core by openlookeng.
From the class HiveFileFormatBenchmark, method createTpchDataSet:
private static <E extends TpchEntity> TestData createTpchDataSet(FileFormat format, TpchTable<E> tpchTable, List<TpchColumn<E>> columns)
{
    List<String> columnNames = columns.stream()
            .map(TpchColumn::getColumnName)
            .collect(toList());
    List<Type> columnTypes = columns.stream()
            .map(HiveFileFormatBenchmark::getColumnType)
            .map(type -> format.supportsDate() || !DATE.equals(type) ? type : createUnboundedVarcharType())
            .collect(toList());

    PageBuilder pageBuilder = new PageBuilder(columnTypes);
    ImmutableList.Builder<Page> pages = ImmutableList.builder();
    long dataSize = 0;
    for (E row : tpchTable.createGenerator(10, 1, 1)) {
        pageBuilder.declarePosition();
        for (int i = 0; i < columns.size(); i++) {
            TpchColumn<E> column = columns.get(i);
            BlockBuilder blockBuilder = pageBuilder.getBlockBuilder(i);
            switch (column.getType().getBase()) {
                case IDENTIFIER:
                    BIGINT.writeLong(blockBuilder, column.getIdentifier(row));
                    break;
                case INTEGER:
                    INTEGER.writeLong(blockBuilder, column.getInteger(row));
                    break;
                case DATE:
                    if (format.supportsDate()) {
                        DATE.writeLong(blockBuilder, column.getDate(row));
                    }
                    else {
                        createUnboundedVarcharType().writeString(blockBuilder, column.getString(row));
                    }
                    break;
                case DOUBLE:
                    DOUBLE.writeDouble(blockBuilder, column.getDouble(row));
                    break;
                case VARCHAR:
                    createUnboundedVarcharType().writeSlice(blockBuilder, Slices.utf8Slice(column.getString(row)));
                    break;
                default:
                    throw new IllegalArgumentException("Unsupported type " + column.getType());
            }
        }
        if (pageBuilder.isFull()) {
            Page page = pageBuilder.build();
            pages.add(page);
            pageBuilder.reset();
            dataSize += page.getSizeInBytes();
            if (dataSize >= MIN_DATA_SIZE) {
                break;
            }
        }
    }
    return new TestData(columnNames, columnTypes, pages.build());
}
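
The benchmark uses the PageBuilder's own size tracking to chunk rows into pages. As a rough sketch of that flush-on-full pattern in isolation (assuming a single INTEGER column; the `rows` array and final flush are illustrative and not taken from the benchmark):

// Sketch of the flush-on-full pattern, reduced to one INTEGER column.
int[] rows = {1, 2, 3, 4, 5};
PageBuilder builder = new PageBuilder(ImmutableList.of(INTEGER));
ImmutableList.Builder<Page> result = ImmutableList.builder();
for (int row : rows) {
    builder.declarePosition();
    INTEGER.writeLong(builder.getBlockBuilder(0), row);
    if (builder.isFull()) {
        // flush a full page and start a new one
        result.add(builder.build());
        builder.reset();
    }
}
if (!builder.isEmpty()) {
    // flush the trailing partial page
    result.add(builder.build());
}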
Use of io.prestosql.spi.type.IntegerType.INTEGER in project hetu-core by openlookeng.
From the class ExtractSpatialJoins, method tryCreateSpatialJoin:
private static Result tryCreateSpatialJoin(
        Context context,
        JoinNode joinNode,
        RowExpression filter,
        PlanNodeId nodeId,
        List<Symbol> outputSymbols,
        CallExpression spatialFunction,
        Optional<RowExpression> radius,
        Metadata metadata,
        SplitManager splitManager,
        PageSourceManager pageSourceManager,
        TypeAnalyzer typeAnalyzer)
{
    // TODO Add support for distributed left spatial joins
    Optional<String> spatialPartitioningTableName = joinNode.getType() == INNER ? getSpatialPartitioningTableName(context.getSession()) : Optional.empty();
    Optional<KdbTree> kdbTree = spatialPartitioningTableName.map(tableName -> loadKdbTree(tableName, context.getSession(), metadata, splitManager, pageSourceManager, nodeId));

    List<RowExpression> arguments = spatialFunction.getArguments();
    verify(arguments.size() == 2);
    RowExpression firstArgument = arguments.get(0);
    RowExpression secondArgument = arguments.get(1);

    Type sphericalGeographyType = metadata.getType(SPHERICAL_GEOGRAPHY_TYPE_SIGNATURE);
    if (firstArgument.getType().equals(sphericalGeographyType) || secondArgument.getType().equals(sphericalGeographyType)) {
        if (joinNode.getType() != INNER) {
            return Result.empty();
        }
    }

    Set<Symbol> firstSymbols = extractUnique(firstArgument);
    Set<Symbol> secondSymbols = extractUnique(secondArgument);
    if (firstSymbols.isEmpty() || secondSymbols.isEmpty()) {
        return Result.empty();
    }

    Optional<Symbol> newFirstSymbol = newGeometrySymbol(context, firstArgument);
    Optional<Symbol> newSecondSymbol = newGeometrySymbol(context, secondArgument);

    PlanNode leftNode = joinNode.getLeft();
    PlanNode rightNode = joinNode.getRight();
    PlanNode newLeftNode;
    PlanNode newRightNode;

    // Check if the order of arguments of the spatial function matches the order of join sides
    int alignment = checkAlignment(joinNode, firstSymbols, secondSymbols);
    if (alignment > 0) {
        newLeftNode = newFirstSymbol.map(symbol -> addProjection(context, leftNode, symbol, firstArgument)).orElse(leftNode);
        newRightNode = newSecondSymbol.map(symbol -> addProjection(context, rightNode, symbol, secondArgument)).orElse(rightNode);
    }
    else if (alignment < 0) {
        newLeftNode = newSecondSymbol.map(symbol -> addProjection(context, leftNode, symbol, secondArgument)).orElse(leftNode);
        newRightNode = newFirstSymbol.map(symbol -> addProjection(context, rightNode, symbol, firstArgument)).orElse(rightNode);
    }
    else {
        return Result.empty();
    }

    RowExpression newFirstArgument = mapToExpression(newFirstSymbol, firstArgument, context);
    RowExpression newSecondArgument = mapToExpression(newSecondSymbol, secondArgument, context);

    Optional<Symbol> leftPartitionSymbol = Optional.empty();
    Optional<Symbol> rightPartitionSymbol = Optional.empty();
    if (kdbTree.isPresent()) {
        leftPartitionSymbol = Optional.of(context.getSymbolAllocator().newSymbol("pid", INTEGER));
        rightPartitionSymbol = Optional.of(context.getSymbolAllocator().newSymbol("pid", INTEGER));
        if (alignment > 0) {
            newLeftNode = addPartitioningNodes(metadata, context, newLeftNode, leftPartitionSymbol.get(), kdbTree.get(), newFirstArgument, Optional.empty());
            newRightNode = addPartitioningNodes(metadata, context, newRightNode, rightPartitionSymbol.get(), kdbTree.get(), newSecondArgument, radius);
        }
        else {
            newLeftNode = addPartitioningNodes(metadata, context, newLeftNode, leftPartitionSymbol.get(), kdbTree.get(), newSecondArgument, Optional.empty());
            newRightNode = addPartitioningNodes(metadata, context, newRightNode, rightPartitionSymbol.get(), kdbTree.get(), newFirstArgument, radius);
        }
    }

    CallExpression newSpatialFunction = new CallExpression(
            spatialFunction.getDisplayName(),
            spatialFunction.getFunctionHandle(),
            spatialFunction.getType(),
            ImmutableList.of(newFirstArgument, newSecondArgument),
            Optional.empty());
    RowExpression newFilter = replaceExpression(filter, ImmutableMap.of(spatialFunction, newSpatialFunction));

    return Result.ofPlanNode(new SpatialJoinNode(
            nodeId,
            SpatialJoinNode.Type.fromJoinNodeType(joinNode.getType()),
            newLeftNode,
            newRightNode,
            outputSymbols,
            newFilter,
            leftPartitionSymbol,
            rightPartitionSymbol,
            kdbTree.map(KdbTreeUtils::toJson)));
}
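
The implementation of checkAlignment is not shown here; conceptually it reports whether the spatial function's first argument refers to the left join side and the second to the right side, with 0 meaning the arguments mix symbols from both sides (in which case the rule bails out). A hypothetical, simplified version over plain symbol sets, not the actual hetu-core code, might look like:

// Hypothetical simplification of the alignment check used above:
// +1 when the first argument's symbols come from the left side and the second's from the right,
// -1 when they are swapped, 0 when neither ordering holds.
static int checkAlignment(Set<String> leftSymbols, Set<String> rightSymbols, Set<String> firstSymbols, Set<String> secondSymbols)
{
    if (leftSymbols.containsAll(firstSymbols) && rightSymbols.containsAll(secondSymbols)) {
        return 1;
    }
    if (leftSymbols.containsAll(secondSymbols) && rightSymbols.containsAll(firstSymbols)) {
        return -1;
    }
    return 0;
}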
Use of io.prestosql.spi.type.IntegerType.INTEGER in project hetu-core by openlookeng.
From the class TestSignatureBinder, method testFunction:
@Test
public void testFunction()
{
    Signature simple = functionSignature().returnType(parseTypeSignature("boolean")).argumentTypes(parseTypeSignature("function(integer,integer)")).build();
    assertThat(simple).boundTo("integer").fails();
    assertThat(simple).boundTo("function(integer,integer)").succeeds();
    // TODO: Support coercion of return type of lambda
    assertThat(simple).boundTo("function(integer,smallint)").withCoercion().fails();
    assertThat(simple).boundTo("function(integer,bigint)").withCoercion().fails();

    Signature applyTwice = functionSignature().returnType(parseTypeSignature("V")).argumentTypes(parseTypeSignature("T"), parseTypeSignature("function(T,U)"), parseTypeSignature("function(U,V)")).typeVariableConstraints(typeVariable("T"), typeVariable("U"), typeVariable("V")).build();
    assertThat(applyTwice).boundTo("integer", "integer", "integer").fails();
    assertThat(applyTwice).boundTo("integer", "function(integer,varchar)", "function(varchar,double)").produces(BoundVariables.builder().setTypeVariable("T", INTEGER).setTypeVariable("U", VARCHAR).setTypeVariable("V", DOUBLE).build());
    assertThat(applyTwice).boundTo("integer", new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(integer,varchar)")), new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(varchar,double)"))).produces(BoundVariables.builder().setTypeVariable("T", INTEGER).setTypeVariable("U", VARCHAR).setTypeVariable("V", DOUBLE).build());
    assertThat(applyTwice).boundTo(
            // pass function argument to non-function position of a function
            new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(integer,varchar)")),
            new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(integer,varchar)")),
            new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(varchar,double)"))).fails();
    assertThat(applyTwice).boundTo(
            new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(integer,varchar)")),
            // pass non-function argument to function position of a function
            "integer",
            new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(varchar,double)"))).fails();

    Signature flatMap = functionSignature().returnType(parseTypeSignature("array(T)")).argumentTypes(parseTypeSignature("array(T)"), parseTypeSignature("function(T, array(T))")).typeVariableConstraints(typeVariable("T")).build();
    assertThat(flatMap).boundTo("array(integer)", "function(integer, array(integer))").produces(BoundVariables.builder().setTypeVariable("T", INTEGER).build());

    Signature varargApply = functionSignature().returnType(parseTypeSignature("T")).argumentTypes(parseTypeSignature("T"), parseTypeSignature("function(T, T)")).typeVariableConstraints(typeVariable("T")).setVariableArity(true).build();
    assertThat(varargApply).boundTo("integer", "function(integer, integer)", "function(integer, integer)", "function(integer, integer)").produces(BoundVariables.builder().setTypeVariable("T", INTEGER).build());
    assertThat(varargApply).boundTo("integer", "function(integer, integer)", "function(integer, double)", "function(double, double)").fails();

    Signature loop = functionSignature().returnType(parseTypeSignature("T")).argumentTypes(parseTypeSignature("T"), parseTypeSignature("function(T, T)")).typeVariableConstraints(typeVariable("T")).build();
    assertThat(loop).boundTo("integer", new TypeSignatureProvider(paramTypes -> new FunctionType(paramTypes, BIGINT).getTypeSignature())).fails();
    assertThat(loop).boundTo("integer", new TypeSignatureProvider(paramTypes -> new FunctionType(paramTypes, BIGINT).getTypeSignature())).withCoercion().produces(BoundVariables.builder().setTypeVariable("T", BIGINT).build());
    // TODO: Support coercion of return type of lambda
    assertThat(loop).withCoercion().boundTo("integer", new TypeSignatureProvider(paramTypes -> new FunctionType(paramTypes, SMALLINT).getTypeSignature())).fails();

    // TODO: Support coercion of return type of lambda
    // Without coercion support for the return type of a lambda, the return type of the lambda must be `varchar(x)` to avoid the need for coercions.
    Signature varcharApply = functionSignature().returnType(parseTypeSignature("varchar")).argumentTypes(parseTypeSignature("varchar"), parseTypeSignature("function(varchar, varchar(x))", ImmutableSet.of("x"))).build();
    assertThat(varcharApply).withCoercion().boundTo("varchar(10)", new TypeSignatureProvider(paramTypes -> new FunctionType(paramTypes, createVarcharType(1)).getTypeSignature())).succeeds();

    Signature sortByKey = functionSignature().returnType(parseTypeSignature("array(T)")).argumentTypes(parseTypeSignature("array(T)"), parseTypeSignature("function(T,E)")).typeVariableConstraints(typeVariable("T"), orderableTypeParameter("E")).build();
    assertThat(sortByKey).boundTo("array(integer)", new TypeSignatureProvider(paramTypes -> new FunctionType(paramTypes, VARCHAR).getTypeSignature())).produces(BoundVariables.builder().setTypeVariable("T", INTEGER).setTypeVariable("E", VARCHAR).build());
}
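
The same fluent test harness (functionSignature, parseTypeSignature, typeVariable, assertThat...boundTo...produces) can express other higher-order signatures. As an illustrative sketch, not part of the original test, a transform-like signature bound against integer inputs could be checked like this within the same test class:

// Illustrative sketch: array(T), function(T,U) -> array(U), bound to INTEGER/VARCHAR.
Signature transform = functionSignature()
        .returnType(parseTypeSignature("array(U)"))
        .argumentTypes(parseTypeSignature("array(T)"), parseTypeSignature("function(T,U)"))
        .typeVariableConstraints(typeVariable("T"), typeVariable("U"))
        .build();
assertThat(transform)
        .boundTo("array(integer)", "function(integer, varchar)")
        .produces(BoundVariables.builder()
                .setTypeVariable("T", INTEGER)
                .setTypeVariable("U", VARCHAR)
                .build());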
Use of io.prestosql.spi.type.IntegerType.INTEGER in project hetu-core by openlookeng.
From the class OrcPageSourceFactory, method createOrcPageSource:
public static OrcPageSource createOrcPageSource(
        HdfsEnvironment hdfsEnvironment,
        String sessionUser,
        Configuration configuration,
        Path path,
        long start,
        long length,
        long fileSize,
        List<HiveColumnHandle> columns,
        boolean useOrcColumnNames,
        boolean isFullAcid,
        TupleDomain<HiveColumnHandle> effectivePredicate,
        DateTimeZone legacyFileTimeZone,
        TypeManager typeManager,
        DataSize maxMergeDistance,
        DataSize maxBufferSize,
        DataSize streamBufferSize,
        DataSize tinyStripeThreshold,
        DataSize maxReadBlockSize,
        boolean lazyReadSmallRanges,
        boolean orcBloomFiltersEnabled,
        FileFormatDataSourceStats stats,
        Optional<DynamicFilterSupplier> dynamicFilters,
        Optional<DeleteDeltaLocations> deleteDeltaLocations,
        Optional<Long> startRowOffsetOfFile,
        Optional<List<IndexMetadata>> indexes,
        SplitMetadata splitMetadata,
        OrcCacheStore orcCacheStore,
        OrcCacheProperties orcCacheProperties,
        int domainCompactionThreshold,
        boolean pageMetadataEnabled,
        long dataSourceLastModifiedTime)
{
    for (HiveColumnHandle column : columns) {
        checkArgument(column.getColumnType() == HiveColumnHandle.ColumnType.REGULAR || column.getHiveColumnIndex() == HiveColumnHandle.ROW_ID__COLUMN_INDEX, "column type must be regular: %s", column);
    }
    checkArgument(!effectivePredicate.isNone());

    OrcDataSource orcDataSource;
    try {
        // Always create a lazy stream; the HDFS stream is opened only when required.
        FSDataInputStream inputStream = new FSDataInputStream(new LazyFSInputStream(() -> {
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(sessionUser, path, configuration);
            return hdfsEnvironment.doAs(sessionUser, () -> fileSystem.open(path));
        }));
        orcDataSource = new HdfsOrcDataSource(new OrcDataSourceId(path.toString()), fileSize, maxMergeDistance, maxBufferSize, streamBufferSize, lazyReadSmallRanges, inputStream, stats, dataSourceLastModifiedTime);
    }
    catch (Exception e) {
        if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") || e instanceof FileNotFoundException) {
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, splitError(e, path, start, length), e);
    }

    AggregatedMemoryContext systemMemoryUsage = newSimpleAggregatedMemoryContext();
    try {
        OrcDataSource readerLocalDataSource = OrcReader.wrapWithCacheIfTiny(orcDataSource, tinyStripeThreshold);
        OrcFileTail fileTail;
        if (orcCacheProperties.isFileTailCacheEnabled()) {
            try {
                OrcDataSourceIdWithTimeStamp orcDataSourceIdWithTimeStamp = new OrcDataSourceIdWithTimeStamp(readerLocalDataSource.getId(), readerLocalDataSource.getLastModifiedTime());
                fileTail = orcCacheStore.getFileTailCache().get(new OrcFileTailCacheKey(orcDataSourceIdWithTimeStamp), () -> OrcPageSourceFactory.createFileTail(orcDataSource));
            }
            catch (UncheckedExecutionException | ExecutionException executionException) {
                handleCacheLoadException(executionException);
                log.debug(executionException.getCause(), "Error while caching the Orc file tail. Falling back to default flow");
                fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
            }
        }
        else {
            fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
        }
        OrcReader reader = new OrcReader(readerLocalDataSource, fileTail, maxMergeDistance, tinyStripeThreshold, maxReadBlockSize);

        List<OrcColumn> fileColumns = reader.getRootColumn().getNestedColumns();
        List<OrcColumn> fileReadColumns = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
        List<Type> fileReadTypes = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
        ImmutableList<String> acidColumnNames = null;
        List<ColumnAdaptation> columnAdaptations = new ArrayList<>(columns.size());

        // Only Hive ACID files will begin with bucket_
        boolean fileNameContainsBucket = path.getName().contains("bucket");
        if (isFullAcid && fileNameContainsBucket) {
            // Skip the acid schema check in case of non-ACID files
            acidColumnNames = ImmutableList.<String>builder()
                    .add(ACID_COLUMN_ORIGINAL_TRANSACTION, ACID_COLUMN_BUCKET, ACID_COLUMN_ROW_ID, ACID_COLUMN_CURRENT_TRANSACTION, ACID_COLUMN_OPERATION)
                    .build();
            verifyAcidSchema(reader, path);
            Map<String, OrcColumn> acidColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
            if (AcidUtils.isDeleteDelta(path.getParent())) {
                // Avoid reading column data from delete_delta files.
                // Execution reaches here in case of minor VACUUM, where all delete_delta files are merged together.
                fileColumns = ImmutableList.of();
            }
            else {
                fileColumns = ensureColumnNameConsistency(acidColumnsByName.get(ACID_COLUMN_ROW_STRUCT).getNestedColumns(), columns);
            }
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ORIGINAL_TRANSACTION.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_BUCKET.toLowerCase(ENGLISH)));
            fileReadTypes.add(INTEGER);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ROW_ID.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_CURRENT_TRANSACTION.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_OPERATION.toLowerCase(ENGLISH)));
            fileReadTypes.add(INTEGER);
        }

        Map<String, OrcColumn> fileColumnsByName = ImmutableMap.of();
        if (useOrcColumnNames || isFullAcid) {
            verifyFileHasColumnNames(fileColumns, path);
            // Convert column names read from ORC files to lower case to be consistent with those stored in Hive Metastore
            fileColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
        }

        TupleDomainOrcPredicateBuilder predicateBuilder = TupleDomainOrcPredicate.builder().setBloomFiltersEnabled(orcBloomFiltersEnabled);
        Map<HiveColumnHandle, Domain> effectivePredicateDomains = effectivePredicate.getDomains().orElseThrow(() -> new IllegalArgumentException("Effective predicate is none"));
        for (HiveColumnHandle column : columns) {
            OrcColumn orcColumn = null;
            if (useOrcColumnNames || isFullAcid) {
                orcColumn = fileColumnsByName.get(column.getName());
            }
            else if (column.getHiveColumnIndex() >= 0 && column.getHiveColumnIndex() < fileColumns.size()) {
                orcColumn = fileColumns.get(column.getHiveColumnIndex());
            }

            Type readType = typeManager.getType(column.getTypeSignature());
            if (orcColumn != null) {
                int sourceIndex = fileReadColumns.size();
                columnAdaptations.add(ColumnAdaptation.sourceColumn(sourceIndex));
                fileReadColumns.add(orcColumn);
                fileReadTypes.add(readType);
                Domain domain = effectivePredicateDomains.get(column);
                if (domain != null) {
                    predicateBuilder.addColumn(orcColumn.getColumnId(), domain);
                }
            }
            else if (isFullAcid && readType instanceof RowType && column.getName().equalsIgnoreCase(HiveColumnHandle.UPDATE_ROW_ID_COLUMN_NAME)) {
                HiveType hiveType = column.getHiveType();
                StructTypeInfo structTypeInfo = (StructTypeInfo) hiveType.getTypeInfo();
                ImmutableList.Builder<ColumnAdaptation> builder = new ImmutableList.Builder<>();
                ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
                List<ColumnAdaptation> adaptations = fieldNames.stream()
                        .map(acidColumnNames::indexOf)
                        .map(c -> ColumnAdaptation.sourceColumn(c, false))
                        .collect(Collectors.toList());
                columnAdaptations.add(ColumnAdaptation.structColumn(structTypeInfo, adaptations));
            }
            else {
                columnAdaptations.add(ColumnAdaptation.nullColumn(readType));
            }
        }

        Map<String, Domain> domains = effectivePredicate.getDomains().get().entrySet().stream().collect(toMap(e -> e.getKey().getName(), Map.Entry::getValue));
        OrcRecordReader recordReader = reader.createRecordReader(
                fileReadColumns,
                fileReadTypes,
                predicateBuilder.build(),
                start,
                length,
                legacyFileTimeZone,
                systemMemoryUsage,
                INITIAL_BATCH_SIZE,
                exception -> handleException(orcDataSource.getId(), exception),
                indexes,
                splitMetadata,
                domains,
                orcCacheStore,
                orcCacheProperties,
                pageMetadataEnabled);
        OrcDeletedRows deletedRows = new OrcDeletedRows(
                path.getName(),
                deleteDeltaLocations,
                new OrcDeleteDeltaPageSourceFactory(sessionUser, configuration, hdfsEnvironment, maxMergeDistance, maxBufferSize, streamBufferSize, maxReadBlockSize, tinyStripeThreshold, lazyReadSmallRanges, orcBloomFiltersEnabled, stats),
                sessionUser,
                configuration,
                hdfsEnvironment,
                startRowOffsetOfFile);

        boolean eagerload = false;
        if (indexes.isPresent()) {
            eagerload = indexes.get().stream().anyMatch(indexMetadata -> EAGER_LOAD_INDEX_ID.contains(indexMetadata.getIndex().getId()));
        }

        return new OrcPageSource(recordReader, columnAdaptations, orcDataSource, deletedRows, eagerload, systemMemoryUsage, stats);
    }
    catch (Exception e) {
        try {
            orcDataSource.close();
        }
        catch (IOException ignored) {
        }
        if (e instanceof PrestoException) {
            throw (PrestoException) e;
        }
        String message = splitError(e, path, start, length);
        if (e instanceof BlockMissingException) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    }
}
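
Callers consume the returned page source page by page. A minimal, hypothetical sketch of draining it and reading one INTEGER column (the column index 0 and the `pageSource` variable are illustrative assumptions, not taken from the factory above):

// Hypothetical consumer: drain the page source and read an INTEGER column at index 0.
// `pageSource` is assumed to be the OrcPageSource returned by createOrcPageSource(...).
while (!pageSource.isFinished()) {
    Page page = pageSource.getNextPage();
    if (page == null) {
        continue;
    }
    Block block = page.getBlock(0);
    for (int position = 0; position < page.getPositionCount(); position++) {
        if (!block.isNull(position)) {
            long value = INTEGER.getLong(block, position);
            // ... use the value
        }
    }
}
// close the page source when done (ConnectorPageSource extends Closeable)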