use of com.huawei.boostkit.omnidata.model.Predicate in project boostkit-bigdata by kunpengcompute.
the class OrcPageSourceFactory method createOrcPageSource.
public static OrcPageSource createOrcPageSource(HdfsEnvironment hdfsEnvironment, String sessionUser, Configuration configuration, Path path, long start, long length, long fileSize, List<HiveColumnHandle> columns, boolean useOrcColumnNames, boolean isFullAcid, TupleDomain<HiveColumnHandle> effectivePredicate, DateTimeZone legacyFileTimeZone, TypeManager typeManager, DataSize maxMergeDistance, DataSize maxBufferSize, DataSize streamBufferSize, DataSize tinyStripeThreshold, DataSize maxReadBlockSize, boolean lazyReadSmallRanges, boolean orcBloomFiltersEnabled, FileFormatDataSourceStats stats, Optional<DynamicFilterSupplier> dynamicFilters, Optional<DeleteDeltaLocations> deleteDeltaLocations, Optional<Long> startRowOffsetOfFile, Optional<List<IndexMetadata>> indexes, SplitMetadata splitMetadata, OrcCacheStore orcCacheStore, OrcCacheProperties orcCacheProperties, int domainCompactionThreshold, boolean pageMetadataEnabled, long dataSourceLastModifiedTime) {
    for (HiveColumnHandle column : columns) {
        checkArgument(column.getColumnType() == HiveColumnHandle.ColumnType.REGULAR || column.getHiveColumnIndex() == HiveColumnHandle.ROW_ID__COLUMN_INDEX, "column type must be regular: %s", column);
    }
    checkArgument(!effectivePredicate.isNone());
    OrcDataSource orcDataSource;
    try {
        // Always create a lazy stream; the HDFS stream is opened only when required.
        FSDataInputStream inputStream = new FSDataInputStream(new LazyFSInputStream(() -> {
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(sessionUser, path, configuration);
            return hdfsEnvironment.doAs(sessionUser, () -> fileSystem.open(path));
        }));
        orcDataSource = new HdfsOrcDataSource(new OrcDataSourceId(path.toString()), fileSize, maxMergeDistance, maxBufferSize, streamBufferSize, lazyReadSmallRanges, inputStream, stats, dataSourceLastModifiedTime);
    } catch (Exception e) {
        if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") || e instanceof FileNotFoundException) {
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, splitError(e, path, start, length), e);
    }
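    // Aggregated memory context that accounts for everything read through this page source.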
    AggregatedMemoryContext systemMemoryUsage = newSimpleAggregatedMemoryContext();
    try {
        OrcDataSource readerLocalDataSource = OrcReader.wrapWithCacheIfTiny(orcDataSource, tinyStripeThreshold);
        OrcFileTail fileTail;
        if (orcCacheProperties.isFileTailCacheEnabled()) {
            try {
                OrcDataSourceIdWithTimeStamp orcDataSourceIdWithTimeStamp = new OrcDataSourceIdWithTimeStamp(readerLocalDataSource.getId(), readerLocalDataSource.getLastModifiedTime());
                fileTail = orcCacheStore.getFileTailCache().get(new OrcFileTailCacheKey(orcDataSourceIdWithTimeStamp), () -> OrcPageSourceFactory.createFileTail(orcDataSource));
            } catch (UncheckedExecutionException | ExecutionException executionException) {
                handleCacheLoadException(executionException);
                log.debug(executionException.getCause(), "Error while caching the Orc file tail. Falling back to default flow");
                fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
            }
        } else {
            fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
        }
        OrcReader reader = new OrcReader(readerLocalDataSource, fileTail, maxMergeDistance, tinyStripeThreshold, maxReadBlockSize);
        List<OrcColumn> fileColumns = reader.getRootColumn().getNestedColumns();
        List<OrcColumn> fileReadColumns = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
        List<Type> fileReadTypes = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
        ImmutableList<String> acidColumnNames = null;
        List<ColumnAdaptation> columnAdaptations = new ArrayList<>(columns.size());
        // Only Hive ACID file names contain "bucket"
        boolean fileNameContainsBucket = path.getName().contains("bucket");
        if (isFullAcid && fileNameContainsBucket) {
            // Skip the ACID schema check for non-ACID files
            acidColumnNames = ImmutableList.<String>builder().add(ACID_COLUMN_ORIGINAL_TRANSACTION, ACID_COLUMN_BUCKET, ACID_COLUMN_ROW_ID, ACID_COLUMN_CURRENT_TRANSACTION, ACID_COLUMN_OPERATION).build();
            verifyAcidSchema(reader, path);
            Map<String, OrcColumn> acidColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
            if (AcidUtils.isDeleteDelta(path.getParent())) {
                // Avoid reading column data from delete_delta files.
                // Control reaches here during a minor VACUUM, where all delete_delta files are merged together.
                fileColumns = ImmutableList.of();
            } else {
                fileColumns = ensureColumnNameConsistency(acidColumnsByName.get(ACID_COLUMN_ROW_STRUCT).getNestedColumns(), columns);
            }
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ORIGINAL_TRANSACTION.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_BUCKET.toLowerCase(ENGLISH)));
            fileReadTypes.add(INTEGER);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ROW_ID.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_CURRENT_TRANSACTION.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_OPERATION.toLowerCase(ENGLISH)));
            fileReadTypes.add(INTEGER);
        }
        Map<String, OrcColumn> fileColumnsByName = ImmutableMap.of();
        if (useOrcColumnNames || isFullAcid) {
            verifyFileHasColumnNames(fileColumns, path);
            // Convert column names read from ORC files to lower case to be consistent with those stored in Hive Metastore
            fileColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
        }
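        // Build an ORC predicate from the effective TupleDomain so that stripes and row groups can be pruned during the read.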
        TupleDomainOrcPredicateBuilder predicateBuilder = TupleDomainOrcPredicate.builder().setBloomFiltersEnabled(orcBloomFiltersEnabled);
        Map<HiveColumnHandle, Domain> effectivePredicateDomains = effectivePredicate.getDomains().orElseThrow(() -> new IllegalArgumentException("Effective predicate is none"));
        for (HiveColumnHandle column : columns) {
            OrcColumn orcColumn = null;
            if (useOrcColumnNames || isFullAcid) {
                orcColumn = fileColumnsByName.get(column.getName());
            } else if (column.getHiveColumnIndex() >= 0 && column.getHiveColumnIndex() < fileColumns.size()) {
                orcColumn = fileColumns.get(column.getHiveColumnIndex());
            }
            Type readType = typeManager.getType(column.getTypeSignature());
            if (orcColumn != null) {
                int sourceIndex = fileReadColumns.size();
                columnAdaptations.add(ColumnAdaptation.sourceColumn(sourceIndex));
                fileReadColumns.add(orcColumn);
                fileReadTypes.add(readType);
                Domain domain = effectivePredicateDomains.get(column);
                if (domain != null) {
                    predicateBuilder.addColumn(orcColumn.getColumnId(), domain);
                }
            } else if (isFullAcid && readType instanceof RowType && column.getName().equalsIgnoreCase(HiveColumnHandle.UPDATE_ROW_ID_COLUMN_NAME)) {
                HiveType hiveType = column.getHiveType();
                StructTypeInfo structTypeInfo = (StructTypeInfo) hiveType.getTypeInfo();
                List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
                List<ColumnAdaptation> adaptations = fieldNames.stream().map(acidColumnNames::indexOf).map(c -> ColumnAdaptation.sourceColumn(c, false)).collect(Collectors.toList());
                columnAdaptations.add(ColumnAdaptation.structColumn(structTypeInfo, adaptations));
            } else {
                columnAdaptations.add(ColumnAdaptation.nullColumn(readType));
            }
        }
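        // The predicate domains, re-keyed by column name, are handed to the record reader together with any heuristic indexes for the split.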
        Map<String, Domain> domains = effectivePredicate.getDomains().get().entrySet().stream().collect(toMap(e -> e.getKey().getName(), Map.Entry::getValue));
        OrcRecordReader recordReader = reader.createRecordReader(fileReadColumns, fileReadTypes, predicateBuilder.build(), start, length, legacyFileTimeZone, systemMemoryUsage, INITIAL_BATCH_SIZE, exception -> handleException(orcDataSource.getId(), exception), indexes, splitMetadata, domains, orcCacheStore, orcCacheProperties, pageMetadataEnabled);
        OrcDeletedRows deletedRows = new OrcDeletedRows(path.getName(), deleteDeltaLocations, new OrcDeleteDeltaPageSourceFactory(sessionUser, configuration, hdfsEnvironment, maxMergeDistance, maxBufferSize, streamBufferSize, maxReadBlockSize, tinyStripeThreshold, lazyReadSmallRanges, orcBloomFiltersEnabled, stats), sessionUser, configuration, hdfsEnvironment, startRowOffsetOfFile);
        boolean eagerload = false;
        if (indexes.isPresent()) {
            eagerload = indexes.get().stream().anyMatch(indexMetadata -> EAGER_LOAD_INDEX_ID.contains(indexMetadata.getIndex().getId()));
        }
        return new OrcPageSource(recordReader, columnAdaptations, orcDataSource, deletedRows, eagerload, systemMemoryUsage, stats);
    } catch (Exception e) {
        try {
            orcDataSource.close();
        } catch (IOException ignored) {
        }
        if (e instanceof PrestoException) {
            throw (PrestoException) e;
        }
        String message = splitError(e, path, start, length);
        if (e instanceof BlockMissingException) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    }
}
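The file-tail lookup above follows a common Guava cache pattern: Cache.get(key, loader) computes the value on a miss, and the caller falls back to a direct read when loading fails. A minimal, self-contained sketch of that pattern follows; the class name FileTailCacheSketch, the loadTail helper, and the cache sizing are illustrative assumptions, not project code.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.util.concurrent.UncheckedExecutionException;
import java.util.concurrent.ExecutionException;

public final class FileTailCacheSketch {
    // Hypothetical cache keyed by a file identifier; the real code keys on OrcFileTailCacheKey.
    private final Cache<String, byte[]> fileTailCache = CacheBuilder.newBuilder().maximumSize(1_000).build();

    byte[] getFileTail(String fileId) {
        try {
            // Compute-if-absent: the loader runs only on a cache miss.
            return fileTailCache.get(fileId, () -> loadTail(fileId));
        } catch (UncheckedExecutionException | ExecutionException e) {
            // Cache loading failed; fall back to reading the tail directly, as the factory above does.
            return loadTail(fileId);
        }
    }

    private byte[] loadTail(String fileId) {
        return new byte[0]; // placeholder for the real file-tail read
    }
}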
use of com.huawei.boostkit.omnidata.model.Predicate in project boostkit-bigdata by kunpengcompute.
the class DataIoAdapter method getPageIterator.
/**
 * Contacts the OmniData server and fetches the result pages for a file split.
 *
 * @param pageCandidate file split info
 * @param sparkOutPut data schema
 * @param partitionColumn partition column
 * @param filterOutPut filter schema
 * @param pushDownOperators push-down expressions
 * @return iterator over the WritableColumnVector result pages
 * @throws TaskExecutionException if connecting to the OmniData server fails
 * @throws UnknownHostException if a push-down host name cannot be resolved
 * @notice third-party APIs throw the raw Exception type, so this method has to catch the base Exception
 */
public Iterator<WritableColumnVector[]> getPageIterator(PageCandidate pageCandidate, Seq<Attribute> sparkOutPut, Seq<Attribute> partitionColumn, Seq<Attribute> filterOutPut, PushDownInfo pushDownOperators) throws TaskExecutionException, UnknownHostException {
    initCandidates(pageCandidate, filterOutPut);
    // Collect partition column names and initialize the aggregation candidates.
    List<Attribute> partitionColumnBatch = JavaConverters.seqAsJavaList(partitionColumn);
    for (Attribute attribute : partitionColumnBatch) {
        partitionColumnName.add(attribute.name());
    }
    List<AggExeInfo> aggExecutionList = JavaConverters.seqAsJavaList(pushDownOperators.aggExecutions());
    if (aggExecutionList.isEmpty()) {
        initColumnInfo(sparkOutPut);
    }
    DataSource dataSource = initDataSource(pageCandidate);
    RowExpression rowExpression = initFilter(pushDownOperators.filterExecutions());
    Optional<RowExpression> prestoFilter = rowExpression == null ? Optional.empty() : Optional.of(rowExpression);
    Optional<AggregationInfo> aggregations = initAggAndGroupInfo(aggExecutionList);
    OptionalLong limitLong = NdpUtils.convertLimitExeInfo(pushDownOperators.limitExecution());
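    // Bundle the collected types, columns, optional filter, projections, aggregations and limit into the OmniData Predicate, then pair it with the data source in a TaskSource.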
    Predicate predicate = new Predicate(omnidataTypes, omnidataColumns, prestoFilter, omnidataProjections, ImmutableMap.of(), ImmutableMap.of(), aggregations, limitLong);
    TaskSource taskSource = new TaskSource(dataSource, predicate, 1048576);
    SparkDeserializer deserializer = initSparkDeserializer();
    WritableColumnVector[] page = null;
    int failedTimes = 0;
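    // Pick a random host to try first; the remaining hosts serve as failover targets.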
    String[] sdiHostArray = pageCandidate.getSdiHosts().split(",");
    int randomIndex = (int) (Math.random() * sdiHostArray.length);
    Iterator<String> sdiHosts = Arrays.stream(sdiHostArray).iterator();
    Set<String> sdiHostSet = new HashSet<>();
    sdiHostSet.add(sdiHostArray[randomIndex]);
    while (sdiHosts.hasNext()) {
        String sdiHost;
        if (failedTimes == 0) {
            sdiHost = sdiHostArray[randomIndex];
        } else {
            sdiHost = sdiHosts.next();
            if (sdiHostSet.contains(sdiHost)) {
                continue;
            }
        }
        String ipAddress = InetAddress.getByName(sdiHost).getHostAddress();
        Properties properties = new Properties();
        properties.put("omnidata.client.target.list", ipAddress);
        LOG.info("Push down node info: [hostname :{} ,ip :{}]", sdiHost, ipAddress);
        try {
            orcDataReader = new DataReaderImpl<SparkDeserializer>(properties, taskSource, deserializer);
            hasNextPage = true;
            page = (WritableColumnVector[]) orcDataReader.getNextPageBlocking();
            if (orcDataReader.isFinished()) {
                orcDataReader.close();
                hasNextPage = false;
            }
            break;
        } catch (OmniDataException omniDataException) {
            OmniErrorCode errorCode = omniDataException.getErrorCode();
            switch (errorCode) {
                case OMNIDATA_INSUFFICIENT_RESOURCES:
                    LOG.warn("OMNIDATA_INSUFFICIENT_RESOURCES: OmniData-server's push down queue is full, begin to find next OmniData-server");
                    break;
                case OMNIDATA_UNSUPPORTED_OPERATOR:
                    LOG.warn("OMNIDATA_UNSUPPORTED_OPERATOR: OmniDataException: exist unsupported operator");
                    break;
                case OMNIDATA_GENERIC_ERROR:
                    LOG.warn("OMNIDATA_GENERIC_ERROR: Current OmniData-server unavailable, begin to find next OmniData-server");
                    break;
                case OMNIDATA_NOT_FOUND:
                    LOG.warn("OMNIDATA_NOT_FOUND: Current OmniData-Server not found, begin to find next OmniData-server");
                    break;
                case OMNIDATA_INVALID_ARGUMENT:
                    LOG.warn("OMNIDATA_INVALID_ARGUMENT: INVALID_ARGUMENT, exist unsupported operator or dataType");
                    break;
                case OMNIDATA_IO_ERROR:
                    LOG.warn("OMNIDATA_IO_ERROR: Current OmniData-Server io exception, begin to find next OmniData-server");
                    break;
                default:
                    LOG.warn("OmniDataException: OMNIDATA_ERROR.");
            }
            LOG.warn("Push down failed node info [hostname :{} ,ip :{}]", sdiHost, ipAddress);
            ++failedTimes;
        } catch (Exception e) {
            LOG.warn("Push down failed node info [hostname :{} ,ip :{}]", sdiHost, ipAddress, e);
            ++failedTimes;
        }
    }
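    // Every candidate host was tried at most once; fail the task if all attempts (capped at TASK_FAILED_TIMES) failed.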
    int retryTime = Math.min(TASK_FAILED_TIMES, sdiHostArray.length);
    if (failedTimes >= retryTime) {
        LOG.warn("No Omni-data-server to Connect, Task has tried {} times.", retryTime);
        throw new TaskExecutionException("No Omni-data-server to Connect");
    }
    List<WritableColumnVector[]> l = new ArrayList<>();
    l.add(page);
    return l.iterator();
}
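Stripped of the OmniData specifics, the host-selection loop above is a try-one-random-host-first, then-fail-over strategy. The sketch below isolates that pattern; connectWithFailover and the tryConnect callback (a java.util.function.Predicate, unrelated to the OmniData Predicate class) are hypothetical stand-ins for the DataReaderImpl construction.

import java.util.List;
import java.util.Random;
import java.util.function.Predicate;

public final class FailoverSketch {
    /**
     * Tries one randomly chosen host first, then the remaining hosts in order.
     * Returns the host that succeeded, or null if every host failed.
     */
    static String connectWithFailover(List<String> hosts, Predicate<String> tryConnect) {
        int first = new Random().nextInt(hosts.size());
        if (tryConnect.test(hosts.get(first))) {
            return hosts.get(first);
        }
        for (int i = 0; i < hosts.size(); i++) {
            if (i == first) {
                continue; // the random pick was already tried
            }
            if (tryConnect.test(hosts.get(i))) {
                return hosts.get(i);
            }
        }
        return null;
    }
}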