Use of io.prestosql.metadata.Metadata in project hetu-core by openlookeng.
Class TestHashAggregationOperator, method testMemoryLimit:
@Test(dataProvider = "hashEnabled", expectedExceptions = ExceededMemoryLimitException.class, expectedExceptionsMessageRegExp = "Query exceeded per-node user memory limit of 10B.*")
public void testMemoryLimit(boolean hashEnabled) {
    Metadata localMetadata = createTestMetadataManager();
    InternalAggregationFunction maxVarcharColumn = localMetadata.getFunctionAndTypeManager().getAggregateFunctionImplementation(
            new Signature(QualifiedObjectName.valueOfDefaultFunction("max"), AGGREGATE, parseTypeSignature(StandardTypes.VARCHAR), parseTypeSignature(StandardTypes.VARCHAR)));
    List<Integer> hashChannels = Ints.asList(1);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR, BIGINT, VARCHAR, BIGINT);
    List<Page> input = rowPagesBuilder
            .addSequencePage(10, 100, 0, 100, 0)
            .addSequencePage(10, 100, 0, 200, 0)
            .addSequencePage(10, 100, 0, 300, 0)
            .build();
    // The task context is capped at 10 bytes of user memory, so the aggregation is expected to fail.
    DriverContext driverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION, new DataSize(10, Unit.BYTE))
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BIGINT),
            hashChannels,
            ImmutableList.of(),
            Step.SINGLE,
            ImmutableList.of(
                    COUNT.bind(ImmutableList.of(0), Optional.empty()),
                    LONG_SUM.bind(ImmutableList.of(3), Optional.empty()),
                    LONG_AVERAGE.bind(ImmutableList.of(3), Optional.empty()),
                    maxVarcharColumn.bind(ImmutableList.of(2), Optional.empty())),
            rowPagesBuilder.getHashChannel(),
            Optional.empty(),
            100_000,
            Optional.of(new DataSize(16, MEGABYTE)),
            joinCompiler,
            false);
    toPages(operatorFactory, driverContext, input);
}
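The test is driven by TestNG's dataProvider = "hashEnabled", which runs it once per provider row. A minimal sketch of such a provider (requires org.testng.annotations.DataProvider; the actual provider in TestHashAggregationOperator may carry additional parameters):

@DataProvider(name = "hashEnabled")
public static Object[][] hashEnabled() {
    // Exercise the operator both with and without a precomputed hash channel.
    return new Object[][] {{true}, {false}};
}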
Use of io.prestosql.metadata.Metadata in project hetu-core by openlookeng.
Class TestHiveIntegrationSmokeTest, method getHiveInsertTableHandle:
private HiveInsertTableHandle getHiveInsertTableHandle(Session session, String tableName) {
    getQueryRunner().getMetadata().cleanupQuery(session);
    Metadata metadata = ((DistributedQueryRunner) getQueryRunner()).getCoordinator().getMetadata();
    return transaction(getQueryRunner().getTransactionManager(), getQueryRunner().getAccessControl())
            .execute(session, transactionSession -> {
                QualifiedObjectName objectName = new QualifiedObjectName(catalog, TPCH_SCHEMA, tableName);
                Optional<TableHandle> handle = metadata.getTableHandle(transactionSession, objectName);
                // Begin an insert to obtain the connector-level handle, then finish it with
                // empty fragments so no data is actually written.
                InsertTableHandle insertTableHandle = metadata.beginInsert(transactionSession, handle.get(), false);
                HiveInsertTableHandle hiveInsertTableHandle = (HiveInsertTableHandle) insertTableHandle.getConnectorHandle();
                metadata.finishInsert(transactionSession, insertTableHandle, ImmutableList.of(), ImmutableList.of());
                return hiveInsertTableHandle;
            });
}
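A hypothetical caller (the names below are illustrative, not taken from the test class) could use the helper to assert on connector-level details of the pending insert:

HiveInsertTableHandle handle = getHiveInsertTableHandle(getSession(), "test_insert_table");
// The handle exposes the resolved write target; checking the schema name is just one example.
assertEquals(handle.getSchemaName(), TPCH_SCHEMA);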
Use of io.prestosql.metadata.Metadata in project hetu-core by openlookeng.
Class TestHiveIntegrationSmokeTest, method getTableMetadata:
private TableMetadata getTableMetadata(String catalog, String schema, String tableName) {
    Session session = getSession();
    Metadata metadata = ((DistributedQueryRunner) getQueryRunner()).getCoordinator().getMetadata();
    return transaction(getQueryRunner().getTransactionManager(), getQueryRunner().getAccessControl())
            .readOnly()
            .execute(session, transactionSession -> {
                Optional<TableHandle> tableHandle = metadata.getTableHandle(transactionSession, new QualifiedObjectName(catalog, schema, tableName));
                assertTrue(tableHandle.isPresent());
                return metadata.getTableMetadata(transactionSession, tableHandle.get());
            });
}
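As a usage sketch, a test could fetch the metadata and check a connector property; treating "format" as the Hive storage-format property key is an assumption about this connector version, and the expected value is hypothetical:

TableMetadata tableMetadata = getTableMetadata(catalog, TPCH_SCHEMA, "test_table");
// getMetadata() returns the ConnectorTableMetadata, whose property map carries
// connector-specific settings such as the storage format.
Object format = tableMetadata.getMetadata().getProperties().get("format");
assertEquals(format, HiveStorageFormat.ORC); // hypothetical expected value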
Use of io.prestosql.metadata.Metadata in project hetu-core by openlookeng.
Class LogicalPlanner, method createInsertCubePlan:
private RelationPlan createInsertCubePlan(Analysis analysis, InsertCube insertCubeStatement) {
    Analysis.CubeInsert insert = analysis.getCubeInsert().get();
    TableMetadata tableMetadata = metadata.getTableMetadata(session, insert.getTarget());
    List<ColumnMetadata> visibleTableColumns = tableMetadata.getColumns().stream()
            .filter(column -> !column.isHidden())
            .collect(toImmutableList());
    List<String> visibleTableColumnNames = visibleTableColumns.stream()
            .map(ColumnMetadata::getName)
            .collect(toImmutableList());
    RelationPlan plan = createRelationPlan(analysis, insertCubeStatement.getQuery());
    Map<String, ColumnHandle> columns = metadata.getColumnHandles(session, insert.getTarget());
    Assignments.Builder assignments = Assignments.builder();
    for (ColumnMetadata column : tableMetadata.getColumns()) {
        if (column.isHidden()) {
            continue;
        }
        Symbol output = planSymbolAllocator.newSymbol(column.getName(), column.getType());
        int index = insert.getColumns().indexOf(columns.get(column.getName()));
        if (index < 0) {
            // Table column absent from the source query: fill it with a NULL cast.
            Expression cast = new Cast(new NullLiteral(), column.getType().getTypeSignature().toString());
            assignments.put(output, castToRowExpression(cast));
        } else {
            Symbol input = plan.getSymbol(index);
            Type tableType = column.getType();
            Type queryType = planSymbolAllocator.getTypes().get(input);
            if (queryType.equals(tableType) || typeCoercion.isTypeOnlyCoercion(queryType, tableType)) {
                assignments.put(output, castToRowExpression(toSymbolReference(input)));
            } else {
                Expression cast = noTruncationCast(toSymbolReference(input), queryType, tableType);
                assignments.put(output, castToRowExpression(cast));
            }
        }
    }
    ProjectNode projectNode = new ProjectNode(idAllocator.getNextId(), plan.getRoot(), assignments.build());
    List<Field> fields = visibleTableColumns.stream()
            .map(column -> Field.newUnqualified(column.getName(), column.getType()))
            .collect(toImmutableList());
    Scope scope = Scope.builder().withRelationType(RelationId.anonymous(), new RelationType(fields)).build();
    plan = new RelationPlan(projectNode, scope, projectNode.getOutputSymbols());
    Optional<NewTableLayout> newTableLayout = metadata.getInsertLayout(session, insert.getTarget());
    String catalogName = insert.getTarget().getCatalogName().getCatalogName();
    TableStatisticsMetadata statisticsMetadata = metadata.getStatisticsCollectionMetadataForWrite(session, catalogName, tableMetadata.getMetadata());
    RelationPlan tableWriterPlan = createTableWriterPlan(
            analysis,
            plan,
            new InsertReference(insert.getTarget(), analysis.isCubeOverwrite()),
            visibleTableColumnNames,
            newTableLayout,
            statisticsMetadata);
    Expression rewritten = null;
    Set<Identifier> predicateColumns = new HashSet<>();
    if (insertCubeStatement.getWhere().isPresent()) {
        rewritten = new QueryPlanner(analysis, planSymbolAllocator, idAllocator, buildLambdaDeclarationToSymbolMap(analysis, planSymbolAllocator), metadata, session, namedSubPlan, uniqueIdAllocator)
                .rewriteExpression(tableWriterPlan, insertCubeStatement.getWhere().get(), analysis, buildLambdaDeclarationToSymbolMap(analysis, planSymbolAllocator));
        predicateColumns.addAll(ExpressionUtils.getIdentifiers(rewritten));
    }
    CubeMetadata cubeMetadata = insert.getMetadata();
    if (!insertCubeStatement.isOverwrite() && !insertCubeStatement.getWhere().isPresent() && cubeMetadata.getCubeStatus() != CubeStatus.INACTIVE) {
        // Some data was inserted earlier, but this statement tries to insert the entire dataset.
        throw new PrestoException(QUERY_REJECTED, "Cannot allow insert. Inserting entire dataset but cube already has partial data");
    } else if (insertCubeStatement.getWhere().isPresent()) {
        if (!canSupportPredicate(rewritten)) {
            throw new PrestoException(QUERY_REJECTED, String.format("Cannot support predicate '%s'", ExpressionFormatter.formatExpression(rewritten, Optional.empty())));
        }
        if (!insertCubeStatement.isOverwrite() && arePredicatesOverlapping(rewritten, cubeMetadata)) {
            throw new PrestoException(QUERY_REJECTED, String.format("Cannot allow insert. Cube already contains data for the given predicate '%s'", ExpressionFormatter.formatExpression(insertCubeStatement.getWhere().get(), Optional.empty())));
        }
    }
    TableHandle sourceTableHandle = insert.getSourceTable();
    // At this point it has been verified that the source table has not been updated,
    // so the insert into the cube should be allowed.
    LongSupplier tableLastModifiedTimeSupplier = metadata.getTableLastModifiedTimeSupplier(session, sourceTableHandle);
    checkState(tableLastModifiedTimeSupplier != null, "Table last modified time is null");
    Map<Symbol, Type> predicateColumnsType = predicateColumns.stream()
            .map(identifier -> new Symbol(identifier.getValue()))
            .collect(Collectors.toMap(Function.identity(), symbol -> planSymbolAllocator.getTypes().get(symbol), (key1, ignored) -> key1));
    CubeFinishNode cubeFinishNode = new CubeFinishNode(
            idAllocator.getNextId(),
            tableWriterPlan.getRoot(),
            planSymbolAllocator.newSymbol("rows", BIGINT),
            new CubeUpdateMetadata(
                    tableMetadata.getQualifiedName().toString(),
                    tableLastModifiedTimeSupplier.getAsLong(),
                    rewritten != null ? ExpressionFormatter.formatExpression(rewritten, Optional.empty()) : null,
                    insertCubeStatement.isOverwrite()),
            predicateColumnsType);
    return new RelationPlan(cubeFinishNode, analysis.getScope(insertCubeStatement), cubeFinishNode.getOutputSymbols());
}
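The per-column loop above follows a simple alignment rule: table columns missing from the source query become CAST(NULL AS type), matching columns pass through unchanged, and a no-truncation cast is inserted when the types differ. A standalone sketch of that rule, with plain Java strings standing in for the planner's Symbol/Type machinery (all names here are illustrative, not the hetu-core API):

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class InsertColumnAlignmentSketch {
    static Map<String, String> align(List<String> tableColumns, List<String> queryColumns) {
        Map<String, String> assignments = new LinkedHashMap<>();
        for (String column : tableColumns) {
            int index = queryColumns.indexOf(column);
            // Absent from the query: fill with a NULL cast; present: pass the value
            // through (the real planner adds a cast when the types differ).
            assignments.put(column, index < 0 ? "CAST(NULL AS <type>)" : queryColumns.get(index));
        }
        return assignments;
    }
}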
Use of io.prestosql.metadata.Metadata in project hetu-core by openlookeng.
Class PlanFragmenter, method analyzeGroupedExecution:
private SubPlan analyzeGroupedExecution(Session session, SubPlan subPlan) {
    PlanFragment fragment = subPlan.getFragment();
    GroupedExecutionProperties properties = fragment.getRoot().accept(new GroupedExecutionTagger(session, metadata, nodePartitioningManager), null);
    if (properties.isSubTreeUseful()) {
        // Dynamic bucket-to-node assignment is only preferred when every remote source
        // broadcasts (REPLICATE) and the session enables dynamic grouped scheduling.
        boolean preferDynamic = fragment.getRemoteSourceNodes().stream().allMatch(node -> node.getExchangeType() == REPLICATE)
                && isDynamicSchduleForGroupedExecution(session);
        BucketNodeMap bucketNodeMap = nodePartitioningManager.getBucketNodeMap(session, fragment.getPartitioning(), preferDynamic);
        if (bucketNodeMap.isDynamic()) {
            fragment = fragment.withDynamicLifespanScheduleGroupedExecution(properties.getCapableTableScanNodes());
        } else {
            fragment = fragment.withFixedLifespanScheduleGroupedExecution(properties.getCapableTableScanNodes());
        }
    }
    ImmutableList.Builder<SubPlan> result = ImmutableList.builder();
    for (SubPlan child : subPlan.getChildren()) {
        result.add(analyzeGroupedExecution(session, child));
    }
    return new SubPlan(fragment, result.build());
}
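The method follows a common rewrite shape: transform the current fragment, then rebuild the SubPlan from recursively processed children. A generic sketch of that pattern (assuming SubPlan's getFragment/getChildren accessors and its (fragment, children) constructor, as used above; the transform parameter and method name are illustrative, and java.util.function.UnaryOperator is required):

private SubPlan rewriteFragments(SubPlan plan, UnaryOperator<PlanFragment> transform) {
    // Recurse into the children first, then reassemble the tree around the
    // transformed fragment, mirroring the structure of analyzeGroupedExecution.
    ImmutableList.Builder<SubPlan> children = ImmutableList.builder();
    for (SubPlan child : plan.getChildren()) {
        children.add(rewriteFragments(child, transform));
    }
    return new SubPlan(transform.apply(plan.getFragment()), children.build());
}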