Use of io.prestosql.metadata.Metadata in project hetu-core by openlookeng.
The class PlanPrinter, method textDistributedPlan.
public static String textDistributedPlan(StageInfo outputStageInfo, ValuePrinter valuePrinter, boolean verbose, Metadata metadata)
{
    List<StageInfo> allStages = getAllStages(Optional.of(outputStageInfo));
    // Map each table scan's plan node id to its TableInfo, across all stages
    Map<PlanNodeId, TableInfo> tableInfos = allStages.stream()
            .map(StageInfo::getTables)
            .map(Map::entrySet)
            .flatMap(Collection::stream)
            .collect(toImmutableMap(Entry::getKey, Entry::getValue));
    List<PlanFragment> allFragments = allStages.stream().map(StageInfo::getPlan).collect(toImmutableList());
    Map<PlanNodeId, PlanNodeStats> aggregatedStats = aggregateStageStats(allStages);
    StringBuilder builder = new StringBuilder();
    for (StageInfo stageInfo : allStages) {
        builder.append(formatFragment(tableScanNode -> tableInfos.get(tableScanNode.getId()), valuePrinter,
                stageInfo.getPlan(), Optional.of(stageInfo), Optional.of(aggregatedStats), verbose, allFragments, metadata));
    }
    return builder.toString();
}
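The collector idiom above (flatten each stage's table map, then collect once into an immutable map) is easy to miss inside the long chain. Here is a minimal, self-contained sketch of the same idiom, using plain strings as hypothetical stand-ins for PlanNodeId and TableInfo:

import static com.google.common.collect.ImmutableMap.toImmutableMap;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

public class FlattenStageTables
{
    public static void main(String[] args)
    {
        // Each inner map plays the role of one stage's PlanNodeId -> TableInfo map
        List<Map<String, String>> perStageTables = ImmutableList.of(
                ImmutableMap.of("node-1", "orders", "node-2", "lineitem"),
                ImmutableMap.of("node-3", "customer"));

        // Same idiom as textDistributedPlan: flatten the entry sets, collect once.
        // toImmutableMap throws on duplicate keys, which is the desired behavior
        // here since plan node ids are unique across fragments.
        Map<String, String> allTables = perStageTables.stream()
                .map(Map::entrySet)
                .flatMap(Collection::stream)
                .collect(toImmutableMap(Entry::getKey, Entry::getValue));

        System.out.println(allTables); // {node-1=orders, node-2=lineitem, node-3=customer}
    }
}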
Use of io.prestosql.metadata.Metadata in project hetu-core by openlookeng.
The class TestScanFilterAndProjectOperator, method testRecordCursorYield.
@Test
public void testRecordCursorYield()
{
    // create a generic long function that yields on every projected row,
    // and verify that we yield exactly #rows times in total
    // create a table with 15 rows
    int length = 15;
    Page input = SequencePageBuilder.createSequencePage(ImmutableList.of(BIGINT), length, 0);
    DriverContext driverContext = newDriverContext();

    // set up a generic long function with a callback that forces a yield
    Metadata localMetadata = functionAssertions.getMetadata();
    localMetadata.getFunctionAndTypeManager().registerBuiltInFunctions(ImmutableList.of(new GenericLongFunction("record_cursor", value -> {
        driverContext.getYieldSignal().forceYieldForTesting();
        return value;
    })));
    ExpressionCompiler compiler = new ExpressionCompiler(localMetadata, new PageFunctionCompiler(localMetadata, 0));

    List<RowExpression> projections = ImmutableList.of(call(
            QualifiedObjectName.valueOfDefaultFunction("generic_long_record_cursor").toString(),
            new BuiltInFunctionHandle(internalScalarFunction(
                    QualifiedObjectName.valueOfDefaultFunction("generic_long_record_cursor"),
                    BIGINT.getTypeSignature(),
                    ImmutableList.of(BIGINT.getTypeSignature()))),
            BIGINT,
            field(0, BIGINT)));
    Supplier<CursorProcessor> cursorProcessor = compiler.compileCursorProcessor(Optional.empty(), projections, "key");
    Supplier<PageProcessor> pageProcessor = compiler.compilePageProcessor(Optional.empty(), projections);

    ScanFilterAndProjectOperator.ScanFilterAndProjectOperatorFactory factory = new ScanFilterAndProjectOperator.ScanFilterAndProjectOperatorFactory(
            0, new PlanNodeId("test"), new PlanNodeId("0"),
            (session, split, table, columns, dynamicFilter) -> new RecordPageSource(new PageRecordSet(ImmutableList.of(BIGINT), input)),
            cursorProcessor, pageProcessor, TEST_TABLE_HANDLE, ImmutableList.of(), null, ImmutableList.of(BIGINT),
            new DataSize(0, BYTE), 0, ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_DEFAULT, new UUID(0, 0),
            false, Optional.empty(), 0, 0);
    SourceOperator operator = factory.createOperator(driverContext);
    operator.addSplit(new Split(new CatalogName("test"), TestingSplit.createLocalSplit(), Lifespan.taskWide()));
    operator.noMoreSplits();

    // start the driver; getOutput() returns null for the first 15 calls because each row forces a yield
    for (int i = 0; i < length; i++) {
        driverContext.getYieldSignal().setWithDelay(SECONDS.toNanos(1000), driverContext.getYieldExecutor());
        assertNull(operator.getOutput());
        driverContext.getYieldSignal().reset();
    }

    // the 16th yield does not prevent the operator from producing a page
    driverContext.getYieldSignal().setWithDelay(SECONDS.toNanos(1000), driverContext.getYieldExecutor());
    Page output = operator.getOutput();
    driverContext.getYieldSignal().reset();
    assertNotNull(output);
    assertEquals(toValues(BIGINT, output.getBlock(0)), toValues(BIGINT, input.getBlock(0)));
}
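The cooperative-yield contract the test exercises can be sketched without any Presto classes. Below is a minimal, library-free illustration; ToyYieldSignal and ToyOperator are hypothetical stand-ins, not the hetu-core types:

import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical stand-in for the driver's yield signal
class ToyYieldSignal
{
    private final AtomicBoolean yieldRequested = new AtomicBoolean();

    void forceYield() { yieldRequested.set(true); }
    void reset() { yieldRequested.set(false); }
    boolean isSet() { return yieldRequested.get(); }
}

class ToyOperator
{
    private final ToyYieldSignal signal;
    private final int totalRows;
    private int processed;

    ToyOperator(ToyYieldSignal signal, int totalRows)
    {
        this.signal = signal;
        this.totalRows = totalRows;
    }

    // Mirrors the test's contract: while the projection keeps forcing yields,
    // getOutput() returns null; once all rows are processed it returns a result.
    Integer getOutput()
    {
        while (processed < totalRows) {
            processed++;          // "project" one row
            signal.forceYield();  // the test's callback does exactly this
            if (signal.isSet()) {
                return null;      // give up the thread cooperatively
            }
        }
        return processed;
    }
}

public class YieldSketch
{
    public static void main(String[] args)
    {
        ToyYieldSignal signal = new ToyYieldSignal();
        ToyOperator operator = new ToyOperator(signal, 15);
        int yields = 0;
        Integer output;
        while ((output = operator.getOutput()) == null) {
            yields++;
            signal.reset();
        }
        System.out.println(yields + " yields, output=" + output); // 15 yields, output=15
    }
}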
Use of io.prestosql.metadata.Metadata in project hetu-core by openlookeng.
The class TestHiveWriterFactory, method testSortingPath.
@Test
public void testSortingPath()
{
    setUp();
    String targetPath = "/tmp";
    String writePath = "/tmp/table";
    Optional<WriteIdInfo> writeIdInfo = Optional.of(new WriteIdInfo(1, 1, 0));
    StorageFormat storageFormat = StorageFormat.fromHiveStorageFormat(ORC);
    Storage storage = new Storage(storageFormat, "", Optional.empty(), false, ImmutableMap.of());
    // a transactional managed table with a single int column
    Table table = new Table("schema", "table", "user", "MANAGED_TABLE", storage,
            ImmutableList.of(new Column("col_1", HiveType.HIVE_INT, Optional.empty())),
            ImmutableList.of(), ImmutableMap.of("transactional", "true"),
            Optional.of("original"), Optional.of("expanded"));
    HiveConfig hiveConfig = getHiveConfig();
    HivePageSinkMetadata hivePageSinkMetadata = new HivePageSinkMetadata(new SchemaTableName("schema", "table"), Optional.of(table), ImmutableMap.of());
    PageSorter pageSorter = new PagesIndexPageSorter(new PagesIndex.TestingFactory(false));
    Metadata metadata = createTestMetadataManager();
    TypeManager typeManager = new InternalTypeManager(metadata.getFunctionAndTypeManager());
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveConfig, new NoHdfsAuthentication());
    LocationService locationService = new HiveLocationService(hdfsEnvironment);
    ConnectorSession session = newSession();
    HiveWriterFactory hiveWriterFactory = new HiveWriterFactory(
            getDefaultHiveFileWriterFactories(hiveConfig), "schema", "table", false, HiveACIDWriteType.DELETE,
            ImmutableList.of(new HiveColumnHandle("col_1", HiveType.HIVE_INT, new TypeSignature("integer", ImmutableList.of()), 0, HiveColumnHandle.ColumnType.REGULAR, Optional.empty())),
            ORC, ORC, ImmutableMap.of(), OptionalInt.empty(), ImmutableList.of(),
            new LocationHandle(targetPath, writePath, false, LocationHandle.WriteMode.STAGE_AND_MOVE_TO_TARGET_DIRECTORY, writeIdInfo),
            locationService, session.getQueryId(),
            new HivePageSinkMetadataProvider(hivePageSinkMetadata, CachingHiveMetastore.memoizeMetastore(metastore, 1000), new HiveIdentity(session)),
            typeManager, hdfsEnvironment, pageSorter, hiveConfig.getWriterSortBufferSize(), hiveConfig.getMaxOpenSortFiles(),
            false, UTC, session, new TestingNodeManager("fake-environment"), new HiveEventClient(),
            new HiveSessionProperties(hiveConfig, new OrcFileWriterConfig(), new ParquetFileWriterConfig()),
            new HiveWriterStats(), getDefaultOrcFileWriterFactory(hiveConfig));
    HiveWriter hiveWriter = hiveWriterFactory.createWriter(ImmutableList.of(), OptionalInt.empty(), Optional.empty());
    // the sorting writer stages rows under a temp prefix derived from the bucket file name
    assertEquals(((SortingFileWriter) hiveWriter.getFileWriter()).getTempFilePrefix().getName(), ".tmp-sort.bucket_00000");
}
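The final assertion checks a naming convention: sort temp files prefix the bucket file name with ".tmp-sort.". A minimal sketch of that convention, assuming bucket files are named bucket_NNNNN as the assertion shows (the helpers below are hypothetical, not the connector's code):

public class SortTempFileNaming
{
    // hypothetical helper; the asserted value suggests bucket files named bucket_NNNNN
    static String bucketFileName(int bucketNumber)
    {
        return String.format("bucket_%05d", bucketNumber);
    }

    static String tempFilePrefix(int bucketNumber)
    {
        return ".tmp-sort." + bucketFileName(bucketNumber);
    }

    public static void main(String[] args)
    {
        System.out.println(tempFilePrefix(0)); // .tmp-sort.bucket_00000
    }
}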
Use of io.prestosql.metadata.Metadata in project hetu-core by openlookeng.
The class TestBinaryFileSpiller, method setUp.
@BeforeMethod
public void setUp() throws IOException
{
    Metadata metadata = createTestMetadataManager();
    // stub the file system client manager so spills go to a local temp directory
    FileSystemClientManager fileSystemClientManager = mock(FileSystemClientManager.class);
    when(fileSystemClientManager.getFileSystemClient(any(Path.class)))
            .thenReturn(new HetuLocalFileSystemClient(new LocalConfig(new Properties()), Paths.get(spillPath.getCanonicalPath())));
    spillerStats = new SpillerStats();
    FeaturesConfig featuresConfig = new FeaturesConfig();
    try {
        featuresConfig.setSpillerSpillPaths(spillPath.getCanonicalPath());
    }
    catch (IOException e) {
        e.printStackTrace();
    }
    featuresConfig.setSpillMaxUsedSpaceThreshold(1.0);
    NodeSpillConfig nodeSpillConfig = new NodeSpillConfig();
    singleStreamSpillerFactory = new FileSingleStreamSpillerFactory(metadata, spillerStats, featuresConfig, nodeSpillConfig, fileSystemClientManager);
    factory = new GenericSpillerFactory(singleStreamSpillerFactory);
    PagesSerdeFactory pagesSerdeFactory = new PagesSerdeFactory(metadata.getFunctionAndTypeManager().getBlockEncodingSerde(), nodeSpillConfig.isSpillCompressionEnabled());
    pagesSerde = pagesSerdeFactory.createPagesSerde();
    memoryContext = newSimpleAggregatedMemoryContext();
}
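The mock/when/thenReturn stubbing used for FileSystemClientManager above is standard Mockito: any argument matching the given matcher returns the canned object. A self-contained sketch of the same pattern on a plain Map, for readers unfamiliar with it:

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Map;

public class MockitoStubSketch
{
    public static void main(String[] args)
    {
        // Create a mock and stub a lookup, mirroring the getFileSystemClient stub:
        // any String key returns the canned value.
        @SuppressWarnings("unchecked")
        Map<String, String> lookup = mock(Map.class);
        when(lookup.get(anyString())).thenReturn("local-client");

        System.out.println(lookup.get("/any/path")); // local-client
    }
}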
Use of io.prestosql.metadata.Metadata in project hetu-core by openlookeng.
The class TestHashAggregationOperator, method testHashAggregationMemoryReservation.
@Test(dataProvider = "hashEnabledAndMemoryLimitForMergeValues")
public void testHashAggregationMemoryReservation(boolean hashEnabled, boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory)
{
    Metadata localMetadata = createTestMetadataManager();
    // resolve the array_agg(bigint) -> array(bigint) aggregation implementation
    InternalAggregationFunction arrayAggColumn = localMetadata.getFunctionAndTypeManager().getAggregateFunctionImplementation(
            new Signature(QualifiedObjectName.valueOfDefaultFunction("array_agg"), AGGREGATE,
                    parseTypeSignature("array(bigint)"), parseTypeSignature(StandardTypes.BIGINT)));
    List<Integer> hashChannels = Ints.asList(1);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, BIGINT, BIGINT);
    List<Page> input = rowPagesBuilder.addSequencePage(10, 100, 0).addSequencePage(10, 200, 0).addSequencePage(10, 300, 0).build();
    DriverContext driverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION, new DataSize(10, Unit.MEGABYTE))
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0, new PlanNodeId("test"), ImmutableList.of(BIGINT), hashChannels, ImmutableList.of(), Step.SINGLE, true,
            ImmutableList.of(arrayAggColumn.bind(ImmutableList.of(0), Optional.empty())),
            rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(new DataSize(16, MEGABYTE)),
            spillEnabled, succinctBytes(memoryLimitForMerge), succinctBytes(memoryLimitForMergeWithMemory),
            spillerFactory, joinCompiler, false);
    Operator operator = operatorFactory.createOperator(driverContext);
    toPages(operator, input.iterator(), revokeMemoryWhenAddingPages);
    // once the operator has drained its output, all user memory must have been released
    assertEquals(operator.getOperatorContext().getOperatorStats().getUserMemoryReservation().toBytes(), 0);
}
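To make the test input concrete: each addSequencePage(10, a, b) call produces a page whose columns count up from the given starting values, so column 1 (0..9 in every page) is the grouping key and array_agg collects the column-0 values per key. A dependency-free sketch of that shape (sequencePage below is a hypothetical stand-in for the RowPagesBuilder behavior, not the Presto API):

import java.util.ArrayList;
import java.util.List;

public class SequencePageSketch
{
    // Hypothetical stand-in for addSequencePage(length, start0, start1):
    // column c of row i holds start_c + i.
    static List<long[]> sequencePage(int length, long start0, long start1)
    {
        List<long[]> rows = new ArrayList<>();
        for (int i = 0; i < length; i++) {
            rows.add(new long[] {start0 + i, start1 + i});
        }
        return rows;
    }

    public static void main(String[] args)
    {
        // Mirrors the three pages in the test; each shares grouping keys 0..9,
        // so the aggregation accumulates values from all three pages per key.
        List<long[]> page1 = sequencePage(10, 100, 0);
        List<long[]> page2 = sequencePage(10, 200, 0);
        List<long[]> page3 = sequencePage(10, 300, 0);
        System.out.println(page1.size() + " rows; first row: [" + page1.get(0)[0] + ", " + page1.get(0)[1] + "]");
    }
}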