Usage example of io.prestosql.sql.gen.OrderingCompiler from the openlookeng/hetu-core project: method testCaptureRestoreWithSpillToHdfsEnabled of class TestOrderByOperator.
/**
 * Verifies that four input pages are consumed and a single, fully sorted output page is
 * produced when spill data is written to HDFS.
 * Spilling and the snapshot 'capture1' happen after the first two pages are added; the
 * operator is then re-created (simulating a task reschedule) after all four pages have
 * been added, but before finish() is called.
 *
 * @throws Exception
 */
@Test
public void testCaptureRestoreWithSpillToHdfsEnabled() throws Exception {
    // Set up an HDFS-backed spill environment and snapshot machinery
    Path snapshotDir = Paths.get("/tmp/hetu/snapshot/");
    HetuHdfsFileSystemClient hdfsClient = getLocalHdfs();
    when(fileSystemClientManager.getFileSystemClient(any(String.class), any(Path.class))).thenReturn(hdfsClient);
    GenericSpillerFactory hdfsSpillerFactory = createGenericSpillerFactory(snapshotDir, fileSystemClientManager, true, "hdfs");
    SnapshotConfig config = new SnapshotConfig();
    config.setSpillProfile("hdfs");
    config.setSpillToHdfs(true);
    snapshotUtils = new SnapshotUtils(fileSystemClientManager, config, new InMemoryNodeManager());
    snapshotUtils.initialize();
    List<Page> firstBatch = rowPagesBuilder(VARCHAR, BIGINT)
            .row("a", 1L).row("b", 2L)
            .pageBreak()
            .row("b", 3L).row("a", 4L)
            .build();
    List<Page> secondBatch = rowPagesBuilder(VARCHAR, BIGINT)
            .row("c", 4L).row("d", 6L)
            .pageBreak()
            .row("c", 2L).row("d", 3L)
            .build();
    OrderByOperatorFactory factory = new OrderByOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(VARCHAR, BIGINT),
            ImmutableList.of(0, 1),
            10,
            ImmutableList.of(0, 1),
            ImmutableList.of(ASC_NULLS_LAST, DESC_NULLS_LAST),
            new PagesIndex.TestingFactory(false),
            true,
            Optional.of(hdfsSpillerFactory),
            new OrderingCompiler(),
            false);
    DriverContext context = createDriverContext(defaultMemoryLimit, TEST_SNAPSHOT_SESSION);
    context.getPipelineContext().getTaskContext().getSnapshotManager().setTotalComponents(1);
    OrderByOperator operator = (OrderByOperator) factory.createOperator(context);
    // Step1: feed the first two pages
    firstBatch.forEach(operator::addInput);
    // Step2: trigger the spill
    getFutureValue(operator.startMemoryRevoke());
    operator.finishMemoryRevoke();
    // Step3: a marker page drives snapshot 'capture1'
    operator.addInput(MarkerPage.snapshotPage(1));
    // Step4: feed the remaining two pages
    secondBatch.forEach(operator::addInput);
    // Step5: simulate a reschedule after failure — rebuild the whole pipeline
    context = createDriverContext(defaultMemoryLimit, TEST_SNAPSHOT_SESSION);
    factory = new OrderByOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(VARCHAR, BIGINT),
            ImmutableList.of(0, 1),
            10,
            ImmutableList.of(0, 1),
            ImmutableList.of(ASC_NULLS_LAST, DESC_NULLS_LAST),
            new PagesIndex.TestingFactory(false),
            true,
            Optional.of(hdfsSpillerFactory),
            new OrderingCompiler(),
            false);
    operator = (OrderByOperator) factory.createOperator(context);
    // Step6: restore to 'capture1'; the spiller still references the first two pages
    operator.addInput(MarkerPage.resumePage(1));
    // Step7: replay the last two pages
    secondBatch.forEach(operator::addInput);
    operator.finish();
    // Verify the sorted output (col0 ASC, col1 DESC)
    MaterializedResult expected = resultBuilder(context.getSession(), VARCHAR, BIGINT)
            .row("a", 4L).row("a", 1L)
            .row("b", 3L).row("b", 2L)
            .row("c", 4L).row("c", 2L)
            .row("d", 6L).row("d", 3L)
            .build();
    // Skip any marker pages emitted before the data page
    Page output;
    do {
        output = operator.getOutput();
    } while (output instanceof MarkerPage);
    ImmutableList.Builder<Page> outputPages = ImmutableList.builder();
    outputPages.add(output);
    MaterializedResult actual = toMaterializedResult(context.getSession(), expected.getTypes(), outputPages.build());
    Assert.assertEquals(actual, expected);
}
Usage example of io.prestosql.sql.gen.OrderingCompiler from the openlookeng/hetu-core project: method testMemoryLimit of class TestOrderByOperator.
@Test(expectedExceptions = ExceededMemoryLimitException.class, expectedExceptionsMessageRegExp = "Query exceeded per-node user memory limit of 10B.*")
public void testMemoryLimit() {
    // A 10-byte per-node limit is far too small for these pages, so consuming
    // the input through the operator must raise ExceededMemoryLimitException.
    List<Page> input = rowPagesBuilder(BIGINT, DOUBLE)
            .row(1L, 0.1)
            .row(2L, 0.2)
            .pageBreak()
            .row(-1L, -0.1)
            .row(4L, 0.4)
            .build();
    DriverContext context = createTaskContext(executor, scheduledExecutor, TEST_SESSION, new DataSize(10, Unit.BYTE))
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
    OrderByOperatorFactory factory = new OrderByOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BIGINT, DOUBLE),
            ImmutableList.of(1),
            10,
            ImmutableList.of(0),
            ImmutableList.of(ASC_NULLS_LAST),
            new PagesIndex.TestingFactory(false),
            false,
            Optional.of(spillerFactory),
            new OrderingCompiler(),
            false);
    toPages(factory, context, input);
}
Usage example of io.prestosql.sql.gen.OrderingCompiler from the openlookeng/hetu-core project: method testMultipleOutputPages of class TestOrderByOperator.
@Test(dataProvider = "spillEnabled")
public void testMultipleOutputPages(boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimit) {
    // Use enough rows that the finish phase must emit more than one output page
    int rowCount = 80_000;
    List<Page> input = rowPagesBuilder(BIGINT, DOUBLE).addSequencePage(rowCount, 0, 0).build();
    OrderByOperatorFactory factory = new OrderByOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BIGINT, DOUBLE),
            ImmutableList.of(1),
            10,
            ImmutableList.of(0),
            ImmutableList.of(DESC_NULLS_LAST),
            new PagesIndex.TestingFactory(false),
            spillEnabled,
            Optional.of(spillerFactory),
            new OrderingCompiler(),
            false);
    DriverContext context = createDriverContext(memoryLimit, TEST_SESSION);
    // Sorting the 0..rowCount-1 sequence descending yields rowCount-1, rowCount-2, ..., 0
    MaterializedResult.Builder expectedRows = resultBuilder(context.getSession(), DOUBLE);
    for (int value = rowCount - 1; value >= 0; value--) {
        expectedRows.row((double) value);
    }
    MaterializedResult expected = expectedRows.build();
    List<Page> pages = toPages(factory, context, input, revokeMemoryWhenAddingPages);
    assertGreaterThan(pages.size(), 1, "Expected more than one output page");
    MaterializedResult actual = toMaterializedResult(context.getSession(), expected.getTypes(), pages);
    assertEquals(actual.getMaterializedRows(), expected.getMaterializedRows());
    // Spills must occur exactly when spilling is enabled
    assertTrue(spillEnabled == (spillerFactory.getSpillsCount() > 0), format("Spill state mismatch. Expected spill: %s, spill count: %s", spillEnabled, spillerFactory.getSpillsCount()));
}
Usage example of io.prestosql.sql.gen.OrderingCompiler from the openlookeng/hetu-core project: method testCaptureRestoreWithSpill of class TestOrderByOperator.
/**
 * Verifies that four input pages are consumed and a single, fully sorted output page is
 * produced when spill data is written to the local file system.
 * Spilling and the snapshot 'capture1' happen after the first two pages are added; the
 * operator is then re-created (simulating a task reschedule) after all four pages have
 * been added, but before finish() is called.
 *
 * @throws Exception
 */
@Test
public void testCaptureRestoreWithSpill() throws Exception {
    // Set up a local-disk spill environment and snapshot machinery
    Path snapshotDir = Paths.get("/tmp/hetu/snapshot/");
    GenericSpillerFactory localSpillerFactory = createGenericSpillerFactory(snapshotDir, fileSystemClientManager, false, null);
    SnapshotConfig config = new SnapshotConfig();
    snapshotUtils = new SnapshotUtils(fileSystemClientManager, config, new InMemoryNodeManager());
    snapshotUtils.initialize();
    List<Page> firstBatch = rowPagesBuilder(VARCHAR, BIGINT)
            .row("a", 1L).row("b", 2L)
            .pageBreak()
            .row("b", 3L).row("a", 4L)
            .build();
    List<Page> secondBatch = rowPagesBuilder(VARCHAR, BIGINT)
            .row("c", 4L).row("d", 6L)
            .pageBreak()
            .row("c", 2L).row("d", 3L)
            .build();
    OrderByOperatorFactory factory = new OrderByOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(VARCHAR, BIGINT),
            ImmutableList.of(0, 1),
            10,
            ImmutableList.of(0, 1),
            ImmutableList.of(ASC_NULLS_LAST, DESC_NULLS_LAST),
            new PagesIndex.TestingFactory(false),
            true,
            Optional.of(localSpillerFactory),
            new OrderingCompiler(),
            false);
    DriverContext context = createDriverContext(defaultMemoryLimit, TEST_SNAPSHOT_SESSION);
    context.getPipelineContext().getTaskContext().getSnapshotManager().setTotalComponents(1);
    OrderByOperator operator = (OrderByOperator) factory.createOperator(context);
    // Step1: feed the first two pages
    firstBatch.forEach(operator::addInput);
    // Step2: trigger the spill
    getFutureValue(operator.startMemoryRevoke());
    operator.finishMemoryRevoke();
    // Step3: a marker page drives snapshot 'capture1'
    operator.addInput(MarkerPage.snapshotPage(1));
    // Step4: feed the remaining two pages
    secondBatch.forEach(operator::addInput);
    // Step5: simulate a reschedule after failure — rebuild the whole pipeline
    context = createDriverContext(defaultMemoryLimit, TEST_SNAPSHOT_SESSION);
    factory = new OrderByOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(VARCHAR, BIGINT),
            ImmutableList.of(0, 1),
            10,
            ImmutableList.of(0, 1),
            ImmutableList.of(ASC_NULLS_LAST, DESC_NULLS_LAST),
            new PagesIndex.TestingFactory(false),
            true,
            Optional.of(localSpillerFactory),
            new OrderingCompiler(),
            false);
    operator = (OrderByOperator) factory.createOperator(context);
    // Step6: restore to 'capture1'; the spiller still references the first two pages
    operator.addInput(MarkerPage.resumePage(1));
    // Step7: replay the last two pages
    secondBatch.forEach(operator::addInput);
    operator.finish();
    // Verify the sorted output (col0 ASC, col1 DESC)
    MaterializedResult expected = resultBuilder(context.getSession(), VARCHAR, BIGINT)
            .row("a", 4L).row("a", 1L)
            .row("b", 3L).row("b", 2L)
            .row("c", 4L).row("c", 2L)
            .row("d", 6L).row("d", 3L)
            .build();
    // Skip any marker pages emitted before the data page
    Page output;
    do {
        output = operator.getOutput();
    } while (output instanceof MarkerPage);
    ImmutableList.Builder<Page> outputPages = ImmutableList.builder();
    outputPages.add(output);
    MaterializedResult actual = toMaterializedResult(context.getSession(), expected.getTypes(), outputPages.build());
    Assert.assertEquals(actual, expected);
}
Usage example of io.prestosql.sql.gen.OrderingCompiler from the openlookeng/hetu-core project: method testMultiFieldKeySnapshot of class TestOrderByOperator.
@Test
public void testMultiFieldKeySnapshot() {
    // Two pages of (VARCHAR, BIGINT) rows to be ordered by a two-column key
    List<Page> input = rowPagesBuilder(VARCHAR, BIGINT)
            .row("a", 1L)
            .row("b", 2L)
            .pageBreak()
            .row("b", 3L)
            .row("a", 4L)
            .build();
    OrderByOperatorFactory factory = new OrderByOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(VARCHAR, BIGINT),
            ImmutableList.of(0, 1),
            10,
            ImmutableList.of(0, 1),
            ImmutableList.of(ASC_NULLS_LAST, DESC_NULLS_LAST),
            new PagesIndex.TestingFactory(false),
            false,
            Optional.of(spillerFactory),
            new OrderingCompiler(),
            false);
    DriverContext context = createDriverContext(0, TEST_SESSION);
    // Ordered by (col0 ASC, col1 DESC): a/4, a/1, b/3, b/2
    MaterializedResult expected = resultBuilder(context.getSession(), VARCHAR, BIGINT)
            .row("a", 4L)
            .row("a", 1L)
            .row("b", 3L)
            .row("b", 2L)
            .build();
    assertOperatorEqualsWithSimpleSelfStateComparison(factory, context, input, expected, false, createExpectedMapping());
}
Aggregations