Use of io.airlift.units.DataSize in project presto by prestodb: class TestHashAggregationOperator, method testMultiplePartialFlushes.
@Test(dataProvider = "hashEnabledAndMemoryLimitBeforeSpillValues")
public void testMultiplePartialFlushes(boolean hashEnabled, long memoryLimitBeforeSpill, long memoryLimitForMergeWithMemory)
        throws Exception
{
    List<Integer> hashChannels = Ints.asList(0);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, BIGINT);
    List<Page> input = rowPagesBuilder
            .addSequencePage(500, 0)
            .addSequencePage(500, 500)
            .addSequencePage(500, 1000)
            .addSequencePage(500, 1500)
            .build();
    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BIGINT),
            hashChannels,
            ImmutableList.of(),
            Step.PARTIAL,
            ImmutableList.of(LONG_SUM.bind(ImmutableList.of(0), Optional.empty())),
            rowPagesBuilder.getHashChannel(),
            Optional.empty(),
            100_000,
            new DataSize(1, Unit.KILOBYTE),
            memoryLimitBeforeSpill > 0,
            succinctBytes(memoryLimitBeforeSpill),
            succinctBytes(memoryLimitForMergeWithMemory),
            spillerFactory,
            joinCompiler);
    DriverContext driverContext = createTaskContext(executor, TEST_SESSION, new DataSize(4, Unit.KILOBYTE))
            .addPipelineContext(0, true, true)
            .addDriverContext();
    try (Operator operator = operatorFactory.createOperator(driverContext)) {
        List<Page> expectedPages = rowPagesBuilder(BIGINT, BIGINT)
                .addSequencePage(2000, 0, 0)
                .build();
        MaterializedResult expected = resultBuilder(driverContext.getSession(), BIGINT, BIGINT)
                .pages(expectedPages)
                .build();
        Iterator<Page> inputIterator = input.iterator();
        // Fill up the aggregation
        while (operator.needsInput() && inputIterator.hasNext()) {
            operator.addInput(inputIterator.next());
        }
        // Drain the output (partial flush)
        List<Page> outputPages = new ArrayList<>();
        while (true) {
            Page output = operator.getOutput();
            if (output == null) {
                break;
            }
            outputPages.add(output);
        }
        // There should be some pages that were drained
        assertTrue(!outputPages.isEmpty());
        // The operator should need input again, since this was only a partial flush
        assertTrue(operator.needsInput());
        // Now, drive the operator to completion
        outputPages.addAll(toPages(operator, inputIterator));
        MaterializedResult actual;
        if (hashEnabled) {
            // Drop the hash channel for all pages
            List<Page> actualPages = dropChannel(outputPages, ImmutableList.of(1));
            List<Type> expectedTypes = without(operator.getTypes(), ImmutableList.of(1));
            actual = toMaterializedResult(operator.getOperatorContext().getSession(), expectedTypes, actualPages);
        }
        else {
            actual = toMaterializedResult(operator.getOperatorContext().getSession(), operator.getTypes(), outputPages);
        }
        assertEquals(actual.getTypes(), expected.getTypes());
        assertEqualsIgnoreOrder(actual.getMaterializedRows(), expected.getMaterializedRows());
    }
}
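The factory above uses io.airlift.units.DataSize in two ways: an explicit value-plus-unit for the operator's memory budget, and succinctBytes to wrap raw byte counts for the spill limits. A minimal, standalone sketch of those constructions, assuming the DataSize(double, Unit) constructor and DataSize.succinctBytes from airlift-units (the values and the class name DataSizeSketch are illustrative, not taken from the test):

import io.airlift.units.DataSize;
import io.airlift.units.DataSize.Unit;

import static io.airlift.units.DataSize.succinctBytes;

public class DataSizeSketch
{
    public static void main(String[] args)
    {
        // explicit value + unit, as passed for the operator memory budget above
        DataSize memoryBudget = new DataSize(1, Unit.KILOBYTE);
        System.out.println(memoryBudget.toBytes()); // 1024 (airlift units are binary)

        // succinctBytes wraps a raw byte count and picks a readable unit
        DataSize spillLimit = succinctBytes(4 * 1024 * 1024);
        System.out.println(spillLimit); // 4MB
    }
}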
Use of io.airlift.units.DataSize in project presto by prestodb: class TestHashAggregationOperator, method testMergeWithMemorySpill.
@Test
public void testMergeWithMemorySpill()
{
    List<Integer> hashChannels = Ints.asList(0);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(BIGINT);
    int smallPagesSpillThresholdSize = 150000;
    List<Page> input = rowPagesBuilder
            .addSequencePage(smallPagesSpillThresholdSize, 0)
            .addSequencePage(10, smallPagesSpillThresholdSize)
            .build();
    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BIGINT),
            hashChannels,
            ImmutableList.of(),
            Step.SINGLE,
            ImmutableList.of(LONG_SUM.bind(ImmutableList.of(0), Optional.empty())),
            rowPagesBuilder.getHashChannel(),
            Optional.empty(),
            1,
            new DataSize(16, MEGABYTE),
            true,
            new DataSize(smallPagesSpillThresholdSize, Unit.BYTE),
            succinctBytes(Integer.MAX_VALUE),
            spillerFactory,
            joinCompiler);
    DriverContext driverContext = createTaskContext(executor, TEST_SESSION, new DataSize(1, Unit.KILOBYTE))
            .addPipelineContext(0, true, true)
            .addDriverContext();
    MaterializedResult.Builder resultBuilder = resultBuilder(driverContext.getSession(), BIGINT);
    for (int i = 0; i < smallPagesSpillThresholdSize + 10; ++i) {
        resultBuilder.row((long) i, (long) i);
    }
    assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, resultBuilder.build(), false, Optional.of(hashChannels.size()));
}
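Here the spill trigger is expressed directly in bytes via new DataSize(smallPagesSpillThresholdSize, Unit.BYTE). A short sketch of how such a byte-denominated DataSize can be inspected or converted, assuming the toBytes and convertTo methods from airlift-units (the class name is illustrative):

import io.airlift.units.DataSize;
import io.airlift.units.DataSize.Unit;

public class SpillThresholdSketch
{
    public static void main(String[] args)
    {
        DataSize threshold = new DataSize(150_000, Unit.BYTE);

        // the exact byte count is preserved
        long bytes = threshold.toBytes(); // 150000

        // convertTo re-expresses the same size in another unit (value may be fractional)
        DataSize inKilobytes = threshold.convertTo(Unit.KILOBYTE);
        System.out.println(inKilobytes); // roughly 146.48kB
    }
}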
Use of io.airlift.units.DataSize in project presto by prestodb: class TestHashJoinOperator, method testMemoryLimit.
@Test(expectedExceptions = ExceededMemoryLimitException.class, expectedExceptionsMessageRegExp = "Query exceeded local memory limit of.*", dataProvider = "hashEnabledValues")
public void testMemoryLimit(boolean parallelBuild, boolean probeHashEnabled, boolean buildHashEnabled)
        throws Exception
{
    TaskContext taskContext = TestingTaskContext.createTaskContext(executor, TEST_SESSION, new DataSize(100, BYTE));
    RowPagesBuilder buildPages = rowPagesBuilder(buildHashEnabled, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT))
            .addSequencePage(10, 20, 30, 40);
    buildHash(parallelBuild, taskContext, Ints.asList(0), buildPages, Optional.empty());
}
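The DataSize(100, BYTE) task limit is deliberately tiny, so building the hash table is guaranteed to exceed it and trigger ExceededMemoryLimitException. A hedged sketch of the comparison such a check boils down to; checkMemoryLimit and MemoryLimitSketch are hypothetical names, not Presto's actual memory-accounting code:

import io.airlift.units.DataSize;
import io.airlift.units.DataSize.Unit;

public class MemoryLimitSketch
{
    // hypothetical helper: fail once tracked allocations exceed the configured limit
    static void checkMemoryLimit(long allocatedBytes, DataSize limit)
    {
        if (allocatedBytes > limit.toBytes()) {
            throw new RuntimeException("Query exceeded local memory limit of " + limit);
        }
    }

    public static void main(String[] args)
    {
        DataSize limit = new DataSize(100, Unit.BYTE);
        checkMemoryLimit(64, limit);   // within the limit
        checkMemoryLimit(4096, limit); // throws
    }
}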
Use of io.airlift.units.DataSize in project presto by prestodb: class TestHttpPageBufferClient, method testExceptionFromResponseHandler.
@Test
public void testExceptionFromResponseHandler()
        throws Exception
{
    TestingTicker ticker = new TestingTicker();
    AtomicReference<Duration> tickerIncrement = new AtomicReference<>(new Duration(0, TimeUnit.SECONDS));
    TestingHttpClient.Processor processor = (input) -> {
        Duration delta = tickerIncrement.get();
        ticker.increment(delta.toMillis(), TimeUnit.MILLISECONDS);
        throw new RuntimeException("Foo");
    };
    CyclicBarrier requestComplete = new CyclicBarrier(2);
    TestingClientCallback callback = new TestingClientCallback(requestComplete);
    URI location = URI.create("http://localhost:8080");
    HttpPageBufferClient client = new HttpPageBufferClient(
            new TestingHttpClient(processor, executor),
            new DataSize(10, Unit.MEGABYTE),
            new Duration(1, TimeUnit.MINUTES),
            new Duration(1, TimeUnit.MINUTES),
            location,
            callback,
            executor,
            ticker);
    assertStatus(client, location, "queued", 0, 0, 0, 0, "not scheduled");
    // the request processor will throw an exception; verify the request is marked as completed
    // this starts the error stopwatch
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 1);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertEquals(callback.getFailedBuffers(), 0);
    assertStatus(client, location, "queued", 0, 1, 1, 1, "not scheduled");
    // advance time forward, but not enough to fail the client
    tickerIncrement.set(new Duration(30, TimeUnit.SECONDS));
    // verify that the client has not failed
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 2);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertEquals(callback.getFailedBuffers(), 0);
    assertStatus(client, location, "queued", 0, 2, 2, 2, "not scheduled");
    // advance time forward beyond the minimum error duration
    tickerIncrement.set(new Duration(31, TimeUnit.SECONDS));
    // verify that the client has failed
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 3);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertEquals(callback.getFailedBuffers(), 1);
    assertInstanceOf(callback.getFailure(), PageTransportTimeoutException.class);
    assertContains(callback.getFailure().getMessage(), WORKER_NODE_ERROR + " (http://localhost:8080/0 - 3 failures, time since last success 61.00s)");
    assertStatus(client, location, "queued", 0, 3, 3, 3, "not scheduled");
}
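The failure point is governed by the one-minute error Duration passed to the client: the two ticker increments of 30 s and 31 s put the time since the last success at 61 s, just past the threshold, which is why only the third request fails. A minimal sketch of that Duration comparison, assuming io.airlift.units.Duration is Comparable as in airlift-units (the class name is illustrative):

import io.airlift.units.Duration;

import java.util.concurrent.TimeUnit;

public class ErrorDurationSketch
{
    public static void main(String[] args)
    {
        Duration minErrorDuration = new Duration(1, TimeUnit.MINUTES);
        Duration sinceLastSuccess = new Duration(30 + 31, TimeUnit.SECONDS);

        // 61s > 60s, so the client is past its minimum error duration
        boolean pastThreshold = sinceLastSuccess.compareTo(minErrorDuration) > 0;
        System.out.println(pastThreshold); // true
    }
}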
Use of io.airlift.units.DataSize in project presto by prestodb: class TestHttpPageBufferClient, method testHappyPath.
@Test
public void testHappyPath()
        throws Exception
{
    Page expectedPage = new Page(100);
    DataSize expectedMaxSize = new DataSize(11, Unit.MEGABYTE);
    MockExchangeRequestProcessor processor = new MockExchangeRequestProcessor(expectedMaxSize);
    CyclicBarrier requestComplete = new CyclicBarrier(2);
    TestingClientCallback callback = new TestingClientCallback(requestComplete);
    URI location = URI.create("http://localhost:8080");
    HttpPageBufferClient client = new HttpPageBufferClient(
            new TestingHttpClient(processor, executor),
            expectedMaxSize,
            new Duration(1, TimeUnit.MINUTES),
            new Duration(1, TimeUnit.MINUTES),
            location,
            callback,
            executor);
    assertStatus(client, location, "queued", 0, 0, 0, 0, "not scheduled");
    // fetch a page and verify
    processor.addPage(location, expectedPage);
    callback.resetStats();
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 1);
    assertPageEquals(expectedPage, callback.getPages().get(0));
    assertEquals(callback.getCompletedRequests(), 1);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertStatus(client, location, "queued", 1, 1, 1, 0, "not scheduled");
    // fetch no data and verify
    callback.resetStats();
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 1);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertStatus(client, location, "queued", 1, 2, 2, 0, "not scheduled");
    // fetch two more pages and verify
    processor.addPage(location, expectedPage);
    processor.addPage(location, expectedPage);
    callback.resetStats();
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 2);
    assertPageEquals(expectedPage, callback.getPages().get(0));
    assertPageEquals(expectedPage, callback.getPages().get(1));
    assertEquals(callback.getCompletedRequests(), 1);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertEquals(callback.getFailedBuffers(), 0);
    callback.resetStats();
    assertStatus(client, location, "queued", 3, 3, 3, 0, "not scheduled");
    // finish and verify
    callback.resetStats();
    processor.setComplete(location);
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    // get the buffer complete signal
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 1);
    // schedule the delete call to the buffer
    callback.resetStats();
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getFinishedBuffers(), 1);
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 0);
    assertEquals(callback.getFailedBuffers(), 0);
    assertStatus(client, location, "closed", 3, 5, 5, 0, "not scheduled");
}
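The same 11 MB expectedMaxSize is handed to both the mock processor and the client, so the processor can check that the size the client requests matches what it was configured with. One convenient property behind passing a limit around like this is that DataSize round-trips through its string form; a small sketch, assuming DataSize.valueOf and toString from airlift-units (the class name is illustrative):

import io.airlift.units.DataSize;
import io.airlift.units.DataSize.Unit;

public class MaxSizeSketch
{
    public static void main(String[] args)
    {
        DataSize expectedMaxSize = new DataSize(11, Unit.MEGABYTE);

        // the text form (e.g. "11MB") parses back to an equal DataSize,
        // which makes it easy to transmit the limit and compare it later
        String text = expectedMaxSize.toString();
        DataSize parsed = DataSize.valueOf(text);
        System.out.println(parsed.equals(expectedMaxSize)); // true
    }
}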