Use of io.airlift.units.DataSize in project presto by prestodb.
The class TestHttpPageBufferClient, method testLifecycle.
@Test
public void testLifecycle()
        throws Exception
{
    CyclicBarrier beforeRequest = new CyclicBarrier(2);
    CyclicBarrier afterRequest = new CyclicBarrier(2);
    StaticRequestProcessor processor = new StaticRequestProcessor(beforeRequest, afterRequest);
    processor.setResponse(new TestingResponse(HttpStatus.NO_CONTENT, ImmutableListMultimap.of(), new byte[0]));

    CyclicBarrier requestComplete = new CyclicBarrier(2);
    TestingClientCallback callback = new TestingClientCallback(requestComplete);

    URI location = URI.create("http://localhost:8080");
    HttpPageBufferClient client = new HttpPageBufferClient(
            new TestingHttpClient(processor, executor),
            new DataSize(10, Unit.MEGABYTE),
            new Duration(1, TimeUnit.MINUTES),
            new Duration(1, TimeUnit.MINUTES),
            location,
            callback,
            executor);

    assertStatus(client, location, "queued", 0, 0, 0, 0, "not scheduled");

    // schedule a request: the client reports "running" while the request is in flight
    client.scheduleRequest();
    beforeRequest.await(10, TimeUnit.SECONDS);
    assertStatus(client, location, "running", 0, 1, 0, 0, "PROCESSING_REQUEST");
    assertEquals(client.isRunning(), true);
    afterRequest.await(10, TimeUnit.SECONDS);

    requestComplete.await(10, TimeUnit.SECONDS);
    assertStatus(client, location, "queued", 0, 1, 1, 1, "not scheduled");

    // closing issues one final request; the client stays "closed" afterwards
    client.close();
    beforeRequest.await(10, TimeUnit.SECONDS);
    assertStatus(client, location, "closed", 0, 1, 1, 1, "PROCESSING_REQUEST");
    afterRequest.await(10, TimeUnit.SECONDS);
    requestComplete.await(10, TimeUnit.SECONDS);
    assertStatus(client, location, "closed", 0, 1, 2, 1, "not scheduled");
}
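Here the second constructor argument, new DataSize(10, Unit.MEGABYTE), is the maximum amount of page data the client asks for per request. A minimal sketch of the DataSize calls involved, assuming the constructor-based io.airlift.units API that this Presto code targets (the class name is illustrative):

import io.airlift.units.DataSize;
import io.airlift.units.DataSize.Unit;

public class MaxResponseSizeSketch
{
    public static void main(String[] args)
    {
        // value-and-unit constructor, as used for the client's max response size
        DataSize maxResponseSize = new DataSize(10, Unit.MEGABYTE);
        // raw byte count for buffer arithmetic: 10 * 1024 * 1024 = 10485760
        System.out.println(maxResponseSize.toBytes());
        // canonical airlift rendering: "10MB"
        System.out.println(maxResponseSize);
    }
}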
Use of io.airlift.units.DataSize in project presto by prestodb.
The class MockExchangeRequestProcessor, method handle.
@Override
public Response handle(Request request)
        throws Exception
{
    if (request.getMethod().equalsIgnoreCase("DELETE")) {
        return new TestingResponse(HttpStatus.NO_CONTENT, ImmutableListMultimap.of(), new byte[0]);
    }

    // verify we got a data size and it parses correctly
    assertFalse(request.getHeaders().get(PrestoHeaders.PRESTO_MAX_SIZE).isEmpty());
    DataSize maxSize = DataSize.valueOf(request.getHeader(PrestoHeaders.PRESTO_MAX_SIZE));
    assertEquals(maxSize, expectedMaxSize);

    RequestLocation requestLocation = new RequestLocation(request.getUri());
    URI location = requestLocation.getLocation();
    BufferResult result = buffers.getUnchecked(location).getPages(requestLocation.getSequenceId(), maxSize);

    byte[] bytes = new byte[0];
    HttpStatus status;
    if (!result.getSerializedPages().isEmpty()) {
        DynamicSliceOutput sliceOutput = new DynamicSliceOutput(64);
        PagesSerdeUtil.writeSerializedPages(sliceOutput, result.getSerializedPages());
        bytes = sliceOutput.slice().getBytes();
        status = HttpStatus.OK;
    }
    else {
        status = HttpStatus.NO_CONTENT;
    }
    return new TestingResponse(
            status,
            ImmutableListMultimap.of(
                    CONTENT_TYPE, PRESTO_PAGES,
                    PRESTO_TASK_INSTANCE_ID, String.valueOf(result.getTaskInstanceId()),
                    PRESTO_PAGE_TOKEN, String.valueOf(result.getToken()),
                    PRESTO_PAGE_NEXT_TOKEN, String.valueOf(result.getNextToken()),
                    PRESTO_BUFFER_COMPLETE, String.valueOf(result.isBufferComplete())),
            bytes);
}
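The DataSize detail worth noting here is the header round trip: the client serializes its max size into the PRESTO_MAX_SIZE header, and the mock parses it back with DataSize.valueOf before asserting equality. A sketch of that round trip, under the same airlift API assumptions as above (the 16MB value and class name are illustrative):

import io.airlift.units.DataSize;
import io.airlift.units.DataSize.Unit;

public class HeaderRoundTripSketch
{
    public static void main(String[] args)
    {
        DataSize expectedMaxSize = new DataSize(16, Unit.MEGABYTE);
        String header = expectedMaxSize.toString();   // "16MB", what goes on the wire
        DataSize parsed = DataSize.valueOf(header);   // what the handler reads back
        // DataSize equality is defined on the underlying byte count, so the round trip holds
        if (!parsed.equals(expectedMaxSize)) {
            throw new AssertionError("header round trip lost precision");
        }
    }
}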
Use of io.airlift.units.DataSize in project presto by prestodb.
The class TestHashAggregationOperator, method testHashAggregationWithGlobals.
@Test(dataProvider = "hashEnabled")
public void testHashAggregationWithGlobals(boolean hashEnabled)
        throws Exception
{
    MetadataManager metadata = MetadataManager.createTestMetadataManager();
    InternalAggregationFunction countVarcharColumn = metadata.getFunctionRegistry().getAggregateFunctionImplementation(new Signature("count", AGGREGATE, parseTypeSignature(StandardTypes.BIGINT), parseTypeSignature(StandardTypes.VARCHAR)));
    InternalAggregationFunction countBooleanColumn = metadata.getFunctionRegistry().getAggregateFunctionImplementation(new Signature("count", AGGREGATE, parseTypeSignature(StandardTypes.BIGINT), parseTypeSignature(StandardTypes.BOOLEAN)));
    InternalAggregationFunction maxVarcharColumn = metadata.getFunctionRegistry().getAggregateFunctionImplementation(new Signature("max", AGGREGATE, parseTypeSignature(StandardTypes.VARCHAR), parseTypeSignature(StandardTypes.VARCHAR)));

    Optional<Integer> groupIdChannel = Optional.of(1);
    List<Integer> groupByChannels = Ints.asList(1, 2);
    List<Integer> globalAggregationGroupIds = Ints.asList(42, 49);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, groupByChannels, VARCHAR, VARCHAR, VARCHAR, BIGINT, BIGINT, BOOLEAN);
    // no pages are added, so only the global aggregation group ids (42 and 49) produce output rows
    List<Page> input = rowPagesBuilder.build();

    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(VARCHAR, BIGINT),
            groupByChannels,
            globalAggregationGroupIds,
            Step.SINGLE,
            ImmutableList.of(
                    COUNT.bind(ImmutableList.of(0), Optional.empty()),
                    LONG_SUM.bind(ImmutableList.of(4), Optional.empty()),
                    LONG_AVERAGE.bind(ImmutableList.of(4), Optional.empty()),
                    maxVarcharColumn.bind(ImmutableList.of(2), Optional.empty()),
                    countVarcharColumn.bind(ImmutableList.of(0), Optional.empty()),
                    countBooleanColumn.bind(ImmutableList.of(5), Optional.empty())),
            rowPagesBuilder.getHashChannel(),
            groupIdChannel,
            100_000,
            new DataSize(16, MEGABYTE),
            joinCompiler);

    MaterializedResult expected = resultBuilder(driverContext.getSession(), VARCHAR, BIGINT, BIGINT, BIGINT, DOUBLE, VARCHAR, BIGINT, BIGINT)
            .row(null, 42L, 0L, null, null, null, 0L, 0L)
            .row(null, 49L, 0L, null, null, null, 0L, 0L)
            .build();

    assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected, hashEnabled, Optional.of(groupByChannels.size()));
}
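The new DataSize(16, MEGABYTE) argument is the operator factory's memory budget. A hedged sketch of equivalent ways to spell that budget, assuming the same airlift API; all three compare equal because DataSize equality is defined on the byte count rather than the unit:

import io.airlift.units.DataSize;
import io.airlift.units.DataSize.Unit;

public class EquivalentSizesSketch
{
    public static void main(String[] args)
    {
        DataSize a = new DataSize(16, Unit.MEGABYTE);       // value-and-unit constructor
        DataSize b = DataSize.valueOf("16MB");              // parsed from the string form
        DataSize c = DataSize.succinctBytes(16L * 1024 * 1024); // normalized from a raw byte count
        System.out.println(a.equals(b) && b.equals(c));     // true
    }
}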
Use of io.airlift.units.DataSize in project presto by prestodb.
The class TestHashAggregationOperator, method testSpillerFailure.
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = ".* Failed to spill")
public void testSpillerFailure()
{
    MetadataManager metadata = MetadataManager.createTestMetadataManager();
    InternalAggregationFunction maxVarcharColumn = metadata.getFunctionRegistry().getAggregateFunctionImplementation(new Signature("max", AGGREGATE, parseTypeSignature(StandardTypes.VARCHAR), parseTypeSignature(StandardTypes.VARCHAR)));

    List<Integer> hashChannels = Ints.asList(1);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, VARCHAR, BIGINT, VARCHAR, BIGINT);
    List<Page> input = rowPagesBuilder
            .addSequencePage(10, 100, 0, 100, 0)
            .addSequencePage(10, 100, 0, 200, 0)
            .addSequencePage(10, 100, 0, 300, 0)
            .build();

    // a 10-byte task memory limit guarantees the aggregation must spill
    DriverContext driverContext = createTaskContext(executor, TEST_SESSION, new DataSize(10, Unit.BYTE))
            .addPipelineContext(0, true, true)
            .addDriverContext();

    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BIGINT),
            hashChannels,
            ImmutableList.of(),
            Step.SINGLE,
            ImmutableList.of(
                    COUNT.bind(ImmutableList.of(0), Optional.empty()),
                    LONG_SUM.bind(ImmutableList.of(3), Optional.empty()),
                    LONG_AVERAGE.bind(ImmutableList.of(3), Optional.empty()),
                    maxVarcharColumn.bind(ImmutableList.of(2), Optional.empty())),
            rowPagesBuilder.getHashChannel(),
            Optional.empty(),
            100_000,
            new DataSize(16, MEGABYTE),
            true,
            succinctBytes(8),
            succinctBytes(Integer.MAX_VALUE),
            new FailingSpillerFactory(),
            joinCompiler);

    toPages(operatorFactory, driverContext, input);
}
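This test mixes two ways of constructing sizes: new DataSize(10, Unit.BYTE) as a deliberately tiny task memory limit and succinctBytes(...) for the spill thresholds, so spilling is forced and the FailingSpillerFactory's exception surfaces. succinctBytes normalizes a raw byte count to the most compact unit; a sketch, assuming the same airlift API:

import io.airlift.units.DataSize;

public class SuccinctBytesSketch
{
    public static void main(String[] args)
    {
        // small counts keep the BYTE unit
        System.out.println(DataSize.succinctBytes(8));    // "8B"
        // larger counts fold into the biggest unit whose value is still >= 1
        System.out.println(DataSize.succinctBytes(8192)); // "8kB"
    }
}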
Use of io.airlift.units.DataSize in project presto by prestodb.
The class TestHashAggregationOperator, method testMultiSliceAggregationOutput.
@Test(dataProvider = "hashEnabled")
public void testMultiSliceAggregationOutput(boolean hashEnabled)
{
    // estimate the number of entries required to create 1.5 pages of results
    int fixedWidthSize = SIZE_OF_LONG + SIZE_OF_DOUBLE + SIZE_OF_DOUBLE;
    int multiSlicePositionCount = (int) (1.5 * PageBuilderStatus.DEFAULT_MAX_PAGE_SIZE_IN_BYTES / fixedWidthSize);
    multiSlicePositionCount = Math.min((int) (1.5 * DEFAULT_MAX_BLOCK_SIZE_IN_BYTES / SIZE_OF_DOUBLE), multiSlicePositionCount);

    List<Integer> hashChannels = Ints.asList(1);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, BIGINT, BIGINT);
    List<Page> input = rowPagesBuilder.addSequencePage(multiSlicePositionCount, 0, 0).build();

    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BIGINT),
            hashChannels,
            ImmutableList.of(),
            Step.SINGLE,
            ImmutableList.of(
                    COUNT.bind(ImmutableList.of(0), Optional.empty()),
                    LONG_AVERAGE.bind(ImmutableList.of(1), Optional.empty())),
            rowPagesBuilder.getHashChannel(),
            Optional.empty(),
            100_000,
            new DataSize(16, MEGABYTE),
            joinCompiler);

    assertEquals(toPages(operatorFactory, driverContext, input).size(), 2);
}
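The position count above is pure byte arithmetic; a DataSize can make the page budget explicit. A hedged re-derivation, treating the roughly 1MB default page size as an assumption rather than reading PageBuilderStatus, with the field widths taken from the test (8 bytes each for SIZE_OF_LONG and SIZE_OF_DOUBLE):

import io.airlift.units.DataSize;
import io.airlift.units.DataSize.Unit;

public class PositionCountSketch
{
    public static void main(String[] args)
    {
        // stand-in for PageBuilderStatus.DEFAULT_MAX_PAGE_SIZE_IN_BYTES (assumed ~1MB)
        DataSize maxPageSize = new DataSize(1, Unit.MEGABYTE);
        int fixedWidthSize = 8 + 8 + 8; // SIZE_OF_LONG + SIZE_OF_DOUBLE + SIZE_OF_DOUBLE
        // entries needed to fill about 1.5 pages of fixed-width results
        int positions = (int) (1.5 * maxPageSize.toBytes() / fixedWidthSize);
        System.out.println(positions);
    }
}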