Use of io.airlift.units.DataSize in the Presto project (prestodb):
class TestOrcStorageManager, method testMaxFileSize.
@Test
public void testMaxFileSize()
        throws Exception
{
    List<Long> columnIds = ImmutableList.of(3L, 7L);
    List<Type> columnTypes = ImmutableList.of(BIGINT, createVarcharType(5));

    // Two tiny rows — far below the shard row limit of 20
    List<Page> inputPages = rowPagesBuilder(columnTypes)
            .row(123L, "hello")
            .row(456L, "bye")
            .build();

    // With maxFileSize set to a single byte, appending any page must make the sink report full
    OrcStorageManager manager = createOrcStorageManager(20, new DataSize(1, BYTE));
    StoragePageSink pageSink = createStoragePageSink(manager, columnIds, columnTypes);
    pageSink.appendPages(inputPages);
    assertTrue(pageSink.isFull());
}
Use of io.airlift.units.DataSize in the Presto project (prestodb):
class TestOrcStorageManager, method testMaxShardRows.
@Test
public void testMaxShardRows()
        throws Exception
{
    List<Long> columnIds = ImmutableList.of(3L, 7L);
    List<Type> columnTypes = ImmutableList.of(BIGINT, createVarcharType(10));

    // Storage manager capped at 2 rows per shard; the 2 MB size limit is never reached here
    OrcStorageManager manager = createOrcStorageManager(2, new DataSize(2, MEGABYTE));
    StoragePageSink pageSink = createStoragePageSink(manager, columnIds, columnTypes);

    // Appending exactly 2 rows hits the row cap, so the sink must report full
    List<Page> inputPages = rowPagesBuilder(columnTypes)
            .row(123L, "hello")
            .row(456L, "bye")
            .build();
    pageSink.appendPages(inputPages);
    assertTrue(pageSink.isFull());
}
Use of io.airlift.units.DataSize in the Presto project (prestodb):
class OrcTestingUtil, method createReaderNoRows.
/**
 * Opens an ORC reader over {@code dataSource}, asserts the file declares no columns,
 * and returns a record reader selecting no columns.
 */
public static OrcRecordReader createReaderNoRows(OrcDataSource dataSource)
        throws IOException
{
    // 1 MB for both the max merge distance and the max read size
    DataSize oneMegabyte = new DataSize(1, Unit.MEGABYTE);
    OrcReader orcReader = new OrcReader(dataSource, new OrcMetadataReader(), oneMegabyte, oneMegabyte);
    assertEquals(orcReader.getColumnNames().size(), 0);
    return createRecordReader(orcReader, ImmutableMap.of());
}
Use of io.airlift.units.DataSize in the Presto project (prestodb):
class TestFileResourceGroupConfigurationManager, method testConfiguration.
@Test
public void testConfiguration()
{
    ResourceGroupConfigurationManager manager = parse("resource_groups_config.json");
    ResourceGroupId globalId = new ResourceGroupId("global");

    // Top-level "global" group: every property should come straight from the JSON spec
    ResourceGroup globalGroup = new TestingResourceGroup(globalId);
    manager.configure(globalGroup, new SelectionContext(true, "user", Optional.empty(), 1));
    assertEquals(globalGroup.getSoftMemoryLimit(), new DataSize(1, MEGABYTE));
    assertEquals(globalGroup.getSoftCpuLimit(), new Duration(1, HOURS));
    assertEquals(globalGroup.getHardCpuLimit(), new Duration(1, DAYS));
    assertEquals(globalGroup.getCpuQuotaGenerationMillisPerSecond(), 1000 * 24);
    assertEquals(globalGroup.getMaxQueuedQueries(), 1000);
    assertEquals(globalGroup.getMaxRunningQueries(), 100);
    assertEquals(globalGroup.getSchedulingPolicy(), WEIGHTED);
    assertEquals(globalGroup.getSchedulingWeight(), 0);
    assertEquals(globalGroup.getJmxExport(), true);

    // Nested "global.sub" group: only the properties present in its spec are set;
    // unspecified ones (e.g. scheduling policy) stay at their defaults
    ResourceGroup subGroup = new TestingResourceGroup(new ResourceGroupId(globalId, "sub"));
    manager.configure(subGroup, new SelectionContext(true, "user", Optional.empty(), 1));
    assertEquals(subGroup.getSoftMemoryLimit(), new DataSize(2, MEGABYTE));
    assertEquals(subGroup.getMaxRunningQueries(), 3);
    assertEquals(subGroup.getMaxQueuedQueries(), 4);
    assertEquals(subGroup.getSchedulingPolicy(), null);
    assertEquals(subGroup.getSchedulingWeight(), 5);
    assertEquals(subGroup.getJmxExport(), false);
}
Use of io.airlift.units.DataSize in the Presto project (prestodb):
class RcFileTester, method writeRcFileColumnNew.
/**
 * Writes {@code values} as a single-column RCFile to {@code outputFile} using the new
 * {@link RcFileWriter}, validates the written file, and returns the written size in bytes.
 *
 * @param outputFile  destination file (overwritten)
 * @param format      RCFile vector encoding to use
 * @param compression codec applied to the written data
 * @param type        Presto type of the single column
 * @param values      column values, consumed fully into one page
 * @param metadata    key/value metadata stored in the file
 * @return the number of bytes written, as a {@link DataSize}
 */
private static DataSize writeRcFileColumnNew(File outputFile, Format format, Compression compression, Type type, Iterator<?> values, Map<String, String> metadata)
        throws Exception
{
    // try-with-resources: the original leaked the FileOutputStream if any later call threw
    // before writer.close() was reached
    try (OutputStreamSliceOutput output = new OutputStreamSliceOutput(new FileOutputStream(outputFile))) {
        AircompressorCodecFactory codecFactory = new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader()));
        RcFileWriter writer = new RcFileWriter(
                output,
                ImmutableList.of(type),
                format.getVectorEncoding(),
                compression.getCodecName(),
                codecFactory,
                metadata,
                // use a smaller size to create more row groups
                new DataSize(100, KILOBYTE),
                new DataSize(200, KILOBYTE),
                true);

        // Collect all values into one block and write them as a single page
        BlockBuilder blockBuilder = type.createBlockBuilder(new BlockBuilderStatus(), 1024);
        while (values.hasNext()) {
            Object value = values.next();
            writeValue(type, blockBuilder, value);
        }
        writer.write(new Page(blockBuilder.build()));
        writer.close();

        // Re-read the file to verify the writer produced a well-formed RCFile
        writer.validate(new FileRcFileDataSource(outputFile));
        return new DataSize(output.size(), BYTE);
    }
}
Aggregations