Use of io.airlift.units.DataSize in project presto by prestodb.
Class BackupStats, method addCopyShardDataRate.
public void addCopyShardDataRate(DataSize size, Duration duration) {
    // Record the effective copy rate (bytes per second) alongside the raw shard size and copy time
    DataSize rate = dataRate(size, duration).convertToMostSuccinctDataSize();
    copyToBackupBytesPerSecond.add(Math.round(rate.toBytes()));
    copyToBackupShardSizeBytes.add(size.toBytes());
    copyToBackupTimeInMilliSeconds.add(duration.toMillis());
}
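The dataRate helper is not shown in this snippet. A minimal sketch of a plausible implementation, assuming it divides the copied bytes by the elapsed seconds and guards against a zero-length duration (illustrative only, not the verbatim Presto source; Unit is io.airlift.units.DataSize.Unit and TimeUnit is java.util.concurrent.TimeUnit):

private static DataSize dataRate(DataSize size, Duration duration) {
    // bytes per second; fall back to 0 when the duration rounds to zero
    double rate = size.toBytes() / duration.getValue(TimeUnit.SECONDS);
    if (Double.isNaN(rate) || Double.isInfinite(rate)) {
        rate = 0;
    }
    return new DataSize(rate, Unit.BYTE);
}

Duration.getValue(SECONDS) returns a fractional second count, so even sub-second copies yield a meaningful rate.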
Use of io.airlift.units.DataSize in project presto by prestodb.
Class TestStorageManagerConfig, method testExplicitPropertyMappings.
@Test
public void testExplicitPropertyMappings() {
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("storage.data-directory", "/data")
            .put("storage.min-available-space", "123GB")
            .put("storage.orc.max-merge-distance", "16kB")
            .put("storage.orc.max-read-size", "16kB")
            .put("storage.orc.stream-buffer-size", "16kB")
            .put("storage.max-deletion-threads", "999")
            .put("storage.shard-recovery-timeout", "1m")
            .put("storage.missing-shard-discovery-interval", "4m")
            .put("storage.compaction-enabled", "false")
            .put("storage.compaction-interval", "4h")
            .put("storage.organization-enabled", "false")
            .put("storage.organization-interval", "4h")
            .put("storage.ejector-interval", "9h")
            .put("storage.max-recovery-threads", "12")
            .put("storage.max-organization-threads", "12")
            .put("storage.max-shard-rows", "10000")
            .put("storage.max-shard-size", "10MB")
            .put("storage.max-buffer-size", "512MB")
            .put("storage.one-split-per-bucket-threshold", "4")
            .build();
    StorageManagerConfig expected = new StorageManagerConfig()
            .setDataDirectory(new File("/data"))
            .setMinAvailableSpace(new DataSize(123, GIGABYTE))
            .setOrcMaxMergeDistance(new DataSize(16, KILOBYTE))
            .setOrcMaxReadSize(new DataSize(16, KILOBYTE))
            .setOrcStreamBufferSize(new DataSize(16, KILOBYTE))
            .setDeletionThreads(999)
            .setShardRecoveryTimeout(new Duration(1, MINUTES))
            .setMissingShardDiscoveryInterval(new Duration(4, MINUTES))
            .setCompactionEnabled(false)
            .setCompactionInterval(new Duration(4, HOURS))
            .setOrganizationEnabled(false)
            .setOrganizationInterval(new Duration(4, HOURS))
            .setShardEjectorInterval(new Duration(9, HOURS))
            .setRecoveryThreads(12)
            .setOrganizationThreads(12)
            .setMaxShardRows(10_000)
            .setMaxShardSize(new DataSize(10, MEGABYTE))
            .setMaxBufferSize(new DataSize(512, MEGABYTE))
            .setOneSplitPerBucketThreshold(4);
    assertFullMapping(properties, expected);
}
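assertFullMapping verifies that every property string above maps onto the corresponding setter value of the expected config. The size and duration literals are parsed by airlift's unit classes; a small illustrative fragment (not part of the test) showing the equivalence:

// "123GB" and "1m" parse to the same values the test builds via constructors
DataSize minSpace = DataSize.valueOf("123GB");     // equivalent to new DataSize(123, GIGABYTE)
Duration recoveryTimeout = Duration.valueOf("1m"); // equivalent to new Duration(1, MINUTES)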
Use of io.airlift.units.DataSize in project presto by prestodb.
Class OrcTestingUtil, method createReader.
public static OrcRecordReader createReader(OrcDataSource dataSource, List<Long> columnIds, List<Type> types) throws IOException {
    OrcReader orcReader = new OrcReader(dataSource, new OrcMetadataReader(), new DataSize(1, Unit.MEGABYTE), new DataSize(1, Unit.MEGABYTE));
    List<String> columnNames = orcReader.getColumnNames();
    assertEquals(columnNames.size(), columnIds.size());
    Map<Integer, Type> includedColumns = new HashMap<>();
    int ordinal = 0;
    for (long columnId : columnIds) {
        // the ORC file is expected to name its columns after the column IDs
        assertEquals(columnNames.get(ordinal), String.valueOf(columnId));
        includedColumns.put(ordinal, types.get(ordinal));
        ordinal++;
    }
    return createRecordReader(orcReader, includedColumns);
}
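The 1MB DataSize values passed to OrcReader are read-tuning sizes for the ORC reader. For building the dataSource argument from a plain file, a hypothetical helper, assuming FileOrcDataSource in this version of Presto takes (file, maxMergeDistance, maxReadSize, streamBufferSize):

public static OrcDataSource fileOrcDataSource(File file) throws IOException {
    // 1MB for each tuning knob keeps test readers small; the values are illustrative, not prescriptive
    return new FileOrcDataSource(file, new DataSize(1, Unit.MEGABYTE), new DataSize(1, Unit.MEGABYTE), new DataSize(1, Unit.MEGABYTE));
}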
Use of io.airlift.units.DataSize in project presto by prestodb.
Class RcFileTester, method createRcFileReader.
private static RcFileReader createRcFileReader(TempFile tempFile, Type type, RcFileEncoding encoding) throws IOException {
    RcFileDataSource rcFileDataSource = new FileRcFileDataSource(tempFile.getFile());
    RcFileReader rcFileReader = new RcFileReader(
            rcFileDataSource,
            encoding,
            ImmutableMap.of(0, type),
            new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader())),
            0,
            tempFile.getFile().length(),
            new DataSize(8, MEGABYTE));
    assertEquals(rcFileReader.getColumnCount(), 1);
    return rcFileReader;
}
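DataSize makes byte-count arguments like the 8MB value above explicit instead of passing raw longs; a trivial illustration of the conversion (assuming the same static import of DataSize.Unit.MEGABYTE used in the snippet):

// airlift's MEGABYTE is 2^20 bytes, so 8MB -> 8 * 1,048,576 = 8,388,608 bytes
long bufferBytes = new DataSize(8, MEGABYTE).toBytes(); // 8388608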
Use of io.airlift.units.DataSize in project presto by prestodb.
Class AbstractResourceConfigurationManager, method configureGroup.
protected void configureGroup(ResourceGroup group, ResourceGroupSpec match) {
    if (match.getSoftMemoryLimit().isPresent()) {
        group.setSoftMemoryLimit(match.getSoftMemoryLimit().get());
    } else {
        synchronized (generalPoolMemoryFraction) {
            double fraction = match.getSoftMemoryLimitFraction().get();
            generalPoolMemoryFraction.put(group, fraction);
            group.setSoftMemoryLimit(new DataSize(generalPoolBytes * fraction, BYTE));
        }
    }
    group.setMaxQueuedQueries(match.getMaxQueued());
    group.setMaxRunningQueries(match.getMaxRunning());
    if (match.getSchedulingPolicy().isPresent()) {
        group.setSchedulingPolicy(match.getSchedulingPolicy().get());
    }
    if (match.getSchedulingWeight().isPresent()) {
        group.setSchedulingWeight(match.getSchedulingWeight().get());
    }
    if (match.getJmxExport().isPresent()) {
        group.setJmxExport(match.getJmxExport().get());
    }
    if (match.getSoftCpuLimit().isPresent() || match.getHardCpuLimit().isPresent()) {
        // This will never throw an exception if the validateManagerSpec method succeeds
        checkState(getCpuQuotaPeriodMillis().isPresent(), "Must specify hard CPU limit in addition to soft limit");
        Duration limit;
        if (match.getHardCpuLimit().isPresent()) {
            limit = match.getHardCpuLimit().get();
        } else {
            limit = match.getSoftCpuLimit().get();
        }
        // Convert the CPU limit into milliseconds of CPU quota generated per second of wall-clock time
        long rate = (long) Math.min(1000.0 * limit.toMillis() / (double) getCpuQuotaPeriodMillis().get().toMillis(), Long.MAX_VALUE);
        rate = Math.max(1, rate);
        group.setCpuQuotaGenerationMillisPerSecond(rate);
    }
    if (match.getSoftCpuLimit().isPresent()) {
        group.setSoftCpuLimit(match.getSoftCpuLimit().get());
    }
    if (match.getHardCpuLimit().isPresent()) {
        group.setHardCpuLimit(match.getHardCpuLimit().get());
    }
}
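To make the quota arithmetic concrete, a worked example with hypothetical values (a hard CPU limit of 1 hour against a CPU quota period of 1 day), following the same formula as configureGroup above:

// hypothetical values; TimeUnit is java.util.concurrent.TimeUnit
Duration limit = new Duration(1, TimeUnit.HOURS);
Duration cpuQuotaPeriod = new Duration(1, TimeUnit.DAYS);
long rate = (long) Math.min(1000.0 * limit.toMillis() / cpuQuotaPeriod.toMillis(), Long.MAX_VALUE);
rate = Math.max(1, rate);
// rate == 41: the group accrues roughly 41 ms of CPU quota per second of wall-clock time

The Math.max(1, rate) clamp ensures that a group with a very small limit still regenerates at least 1 ms of quota per second instead of stalling at zero.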