
Example 46 with DataSize

Use of io.airlift.units.DataSize in project presto by prestodb.

The class BackupStats, method addCopyShardDataRate:

public void addCopyShardDataRate(DataSize size, Duration duration) {
    DataSize rate = dataRate(size, duration).convertToMostSuccinctDataSize();
    copyToBackupBytesPerSecond.add(Math.round(rate.toBytes()));
    copyToBackupShardSizeBytes.add(size.toBytes());
    copyToBackupTimeInMilliSeconds.add(duration.toMillis());
}
Also used: DataSize (io.airlift.units.DataSize)
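
The dataRate helper called above is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming it simply divides the shard size by the elapsed time; the class name DataRateSketch and the method body are illustrative assumptions, not the Presto implementation.

import io.airlift.units.DataSize;
import io.airlift.units.Duration;

import static io.airlift.units.DataSize.Unit.BYTE;
import static java.util.concurrent.TimeUnit.SECONDS;

public final class DataRateSketch {
    private DataRateSketch() {}

    // Assumed behavior: bytes transferred divided by elapsed seconds, guarding against a zero duration.
    public static DataSize dataRate(DataSize size, Duration duration) {
        double seconds = Math.max(duration.getValue(SECONDS), 0.000001);
        return new DataSize(size.toBytes() / seconds, BYTE);
    }
}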

Example 47 with DataSize

Use of io.airlift.units.DataSize in project presto by prestodb.

The class TestStorageManagerConfig, method testExplicitPropertyMappings:

@Test
public void testExplicitPropertyMappings() {
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("storage.data-directory", "/data")
            .put("storage.min-available-space", "123GB")
            .put("storage.orc.max-merge-distance", "16kB")
            .put("storage.orc.max-read-size", "16kB")
            .put("storage.orc.stream-buffer-size", "16kB")
            .put("storage.max-deletion-threads", "999")
            .put("storage.shard-recovery-timeout", "1m")
            .put("storage.missing-shard-discovery-interval", "4m")
            .put("storage.compaction-enabled", "false")
            .put("storage.compaction-interval", "4h")
            .put("storage.organization-enabled", "false")
            .put("storage.organization-interval", "4h")
            .put("storage.ejector-interval", "9h")
            .put("storage.max-recovery-threads", "12")
            .put("storage.max-organization-threads", "12")
            .put("storage.max-shard-rows", "10000")
            .put("storage.max-shard-size", "10MB")
            .put("storage.max-buffer-size", "512MB")
            .put("storage.one-split-per-bucket-threshold", "4")
            .build();
    StorageManagerConfig expected = new StorageManagerConfig()
            .setDataDirectory(new File("/data"))
            .setMinAvailableSpace(new DataSize(123, GIGABYTE))
            .setOrcMaxMergeDistance(new DataSize(16, KILOBYTE))
            .setOrcMaxReadSize(new DataSize(16, KILOBYTE))
            .setOrcStreamBufferSize(new DataSize(16, KILOBYTE))
            .setDeletionThreads(999)
            .setShardRecoveryTimeout(new Duration(1, MINUTES))
            .setMissingShardDiscoveryInterval(new Duration(4, MINUTES))
            .setCompactionEnabled(false)
            .setCompactionInterval(new Duration(4, HOURS))
            .setOrganizationEnabled(false)
            .setOrganizationInterval(new Duration(4, HOURS))
            .setShardEjectorInterval(new Duration(9, HOURS))
            .setRecoveryThreads(12)
            .setOrganizationThreads(12)
            .setMaxShardRows(10_000)
            .setMaxShardSize(new DataSize(10, MEGABYTE))
            .setMaxBufferSize(new DataSize(512, MEGABYTE))
            .setOneSplitPerBucketThreshold(4);
    assertFullMapping(properties, expected);
}
Also used: DataSize (io.airlift.units.DataSize), Duration (io.airlift.units.Duration), File (java.io.File), Test (org.testng.annotations.Test)
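
The string values in the property map ("123GB", "16kB", "10MB", ...) are turned into DataSize instances by the config binding; DataSize.valueOf performs the same parsing. A small sketch of that conversion done by hand (the class ParseSketch is only for illustration):

import io.airlift.units.DataSize;

import static io.airlift.units.DataSize.Unit.GIGABYTE;
import static io.airlift.units.DataSize.Unit.KILOBYTE;

public final class ParseSketch {
    public static void main(String[] args) {
        // DataSize.valueOf accepts the same "<value><unit>" strings used in the property map above.
        DataSize minAvailableSpace = DataSize.valueOf("123GB");
        DataSize maxMergeDistance = DataSize.valueOf("16kB");

        System.out.println(minAvailableSpace.equals(new DataSize(123, GIGABYTE))); // true
        System.out.println(maxMergeDistance.equals(new DataSize(16, KILOBYTE)));   // true
    }
}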

Example 48 with DataSize

Use of io.airlift.units.DataSize in project presto by prestodb.

The class OrcTestingUtil, method createReader:

public static OrcRecordReader createReader(OrcDataSource dataSource, List<Long> columnIds, List<Type> types) throws IOException {
    OrcReader orcReader = new OrcReader(dataSource, new OrcMetadataReader(), new DataSize(1, Unit.MEGABYTE), new DataSize(1, Unit.MEGABYTE));
    List<String> columnNames = orcReader.getColumnNames();
    assertEquals(columnNames.size(), columnIds.size());
    Map<Integer, Type> includedColumns = new HashMap<>();
    int ordinal = 0;
    for (long columnId : columnIds) {
        assertEquals(columnNames.get(ordinal), String.valueOf(columnId));
        includedColumns.put(ordinal, types.get(ordinal));
        ordinal++;
    }
    return createRecordReader(orcReader, includedColumns);
}
Also used: Type (com.facebook.presto.spi.type.Type), OrcReader (com.facebook.presto.orc.OrcReader), HashMap (java.util.HashMap), OrcMetadataReader (com.facebook.presto.orc.metadata.OrcMetadataReader), DataSize (io.airlift.units.DataSize)
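
A hypothetical way to call createReader, assuming an already-opened OrcDataSource for a file with a single BIGINT column whose column id is 0, and assuming the batch-oriented OrcRecordReader API of this Presto version (nextBatch). The class ReadAllRowsSketch is illustrative and is assumed to sit in the same package as OrcTestingUtil.

import com.facebook.presto.orc.OrcDataSource;
import com.facebook.presto.orc.OrcRecordReader;
import com.google.common.collect.ImmutableList;

import java.io.IOException;

import static com.facebook.presto.spi.type.BigintType.BIGINT;

public final class ReadAllRowsSketch {
    private ReadAllRowsSketch() {}

    // Counts the rows of a single BIGINT column (column id 0) by iterating the reader batch by batch.
    public static long countRows(OrcDataSource dataSource) throws IOException {
        OrcRecordReader reader = OrcTestingUtil.createReader(dataSource, ImmutableList.of(0L), ImmutableList.of(BIGINT));
        long rows = 0;
        for (int batchSize = reader.nextBatch(); batchSize > 0; batchSize = reader.nextBatch()) {
            rows += batchSize;
        }
        reader.close();
        return rows;
    }
}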

Example 49 with DataSize

Use of io.airlift.units.DataSize in project presto by prestodb.

The class RcFileTester, method createRcFileReader:

private static RcFileReader createRcFileReader(TempFile tempFile, Type type, RcFileEncoding encoding) throws IOException {
    RcFileDataSource rcFileDataSource = new FileRcFileDataSource(tempFile.getFile());
    RcFileReader rcFileReader = new RcFileReader(
            rcFileDataSource,
            encoding,
            ImmutableMap.of(0, type),
            new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader())),
            0,
            tempFile.getFile().length(),
            new DataSize(8, MEGABYTE));
    assertEquals(rcFileReader.getColumnCount(), 1);
    return rcFileReader;
}
Also used: DataSize (io.airlift.units.DataSize)
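
The trailing new DataSize(8, MEGABYTE) argument is another size value handed to the reader, like the ones in the ORC example above. A short sketch of the conversion helpers DataSize offers, with the results for 8MB in the comments (assuming the io.airlift.units API used throughout these examples):

import io.airlift.units.DataSize;

import static io.airlift.units.DataSize.Unit.KILOBYTE;
import static io.airlift.units.DataSize.Unit.MEGABYTE;

public final class DataSizeConversionSketch {
    public static void main(String[] args) {
        DataSize bufferSize = new DataSize(8, MEGABYTE);
        System.out.println(bufferSize.toBytes());                       // 8388608
        System.out.println(bufferSize.convertTo(KILOBYTE));             // 8192kB
        System.out.println(bufferSize.convertToMostSuccinctDataSize()); // 8MB
    }
}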

Example 50 with DataSize

Use of io.airlift.units.DataSize in project presto by prestodb.

The class AbstractResourceConfigurationManager, method configureGroup:

protected void configureGroup(ResourceGroup group, ResourceGroupSpec match) {
    if (match.getSoftMemoryLimit().isPresent()) {
        group.setSoftMemoryLimit(match.getSoftMemoryLimit().get());
    } else {
        // No absolute limit configured: derive it from the current general pool size, and remember
        // the fraction so the limit can be recomputed when the pool size changes.
        synchronized (generalPoolMemoryFraction) {
            double fraction = match.getSoftMemoryLimitFraction().get();
            generalPoolMemoryFraction.put(group, fraction);
            group.setSoftMemoryLimit(new DataSize(generalPoolBytes * fraction, BYTE));
        }
        }
    }
    group.setMaxQueuedQueries(match.getMaxQueued());
    group.setMaxRunningQueries(match.getMaxRunning());
    if (match.getSchedulingPolicy().isPresent()) {
        group.setSchedulingPolicy(match.getSchedulingPolicy().get());
    }
    if (match.getSchedulingWeight().isPresent()) {
        group.setSchedulingWeight(match.getSchedulingWeight().get());
    }
    if (match.getJmxExport().isPresent()) {
        group.setJmxExport(match.getJmxExport().get());
    }
    if (match.getSoftCpuLimit().isPresent() || match.getHardCpuLimit().isPresent()) {
        // This will never throw an exception if the validateManagerSpec method succeeds
        checkState(getCpuQuotaPeriodMillis().isPresent(), "Must specify hard CPU limit in addition to soft limit");
        Duration limit;
        if (match.getHardCpuLimit().isPresent()) {
            limit = match.getHardCpuLimit().get();
        } else {
            limit = match.getSoftCpuLimit().get();
        }
        long rate = (long) Math.min(1000.0 * limit.toMillis() / (double) getCpuQuotaPeriodMillis().get().toMillis(), Long.MAX_VALUE);
        rate = Math.max(1, rate);
        group.setCpuQuotaGenerationMillisPerSecond(rate);
    }
    if (match.getSoftCpuLimit().isPresent()) {
        group.setSoftCpuLimit(match.getSoftCpuLimit().get());
    }
    if (match.getHardCpuLimit().isPresent()) {
        group.setHardCpuLimit(match.getHardCpuLimit().get());
    }
}
Also used: DataSize (io.airlift.units.DataSize), Duration (io.airlift.units.Duration)
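
A worked instance of the CPU quota arithmetic in configureGroup, using illustrative values (a one-hour hard CPU limit over a one-hour quota period; both numbers are assumptions for the example, not Presto defaults):

public final class CpuQuotaSketch {
    public static void main(String[] args) {
        long limitMillis = 60L * 60 * 1000;        // hard CPU limit: 1 hour of CPU time
        long quotaPeriodMillis = 60L * 60 * 1000;  // quota period: 1 hour of wall-clock time

        long rate = (long) Math.min(1000.0 * limitMillis / quotaPeriodMillis, Long.MAX_VALUE);
        rate = Math.max(1, rate);

        // Prints 1000: the group may accrue 1000 ms of CPU per wall-clock second, i.e. one full core.
        System.out.println(rate);
    }
}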

Aggregations

DataSize (io.airlift.units.DataSize): 114 uses
Test (org.testng.annotations.Test): 71 uses
Duration (io.airlift.units.Duration): 36 uses
Page (com.facebook.presto.spi.Page): 23 uses
PlanNodeId (com.facebook.presto.sql.planner.plan.PlanNodeId): 19 uses
RowPagesBuilder (com.facebook.presto.RowPagesBuilder): 11 uses
HashAggregationOperatorFactory (com.facebook.presto.operator.HashAggregationOperator.HashAggregationOperatorFactory): 11 uses
URI (java.net.URI): 11 uses
MockQueryExecution (com.facebook.presto.execution.MockQueryExecution): 10 uses
RootInternalResourceGroup (com.facebook.presto.execution.resourceGroups.InternalResourceGroup.RootInternalResourceGroup): 10 uses
TestingHttpClient (io.airlift.http.client.testing.TestingHttpClient): 10 uses
Type (com.facebook.presto.spi.type.Type): 9 uses
MaterializedResult (com.facebook.presto.testing.MaterializedResult): 9 uses
MemoryPoolId (com.facebook.presto.spi.memory.MemoryPoolId): 7 uses
QueryId (com.facebook.presto.spi.QueryId): 6 uses
BufferResult (com.facebook.presto.execution.buffer.BufferResult): 5 uses
MetadataManager (com.facebook.presto.metadata.MetadataManager): 5 uses
TopNOperatorFactory (com.facebook.presto.operator.TopNOperator.TopNOperatorFactory): 5 uses
ImmutableMap (com.google.common.collect.ImmutableMap): 5 uses
ArrayList (java.util.ArrayList): 5 uses