Example usage of io.airlift.units.DataSize in the prestodb/presto project — class OrcTestingUtil, method createReader:
/**
 * Opens an ORC record reader over {@code dataSource}, verifying that the file's
 * column names match the expected column ids (in order) before reading.
 *
 * @param dataSource the ORC data to read
 * @param columnIds expected column ids; the file's column names must equal these, stringified
 * @param types the type of each column, parallel to {@code columnIds}
 * @return a record reader over all listed columns
 * @throws IOException if the ORC metadata cannot be read
 */
public static OrcRecordReader createReader(OrcDataSource dataSource, List<Long> columnIds, List<Type> types) throws IOException {
    // 1MB limits for both the merge distance and the max read size.
    OrcReader orcReader = new OrcReader(dataSource, new OrcMetadataReader(), new DataSize(1, Unit.MEGABYTE), new DataSize(1, Unit.MEGABYTE));
    List<String> columnNames = orcReader.getColumnNames();
    assertEquals(columnNames.size(), columnIds.size());
    // Map each column ordinal to its type, checking that the file stores the
    // column under the stringified id we expect at that position.
    Map<Integer, Type> includedColumns = new HashMap<>();
    for (int i = 0; i < columnIds.size(); i++) {
        assertEquals(columnNames.get(i), String.valueOf(columnIds.get(i)));
        includedColumns.put(i, types.get(i));
    }
    return createRecordReader(orcReader, includedColumns);
}
Example usage of io.airlift.units.DataSize in the prestodb/presto project — class RcFileTester, method createRcFileReader:
/**
 * Opens an RCFile reader over the whole temp file, exposing a single column
 * of the given type at index 0.
 *
 * @param tempFile file containing the RCFile data to read
 * @param type the type of column 0
 * @param encoding the RCFile value encoding to decode with
 * @return a reader positioned at the start of the file
 * @throws IOException if the file cannot be opened
 */
private static RcFileReader createRcFileReader(TempFile tempFile, Type type, RcFileEncoding encoding) throws IOException {
    RcFileDataSource dataSource = new FileRcFileDataSource(tempFile.getFile());
    // Scan the entire file range [0, length) with an 8MB read buffer.
    RcFileReader reader = new RcFileReader(
            dataSource,
            encoding,
            ImmutableMap.of(0, type),
            new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader())),
            0,
            tempFile.getFile().length(),
            new DataSize(8, MEGABYTE));
    // The writer always produces exactly one column in these tests.
    assertEquals(reader.getColumnCount(), 1);
    return reader;
}
Example usage of io.airlift.units.DataSize in the prestodb/presto project — class AbstractResourceConfigurationManager, method configureGroup:
/**
 * Applies the settings from a matched {@link ResourceGroupSpec} to a resource group:
 * memory limit (absolute or as a fraction of the general pool), queue/run limits,
 * scheduling policy and weight, JMX export, and CPU quota limits.
 *
 * <p>The unguarded {@code Optional.get()} calls rely on invariants established by
 * {@code validateManagerSpec}: a spec always has either an absolute soft memory limit
 * or a fraction, and a CPU quota period whenever CPU limits are set.
 */
protected void configureGroup(ResourceGroup group, ResourceGroupSpec match) {
    if (match.getSoftMemoryLimit().isPresent()) {
        group.setSoftMemoryLimit(match.getSoftMemoryLimit().get());
    } else {
        synchronized (generalPoolMemoryFraction) {
            double fraction = match.getSoftMemoryLimitFraction().get();
            // Remember the fraction so the limit can be recomputed when the
            // general pool size changes.
            generalPoolMemoryFraction.put(group, fraction);
            group.setSoftMemoryLimit(new DataSize(generalPoolBytes * fraction, BYTE));
        }
    }
    group.setMaxQueuedQueries(match.getMaxQueued());
    group.setMaxRunningQueries(match.getMaxRunning());
    if (match.getSchedulingPolicy().isPresent()) {
        group.setSchedulingPolicy(match.getSchedulingPolicy().get());
    }
    if (match.getSchedulingWeight().isPresent()) {
        group.setSchedulingWeight(match.getSchedulingWeight().get());
    }
    if (match.getJmxExport().isPresent()) {
        group.setJmxExport(match.getJmxExport().get());
    }
    if (match.getSoftCpuLimit().isPresent() || match.getHardCpuLimit().isPresent()) {
        // This will never throw an exception if the validateManagerSpec method succeeds.
        // (Message fixed: the condition verifies the quota period, not the hard limit.)
        checkState(getCpuQuotaPeriodMillis().isPresent(), "CPU quota period must be specified when CPU limits are used");
        // Use the hard limit to derive the quota generation rate when present,
        // otherwise fall back to the soft limit.
        Duration limit;
        if (match.getHardCpuLimit().isPresent()) {
            limit = match.getHardCpuLimit().get();
        } else {
            limit = match.getSoftCpuLimit().get();
        }
        // Quota generated per wall-clock second, in CPU-millis; clamp to
        // [1, Long.MAX_VALUE] so the group always makes progress.
        long rate = (long) Math.min(1000.0 * limit.toMillis() / (double) getCpuQuotaPeriodMillis().get().toMillis(), Long.MAX_VALUE);
        rate = Math.max(1, rate);
        group.setCpuQuotaGenerationMillisPerSecond(rate);
    }
    if (match.getSoftCpuLimit().isPresent()) {
        group.setSoftCpuLimit(match.getSoftCpuLimit().get());
    }
    if (match.getHardCpuLimit().isPresent()) {
        group.setHardCpuLimit(match.getHardCpuLimit().get());
    }
}
Example usage of io.airlift.units.DataSize in the prestodb/presto project — class TestRcFileReaderManual, method readValues:
/**
 * Reads all SMALLINT values from the RCFile bytes in {@code data} that fall in the
 * byte range {@code [offset, offset + length)}, after clamping the range to the
 * bounds of the data.
 *
 * @param data the complete RCFile contents
 * @param offset requested starting byte offset (may be negative; clamped to 0)
 * @param length requested byte length (truncated at the end of {@code data})
 * @return the decoded values, in file order
 * @throws IOException if the range cannot be decoded as an RCFile
 */
private static List<Integer> readValues(Slice data, int offset, int length) throws IOException {
    if (offset < 0) {
        // Shift the range start to 0, shrinking the length by the overhang.
        length += offset;
        offset = 0;
    }
    if (offset + length > data.length()) {
        // Truncate the range so it does not run past the end of the data.
        length = data.length() - offset;
    }
    // try-with-resources: the original leaked the reader if decoding threw.
    try (RcFileReader reader = new RcFileReader(new SliceRcFileDataSource(data), new BinaryRcFileEncoding(), ImmutableMap.of(0, SMALLINT), new BogusRcFileCodecFactory(), offset, length, new DataSize(1, MEGABYTE))) {
        ImmutableList.Builder<Integer> values = ImmutableList.builder();
        while (reader.advance() >= 0) {
            Block block = reader.readBlock(0);
            for (int position = 0; position < block.getPositionCount(); position++) {
                values.add((int) SMALLINT.getLong(block, position));
            }
        }
        return values.build();
    }
}
Example usage of io.airlift.units.DataSize in the prestodb/presto project — class ClusterMemoryManager, method updatePools:
/**
 * Refreshes the cluster-wide view of memory pools from the latest per-node memory
 * snapshots: updates the total cluster memory gauge, drops (and un-exports) pools no
 * longer reported by any node, creates and JMX-exports newly reported pools, and
 * pushes fresh {@code MemoryPoolInfo} to registered change listeners.
 *
 * @param queryCounts number of queries assigned to each pool, keyed by pool id
 */
private synchronized void updatePools(Map<MemoryPoolId, Integer> queryCounts) {
    // Update view of cluster memory and pools
    List<MemoryInfo> nodeMemoryInfos = nodes.values().stream().map(RemoteNodeMemory::getInfo).filter(Optional::isPresent).map(Optional::get).collect(toImmutableList());
    long totalClusterMemory = nodeMemoryInfos.stream().map(MemoryInfo::getTotalNodeMemory).mapToLong(DataSize::toBytes).sum();
    clusterMemoryBytes.set(totalClusterMemory);
    // A pool is "active" if at least one node currently reports it.
    Set<MemoryPoolId> activePoolIds = nodeMemoryInfos.stream().flatMap(info -> info.getPools().keySet().stream()).collect(toImmutableSet());
    // Make a copy to materialize the set difference
    Set<MemoryPoolId> removedPools = ImmutableSet.copyOf(difference(pools.keySet(), activePoolIds));
    for (MemoryPoolId removed : removedPools) {
        // Un-register the JMX bean before forgetting the pool.
        unexport(pools.get(removed));
        pools.remove(removed);
        if (changeListeners.containsKey(removed)) {
            for (Consumer<MemoryPoolInfo> listener : changeListeners.get(removed)) {
                // Notify with an empty pool (zero capacity, no reservations) so
                // listeners observe the removal; dispatched on the listener
                // executor to avoid running alien code under this lock.
                listenerExecutor.execute(() -> listener.accept(new MemoryPoolInfo(0, 0, ImmutableMap.of())));
            }
        }
    }
    for (MemoryPoolId id : activePoolIds) {
        // Lazily create and JMX-export a ClusterMemoryPool for newly seen ids.
        ClusterMemoryPool pool = pools.computeIfAbsent(id, poolId -> {
            ClusterMemoryPool newPool = new ClusterMemoryPool(poolId);
            String objectName = ObjectNames.builder(ClusterMemoryPool.class, newPool.getId().toString()).build();
            try {
                exporter.export(objectName, newPool);
            } catch (JmxException e) {
                // Export failure is non-fatal: the pool still functions, it just
                // won't be visible over JMX.
                log.error(e, "Error exporting memory pool %s", poolId);
            }
            return newPool;
        });
        pool.update(nodeMemoryInfos, queryCounts.getOrDefault(pool.getId(), 0));
        if (changeListeners.containsKey(id)) {
            // Snapshot the info once so all listeners for this pool see the same state.
            MemoryPoolInfo info = pool.getInfo();
            for (Consumer<MemoryPoolInfo> listener : changeListeners.get(id)) {
                listenerExecutor.execute(() -> listener.accept(info));
            }
        }
    }
}
Aggregations