Use of io.airlift.units.Duration in project presto by prestodb.
From the class TestShardCleaner, method testCleanLocalShards:
@Test
public void testCleanLocalShards()
        throws Exception
{
    assertEquals(cleaner.getLocalShardsCleaned().getTotalCount(), 0);

    TestingShardDao shardDao = dbi.onDemand(TestingShardDao.class);
    MetadataDao metadataDao = dbi.onDemand(MetadataDao.class);

    long tableId = metadataDao.insertTable("test", "test", false, false, null, 0);

    UUID shard1 = randomUUID();
    UUID shard2 = randomUUID();
    UUID shard3 = randomUUID();
    UUID shard4 = randomUUID();
    Set<UUID> shards = ImmutableSet.of(shard1, shard2, shard3, shard4);

    for (UUID shard : shards) {
        shardDao.insertShard(shard, tableId, null, 0, 0, 0);
        createShardFile(shard);
        assertTrue(shardFileExists(shard));
    }

    int node1 = shardDao.insertNode("node1");
    int node2 = shardDao.insertNode("node2");

    // shard 1: referenced by this node
    // shard 2: not referenced
    // shard 3: not referenced
    // shard 4: referenced by other node
    shardDao.insertShardNode(shard1, node1);
    shardDao.insertShardNode(shard4, node2);

    // mark unreferenced shards
    cleaner.cleanLocalShards();
    assertEquals(cleaner.getLocalShardsCleaned().getTotalCount(), 0);

    // make sure nothing is deleted
    for (UUID shard : shards) {
        assertTrue(shardFileExists(shard));
    }

    // add reference for shard 3
    shardDao.insertShardNode(shard3, node1);

    // advance time beyond clean time
    Duration cleanTime = new ShardCleanerConfig().getLocalCleanTime();
    ticker.increment(cleanTime.toMillis() + 1, MILLISECONDS);

    // clean shards
    cleaner.cleanLocalShards();
    assertEquals(cleaner.getLocalShardsCleaned().getTotalCount(), 2);

    // shards 2 and 4 should be deleted
    // shards 1 and 3 are referenced by this node
    assertTrue(shardFileExists(shard1));
    assertFalse(shardFileExists(shard2));
    assertTrue(shardFileExists(shard3));
    assertFalse(shardFileExists(shard4));
}
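A note on the pattern above: the test advances a fake clock past the configured local-clean time by converting the airlift Duration to milliseconds. Below is a minimal, self-contained sketch of that Duration-to-ticker conversion; it assumes io.airlift.testing.TestingTicker (the usual airlift test ticker) and an illustrative one-minute clean time, not values taken from the Presto source.

import io.airlift.testing.TestingTicker;
import io.airlift.units.Duration;

import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;

public class TickerAdvanceSketch
{
    public static void main(String[] args)
    {
        TestingTicker ticker = new TestingTicker();
        // illustrative clean time; the real test reads it from ShardCleanerConfig
        Duration cleanTime = new Duration(1, MINUTES);
        // advance the fake clock just past the threshold, as the test does
        ticker.increment(cleanTime.toMillis() + 1, MILLISECONDS);
        System.out.println(ticker.read()); // nanoseconds elapsed on the fake clock
    }
}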
Use of io.airlift.units.Duration in project presto by prestodb.
From the class TestShardCleanerConfig, method testExplicitPropertyMappings:
@Test
public void testExplicitPropertyMappings()
{
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("raptor.max-transaction-age", "42m")
            .put("raptor.transaction-cleaner-interval", "43m")
            .put("raptor.local-cleaner-interval", "31m")
            .put("raptor.local-clean-time", "32m")
            .put("raptor.backup-cleaner-interval", "34m")
            .put("raptor.backup-clean-time", "35m")
            .put("raptor.backup-deletion-threads", "37")
            .put("raptor.max-completed-transaction-age", "39m")
            .build();

    ShardCleanerConfig expected = new ShardCleanerConfig()
            .setMaxTransactionAge(new Duration(42, MINUTES))
            .setTransactionCleanerInterval(new Duration(43, MINUTES))
            .setLocalCleanerInterval(new Duration(31, MINUTES))
            .setLocalCleanTime(new Duration(32, MINUTES))
            .setBackupCleanerInterval(new Duration(34, MINUTES))
            .setBackupCleanTime(new Duration(35, MINUTES))
            .setBackupDeletionThreads(37)
            .setMaxCompletedTransactionAge(new Duration(39, MINUTES));

    assertFullMapping(properties, expected);
}
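The string values above ("42m", "43m", and so on) are parsed into Duration instances by airlift's config binding. Here is a small sketch of that parsing using Duration.valueOf directly; the surrounding class is just for illustration.

import io.airlift.units.Duration;

import static java.util.concurrent.TimeUnit.MINUTES;

public class DurationParsingSketch
{
    public static void main(String[] args)
    {
        // "42m" is the same string bound to raptor.max-transaction-age above
        Duration parsed = Duration.valueOf("42m");
        System.out.println(parsed.getValue(MINUTES)); // 42.0
        System.out.println(parsed.toMillis());        // 2520000
        // equivalent to the Duration the expected config constructs directly
        System.out.println(parsed.compareTo(new Duration(42, MINUTES)) == 0); // true
    }
}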
Use of io.airlift.units.Duration in project presto by prestodb.
From the class TestRaptorSplitManager, method setup:
@BeforeMethod
public void setup()
        throws Exception
{
    TypeRegistry typeRegistry = new TypeRegistry();
    DBI dbi = new DBI("jdbc:h2:mem:test" + System.nanoTime());
    dbi.registerMapper(new TableColumn.Mapper(typeRegistry));
    dummyHandle = dbi.open();
    createTablesWithRetry(dbi);

    temporary = createTempDir();
    AssignmentLimiter assignmentLimiter = new AssignmentLimiter(ImmutableSet::of, systemTicker(), new MetadataConfig());
    shardManager = new DatabaseShardManager(dbi, new DaoSupplier<>(dbi, ShardDao.class), ImmutableSet::of, assignmentLimiter, systemTicker(), new Duration(0, MINUTES));
    TestingNodeManager nodeManager = new TestingNodeManager();
    NodeSupplier nodeSupplier = nodeManager::getWorkerNodes;

    String nodeName = UUID.randomUUID().toString();
    nodeManager.addNode(new PrestoNode(nodeName, new URI("http://127.0.0.1/"), NodeVersion.UNKNOWN, false));

    RaptorConnectorId connectorId = new RaptorConnectorId("raptor");
    metadata = new RaptorMetadata(connectorId.toString(), dbi, shardManager);

    metadata.createTable(SESSION, TEST_TABLE);
    tableHandle = metadata.getTableHandle(SESSION, TEST_TABLE.getTable());

    List<ShardInfo> shards = ImmutableList.<ShardInfo>builder()
            .add(shardInfo(UUID.randomUUID(), nodeName))
            .add(shardInfo(UUID.randomUUID(), nodeName))
            .add(shardInfo(UUID.randomUUID(), nodeName))
            .add(shardInfo(UUID.randomUUID(), nodeName))
            .build();

    tableId = ((RaptorTableHandle) tableHandle).getTableId();

    List<ColumnInfo> columns = metadata.getColumnHandles(SESSION, tableHandle).values().stream()
            .map(RaptorColumnHandle.class::cast)
            .map(ColumnInfo::fromHandle)
            .collect(toList());

    long transactionId = shardManager.beginTransaction();
    shardManager.commitShards(transactionId, tableId, columns, shards, Optional.empty(), 0);

    raptorSplitManager = new RaptorSplitManager(connectorId, nodeSupplier, shardManager, false);
}
Use of io.airlift.units.Duration in project presto by prestodb.
From the class TestBackupConfig, method testExplicitPropertyMappings:
@Test
public void testExplicitPropertyMappings()
{
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("backup.provider", "file")
            .put("backup.timeout", "42s")
            .put("backup.timeout-threads", "13")
            .put("backup.threads", "3")
            .build();

    BackupConfig expected = new BackupConfig()
            .setProvider("file")
            .setTimeout(new Duration(42, SECONDS))
            .setTimeoutThreads(13)
            .setBackupThreads(3);

    assertFullMapping(properties, expected);
}
Use of io.airlift.units.Duration in project presto by prestodb.
From the class AbstractResourceConfigurationManager, method configureGroup:
protected void configureGroup(ResourceGroup group, ResourceGroupSpec match)
{
    if (match.getSoftMemoryLimit().isPresent()) {
        group.setSoftMemoryLimit(match.getSoftMemoryLimit().get());
    }
    else {
        synchronized (generalPoolMemoryFraction) {
            double fraction = match.getSoftMemoryLimitFraction().get();
            generalPoolMemoryFraction.put(group, fraction);
            group.setSoftMemoryLimit(new DataSize(generalPoolBytes * fraction, BYTE));
        }
    }
    group.setMaxQueuedQueries(match.getMaxQueued());
    group.setMaxRunningQueries(match.getMaxRunning());
    if (match.getSchedulingPolicy().isPresent()) {
        group.setSchedulingPolicy(match.getSchedulingPolicy().get());
    }
    if (match.getSchedulingWeight().isPresent()) {
        group.setSchedulingWeight(match.getSchedulingWeight().get());
    }
    if (match.getJmxExport().isPresent()) {
        group.setJmxExport(match.getJmxExport().get());
    }
    if (match.getSoftCpuLimit().isPresent() || match.getHardCpuLimit().isPresent()) {
        // This will never throw an exception if the validateManagerSpec method succeeds
        checkState(getCpuQuotaPeriodMillis().isPresent(), "Must specify hard CPU limit in addition to soft limit");
        Duration limit;
        if (match.getHardCpuLimit().isPresent()) {
            limit = match.getHardCpuLimit().get();
        }
        else {
            limit = match.getSoftCpuLimit().get();
        }
        long rate = (long) Math.min(1000.0 * limit.toMillis() / (double) getCpuQuotaPeriodMillis().get().toMillis(), Long.MAX_VALUE);
        rate = Math.max(1, rate);
        group.setCpuQuotaGenerationMillisPerSecond(rate);
    }
    if (match.getSoftCpuLimit().isPresent()) {
        group.setSoftCpuLimit(match.getSoftCpuLimit().get());
    }
    if (match.getHardCpuLimit().isPresent()) {
        group.setHardCpuLimit(match.getHardCpuLimit().get());
    }
}
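To make the quota computation above concrete, here is a hedged worked example of the rate formula; the one-hour limit and one-day quota period are illustrative values, not values taken from any Presto configuration.

import io.airlift.units.Duration;

import static java.util.concurrent.TimeUnit.DAYS;
import static java.util.concurrent.TimeUnit.HOURS;

public class CpuQuotaRateSketch
{
    public static void main(String[] args)
    {
        Duration limit = new Duration(1, HOURS);      // hypothetical hard CPU limit
        Duration quotaPeriod = new Duration(1, DAYS); // hypothetical CPU quota period
        // same arithmetic as configureGroup above
        long rate = (long) Math.min(1000.0 * limit.toMillis() / (double) quotaPeriod.toMillis(), Long.MAX_VALUE);
        rate = Math.max(1, rate);
        // prints 41: the group earns roughly 41 ms of CPU quota per second of wall time
        System.out.println(rate);
    }
}

The Math.max(1, rate) floor mirrors the method above: even a very small limit relative to the quota period still generates at least one millisecond of quota per second.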