Example usage of java.util.OptionalLong from the prestodb/presto project: class ShardCompactionManager, method filterAndCreateCompactionSets.
/**
 * Filters the given shards of a table down to those that need compaction and are not
 * already being organized, then groups them into compaction (organization) sets.
 *
 * @param tableId id of the table whose shards are being considered
 * @param tableShards all shard metadata currently known for the table
 * @return the compaction sets to schedule; empty if the table's temporal column
 *         (when present) has an unsupported data type
 */
private Collection<OrganizationSet> filterAndCreateCompactionSets(long tableId, Collection<ShardMetadata> tableShards)
{
    Table tableInfo = metadataDao.getTableInformation(tableId);
    OptionalLong temporalColumnId = tableInfo.getTemporalColumnId();
    if (temporalColumnId.isPresent()) {
        TableColumn tableColumn = metadataDao.getTableColumn(tableId, temporalColumnId.getAsLong());
        // Bail out entirely rather than compacting a table whose temporal column type is unusable
        if (!isValidTemporalColumn(tableId, tableColumn.getDataType())) {
            return ImmutableSet.of();
        }
    }

    // Keep only shards that need compaction and are not already being organized
    Set<ShardMetadata> filteredShards = tableShards.stream()
            .filter(this::needsCompaction)
            .filter(shard -> !organizer.inProgress(shard.getShardUuid()))
            .collect(toSet());

    Collection<ShardIndexInfo> shardIndexInfos = getOrganizationEligibleShards(dbi, metadataDao, tableInfo, filteredShards, false);

    // Reuse the OptionalLong fetched above instead of re-reading it from tableInfo
    if (temporalColumnId.isPresent()) {
        // Shards without a temporal range cannot participate in temporal compaction sets
        Set<ShardIndexInfo> temporalShards = shardIndexInfos.stream()
                .filter(shard -> shard.getTemporalRange().isPresent())
                .collect(toSet());
        return compactionSetCreator.createCompactionSets(tableInfo, temporalShards);
    }
    return compactionSetCreator.createCompactionSets(tableInfo, shardIndexInfos);
}
Example usage of java.util.OptionalLong from the prestodb/presto project: class TestShardDao, method testNodeShards.
/**
 * Exercises shard/node bookkeeping in ShardDao: node registration, shard insertion for
 * plain and bucketed tables, shard-to-node assignment, per-node size accounting, and
 * cleanup via dropShardNodes/dropShards. The assertions depend on the exact insertion
 * order below, so statements must not be reordered.
 */
@Test
public void testNodeShards() throws Exception {
// No nodes are registered initially.
assertEquals(dao.getAllNodesInUse(), ImmutableSet.of());
// Register two nodes with random identifiers.
String nodeName1 = UUID.randomUUID().toString();
int nodeId1 = dao.insertNode(nodeName1);
String nodeName2 = UUID.randomUUID().toString();
int nodeId2 = dao.insertNode(nodeName2);
assertEquals(dao.getAllNodesInUse(), ImmutableSet.of(nodeName1, nodeName2));
UUID shardUuid1 = UUID.randomUUID();
UUID shardUuid2 = UUID.randomUUID();
UUID shardUuid3 = UUID.randomUUID();
UUID shardUuid4 = UUID.randomUUID();
UUID shardUuid5 = UUID.randomUUID();
MetadataDao metadataDao = dbi.onDemand(MetadataDao.class);
// Create a bucketed distribution, alternating buckets between the two nodes.
int bucketCount = 20;
long distributionId = metadataDao.insertDistribution("test", "bigint", bucketCount);
for (int i = 0; i < bucketCount; i++) {
Integer nodeId = ((i % 2) == 0) ? nodeId1 : nodeId2;
dao.insertBuckets(distributionId, ImmutableList.of(i), ImmutableList.of(nodeId));
}
// One plain (unbucketed) table and one table on the distribution above.
long plainTableId = metadataDao.insertTable("test", "plain", false, false, null, 0);
long bucketedTableId = metadataDao.insertTable("test", "bucketed", false, false, distributionId, 0);
// Shards 1-2 on the plain table (no bucket); shards 3-5 on buckets 8, 9, 7.
// Trailing args are row count / compressed size / uncompressed size.
long shardId1 = dao.insertShard(shardUuid1, plainTableId, null, 1, 11, 111);
long shardId2 = dao.insertShard(shardUuid2, plainTableId, null, 2, 22, 222);
long shardId3 = dao.insertShard(shardUuid3, bucketedTableId, 8, 3, 33, 333);
long shardId4 = dao.insertShard(shardUuid4, bucketedTableId, 9, 4, 44, 444);
long shardId5 = dao.insertShard(shardUuid5, bucketedTableId, 7, 5, 55, 555);
// Expected ShardMetadata values mirroring the inserts above.
OptionalInt noBucket = OptionalInt.empty();
OptionalLong noRange = OptionalLong.empty();
ShardMetadata shard1 = new ShardMetadata(plainTableId, shardId1, shardUuid1, noBucket, 1, 11, 111, noRange, noRange);
ShardMetadata shard2 = new ShardMetadata(plainTableId, shardId2, shardUuid2, noBucket, 2, 22, 222, noRange, noRange);
ShardMetadata shard3 = new ShardMetadata(bucketedTableId, shardId3, shardUuid3, OptionalInt.of(8), 3, 33, 333, noRange, noRange);
ShardMetadata shard4 = new ShardMetadata(bucketedTableId, shardId4, shardUuid4, OptionalInt.of(9), 4, 44, 444, noRange, noRange);
ShardMetadata shard5 = new ShardMetadata(bucketedTableId, shardId5, shardUuid5, OptionalInt.of(7), 5, 55, 555, noRange, noRange);
assertEquals(dao.getShards(plainTableId), ImmutableList.of(shardUuid1, shardUuid2));
assertEquals(dao.getShards(bucketedTableId), ImmutableList.of(shardUuid3, shardUuid4, shardUuid5));
// Before any explicit shard-node assignments, node ownership follows bucket placement:
// bucket 8 (even) -> node1; buckets 9 and 7 (odd) -> node2.
assertEquals(dao.getNodeShards(nodeName1, null), ImmutableSet.of(shard3));
assertEquals(dao.getNodeShards(nodeName2, null), ImmutableSet.of(shard4, shard5));
assertEquals(dao.getNodeSizes(), ImmutableSet.of(new NodeSize(nodeName1, 33), new NodeSize(nodeName2, 44 + 55)));
// Assign plain-table shards: shard1 to both nodes (replicated), shard2 to node1 only.
dao.insertShardNode(shardId1, nodeId1);
dao.insertShardNode(shardId2, nodeId1);
dao.insertShardNode(shardId1, nodeId2);
assertEquals(dao.getNodeShards(nodeName1, null), ImmutableSet.of(shard1, shard2, shard3));
assertEquals(dao.getNodeShards(nodeName2, null), ImmutableSet.of(shard1, shard4, shard5));
// Node sizes include the replicated shard1 on both nodes.
assertEquals(dao.getNodeSizes(), ImmutableSet.of(new NodeSize(nodeName1, 11 + 22 + 33), new NodeSize(nodeName2, 11 + 44 + 55)));
// Dropping shard-node mappings for the plain table restores bucket-driven ownership.
dao.dropShardNodes(plainTableId);
assertEquals(dao.getNodeShards(nodeName1, null), ImmutableSet.of(shard3));
assertEquals(dao.getNodeShards(nodeName2, null), ImmutableSet.of(shard4, shard5));
assertEquals(dao.getNodeSizes(), ImmutableSet.of(new NodeSize(nodeName1, 33), new NodeSize(nodeName2, 44 + 55)));
// Dropping all shards leaves both tables empty and zero node sizes.
dao.dropShards(plainTableId);
dao.dropShards(bucketedTableId);
assertEquals(dao.getShards(plainTableId), ImmutableList.of());
assertEquals(dao.getShards(bucketedTableId), ImmutableList.of());
assertEquals(dao.getNodeSizes(), ImmutableSet.of());
}
Example usage of java.util.OptionalLong from the prestodb/presto project: class MetadataDeleteOperator, method getOutput.
/**
 * Performs the metadata-level delete exactly once and emits a single-row page
 * containing the number of rows deleted, or null when the connector does not
 * report a count. Subsequent calls return null.
 */
@Override
public Page getOutput() {
    if (finished) {
        return null;
    }
    finished = true;

    // Ask the connector to perform the delete; the row count may be unavailable.
    OptionalLong deletedRowCount = metadata.metadataDelete(session, tableHandle, tableLayout);

    PageBuilder pageBuilder = new PageBuilder(TYPES);
    BlockBuilder countBuilder = pageBuilder.getBlockBuilder(0);
    pageBuilder.declarePosition();
    if (deletedRowCount.isPresent()) {
        BIGINT.writeLong(countBuilder, deletedRowCount.getAsLong());
    }
    else {
        // Connector could not report how many rows were removed.
        countBuilder.appendNull();
    }
    return pageBuilder.build();
}
Example usage of java.util.OptionalLong from the apache/camel project: class TelegramConsumer, method updateOffset.
/**
 * Advances the polling offset to one past the highest update id in the batch,
 * so the next poll does not re-fetch updates already seen. A batch with no
 * updates leaves the offset untouched.
 */
private void updateOffset(List<Update> updates) {
    OptionalLong highestUpdateId = updates.stream()
            .mapToLong(Update::getUpdateId)
            .max();
    highestUpdateId.ifPresent(maxId -> {
        this.offset = maxId + 1;
        log.debug("Next Telegram offset will be {}", this.offset);
    });
}
Example usage of java.util.OptionalLong from the torodb/torodb project: class FindImplementation, method apply.
/**
 * Executes a MongoDB {@code find} command against the backing Torod transaction.
 *
 * Only two filter shapes are supported: an empty filter (full collection scan) and a
 * single-attribute filter. Anything else fails with COMMAND_FAILED. The result is
 * returned as a single-batch cursor; the underlying cursor is closed before returning.
 *
 * @param req the request carrying the target database
 * @param command the find command descriptor (unused beyond typing)
 * @param arg parsed find arguments (collection, filter, batch size)
 * @param context transaction from which documents are read
 * @return OK with a single-batch FindResult, or a COMMAND_FAILED status
 */
@Override
public Status<FindResult> apply(Request req, Command<? super FindArgument, ? super FindResult> command, FindArgument arg, MongodTransaction context) {
    logFindCommand(arg);

    BsonDocument filter = arg.getFilter();
    Cursor<BsonDocument> cursor;
    switch (filter.size()) {
        case 0:
        {
            // No filter: scan the whole collection.
            cursor = context.getTorodTransaction().findAll(req.getDatabase(), arg.getCollection()).asDocCursor().transform(t -> t.getRoot()).transform(ToBsonDocumentTranslator.getInstance());
            break;
        }
        case 1:
        {
            // Exactly one attribute: delegate to the attribute-indexed lookup.
            try {
                cursor = getByAttributeCursor(context.getTorodTransaction(), req.getDatabase(), arg.getCollection(), filter).transform(ToBsonDocumentTranslator.getInstance());
            } catch (CommandFailed ex) {
                return Status.from(ex);
            }
            break;
        }
        default:
        {
            return Status.from(ErrorCode.COMMAND_FAILED, "The given query is not supported right now");
        }
    }

    // Compare primitives directly; the original Long.valueOf(...) forced a pointless
    // box/unbox round-trip for the same comparison.
    if (arg.getBatchSize() > (long) Integer.MAX_VALUE) {
        return Status.from(ErrorCode.COMMAND_FAILED, "Only batchSize equals or lower than " + Integer.MAX_VALUE + " is supported");
    }

    // Default batch size of 101 matches MongoDB's first-batch convention when none is given.
    OptionalLong batchSize = arg.getEffectiveBatchSize();
    List<BsonDocument> batch = cursor.getNextBatch(batchSize.isPresent() ? (int) batchSize.getAsLong() : 101);
    cursor.close();

    return Status.ok(new FindResult(CursorResult.createSingleBatchCursor(req.getDatabase(), arg.getCollection(), batch.iterator())));
}
Aggregations