Use of io.trino.spi.Node in the trinodb/trino project: class AtopSplitManager, method getSplits.
/**
 * Generates one split per calendar day per worker node, covering the last
 * {@code maxHistoryDays} days, pruned against the table handle's start/end
 * time constraints.
 */
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle table, SplitSchedulingStrategy splitSchedulingStrategy, DynamicFilter dynamicFilter) {
    AtopTableHandle tableHandle = (AtopTableHandle) table;
    List<ConnectorSplit> splits = new ArrayList<>();
    ZonedDateTime end = ZonedDateTime.now(timeZone);
    // The first covered day (midnight, maxHistoryDays - 1 days back) does not
    // depend on the node, so compute it once instead of once per worker node.
    ZonedDateTime firstDay = end.minusDays(maxHistoryDays - 1).withHour(0).withMinute(0).withSecond(0).withNano(0);
    for (Node node : nodeManager.getWorkerNodes()) {
        ZonedDateTime start = firstDay;
        while (start.isBefore(end)) {
            // The split's candidate domain is the closed range [00:00:00, 23:59:59] of this day.
            ZonedDateTime splitEnd = start.withHour(23).withMinute(59).withSecond(59).withNano(0);
            // NOTE(review): the local-zone epoch millis are packed with UTC_KEY here,
            // presumably matching how the table's time constraints were encoded — confirm.
            Domain splitDomain = Domain.create(ValueSet.ofRanges(Range.range(TIMESTAMP_TZ_MILLIS, packDateTimeWithZone(start.toInstant().toEpochMilli(), UTC_KEY), true, packDateTimeWithZone(splitEnd.toInstant().toEpochMilli(), UTC_KEY), true)), false);
            // Skip days that cannot satisfy both time constraints of the table handle.
            if (tableHandle.getStartTimeConstraint().overlaps(splitDomain) && tableHandle.getEndTimeConstraint().overlaps(splitDomain)) {
                splits.add(new AtopSplit(node.getHostAndPort(), start.toEpochSecond(), start.getZone().getId()));
            }
            // Advance to midnight of the next day (withHour(0)... guards against DST edge cases).
            start = start.plusDays(1).withHour(0).withMinute(0).withSecond(0).withNano(0);
        }
    }
    return new FixedSplitSource(splits);
}
Use of io.trino.spi.Node in the trinodb/trino project: class TestShardEjector, method createNodeManager.
/**
 * Builds a {@link TestingNodeManager} whose current node is {@code current}
 * and which additionally registers one testing node per entry in {@code others}.
 */
private static NodeManager createNodeManager(String current, String... others) {
    TestingNodeManager manager = new TestingNodeManager(createTestingNode(current));
    for (String identifier : others) {
        manager.addNode(createTestingNode(identifier));
    }
    return manager;
}
Use of io.trino.spi.Node in the trinodb/trino project: class TestDatabaseShardManager, method testBucketAssignments.
@Test
public void testBucketAssignments() {
    // Three candidate nodes; the distribution is rebalanced across different
    // subsets of them over the course of the test.
    Node node1 = createTestingNode();
    Node node2 = createTestingNode();
    Node node3 = createTestingNode();
    TestingTicker ticker = new TestingTicker();
    MetadataDao metadataDao = dbi.onDemand(MetadataDao.class);
    int bucketCount = 13;
    long distributionId = metadataDao.insertDistribution(null, "test", bucketCount);
    // Initial assignment: all buckets spread over {node1, node2}.
    Set<Node> originalNodes = ImmutableSet.of(node1, node2);
    ShardManager shardManager = createShardManager(dbi, () -> originalNodes, ticker);
    shardManager.createBuckets(distributionId, bucketCount);
    List<String> assignments = shardManager.getBucketAssignments(distributionId);
    assertEquals(assignments.size(), bucketCount);
    assertEquals(ImmutableSet.copyOf(assignments), nodeIds(originalNodes));
    // Membership changes (node2 replaced by node3). Reassignment must be
    // refused while the server is still considered to be starting up.
    Set<Node> newNodes = ImmutableSet.of(node1, node3);
    shardManager = createShardManager(dbi, () -> newNodes, ticker);
    ShardManager finalShardManager = shardManager;
    assertTrinoExceptionThrownBy(() -> finalShardManager.getBucketAssignments(distributionId)).hasErrorCode(SERVER_STARTING_UP).hasMessage("Cannot reassign buckets while server is starting");
    // After the startup grace period elapses, buckets migrate to the new node set.
    ticker.increment(2, DAYS);
    assignments = shardManager.getBucketAssignments(distributionId);
    assertEquals(assignments.size(), bucketCount);
    assertEquals(ImmutableSet.copyOf(assignments), nodeIds(newNodes));
    // Shrinking to a single node: all buckets collapse onto node1.
    Set<Node> singleNode = ImmutableSet.of(node1);
    shardManager = createShardManager(dbi, () -> singleNode, ticker);
    ticker.increment(2, DAYS);
    assignments = shardManager.getBucketAssignments(distributionId);
    assertEquals(assignments.size(), bucketCount);
    assertEquals(ImmutableSet.copyOf(assignments), nodeIds(singleNode));
}
Use of io.trino.spi.Node in the trinodb/trino project: class TestJmxSplitManager, method testPredicatePushdown.
@Test
public void testPredicatePushdown() throws Exception {
    // For each known node, constrain the node column to that node's identifier
    // and verify exactly one split is produced, addressed to that node.
    for (Node node : nodes) {
        String identifier = node.getNodeIdentifier();
        TupleDomain<ColumnHandle> constraint = TupleDomain.fromFixedValues(ImmutableMap.of(columnHandle, NullableValue.of(createUnboundedVarcharType(), utf8Slice(identifier))));
        JmxTableHandle handle = new JmxTableHandle(new SchemaTableName("schema", "tableName"), ImmutableList.of("objectName"), ImmutableList.of(columnHandle), true, constraint);
        List<ConnectorSplit> splits = getAllSplits(splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, handle, UNGROUPED_SCHEDULING, DynamicFilter.EMPTY));
        assertEquals(splits.size(), 1);
        assertEquals(splits.get(0).getAddresses().size(), 1);
        assertEquals(splits.get(0).getAddresses().get(0).getHostText(), identifier);
    }
}
Use of io.trino.spi.Node in the trinodb/trino project: class TpcdsSplitManager, method getSplits.
/**
 * Produces {@code splitsPerNode} splits for every required worker node, each
 * split carrying a distinct part number out of {@code nodes * splitsPerNode}
 * total parts and addressed to its owning node.
 */
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle tableHandle, SplitSchedulingStrategy splitSchedulingStrategy, DynamicFilter dynamicFilter) {
    Set<Node> nodes = nodeManager.getRequiredWorkerNodes();
    checkState(!nodes.isEmpty(), "No TPCDS nodes available");
    int totalParts = nodes.size() * splitsPerNode;
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    // Split the data using split and skew by the number of nodes available.
    // Each node receives splitsPerNode consecutive part numbers.
    int part = 0;
    for (Node node : nodes) {
        for (int i = 0; i < splitsPerNode; i++, part++) {
            splits.add(new TpcdsSplit(part, totalParts, ImmutableList.of(node.getHostAndPort()), noSexism));
        }
    }
    return new FixedSplitSource(splits.build());
}
Aggregations