Use of io.trino.spi.NodeManager in project trino by trinodb.
The class TestRaptorMetadata, method setupDatabase.
@BeforeMethod
public void setupDatabase()
{
    dbi = createTestingJdbi();
    dummyHandle = dbi.open();
    createTablesWithRetry(dbi);

    // adapt the SPI NodeManager to Raptor's NodeSupplier functional interface
    NodeManager nodeManager = new TestingNodeManager();
    NodeSupplier nodeSupplier = nodeManager::getWorkerNodes;
    shardManager = createShardManager(dbi, nodeSupplier, systemTicker());
    metadata = new RaptorMetadata(dbi, shardManager);
}
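The method reference compiles because Raptor's NodeSupplier is a functional interface whose single abstract method matches NodeManager.getWorkerNodes(). A sketch of the two shapes involved (as found in trino-spi and the Raptor plugin; verify against the sources for your Trino version):

import java.util.Set;

// io.trino.spi.NodeManager (sketch; see trino-spi for the authoritative definition)
public interface NodeManager
{
    Set<Node> getAllNodes();

    Set<Node> getWorkerNodes();

    Node getCurrentNode();

    String getEnvironment();
}

// Raptor's NodeSupplier (sketch): a functional interface, so any
// zero-argument method returning Set<Node> can be bound by reference.
public interface NodeSupplier
{
    Set<Node> getWorkerNodes();
}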
Use of io.trino.spi.NodeManager in project trino by trinodb.
The class TestShardEjector, method testEjector.
@Test(invocationCount = 20)
public void testEjector() throws Exception
{
    NodeManager nodeManager = createNodeManager("node1", "node2", "node3", "node4", "node5");
    ShardEjector ejector = new ShardEjector(
            nodeManager.getCurrentNode().getNodeIdentifier(),
            nodeManager::getWorkerNodes,
            shardManager,
            storageService,
            new Duration(1, HOURS),
            Optional.of(new TestingBackupStore()),
            "test");
    List<ShardInfo> shards = ImmutableList.<ShardInfo>builder()
            .add(shardInfo("node1", 14))
            .add(shardInfo("node1", 13))
            .add(shardInfo("node1", 12))
            .add(shardInfo("node1", 11))
            .add(shardInfo("node1", 10))
            .add(shardInfo("node1", 10))
            .add(shardInfo("node1", 10))
            .add(shardInfo("node1", 10))
            .add(shardInfo("node2", 5))
            .add(shardInfo("node2", 5))
            .add(shardInfo("node3", 10))
            .add(shardInfo("node4", 10))
            .add(shardInfo("node5", 10))
            .add(shardInfo("node6", 200))
            .build();
    long tableId = createTable("test");
    List<ColumnInfo> columns = ImmutableList.of(new ColumnInfo(1, BIGINT));
    shardManager.createTable(tableId, columns, false, OptionalLong.empty());
    long transactionId = shardManager.beginTransaction();
    shardManager.commitShards(transactionId, tableId, columns, shards, Optional.empty(), 0);
    // create backing files for the shards stored on node1
    for (ShardInfo shard : shards.subList(0, 8)) {
        File file = storageService.getStorageFile(shard.getShardUuid());
        storageService.createParents(file);
        assertTrue(file.createNewFile());
    }
    ejector.process();
    shardManager.getShardNodes(tableId, TupleDomain.all());
    Set<UUID> ejectedShards = shards.subList(0, 4).stream().map(ShardInfo::getShardUuid).collect(toSet());
    Set<UUID> keptShards = shards.subList(4, 8).stream().map(ShardInfo::getShardUuid).collect(toSet());
    Set<UUID> remaining = uuids(shardManager.getNodeShards("node1"));
    // the largest shards should be gone from node1, along with their local files
    for (UUID uuid : ejectedShards) {
        assertFalse(remaining.contains(uuid));
        assertFalse(storageService.getStorageFile(uuid).exists());
    }
    assertEquals(remaining, keptShards);
    for (UUID uuid : keptShards) {
        assertTrue(storageService.getStorageFile(uuid).exists());
    }
    // ejected shards must now be assigned to one of the other nodes
    Set<UUID> others = ImmutableSet.<UUID>builder()
            .addAll(uuids(shardManager.getNodeShards("node2")))
            .addAll(uuids(shardManager.getNodeShards("node3")))
            .addAll(uuids(shardManager.getNodeShards("node4")))
            .addAll(uuids(shardManager.getNodeShards("node5")))
            .build();
    assertTrue(others.containsAll(ejectedShards));
}
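The assertions above rely on a small uuids(...) helper from the same test class. A plausible sketch, assuming getNodeShards returns ShardMetadata entries that expose getShardUuid() (the exact types may differ in the real test):

import static java.util.stream.Collectors.toSet;

private static Set<UUID> uuids(Set<ShardMetadata> metadata)
{
    return metadata.stream()
            .map(ShardMetadata::getShardUuid)
            .collect(toSet());
}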
Use of io.trino.spi.NodeManager in project trino by trinodb.
The class TestRaptorConnector, method setup.
@BeforeMethod
public void setup()
{
    Jdbi dbi = createTestingJdbi();
    dummyHandle = dbi.open();
    metadataDao = dbi.onDemand(MetadataDao.class);
    createTablesWithRetry(dbi);
    dataDir = Files.createTempDir();

    CatalogName connectorId = new CatalogName("test");
    NodeManager nodeManager = new TestingNodeManager();
    NodeSupplier nodeSupplier = nodeManager::getWorkerNodes;
    ShardManager shardManager = createShardManager(dbi);
    StorageManager storageManager = createRaptorStorageManager(dbi, dataDir);
    StorageManagerConfig config = new StorageManagerConfig();
    connector = new RaptorConnector(
            new LifeCycleManager(ImmutableList.of(), null),
            new TestingNodeManager(),
            new RaptorMetadataFactory(dbi, shardManager),
            new RaptorSplitManager(connectorId, nodeSupplier, shardManager, false),
            new RaptorPageSourceProvider(storageManager),
            new RaptorPageSinkProvider(storageManager, new PagesIndexPageSorter(new PagesIndex.TestingFactory(false)), config),
            new RaptorNodePartitioningProvider(nodeSupplier),
            new RaptorSessionProperties(config),
            new RaptorTableProperties(TESTING_TYPE_MANAGER),
            ImmutableSet.of(),
            Optional.empty(),
            dbi);
}
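A setup method like this typically has a matching teardown that closes the JDBI handle and removes the temporary data directory. A hypothetical sketch using Guava's MoreFiles (not part of the original snippet):

import static com.google.common.io.MoreFiles.deleteRecursively;
import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE;

@AfterMethod(alwaysRun = true)
public void tearDown() throws Exception
{
    dummyHandle.close();
    // hypothetical cleanup of the temp directory created in setup()
    deleteRecursively(dataDir.toPath(), ALLOW_INSECURE);
}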
Use of io.trino.spi.NodeManager in project trino by trinodb.
The class PinotModule, method configure.
@Override
public void configure(Binder binder)
{
    configBinder(binder).bindConfig(PinotConfig.class);
    binder.bind(PinotConnector.class).in(Scopes.SINGLETON);
    binder.bind(PinotMetadata.class).in(Scopes.SINGLETON);
    binder.bind(PinotSplitManager.class).in(Scopes.SINGLETON);
    binder.bind(PinotPageSourceProvider.class).in(Scopes.SINGLETON);
    binder.bind(PinotClient.class).in(Scopes.SINGLETON);
    binder.bind(PinotQueryClient.class).in(Scopes.SINGLETON);
    binder.bind(Executor.class)
            .annotatedWith(ForPinot.class)
            .toInstance(newCachedThreadPool(threadsNamed("pinot-metadata-fetcher-" + catalogName)));
    binder.bind(PinotSessionProperties.class).in(Scopes.SINGLETON);
    binder.bind(PinotNodePartitioningProvider.class).in(Scopes.SINGLETON);

    httpClientBinder(binder).bindHttpClient("pinot", ForPinot.class)
            .withConfigDefaults(cfg -> {
                cfg.setIdleTimeout(new Duration(300, SECONDS));
                cfg.setConnectTimeout(new Duration(300, SECONDS));
                cfg.setRequestTimeout(new Duration(300, SECONDS));
                cfg.setMaxConnectionsPerServer(250);
                cfg.setMaxContentLength(DataSize.of(32, MEGABYTE));
                cfg.setSelectorCount(10);
                cfg.setTimeoutThreads(8);
                cfg.setTimeoutConcurrency(4);
            });

    jsonBinder(binder).addDeserializerBinding(DataSchema.class).to(DataSchemaDeserializer.class);
    PinotClient.addJsonBinders(jsonCodecBinder(binder));
    binder.bind(MBeanServer.class).toInstance(new RebindSafeMBeanServer(getPlatformMBeanServer()));
    binder.bind(NodeManager.class).toInstance(nodeManager);
    binder.bind(ConnectorNodePartitioningProvider.class)
            .to(PinotNodePartitioningProvider.class)
            .in(Scopes.SINGLETON);
    newOptionalBinder(binder, PinotHostMapper.class)
            .setDefault()
            .to(IdentityPinotHostMapper.class)
            .in(Scopes.SINGLETON);
}
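configure(...) references the fields catalogName and nodeManager, so the module must receive them at construction time, typically from the ConnectorContext in the connector factory. A hypothetical constructor sketch consistent with that usage:

import static java.util.Objects.requireNonNull;

// Hypothetical: the injection point for the values used in configure(...)
public PinotModule(String catalogName, NodeManager nodeManager)
{
    this.catalogName = requireNonNull(catalogName, "catalogName is null");
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
}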
Use of io.trino.spi.NodeManager in project trino by trinodb.
The class IndexedTpchConnectorFactory, method create.
@Override
public Connector create(String catalogName, Map<String, String> properties, ConnectorContext context)
{
    int splitsPerNode = getSplitsPerNode(properties);
    TpchIndexedData indexedData = new TpchIndexedData(indexSpec);
    NodeManager nodeManager = context.getNodeManager();

    return new Connector()
    {
        @Override
        public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel, boolean readOnly, boolean autoCommit)
        {
            return TpchTransactionHandle.INSTANCE;
        }

        @Override
        public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle)
        {
            return new TpchIndexMetadata(indexedData);
        }

        @Override
        public ConnectorSplitManager getSplitManager()
        {
            return new TpchSplitManager(nodeManager, splitsPerNode);
        }

        @Override
        public ConnectorRecordSetProvider getRecordSetProvider()
        {
            return new TpchRecordSetProvider(DecimalTypeMapping.DOUBLE);
        }

        @Override
        public ConnectorIndexProvider getIndexProvider()
        {
            return new TpchIndexProvider(indexedData);
        }

        @Override
        public Set<SystemTable> getSystemTables()
        {
            return ImmutableSet.of(new ExampleSystemTable());
        }

        @Override
        public ConnectorNodePartitioningProvider getNodePartitioningProvider()
        {
            return new TpchNodePartitioningProvider(nodeManager, splitsPerNode);
        }
    };
}
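The factory reads splitsPerNode from the catalog properties. A plausible shape for the getSplitsPerNode helper, modeled on the standard TPCH connector factory (the property name "tpch.splits-per-node" and the processor-count default are assumptions; check the factory's sources):

import static com.google.common.base.MoreObjects.firstNonNull;
import static java.lang.Integer.parseInt;

private int getSplitsPerNode(Map<String, String> properties)
{
    try {
        // default to one split per available processor when the property is absent (assumption)
        return parseInt(firstNonNull(properties.get("tpch.splits-per-node"),
                String.valueOf(Runtime.getRuntime().availableProcessors())));
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Invalid property tpch.splits-per-node", e);
    }
}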