Use of io.prestosql.seedstore.SeedStoreManager in project hetu-core by openlookeng: class TestStateStoreLauncherAndProvider, method testRegisterDiscoveryService.
@Test(timeOut = 5000, expectedExceptions = ThreadTimeoutException.class)
public void testRegisterDiscoveryService() throws Exception {
    String failurehost = "failurehost";
    String otherhost = "otherhost";
    String localHostName = "localhost";
    int port = 8888;
    URI uri = new URI("http://" + localHostName + ":" + port);
    MockStateMap discoveryServiceMap = new MockStateMap(DISCOVERY_SERVICE, new HashMap<>());

    // Mock the state store, the discovery-service lock, and the server configuration
    StateStore stateStore = mock(StateStore.class);
    Lock lock = mock(ReentrantLock.class);
    InternalCommunicationConfig internalCommunicationConfig = mock(InternalCommunicationConfig.class);
    HttpServerInfo httpServerInfo = mock(HttpServerInfo.class);
    when(httpServerInfo.getHttpUri()).thenReturn(uri);
    when(internalCommunicationConfig.isHttpsRequired()).thenReturn(false);
    when(stateStore.getStateCollection(DISCOVERY_SERVICE)).thenReturn(discoveryServiceMap);
    when(stateStore.getLock(DISCOVERY_SERVICE_LOCK)).thenReturn(lock);

    EmbeddedStateStoreLauncher launcher = new EmbeddedStateStoreLauncher(
            new SeedStoreManager(new FileSystemClientManager()),
            internalCommunicationConfig,
            httpServerInfo,
            new HetuConfig());
    launcher.setStateStore(stateStore);
    when(lock.tryLock(DISCOVERY_REGISTRY_LOCK_TIMEOUT, TimeUnit.MILLISECONDS)).thenReturn(true);

    // discoveryServiceMap is empty, so the current coordinator can get the lock and register itself (register=true)
    discoveryServiceMap.clear();
    assertTrue(launcher.registerDiscoveryService(failurehost));
    assertEquals(discoveryServiceMap.size(), 1);
    assertTrue(discoveryServiceMap.getAll().keySet().contains(localHostName));

    // discoveryServiceMap contains the failure host, so the current coordinator can get the lock and register itself (register=true)
    discoveryServiceMap.clear();
    discoveryServiceMap.put(failurehost, String.valueOf(port));
    assertTrue(launcher.registerDiscoveryService(failurehost));
    assertEquals(discoveryServiceMap.size(), 1);
    assertTrue(discoveryServiceMap.getAll().keySet().contains(localHostName));

    // discoveryServiceMap was already updated by another coordinator (otherhost);
    // the current coordinator can grab the lock but will not register itself (register=false)
    discoveryServiceMap.clear();
    discoveryServiceMap.put(otherhost, String.valueOf(port));
    assertFalse(launcher.registerDiscoveryService(failurehost));
    assertEquals(discoveryServiceMap.size(), 1);
    assertFalse(discoveryServiceMap.containsKey(localHostName));

    when(lock.tryLock(DISCOVERY_REGISTRY_LOCK_TIMEOUT, TimeUnit.MILLISECONDS)).thenReturn(false);
    // discoveryServiceMap was already updated by another coordinator (otherhost);
    // the current coordinator cannot grab the lock and does not register itself
    discoveryServiceMap.clear();
    discoveryServiceMap.put(otherhost, String.valueOf(port));
    assertFalse(launcher.registerDiscoveryService(failurehost));
    assertEquals(discoveryServiceMap.size(), 1);
    assertFalse(discoveryServiceMap.containsKey(localHostName));

    // discoveryServiceMap still contains the failure host, but the current coordinator cannot get the lock;
    // the retry loop runs until the 5-second test timeout throws the expected exception
    discoveryServiceMap.clear();
    discoveryServiceMap.put(failurehost, String.valueOf(port));
    launcher.registerDiscoveryService(failurehost);
}
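For readers following the assertions above, the decision logic the test exercises can be summarized as the sketch below. It is inferred only from the test's comments and assertions, not copied from EmbeddedStateStoreLauncher; the method name registerDiscoveryServiceSketch and the currentHost/currentPort parameters are hypothetical, while DISCOVERY_REGISTRY_LOCK_TIMEOUT and the MockStateMap operations come from the snippet.

// Hypothetical sketch of the registration decision exercised by the test above.
// Inferred from the assertions; the real EmbeddedStateStoreLauncher may differ.
boolean registerDiscoveryServiceSketch(MockStateMap discoveryServiceMap, Lock lock,
        String failureHost, String currentHost, String currentPort) throws InterruptedException {
    while (true) {
        if (lock.tryLock(DISCOVERY_REGISTRY_LOCK_TIMEOUT, TimeUnit.MILLISECONDS)) {
            try {
                // Register only if no coordinator is registered yet, or the registered one is the failed host
                if (discoveryServiceMap.getAll().isEmpty() || discoveryServiceMap.containsKey(failureHost)) {
                    discoveryServiceMap.clear();
                    discoveryServiceMap.put(currentHost, currentPort);
                    return true;
                }
                // Another live coordinator has already registered itself
                return false;
            }
            finally {
                lock.unlock();
            }
        }
        // Could not get the lock: if the registered entry is still the failed host, retry
        // (this retry is what makes the final call in the test run until the timeout);
        // otherwise another coordinator took over, so give up
        if (!discoveryServiceMap.containsKey(failureHost)) {
            return false;
        }
    }
}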
Use of io.prestosql.seedstore.SeedStoreManager in project hetu-core by openlookeng: class TestStateStoreLauncherAndProvider, method setUp.
// Setup for the state store provider tests
@BeforeTest
private void setUp() throws IOException {
    // Mock a seed store that returns a single seed located at LOCALHOST:PORT3
    Set<Seed> seeds = new HashSet<>();
    SeedStore mockSeedStore = mock(SeedStore.class);
    Seed mockSeed = mock(Seed.class);
    seeds.add(mockSeed);
    SeedStoreManager mockSeedStoreManager = mock(SeedStoreManager.class);
    when(mockSeedStoreManager.getSeedStore(SeedStoreSubType.HAZELCAST)).thenReturn(mockSeedStore);
    when(mockSeed.getLocation()).thenReturn(LOCALHOST + ":" + PORT3);
    when(mockSeedStore.get()).thenReturn(seeds);

    // Register the Hazelcast state store factory with a local provider backed by the mocked seed store manager
    factory = new HazelcastStateStoreFactory();
    stateStoreProvider = new LocalStateStoreProvider(mockSeedStoreManager);
    stateStoreProvider.addStateStoreFactory(factory);
}
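A hedged sketch of how a test body might then exercise the provider wired up in this setUp(), assuming a state store configuration file and a running Hazelcast cluster are in place (as in the prepareConfigFiles() snippet further down); getStateStore() is assumed to return the loaded store, or null when none has been loaded.

// Load the state store through the Hazelcast factory registered above, then verify it is available
stateStoreProvider.loadStateStore();
StateStore loaded = stateStoreProvider.getStateStore();
assertNotNull(loaded);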
Use of io.prestosql.seedstore.SeedStoreManager in project hetu-core by openlookeng: class TestDynamicFilterSourceOperator, method prepareConfigFiles.
@BeforeTest
private void prepareConfigFiles() throws Exception {
    // Recreate the state store configuration file read by the launcher
    File launcherConfigFile = new File(STATE_STORE_CONFIGURATION_PATH);
    if (launcherConfigFile.exists()) {
        launcherConfigFile.delete();
    }
    launcherConfigFile.createNewFile();
    FileWriter configWriter = new FileWriter(STATE_STORE_CONFIGURATION_PATH);
    configWriter.write("state-store.type=hazelcast\n"
            + "state-store.name=test\n"
            + "state-store.cluster=test-cluster\n"
            + "hazelcast.discovery.mode=tcp-ip\n"
            + "hazelcast.discovery.port=7980\n");
    configWriter.close();

    // Mock a seed store that returns a single seed located at 127.0.0.1:6991
    Set<Seed> seeds = new HashSet<>();
    SeedStore mockSeedStore = mock(SeedStore.class);
    Seed mockSeed = mock(Seed.class);
    seeds.add(mockSeed);
    SeedStoreManager mockSeedStoreManager = mock(SeedStoreManager.class);
    when(mockSeedStoreManager.getSeedStore(SeedStoreSubType.HAZELCAST)).thenReturn(mockSeedStore);
    when(mockSeed.getLocation()).thenReturn("127.0.0.1:6991");
    when(mockSeedStore.get()).thenReturn(seeds);

    // Start a Hazelcast state store cluster on port 6991 and load the state store through the local provider
    StateStoreFactory factory = new HazelcastStateStoreFactory();
    stateStoreProvider = new LocalStateStoreProvider(mockSeedStoreManager);
    stateStoreProvider.addStateStoreFactory(factory);
    createStateStoreCluster("6991");
    stateStoreProvider.loadStateStore();
}
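One fragility worth noting: if write() throws, the FileWriter above is never closed. A hedged alternative for the same file and properties, using try-with-resources so the writer is closed on every path:

// Same config-file setup, but the writer is closed even if write() fails
File launcherConfigFile = new File(STATE_STORE_CONFIGURATION_PATH);
if (launcherConfigFile.exists()) {
    launcherConfigFile.delete();
}
launcherConfigFile.createNewFile();
try (FileWriter configWriter = new FileWriter(STATE_STORE_CONFIGURATION_PATH)) {
    configWriter.write("state-store.type=hazelcast\n"
            + "state-store.name=test\n"
            + "state-store.cluster=test-cluster\n"
            + "hazelcast.discovery.mode=tcp-ip\n"
            + "hazelcast.discovery.port=7980\n");
}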
Use of io.prestosql.seedstore.SeedStoreManager in project hetu-core by openlookeng: class TaskTestUtils, method createTestingPlanner.
public static LocalExecutionPlanner createTestingPlanner() {
    Metadata metadata = createTestMetadataManager();
    PageSourceManager pageSourceManager = new PageSourceManager();
    HetuMetaStoreManager hetuMetaStoreManager = new HetuMetaStoreManager();
    FeaturesConfig featuresConfig = new FeaturesConfig();
    CubeManager cubeManager = new CubeManager(featuresConfig, hetuMetaStoreManager);
    pageSourceManager.addConnectorPageSourceProvider(CONNECTOR_ID, new TestingPageSourceProvider());
    // we don't start the finalizer so nothing will be collected, which is ok for a test
    FinalizerService finalizerService = new FinalizerService();
    NodeScheduler nodeScheduler = new NodeScheduler(new LegacyNetworkTopology(), new InMemoryNodeManager(),
            new NodeSchedulerConfig().setIncludeCoordinator(true), new NodeTaskMap(finalizerService));
    NodePartitioningManager nodePartitioningManager = new NodePartitioningManager(nodeScheduler);
    PageFunctionCompiler pageFunctionCompiler = new PageFunctionCompiler(metadata, 0);
    NodeInfo nodeInfo = new NodeInfo("test");
    FileSystemClientManager fileSystemClientManager = new FileSystemClientManager();
    SeedStoreManager seedStoreManager = new SeedStoreManager(fileSystemClientManager);
    StateStoreProvider stateStoreProvider = new LocalStateStoreProvider(seedStoreManager);
    HeuristicIndexerManager heuristicIndexerManager = new HeuristicIndexerManager(new FileSystemClientManager(), new HetuMetaStoreManager());
    return new LocalExecutionPlanner(
            metadata, new TypeAnalyzer(new SqlParser(), metadata), Optional.empty(),
            pageSourceManager, new IndexManager(), nodePartitioningManager, new PageSinkManager(),
            new MockExchangeClientSupplier(), new ExpressionCompiler(metadata, pageFunctionCompiler),
            pageFunctionCompiler, new JoinFilterFunctionCompiler(metadata), new IndexJoinLookupStats(),
            new TaskManagerConfig(),
            new GenericSpillerFactory((types, spillContext, memoryContext) -> {
                throw new UnsupportedOperationException();
            }),
            (types, spillContext, memoryContext) -> {
                throw new UnsupportedOperationException();
            },
            (types, partitionFunction, spillContext, memoryContext) -> {
                throw new UnsupportedOperationException();
            },
            new PagesIndex.TestingFactory(false), new JoinCompiler(metadata), new LookupJoinOperators(),
            new OrderingCompiler(), nodeInfo, stateStoreProvider,
            new StateStoreListenerManager(stateStoreProvider), new DynamicFilterCacheManager(),
            heuristicIndexerManager, cubeManager);
}
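Note that all three spiller factories handed to the planner simply throw UnsupportedOperationException, so any test that triggers spilling through this planner fails fast. A test typically just obtains the planner from this helper and passes it to the component under test; a minimal, hedged usage sketch:

// Obtain the test planner and hand it to whatever task execution or scheduling component is under test
LocalExecutionPlanner planner = TaskTestUtils.createTestingPlanner();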
Use of io.prestosql.seedstore.SeedStoreManager in project boostkit-bigdata by kunpengcompute: class OmniLocalQueryRunner, method createDrivers.
private List<Driver> createDrivers(Session session, Plan plan, OutputFactory outputFactory, TaskContext taskContext) {
    if (printPlan) {
        System.out.println(PlanPrinter.textLogicalPlan(plan.getRoot(), plan.getTypes(), metadata, plan.getStatsAndCosts(), session, 0, false));
    }
    SubPlan subplan = planFragmenter.createSubPlans(session, plan, true, WarningCollector.NOOP);
    if (!subplan.getChildren().isEmpty()) {
        throw new AssertionError("Expected subplan to have no children");
    }
    NodeInfo nodeInfo = new NodeInfo("test");
    FileSystemClientManager fileSystemClientManager = new FileSystemClientManager();
    SeedStoreManager seedStoreManager = new SeedStoreManager(fileSystemClientManager);
    StateStoreProvider stateStoreProvider = new LocalStateStoreProvider(seedStoreManager);
    LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner(
            metadata, new TypeAnalyzer(sqlParser, metadata), Optional.empty(),
            pageSourceManager, indexManager, nodePartitioningManager, pageSinkManager,
            null, expressionCompiler, pageFunctionCompiler, joinFilterFunctionCompiler,
            new IndexJoinLookupStats(), this.taskManagerConfig, spillerFactory,
            singleStreamSpillerFactory, partitioningSpillerFactory, new PagesIndex.TestingFactory(false),
            joinCompiler, new LookupJoinOperators(), new OrderingCompiler(), nodeInfo,
            stateStoreProvider, new StateStoreListenerManager(stateStoreProvider),
            new DynamicFilterCacheManager(), heuristicIndexerManager, cubeManager);
    // plan query
    StageExecutionDescriptor stageExecutionDescriptor = subplan.getFragment().getStageExecutionDescriptor();
    LocalExecutionPlan localExecutionPlan = executionPlanner.plan(
            taskContext, stageExecutionDescriptor, subplan.getFragment().getRoot(),
            subplan.getFragment().getPartitioningScheme().getOutputLayout(), plan.getTypes(),
            subplan.getFragment().getPartitionedSources(), null, outputFactory,
            Optional.empty(), Optional.empty(), null);
    // generate sources
    List<TaskSource> sources = new ArrayList<>();
    long sequenceId = 0;
    for (TableScanNode tableScan : findTableScanNodes(subplan.getFragment().getRoot())) {
        TableHandle table = tableScan.getTable();
        SplitSource splitSource = splitManager.getSplits(
                session, table,
                stageExecutionDescriptor.isScanGroupedExecution(tableScan.getId()) ? GROUPED_SCHEDULING : UNGROUPED_SCHEDULING,
                null, Optional.empty(), Collections.emptyMap(), ImmutableSet.of(),
                tableScan.getStrategy() != ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_DEFAULT, tableScan.getId());
        ImmutableSet.Builder<ScheduledSplit> scheduledSplits = ImmutableSet.builder();
        while (!splitSource.isFinished()) {
            for (Split split : getNextBatch(splitSource)) {
                scheduledSplits.add(new ScheduledSplit(sequenceId++, tableScan.getId(), split));
            }
        }
        sources.add(new TaskSource(tableScan.getId(), scheduledSplits.build(), true));
    }
    // create drivers
    List<Driver> drivers = new ArrayList<>();
    Map<PlanNodeId, DriverFactory> driverFactoriesBySource = new HashMap<>();
    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        for (int i = 0; i < driverFactory.getDriverInstances().orElse(1); i++) {
            if (driverFactory.getSourceId().isPresent()) {
                checkState(driverFactoriesBySource.put(driverFactory.getSourceId().get(), driverFactory) == null);
            }
            else {
                DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), false).addDriverContext();
                Driver driver = driverFactory.createDriver(driverContext);
                drivers.add(driver);
            }
        }
    }
    // add sources to the drivers
    ImmutableSet<PlanNodeId> partitionedSources = ImmutableSet.copyOf(subplan.getFragment().getPartitionedSources());
    for (TaskSource source : sources) {
        DriverFactory driverFactory = driverFactoriesBySource.get(source.getPlanNodeId());
        checkState(driverFactory != null);
        boolean partitioned = partitionedSources.contains(driverFactory.getSourceId().get());
        for (ScheduledSplit split : source.getSplits()) {
            DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), partitioned).addDriverContext();
            Driver driver = driverFactory.createDriver(driverContext);
            driver.updateSource(new TaskSource(split.getPlanNodeId(), ImmutableSet.of(split), true));
            drivers.add(driver);
        }
    }
    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        driverFactory.noMoreDrivers();
    }
    return ImmutableList.copyOf(drivers);
}
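A hedged sketch of how the returned driver list is typically driven to completion, modeled on the execution loop used by LocalQueryRunner; Driver.isFinished() and Driver.process() are assumed to behave as in the upstream Presto Driver API, and the loop below is illustrative rather than taken from OmniLocalQueryRunner.

// Process every driver round-robin until all of them report they are finished
List<Driver> drivers = createDrivers(session, plan, outputFactory, taskContext);
boolean done = false;
while (!done) {
    boolean processed = false;
    for (Driver driver : drivers) {
        if (!driver.isFinished()) {
            driver.process();
            processed = true;
        }
    }
    // stop once a full pass makes no progress, i.e. every driver is finished
    done = !processed;
}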