Use of org.apache.curator.test.TestingCluster in project druid by druid-io.
From the class BatchServerInventoryViewTest, method setUp:
@Before
public void setUp() throws Exception {
  // Spin up a single-node in-process ZooKeeper cluster for the test.
  testingCluster = new TestingCluster(1);
  testingCluster.start();

  // Curator client with znode compression enabled, to match the announcers under test.
  cf = CuratorFrameworkFactory.builder()
      .connectString(testingCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
      .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(TEST_BASE_PATH);

  jsonMapper = TestHelper.makeJsonMapper();

  // Announce a historical server, then a batch of segments, under TEST_BASE_PATH.
  announcer = new Announcer(cf, Execs.directExecutor());
  announcer.start();
  DruidServerMetadata serverMetadata = new DruidServerMetadata("id", "host", null, Long.MAX_VALUE, ServerType.HISTORICAL, "tier", 0);
  ZkPathsConfig zkPathsConfig = new ZkPathsConfig() {
    @Override
    public String getBase() {
      return TEST_BASE_PATH;
    }
  };
  serverAnnouncer = new CuratorDataSegmentServerAnnouncer(serverMetadata, zkPathsConfig, announcer, jsonMapper);
  serverAnnouncer.announce();
  segmentAnnouncer = new BatchDataSegmentAnnouncer(serverMetadata, new BatchDataSegmentAnnouncerConfig() {
    @Override
    public int getSegmentsPerNode() {
      return 50;
    }
  }, zkPathsConfig, announcer, jsonMapper);

  testSegments = Sets.newConcurrentHashSet();
  for (int i = 0; i < INITIAL_SEGMENTS; i++) {
    testSegments.add(makeSegment(i));
  }

  // An unfiltered inventory view that sees every announced segment.
  batchServerInventoryView = new BatchServerInventoryView(new ZkPathsConfig() {
    @Override
    public String getBase() {
      return TEST_BASE_PATH;
    }
  }, cf, jsonMapper, Predicates.alwaysTrue(), "test");
  batchServerInventoryView.start();

  inventoryUpdateCounter.set(0);

  // A filtered view that only accepts segments whose interval starts before a
  // cutoff, and that counts inventory updates so tests can wait on them.
  filteredBatchServerInventoryView = new BatchServerInventoryView(new ZkPathsConfig() {
    @Override
    public String getBase() {
      return TEST_BASE_PATH;
    }
  }, cf, jsonMapper, new Predicate<Pair<DruidServerMetadata, DataSegment>>() {
    @Override
    public boolean apply(@Nullable Pair<DruidServerMetadata, DataSegment> input) {
      return input.rhs.getInterval().getStart().isBefore(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS));
    }
  }, "test") {
    @Override
    protected DruidServer addInnerInventory(DruidServer container, String inventoryKey, Set<DataSegment> inventory) {
      DruidServer server = super.addInnerInventory(container, inventoryKey, inventory);
      inventoryUpdateCounter.incrementAndGet();
      return server;
    }
  };
  filteredBatchServerInventoryView.start();
}
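Everything created above must be released in reverse order after each test. The actual teardown is not shown on this page; the following is a minimal sketch, assuming only the fields assigned in this setUp (the real test's teardown may differ):

@After
public void tearDown() throws Exception {
  // Stop the ZooKeeper watchers before closing the client they use.
  filteredBatchServerInventoryView.stop();
  batchServerInventoryView.stop();
  serverAnnouncer.unannounce();
  announcer.stop();
  cf.close();            // release the Curator client
  testingCluster.stop(); // shut down the in-process ZooKeeper cluster
}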
Use of org.apache.curator.test.TestingCluster in project druid by druid-io.
From the class RemoteTaskRunnerTestUtils, method setUp:
void setUp() throws Exception {
  // One-node in-process ZooKeeper cluster.
  testingCluster = new TestingCluster(1);
  testingCluster.start();

  // Curator client with znode compression disabled.
  cf = CuratorFrameworkFactory.builder()
      .connectString(testingCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
      .build();
  cf.start();
  cf.blockUntilConnected();

  // Pre-create the base paths the remote task runner expects.
  cf.create().creatingParentsIfNeeded().forPath(BASE_PATH);
  cf.create().creatingParentsIfNeeded().forPath(TASKS_PATH);
}
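A test-utils class like this usually exposes a matching teardown. A plausible counterpart, sketched here under the assumption that only the two fields above need releasing:

void tearDown() throws Exception {
  cf.close();            // close the Curator client first
  testingCluster.stop(); // then the in-process ZooKeeper cluster
}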
Use of org.apache.curator.test.TestingCluster in project druid by druid-io.
From the class WorkerResourceTest, method setUp:
@Before
public void setUp() throws Exception {
  testingCluster = new TestingCluster(1);
  testingCluster.start();

  cf = CuratorFrameworkFactory.builder()
      .connectString(testingCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
      .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(BASE_PATH);

  // Register a worker in ZooKeeper so the resource under test has something to expose.
  worker = new Worker("http", "host", "ip", 3, "v1", WorkerConfig.DEFAULT_CATEGORY);
  curatorCoordinator = new WorkerCuratorCoordinator(JSON_MAPPER, new IndexerZkConfig(new ZkPathsConfig() {
    @Override
    public String getBase() {
      return BASE_PATH;
    }
  }, null, null, null, null), new RemoteTaskRunnerConfig(), cf, worker);
  curatorCoordinator.start();

  workerResource = new WorkerResource(worker, () -> curatorCoordinator, null, EasyMock.createNiceMock(WorkerTaskMonitor.class), ZkEnablementConfig.ENABLED);
}
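As in the other tests, the coordinator and clients should be stopped in reverse order of creation. A hedged sketch of the corresponding teardown, assuming the fields above:

@After
public void tearDown() throws Exception {
  curatorCoordinator.stop(); // removes the worker's announcement znodes
  cf.close();
  testingCluster.stop();
}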
Use of org.apache.curator.test.TestingCluster in project helios by spotify.
From the class ZooKeeperTestingClusterManager, method start0:
private void start0() {
  // Allocate specs, addresses, and per-peer Curator clients up front.
  zkPeers = createPeers(3);
  zkAddresses = allocateAddresses(zkPeers);
  peerCurators = createCurators(zkAddresses);

  // Silence ZooKeeper's JMX/log4j bridge in tests.
  System.setProperty("zookeeper.jmx.log4j.disable", "true");

  // A three-node in-process ZooKeeper cluster built from the pre-allocated peer specs.
  cluster = new TestingCluster(zkPeers);
  zkServers = cluster.getServers();
  try {
    cluster.start();
  } catch (Exception e) {
    // Tear down whatever came up before rethrowing.
    stop();
    Throwables.throwIfUnchecked(e);
    throw new RuntimeException(e);
  }
}
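The stop() invoked on failure is not shown on this page. A minimal sketch of what such a method typically does, written here as a hypothetical stop0() under the assumption that peerCurators holds CuratorFramework clients and cluster is the TestingCluster above (the helios implementation may differ):

private void stop0() { // hypothetical counterpart to start0()
  for (final CuratorFramework curator : peerCurators) {
    curator.close();
  }
  try {
    cluster.close(); // stops all peers and cleans up their temp directories
  } catch (IOException ignored) {
    // best-effort shutdown in tests
  }
}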
Use of org.apache.curator.test.TestingCluster in project druid by druid-io.
From the class TestKafkaExtractionCluster, method setUp:
@Before
public void setUp() throws Exception {
  // In-process ZooKeeper for the Kafka broker; registered with the closer so
  // it is shut down when the test finishes.
  zkServer = new TestingCluster(1);
  zkServer.start();
  closer.register(() -> {
    zkServer.stop();
  });

  kafkaServer = new KafkaServer(getBrokerProperties(), Time.SYSTEM, Some.apply(StringUtils.format("TestingBroker[%d]-", 1)), false);
  kafkaServer.startup();
  closer.register(() -> {
    kafkaServer.shutdown();
    kafkaServer.awaitShutdown();
  });
  log.info("---------------------------Started Kafka Broker ---------------------------");

  log.info("---------------------------Publish Messages to topic-----------------------");
  publishRecordsToKafka();

  System.setProperty("druid.extensions.searchCurrentClassloader", "false");
  injector = Initialization.makeInjectorWithModules(
      GuiceInjectors.makeStartupInjector(),
      ImmutableList.of(
          new Module() {
            @Override
            public void configure(Binder binder) {
              binder.bindConstant().annotatedWith(Names.named("serviceName")).to("test");
              binder.bindConstant().annotatedWith(Names.named("servicePort")).to(0);
              binder.bindConstant().annotatedWith(Names.named("tlsServicePort")).to(-1);
            }
          },
          // These injections fail under IntelliJ but are required for Maven.
          new NamespaceExtractionModule(),
          new KafkaExtractionNamespaceModule()
      )
  );
  mapper = injector.getInstance(ObjectMapper.class);
  log.info("--------------------------- placed default item via producer ---------------------------");

  // Round-trip the factory through JSON to verify serde, then start it.
  final Map<String, String> consumerProperties = getConsumerProperties();
  final KafkaLookupExtractorFactory kafkaLookupExtractorFactory = new KafkaLookupExtractorFactory(null, TOPIC_NAME, consumerProperties);
  factory = (KafkaLookupExtractorFactory) mapper.readValue(mapper.writeValueAsString(kafkaLookupExtractorFactory), LookupExtractorFactory.class);
  Assert.assertEquals(kafkaLookupExtractorFactory.getKafkaTopic(), factory.getKafkaTopic());
  Assert.assertEquals(kafkaLookupExtractorFactory.getKafkaProperties(), factory.getKafkaProperties());
  factory.start();
  closer.register(() -> factory.close());
  log.info("--------------------------- started rename manager ---------------------------");
}
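Because every resource was registered with the closer, teardown collapses to a single call that unwinds them in reverse registration order (lookup factory, then Kafka broker, then ZooKeeper). A minimal sketch, assuming closer is a Closer field and JUnit's @After is available:

@After
public void tearDown() throws IOException {
  closer.close(); // closes registered resources in reverse order
}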