Use of org.apache.druid.curator.PotentiallyGzippedCompressionProvider in project druid by druid-io.
In the class BatchServerInventoryViewTest, the setUp method:
@Before
public void setUp() throws Exception
{
  testingCluster = new TestingCluster(1);
  testingCluster.start();

  cf = CuratorFrameworkFactory.builder()
                              .connectString(testingCluster.getConnectString())
                              .retryPolicy(new ExponentialBackoffRetry(1, 10))
                              .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
                              .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(TEST_BASE_PATH);

  jsonMapper = TestHelper.makeJsonMapper();
  announcer = new Announcer(cf, Execs.directExecutor());
  announcer.start();

  DruidServerMetadata serverMetadata = new DruidServerMetadata("id", "host", null, Long.MAX_VALUE, ServerType.HISTORICAL, "tier", 0);
  ZkPathsConfig zkPathsConfig = new ZkPathsConfig()
  {
    @Override
    public String getBase()
    {
      return TEST_BASE_PATH;
    }
  };

  serverAnnouncer = new CuratorDataSegmentServerAnnouncer(serverMetadata, zkPathsConfig, announcer, jsonMapper);
  serverAnnouncer.announce();

  segmentAnnouncer = new BatchDataSegmentAnnouncer(
      serverMetadata,
      new BatchDataSegmentAnnouncerConfig()
      {
        @Override
        public int getSegmentsPerNode()
        {
          return 50;
        }
      },
      zkPathsConfig,
      announcer,
      jsonMapper
  );

  testSegments = Sets.newConcurrentHashSet();
  for (int i = 0; i < INITIAL_SEGMENTS; i++) {
    testSegments.add(makeSegment(i));
  }

  batchServerInventoryView = new BatchServerInventoryView(
      new ZkPathsConfig()
      {
        @Override
        public String getBase()
        {
          return TEST_BASE_PATH;
        }
      },
      cf,
      jsonMapper,
      Predicates.alwaysTrue(),
      "test"
  );
  batchServerInventoryView.start();
  inventoryUpdateCounter.set(0);

  filteredBatchServerInventoryView = new BatchServerInventoryView(
      new ZkPathsConfig()
      {
        @Override
        public String getBase()
        {
          return TEST_BASE_PATH;
        }
      },
      cf,
      jsonMapper,
      new Predicate<Pair<DruidServerMetadata, DataSegment>>()
      {
        @Override
        public boolean apply(@Nullable Pair<DruidServerMetadata, DataSegment> input)
        {
          return input.rhs.getInterval().getStart().isBefore(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS));
        }
      },
      "test"
  )
  {
    @Override
    protected DruidServer addInnerInventory(DruidServer container, String inventoryKey, Set<DataSegment> inventory)
    {
      DruidServer server = super.addInnerInventory(container, inventoryKey, inventory);
      inventoryUpdateCounter.incrementAndGet();
      return server;
    }
  };
  filteredBatchServerInventoryView.start();
}
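The fixture above starts several lifecycle components (announcers, inventory views, the curator client, the embedded cluster), so the matching teardown should stop them in roughly reverse order. A minimal sketch of such a tearDown, reusing the fields from the setUp above and assuming the stop/unannounce/close methods these Druid and Curator classes expose:

@After
public void tearDown() throws Exception
{
  // stop consumers before the announcers they watch
  batchServerInventoryView.stop();
  filteredBatchServerInventoryView.stop();
  serverAnnouncer.unannounce();
  announcer.stop();
  // close the client before tearing down the ZooKeeper cluster it talks to
  cf.close();
  testingCluster.stop();
}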
Use of org.apache.druid.curator.PotentiallyGzippedCompressionProvider in project druid by druid-io.
In the class RemoteTaskRunnerTestUtils, the setUp method:
void setUp() throws Exception
{
  testingCluster = new TestingCluster(1);
  testingCluster.start();

  cf = CuratorFrameworkFactory.builder()
                              .connectString(testingCluster.getConnectString())
                              .retryPolicy(new ExponentialBackoffRetry(1, 10))
                              .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
                              .build();
  cf.start();
  cf.blockUntilConnected();

  cf.create().creatingParentsIfNeeded().forPath(BASE_PATH);
  cf.create().creatingParentsIfNeeded().forPath(TASKS_PATH);
}
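This fixture passes false to PotentiallyGzippedCompressionProvider, so writes go to ZooKeeper uncompressed while reads still tolerate gzipped payloads; the BatchServerInventoryViewTest above passes true and compresses writes as well. A standalone sketch of that asymmetry, assuming the class follows Curator's CompressionProvider contract of compress(path, data) and decompress(path, data):

import java.nio.charset.StandardCharsets;
import org.apache.druid.curator.PotentiallyGzippedCompressionProvider;

public class CompressionProviderSketch
{
  public static void main(String[] args) throws Exception
  {
    byte[] raw = "{\"segment\":\"payload\"}".getBytes(StandardCharsets.UTF_8);

    // true: data handed to ZooKeeper is gzipped on the way out
    byte[] compressed = new PotentiallyGzippedCompressionProvider(true).compress("/p", raw);

    // false: writes pass through unchanged
    PotentiallyGzippedCompressionProvider passThrough = new PotentiallyGzippedCompressionProvider(false);
    byte[] stored = passThrough.compress("/p", raw);

    // reads detect gzip and unwrap it, so both forms decompress to the same bytes
    System.out.println(new String(passThrough.decompress("/p", compressed), StandardCharsets.UTF_8));
    System.out.println(new String(passThrough.decompress("/p", stored), StandardCharsets.UTF_8));
  }
}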
Use of org.apache.druid.curator.PotentiallyGzippedCompressionProvider in project druid by druid-io.
In the class WorkerResourceTest, the setUp method:
@Before
public void setUp() throws Exception
{
  testingCluster = new TestingCluster(1);
  testingCluster.start();

  cf = CuratorFrameworkFactory.builder()
                              .connectString(testingCluster.getConnectString())
                              .retryPolicy(new ExponentialBackoffRetry(1, 10))
                              .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
                              .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(BASE_PATH);

  worker = new Worker("http", "host", "ip", 3, "v1", WorkerConfig.DEFAULT_CATEGORY);
  curatorCoordinator = new WorkerCuratorCoordinator(
      JSON_MAPPER,
      new IndexerZkConfig(new ZkPathsConfig()
      {
        @Override
        public String getBase()
        {
          return BASE_PATH;
        }
      }, null, null, null, null),
      new RemoteTaskRunnerConfig(),
      cf,
      worker
  );
  curatorCoordinator.start();

  workerResource = new WorkerResource(worker, () -> curatorCoordinator, null, EasyMock.createNiceMock(WorkerTaskMonitor.class), ZkEnablementConfig.ENABLED);
}
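Once curatorCoordinator.start() returns, the worker should be announced in ZooKeeper under the announcements path that IndexerZkConfig derives from BASE_PATH. A minimal check along those lines; the exact subpath and child name here are assumptions for illustration, not taken from the test:

// hypothetical path; IndexerZkConfig derives the real one from its ZkPathsConfig
String announcementsPath = BASE_PATH + "/indexer/announcements";
// assumes the coordinator announces a node named after the worker host ("host" above)
Assert.assertEquals(ImmutableList.of("host"), cf.getChildren().forPath(announcementsPath));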
Use of org.apache.druid.curator.PotentiallyGzippedCompressionProvider in project druid by druid-io.
In the class OverlordTest, the setupServerAndCurator method:
private void setupServerAndCurator() throws Exception
{
  server = new TestingServer();
  timing = new Timing();
  curator = CuratorFrameworkFactory.builder()
      .connectString(server.getConnectString())
      .sessionTimeoutMs(timing.session()).connectionTimeoutMs(timing.connection())
      .retryPolicy(new RetryOneTime(1))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(true)).build();
}
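Unlike the fixtures above, setupServerAndCurator only builds the client and never starts it, so each caller decides when the connection comes up. A sketch of the usual follow-up, using standard Curator calls and the Timing helper created above:

curator.start();
// Timing.connection() supplies a wait bound consistent with the timeouts configured above
curator.blockUntilConnected(timing.connection(), TimeUnit.MILLISECONDS);
// ... exercise the Overlord against the TestingServer ...
curator.close();
server.close();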
Use of org.apache.druid.curator.PotentiallyGzippedCompressionProvider in project druid by druid-io.
In the class WorkerTaskMonitorTest, the setUp method:
@Before
public void setUp() throws Exception
{
  testingCluster = new TestingCluster(1);
  testingCluster.start();

  cf = CuratorFrameworkFactory.builder()
                              .connectString(testingCluster.getConnectString())
                              .retryPolicy(new ExponentialBackoffRetry(1, 10))
                              .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
                              .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(BASE_PATH);

  worker = new Worker("http", "worker", "localhost", 3, "0", WorkerConfig.DEFAULT_CATEGORY);
  workerCuratorCoordinator = new WorkerCuratorCoordinator(
      jsonMapper,
      new IndexerZkConfig(new ZkPathsConfig()
      {
        @Override
        public String getBase()
        {
          return BASE_PATH;
        }
      }, null, null, null, null),
      new TestRemoteTaskRunnerConfig(new Period("PT1S")),
      cf,
      worker
  );
  workerCuratorCoordinator.start();

  // Start a task monitor
  workerTaskMonitor = createTaskMonitor();
  TestTasks.registerSubtypes(jsonMapper);
  jsonMapper.registerSubtypes(new NamedType(TestRealtimeTask.class, "test_realtime"));
  workerTaskMonitor.start();

  task = TestTasks.immediateSuccess("test");
}
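The NamedType registration is what lets the worker round-trip task payloads polymorphically: the "test_realtime" type tag in a serialized task resolves back to TestRealtimeTask. A self-contained Jackson sketch of the same mechanism, using a hypothetical NoopTask rather than Druid's task types:

import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.NamedType;

public class NamedTypeSketch
{
  // the "type" property in the JSON selects the concrete subtype
  @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
  interface Task {}

  static class NoopTask implements Task {}

  public static void main(String[] args) throws Exception
  {
    ObjectMapper mapper = new ObjectMapper();
    // same registration pattern as jsonMapper.registerSubtypes(new NamedType(TestRealtimeTask.class, "test_realtime"))
    mapper.registerSubtypes(new NamedType(NoopTask.class, "noop"));

    Task task = mapper.readValue("{\"type\":\"noop\"}", Task.class);
    System.out.println(task.getClass().getSimpleName()); // prints NoopTask
  }
}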