Use of io.druid.curator.PotentiallyGzippedCompressionProvider in project druid by druid-io.
From class OverlordTest, method setupServerAndCurator:
private void setupServerAndCurator() throws Exception {
  // In-process ZooKeeper server for the test.
  server = new TestingServer();
  timing = new Timing();
  // true => gzip znode payloads on write; reads fall back to the raw
  // bytes when gunzip fails, hence "potentially" gzipped.
  curator = CuratorFrameworkFactory.builder()
      .connectString(server.getConnectString())
      .sessionTimeoutMs(timing.session())
      .connectionTimeoutMs(timing.connection())
      .retryPolicy(new RetryOneTime(1))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
      .build();
}
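Curator only routes data through the configured compression provider when a call opts in via compressed() on writes and decompressed() on reads. A minimal round-trip sketch using the client built above, assuming it runs inside the test after setupServerAndCurator() (the path "/demo" and the payload are invented for illustration and are not part of OverlordTest; java.nio.charset.StandardCharsets is assumed imported):

  curator.start();
  curator.blockUntilConnected();

  byte[] payload = "task-status".getBytes(StandardCharsets.UTF_8);
  // compressed() routes the write through the provider, so ZooKeeper
  // stores gzipped bytes.
  curator.create().compressed().forPath("/demo", payload);

  // decompressed() gunzips on read; the provider returns the bytes
  // unchanged if they were not gzipped (e.g. written by an older node),
  // so Arrays.equals(payload, roundTripped) holds either way.
  byte[] roundTripped = curator.getData().decompressed().forPath("/demo");

  curator.close();
  server.close();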
Another use of io.druid.curator.PotentiallyGzippedCompressionProvider in project druid by druid-io.
From class BatchDataSegmentAnnouncerTest, method setUp:
@Before
public void setUp() throws Exception {
  // Single-node ZooKeeper cluster for the test.
  testingCluster = new TestingCluster(1);
  testingCluster.start();

  // false => writes stay uncompressed, but reads still transparently
  // handle gzipped payloads written by compressing peers.
  cf = CuratorFrameworkFactory.builder()
      .connectString(testingCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
      .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(testBasePath);

  jsonMapper = new DefaultObjectMapper();

  // The Announcer manages the znodes under the test base path.
  announcer = new Announcer(cf, MoreExecutors.sameThreadExecutor());
  announcer.start();

  segmentReader = new SegmentReader(cf, jsonMapper);
  skipDimensionsAndMetrics = false;
  skipLoadSpec = false;

  segmentAnnouncer = new BatchDataSegmentAnnouncer(
      new DruidServerMetadata("id", "host", Long.MAX_VALUE, "type", "tier", 0),
      // Config capping each batch znode at 50 segments; the byte limit
      // and skip flags are read from mutable test fields.
      new BatchDataSegmentAnnouncerConfig() {
        @Override
        public int getSegmentsPerNode() {
          return 50;
        }

        @Override
        public long getMaxBytesPerNode() {
          return maxBytesPerNode.get();
        }

        @Override
        public boolean isSkipDimensionsAndMetrics() {
          return skipDimensionsAndMetrics;
        }

        @Override
        public boolean isSkipLoadSpec() {
          return skipLoadSpec;
        }
      },
      // Root all ZK paths at the test base path.
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return testBasePath;
        }
      },
      announcer,
      jsonMapper
  );
  segmentAnnouncer.start();

  // 100 segments to announce; at 50 per node they span multiple znodes.
  testSegments = Sets.newHashSet();
  for (int i = 0; i < 100; i++) {
    testSegments.add(makeSegment(i));
  }
}
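The fixture above should be released in reverse order of construction. A minimal sketch of a matching tearDown, assuming these are the only resources the test holds (the actual method in BatchDataSegmentAnnouncerTest may differ):

  @After
  public void tearDown() throws Exception {
    // Stop announcers before closing the Curator client they use.
    segmentAnnouncer.stop();
    announcer.stop();
    cf.close();
    testingCluster.stop();
  }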