Use of org.apache.curator.retry.ExponentialBackoffRetry in project storm by apache.
From the class DynamicBrokersReaderTest, method setUp:
@Before
public void setUp() throws Exception {
    server = new TestingServer();
    String connectionString = server.getConnectString();
    Map<String, Object> conf = new HashMap<>();
    conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
    conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 1000);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
    ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
    zookeeper = CuratorFrameworkFactory.newClient(connectionString, retryPolicy);
    dynamicBrokersReader = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
    Map<String, Object> conf2 = new HashMap<>();
    conf2.putAll(conf);
    conf2.put("kafka.topic.wildcard.match", true);
    wildCardBrokerReader = new DynamicBrokersReader(conf2, connectionString, masterPath, "^test.*$");
    zookeeper.start();
}
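The two ExponentialBackoffRetry constructors that appear across these examples differ only in whether the per-retry sleep is capped. Below is a minimal, self-contained sketch of both forms; the connect string is a placeholder and is not taken from the test above.

import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class RetryPolicySketch {
    public static void main(String[] args) throws Exception {
        // Two-argument form, as in the test above: base sleep of 1000 ms, at most 3 retries.
        // Each retry sleeps roughly the base sleep time multiplied by a random factor that
        // grows exponentially with the retry count.
        RetryPolicy bounded = new ExponentialBackoffRetry(1000, 3);

        // Three-argument form (used in the Tranquility example further down): the third
        // argument puts an explicit ceiling, here 30 seconds, on any single sleep.
        RetryPolicy capped = new ExponentialBackoffRetry(1000, 20, 30000);

        // Placeholder address; the tests above use TestingServer.getConnectString() instead.
        CuratorFramework client = CuratorFrameworkFactory.newClient("localhost:2181", bounded);
        client.start();
        client.close();
    }
}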
Use of org.apache.curator.retry.ExponentialBackoffRetry in project druid by druid-io.
From the class RemoteTaskRunnerTestUtils, method setUp:
void setUp() throws Exception {
    testingCluster = new TestingCluster(1);
    testingCluster.start();
    cf = CuratorFrameworkFactory.builder()
            .connectString(testingCluster.getConnectString())
            .retryPolicy(new ExponentialBackoffRetry(1, 10))
            .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
            .build();
    cf.start();
    cf.blockUntilConnected();
    cf.create().creatingParentsIfNeeded().forPath(basePath);
    cf.create().creatingParentsIfNeeded().forPath(tasksPath);
}
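The same pattern in isolation, with the teardown that test utilities like this perform elsewhere (close the client before the cluster). This is a sketch with placeholder paths, not code from RemoteTaskRunnerTestUtils, and it omits the Druid-specific compression provider.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.test.TestingCluster;

public class TestingClusterSketch {
    public static void main(String[] args) throws Exception {
        TestingCluster cluster = new TestingCluster(1);
        cluster.start();
        CuratorFramework cf = CuratorFrameworkFactory.builder()
                .connectString(cluster.getConnectString())
                .retryPolicy(new ExponentialBackoffRetry(1, 10))
                .build();
        cf.start();
        cf.blockUntilConnected();
        cf.create().creatingParentsIfNeeded().forPath("/example/base"); // placeholder path
        System.out.println(cf.checkExists().forPath("/example/base") != null); // true
        cf.close();
        cluster.close();
    }
}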
Use of org.apache.curator.retry.ExponentialBackoffRetry in project druid by druid-io.
From the class BatchDataSegmentAnnouncerTest, method setUp:
@Before
public void setUp() throws Exception {
    testingCluster = new TestingCluster(1);
    testingCluster.start();
    cf = CuratorFrameworkFactory.builder()
            .connectString(testingCluster.getConnectString())
            .retryPolicy(new ExponentialBackoffRetry(1, 10))
            .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
            .build();
    cf.start();
    cf.blockUntilConnected();
    cf.create().creatingParentsIfNeeded().forPath(testBasePath);
    jsonMapper = new DefaultObjectMapper();
    announcer = new Announcer(cf, MoreExecutors.sameThreadExecutor());
    announcer.start();
    segmentReader = new SegmentReader(cf, jsonMapper);
    skipDimensionsAndMetrics = false;
    skipLoadSpec = false;
    segmentAnnouncer = new BatchDataSegmentAnnouncer(
            new DruidServerMetadata("id", "host", Long.MAX_VALUE, "type", "tier", 0),
            new BatchDataSegmentAnnouncerConfig() {
                @Override
                public int getSegmentsPerNode() {
                    return 50;
                }

                @Override
                public long getMaxBytesPerNode() {
                    return maxBytesPerNode.get();
                }

                @Override
                public boolean isSkipDimensionsAndMetrics() {
                    return skipDimensionsAndMetrics;
                }

                @Override
                public boolean isSkipLoadSpec() {
                    return skipLoadSpec;
                }
            },
            new ZkPathsConfig() {
                @Override
                public String getBase() {
                    return testBasePath;
                }
            },
            announcer,
            jsonMapper);
    segmentAnnouncer.start();
    testSegments = Sets.newHashSet();
    for (int i = 0; i < 100; i++) {
        testSegments.add(makeSegment(i));
    }
}
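For context on the overridden config: BatchDataSegmentAnnouncer packs segment metadata into znodes, announcing at most getSegmentsPerNode() segments (and at most getMaxBytesPerNode() bytes) per znode. A trivial illustration of what that means for this test, ignoring the byte limit:

public class BatchMath {
    public static void main(String[] args) {
        // The setUp above creates 100 test segments and caps each znode at 50 segments,
        // so announcing all of them needs at least ceil(100 / 50) = 2 batch znodes.
        int segments = 100;
        int segmentsPerNode = 50;
        int minZnodes = (segments + segmentsPerNode - 1) / segmentsPerNode;
        System.out.println(minZnodes); // 2
    }
}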
Use of org.apache.curator.retry.ExponentialBackoffRetry in project flink by apache.
From the class KafkaTestEnvironmentImpl, method createCuratorClient:
/**
 * Only for the 0.8 server do we need access to the ZooKeeper client.
 */
public CuratorFramework createCuratorClient() {
    RetryPolicy retryPolicy = new ExponentialBackoffRetry(100, 10);
    CuratorFramework curatorClient = CuratorFrameworkFactory.newClient(
            standardProps.getProperty("zookeeper.connect"), retryPolicy);
    curatorClient.start();
    return curatorClient;
}
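One typical use of such a client against a 0.8 broker is reading broker registrations, which Kafka keeps under /brokers/ids in ZooKeeper. The sketch below is illustrative rather than Flink code, and the connect string is a placeholder for the value read from standardProps.

import java.util.List;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class ListBrokers {
    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "localhost:2181", new ExponentialBackoffRetry(100, 10)); // placeholder address
        client.start();
        client.blockUntilConnected();
        // Each child znode under /brokers/ids is the id of a live broker.
        List<String> brokerIds = client.getChildren().forPath("/brokers/ids");
        System.out.println("registered brokers: " + brokerIds);
        client.close();
    }
}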
Use of org.apache.curator.retry.ExponentialBackoffRetry in project storm by apache.
From the class SampleDruidBeamFactoryImpl, method makeBeam:
@Override
public Beam<Map<String, Object>> makeBeam(Map<?, ?> conf, IMetricsContext metrics) {
    // Your overlord's druid.service
    final String indexService = "druid/overlord";
    // Your overlord's druid.discovery.curator.path
    final String discoveryPath = "/druid/discovery";
    final String dataSource = "test";
    final List<String> dimensions = ImmutableList.of("publisher", "advertiser");
    List<AggregatorFactory> aggregator = ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("click"));
    // Tranquility needs to be able to extract timestamps from your object type (in this case, Map<String, Object>).
    final Timestamper<Map<String, Object>> timestamper = new Timestamper<Map<String, Object>>() {
        @Override
        public DateTime timestamp(Map<String, Object> theMap) {
            return new DateTime(theMap.get("timestamp"));
        }
    };
    // Tranquility uses ZooKeeper (through Curator) for coordination.
    final CuratorFramework curator = CuratorFrameworkFactory.builder()
            // we can use Storm conf to get config values
            .connectString((String) conf.get("druid.tranquility.zk.connect"))
            .retryPolicy(new ExponentialBackoffRetry(1000, 20, 30000))
            .build();
    curator.start();
    // The JSON serialization of your object must have a timestamp field in a format that Druid understands.
    // By default, Druid expects the field to be called "timestamp" and to be an ISO8601 timestamp.
    final TimestampSpec timestampSpec = new TimestampSpec("timestamp", "auto", null);
    // Tranquility needs to be able to serialize your object type to JSON for transmission to Druid. By default
    // this is done with Jackson. If you want to provide an alternate serializer, you can provide your own via
    // .objectWriter(...). In this case, we won't provide one, so we're just using Jackson.
    final Beam<Map<String, Object>> beam = DruidBeams
            .builder(timestamper)
            .curator(curator)
            .discoveryPath(discoveryPath)
            .location(DruidLocation.create(indexService, dataSource))
            .timestampSpec(timestampSpec)
            .rollup(DruidRollup.create(DruidDimensions.specific(dimensions), aggregator, QueryGranularities.MINUTE))
            .tuning(ClusteredBeamTuning.builder()
                    .segmentGranularity(Granularity.HOUR)
                    .windowPeriod(new Period("PT10M"))
                    .partitions(1)
                    .replicants(1)
                    .build())
            .druidBeamConfig(DruidBeamConfig.builder()
                    .indexRetryPeriod(new Period("PT10M"))
                    .build())
            .buildBeam();
    return beam;
}
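For reference, a sketch of the event shape this beam expects; the field values are made up. The TimestampSpec above reads an ISO8601 "timestamp" field, "publisher" and "advertiser" are the declared dimensions, and the "click" metric is a row count, so no extra input field is needed for it.

import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.joda.time.DateTime;

public class SampleEvent {
    public static Map<String, Object> make() {
        return ImmutableMap.<String, Object>of(
                "timestamp", new DateTime().toString(), // ISO8601 timestamp
                "publisher", "example.com",
                "advertiser", "acme");
    }
}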