
Example 96 with ExponentialBackoffRetry

Use of org.apache.curator.retry.ExponentialBackoffRetry in project storm by apache.

The class DynamicBrokersReaderTest, method setUp:

@Before
public void setUp() throws Exception {
    server = new TestingServer();
    String connectionString = server.getConnectString();
    Map<String, Object> conf = new HashMap<>();
    conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
    conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 1000);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
    ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
    zookeeper = CuratorFrameworkFactory.newClient(connectionString, retryPolicy);
    dynamicBrokersReader = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
    Map<String, Object> conf2 = new HashMap<>();
    conf2.putAll(conf);
    conf2.put("kafka.topic.wildcard.match", true);
    wildCardBrokerReader = new DynamicBrokersReader(conf2, connectionString, masterPath, "^test.*$");
    zookeeper.start();
}
Also used: TestingServer(org.apache.curator.test.TestingServer) HashMap(java.util.HashMap) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) Before(org.junit.Before)
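
For context on the (1000, 3) arguments: the two-argument ExponentialBackoffRetry takes a base sleep time in milliseconds and a maximum number of retries, and each retry sleeps for the base time multiplied by a random factor whose ceiling doubles with every attempt. A minimal sketch of that schedule (the formula mirrors Curator's documented behavior; the class name and printout are illustrative only):

import java.util.concurrent.ThreadLocalRandom;

public class BackoffScheduleSketch {
    public static void main(String[] args) {
        int baseSleepTimeMs = 1000; // same base as the test above
        int maxRetries = 3;
        for (int retryCount = 0; retryCount < maxRetries; retryCount++) {
            // Random multiplier drawn from [1, 2^(retryCount + 1)).
            int multiplier = Math.max(1, ThreadLocalRandom.current().nextInt(1 << (retryCount + 1)));
            System.out.printf("retry %d: sleep %d ms%n", retryCount + 1, baseSleepTimeMs * multiplier);
        }
    }
}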

Example 97 with ExponentialBackoffRetry

Use of org.apache.curator.retry.ExponentialBackoffRetry in project druid by druid-io.

The class RemoteTaskRunnerTestUtils, method setUp:

void setUp() throws Exception {
    testingCluster = new TestingCluster(1);
    testingCluster.start();
    cf = CuratorFrameworkFactory.builder()
        .connectString(testingCluster.getConnectString())
        .retryPolicy(new ExponentialBackoffRetry(1, 10))
        .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
        .build();
    cf.start();
    cf.blockUntilConnected();
    cf.create().creatingParentsIfNeeded().forPath(basePath);
    cf.create().creatingParentsIfNeeded().forPath(tasksPath);
}
Also used: TestingCluster(org.apache.curator.test.TestingCluster) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) PotentiallyGzippedCompressionProvider(io.druid.curator.PotentiallyGzippedCompressionProvider)
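
The utils class shown here doesn't include its teardown; a plausible counterpart over the same fields (CloseableUtils.closeQuietly is Curator's own helper for this) might look like:

import org.apache.curator.utils.CloseableUtils;

void tearDown() throws Exception {
    // Close the client first so no in-flight retries outlive the cluster.
    CloseableUtils.closeQuietly(cf);
    testingCluster.stop();
}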

Example 98 with ExponentialBackoffRetry

Use of org.apache.curator.retry.ExponentialBackoffRetry in project druid by druid-io.

The class BatchDataSegmentAnnouncerTest, method setUp:

@Before
public void setUp() throws Exception {
    testingCluster = new TestingCluster(1);
    testingCluster.start();
    cf = CuratorFrameworkFactory.builder()
        .connectString(testingCluster.getConnectString())
        .retryPolicy(new ExponentialBackoffRetry(1, 10))
        .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
        .build();
    cf.start();
    cf.blockUntilConnected();
    cf.create().creatingParentsIfNeeded().forPath(testBasePath);
    jsonMapper = new DefaultObjectMapper();
    announcer = new Announcer(cf, MoreExecutors.sameThreadExecutor());
    announcer.start();
    segmentReader = new SegmentReader(cf, jsonMapper);
    skipDimensionsAndMetrics = false;
    skipLoadSpec = false;
    segmentAnnouncer = new BatchDataSegmentAnnouncer(new DruidServerMetadata("id", "host", Long.MAX_VALUE, "type", "tier", 0), new BatchDataSegmentAnnouncerConfig() {

        @Override
        public int getSegmentsPerNode() {
            return 50;
        }

        @Override
        public long getMaxBytesPerNode() {
            return maxBytesPerNode.get();
        }

        @Override
        public boolean isSkipDimensionsAndMetrics() {
            return skipDimensionsAndMetrics;
        }

        @Override
        public boolean isSkipLoadSpec() {
            return skipLoadSpec;
        }
    }, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return testBasePath;
        }
    }, announcer, jsonMapper);
    segmentAnnouncer.start();
    testSegments = Sets.newHashSet();
    for (int i = 0; i < 100; i++) {
        testSegments.add(makeSegment(i));
    }
}
Also used: TestingCluster(org.apache.curator.test.TestingCluster) BatchDataSegmentAnnouncer(io.druid.server.coordination.BatchDataSegmentAnnouncer) Announcer(io.druid.curator.announcement.Announcer) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) ZkPathsConfig(io.druid.server.initialization.ZkPathsConfig) BatchDataSegmentAnnouncerConfig(io.druid.server.initialization.BatchDataSegmentAnnouncerConfig) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) DruidServerMetadata(io.druid.server.coordination.DruidServerMetadata) PotentiallyGzippedCompressionProvider(io.druid.curator.PotentiallyGzippedCompressionProvider) Before(org.junit.Before)
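
A hypothetical follow-on to this setUp, announcing and then withdrawing one of the generated segments (announceSegment and unannounceSegment are the announcer's public API; the assertion step is only sketched):

// makeSegment is the test helper referenced in the loop above.
DataSegment segment = makeSegment(0);
segmentAnnouncer.announceSegment(segment);
// ... read back through segmentReader and assert the segment round-trips ...
segmentAnnouncer.unannounceSegment(segment);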

Example 99 with ExponentialBackoffRetry

Use of org.apache.curator.retry.ExponentialBackoffRetry in project flink by apache.

The class KafkaTestEnvironmentImpl, method createCuratorClient:

/**
 * Only for the 0.8 server do we need access to the ZooKeeper client.
 */
public CuratorFramework createCuratorClient() {
    RetryPolicy retryPolicy = new ExponentialBackoffRetry(100, 10);
    CuratorFramework curatorClient = CuratorFrameworkFactory.newClient(standardProps.getProperty("zookeeper.connect"), retryPolicy);
    curatorClient.start();
    return curatorClient;
}
Also used: CuratorFramework(org.apache.curator.framework.CuratorFramework) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) RetryPolicy(org.apache.curator.RetryPolicy)
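
A hypothetical use of the returned client, listing the broker registry that Kafka 0.8 keeps in ZooKeeper (/brokers/ids is Kafka's convention; error handling omitted):

CuratorFramework client = createCuratorClient();
try {
    // Each child znode under /brokers/ids is one live broker id.
    List<String> brokerIds = client.getChildren().forPath("/brokers/ids");
    System.out.println("live brokers: " + brokerIds);
} finally {
    client.close();
}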

Example 100 with ExponentialBackoffRetry

Use of org.apache.curator.retry.ExponentialBackoffRetry in project storm by apache.

The class SampleDruidBeamFactoryImpl, method makeBeam:

@Override
public Beam<Map<String, Object>> makeBeam(Map<?, ?> conf, IMetricsContext metrics) {
    // Your overlord's druid.service
    final String indexService = "druid/overlord";
    // Your overlord's druid.discovery.curator.path
    final String discoveryPath = "/druid/discovery";
    final String dataSource = "test";
    final List<String> dimensions = ImmutableList.of("publisher", "advertiser");
    List<AggregatorFactory> aggregator = ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("click"));
    // Tranquility needs to be able to extract timestamps from your object type (in this case, Map<String, Object>).
    final Timestamper<Map<String, Object>> timestamper = new Timestamper<Map<String, Object>>() {

        @Override
        public DateTime timestamp(Map<String, Object> theMap) {
            return new DateTime(theMap.get("timestamp"));
        }
    };
    // Tranquility uses ZooKeeper (through Curator) for coordination.
    // We can use the Storm conf to get config values.
    final CuratorFramework curator = CuratorFrameworkFactory.builder()
        .connectString((String) conf.get("druid.tranquility.zk.connect"))
        .retryPolicy(new ExponentialBackoffRetry(1000, 20, 30000))
        .build();
    curator.start();
    // The JSON serialization of your object must have a timestamp field in a format that Druid understands. By default,
    // Druid expects the field to be called "timestamp" and to be an ISO8601 timestamp.
    final TimestampSpec timestampSpec = new TimestampSpec("timestamp", "auto", null);
    // Tranquility needs to be able to serialize your object type to JSON for transmission to Druid. By default this
    // is done with Jackson. If you want to provide an alternate serializer, you can do so via .objectWriter(...).
    // In this case, we won't provide one, so we're just using Jackson.
    final Beam<Map<String, Object>> beam = DruidBeams.builder(timestamper)
        .curator(curator)
        .discoveryPath(discoveryPath)
        .location(DruidLocation.create(indexService, dataSource))
        .timestampSpec(timestampSpec)
        .rollup(DruidRollup.create(DruidDimensions.specific(dimensions), aggregator, QueryGranularities.MINUTE))
        .tuning(ClusteredBeamTuning.builder()
            .segmentGranularity(Granularity.HOUR)
            .windowPeriod(new Period("PT10M"))
            .partitions(1)
            .replicants(1)
            .build())
        .druidBeamConfig(DruidBeamConfig.builder()
            .indexRetryPeriod(new Period("PT10M"))
            .build())
        .buildBeam();
    return beam;
}
Also used: ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) Period(org.joda.time.Period) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) DateTime(org.joda.time.DateTime) Timestamper(com.metamx.tranquility.typeclass.Timestamper) CuratorFramework(org.apache.curator.framework.CuratorFramework) TimestampSpec(io.druid.data.input.impl.TimestampSpec) Map(java.util.Map)
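
Unlike the earlier examples, this one uses the three-argument constructor, which also caps each individual sleep. For reference (values copied from the code above):

// ExponentialBackoffRetry(baseSleepTimeMs, maxRetries, maxSleepMs)
// Without the cap, late retries could back off for many minutes; the third
// argument bounds every sleep at 30 seconds.
RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 20, 30000);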

Aggregations

ExponentialBackoffRetry (org.apache.curator.retry.ExponentialBackoffRetry): 181
CuratorFramework (org.apache.curator.framework.CuratorFramework): 107
RetryPolicy (org.apache.curator.RetryPolicy): 46
Before (org.junit.Before): 26
TestingCluster (org.apache.curator.test.TestingCluster): 23
Test (org.testng.annotations.Test): 23
IOException (java.io.IOException): 18
TestingServer (org.apache.curator.test.TestingServer): 18
Timing (org.apache.curator.test.Timing): 16
CountDownLatch (java.util.concurrent.CountDownLatch): 15
CuratorFrameworkFactory (org.apache.curator.framework.CuratorFrameworkFactory): 12
ACLProvider (org.apache.curator.framework.api.ACLProvider): 12
Test (org.junit.Test): 12
ArrayList (java.util.ArrayList): 11
ConnectionState (org.apache.curator.framework.state.ConnectionState): 11
ExecutorService (java.util.concurrent.ExecutorService): 10
ConnectionStateListener (org.apache.curator.framework.state.ConnectionStateListener): 10
TestingServerStarter (io.pravega.test.common.TestingServerStarter): 9
KeeperException (org.apache.zookeeper.KeeperException): 8
HashMap (java.util.HashMap): 7