Use of org.apache.flink.shaded.curator5.org.apache.curator.retry.ExponentialBackoffRetry in project druid by druid-io.
The class BatchServerInventoryViewTest, method setUp.
@Before
public void setUp() throws Exception {
    testingCluster = new TestingCluster(1);
    testingCluster.start();
    cf = CuratorFrameworkFactory.builder()
        .connectString(testingCluster.getConnectString())
        .retryPolicy(new ExponentialBackoffRetry(1, 10))
        .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
        .build();
    cf.start();
    cf.blockUntilConnected();
    cf.create().creatingParentsIfNeeded().forPath(testBasePath);
    jsonMapper = new DefaultObjectMapper();
    announcer = new Announcer(cf, MoreExecutors.sameThreadExecutor());
    announcer.start();
    segmentAnnouncer = new BatchDataSegmentAnnouncer(
        new DruidServerMetadata("id", "host", Long.MAX_VALUE, "type", "tier", 0),
        new BatchDataSegmentAnnouncerConfig() {
            @Override
            public int getSegmentsPerNode() {
                return 50;
            }
        },
        new ZkPathsConfig() {
            @Override
            public String getBase() {
                return testBasePath;
            }
        },
        announcer,
        jsonMapper);
    segmentAnnouncer.start();
    testSegments = Sets.newConcurrentHashSet();
    for (int i = 0; i < INITIAL_SEGMENTS; i++) {
        testSegments.add(makeSegment(i));
    }
    batchServerInventoryView = new BatchServerInventoryView(
        new ZkPathsConfig() {
            @Override
            public String getBase() {
                return testBasePath;
            }
        },
        cf,
        jsonMapper,
        Predicates.<Pair<DruidServerMetadata, DataSegment>>alwaysTrue());
    batchServerInventoryView.start();
    inventoryUpdateCounter.set(0);
    filteredBatchServerInventoryView = new BatchServerInventoryView(
        new ZkPathsConfig() {
            @Override
            public String getBase() {
                return testBasePath;
            }
        },
        cf,
        jsonMapper,
        new Predicate<Pair<DruidServerMetadata, DataSegment>>() {
            @Override
            public boolean apply(@Nullable Pair<DruidServerMetadata, DataSegment> input) {
                return input.rhs.getInterval().getStart().isBefore(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS));
            }
        }) {
        @Override
        protected DruidServer addInnerInventory(DruidServer container, String inventoryKey, Set<DataSegment> inventory) {
            DruidServer server = super.addInnerInventory(container, inventoryKey, inventory);
            inventoryUpdateCounter.incrementAndGet();
            return server;
        }
    };
    filteredBatchServerInventoryView.start();
}
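This setup allocates several lifecycle-managed resources: an in-process ZooKeeper cluster, a Curator client, two announcers, and two inventory views. A matching tearDown would release them in roughly reverse order. The sketch below is an assumption based on the standard Curator and Druid lifecycle conventions (stop()/close()), not code shown on this page:

@After
public void tearDown() throws Exception {
    // Assumed teardown: stop consumers before producers, then the client, then the cluster.
    batchServerInventoryView.stop();
    filteredBatchServerInventoryView.stop();
    segmentAnnouncer.stop();
    announcer.stop();
    cf.close();
    testingCluster.stop();
}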
Use of org.apache.flink.shaded.curator5.org.apache.curator.retry.ExponentialBackoffRetry in project alluxio by Alluxio.
The class LeaderSelectorClient, method getNewCuratorClient.
/**
 * Returns a new client for the ZooKeeper connection. The client is started before
 * it is returned.
 *
 * @return a new {@link CuratorFramework} client to use for leader selection
 */
private CuratorFramework getNewCuratorClient() {
    CuratorFramework client = CuratorFrameworkFactory.newClient(mZookeeperAddress, new ExponentialBackoffRetry(Constants.SECOND_MS, 3));
    client.start();
    // Sometimes, if the master crashes and restarts too quickly (faster than the zookeeper
    // timeout), zookeeper thinks the new client is still an old one. In order to ensure a clean
    // state, explicitly close the "old" client and recreate a new one.
    client.close();
    client = CuratorFrameworkFactory.newClient(mZookeeperAddress, new ExponentialBackoffRetry(Constants.SECOND_MS, 3));
    client.start();
    return client;
}
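For context, a client produced by a method like this typically backs a Curator LeaderSelector. The sketch below is illustrative only: the election path and listener body are hypothetical, not Alluxio's actual values.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.leader.LeaderSelector;
import org.apache.curator.framework.recipes.leader.LeaderSelectorListenerAdapter;

CuratorFramework client = getNewCuratorClient();
LeaderSelector selector = new LeaderSelector(client, "/leader", // hypothetical election path
    new LeaderSelectorListenerAdapter() {
        @Override
        public void takeLeadership(CuratorFramework curator) throws Exception {
            // Leadership is held only while this method blocks; returning relinquishes it.
            Thread.currentThread().join();
        }
    });
selector.autoRequeue(); // rejoin the election if leadership is ever lost
selector.start();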
Use of org.apache.flink.shaded.curator5.org.apache.curator.retry.ExponentialBackoffRetry in project heron by twitter.
The class DynamicBrokersReaderTest, method setUp.
@Before
public void setUp() throws Exception {
    server = new TestingServer();
    String connectionString = server.getConnectString();
    Map<String, Object> conf = new HashMap<>();
    conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
    conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 1000);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
    ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
    zookeeper = CuratorFrameworkFactory.newClient(connectionString, retryPolicy);
    dynamicBrokersReader = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
    Map<String, Object> conf2 = new HashMap<>();
    conf2.putAll(conf);
    conf2.put("kafka.topic.wildcard.match", true);
    wildCardBrokerReader = new DynamicBrokersReader(conf2, connectionString, masterPath, "^test.*$");
    zookeeper.start();
}
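A setup like this leaks the embedded TestingServer and Curator client unless the test also closes them. A minimal matching tearDown, assuming only the standard close() methods both classes expose, might look like:

@After
public void tearDown() throws Exception {
    // Close the client before the in-process ZooKeeper server it talks to.
    if (zookeeper != null) {
        zookeeper.close();
    }
    if (server != null) {
        server.close();
    }
}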
Use of org.apache.flink.shaded.curator5.org.apache.curator.retry.ExponentialBackoffRetry in project BRFS by zhangnianli.
The class FileCenter, method main.
public static void main(String[] args) {
    id = new Random().nextInt(10);
    System.out.println("id = " + id);
    System.out.println("hahahahha");
    RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
    CuratorFramework client = CuratorFrameworkFactory.builder()
        .namespace(ROOT)
        .connectString(zk_address)
        .retryPolicy(retryPolicy)
        .build();
    client.start();
    try {
        Stat stat = client.checkExists().forPath(DUPS);
        System.out.println("stat =" + stat);
        if (stat == null) {
            System.out.println("create--" + client.create().forPath(DUPS));
        }
        ExecutorService pool = Executors.newFixedThreadPool(5);
        PathChildrenCache pathCache = new PathChildrenCache(client, DUPS, true, false, pool);
        pathCache.getListenable().addListener(new PathNodeListener());
        pathCache.start();
        // TreeCache cache = new TreeCache(client, DUPS);
        // cache.getListenable().addListener(new TreeNodeListener(), pool);
        // cache.start();
    } catch (Exception e) {
        e.printStackTrace();
    }
    synchronized (client) {
        try {
            client.wait();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
    client.close();
}
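The PathNodeListener registered above is not shown on this page. A minimal sketch of such a listener, assuming it simply logs child events (BRFS's actual class may do more), could be:

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;

public class PathNodeListener implements PathChildrenCacheListener {
    @Override
    public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
        // React to child znodes appearing, changing, or disappearing under DUPS.
        switch (event.getType()) {
            case CHILD_ADDED:
                System.out.println("added: " + event.getData().getPath());
                break;
            case CHILD_UPDATED:
                System.out.println("updated: " + event.getData().getPath());
                break;
            case CHILD_REMOVED:
                System.out.println("removed: " + event.getData().getPath());
                break;
            default:
                break;
        }
    }
}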
Use of org.apache.flink.shaded.curator5.org.apache.curator.retry.ExponentialBackoffRetry in project BRFS by zhangnianli.
The class TestZKNode, method main.
public static void main(String[] args) throws Exception {
    RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
    CuratorFramework client = CuratorFrameworkFactory.newClient(zk_address, 5 * 1000, 30 * 1000, retryPolicy);
    client.start();
    client.blockUntilConnected();
    PathChildrenCache cache = new PathChildrenCache(client.usingNamespace("test"), "/fileCoordinator/big", true);
    cache.getListenable().addListener(new PathChildrenCacheListener() {
        @Override
        public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
            System.out.println("---" + event);
            ChildData data = event.getData();
            if (data != null) {
                switch (event.getType()) {
                    case CHILD_ADDED:
                        System.out.println("###PATH-" + data.getPath());
                        break;
                    default:
                        break;
                }
            }
        }
    });
    cache.start();
    PersistentNode node = new PersistentNode(client.usingNamespace("test"), CreateMode.EPHEMERAL, false, "/fileCoordinator/temp-1", "node1".getBytes());
    node.getListenable().addListener(new PersistentNodeListener() {
        @Override
        public void nodeCreated(String path) throws Exception {
            System.out.println("node1--created:" + path);
        }
    });
    node.start();
    PersistentNode node2 = new PersistentNode(client.usingNamespace("test"), CreateMode.EPHEMERAL, false, "/fileCoordinator/temp-1", "node2".getBytes());
    node2.getListenable().addListener(new PersistentNodeListener() {
        @Override
        public void nodeCreated(String path) throws Exception {
            System.out.println("node2--created:" + path);
        }
    });
    node2.start();
    Thread.sleep(2000);
    node2.close();
    synchronized (node) {
        node.wait();
    }
}
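Every snippet on this page constructs the retry policy as ExponentialBackoffRetry(baseSleepTimeMs, maxRetries). The standalone sketch below illustrates how the sleep interval grows with each retry; the formula mirrors Curator's documented behavior (base sleep times a random factor whose range doubles per attempt), but this is an illustration, not Curator's exact source.

import java.util.Random;

public class BackoffDemo {
    public static void main(String[] args) {
        int baseSleepTimeMs = 1000; // first constructor argument in the snippets above
        int maxRetries = 3;         // second constructor argument
        Random random = new Random();
        for (int retryCount = 0; retryCount < maxRetries; retryCount++) {
            // After clamping, the factor lies in [1, 2^(retryCount+1)), so the
            // worst-case sleep roughly doubles with every failed attempt.
            long sleepMs = baseSleepTimeMs * Math.max(1, random.nextInt(1 << (retryCount + 1)));
            System.out.println("retry " + retryCount + ": sleep " + sleepMs + " ms");
        }
    }
}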