Use of org.apache.zookeeper.KeeperException.NodeExistsException in project pulsar by yahoo.
The class PulsarClusterMetadataSetup, method main.
public static void main(String[] args) throws Exception {
    Arguments arguments = new Arguments();
    JCommander jcommander = new JCommander();
    try {
        jcommander.addObject(arguments);
        jcommander.parse(args);
        if (arguments.help) {
            jcommander.usage();
            return;
        }
    } catch (Exception e) {
        jcommander.usage();
        return;
    }
    log.info("Setting up cluster {} with zk={} global-zk={}", arguments.cluster, arguments.zookeeper,
            arguments.globalZookeeper);
    // Format BookKeeper metadata
    ClientConfiguration bkConf = new ClientConfiguration();
    bkConf.setLedgerManagerFactoryClass(HierarchicalLedgerManagerFactory.class);
    bkConf.setZkServers(arguments.zookeeper);
    if (!BookKeeperAdmin.format(bkConf, false, /* interactive */ false)) {
        throw new IOException("Failed to initialize BookKeeper metadata");
    }
    ZooKeeperClientFactory zkfactory = new ZookeeperClientFactoryImpl();
    ZooKeeper localZk = zkfactory.create(arguments.zookeeper, SessionType.ReadWrite, 30000).get();
    ZooKeeper globalZk = zkfactory.create(arguments.globalZookeeper, SessionType.ReadWrite, 30000).get();
    localZk.create("/managed-ledgers", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    localZk.create("/namespace", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    try {
        ZkUtils.createFullPathOptimistic(globalZk, "/admin/policies", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
                CreateMode.PERSISTENT);
    } catch (NodeExistsException e) {
        // Ignore
    }
    try {
        ZkUtils.createFullPathOptimistic(globalZk, "/admin/clusters", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
                CreateMode.PERSISTENT);
    } catch (NodeExistsException e) {
        // Ignore
    }
    ClusterData clusterData = new ClusterData(arguments.clusterWebServiceUrl, arguments.clusterWebServiceUrlTls,
            arguments.clusterBrokerServiceUrl, arguments.clusterBrokerServiceUrlTls);
    byte[] clusterDataJson = ObjectMapperFactory.getThreadLocal().writeValueAsBytes(clusterData);
    globalZk.create("/admin/clusters/" + arguments.cluster, clusterDataJson, ZooDefs.Ids.OPEN_ACL_UNSAFE,
            CreateMode.PERSISTENT);
    // Create marker for "global" cluster
    ClusterData globalClusterData = new ClusterData(null, null);
    byte[] globalClusterDataJson = ObjectMapperFactory.getThreadLocal().writeValueAsBytes(globalClusterData);
    try {
        globalZk.create("/admin/clusters/global", globalClusterDataJson, ZooDefs.Ids.OPEN_ACL_UNSAFE,
                CreateMode.PERSISTENT);
    } catch (NodeExistsException e) {
        // Ignore
    }
    log.info("Cluster metadata for '{}' setup correctly", arguments.cluster);
}
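The setup above leans on a standard ZooKeeper idiom: treat NodeExistsException as a benign outcome so the metadata bootstrap can be re-run safely. Below is a minimal, self-contained sketch of that idiom; the helper class and method names are illustrative and not part of the Pulsar sources.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

final class ZkIdempotentCreate {

    // Create a persistent znode, treating "already exists" as success so the
    // call can be repeated by a second bootstrap run without failing.
    static void createIfAbsent(ZooKeeper zk, String path, byte[] data)
            throws KeeperException, InterruptedException {
        try {
            zk.create(path, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        } catch (NodeExistsException e) {
            // Another process (or a previous run) already created the node; nothing to do.
        }
    }
}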
Use of org.apache.zookeeper.KeeperException.NodeExistsException in project disgear by yangbutao.
The class ZookeeperClient, method makePath.
public void makePath(String path, byte[] data, CreateMode createMode) throws Exception {
    boolean failOnExists = true;
    boolean retry = true;
    if (path.startsWith("/")) {
        path = path.substring(1, path.length());
    }
    String[] paths = path.split("/");
    StringBuilder sbPath = new StringBuilder();
    for (int i = 0; i < paths.length; i++) {
        byte[] bytes = null;
        String pathPiece = paths[i];
        sbPath.append("/" + pathPiece);
        final String currentPath = sbPath.toString();
        Object exists = zkClient.exists(currentPath, null);
        if (exists == null || ((i == paths.length - 1) && failOnExists)) {
            CreateMode mode = CreateMode.PERSISTENT;
            if (i == paths.length - 1) {
                mode = createMode;
                bytes = data;
            }
            try {
                zkClient.create(currentPath, bytes, ZooDefs.Ids.OPEN_ACL_UNSAFE, mode);
            } catch (NodeExistsException e) {
                if (!failOnExists) {
                    // TODO:
                    zkClient.setData(currentPath, data, -1);
                    zkClient.exists(currentPath, null);
                    return;
                }
                if (i == paths.length - 1) {
                    throw e;
                }
            }
            if (i == paths.length - 1) {
                zkClient.exists(currentPath, null);
            }
        } else if (i == paths.length - 1) {
            zkClient.setData(currentPath, data, -1);
            zkClient.exists(currentPath, null);
        }
    }
}
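A hypothetical call site for the method above; the client instance and the example path are placeholders, and only the makePath(path, data, createMode) contract comes from the snippet. Missing intermediate znodes are created as empty persistent nodes, and the payload is stored only on the leaf.

// 'client' is assumed to be an already-connected ZookeeperClient; construction is not shown here.
byte[] payload = "node-1".getBytes(java.nio.charset.StandardCharsets.UTF_8);
// Creates /disgear/config/nodes, along with /disgear and /disgear/config if they are missing.
client.makePath("/disgear/config/nodes", payload, CreateMode.PERSISTENT);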
Use of org.apache.zookeeper.KeeperException.NodeExistsException in project lucene-solr by apache.
The class ZKPropertiesWriter, method persist.
@Override
public void persist(Map<String, Object> propObjs) {
    Properties existing = mapToProperties(readIndexerProperties());
    existing.putAll(mapToProperties(propObjs));
    StringWriter output = new StringWriter();
    try {
        existing.store(output, null);
        byte[] bytes = output.toString().getBytes(StandardCharsets.UTF_8);
        if (!zkClient.exists(path, false)) {
            try {
                zkClient.makePath(path, false);
            } catch (NodeExistsException e) {
                // Someone else created the node between the exists() check and makePath(); ignore.
            }
        }
        zkClient.setData(path, bytes, false);
    } catch (Exception e) {
        SolrZkClient.checkInterrupted(e);
        log.warn("Could not persist properties to " + path + " :" + e.getClass(), e);
    }
}
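A hedged sketch of how a caller might use persist; the property key and value are made up for illustration and are not taken from the Solr sources.

// Merge a new last-index timestamp into the properties znode. Existing keys survive because
// persist() first reads and merges the current properties before writing them back.
Map<String, Object> props = new HashMap<>();
props.put("last_index_time", "2017-01-01 00:00:00");
propertiesWriter.persist(props);  // 'propertiesWriter' is an initialized ZKPropertiesWriter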
Use of org.apache.zookeeper.KeeperException.NodeExistsException in project lucene-solr by apache.
The class LeaderInitiatedRecoveryOnShardRestartTest, method testRestartWithAllInLIR.
@Test
public void testRestartWithAllInLIR() throws Exception {
    // still waiting to be able to properly start with no default collection1,
    // delete to remove confusion
    waitForRecoveriesToFinish(false);
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionAction.DELETE.toString());
    params.set("name", DEFAULT_COLLECTION);
    QueryRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
    String baseUrl = ((HttpSolrClient) clients.get(0)).getBaseURL();
    HttpSolrClient delClient = getHttpSolrClient(baseUrl.substring(0, baseUrl.lastIndexOf("/")));
    delClient.request(request);
    delClient.close();
    String testCollectionName = "all_in_lir";
    String shardId = "shard1";
    createCollection(testCollectionName, 1, 3, 1);
    waitForRecoveriesToFinish(testCollectionName, false);
    cloudClient.setDefaultCollection(testCollectionName);
    Map<String, Object> stateObj = Utils.makeMap();
    stateObj.put(ZkStateReader.STATE_PROP, "down");
    stateObj.put("createdByNodeName", "test");
    stateObj.put("createdByCoreNodeName", "test");
    byte[] znodeData = Utils.toJSON(stateObj);
    SolrZkClient zkClient = cloudClient.getZkStateReader().getZkClient();
    zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node1", znodeData, true);
    zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node2", znodeData, true);
    zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node3", znodeData, true);
    // everyone gets a couple docs so that everyone has tlog entries
    // and won't become leader simply because they have no tlog versions
    SolrInputDocument doc = new SolrInputDocument();
    addFields(doc, "id", "1");
    SolrInputDocument doc2 = new SolrInputDocument();
    addFields(doc2, "id", "2");
    cloudClient.add(doc);
    cloudClient.add(doc2);
    cloudClient.commit();
    assertEquals("We just added 2 docs, we should be able to find them", 2,
            cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    // randomly add too many docs to peer sync to one replica so that only one random replica is the valid leader
    // the versions don't matter, they just have to be higher than what the last 2 docs got
    HttpSolrClient client = (HttpSolrClient) clients.get(random().nextInt(clients.size()));
    client.setBaseURL(client.getBaseURL().substring(0, client.getBaseURL().lastIndexOf("/")) + "/" + testCollectionName);
    params = new ModifiableSolrParams();
    params.set(DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
    try {
        for (int i = 0; i < 101; i++) {
            add(client, params, sdoc("id", 3 + i, "_version_", Long.MAX_VALUE - 1 - i));
        }
    } catch (RemoteSolrException e) {
        // resend without version
        if (e.getMessage().contains("conflict")) {
            for (int i = 0; i < 101; i++) {
                add(client, params, sdoc("id", 3 + i));
            }
        }
    }
    client.commit();
    for (JettySolrRunner jetty : jettys) {
        ChaosMonkey.stop(jetty);
    }
    ChaosMonkey.stop(controlJetty);
    Thread.sleep(10000);
    log.info("Start back up");
    for (JettySolrRunner jetty : jettys) {
        ChaosMonkey.start(jetty);
    }
    ChaosMonkey.start(controlJetty);
    // recoveries will not finish without SOLR-8075 and SOLR-8367
    waitForRecoveriesToFinish(testCollectionName, true);
    // now expire each node
    try {
        zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node1", znodeData, true);
    } catch (NodeExistsException e) {
    }
    try {
        zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node2", znodeData, true);
    } catch (NodeExistsException e) {
    }
    try {
        zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node3", znodeData, true);
    } catch (NodeExistsException e) {
    }
    for (JettySolrRunner jetty : jettys) {
        chaosMonkey.expireSession(jetty);
    }
    Thread.sleep(2000);
    // recoveries will not finish without SOLR-8075 and SOLR-8367
    waitForRecoveriesToFinish(testCollectionName, true);
}
Use of org.apache.zookeeper.KeeperException.NodeExistsException in project zookeeper by apache.
The class DataTree, method createNode.
/**
 * Add a new node to the DataTree.
 *
 * @param path
 *            Path for the new node.
 * @param data
 *            Data to store in the node.
 * @param acl
 *            Node acls
 * @param ephemeralOwner
 *            the session id that owns this node. -1 indicates this is not
 *            an ephemeral node.
 * @param zxid
 *            Transaction ID
 * @param time
 * @param outputStat
 *            A Stat object to store Stat output results into.
 * @throws NodeExistsException
 * @throws NoNodeException
 * @throws KeeperException
 */
public void createNode(final String path, byte[] data, List<ACL> acl, long ephemeralOwner, int parentCVersion,
        long zxid, long time, Stat outputStat)
        throws KeeperException.NoNodeException, KeeperException.NodeExistsException {
    int lastSlash = path.lastIndexOf('/');
    String parentName = path.substring(0, lastSlash);
    String childName = path.substring(lastSlash + 1);
    StatPersisted stat = new StatPersisted();
    stat.setCtime(time);
    stat.setMtime(time);
    stat.setCzxid(zxid);
    stat.setMzxid(zxid);
    stat.setPzxid(zxid);
    stat.setVersion(0);
    stat.setAversion(0);
    stat.setEphemeralOwner(ephemeralOwner);
    DataNode parent = nodes.get(parentName);
    if (parent == null) {
        throw new KeeperException.NoNodeException();
    }
    synchronized (parent) {
        Set<String> children = parent.getChildren();
        if (children.contains(childName)) {
            throw new KeeperException.NodeExistsException();
        }
        if (parentCVersion == -1) {
            parentCVersion = parent.stat.getCversion();
            parentCVersion++;
        }
        parent.stat.setCversion(parentCVersion);
        parent.stat.setPzxid(zxid);
        Long longval = aclCache.convertAcls(acl);
        DataNode child = new DataNode(data, longval, stat);
        parent.addChild(childName);
        nodes.put(path, child);
        EphemeralType ephemeralType = EphemeralType.get(ephemeralOwner);
        if (ephemeralType == EphemeralType.CONTAINER) {
            containers.add(path);
        } else if (ephemeralType == EphemeralType.TTL) {
            ttls.add(path);
        } else if (ephemeralOwner != 0) {
            HashSet<String> list = ephemerals.get(ephemeralOwner);
            if (list == null) {
                list = new HashSet<String>();
                ephemerals.put(ephemeralOwner, list);
            }
            synchronized (list) {
                list.add(path);
            }
        }
        if (outputStat != null) {
            child.copyStat(outputStat);
        }
    }
    // now check if its one of the zookeeper node child
    if (parentName.startsWith(quotaZookeeper)) {
        // now check if its the limit node
        if (Quotas.limitNode.equals(childName)) {
            // this is the limit node
            // get the parent and add it to the trie
            pTrie.addPath(parentName.substring(quotaZookeeper.length()));
        }
        if (Quotas.statNode.equals(childName)) {
            updateQuotaForPath(parentName.substring(quotaZookeeper.length()));
        }
    }
    // also check to update the quotas for this node
    String lastPrefix = getMaxPrefixWithQuota(path);
    if (lastPrefix != null) {
        // ok we have some match and need to update
        updateCount(lastPrefix, 1);
        updateBytes(lastPrefix, data == null ? 0 : data.length);
    }
    dataWatches.triggerWatch(path, Event.EventType.NodeCreated);
    childWatches.triggerWatch(parentName.equals("") ? "/" : parentName, Event.EventType.NodeChildrenChanged);
}
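An illustrative, hedged call into the method above, not taken from the ZooKeeper sources: it assumes a freshly constructed DataTree, a path directly under the root, and an ephemeralOwner of 0, which the ephemeralOwner != 0 branch above treats as a plain persistent node.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.Stat;
import org.apache.zookeeper.server.DataTree;

public class DataTreeCreateNodeExample {
    public static void main(String[] args)
            throws KeeperException.NoNodeException, KeeperException.NodeExistsException {
        DataTree tree = new DataTree();
        Stat stat = new Stat();
        // ephemeralOwner 0: no owning session, so the node is tracked as a plain persistent node.
        // parentCVersion -1: let createNode increment the parent's cversion itself.
        // zxid and time would normally come from the committed transaction being applied.
        tree.createNode("/app", "v1".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE,
                0L, -1, 1L, System.currentTimeMillis(), stat);
        // A second create of the same path would throw KeeperException.NodeExistsException.
    }
}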