use of org.apache.solr.common.cloud.ZkCmdExecutor in project lucene-solr by apache.
the class ZkSolrClientTest method testZkCmdExectutor.
public void testZkCmdExectutor() throws Exception {
  String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
  ZkTestServer server = null;
  try {
    server = new ZkTestServer(zkDir);
    server.run();
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    final int timeout = random().nextInt(10000) + 5000;
    ZkCmdExecutor zkCmdExecutor = new ZkCmdExecutor(timeout);
    final long start = System.nanoTime();
    try {
      zkCmdExecutor.retryOperation(new ZkOperation() {
        @Override
        public String execute() throws KeeperException, InterruptedException {
          if (System.nanoTime() - start > TimeUnit.NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS)) {
            throw new KeeperException.SessionExpiredException();
          }
          throw new KeeperException.ConnectionLossException();
        }
      });
    } catch (KeeperException.SessionExpiredException e) {
      // expected: the retry loop gives up with SessionExpiredException once the timeout elapses
    } catch (Exception e) {
      fail("Expected " + KeeperException.SessionExpiredException.class.getSimpleName() + " but got " + e.getClass().getSimpleName());
    }
  } finally {
    if (server != null) {
      server.shutdown();
    }
  }
}
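
The test above exercises the retry contract of ZkCmdExecutor.retryOperation: the supplied ZkOperation is re-invoked on ConnectionLossException until the executor's timeout is exhausted, at which point the last exception escapes. Below is a minimal sketch of using retryOperation directly outside a test, assuming ZkOperation.execute is declared to return Object (as the covariant String override above suggests); the timeout and path are illustrative placeholders, not values from the Solr codebase.

import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCmdExecutor;
import org.apache.solr.common.cloud.ZkOperation;
import org.apache.zookeeper.KeeperException;

public class RetryOperationSketch {
  // Creates a path, letting the ZkCmdExecutor retry loop handle connection loss.
  public static void makePathWithRetry(final SolrZkClient zkClient, final String path)
      throws KeeperException, InterruptedException {
    ZkCmdExecutor executor = new ZkCmdExecutor(15000); // retry budget in milliseconds (illustrative)
    executor.retryOperation(new ZkOperation() {
      @Override
      public Object execute() throws KeeperException, InterruptedException {
        // retryOnConnLoss=false: let the surrounding ZkCmdExecutor drive retries.
        zkClient.makePath(path, false);
        return null;
      }
    });
  }
}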
use of org.apache.solr.common.cloud.ZkCmdExecutor in project lucene-solr by apache.
the class CollectionsHandler method createSysConfigSet.
private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
  SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
  ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
  cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
  cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL, zk);
  try {
    String path = ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL + "/schema.xml";
    byte[] data = IOUtils.toByteArray(Thread.currentThread().getContextClassLoader().getResourceAsStream("SystemCollectionSchema.xml"));
    assert data != null && data.length > 0;
    cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
    path = ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL + "/solrconfig.xml";
    data = IOUtils.toByteArray(Thread.currentThread().getContextClassLoader().getResourceAsStream("SystemCollectionSolrConfig.xml"));
    assert data != null && data.length > 0;
    cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
  } catch (IOException e) {
    throw new SolrException(ErrorCode.SERVER_ERROR, e);
  }
}
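
Here ensureExists is used as an idempotent bootstrap for the .system collection's configset: the two-argument form creates an empty persistent znode if it is missing, and the form taking data seeds a new znode with content without disturbing one that already exists. A hedged sketch of the same idiom for a hypothetical configset named "my_configset" (the configset name and payload are illustrative, not part of the Solr API):

import java.nio.charset.StandardCharsets;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCmdExecutor;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;

public class ConfigSetBootstrapSketch {
  public static void seedConfigSet(SolrZkClient zkClient) throws KeeperException, InterruptedException {
    ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zkClient.getZkClientTimeout());
    String base = ZkStateReader.CONFIGS_ZKNODE + "/my_configset"; // hypothetical configset name
    // Create the configset node if missing; safe to call repeatedly.
    cmdExecutor.ensureExists(base, zkClient);
    // Seed a child node with data; a node that already exists is left as-is rather than overwritten.
    byte[] data = "<config/>".getBytes(StandardCharsets.UTF_8); // illustrative payload
    cmdExecutor.ensureExists(base + "/solrconfig.xml", data, CreateMode.PERSISTENT, zkClient);
  }
}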
use of org.apache.solr.common.cloud.ZkCmdExecutor in project lucene-solr by apache.
the class ZkController method createClusterZkNodes.
/**
* Create the zknodes necessary for a cluster to operate
*
* @param zkClient a SolrZkClient
* @throws KeeperException if there is a Zookeeper error
* @throws InterruptedException on interrupt
*/
public static void createClusterZkNodes(SolrZkClient zkClient) throws KeeperException, InterruptedException {
  ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zkClient.getZkClientTimeout());
  cmdExecutor.ensureExists(ZkStateReader.LIVE_NODES_ZKNODE, zkClient);
  cmdExecutor.ensureExists(ZkStateReader.COLLECTIONS_ZKNODE, zkClient);
  cmdExecutor.ensureExists(ZkStateReader.ALIASES, zkClient);
  byte[] emptyJson = "{}".getBytes(StandardCharsets.UTF_8);
  cmdExecutor.ensureExists(ZkStateReader.CLUSTER_STATE, emptyJson, CreateMode.PERSISTENT, zkClient);
  cmdExecutor.ensureExists(ZkStateReader.SOLR_SECURITY_CONF_PATH, emptyJson, CreateMode.PERSISTENT, zkClient);
}
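
A hedged sketch of invoking this bootstrap directly, for example from a one-off setup utility, assuming a reachable ZooKeeper ensemble; the connection string and timeout are placeholders, not values taken from the Solr codebase.

import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.cloud.SolrZkClient;

public class ClusterBootstrapSketch {
  public static void main(String[] args) throws Exception {
    // Connection string and client timeout are illustrative placeholders.
    SolrZkClient zkClient = new SolrZkClient("localhost:2181", 30000);
    try {
      // Creates the live nodes, collections, aliases, cluster state and security znodes
      // if they are missing; nodes that already exist are left untouched.
      ZkController.createClusterZkNodes(zkClient);
    } finally {
      zkClient.close();
    }
  }
}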
use of org.apache.solr.common.cloud.ZkCmdExecutor in project lucene-solr by apache.
the class ManagedIndexSchemaFactory method zkUgradeToManagedSchema.
/**
* Persist the managed schema to ZooKeeper and rename the non-managed schema
* by appending {@link #UPGRADED_SCHEMA_EXTENSION}.
*
* Failure to rename the non-managed schema will be logged as a warning,
* and no exception will be thrown.
*/
private void zkUgradeToManagedSchema() {
  // Only create, don't update it if it already exists
  schema.persistManagedSchemaToZooKeeper(true);
  if (resourceName.equals(managedSchemaResourceName)) {
    log.info("On upgrading to managed schema, did not rename non-managed schema " + resourceName + " because it's the same as the managed schema's name.");
  } else {
    // Rename the non-managed schema znode in ZooKeeper
    ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader) loader;
    final String nonManagedSchemaPath = zkLoader.getConfigSetZkPath() + "/" + resourceName;
    try {
      ZkController zkController = zkLoader.getZkController();
      ZkCmdExecutor zkCmdExecutor = new ZkCmdExecutor(zkController.getClientTimeout());
      if (zkController.pathExists(nonManagedSchemaPath)) {
        // First, copy the non-managed schema znode content to the upgraded schema znode
        byte[] bytes = zkController.getZkClient().getData(nonManagedSchemaPath, null, null, true);
        final String upgradedSchemaPath = nonManagedSchemaPath + UPGRADED_SCHEMA_EXTENSION;
        zkCmdExecutor.ensureExists(upgradedSchemaPath, zkController.getZkClient());
        zkController.getZkClient().setData(upgradedSchemaPath, bytes, true);
        // Then delete the non-managed schema znode
        zkController.getZkClient().delete(nonManagedSchemaPath, -1, true);
        // Set the resource name to the managed schema so that the CoreAdminHandler returns a findable filename
        schema.setResourceName(managedSchemaResourceName);
        log.info("After upgrading to managed schema in ZooKeeper, renamed the non-managed schema " + nonManagedSchemaPath + " to " + upgradedSchemaPath);
      } else {
        log.info("After upgrading to managed schema in ZooKeeper, the non-managed schema " + nonManagedSchemaPath + " no longer exists.");
      }
    } catch (Exception e) {
      if (e instanceof InterruptedException) {
        // Restore the interrupted status
        Thread.currentThread().interrupt();
      }
      final String msg = "Error persisting managed schema resource " + managedSchemaResourceName;
      // Log as warning and suppress the exception
      log.warn(msg, e);
    }
  }
}
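
ZooKeeper has no atomic rename, so the method above emulates one with copy-then-delete: read the old znode's data, ensureExists the target, setData, then delete the source. Below is a hedged, standalone sketch of that idiom stripped of the schema-specific logic; the class and method names are illustrative, and the same non-atomicity caveat applies.

import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCmdExecutor;
import org.apache.zookeeper.KeeperException;

public class ZnodeRenameSketch {
  // Copies the data of one znode to another, then deletes the original.
  // Not atomic: a concurrent reader may briefly observe both nodes.
  public static void rename(SolrZkClient zkClient, String from, String to, int timeoutMs)
      throws KeeperException, InterruptedException {
    ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(timeoutMs);
    byte[] data = zkClient.getData(from, null, null, true);
    cmdExecutor.ensureExists(to, zkClient);        // create the target if it is missing
    zkClient.setData(to, data, true);              // copy the payload
    zkClient.delete(from, -1, true);               // version -1: delete regardless of version
  }
}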
use of org.apache.solr.common.cloud.ZkCmdExecutor in project lucene-solr by apache.
the class ZkSolrClientTest method testSkipPathPartsOnMakePath.
public void testSkipPathPartsOnMakePath() throws Exception {
  try (ZkConnection conn = new ZkConnection()) {
    final SolrZkClient zkClient = conn.getClient();
    zkClient.makePath("/test", true);
    // should work
    zkClient.makePath("/test/path/here", (byte[]) null, CreateMode.PERSISTENT, (Watcher) null, true, true, 1);
    zkClient.clean("/");
    // should not work
    try {
      zkClient.makePath("/test/path/here", (byte[]) null, CreateMode.PERSISTENT, (Watcher) null, true, true, 1);
      fail("We should not be able to create this path");
    } catch (Exception e) {
    }
    zkClient.clean("/");
    ZkCmdExecutor zkCmdExecutor = new ZkCmdExecutor(30000);
    try {
      zkCmdExecutor.ensureExists("/collection/collection/leader", (byte[]) null, CreateMode.PERSISTENT, zkClient, 2);
      fail("We should not be able to create this path");
    } catch (Exception e) {
    }
    zkClient.makePath("/collection", true);
    try {
      zkCmdExecutor.ensureExists("/collections/collection/leader", (byte[]) null, CreateMode.PERSISTENT, zkClient, 2);
      fail("We should not be able to create this path");
    } catch (Exception e) {
    }
    zkClient.makePath("/collection/collection", true);
    byte[] bytes = new byte[10];
    zkCmdExecutor.ensureExists("/collection/collection", bytes, CreateMode.PERSISTENT, zkClient, 2);
    byte[] returnedBytes = zkClient.getData("/collection/collection", null, null, true);
    assertNull("We skipped 2 path parts, so data won't be written", returnedBytes);
    zkClient.makePath("/collection/collection/leader", true);
    zkCmdExecutor.ensureExists("/collection/collection/leader", (byte[]) null, CreateMode.PERSISTENT, zkClient, 2);
  }
}
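
As the assertions above indicate, the trailing skipPathParts argument tells ensureExists how many leading path segments to treat as already existing: with a value of 2, /collection/collection must be present before /collection/collection/leader can be created, and data is not written to the segments that were skipped. A minimal hedged sketch of that reading, using a hypothetical zkClient and the same paths as the test:

import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCmdExecutor;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;

public class SkipPathPartsSketch {
  public static void createLeaderNode(SolrZkClient zkClient) throws KeeperException, InterruptedException {
    ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(30000);
    // The first two segments are assumed to exist, so create them up front.
    zkClient.makePath("/collection/collection", true);
    // Only the trailing "leader" segment is created here; the skipped parents are not touched.
    cmdExecutor.ensureExists("/collection/collection/leader", (byte[]) null, CreateMode.PERSISTENT, zkClient, 2);
  }
}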