Use of org.apache.zookeeper.KeeperException in project lucene-solr by apache.
The class RequestParams, method getFreshRequestParams:
public static RequestParams getFreshRequestParams(SolrResourceLoader loader, RequestParams requestParams) {
  if (loader instanceof ZkSolrResourceLoader) {
    ZkSolrResourceLoader resourceLoader = (ZkSolrResourceLoader) loader;
    try {
      // check the current version of the params znode in ZooKeeper
      Stat stat = resourceLoader.getZkController().getZkClient().exists(resourceLoader.getConfigSetZkPath() + "/" + RequestParams.RESOURCE, null, true);
      log.debug("latest version of {} in ZK is : {}", resourceLoader.getConfigSetZkPath() + "/" + RequestParams.RESOURCE, stat == null ? "" : stat.getVersion());
      if (stat == null) {
        // no params znode yet: return an empty, unversioned RequestParams
        requestParams = new RequestParams(Collections.EMPTY_MAP, -1);
      } else if (requestParams == null || stat.getVersion() > requestParams.getZnodeVersion()) {
        // ZooKeeper holds a newer version than the cached copy: re-read it
        Object[] o = getMapAndVersion(loader, RequestParams.RESOURCE);
        requestParams = new RequestParams((Map) o[0], (Integer) o[1]);
        log.info("request params refreshed to version {}", requestParams.getZnodeVersion());
      }
    } catch (KeeperException | InterruptedException e) {
      SolrZkClient.checkInterrupted(e);
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
    }
  } else {
    // not running in SolrCloud: always load from the local resource
    Object[] o = getMapAndVersion(loader, RequestParams.RESOURCE);
    requestParams = new RequestParams((Map) o[0], (Integer) o[1]);
  }
  return requestParams;
}
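The refresh is version-gated: the params resource is only re-read when the znode's Stat reports a version newer than the cached one. A minimal sketch of that pattern against the plain ZooKeeper client (the class VersionedRefresh and its helper method are illustrative, not part of Solr):

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Hypothetical helper, not part of Solr: re-read a znode only when its version is newer than a cached one.
public final class VersionedRefresh {

  private VersionedRefresh() {}

  public static byte[] refreshIfNewer(ZooKeeper zk, String path, int cachedVersion)
      throws KeeperException, InterruptedException {
    Stat stat = zk.exists(path, false);     // null if the znode does not exist
    if (stat == null || stat.getVersion() <= cachedVersion) {
      return null;                          // nothing newer to load
    }
    return zk.getData(path, false, stat);   // read the newer payload
  }
}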
Use of org.apache.zookeeper.KeeperException in project lucene-solr by apache.
The class ZkContainer, method initZooKeeper:
public void initZooKeeper(final CoreContainer cc, String solrHome, CloudConfig config) {
  ZkController zkController = null;
  String zkRun = System.getProperty("zkRun");
  if (zkRun != null && config == null)
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Cannot start Solr in cloud mode - no cloud config provided");
  if (config == null)
    // not in zk mode
    return;
  String zookeeperHost = config.getZkHost();
  // zookeeper in quorum mode currently causes a failure when trying to
  // register log4j mbeans. See SOLR-2369
  // TODO: remove after updating to an slf4j based zookeeper
  System.setProperty("zookeeper.jmx.log4j.disable", "true");
  if (zkRun != null) {
    String zkDataHome = System.getProperty("zkServerDataDir", Paths.get(solrHome).resolve("zoo_data").toString());
    String zkConfHome = System.getProperty("zkServerConfDir", solrHome);
    zkServer = new SolrZkServer(stripChroot(zkRun), stripChroot(config.getZkHost()), zkDataHome, zkConfHome, config.getSolrHostPort());
    zkServer.parseConfig();
    zkServer.start();
    // set client from server config if not already set
    if (zookeeperHost == null) {
      zookeeperHost = zkServer.getClientString();
    }
  }
  int zkClientConnectTimeout = 30000;
  if (zookeeperHost != null) {
    // we are ZooKeeper enabled
    try {
      // If this is an ensemble, allow for a long connect time for other servers to come up
      if (zkRun != null && zkServer.getServers().size() > 1) {
        // 1 day for embedded ensemble
        zkClientConnectTimeout = 24 * 60 * 60 * 1000;
        log.info("Zookeeper client=" + zookeeperHost + " Waiting for a quorum.");
      } else {
        log.info("Zookeeper client=" + zookeeperHost);
      }
      String confDir = System.getProperty("bootstrap_confdir");
      boolean boostrapConf = Boolean.getBoolean("bootstrap_conf");
      if (!ZkController.checkChrootPath(zookeeperHost, (confDir != null) || boostrapConf || zkRunOnly)) {
        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A chroot was specified in ZkHost but the znode doesn't exist. " + zookeeperHost);
      }
      zkController = new ZkController(cc, zookeeperHost, zkClientConnectTimeout, config, new CurrentCoreDescriptorProvider() {
        @Override
        public List<CoreDescriptor> getCurrentDescriptors() {
          List<CoreDescriptor> descriptors = new ArrayList<>(cc.getLoadedCoreNames().size());
          Collection<SolrCore> cores = cc.getCores();
          for (SolrCore core : cores) {
            descriptors.add(core.getCoreDescriptor());
          }
          return descriptors;
        }
      });
      if (zkRun != null && zkServer.getServers().size() > 1 && confDir == null && boostrapConf == false) {
        // we are part of an ensemble and we are not uploading the config - pause to give the config time
        // to get up
        Thread.sleep(10000);
      }
      if (confDir != null) {
        Path configPath = Paths.get(confDir);
        if (!Files.isDirectory(configPath))
          throw new IllegalArgumentException("bootstrap_confdir must be a directory of configuration files");
        String confName = System.getProperty(ZkController.COLLECTION_PARAM_PREFIX + ZkController.CONFIGNAME_PROP, "configuration1");
        ZkConfigManager configManager = new ZkConfigManager(zkController.getZkClient());
        configManager.uploadConfigDir(configPath, confName);
      }
      if (boostrapConf) {
        ZkController.bootstrapConf(zkController.getZkClient(), cc, solrHome);
      }
    } catch (InterruptedException e) {
      // Restore the interrupted status
      Thread.currentThread().interrupt();
      log.error("", e);
      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
    } catch (TimeoutException e) {
      log.error("Could not connect to ZooKeeper", e);
      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
    } catch (IOException | KeeperException e) {
      log.error("", e);
      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
    }
  }
  this.zkController = zkController;
}
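All three catch blocks follow the same convention: an InterruptedException restores the thread's interrupt flag, then the failure is wrapped in an unchecked server-error exception. A minimal sketch of that translation using only the JDK (ZkErrors and serverError are illustrative names; Solr itself throws ZooKeeperException with SolrException.ErrorCode.SERVER_ERROR):

// Illustrative pattern only: translate checked ZooKeeper failures into an unchecked server error.
public final class ZkErrors {

  private ZkErrors() {}

  public static RuntimeException serverError(String msg, Exception e) {
    if (e instanceof InterruptedException) {
      Thread.currentThread().interrupt();   // keep the interrupted status visible to callers
    }
    // In Solr this would be a ZooKeeperException carrying SolrException.ErrorCode.SERVER_ERROR.
    return new RuntimeException(msg, e);
  }
}

A caller would then write throw ZkErrors.serverError("Could not connect to ZooKeeper", e); inside each catch block instead of repeating the wrapping logic.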
Use of org.apache.zookeeper.KeeperException in project lucene-solr by apache.
The class ManagedIndexSchemaFactory, method inform:
@Override
public void inform(SolrCore core) {
  this.core = core;
  if (loader instanceof ZkSolrResourceLoader) {
    this.zkIndexSchemaReader = new ZkIndexSchemaReader(this, core);
    ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader) loader;
    zkLoader.setZkIndexSchemaReader(this.zkIndexSchemaReader);
    try {
      // update immediately if newer is available
      zkIndexSchemaReader.refreshSchemaFromZk(-1);
      core.setLatestSchema(getSchema());
    } catch (KeeperException e) {
      String msg = "Error attempting to access " + zkLoader.getConfigSetZkPath() + "/" + managedSchemaResourceName;
      log.error(msg, e);
      throw new SolrException(ErrorCode.SERVER_ERROR, msg, e);
    } catch (InterruptedException e) {
      // Restore the interrupted status
      Thread.currentThread().interrupt();
      log.warn("", e);
    }
  } else {
    this.zkIndexSchemaReader = null;
  }
}
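ZkIndexSchemaReader keeps the in-memory schema in sync with the managed-schema znode. The sketch below shows the general one-shot-watch pattern such a reader relies on, written against the plain ZooKeeper client (the class name, znode path layout, and reload hook are assumptions for illustration, not Solr's actual implementation):

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Illustrative watcher: ZooKeeper watches are one-shot, so re-arm before every reload.
public final class SchemaZnodeWatcher implements Watcher {

  private final ZooKeeper zk;
  private final String schemaPath;   // e.g. "/configs/<configset>/managed-schema" (assumed layout)

  public SchemaZnodeWatcher(ZooKeeper zk, String schemaPath) {
    this.zk = zk;
    this.schemaPath = schemaPath;
  }

  public void arm() throws KeeperException, InterruptedException {
    zk.exists(schemaPath, this);     // registers this watcher for the next change to the znode
  }

  @Override
  public void process(WatchedEvent event) {
    try {
      arm();                                            // re-register first so no update is missed
      Stat stat = new Stat();
      byte[] schemaBytes = zk.getData(schemaPath, false, stat);
      // ... parse schemaBytes and swap in the new schema at stat.getVersion() ...
    } catch (KeeperException e) {
      // log and keep serving the previous schema
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}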
Use of org.apache.zookeeper.KeeperException in project lucene-solr by apache.
The class CoreContainer, method create:
/**
 * Creates a new core in a specified instance directory, publishing the core state to the cluster
 * @param coreName the core name
 * @param instancePath the instance directory
 * @param parameters the core parameters
 * @param newCollection whether this core is being created as part of a new collection
 * @return the newly created core
 */
public SolrCore create(String coreName, Path instancePath, Map<String, String> parameters, boolean newCollection) {
  CoreDescriptor cd = new CoreDescriptor(coreName, instancePath, parameters, getContainerProperties(), isZooKeeperAware());
  // TODO: There's a race here, isn't there?
  if (getLoadedCoreNames().contains(coreName)) {
    log.warn("Creating a core with existing name is not allowed");
    // TODO: Shouldn't this be a BAD_REQUEST?
    throw new SolrException(ErrorCode.SERVER_ERROR, "Core with name '" + coreName + "' already exists.");
  }
  boolean preExisitingZkEntry = false;
  try {
    if (getZkController() != null) {
      if (!Overseer.isLegacy(getZkController().getZkStateReader())) {
        if (cd.getCloudDescriptor().getCoreNodeName() == null) {
          throw new SolrException(ErrorCode.SERVER_ERROR, "non legacy mode coreNodeName missing " + parameters.toString());
        }
      }
      preExisitingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd);
    }
    SolrCore core = create(cd, true, newCollection);
    // only write out the descriptor if the core is successfully created
    coresLocator.create(this, cd);
    return core;
  } catch (Exception ex) {
    if (isZooKeeperAware() && !preExisitingZkEntry) {
      try {
        getZkController().unregister(coreName, cd);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        SolrException.log(log, null, e);
      } catch (KeeperException e) {
        SolrException.log(log, null, e);
      }
    }
    Throwable tc = ex;
    Throwable c = null;
    do {
      tc = tc.getCause();
      if (tc != null) {
        c = tc;
      }
    } while (tc != null);
    String rootMsg = "";
    if (c != null) {
      rootMsg = " Caused by: " + c.getMessage();
    }
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error CREATEing SolrCore '" + coreName + "': " + ex.getMessage() + rootMsg, ex);
  }
}
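The do/while loop above walks the cause chain so the error message can report the root cause. A compact equivalent is sketched below (the helper name rootCause is illustrative; unlike the loop above it returns the throwable itself when there is no cause, and it guards against self-referential causes):

// Illustrative helper, equivalent in spirit to the cause-walking loop above.
public final class Causes {

  private Causes() {}

  public static Throwable rootCause(Throwable t) {
    Throwable root = t;
    while (root.getCause() != null && root.getCause() != root) {
      root = root.getCause();
    }
    return root;
  }
}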
Use of org.apache.zookeeper.KeeperException in project lucene-solr by apache.
The class DeleteCollectionCmd, method call:
@Override
public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
  ZkStateReader zkStateReader = ocmh.zkStateReader;
  final String collection = message.getStr(NAME);
  try {
    // Remove the snapshots meta-data for this collection in ZK. Deleting actual index files
    // should be taken care of as part of collection delete operation.
    SolrZkClient zkClient = zkStateReader.getZkClient();
    SolrSnapshotManager.cleanupCollectionLevelSnapshots(zkClient, collection);
    if (zkStateReader.getClusterState().getCollectionOrNull(collection) == null) {
      if (zkStateReader.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
        // the collection is not in the clusterstate, but its znode still exists;
        // the finally block below will clean it up
        return;
      }
    }
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
    params.set(CoreAdminParams.DELETE_INSTANCE_DIR, true);
    params.set(CoreAdminParams.DELETE_DATA_DIR, true);
    String asyncId = message.getStr(ASYNC);
    Map<String, String> requestMap = null;
    if (asyncId != null) {
      requestMap = new HashMap<>();
    }
    Set<String> okayExceptions = new HashSet<>(1);
    okayExceptions.add(NonExistentCoreException.class.getName());
    ocmh.collectionCmd(message, params, results, null, asyncId, requestMap, okayExceptions);
    ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETE.toLower(), NAME, collection);
    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
    // wait for a while until we don't see the collection
    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS);
    boolean removed = false;
    while (!timeout.hasTimedOut()) {
      Thread.sleep(100);
      removed = !zkStateReader.getClusterState().hasCollection(collection);
      if (removed) {
        // just a bit of time so it's more likely other readers see on return
        Thread.sleep(500);
        break;
      }
    }
    if (!removed) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully remove collection: " + collection);
    }
  } finally {
    try {
      if (zkStateReader.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
        zkStateReader.getZkClient().clean(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection);
      }
    } catch (InterruptedException e) {
      SolrException.log(log, "Cleaning up collection in zk was interrupted:" + collection, e);
      Thread.currentThread().interrupt();
    } catch (KeeperException e) {
      SolrException.log(log, "Problem cleaning up collection in zk:" + collection, e);
    }
  }
}
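The command finishes by polling the cluster state until the collection disappears or 30 seconds pass. The same wait-for-condition shape can be written with plain JDK types; a minimal sketch (the class PollingWait and its parameters are illustrative, not Solr's TimeOut utility):

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

// Illustrative polling helper mirroring the wait loop above.
public final class PollingWait {

  private PollingWait() {}

  public static boolean waitFor(BooleanSupplier condition, long timeout, TimeUnit unit, long pollMillis)
      throws InterruptedException {
    long deadline = System.nanoTime() + unit.toNanos(timeout);
    while (System.nanoTime() < deadline) {
      if (condition.getAsBoolean()) {
        return true;                 // condition satisfied before the deadline
      }
      Thread.sleep(pollMillis);      // back off between checks
    }
    return condition.getAsBoolean(); // one last check at the deadline
  }
}

A caller would write, for example, PollingWait.waitFor(() -> !zkStateReader.getClusterState().hasCollection(collection), 30, TimeUnit.SECONDS, 100) to express the same wait as the loop above.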