Use of org.apache.solr.common.cloud.ZkStateReader in project titan by thinkaurelius.
Class SolrIndex, method checkIfCollectionExists.
/**
 * Checks if the collection has already been created in Solr.
 */
private static boolean checkIfCollectionExists(CloudSolrClient server, String collection) throws KeeperException, InterruptedException {
    ZkStateReader zkStateReader = server.getZkStateReader();
    zkStateReader.updateClusterState(true);
    ClusterState clusterState = zkStateReader.getClusterState();
    return clusterState.getCollectionOrNull(collection) != null;
}
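A standalone variant of the same existence check, as a sketch: the class name, the zkHost value, and the collection name are placeholders, and it assumes a SolrJ release that still offers the CloudSolrClient(String zkHost) constructor and ClusterState.getCollectionOrNull.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ZkStateReader;

public class CollectionExistenceCheck {

    // Hypothetical standalone variant of the check above: connect to SolrCloud via
    // ZooKeeper and test whether the collection appears in the cluster state.
    public static boolean collectionExists(String zkHost, String collection) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient(zkHost)) {
            // connect() populates the ZkStateReader, so no explicit
            // updateClusterState call is needed here.
            client.connect();
            ZkStateReader zkStateReader = client.getZkStateReader();
            ClusterState clusterState = zkStateReader.getClusterState();
            return clusterState.getCollectionOrNull(collection) != null;
        }
    }

    public static void main(String[] args) throws Exception {
        // "localhost:9983" and "titan" are placeholder values.
        System.out.println(collectionExists("localhost:9983", "titan"));
    }
}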
Use of org.apache.solr.common.cloud.ZkStateReader in project titan by thinkaurelius.
Class SolrIndex, method clearStorage.
@Override
public void clearStorage() throws BackendException {
    try {
        if (mode != Mode.CLOUD)
            throw new UnsupportedOperationException("Operation only supported for SolrCloud");
        logger.debug("Clearing storage from Solr: {}", solrClient);
        ZkStateReader zkStateReader = ((CloudSolrClient) solrClient).getZkStateReader();
        zkStateReader.updateClusterState(true);
        ClusterState clusterState = zkStateReader.getClusterState();
        for (String collection : clusterState.getCollections()) {
            logger.debug("Clearing collection [{}] in Solr", collection);
            UpdateRequest deleteAll = newUpdateRequest();
            deleteAll.deleteByQuery("*:*");
            solrClient.request(deleteAll, collection);
        }
    } catch (SolrServerException e) {
        logger.error("Unable to clear storage from index due to server error on Solr.", e);
        throw new PermanentBackendException(e);
    } catch (IOException e) {
        logger.error("Unable to clear storage from index due to low-level I/O error.", e);
        throw new PermanentBackendException(e);
    } catch (Exception e) {
        logger.error("Unable to clear storage from index due to general error.", e);
        throw new PermanentBackendException(e);
    }
}
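For reference, the per-collection delete loop can also be written without Titan's newUpdateRequest() helper. A minimal sketch, assuming an already configured CloudSolrClient and a SolrJ release where SolrClient.commit(String collection) and ClusterState.getCollections() are available; the class and method names are placeholders.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.cloud.ClusterState;

public class ClearAllCollections {

    // Sketch of the loop above without the Titan-specific newUpdateRequest() helper:
    // send a *:* delete to every collection in the cluster state, then commit it.
    public static void clearAll(CloudSolrClient client) throws Exception {
        client.connect();
        ClusterState clusterState = client.getZkStateReader().getClusterState();
        for (String collection : clusterState.getCollections()) {
            UpdateRequest deleteAll = new UpdateRequest();
            deleteAll.deleteByQuery("*:*");
            client.request(deleteAll, collection);
            client.commit(collection);
        }
    }
}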
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
Class TestTolerantUpdateProcessorRandomCloud, method createMiniSolrCloudCluster.
@BeforeClass
public static void createMiniSolrCloudCluster() throws Exception {
    final String configName = "solrCloudCollectionConfig";
    final File configDir = new File(TEST_HOME() + File.separator + "collection1" + File.separator + "conf");
    final int numShards = TestUtil.nextInt(random(), 2, TEST_NIGHTLY ? 5 : 3);
    final int repFactor = TestUtil.nextInt(random(), 2, TEST_NIGHTLY ? 5 : 3);
    // at least one server won't have any replicas
    final int numServers = 1 + (numShards * repFactor);
    log.info("Configuring cluster: servers={}, shards={}, repfactor={}", numServers, numShards, repFactor);
    configureCluster(numServers).addConfig(configName, configDir.toPath()).configure();
    TestTolerantUpdateProcessorCloud.assertSpinLoopAllJettyAreRunning(cluster);
    Map<String, String> collectionProperties = new HashMap<>();
    collectionProperties.put("config", "solrconfig-distrib-update-processor-chains.xml");
    // string id
    collectionProperties.put("schema", "schema15.xml");
    CLOUD_CLIENT = cluster.getSolrClient();
    CLOUD_CLIENT.setDefaultCollection(COLLECTION_NAME);
    CollectionAdminRequest.createCollection(COLLECTION_NAME, configName, numShards, repFactor)
            .setProperties(collectionProperties)
            .process(CLOUD_CLIENT);
    if (NODE_CLIENTS != null) {
        for (HttpSolrClient client : NODE_CLIENTS) {
            client.close();
        }
    }
    NODE_CLIENTS = new ArrayList<HttpSolrClient>(numServers);
    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
        URL jettyURL = jetty.getBaseUrl();
        NODE_CLIENTS.add(getHttpSolrClient(jettyURL.toString() + "/" + COLLECTION_NAME + "/"));
    }
    assertEquals(numServers, NODE_CLIENTS.size());
    ZkStateReader zkStateReader = CLOUD_CLIENT.getZkStateReader();
    AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION_NAME, zkStateReader, true, true, 330);
}
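The setup above leaves CLOUD_CLIENT and NODE_CLIENTS open as static fields; a companion teardown could look like the sketch below, assuming the same fields. This is an illustrative sketch, not the test's actual @AfterClass method.

@AfterClass
public static void closeClients() throws Exception {
    // Hypothetical teardown for the static clients created in the setup above.
    if (NODE_CLIENTS != null) {
        for (HttpSolrClient client : NODE_CLIENTS) {
            client.close();
        }
        NODE_CLIENTS = null;
    }
    // CLOUD_CLIENT comes from cluster.getSolrClient() and is closed with the
    // cluster, so it is only dereferenced here.
    CLOUD_CLIENT = null;
}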
Use of org.apache.solr.common.cloud.ZkStateReader in project jackrabbit-oak by apache.
Class RemoteSolrServerProvider, method createCollectionIfNeeded.
private void createCollectionIfNeeded(CloudSolrServer cloudSolrServer) throws SolrServerException {
    String solrCollection = remoteSolrServerConfiguration.getSolrCollection();
    try {
        ZkStateReader zkStateReader = cloudSolrServer.getZkStateReader();
        SolrZkClient zkClient = zkStateReader.getZkClient();
        if (zkClient.isConnected() && !zkClient.exists("/configs/" + solrCollection, false)) {
            String solrConfDir = remoteSolrServerConfiguration.getSolrConfDir();
            File dir;
            if (solrConfDir != null && solrConfDir.length() > 0) {
                dir = new File(solrConfDir);
            } else {
                dir = new File(getClass().getResource("/solr/oak/conf").getFile());
            }
            ZkController.uploadConfigDir(zkClient, dir, solrCollection);
            UpdateRequest req = new UpdateRequest("/admin/collections");
            req.setParam("action", "CREATE");
            req.setParam("numShards", String.valueOf(remoteSolrServerConfiguration.getSolrShardsNo()));
            req.setParam("replicationFactor", String.valueOf(remoteSolrServerConfiguration.getSolrReplicationFactor()));
            req.setParam("collection.configName", solrCollection);
            req.setParam("name", solrCollection);
            cloudSolrServer.request(req);
        }
    } catch (Exception e) {
        log.warn("could not create collection {}", solrCollection);
        throw new SolrServerException(e);
    }
}
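The CREATE call above is assembled by hand as an UpdateRequest against /admin/collections; with a newer SolrJ the same request can be expressed through CollectionAdminRequest, the factory already used in the test setup earlier. A sketch with zkHost, collection name, and shard/replica counts as placeholders, assuming the config set is uploaded under the collection's name as in the method above.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class CreateCollectionSketch {

    // Sketch of the same CREATE call built with CollectionAdminRequest instead of a
    // hand-assembled /admin/collections request. The config set name matches the
    // collection name, mirroring the collection.configName parameter above.
    public static void create(String zkHost, String collection, int numShards, int replicationFactor) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient(zkHost)) {
            client.connect();
            CollectionAdminRequest.createCollection(collection, collection, numShards, replicationFactor)
                    .process(client);
        }
    }
}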
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
Class StatementImpl, method constructStream.
protected SolrStream constructStream(String sql) throws IOException {
    try {
        ZkStateReader zkStateReader = this.connection.getClient().getZkStateReader();
        Collection<Slice> slices = CloudSolrStream.getSlices(this.connection.getCollection(), zkStateReader, true);
        List<Replica> shuffler = new ArrayList<>();
        for (Slice slice : slices) {
            Collection<Replica> replicas = slice.getReplicas();
            for (Replica replica : replicas) {
                shuffler.add(replica);
            }
        }
        Collections.shuffle(shuffler, new Random());
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CommonParams.QT, "/sql");
        params.set("stmt", sql);
        for (String propertyName : this.connection.getProperties().stringPropertyNames()) {
            params.set(propertyName, this.connection.getProperties().getProperty(propertyName));
        }
        Replica rep = shuffler.get(0);
        ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep);
        String url = zkProps.getCoreUrl();
        return new SolrStream(url, params);
    } catch (Exception e) {
        throw new IOException(e);
    }
}
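Consuming the returned stream follows the usual TupleStream pattern: open, read tuples until the EOF marker, close. A minimal sketch against a fixed replica URL; the URL, the SQL statement, and the "id" column are placeholders, whereas constructStream above picks the replica from the cluster state instead of hard-coding it.

import java.io.IOException;
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.stream.SolrStream;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;

public class SqlStreamSketch {

    // Read every tuple produced by the /sql handler at the given core URL.
    public static void printResults(String coreUrl, String sql) throws IOException {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CommonParams.QT, "/sql");
        params.set("stmt", sql);
        SolrStream stream = new SolrStream(coreUrl, params);
        try {
            stream.open();
            while (true) {
                Tuple tuple = stream.read();
                if (tuple.EOF) {
                    break;
                }
                // "id" is a placeholder column from the hypothetical SQL statement.
                System.out.println(tuple.getString("id"));
            }
        } finally {
            stream.close();
        }
    }

    public static void main(String[] args) throws IOException {
        // Placeholder URL and statement.
        printResults("http://localhost:8983/solr/collection1", "select id from collection1 limit 10");
    }
}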