use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
the class TestHdfsBackupRestoreCore method test.
@Test
public void test() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "HdfsBackupRestore";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
  create.process(solrClient);
  int nDocs = BackupRestoreUtils.indexDocs(solrClient, collectionName, docsSeed);
  DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
  assertEquals(1, collectionState.getActiveSlices().size());
  Slice shard = collectionState.getActiveSlices().iterator().next();
  assertEquals(1, shard.getReplicas().size());
  Replica replica = shard.getReplicas().iterator().next();
  String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
  String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
  String backupName = TestUtil.randomSimpleString(random(), 1, 5);
  boolean testViaReplicationHandler = random().nextBoolean();
  String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString();
  try (SolrClient masterClient = getHttpSolrClient(replicaBaseUrl)) {
    // Create a backup.
    if (testViaReplicationHandler) {
      log.info("Running Backup via replication handler");
      BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_BACKUP, "hdfs", backupName);
      CheckBackupStatus checkBackupStatus = new CheckBackupStatus((HttpSolrClient) masterClient, coreName, null);
      while (!checkBackupStatus.success) {
        checkBackupStatus.fetchStatus();
        Thread.sleep(1000);
      }
    } else {
      log.info("Running Backup via core admin api");
      Map<String, String> params = new HashMap<>();
      params.put("name", backupName);
      params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
      BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.BACKUPCORE.toString(), params);
    }
    int numRestoreTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1;
    for (int attempts = 0; attempts < numRestoreTests; attempts++) {
      // Modify the existing index before we call restore.
      if (nDocs > 0) {
        // Delete a few docs.
        int numDeletes = TestUtil.nextInt(random(), 1, nDocs);
        for (int i = 0; i < numDeletes; i++) {
          masterClient.deleteByQuery(collectionName, "id:" + i);
        }
        masterClient.commit(collectionName);
        // Add a few more.
        int moreAdds = TestUtil.nextInt(random(), 1, 100);
        for (int i = 0; i < moreAdds; i++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", i + nDocs);
          doc.addField("name", "name = " + (i + nDocs));
          masterClient.add(collectionName, doc);
        }
        // Purposely skip the commit once in a while, so some docs are left uncommitted when restore runs.
        if (usually()) {
          masterClient.commit(collectionName);
        }
      }
      // Snapshooter prefixes "snapshot." to the backup name.
      if (testViaReplicationHandler) {
        log.info("Running Restore via replication handler");
        BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_RESTORE, "hdfs", backupName);
        while (!TestRestoreCore.fetchRestoreStatus(baseUrl, coreName)) {
          Thread.sleep(1000);
        }
      } else {
        log.info("Running Restore via core admin api");
        Map<String, String> params = new HashMap<>();
        params.put("name", "snapshot." + backupName);
        params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
        BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.RESTORECORE.toString(), params);
      }
      // Check whether the restore was successful by verifying that all the docs are present again.
      BackupRestoreUtils.verifyDocs(nDocs, masterClient, coreName);
      // Verify the permissions of the backup folder.
      FileStatus status = fs.getFileStatus(new org.apache.hadoop.fs.Path("/backup/snapshot." + backupName));
      FsPermission perm = status.getPermission();
      assertEquals(FsAction.ALL, perm.getUserAction());
      assertEquals(FsAction.ALL, perm.getGroupAction());
      assertEquals(FsAction.ALL, perm.getOtherAction());
    }
  }
}
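Both branches above reduce to plain HTTP calls against the core. A minimal sketch of issuing the equivalent requests directly with SolrJ's GenericSolrRequest; the base URL, core name ("mycore"), and backup name ("mybackup") are placeholders, and BackupRestoreUtils presumably wraps requests of this shape:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public void backupSketch() throws Exception {
  try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
    // 1) Replication handler: GET /<core>/replication?command=backup&name=...&repository=hdfs
    //    (asynchronous; poll command=details for completion, as the test does).
    ModifiableSolrParams replParams = new ModifiableSolrParams();
    replParams.set("command", "backup");
    replParams.set("name", "mybackup");
    replParams.set("repository", "hdfs");
    client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/mycore/replication", replParams));

    // 2) Core admin API: GET /admin/cores?action=BACKUPCORE&core=...&name=...&repository=hdfs
    ModifiableSolrParams coreParams = new ModifiableSolrParams();
    coreParams.set("action", "BACKUPCORE");
    coreParams.set("core", "mycore");
    coreParams.set("name", "mybackup");
    coreParams.set("repository", "hdfs");
    client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/cores", coreParams));
  }
}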
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
the class PingRequestHandlerTest method testPingInClusterWithNoHealthCheck.
public void testPingInClusterWithNoHealthCheck() throws Exception {
  MiniSolrCloudCluster miniCluster = new MiniSolrCloudCluster(NUM_SERVERS, createTempDir(), buildJettyConfig("/solr"));
  final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
  try {
    assertNotNull(miniCluster.getZkServer());
    List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
    assertEquals(NUM_SERVERS, jettys.size());
    for (JettySolrRunner jetty : jettys) {
      assertTrue(jetty.isRunning());
    }
    // create collection
    String collectionName = "testSolrCloudCollection";
    String configName = "solrCloudCollectionConfig";
    miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1").resolve("conf"), configName);
    CollectionAdminRequest.createCollection(collectionName, configName, NUM_SHARDS, REPLICATION_FACTOR).process(miniCluster.getSolrClient());
    // Send distributed and non-distributed ping query
    SolrPingWithDistrib reqDistrib = new SolrPingWithDistrib();
    reqDistrib.setDistrib(true);
    SolrPingResponse rsp = reqDistrib.process(cloudSolrClient, collectionName);
    assertEquals(0, rsp.getStatus());
    assertTrue(rsp.getResponseHeader().getBooleanArg("zkConnected"));
    SolrPing reqNonDistrib = new SolrPing();
    rsp = reqNonDistrib.process(cloudSolrClient, collectionName);
    assertEquals(0, rsp.getStatus());
    assertTrue(rsp.getResponseHeader().getBooleanArg("zkConnected"));
  } finally {
    miniCluster.shutdown();
  }
}
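SolrPingWithDistrib is a test-local helper rather than a SolrJ class. A plausible reconstruction, assuming it does nothing more than add the distrib flag to a standard SolrPing:

import org.apache.solr.client.solrj.request.SolrPing;

class SolrPingWithDistrib extends SolrPing {
  // Adds the distrib flag so the ping fans out to all shards instead of
  // being answered locally by the receiving core.
  SolrPingWithDistrib setDistrib(boolean distrib) {
    getParams().add("distrib", Boolean.toString(distrib));
    return this;
  }
}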
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
the class TestSolrCloudWithHadoopAuthPlugin method testCollectionCreateSearchDelete.
protected void testCollectionCreateSearchDelete() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "testkerberoscollection";
  // create collection
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", NUM_SHARDS, REPLICATION_FACTOR);
  create.process(solrClient);
  SolrInputDocument doc = new SolrInputDocument();
  doc.setField("id", "1");
  solrClient.add(collectionName, doc);
  solrClient.commit(collectionName);
  SolrQuery query = new SolrQuery();
  query.setQuery("*:*");
  QueryResponse rsp = solrClient.query(collectionName, query);
  assertEquals(1, rsp.getResults().getNumFound());
  CollectionAdminRequest.Delete deleteReq = CollectionAdminRequest.deleteCollection(collectionName);
  deleteReq.process(solrClient);
  AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, solrClient.getZkStateReader(), true, true, 330);
}
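Here the client comes from the test cluster; outside the test framework a CloudSolrClient is built against the ZooKeeper ensemble (for this kerberized test, the framework additionally wires up Kerberos-aware HTTP client configuration). A minimal sketch with a placeholder zkHost; note that the builder methods vary somewhat across Solr versions:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;

public void clientSketch() throws Exception {
  // "localhost:9983" is a placeholder ZooKeeper address.
  try (CloudSolrClient client = new CloudSolrClient.Builder()
      .withZkHost("localhost:9983")
      .build()) {
    client.setDefaultCollection("testkerberoscollection");
    long hits = client.query(new SolrQuery("*:*")).getResults().getNumFound();
  }
}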
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
the class StatsStream method open.
public void open() throws IOException {
  ModifiableSolrParams paramsLoc = new ModifiableSolrParams(this.params);
  addStats(paramsLoc, metrics);
  paramsLoc.set("stats", "true");
  paramsLoc.set("rows", "0");
  Map<String, List<String>> shardsMap = (Map<String, List<String>>) streamContext.get("shards");
  if (shardsMap == null) {
    QueryRequest request = new QueryRequest(paramsLoc);
    CloudSolrClient cloudSolrClient = cache.getCloudSolrClient(zkHost);
    try {
      NamedList response = cloudSolrClient.request(request, collection);
      this.tuple = getTuple(response);
    } catch (Exception e) {
      throw new IOException(e);
    }
  } else {
    List<String> shards = shardsMap.get(collection);
    HttpSolrClient client = cache.getHttpSolrClient(shards.get(0));
    if (shards.size() > 1) {
      String shardsParam = getShardString(shards);
      paramsLoc.add("shards", shardsParam);
      paramsLoc.add("distrib", "true");
    }
    QueryRequest request = new QueryRequest(paramsLoc);
    try {
      NamedList response = client.request(request);
      this.tuple = getTuple(response);
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
}
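The cache field used above is the SolrClientCache that the stream presumably copies out of the StreamContext in setStreamContext, so a caller must install one before open() is invoked. A minimal caller-side sketch; statsStream is passed in as an already-constructed StatsStream, since its constructor signature varies by version:

import org.apache.solr.client.solrj.io.SolrClientCache;
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.stream.StatsStream;
import org.apache.solr.client.solrj.io.stream.StreamContext;

public void statsSketch(StatsStream statsStream) throws Exception {
  SolrClientCache clientCache = new SolrClientCache();
  try {
    StreamContext context = new StreamContext();
    context.setSolrClientCache(clientCache); // what open() resolves clients from
    statsStream.setStreamContext(context);
    statsStream.open();
    Tuple stats = statsStream.read(); // StatsStream emits a single aggregate tuple
    statsStream.close();
  } finally {
    clientCache.close();
  }
}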
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
the class ScoreNodesStream method open.
public void open() throws IOException {
  stream.open();
  Tuple node = null;
  StringBuilder builder = new StringBuilder();
  String field = null;
  String collection = null;
  while (true) {
    node = stream.read();
    if (node.EOF) {
      break;
    }
    if (facet) {
      // Turn the facet tuple into a node.
      String nodeId = node.getString(bucket);
      node.put("node", nodeId);
      node.remove(bucket);
      node.put("collection", facetCollection);
      node.put("field", bucket);
    }
    if (!node.fields.containsKey("node")) {
      throw new IOException("node field not present in the Tuple");
    }
    String nodeId = node.getString("node");
    nodes.put(nodeId, node);
    if (builder.length() > 0) {
      builder.append(",");
    }
    // Capture field and collection from every tuple, including the first;
    // assigning them only after the first tuple would leave them null when
    // the underlying stream emits a single node.
    field = node.getString("field");
    collection = node.getString("collection");
    builder.append(nodeId);
  }
  CloudSolrClient client = clientCache.getCloudSolrClient(zkHost);
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add(CommonParams.QT, "/terms");
  params.add(TermsParams.TERMS, "true");
  params.add(TermsParams.TERMS_FIELD, field);
  params.add(TermsParams.TERMS_STATS, "true");
  params.add(TermsParams.TERMS_LIST, builder.toString());
  params.add(TermsParams.TERMS_LIMIT, Integer.toString(nodes.size()));
  params.add(DISTRIB, "true");
  QueryRequest request = new QueryRequest(params);
  try {
    // Get the response from the terms component.
    NamedList response = client.request(request, collection);
    NamedList<Number> stats = (NamedList<Number>) response.get("indexstats");
    long numDocs = stats.get("numDocs").longValue();
    NamedList<NamedList<Number>> fields = (NamedList<NamedList<Number>>) response.get("terms");
    int size = fields.size();
    for (int i = 0; i < size; i++) {
      String fieldName = fields.getName(i);
      NamedList<Number> terms = fields.get(fieldName);
      int tsize = terms.size();
      for (int t = 0; t < tsize; t++) {
        String term = terms.getName(t);
        Number docFreq = terms.get(term);
        Tuple tuple = nodes.get(term);
        if (!tuple.fields.containsKey(termFreq)) {
          throw new Exception("termFreq field not present in the Tuple");
        }
        Number termFreqValue = (Number) tuple.get(termFreq);
        // TF-IDF style score: (log(tf) + 1) * (log((numDocs + 1) / (df + 1)) + 1)
        float score = (float) (Math.log(termFreqValue.floatValue()) + 1.0) * (float) (Math.log((numDocs + 1) / (docFreq.doubleValue() + 1)) + 1.0);
        tuple.put("nodeScore", score);
        tuple.put("docFreq", docFreq);
        tuple.put("numDocs", numDocs);
      }
    }
  } catch (Exception e) {
    throw new IOException(e);
  }
  tuples = nodes.values().iterator();
}
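ScoreNodesStream is normally built from a streaming expression wrapping gatherNodes rather than instantiated directly. A sketch under that assumption; the collection ("emails"), walk/gather fields, and zkHost are placeholders following the documented scoreNodes usage, and the import paths reflect the lucene-solr source tree, so adjust for your version:

import org.apache.solr.client.solrj.io.SolrClientCache;
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.graph.GatherNodesStream;
import org.apache.solr.client.solrj.io.stream.ScoreNodesStream;
import org.apache.solr.client.solrj.io.stream.StreamContext;
import org.apache.solr.client.solrj.io.stream.TupleStream;
import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;

public void scoreNodesSketch() throws Exception {
  StreamFactory factory = new StreamFactory()
      .withCollectionZkHost("emails", "localhost:9983") // placeholders
      .withFunctionName("gatherNodes", GatherNodesStream.class)
      .withFunctionName("scoreNodes", ScoreNodesStream.class);
  TupleStream stream = factory.constructStream(
      "scoreNodes(gatherNodes(emails, walk=\"john@example.com->from\", gather=\"to\"))");
  StreamContext context = new StreamContext();
  SolrClientCache clientCache = new SolrClientCache();
  context.setSolrClientCache(clientCache);
  stream.setStreamContext(context);
  try {
    stream.open();
    for (Tuple t = stream.read(); !t.EOF; t = stream.read()) {
      System.out.println(t.getString("node") + " score=" + t.get("nodeScore"));
    }
  } finally {
    stream.close();
    clientCache.close();
  }
}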