Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
From the class CloudSolrClientTest, method stateVersionParamTest:
@Test
public void stateVersionParamTest() throws Exception {
  DocCollection coll = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
  Replica r = coll.getSlices().iterator().next().getReplicas().iterator().next();
  SolrQuery q = new SolrQuery().setQuery("*:*");
  HttpSolrClient.RemoteSolrException sse = null;
  final String url = r.getStr(ZkStateReader.BASE_URL_PROP) + "/" + COLLECTION;
  try (HttpSolrClient solrClient = getHttpSolrClient(url)) {
    log.info("should work query, result {}", solrClient.query(q));
    // no problem
    q.setParam(CloudSolrClient.STATE_VERSION, COLLECTION + ":" + coll.getZNodeVersion());
    log.info("2nd query, result {}", solrClient.query(q));
    // no error yet, good
    // an older version: expect an error
    q.setParam(CloudSolrClient.STATE_VERSION, COLLECTION + ":" + (coll.getZNodeVersion() - 1));
    QueryResponse rsp = solrClient.query(q);
    Map m = (Map) rsp.getResponse().get(CloudSolrClient.STATE_VERSION, rsp.getResponse().size() - 1);
    assertNotNull("Expected extra information from the server with the list of invalid collection states", m);
    assertNotNull(m.get(COLLECTION));
  }
  // now send the request to another node that does not serve the collection
  Set<String> allNodesOfColl = new HashSet<>();
  for (Slice slice : coll.getSlices()) {
    for (Replica replica : slice.getReplicas()) {
      allNodesOfColl.add(replica.getStr(ZkStateReader.BASE_URL_PROP));
    }
  }
  String theNode = null;
  Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
  for (String s : liveNodes) {
    String n = cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(s);
    if (!allNodesOfColl.contains(n)) {
      theNode = n;
      break;
    }
  }
  log.info("the node which does not serve this collection: {}", theNode);
  assertNotNull(theNode);
  final String solrClientUrl = theNode + "/" + COLLECTION;
  try (SolrClient solrClient = getHttpSolrClient(solrClientUrl)) {
    q.setParam(CloudSolrClient.STATE_VERSION, COLLECTION + ":" + (coll.getZNodeVersion() - 1));
    try {
      solrClient.query(q);
      log.info("query succeeded, but an error was expected");
    } catch (HttpSolrClient.RemoteSolrException e) {
      sse = e;
    }
    assertNotNull(sse);
    assertEquals("Error code should be 510", SolrException.ErrorCode.INVALID_STATE.code, sse.code());
  }
}
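The DocCollection calls doing the work in this test are getZNodeVersion(), which exposes the ZooKeeper znode version of the collection's state, and the slice/replica traversal used to build a replica's base URL. A minimal sketch of building the "collection:version" token sent via CloudSolrClient.STATE_VERSION (the class and method names here are illustrative, not part of the test):

import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.ZkStateReader;

class StateVersionSketch {
  // Builds the "collection:version" token that the STATE_VERSION request parameter expects.
  static String stateVersionToken(ZkStateReader reader, String collection) {
    DocCollection coll = reader.getClusterState().getCollection(collection);
    return collection + ":" + coll.getZNodeVersion();
  }
}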
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
From the class ReplaceNodeTest, method test:
@Test
public void test() throws Exception {
  cluster.waitForAllNodes(5000);
  String coll = "replacenodetest_coll";
  log.info("total_jettys: {}", cluster.getJettySolrRunners().size());
  CloudSolrClient cloudClient = cluster.getSolrClient();
  Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
  ArrayList<String> l = new ArrayList<>(liveNodes);
  Collections.shuffle(l, random());
  String emptyNode = l.remove(0);
  String node2bdecommissioned = l.get(0);
  CollectionAdminRequest.Create create;
  // NOTE: always using the createCollection that takes an 'int' for all types of replicas, so we never
  // have to worry about null checking when comparing the Create command with the final Slices
  create = pickRandom(
      CollectionAdminRequest.createCollection(coll, "conf1", 5, 2, 0, 0),
      CollectionAdminRequest.createCollection(coll, "conf1", 5, 1, 1, 0),
      CollectionAdminRequest.createCollection(coll, "conf1", 5, 0, 1, 1),
      CollectionAdminRequest.createCollection(coll, "conf1", 5, 1, 0, 1),
      CollectionAdminRequest.createCollection(coll, "conf1", 5, 0, 2, 0));
  create.setCreateNodeSet(StrUtils.join(l, ',')).setMaxShardsPerNode(3);
  cloudClient.request(create);
  log.info("excluded_node : {} ", emptyNode);
  new CollectionAdminRequest.ReplaceNode(node2bdecommissioned, emptyNode).processAsync("000", cloudClient);
  CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus("000");
  boolean success = false;
  for (int i = 0; i < 200; i++) {
    CollectionAdminRequest.RequestStatusResponse rsp = requestStatus.process(cloudClient);
    if (rsp.getRequestStatus() == RequestStatusState.COMPLETED) {
      success = true;
      break;
    }
    assertFalse(rsp.getRequestStatus() == RequestStatusState.FAILED);
    Thread.sleep(50);
  }
  assertTrue(success);
  try (HttpSolrClient coreclient = getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(node2bdecommissioned))) {
    CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
    assertEquals("Expecting no cores but found some: " + status.getCoreStatus(), 0, status.getCoreStatus().size());
  }
  // now move the replicas back
  new CollectionAdminRequest.ReplaceNode(emptyNode, node2bdecommissioned).setParallel(Boolean.TRUE).processAsync("001", cloudClient);
  requestStatus = CollectionAdminRequest.requestStatus("001");
  // reset the flag so the second replace is verified independently of the first
  success = false;
  for (int i = 0; i < 200; i++) {
    CollectionAdminRequest.RequestStatusResponse rsp = requestStatus.process(cloudClient);
    if (rsp.getRequestStatus() == RequestStatusState.COMPLETED) {
      success = true;
      break;
    }
    assertFalse(rsp.getRequestStatus() == RequestStatusState.FAILED);
    Thread.sleep(50);
  }
  assertTrue(success);
  try (HttpSolrClient coreclient = getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(emptyNode))) {
    CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
    assertEquals("Expecting no cores but found some: " + status.getCoreStatus(), 0, status.getCoreStatus().size());
  }
  DocCollection collection = cloudClient.getZkStateReader().getClusterState().getCollection(coll);
  assertEquals(create.getNumShards().intValue(), collection.getSlices().size());
  for (Slice s : collection.getSlices()) {
    assertEquals(create.getNumNrtReplicas().intValue(), s.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
    assertEquals(create.getNumTlogReplicas().intValue(), s.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
    assertEquals(create.getNumPullReplicas().intValue(), s.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
  }
}
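The status-polling loop above appears twice; as a sketch, the same wait could live in a small helper (a hypothetical utility, not part of the test):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.RequestStatusState;

class AsyncStatusSketch {
  // Polls an async Collections API request until it completes, fails, or runs out of tries.
  static boolean waitForCompletion(SolrClient client, String asyncId, int maxTries) throws Exception {
    CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus(asyncId);
    for (int i = 0; i < maxTries; i++) {
      RequestStatusState state = requestStatus.process(client).getRequestStatus();
      if (state == RequestStatusState.COMPLETED) return true;
      if (state == RequestStatusState.FAILED) return false;
      Thread.sleep(50);
    }
    // still running after maxTries polls
    return false;
  }
}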
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
From the class RecoveryZkTest, method test:
@Test
public void test() throws Exception {
  final String collection = "recoverytest";
  CollectionAdminRequest.createCollection(collection, "conf", 1, 2).setMaxShardsPerNode(1).process(cluster.getSolrClient());
  waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
  cluster.getSolrClient().setDefaultCollection(collection);
  // start a couple of indexing threads
  int[] maxDocList = new int[] { 300, 700, 1200, 1350, 3000 };
  int[] maxDocNightlyList = new int[] { 3000, 7000, 12000, 30000, 45000, 60000 };
  int maxDoc;
  if (!TEST_NIGHTLY) {
    maxDoc = maxDocList[random().nextInt(maxDocList.length)];
  } else {
    maxDoc = maxDocNightlyList[random().nextInt(maxDocNightlyList.length)];
  }
  log.info("Indexing {} documents", maxDoc);
  indexThread = new StoppableIndexingThread(null, cluster.getSolrClient(), "1", true, maxDoc, 1, true);
  indexThread.start();
  indexThread2 = new StoppableIndexingThread(null, cluster.getSolrClient(), "2", true, maxDoc, 1, true);
  indexThread2.start();
  // give some time to index...
  int[] waitTimes = new int[] { 200, 2000, 3000 };
  Thread.sleep(waitTimes[random().nextInt(waitTimes.length)]);
  // bring a shard replica down
  DocCollection state = getCollectionState(collection);
  Replica leader = state.getLeader("shard1");
  Replica replica = getRandomReplica(state.getSlice("shard1"), (r) -> leader != r);
  JettySolrRunner jetty = cluster.getReplicaJetty(replica);
  jetty.stop();
  // wait a moment - let some docs be indexed so the replication time is non-zero
  Thread.sleep(waitTimes[random().nextInt(waitTimes.length)]);
  // bring the shard replica back up
  jetty.start();
  // make sure replication can start
  Thread.sleep(3000);
  // stop indexing threads
  indexThread.safeStop();
  indexThread2.safeStop();
  indexThread.join();
  indexThread2.join();
  new UpdateRequest().commit(cluster.getSolrClient(), collection);
  cluster.getSolrClient().waitForState(collection, 120, TimeUnit.SECONDS, clusterShape(1, 2));
  // test that leader and replica have the same doc count
  state = getCollectionState(collection);
  assertShardConsistency(state.getSlice("shard1"), true);
}
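The DocCollection calls used for the restart are getLeader(shard) and getSlice(shard); a minimal sketch of selecting a non-leader replica (reference comparison is safe here because the leader and the replicas come from the same cluster-state snapshot; the class and method names are illustrative):

import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;

class NonLeaderSketch {
  // Returns some replica of the given shard that is not its leader, or null if there is none.
  static Replica nonLeader(DocCollection state, String shard) {
    Replica leader = state.getLeader(shard);
    Slice slice = state.getSlice(shard);
    for (Replica r : slice.getReplicas()) {
      if (r != leader) {
        return r;
      }
    }
    return null;
  }
}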
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
From the class MissingSegmentRecoveryTest, method setup:
@Before
public void setup() throws SolrServerException, IOException {
  CollectionAdminRequest.createCollection(collection, "conf", 1, 2).setMaxShardsPerNode(1).process(cluster.getSolrClient());
  waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
  cluster.getSolrClient().setDefaultCollection(collection);
  List<SolrInputDocument> docs = new ArrayList<>();
  for (int i = 0; i < 10; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", i);
    docs.add(doc);
  }
  cluster.getSolrClient().add(docs);
  cluster.getSolrClient().commit();
  DocCollection state = getCollectionState(collection);
  leader = state.getLeader("shard1");
  replica = getRandomReplica(state.getSlice("shard1"), (r) -> leader != r);
}
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
From the class SSLMigrationTest, method getReplicas:
private List<Replica> getReplicas() {
  List<Replica> replicas = new ArrayList<>();
  DocCollection collection = this.cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
  for (Slice slice : collection.getSlices()) {
    replicas.addAll(slice.getReplicas());
  }
  return replicas;
}
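The same traversal can also be written with streams; an equivalent sketch over any DocCollection (class and method names are illustrative):

import java.util.List;
import java.util.stream.Collectors;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;

class AllReplicasSketch {
  // Flattens every slice's replicas into one list, same result as the loop above.
  static List<Replica> allReplicas(DocCollection collection) {
    return collection.getSlices().stream()
        .flatMap(slice -> slice.getReplicas().stream())
        .collect(Collectors.toList());
  }
}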