Example usage of org.apache.solr.common.cloud.Replica in the Apache lucene-solr project: the assertShardConsistency method of the RecoveryZkTest class.
/**
 * Asserts that every ACTIVE replica of the given shard reports the same local
 * document count, and — when {@code expectDocs} is true — that the shard
 * actually contains documents.
 *
 * @param shard      the slice whose active replicas are compared
 * @param expectDocs if true, fail when the shard reports zero documents
 * @throws Exception if querying any replica fails
 */
private void assertShardConsistency(Slice shard, boolean expectDocs) throws Exception {
List<Replica> replicas = shard.getReplicas(r -> r.getState() == Replica.State.ACTIVE);
long[] numCounts = new long[replicas.size()];
int i = 0;
for (Replica replica : replicas) {
// distrib=false so each replica reports only its own local index, not a fan-out count
try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getCoreUrl()).withHttpClient(cluster.getSolrClient().getHttpClient()).build()) {
numCounts[i] = client.query(new SolrQuery("*:*").add("distrib", "false")).getResults().getNumFound();
i++;
}
}
for (int j = 1; j < replicas.size(); j++) {
if (numCounts[j] != numCounts[j - 1])
fail("Mismatch in counts between replicas of shard " + shard.getName()
    + ": " + numCounts[j - 1] + " vs " + numCounts[j]);
}
// Check element 0 explicitly: the pairwise loop above never runs when there is
// only one active replica, so the original code silently skipped the
// expectDocs check (and the zero-replica case) for single-replica shards.
if (expectDocs && (numCounts.length == 0 || numCounts[0] == 0))
fail("Expected docs on shard " + shard.getName() + " but found none");
}
Example usage of org.apache.solr.common.cloud.Replica in the Apache lucene-solr project: the test method of the RecoveryZkTest class.
/**
 * Verifies that a replica which is stopped while indexing is in flight can
 * recover after restart and end up with the same document count as the leader.
 *
 * @throws Exception on any indexing, cluster, or wait failure
 */
@Test
public void test() throws Exception {

final String collection = "recoverytest";

CollectionAdminRequest.createCollection(collection, "conf", 1, 2).setMaxShardsPerNode(1).process(cluster.getSolrClient());
waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
cluster.getSolrClient().setDefaultCollection(collection);

// start a couple indexing threads
int[] maxDocList = new int[] { 300, 700, 1200, 1350, 3000 };
int[] maxDocNightlyList = new int[] { 3000, 7000, 12000, 30000, 45000, 60000 };

int maxDoc;
if (!TEST_NIGHTLY) {
// nextInt(length) — the previous "length - 1" bound could never pick the last entry
maxDoc = maxDocList[random().nextInt(maxDocList.length)];
} else {
// BUGFIX: was indexed with a bound from maxDocList (a different, shorter array),
// so the three largest nightly doc counts were unreachable
maxDoc = maxDocNightlyList[random().nextInt(maxDocNightlyList.length)];
}
log.info("Indexing {} documents", maxDoc);

indexThread = new StoppableIndexingThread(null, cluster.getSolrClient(), "1", true, maxDoc, 1, true);
indexThread.start();

indexThread2 = new StoppableIndexingThread(null, cluster.getSolrClient(), "2", true, maxDoc, 1, true);
indexThread2.start();

// give some time to index...
int[] waitTimes = new int[] { 200, 2000, 3000 };
Thread.sleep(waitTimes[random().nextInt(waitTimes.length)]);

// bring shard replica down
DocCollection state = getCollectionState(collection);
Replica leader = state.getLeader("shard1");
Replica replica = getRandomReplica(state.getSlice("shard1"), (r) -> leader != r);

JettySolrRunner jetty = cluster.getReplicaJetty(replica);
jetty.stop();

// wait a moment - lets allow some docs to be indexed so replication time is non 0
Thread.sleep(waitTimes[random().nextInt(waitTimes.length)]);

// bring shard replica up
jetty.start();

// make sure replication can start
Thread.sleep(3000);

// stop indexing threads
indexThread.safeStop();
indexThread2.safeStop();

indexThread.join();
indexThread2.join();

new UpdateRequest().commit(cluster.getSolrClient(), collection);

cluster.getSolrClient().waitForState(collection, 120, TimeUnit.SECONDS, clusterShape(1, 2));

// test that leader and replica have same doc count
state = getCollectionState(collection);
assertShardConsistency(state.getSlice("shard1"), true);
}
Example usage of org.apache.solr.common.cloud.Replica in the Apache lucene-solr project: the setup method of the MissingSegmentRecoveryTest class.
/**
 * Creates a one-shard, two-replica test collection, seeds it with ten trivial
 * documents (ids 0-9), and records the shard1 leader plus one non-leader replica.
 *
 * @throws SolrServerException if a Solr request fails
 * @throws IOException         on communication failure with the cluster
 */
@Before
public void setup() throws SolrServerException, IOException {
CollectionAdminRequest.createCollection(collection, "conf", 1, 2)
    .setMaxShardsPerNode(1)
    .process(cluster.getSolrClient());
waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
cluster.getSolrClient().setDefaultCollection(collection);

// Index ten minimal documents so recovery has something to work with.
List<SolrInputDocument> seedDocs = new ArrayList<>();
for (int id = 0; id < 10; id++) {
SolrInputDocument seedDoc = new SolrInputDocument();
seedDoc.addField("id", id);
seedDocs.add(seedDoc);
}
cluster.getSolrClient().add(seedDocs);
cluster.getSolrClient().commit();

// Remember the leader and some replica that is not the leader.
DocCollection state = getCollectionState(collection);
leader = state.getLeader("shard1");
replica = getRandomReplica(state.getSlice("shard1"), (r) -> leader != r);
}
Example usage of org.apache.solr.common.cloud.Replica in the Apache lucene-solr project: the getReplicas method of the SSLMigrationTest class.
/**
 * Collects every replica of every slice in the default collection.
 *
 * @return all replicas across all slices, in slice iteration order
 */
private List<Replica> getReplicas() {
DocCollection docCollection = this.cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
List<Replica> allReplicas = new ArrayList<>();
for (Slice currentSlice : docCollection.getSlices()) {
for (Replica currentReplica : currentSlice.getReplicas()) {
allReplicas.add(currentReplica);
}
}
return allReplicas;
}
Example usage of org.apache.solr.common.cloud.Replica in the Apache lucene-solr project: the moveReplicaTest method of the HdfsCollectionsAPIDistributedZkTest class.
/**
 * Moves a non-leader replica to another live node and verifies that the HDFS
 * data directory is reused (so no replication requests are issued) and that
 * core counts on source and target nodes are updated.
 *
 * @throws Exception on any cluster, indexing, or admin-request failure
 */
@Test
public void moveReplicaTest() throws Exception {
cluster.waitForAllNodes(5000);
String coll = "movereplicatest_coll";
CloudSolrClient cloudClient = cluster.getSolrClient();

CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
create.setMaxShardsPerNode(2);
cloudClient.request(create);

for (int i = 0; i < 10; i++) {
cloudClient.add(coll, sdoc("id", String.valueOf(i)));
cloudClient.commit(coll);
}

// Pick a random slice, then a non-leader replica within it.
List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
Collections.shuffle(slices, random());
Slice slice = null;
Replica replica = null;
for (Slice s : slices) {
slice = s;
for (Replica r : s.getReplicas()) {
if (s.getLeader() != r) {
replica = r;
}
}
}
// Guard against dereferencing a null replica if only leaders were found.
assertNotNull("Expected at least one non-leader replica", replica);
String dataDir = getDataDir(replica);

// Pick a random target node that is not the replica's current node.
// BUGFIX: the original shuffled a copy of liveNodes but then iterated the
// UNshuffled set, so the shuffle was dead code and the choice was not random.
Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
ArrayList<String> shuffledNodes = new ArrayList<>(liveNodes);
Collections.shuffle(shuffledNodes, random());
String targetNode = null;
for (String node : shuffledNodes) {
if (!replica.getNodeName().equals(node)) {
targetNode = node;
break;
}
}
assertNotNull(targetNode);

CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
moveReplica.process(cloudClient);
checkNumOfCores(cloudClient, replica.getNodeName(), 0);
checkNumOfCores(cloudClient, targetNode, 2);

waitForState("Wait for recovery finish failed", coll, clusterShape(2, 2));
slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
boolean found = false;
for (Replica newReplica : slice.getReplicas()) {
if (getDataDir(newReplica).equals(dataDir)) {
found = true;
}
}
assertTrue(found);

// data dir is reused so replication will be skipped
for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
List<String> registryNames = manager.registryNames().stream().filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
for (String registry : registryNames) {
Map<String, Metric> metrics = manager.registry(registry).getMetrics();
Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
if (counter != null) {
assertEquals(0, counter.getCount());
}
}
}
}
Aggregations