Use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
From the class TestInPlaceUpdatesDistrib, method delayedReorderingFetchesMissingUpdateFromLeaderTest.
private void delayedReorderingFetchesMissingUpdateFromLeaderTest() throws Exception {
clearIndex();
commit();
float inplace_updatable_float = 1F;
buildRandomIndex(inplace_updatable_float, Collections.singletonList(1));
float newinplace_updatable_float = 100F;
List<UpdateRequest> updates = new ArrayList<>();
updates.add(regularUpdateRequest("id", 1, "title_s", "title1_new", "id_i", 1, "inplace_updatable_float", newinplace_updatable_float));
updates.add(regularUpdateRequest("id", 1, "inplace_updatable_float", map("inc", 1)));
updates.add(regularUpdateRequest("id", 1, "inplace_updatable_float", map("inc", 1)));
// The next request to replica2 will be delayed by 6 secs (timeout is 5s)
shardToJetty.get(SHARD1).get(1).jetty.getDebugFilter().addDelay("Waiting for dependant update to timeout", 1, 6000);
ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
for (UpdateRequest update : updates) {
AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, cloudClient, random().nextLong());
threadpool.submit(task);
// We can't guarantee/trust what order the updates are executed in, since multiple threads
// are involved, but we're trying to bias the thread scheduling to run them in the order submitted
Thread.sleep(100);
}
threadpool.shutdown();
assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
commit();
// Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
ClusterState state = cloudClient.getZkStateReader().getClusterState();
int numActiveReplicas = 0;
for (Replica rep : state.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1).getReplicas()) {
  if (rep.getState().equals(Replica.State.ACTIVE)) numActiveReplicas++;
}
assertEquals("The replica receiving reordered updates must not have gone down", 3, numActiveReplicas);
}
for (SolrClient client : clients) {
log.info("Testing client (Fetch missing test): " + ((HttpSolrClient) client).getBaseURL());
log.info("Version at " + ((HttpSolrClient) client).getBaseURL() + " is: " + getReplicaValue(client, 1, "_version_"));
assertReplicaValue(client, 1, "inplace_updatable_float", (newinplace_updatable_float + 2.0f), "inplace_updatable_float didn't match for replica at client: " + ((HttpSolrClient) client).getBaseURL());
assertReplicaValue(client, 1, "title_s", "title1_new", "Title didn't match for replica at client: " + ((HttpSolrClient) client).getBaseURL());
}
// Try another round of these updates, this time with a delete request at the end.
// This is to ensure that the fetch missing update from leader doesn't bomb out if the
// document has been deleted on the leader later on
{
clearIndex();
commit();
shardToJetty.get(SHARD1).get(1).jetty.getDebugFilter().unsetDelay();
updates.add(regularDeleteRequest(1));
// delay the first update
shardToJetty.get(SHARD1).get(1).jetty.getDebugFilter().addDelay("Waiting for dependant update to timeout", 1, 5999);
// delay the delete update
shardToJetty.get(SHARD1).get(1).jetty.getDebugFilter().addDelay("Waiting for dependant update to timeout", 4, 5998);
threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
for (UpdateRequest update : updates) {
AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, cloudClient, random().nextLong());
threadpool.submit(task);
// We can't guarantee/trust what order the updates are executed in, since multiple threads
// are involved, but we're trying to bias the thread scheduling to run them in the order submitted
Thread.sleep(100);
}
threadpool.shutdown();
assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
commit();
// TODO: Could try checking ZK for LIR flags to ensure LIR has not kicked in
// Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
ZkController zkController = shardToLeaderJetty.get(SHARD1).jetty.getCoreContainer().getZkController();
String lirPath = zkController.getLeaderInitiatedRecoveryZnodePath(DEFAULT_TEST_COLLECTION_NAME, SHARD1);
assertFalse(zkController.getZkClient().exists(lirPath, true));
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
ClusterState state = cloudClient.getZkStateReader().getClusterState();
int numActiveReplicas = 0;
for (Replica rep : state.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1).getReplicas()) {
  if (rep.getState().equals(Replica.State.ACTIVE)) numActiveReplicas++;
}
assertEquals("The replica receiving reordered updates must not have gone down", 3, numActiveReplicas);
}
for (SolrClient client : new SolrClient[] { LEADER, NONLEADERS.get(0), NONLEADERS.get(1) }) {
// nonleader 0 re-ordered replica, nonleader 1 well-ordered replica
SolrDocument doc = client.getById(String.valueOf(1), params("distrib", "false"));
assertNull("This doc was supposed to have been deleted, but was: " + doc, doc);
}
}
log.info("delayedReorderingFetchesMissingUpdateFromLeaderTest: This test passed fine...");
}
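The AsyncUpdateWithRandomCommit task submitted in the loop above is a helper from the test class that this page does not show. Below is a minimal sketch of what such a task could look like, assuming it is a Callable<UpdateResponse> that sends the update through the given client and occasionally commits using the seeded Random; the class name and behavior here are inferred, not copied from the original test.
import java.util.Random;
import java.util.concurrent.Callable;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.UpdateResponse;

// Sketch modeled on AsyncUpdateWithRandomCommit; the real helper in the test class may differ.
class AsyncUpdateWithRandomCommitSketch implements Callable<UpdateResponse> {
  private final UpdateRequest update;
  private final SolrClient client;
  private final Random random;

  AsyncUpdateWithRandomCommitSketch(UpdateRequest update, SolrClient client, long seed) {
    this.update = update;
    this.client = client;
    this.random = new Random(seed);
  }

  @Override
  public UpdateResponse call() throws Exception {
    UpdateResponse response = update.process(client); // send the update through this client
    if (random.nextInt(3) == 0) {                      // commit occasionally rather than after every update
      client.commit();
    }
    return response;
  }
}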
Use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
From the class TestInPlaceUpdatesDistrib, method reorderedDBQIndividualReplicaTest.
private void reorderedDBQIndividualReplicaTest() throws Exception {
if (onlyLeaderIndexes) {
log.info("RTG with DBQs are not working in tlog replicas");
return;
}
clearIndex();
commit();
// put replica out of sync
float newinplace_updatable_float = 100;
long version0 = 2000;
List<UpdateRequest> updates = new ArrayList<>();
// full update
updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", "inplace_updatable_float", newinplace_updatable_float, "_version_", version0 + 1));
// inplace_updatable_float=101
updates.add(simulatedUpdateRequest(version0 + 1, "id", 0, "inplace_updatable_float", newinplace_updatable_float + 1, "_version_", version0 + 2));
updates.add(simulatedDeleteRequest("inplace_updatable_float:" + (newinplace_updatable_float + 1), version0 + 3));
// Reordering needs to happen using parallel threads
ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
// re-order the updates by swapping the last two
List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
reorderedUpdates.set(1, updates.get(2));
reorderedUpdates.set(2, updates.get(1));
List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
for (UpdateRequest update : reorderedUpdates) {
AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
updateResponses.add(threadpool.submit(task));
// We can't guarantee/trust what order the updates are executed in, since multiple threads
// are involved, but we're trying to bias the thread scheduling to run them in the order submitted
Thread.sleep(100);
}
threadpool.shutdown();
assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
// assert all requests were successful
for (Future<UpdateResponse> resp : updateResponses) {
assertEquals(0, resp.get().getStatus());
}
SolrDocument doc = NONLEADERS.get(0).getById(String.valueOf(0), params("distrib", "false"));
assertNull("This doc was supposed to have been deleted, but was: " + doc, doc);
log.info("reorderedDBQIndividualReplicaTest: This test passed fine...");
clearIndex();
commit();
}
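The pattern these tests repeat around DefaultSolrThreadFactory is small: build an MDC-aware fixed pool whose threads carry a test-derived name prefix, submit the update tasks with a short sleep to bias ordering, then shut the pool down and await termination. A hedged standalone sketch of that pattern follows; the helper name runConcurrently is illustrative and not part of the original test.
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.util.DefaultSolrThreadFactory;

// Illustrative helper capturing the submit/shutdown/await pattern used in the methods on this page.
static void runConcurrently(List<Callable<UpdateResponse>> tasks, String poolName) throws Exception {
  ExecutorService pool = ExecutorUtil.newMDCAwareFixedThreadPool(
      tasks.size() + 1, new DefaultSolrThreadFactory(poolName)); // worker threads are named after poolName
  try {
    for (Callable<UpdateResponse> task : tasks) {
      pool.submit(task);
      Thread.sleep(100); // bias scheduling toward submission order, as the tests above do
    }
  } finally {
    pool.shutdown(); // stop accepting new tasks
    if (!pool.awaitTermination(15, TimeUnit.SECONDS)) { // give in-flight updates time to finish
      throw new AssertionError("Thread pool didn't terminate within 15 secs");
    }
  }
}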
Use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
From the class TestInPlaceUpdatesDistrib, method outOfOrderUpdatesIndividualReplicaTest.
private void outOfOrderUpdatesIndividualReplicaTest() throws Exception {
clearIndex();
commit();
buildRandomIndex(0);
float inplace_updatable_float = 1;
// update doc, set
index("id", 0, "inplace_updatable_float", map("set", inplace_updatable_float));
LEADER.commit();
// RTG straight from the index
SolrDocument sdoc = LEADER.getById("0");
assertEquals(inplace_updatable_float, sdoc.get("inplace_updatable_float"));
assertEquals("title0", sdoc.get("title_s"));
long version0 = (long) sdoc.get("_version_");
// put replica out of sync
float newinplace_updatable_float = 100;
List<UpdateRequest> updates = new ArrayList<>();
// full update
updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", "inplace_updatable_float", newinplace_updatable_float, "_version_", version0 + 1));
for (int i = 1; i < atLeast(3); i++) {
updates.add(simulatedUpdateRequest(version0 + i, "id", 0, "inplace_updatable_float", newinplace_updatable_float + i, "_version_", version0 + i + 1));
}
// order the updates correctly for NONLEADER 1
for (UpdateRequest update : updates) {
log.info("Issuing well ordered update: " + update.getDocuments());
NONLEADERS.get(1).request(update);
}
// Reordering needs to happen using parallel threads, since some of these updates will
// be blocking calls, waiting for some previous updates to arrive on which it depends.
ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
// re-order the updates for NONLEADER 0
List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
Collections.shuffle(reorderedUpdates, r);
List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
for (UpdateRequest update : reorderedUpdates) {
AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
updateResponses.add(threadpool.submit(task));
// We can't guarantee/trust what order the updates are executed in, since multiple threads
// are involved, but we're trying to bias the thread scheduling to run them in the order submitted
Thread.sleep(10);
}
threadpool.shutdown();
assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
// assert all requests were successful
for (Future<UpdateResponse> resp : updateResponses) {
assertEquals(0, resp.get().getStatus());
}
// assert both replicas have same effect
for (SolrClient client : NONLEADERS) {
// 0th is re-ordered replica, 1st is well-ordered replica
log.info("Testing client: " + ((HttpSolrClient) client).getBaseURL());
assertReplicaValue(client, 0, "inplace_updatable_float", (newinplace_updatable_float + (float) (updates.size() - 1)), "inplace_updatable_float didn't match for replica at client: " + ((HttpSolrClient) client).getBaseURL());
assertReplicaValue(client, 0, "title_s", "title0_new", "Title didn't match for replica at client: " + ((HttpSolrClient) client).getBaseURL());
assertEquals(version0 + updates.size(), getReplicaValue(client, 0, "_version_"));
}
log.info("outOfOrderUpdatesIndividualReplicaTest: This test passed fine...");
}
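The "check every 10ms, 100 times" loop that guards against a replica being put into recovery appears in several of the methods on this page. Here is a hedged sketch of that check extracted into a helper; the method name and signature are illustrative, and the real tests inline this loop rather than calling a helper.
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Replica;

// Illustrative helper: assert that the shard keeps the expected number of ACTIVE replicas.
static void assertReplicasStayActive(CloudSolrClient cloudClient, String collection,
    String shard, int expectedActive) throws Exception {
  for (int i = 0; i < 100; i++) { // check every 10ms, 100 times
    Thread.sleep(10);
    cloudClient.getZkStateReader().forceUpdateCollection(collection); // refresh cached state from ZooKeeper
    ClusterState state = cloudClient.getZkStateReader().getClusterState();
    int numActive = 0;
    for (Replica rep : state.getCollection(collection).getSlice(shard).getReplicas()) {
      if (rep.getState().equals(Replica.State.ACTIVE)) {
        numActive++;
      }
    }
    if (numActive != expectedActive) {
      throw new AssertionError("Expected " + expectedActive + " ACTIVE replicas but saw " + numActive);
    }
  }
}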
Use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
From the class TestInPlaceUpdatesDistrib, method reorderedDBQsUsingUpdatedValueFromADroppedUpdate.
/*
* Situation:
* add(id=1,inpfield=12,title=mytitle,version=1)
* inp(id=1,inpfield=13,prevVersion=1,version=2) // timeout indefinitely
* inp(id=1,inpfield=14,prevVersion=2,version=3) // will wait till timeout, and then fetch a "not found" from leader
* dbq("inp:14",version=4)
*/
private void reorderedDBQsUsingUpdatedValueFromADroppedUpdate() throws Exception {
if (onlyLeaderIndexes) {
log.info("RTG with DBQs are not working in tlog replicas");
return;
}
clearIndex();
commit();
float inplace_updatable_float = 1F;
buildRandomIndex(inplace_updatable_float, Collections.singletonList(1));
List<UpdateRequest> updates = new ArrayList<>();
updates.add(regularUpdateRequest("id", 1, "id_i", 1, "inplace_updatable_float", 12, "title_s", "mytitle"));
// delay indefinitely
updates.add(regularUpdateRequest("id", 1, "inplace_updatable_float", map("inc", 1)));
updates.add(regularUpdateRequest("id", 1, "inplace_updatable_float", map("inc", 1)));
updates.add(regularDeleteByQueryRequest("inplace_updatable_float:14"));
// The second request will be delayed for a very long time, so that the next update gives up waiting for it
// and fetches a full update from the leader.
shardToJetty.get(SHARD1).get(1).jetty.getDebugFilter().addDelay("Waiting for dependant update to timeout", 2, 8000);
ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
for (UpdateRequest update : updates) {
AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, cloudClient, random().nextLong());
threadpool.submit(task);
// We can't guarantee/trust what order the updates are executed in, since multiple threads
// are involved, but we're trying to bias the thread scheduling to run them in the order submitted
Thread.sleep(100);
}
threadpool.shutdown();
assertTrue("Thread pool didn't terminate within 12 secs", threadpool.awaitTermination(12, TimeUnit.SECONDS));
commit();
// Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
for (int i = 0; i < 100; i++) {
Thread.sleep(10);
cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
ClusterState state = cloudClient.getZkStateReader().getClusterState();
int numActiveReplicas = 0;
for (Replica rep : state.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1).getReplicas()) {
  if (rep.getState().equals(Replica.State.ACTIVE)) numActiveReplicas++;
}
assertEquals("The replica receiving reordered updates must not have gone down", 3, numActiveReplicas);
}
for (SolrClient client : clients) {
log.info("Testing client (testDBQUsingUpdatedFieldFromDroppedUpdate): " + ((HttpSolrClient) client).getBaseURL());
log.info("Version at " + ((HttpSolrClient) client).getBaseURL() + " is: " + getReplicaValue(client, 1, "_version_"));
assertNull(client.getById("1", params("distrib", "false")));
}
log.info("reorderedDBQsUsingUpdatedValueFromADroppedUpdate: This test passed fine...");
}
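The regularDeleteRequest and regularDeleteByQueryRequest helpers used above are not shown on this page. Assuming they are thin wrappers over SolrJ's UpdateRequest, a plausible sketch is the following; the real helpers may also set routing or version parameters.
import org.apache.solr.client.solrj.request.UpdateRequest;

// Plausible wrappers; the actual helpers in TestInPlaceUpdatesDistrib may differ.
static UpdateRequest regularDeleteRequestSketch(int id) {
  return new UpdateRequest().deleteById(String.valueOf(id)); // delete a single document by id
}

static UpdateRequest regularDeleteByQueryRequestSketch(String query) {
  return new UpdateRequest().deleteByQuery(query); // delete every document matching the query
}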
Use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
From the class TestInPlaceUpdatesDistrib, method reorderedDBQsSimpleTest.
// The following should work: full update to doc 0, in-place update for doc 0, delete doc 0
private void reorderedDBQsSimpleTest() throws Exception {
clearIndex();
commit();
buildRandomIndex(0);
float inplace_updatable_float = 1;
// update doc, set
index("id", 0, "inplace_updatable_float", map("set", inplace_updatable_float));
LEADER.commit();
// RTG straight from the index
SolrDocument sdoc = LEADER.getById("0");
assertEquals(inplace_updatable_float, sdoc.get("inplace_updatable_float"));
assertEquals("title0", sdoc.get("title_s"));
long version0 = (long) sdoc.get("_version_");
// put replica out of sync
float newinplace_updatable_float = 100;
List<UpdateRequest> updates = new ArrayList<>();
// full update
updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", "inplace_updatable_float", newinplace_updatable_float, "_version_", version0 + 1));
// inplace_updatable_float=101
updates.add(simulatedUpdateRequest(version0 + 1, "id", 0, "inplace_updatable_float", newinplace_updatable_float + 1, "_version_", version0 + 2));
updates.add(simulatedDeleteRequest(0, version0 + 3));
// order the updates correctly for NONLEADER 1
for (UpdateRequest update : updates) {
log.info("Issuing well ordered update: " + update.getDocuments());
NONLEADERS.get(1).request(update);
}
// Reordering needs to happen using parallel threads
ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
// re-order the updates for NONLEADER 0
List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
Collections.shuffle(reorderedUpdates, random());
List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
for (UpdateRequest update : reorderedUpdates) {
AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
updateResponses.add(threadpool.submit(task));
// We can't guarantee/trust what order the updates are executed in, since multiple threads
// are involved, but we're trying to bias the thread scheduling to run them in the order submitted
Thread.sleep(10);
}
threadpool.shutdown();
assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
// assert all requests were successful
for (Future<UpdateResponse> resp : updateResponses) {
assertEquals(0, resp.get().getStatus());
}
// assert both replicas have same effect
for (SolrClient client : NONLEADERS) {
// 0th is re-ordered replica, 1st is well-ordered replica
SolrDocument doc = client.getById(String.valueOf(0), params("distrib", "false"));
assertNull("This doc was supposed to have been deleted, but was: " + doc, doc);
}
log.info("reorderedDBQsSimpleTest: This test passed fine...");
}
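Finally, the getReplicaValue and assertReplicaValue helpers that the per-replica checks go through are also not shown here. Below is a minimal sketch, assuming they perform a real-time get against a single replica with distrib=false; the names carry a Sketch suffix to mark them as illustrative rather than the original implementations.
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.params.ModifiableSolrParams;

// Illustrative versions of getReplicaValue/assertReplicaValue; the real helpers may differ.
static Object getReplicaValueSketch(SolrClient client, int docId, String field) throws Exception {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("distrib", "false"); // real-time get from this replica only, no distributed fan-out
  SolrDocument doc = client.getById(String.valueOf(docId), params);
  return doc == null ? null : doc.getFieldValue(field);
}

static void assertReplicaValueSketch(SolrClient client, int docId, String field,
    Object expected, String message) throws Exception {
  Object actual = getReplicaValueSketch(client, docId, field);
  if (expected == null ? actual != null : !expected.equals(actual)) {
    throw new AssertionError(message + " (expected=" + expected + ", actual=" + actual + ")");
  }
}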