Use of org.apache.solr.client.solrj.impl.HttpSolrClient in project lucene-solr by apache.
From the class FullSolrCloudDistribCmdsTest, method testDeleteByIdImplicitRouter:
private void testDeleteByIdImplicitRouter() throws Exception {
  SolrClient server = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
  CollectionAdminResponse response;
  Map<String, NamedList<Integer>> coresStatus;
  CollectionAdminRequest.Create createCollectionRequest = CollectionAdminRequest
      .createCollectionWithImplicitRouter("implicit_collection_without_routerfield", "conf1", "shard1,shard2", 2);
  response = createCollectionRequest.process(server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  coresStatus = response.getCollectionCoresStatus();
  assertEquals(4, coresStatus.size());
  for (int i = 0; i < 4; i++) {
    NamedList<Integer> status = coresStatus
        .get("implicit_collection_without_routerfield_shard" + (i / 2 + 1) + "_replica" + (i % 2 + 1));
    assertEquals(0, (int) status.get("status"));
    assertTrue(status.get("QTime") > 0);
  }
  waitForRecoveriesToFinish("implicit_collection_without_routerfield", true);
  SolrClient shard1 = createNewSolrClient("implicit_collection_without_routerfield_shard1_replica1",
      getBaseUrl((HttpSolrClient) clients.get(0)));
  SolrClient shard2 = createNewSolrClient("implicit_collection_without_routerfield_shard2_replica1",
      getBaseUrl((HttpSolrClient) clients.get(0)));
  SolrInputDocument doc = new SolrInputDocument();
  int docCounts1, docCounts2;
  // Add three documents to shard1
  doc.clear();
  doc.addField("id", "1");
  doc.addField("title", "s1 one");
  shard1.add(doc);
  shard1.commit();
  doc.clear();
  doc.addField("id", "2");
  doc.addField("title", "s1 two");
  shard1.add(doc);
  shard1.commit();
  doc.clear();
  doc.addField("id", "3");
  doc.addField("title", "s1 three");
  shard1.add(doc);
  shard1.commit();
  // Three documents in shard1
  docCounts1 = 3;
  // Add two documents to shard2
  doc.clear();
  doc.addField("id", "4");
  doc.addField("title", "s2 four");
  shard2.add(doc);
  shard2.commit();
  doc.clear();
  doc.addField("id", "5");
  doc.addField("title", "s2 five");
  shard2.add(doc);
  shard2.commit();
  // Two documents in shard2
  docCounts2 = 2;
  // Verify the documents were added to correct shards
  ModifiableSolrParams query = new ModifiableSolrParams();
  query.set("q", "*:*");
  QueryResponse respAll = shard1.query(query);
  assertEquals(docCounts1 + docCounts2, respAll.getResults().getNumFound());
  query.set("shards", "shard1");
  QueryResponse resp1 = shard1.query(query);
  assertEquals(docCounts1, resp1.getResults().getNumFound());
  query.set("shards", "shard2");
  QueryResponse resp2 = shard2.query(query);
  assertEquals(docCounts2, resp2.getResults().getNumFound());
  // Delete a document in shard2 with update to shard1, with _route_ param
  // Should delete.
  UpdateRequest deleteRequest = new UpdateRequest();
  deleteRequest.deleteById("4", "shard2");
  shard1.request(deleteRequest);
  shard1.commit();
  query.set("shards", "shard2");
  resp2 = shard2.query(query);
  assertEquals(--docCounts2, resp2.getResults().getNumFound());
  // Delete a document in shard2 with update to shard1, without _route_ param
  // Shouldn't delete, since deleteById requests are not broadcast to all shard leaders.
  deleteRequest = new UpdateRequest();
  deleteRequest.deleteById("5");
  shard1.request(deleteRequest);
  shard1.commit();
  query.set("shards", "shard2");
  resp2 = shard2.query(query);
  assertEquals(docCounts2, resp2.getResults().getNumFound());
  // Multiple deleteById commands in a single request
  deleteRequest.clear();
  deleteRequest.deleteById("2", "shard1");
  deleteRequest.deleteById("3", "shard1");
  deleteRequest.setCommitWithin(1);
  query.set("shards", "shard1");
  shard2.request(deleteRequest);
  resp1 = shard1.query(query);
  --docCounts1;
  --docCounts1;
  assertEquals(docCounts1, resp1.getResults().getNumFound());
  // Test commitWithin, update to shard2, document deleted in shard1
  deleteRequest.clear();
  deleteRequest.deleteById("1", "shard1");
  deleteRequest.setCommitWithin(1);
  shard2.request(deleteRequest);
  Thread.sleep(1000);
  query.set("shards", "shard1");
  resp1 = shard1.query(query);
  assertEquals(--docCounts1, resp1.getResults().getNumFound());
}
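The core of this test is SolrJ's UpdateRequest.deleteById(id, route): with the implicit router, the second argument becomes the _route_ parameter, telling the cluster which shard holds the document so the delete can be sent to any node. A minimal sketch of that pattern, assuming a SolrClient named client pointing at the cluster and the collection name used above:

// Sketch only: delete by id, routed explicitly to shard2 of an implicit-router collection.
UpdateRequest delete = new UpdateRequest();
delete.deleteById("4", "shard2");   // second argument is sent as the _route_ param
delete.setCommitWithin(1000);       // or call client.commit() on the collection instead
client.request(delete, "implicit_collection_without_routerfield");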
Use of org.apache.solr.client.solrj.impl.HttpSolrClient in project lucene-solr by apache.
From the class RecoveryZkTest, method assertShardConsistency:
private void assertShardConsistency(Slice shard, boolean expectDocs) throws Exception {
  List<Replica> replicas = shard.getReplicas(r -> r.getState() == Replica.State.ACTIVE);
  long[] numCounts = new long[replicas.size()];
  int i = 0;
  for (Replica replica : replicas) {
    try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getCoreUrl())
        .withHttpClient(cluster.getSolrClient().getHttpClient()).build()) {
      numCounts[i] = client.query(new SolrQuery("*:*").add("distrib", "false")).getResults().getNumFound();
      i++;
    }
  }
  for (int j = 1; j < replicas.size(); j++) {
    if (numCounts[j] != numCounts[j - 1])
      // TODO improve this!
      fail("Mismatch in counts between replicas");
    if (numCounts[j] == 0 && expectDocs)
      fail("Expected docs on shard " + shard.getName() + " but found none");
  }
}
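The per-replica count above depends on two details: an HttpSolrClient.Builder pointed at a single replica's core URL, and distrib=false so the query is answered by that core alone instead of being fanned out across the collection. A standalone sketch of the same check, with the core URL as an assumed placeholder:

// Sketch only: coreUrl is a placeholder for a real replica core URL.
String coreUrl = "http://localhost:8983/solr/collection1_shard1_replica1";
try (HttpSolrClient core = new HttpSolrClient.Builder(coreUrl).build()) {
  long numFound = core.query(new SolrQuery("*:*").add("distrib", "false"))
      .getResults().getNumFound();
  System.out.println("docs on this replica: " + numFound);
}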
Use of org.apache.solr.client.solrj.impl.HttpSolrClient in project lucene-solr by apache.
From the class MultiThreadedOCPTest, method testFillWorkQueue:
private void testFillWorkQueue() throws Exception {
  try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
    DistributedQueue distributedQueue = new DistributedQueue(cloudClient.getZkStateReader().getZkClient(),
        "/overseer/collection-queue-work", new Overseer.Stats());
    // fill the work queue with blocked tasks by adding more than the number of parallel tasks
    for (int i = 0; i < MAX_PARALLEL_TASKS + 5; i++) {
      distributedQueue.offer(Utils.toJSON(Utils.makeMap(
          "collection", "A_COLL",
          QUEUE_OPERATION, MOCK_COLL_TASK.toLower(),
          ASYNC, String.valueOf(i),
          // the first task waits for 1 second, and thus blocks the others
          "sleep", (i == 0 ? "1000" : "1"))));
      log.info("MOCK task added {}", i);
    }
    // wait and post the next message
    Thread.sleep(10);
    // this is not going to be blocked because it operates on another collection
    distributedQueue.offer(Utils.toJSON(Utils.makeMap("collection", "B_COLL", QUEUE_OPERATION,
        MOCK_COLL_TASK.toLower(), ASYNC, "200", "sleep", "1")));
    Long acoll = null, bcoll = null;
    for (int i = 0; i < 100; i++) {
      if (bcoll == null) {
        CollectionAdminResponse statusResponse = getStatusResponse("200", client);
        bcoll = (Long) statusResponse.getResponse().get("MOCK_FINISHED");
      }
      if (acoll == null) {
        CollectionAdminResponse statusResponse = getStatusResponse("2", client);
        acoll = (Long) statusResponse.getResponse().get("MOCK_FINISHED");
      }
      if (acoll != null && bcoll != null)
        break;
      Thread.sleep(100);
    }
    assertTrue(acoll != null && bcoll != null);
    assertTrue(acoll > bcoll);
  }
}
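The getStatusResponse call above is a helper in the test's base class; outside of tests, async ids submitted to the Collections API are normally polled with a REQUESTSTATUS call. A hedged sketch of such a polling loop, assuming SolrJ's CollectionAdminRequest.requestStatus is available in the SolrJ version in use and client points at the cluster (the async id "200" is taken from the snippet above):

// Sketch: poll an async Collections API request until it leaves the SUBMITTED/RUNNING states.
RequestStatusState state;
do {
  Thread.sleep(100);
  state = CollectionAdminRequest.requestStatus("200").process(client).getRequestStatus();
} while (state == RequestStatusState.SUBMITTED || state == RequestStatusState.RUNNING);
System.out.println("async request 200 finished with state " + state);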
Use of org.apache.solr.client.solrj.impl.HttpSolrClient in project lucene-solr by apache.
From the class TestTolerantUpdateProcessorRandomCloud, method afterClass:
@AfterClass
public static void afterClass() throws IOException {
  if (NODE_CLIENTS != null) {
    for (HttpSolrClient client : NODE_CLIENTS) {
      client.close();
    }
  }
  NODE_CLIENTS = null;
  if (CLOUD_CLIENT != null) {
    CLOUD_CLIENT.close();
  }
  CLOUD_CLIENT = null;
}
Use of org.apache.solr.client.solrj.impl.HttpSolrClient in project lucene-solr by apache.
From the class UnloadDistributedZkTest, method testUnloadLotsOfCores:
private void testUnloadLotsOfCores() throws Exception {
  SolrClient client = clients.get(2);
  String url3 = getBaseUrl(client);
  try (final HttpSolrClient adminClient = getHttpSolrClient(url3)) {
    adminClient.setConnectionTimeout(15000);
    adminClient.setSoTimeout(60000);
    int cnt = atLeast(3);
    ThreadPoolExecutor executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE, 5,
        TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), new DefaultSolrThreadFactory("testExecutor"));
    try {
      // create the cores
      createCores(adminClient, executor, "multiunload", 2, cnt);
    } finally {
      ExecutorUtil.shutdownAndAwaitTermination(executor);
    }
    executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE, 5,
        TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), new DefaultSolrThreadFactory("testExecutor"));
    try {
      for (int j = 0; j < cnt; j++) {
        final int freezeJ = j;
        executor.execute(() -> {
          Unload unloadCmd = new Unload(true);
          unloadCmd.setCoreName("multiunload" + freezeJ);
          try {
            adminClient.request(unloadCmd);
          } catch (SolrServerException | IOException e) {
            throw new RuntimeException(e);
          }
        });
        Thread.sleep(random().nextInt(50));
      }
    } finally {
      ExecutorUtil.shutdownAndAwaitTermination(executor);
    }
  }
}
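Each task above builds a CoreAdminRequest.Unload by hand (with deleteIndex=true) so the executor can fire many of them concurrently against the same admin client. For a single core, SolrJ also offers the CoreAdminRequest.unloadCore convenience method; a small sketch, with the base URL and core name as assumed placeholders, noting that this simple form does not delete the index:

// Sketch only: unload one core through the CoreAdmin API.
try (HttpSolrClient admin = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
  CoreAdminRequest.unloadCore("multiunload0", admin);
}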