Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
From the class PreAnalyzedFieldManagedSchemaCloudTest, method addField:
private void addField(Map<String, Object> field) throws Exception {
  CloudSolrClient client = cluster.getSolrClient();
  UpdateResponse addFieldResponse = new SchemaRequest.AddField(field).process(client, COLLECTION);
  assertNotNull(addFieldResponse);
  assertEquals(0, addFieldResponse.getStatus());
  assertNull(addFieldResponse.getResponse().get("errors"));
  FieldResponse fieldResponse = new SchemaRequest.Field(field.get("name").toString()).process(client, COLLECTION);
  assertNotNull(fieldResponse);
  assertEquals(0, fieldResponse.getStatus());
}
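For context, a caller passes the field definition to this helper as a plain map of schema properties. The fragment below is only an illustrative sketch: the field name, type, and properties (and the java.util.LinkedHashMap it relies on) are assumptions, not values taken from the original test.

private void addExampleField() throws Exception {
  // Hypothetical caller of the addField helper above; the field name, type,
  // and properties are illustrative assumptions.
  Map<String, Object> fieldDefinition = new LinkedHashMap<>();
  fieldDefinition.put("name", "pre_analyzed_field");
  fieldDefinition.put("type", "pre_analyzed"); // assumes this field type is defined in the managed schema
  fieldDefinition.put("stored", true);
  addField(fieldDefinition);
}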
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
From the class TestManagedSchemaAPI, method testReloadAndAddSimple:
private void testReloadAndAddSimple(String collection) throws IOException, SolrServerException {
  CloudSolrClient cloudClient = cluster.getSolrClient();
  String fieldName = "myNewField";
  addStringField(fieldName, collection, cloudClient);
  CollectionAdminRequest.Reload reloadRequest = CollectionAdminRequest.reloadCollection(collection);
  CollectionAdminResponse response = reloadRequest.process(cloudClient);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", "1");
  doc.addField(fieldName, "val");
  UpdateRequest ureq = new UpdateRequest().add(doc);
  cloudClient.request(ureq, collection);
}
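The addStringField helper invoked above is not included in this listing. A minimal sketch of what such a helper could look like, reusing the SchemaRequest.AddField API from the previous example; the exact field properties ("string", stored=true) are an assumption:

private void addStringField(String fieldName, String collection, CloudSolrClient cloudClient) throws IOException, SolrServerException {
  // Plausible sketch of the helper referenced above; field properties are assumed.
  Map<String, Object> fieldAttributes = new LinkedHashMap<>();
  fieldAttributes.put("name", fieldName);
  fieldAttributes.put("type", "string");
  fieldAttributes.put("stored", true);
  SchemaRequest.AddField addFieldRequest = new SchemaRequest.AddField(fieldAttributes);
  SchemaResponse.UpdateResponse addFieldResponse = addFieldRequest.process(cloudClient, collection);
  assertEquals(0, addFieldResponse.getStatus());
}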
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
From the class TestTrackingShardHandlerFactory, method testRequestTracking:
@Test
@BaseDistributedSearchTestCase.ShardsFixed(num = 2)
public void testRequestTracking() throws Exception {
  String collectionName = "testTwoPhase";
  List<JettySolrRunner> runners = new ArrayList<>(jettys);
  runners.add(controlJetty);
  TrackingShardHandlerFactory.RequestTrackingQueue trackingQueue = new TrackingShardHandlerFactory.RequestTrackingQueue();
  TrackingShardHandlerFactory.setTrackingQueue(runners, trackingQueue);
  for (JettySolrRunner runner : runners) {
    CoreContainer container = runner.getCoreContainer();
    ShardHandlerFactory factory = container.getShardHandlerFactory();
    assert factory instanceof TrackingShardHandlerFactory;
    TrackingShardHandlerFactory trackingShardHandlerFactory = (TrackingShardHandlerFactory) factory;
    assertSame(trackingQueue, trackingShardHandlerFactory.getTrackingQueue());
  }
  createCollection(collectionName, 2, 1, 1);
  waitForRecoveriesToFinish(collectionName, true);
  List<TrackingShardHandlerFactory.ShardRequestAndParams> coreAdminRequests = trackingQueue.getCoreAdminRequests();
  assertNotNull(coreAdminRequests);
  assertEquals("Unexpected number of core admin requests were found", 2, coreAdminRequests.size());
  CloudSolrClient client = cloudClient;
  client.setDefaultCollection(collectionName);
  /*
    hash of b is 95de7e03 high bits=2 shard=shard1
    hash of e is 656c4367 high bits=1 shard=shard2
  */
  for (int i = 0; i < 10; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", (i % 2 == 0 ? "b!" : "e!") + i);
    doc.addField("a_i", i);
    doc.addField("a_t", "text_" + i);
    client.add(doc);
  }
  client.commit();
  client.query(new SolrQuery("*:*"));
  TrackingShardHandlerFactory.ShardRequestAndParams getTopIdsRequest = trackingQueue.getShardRequestByPurpose(client.getZkStateReader(), collectionName, "shard1", ShardRequest.PURPOSE_GET_TOP_IDS);
  assertNotNull(getTopIdsRequest);
  getTopIdsRequest = trackingQueue.getShardRequestByPurpose(client.getZkStateReader(), collectionName, "shard2", ShardRequest.PURPOSE_GET_TOP_IDS);
  assertNotNull(getTopIdsRequest);
  TrackingShardHandlerFactory.ShardRequestAndParams getFieldsRequest = trackingQueue.getShardRequestByPurpose(client.getZkStateReader(), collectionName, "shard1", ShardRequest.PURPOSE_GET_FIELDS);
  assertNotNull(getFieldsRequest);
  getFieldsRequest = trackingQueue.getShardRequestByPurpose(client.getZkStateReader(), collectionName, "shard2", ShardRequest.PURPOSE_GET_FIELDS);
  assertNotNull(getFieldsRequest);
  int numRequests = 0;
  Map<String, List<TrackingShardHandlerFactory.ShardRequestAndParams>> allRequests = trackingQueue.getAllRequests();
  for (Map.Entry<String, List<TrackingShardHandlerFactory.ShardRequestAndParams>> entry : allRequests.entrySet()) {
    numRequests += entry.getValue().size();
  }
  // 4 shard requests + 2 core admin requests (invoked by create collection API)
  assertEquals("Total number of requests do not match expected", 6, numRequests);
  // reset
  TrackingShardHandlerFactory.setTrackingQueue(runners, null);
  for (JettySolrRunner runner : runners) {
    CoreContainer container = runner.getCoreContainer();
    ShardHandlerFactory factory = container.getShardHandlerFactory();
    assert factory instanceof TrackingShardHandlerFactory;
    TrackingShardHandlerFactory trackingShardHandlerFactory = (TrackingShardHandlerFactory) factory;
    assertFalse(trackingShardHandlerFactory.isTracking());
  }
  // make another request and verify
  client.query(new SolrQuery("*:*"));
  numRequests = 0;
  allRequests = trackingQueue.getAllRequests();
  for (Map.Entry<String, List<TrackingShardHandlerFactory.ShardRequestAndParams>> entry : allRequests.entrySet()) {
    numRequests += entry.getValue().size();
  }
  // should still be 6
  assertEquals("Total number of shard requests do not match expected", 6, numRequests);
}
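The request-counting loop above appears twice in the test. A small helper like the following, which is not part of the original test, could factor it out using only the getAllRequests() accessor already shown:

private int countTrackedRequests(TrackingShardHandlerFactory.RequestTrackingQueue trackingQueue) {
  // Sums every request recorded by the tracking queue across all shards;
  // this helper only wraps the loop that the test above repeats twice.
  int numRequests = 0;
  for (List<TrackingShardHandlerFactory.ShardRequestAndParams> requests : trackingQueue.getAllRequests().values()) {
    numRequests += requests.size();
  }
  return numRequests;
}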
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
From the class ChaosMonkeyNothingIsSafeWithPullReplicasTest, method test:
@Test
public void test() throws Exception {
  cloudClient.setSoTimeout(clientSoTimeout);
  DocCollection docCollection = cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
  assertEquals(this.sliceCount, docCollection.getSlices().size());
  Slice s = docCollection.getSlice("shard1");
  assertNotNull(s);
  assertEquals("Unexpected number of replicas. Collection: " + docCollection, numRealtimeOrTlogReplicas + numPullReplicas, s.getReplicas().size());
  assertEquals("Unexpected number of pull replicas. Collection: " + docCollection, numPullReplicas, s.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
  assertEquals(useTlogReplicas() ? 0 : numRealtimeOrTlogReplicas, s.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
  assertEquals(useTlogReplicas() ? numRealtimeOrTlogReplicas : 0, s.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
  boolean testSuccessful = false;
  try {
    handle.clear();
    handle.put("timestamp", SKIPVAL);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    // make sure we have leaders for each shard
    for (int j = 1; j < sliceCount; j++) {
      zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + j, 10000);
    }
    // make sure we again have leaders for each shard
    waitForRecoveriesToFinish(false);
    // we cannot do delete by query
    // as it's not supported for recovery
    del("*:*");
    List<StoppableThread> threads = new ArrayList<>();
    List<StoppableIndexingThread> indexThreads = new ArrayList<>();
    int threadCount = TEST_NIGHTLY ? 3 : 1;
    int i = 0;
    for (i = 0; i < threadCount; i++) {
      StoppableIndexingThread indexThread = new StoppableIndexingThread(controlClient, cloudClient, Integer.toString(i), true);
      threads.add(indexThread);
      indexThreads.add(indexThread);
      indexThread.start();
    }
    threadCount = 1;
    i = 0;
    for (i = 0; i < threadCount; i++) {
      StoppableSearchThread searchThread = new StoppableSearchThread(cloudClient);
      threads.add(searchThread);
      searchThread.start();
    }
    if (usually()) {
      StoppableCommitThread commitThread = new StoppableCommitThread(cloudClient, 1000, false);
      threads.add(commitThread);
      commitThread.start();
    }
    // TODO: we only do this sometimes so that we can sometimes compare against control,
    // it's currently hard to know what requests failed when using ConcurrentSolrUpdateServer
    boolean runFullThrottle = random().nextBoolean();
    if (runFullThrottle) {
      FullThrottleStoppableIndexingThread ftIndexThread = new FullThrottleStoppableIndexingThread(controlClient, cloudClient, clients, "ft1", true, this.clientSoTimeout);
      threads.add(ftIndexThread);
      ftIndexThread.start();
    }
    chaosMonkey.startTheMonkey(true, 10000);
    try {
      long runLength;
      if (RUN_LENGTH != -1) {
        runLength = RUN_LENGTH;
      } else {
        int[] runTimes;
        if (TEST_NIGHTLY) {
          runTimes = new int[] { 5000, 6000, 10000, 15000, 25000, 30000, 30000, 45000, 90000, 120000 };
        } else {
          runTimes = new int[] { 5000, 7000, 15000 };
        }
        runLength = runTimes[random().nextInt(runTimes.length - 1)];
      }
      ChaosMonkey.wait(runLength, DEFAULT_COLLECTION, zkStateReader);
    } finally {
      chaosMonkey.stopTheMonkey();
    }
    // ideally this should go into chaosMonkey
    restartZk(1000 * (5 + random().nextInt(4)));
    for (StoppableThread indexThread : threads) {
      indexThread.safeStop();
    }
    // wait for stop...
    for (StoppableThread indexThread : threads) {
      indexThread.join();
    }
    // try and wait for any replications and what not to finish...
    ChaosMonkey.wait(2000, DEFAULT_COLLECTION, zkStateReader);
    // wait until there are no recoveries...
    //Math.round((runLength / 1000.0f / 3.0f)));
    waitForThingsToLevelOut(Integer.MAX_VALUE);
    // make sure we again have leaders for each shard
    for (int j = 1; j < sliceCount; j++) {
      zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + j, 30000);
    }
    commit();
    // TODO: assert we didn't kill everyone
    zkStateReader.updateLiveNodes();
    assertTrue(zkStateReader.getClusterState().getLiveNodes().size() > 0);
    // we expect full throttle fails, but cloud client should not easily fail
    for (StoppableThread indexThread : threads) {
      if (indexThread instanceof StoppableIndexingThread && !(indexThread instanceof FullThrottleStoppableIndexingThread)) {
        int failCount = ((StoppableIndexingThread) indexThread).getFailCount();
        assertFalse("There were too many update fails (" + failCount + " > " + FAIL_TOLERANCE + ") - we expect it can happen, but shouldn't easily", failCount > FAIL_TOLERANCE);
      }
    }
    waitForReplicationFromReplicas(DEFAULT_COLLECTION, zkStateReader, new TimeOut(30, TimeUnit.SECONDS));
    // waitForAllWarmingSearchers();
    Set<String> addFails = getAddFails(indexThreads);
    Set<String> deleteFails = getDeleteFails(indexThreads);
    // full throttle thread can
    // have request fails
    checkShardConsistency(!runFullThrottle, true, addFails, deleteFails);
    long ctrlDocs = controlClient.query(new SolrQuery("*:*")).getResults().getNumFound();
    // ensure we have added more than 0 docs
    long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
    assertTrue("Found " + ctrlDocs + " control docs", cloudClientDocs > 0);
    if (VERBOSE)
      System.out.println("control docs:" + controlClient.query(new SolrQuery("*:*")).getResults().getNumFound() + "\n\n");
    // sometimes we restart zookeeper as well
    if (random().nextBoolean()) {
      restartZk(1000 * (5 + random().nextInt(4)));
    }
    try (CloudSolrClient client = createCloudClient("collection1")) {
      // We don't really know how many live nodes we have at this point, so "maxShardsPerNode" needs to be > 1
      createCollection(null, "testcollection", 1, 1, 10, client, null, "conf1");
    }
    List<Integer> numShardsNumReplicas = new ArrayList<>(2);
    numShardsNumReplicas.add(1);
    numShardsNumReplicas.add(1 + getPullReplicaCount());
    checkForCollection("testcollection", numShardsNumReplicas, null);
    testSuccessful = true;
  } finally {
    if (!testSuccessful) {
      logReplicaTypesReplicationInfo(DEFAULT_COLLECTION, cloudClient.getZkStateReader());
      printLayout();
    }
  }
}
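The getAddFails and getDeleteFails helpers called near the end of this test are not included in the listing. A plausible sketch, assuming StoppableIndexingThread exposes getAddFails() and getDeleteFails() accessors that return the ids of failed updates:

private Set<String> getAddFails(List<StoppableIndexingThread> threads) {
  // Collects add failures from every indexing thread; getAddFails() on the
  // thread class is an assumed accessor.
  Set<String> addFails = new HashSet<>();
  for (StoppableIndexingThread thread : threads) {
    addFails.addAll(thread.getAddFails());
  }
  return addFails;
}

private Set<String> getDeleteFails(List<StoppableIndexingThread> threads) {
  // Same idea for delete failures; getDeleteFails() is likewise assumed.
  Set<String> deleteFails = new HashSet<>();
  for (StoppableIndexingThread thread : threads) {
    deleteFails.addAll(thread.getDeleteFails());
  }
  return deleteFails;
}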
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
From the class ChaosMonkeySafeLeaderWithPullReplicasTest, method test:
@Test
public void test() throws Exception {
  DocCollection docCollection = cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
  assertEquals(this.sliceCount, docCollection.getSlices().size());
  Slice s = docCollection.getSlice("shard1");
  assertNotNull(s);
  assertEquals("Unexpected number of replicas. Collection: " + docCollection, numRealtimeOrTlogReplicas + numPullReplicas, s.getReplicas().size());
  assertEquals("Unexpected number of pull replicas. Collection: " + docCollection, numPullReplicas, s.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
  assertEquals(useTlogReplicas() ? 0 : numRealtimeOrTlogReplicas, s.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
  assertEquals(useTlogReplicas() ? numRealtimeOrTlogReplicas : 0, s.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
  handle.clear();
  handle.put("timestamp", SKIPVAL);
  // randomly turn on 1 second 'soft' commit
  randomlyEnableAutoSoftCommit();
  tryDelete();
  List<StoppableThread> threads = new ArrayList<>();
  int threadCount = 2;
  int batchSize = 1;
  if (random().nextBoolean()) {
    batchSize = random().nextInt(98) + 2;
  }
  boolean pauseBetweenUpdates = TEST_NIGHTLY ? random().nextBoolean() : true;
  int maxUpdates = -1;
  if (!pauseBetweenUpdates) {
    maxUpdates = 1000 + random().nextInt(1000);
  } else {
    maxUpdates = 15000;
  }
  for (int i = 0; i < threadCount; i++) {
    // random().nextInt(999) + 1
    StoppableIndexingThread indexThread = new StoppableIndexingThread(controlClient, cloudClient, Integer.toString(i), true, maxUpdates, batchSize, pauseBetweenUpdates);
    threads.add(indexThread);
    indexThread.start();
  }
  StoppableCommitThread commitThread = new StoppableCommitThread(cloudClient, 1000, false);
  threads.add(commitThread);
  commitThread.start();
  chaosMonkey.startTheMonkey(false, 500);
  try {
    long runLength;
    if (RUN_LENGTH != -1) {
      runLength = RUN_LENGTH;
    } else {
      int[] runTimes;
      if (TEST_NIGHTLY) {
        runTimes = new int[] { 5000, 6000, 10000, 15000, 25000, 30000, 30000, 45000, 90000, 120000 };
      } else {
        runTimes = new int[] { 5000, 7000, 15000 };
      }
      runLength = runTimes[random().nextInt(runTimes.length - 1)];
    }
    ChaosMonkey.wait(runLength, DEFAULT_COLLECTION, cloudClient.getZkStateReader());
  } finally {
    chaosMonkey.stopTheMonkey();
  }
  for (StoppableThread thread : threads) {
    thread.safeStop();
  }
  // wait for stop...
  for (StoppableThread thread : threads) {
    thread.join();
  }
  for (StoppableThread thread : threads) {
    if (thread instanceof StoppableIndexingThread) {
      assertEquals(0, ((StoppableIndexingThread) thread).getFailCount());
    }
  }
  // try and wait for any replications and what not to finish...
  Thread.sleep(2000);
  waitForThingsToLevelOut(180000);
  // even if things were leveled out, a jetty may have just been stopped or something
  // we wait again and wait to level out again to make sure the system is not still in flux
  Thread.sleep(3000);
  waitForThingsToLevelOut(180000);
  log.info("control docs:" + controlClient.query(new SolrQuery("*:*")).getResults().getNumFound() + "\n\n");
  waitForReplicationFromReplicas(DEFAULT_COLLECTION, cloudClient.getZkStateReader(), new TimeOut(30, TimeUnit.SECONDS));
  // waitForAllWarmingSearchers();
  checkShardConsistency(batchSize == 1, true);
  // sometimes we restart zookeeper as well
  if (random().nextBoolean()) {
    zkServer.shutdown();
    zkServer = new ZkTestServer(zkServer.getZkDir(), zkServer.getPort());
    zkServer.run();
  }
  try (CloudSolrClient client = createCloudClient("collection1")) {
    createCollection(null, "testcollection", 1, 1, 100, client, null, "conf1");
  }
  List<Integer> numShardsNumReplicas = new ArrayList<>(2);
  numShardsNumReplicas.add(1);
  numShardsNumReplicas.add(1 + getPullReplicaCount());
  checkForCollection("testcollection", numShardsNumReplicas, null);
}
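The tryDelete call near the top of this test is another helper that does not appear in the listing. A minimal sketch, assuming it simply retries the delete-by-query (via the del helper seen in the previous test) until it succeeds, and that the 10 second timeout is representative:

private void tryDelete() throws Exception {
  // Retries del("*:*") until it succeeds or a timeout expires; the 10 second
  // value is an assumption, not taken from the original test.
  long timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(10, TimeUnit.SECONDS);
  while (System.nanoTime() < timeout) {
    try {
      del("*:*");
      break;
    } catch (SolrServerException e) {
      // the delete can fail while the cluster is still settling; retry
      e.printStackTrace();
    }
  }
}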