Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
The class AbstractDistribZkTestBase, method waitForCollectionToDisappear.
public static void waitForCollectionToDisappear(String collection, ZkStateReader zkStateReader, boolean verbose, boolean failOnTimeout, int timeoutSeconds) throws Exception {
  log.info("Wait for collection to disappear - collection: " + collection + " failOnTimeout:" + failOnTimeout + " timeout (sec):" + timeoutSeconds);
  boolean cont = true;
  int cnt = 0;
  while (cont) {
    if (verbose)
      System.out.println("-");
    ClusterState clusterState = zkStateReader.getClusterState();
    if (!clusterState.hasCollection(collection))
      break;
    if (cnt == timeoutSeconds) {
      if (verbose)
        System.out.println("Gave up waiting for " + collection + " to disappear..");
      if (failOnTimeout) {
        Diagnostics.logThreadDumps("Gave up waiting for " + collection + " to disappear. THREAD DUMP:");
        zkStateReader.getZkClient().printLayoutToStdOut();
        fail("The collection (" + collection + ") is still present - waited for " + timeoutSeconds + " seconds");
        // won't get here
        return;
      }
      cont = false;
    } else {
      Thread.sleep(1000);
    }
    cnt++;
  }
  log.info("Collection has disappeared - collection: " + collection);
}
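A minimal sketch of calling this helper from a test, e.g. after issuing a collection delete. The collection name and the 30-second timeout are illustrative assumptions; the ZkStateReader would come from the test's cloud client.

// Hypothetical call site: block until "test_collection" leaves the cluster state,
// failing the test (with thread dumps) if it is still present after 30 seconds.
ZkStateReader zkStateReader = cloudClient.getZkStateReader();
AbstractDistribZkTestBase.waitForCollectionToDisappear("test_collection", zkStateReader, false, true, 30);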
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
The class AbstractFullDistribZkTestBase, method getLeaderUrlFromZk.
protected ZkCoreNodeProps getLeaderUrlFromZk(String collection, String slice) {
  ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
  ZkNodeProps leader = clusterState.getLeader(collection, slice);
  if (leader == null) {
    throw new RuntimeException("Could not find leader:" + collection + " " + slice);
  }
  return new ZkCoreNodeProps(leader);
}
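A short sketch of how the returned ZkCoreNodeProps might be used to talk to the shard leader directly; the collection and slice names here are assumptions for illustration.

// Hypothetical usage: resolve the leader of shard1 and query its core URL directly.
ZkCoreNodeProps leaderProps = getLeaderUrlFromZk("collection1", "shard1");
try (HttpSolrClient leaderClient = getHttpSolrClient(leaderProps.getCoreUrl())) {
  leaderClient.query(new SolrQuery("*:*"));
}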
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
The class CloudSolrClientCacheTest, method testCaching.
public void testCaching() throws Exception {
  String collName = "gettingstarted";
  Set<String> livenodes = new HashSet<>();
  Map<String, ClusterState.CollectionRef> refs = new HashMap<>();
  Map<String, DocCollection> colls = new HashMap<>();
  // A lazy CollectionRef that counts how often the collection state is resolved.
  class Ref extends ClusterState.CollectionRef {
    private String c;

    public Ref(String c) {
      super(null);
      this.c = c;
    }

    @Override
    public boolean isLazilyLoaded() {
      return true;
    }

    @Override
    public DocCollection get() {
      gets.incrementAndGet();
      return colls.get(c);
    }
  }
  Map<String, Function> responses = new HashMap<>();
  NamedList okResponse = new NamedList();
  okResponse.add("responseHeader", new NamedList<>(Collections.singletonMap("status", 0)));
  LBHttpSolrClient mockLbclient = getMockLbHttpSolrClient(responses);
  AtomicInteger lbhttpRequestCount = new AtomicInteger();
  try (CloudSolrClient cloudClient = new CloudSolrClient.Builder()
      .withLBHttpSolrClient(mockLbclient)
      .withClusterStateProvider(getStateProvider(livenodes, refs))
      .build()) {
    livenodes.addAll(ImmutableSet.of("192.168.1.108:7574_solr", "192.168.1.108:8983_solr"));
    ClusterState cs = ClusterState.load(1, coll1State.getBytes(UTF_8), Collections.emptySet(), "/collections/gettingstarted/state.json");
    refs.put(collName, new Ref(collName));
    colls.put(collName, cs.getCollectionOrNull(collName));
    // Fail the first three requests with transport-level exceptions, then succeed.
    responses.put("request", o -> {
      int i = lbhttpRequestCount.incrementAndGet();
      if (i == 1)
        return new ConnectException("TEST");
      if (i == 2)
        return new SocketException("TEST");
      if (i == 3)
        return new NoHttpResponseException("TEST");
      return okResponse;
    });
    UpdateRequest update = new UpdateRequest().add("id", "123", "desc", "Something 0");
    cloudClient.request(update, collName);
    assertEquals(2, refs.get(collName).getCount());
  }
}
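The final assertion relies on ClusterState.CollectionRef counting each resolution of the lazy state. A hedged sketch of an additional step that could go inside the try block above, assuming the client's per-collection state cache is still warm so a further request does not resolve the lazy ref again:

// Hypothetical extra request; the unchanged count is an assumption based on
// CloudSolrClient caching the collection state it already fetched.
cloudClient.request(new UpdateRequest().add("id", "456", "desc", "Something 1"), collName);
assertEquals(2, refs.get(collName).getCount());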
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
The class CloudSolrClientTest, method testRouting.
@Test
public void testRouting() throws Exception {
  AbstractUpdateRequest request = new UpdateRequest()
      .add(id, "0", "a_t", "hello1")
      .add(id, "2", "a_t", "hello2")
      .setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
  // Test single threaded routed updates for UpdateRequest
  NamedList<Object> response = getRandomClient().request(request, COLLECTION);
  if (getRandomClient().isDirectUpdatesToLeadersOnly()) {
    checkSingleServer(response);
  }
  CloudSolrClient.RouteResponse rr = (CloudSolrClient.RouteResponse) response;
  Map<String, LBHttpSolrClient.Req> routes = rr.getRoutes();
  Iterator<Map.Entry<String, LBHttpSolrClient.Req>> it = routes.entrySet().iterator();
  while (it.hasNext()) {
    Map.Entry<String, LBHttpSolrClient.Req> entry = it.next();
    String url = entry.getKey();
    UpdateRequest updateRequest = (UpdateRequest) entry.getValue().getRequest();
    SolrInputDocument doc = updateRequest.getDocuments().get(0);
    String id = doc.getField("id").getValue().toString();
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add("q", "id:" + id);
    params.add("distrib", "false");
    QueryRequest queryRequest = new QueryRequest(params);
    try (HttpSolrClient solrClient = getHttpSolrClient(url)) {
      QueryResponse queryResponse = queryRequest.process(solrClient);
      SolrDocumentList docList = queryResponse.getResults();
      assertTrue(docList.getNumFound() == 1);
    }
  }
  // Test the deleteById routing for UpdateRequest
  final UpdateResponse uResponse = new UpdateRequest()
      .deleteById("0")
      .deleteById("2")
      .commit(cluster.getSolrClient(), COLLECTION);
  if (getRandomClient().isDirectUpdatesToLeadersOnly()) {
    checkSingleServer(uResponse.getResponse());
  }
  QueryResponse qResponse = getRandomClient().query(COLLECTION, new SolrQuery("*:*"));
  SolrDocumentList docs = qResponse.getResults();
  assertEquals(0, docs.getNumFound());
  // Test Multi-Threaded routed updates for UpdateRequest
  try (CloudSolrClient threadedClient = getCloudSolrClient(cluster.getZkServer().getZkAddress())) {
    threadedClient.setParallelUpdates(true);
    threadedClient.setDefaultCollection(COLLECTION);
    response = threadedClient.request(request);
    if (threadedClient.isDirectUpdatesToLeadersOnly()) {
      checkSingleServer(response);
    }
    rr = (CloudSolrClient.RouteResponse) response;
    routes = rr.getRoutes();
    it = routes.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, LBHttpSolrClient.Req> entry = it.next();
      String url = entry.getKey();
      UpdateRequest updateRequest = (UpdateRequest) entry.getValue().getRequest();
      SolrInputDocument doc = updateRequest.getDocuments().get(0);
      String id = doc.getField("id").getValue().toString();
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.add("q", "id:" + id);
      params.add("distrib", "false");
      QueryRequest queryRequest = new QueryRequest(params);
      try (HttpSolrClient solrClient = getHttpSolrClient(url)) {
        QueryResponse queryResponse = queryRequest.process(solrClient);
        SolrDocumentList docList = queryResponse.getResults();
        assertTrue(docList.getNumFound() == 1);
      }
    }
  }
  // Test that queries with _route_ params are routed by the client
  // Track request counts on each node before the query calls
  ClusterState clusterState = cluster.getSolrClient().getZkStateReader().getClusterState();
  DocCollection col = clusterState.getCollection(COLLECTION);
  Map<String, Long> requestCountsMap = Maps.newHashMap();
  for (Slice slice : col.getSlices()) {
    for (Replica replica : slice.getReplicas()) {
      String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
      requestCountsMap.put(baseURL, getNumRequests(baseURL, COLLECTION));
    }
  }
  // Collect the base URLs of the replicas of the shard that's expected to be hit
  DocRouter router = col.getRouter();
  Collection<Slice> expectedSlices = router.getSearchSlicesSingle("0", null, col);
  Set<String> expectedBaseURLs = Sets.newHashSet();
  for (Slice expectedSlice : expectedSlices) {
    for (Replica replica : expectedSlice.getReplicas()) {
      String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
      expectedBaseURLs.add(baseURL);
    }
  }
  assertTrue("expected urls is not fewer than all urls! expected=" + expectedBaseURLs + "; all=" + requestCountsMap.keySet(), expectedBaseURLs.size() < requestCountsMap.size());
  // Calculate a number of shard keys that route to the same shard.
  int n;
  if (TEST_NIGHTLY) {
    n = random().nextInt(999) + 2;
  } else {
    n = random().nextInt(9) + 2;
  }
  List<String> sameShardRoutes = Lists.newArrayList();
  sameShardRoutes.add("0");
  for (int i = 1; i < n; i++) {
    String shardKey = Integer.toString(i);
    Collection<Slice> slices = router.getSearchSlicesSingle(shardKey, null, col);
    log.info("Expected Slices {}", slices);
    if (expectedSlices.equals(slices)) {
      sameShardRoutes.add(shardKey);
    }
  }
  assertTrue(sameShardRoutes.size() > 1);
  // Do n queries with the _route_ parameter, all routed to the same shard
  for (int i = 0; i < n; i++) {
    ModifiableSolrParams solrParams = new ModifiableSolrParams();
    solrParams.set(CommonParams.Q, "*:*");
    solrParams.set(ShardParams._ROUTE_, sameShardRoutes.get(random().nextInt(sameShardRoutes.size())));
    log.info("output: {}", getRandomClient().query(COLLECTION, solrParams));
  }
  // The request-count increase on the expected nodes should sum to n, while there
  // should be no increase on the unexpected nodes.
  int increaseFromExpectedUrls = 0;
  int increaseFromUnexpectedUrls = 0;
  Map<String, Long> numRequestsToUnexpectedUrls = Maps.newHashMap();
  for (Slice slice : col.getSlices()) {
    for (Replica replica : slice.getReplicas()) {
      String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
      Long prevNumRequests = requestCountsMap.get(baseURL);
      Long curNumRequests = getNumRequests(baseURL, COLLECTION);
      long delta = curNumRequests - prevNumRequests;
      if (expectedBaseURLs.contains(baseURL)) {
        increaseFromExpectedUrls += delta;
      } else {
        increaseFromUnexpectedUrls += delta;
        numRequestsToUnexpectedUrls.put(baseURL, delta);
      }
    }
  }
  assertEquals("Unexpected number of requests to expected URLs", n, increaseFromExpectedUrls);
  assertEquals("Unexpected number of requests to unexpected URLs: " + numRequestsToUnexpectedUrls, 0, increaseFromUnexpectedUrls);
}
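For reference, the _route_-scoped query pattern this test exercises can be written on its own as below; the route key "0" and the collection constant are taken from the test, the rest is an illustrative sketch.

// Restrict the query to the shard(s) owning route key "0"; the CloudSolrClient
// resolves the route client-side and only contacts the matching replicas.
SolrQuery routed = new SolrQuery("*:*");
routed.set(ShardParams._ROUTE_, "0");
QueryResponse routedResponse = cluster.getSolrClient().query(COLLECTION, routed);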
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
The class StressHdfsTest, method createAndDeleteCollection.
private void createAndDeleteCollection() throws Exception {
  boolean overshard = random().nextBoolean();
  int rep;
  int nShards;
  int maxReplicasPerNode;
  if (overshard) {
    nShards = getShardCount() * 2;
    maxReplicasPerNode = 8;
    rep = 1;
  } else {
    nShards = getShardCount() / 2;
    maxReplicasPerNode = 1;
    rep = 2;
    if (nShards == 0)
      nShards = 1;
  }
  createCollection(DELETE_DATA_DIR_COLLECTION, nShards, rep, maxReplicasPerNode);
  waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
  // data dirs should be in zk, SOLR-8913
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  Slice slice = clusterState.getSlice(DELETE_DATA_DIR_COLLECTION, "shard1");
  assertNotNull(clusterState.getSlices(DELETE_DATA_DIR_COLLECTION).toString(), slice);
  Collection<Replica> replicas = slice.getReplicas();
  for (Replica replica : replicas) {
    assertNotNull(replica.getProperties().toString(), replica.get("dataDir"));
    assertNotNull(replica.getProperties().toString(), replica.get("ulogDir"));
  }
  cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
  cloudClient.getZkStateReader().forceUpdateCollection(DELETE_DATA_DIR_COLLECTION);
  for (int i = 1; i < nShards + 1; i++) {
    cloudClient.getZkStateReader().getLeaderRetry(DELETE_DATA_DIR_COLLECTION, "shard" + i, 30000);
  }
  // index some docs on each node and collect the data dirs
  List<String> dataDirs = new ArrayList<>();
  int i = 0;
  for (SolrClient client : clients) {
    try (HttpSolrClient c = getHttpSolrClient(getBaseUrl(client) + "/" + DELETE_DATA_DIR_COLLECTION)) {
      int docCnt = random().nextInt(1000) + 1;
      for (int j = 0; j < docCnt; j++) {
        c.add(getDoc("id", i++, "txt_t", "just some random text for a doc"));
      }
      if (random().nextBoolean()) {
        c.commit();
      } else {
        c.commit(true, true, true);
      }
      c.setConnectionTimeout(30000);
      NamedList<Object> response = c.query(new SolrQuery().setRequestHandler("/admin/system")).getResponse();
      NamedList<Object> coreInfo = (NamedList<Object>) response.get("core");
      String dataDir = (String) ((NamedList<Object>) coreInfo.get("directory")).get("data");
      dataDirs.add(dataDir);
    }
  }
  if (random().nextBoolean()) {
    cloudClient.deleteByQuery("*:*");
    cloudClient.commit();
    assertEquals(0, cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound());
  }
  cloudClient.commit();
  cloudClient.query(new SolrQuery("*:*"));
  // delete the collection
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("action", CollectionAction.DELETE.toString());
  params.set("name", DELETE_DATA_DIR_COLLECTION);
  QueryRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");
  cloudClient.request(request);
  final TimeOut timeout = new TimeOut(10, TimeUnit.SECONDS);
  while (cloudClient.getZkStateReader().getClusterState().hasCollection(DELETE_DATA_DIR_COLLECTION)) {
    if (timeout.hasTimedOut()) {
      throw new AssertionError("Timeout waiting to see removed collection leave clusterstate");
    }
    Thread.sleep(200);
  }
  // check that all data dirs are gone
  for (String dataDir : dataDirs) {
    Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    FileSystem fs = FileSystem.get(new URI(HdfsTestUtil.getURI(dfsCluster)), conf);
    assertFalse("Data directory exists after collection removal : " + dataDir, fs.exists(new Path(dataDir)));
    fs.close();
  }
}
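As an aside, the raw /admin/collections call above can also be expressed through the typed collections API; a hedged sketch, assuming CollectionAdminRequest is available on this branch and behaves equivalently:

// Equivalent collection delete via the typed API (equivalence is an assumption;
// the test's raw QueryRequest form above is the authoritative version).
CollectionAdminRequest.Delete delete = CollectionAdminRequest.deleteCollection(DELETE_DATA_DIR_COLLECTION);
delete.process(cloudClient);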