Use of org.apache.solr.common.SolrDocumentList in project lucene-solr by apache.
The class TestTolerantUpdateProcessorCloud, method createMiniSolrCloudCluster.
@BeforeClass
public static void createMiniSolrCloudCluster() throws Exception {
final String configName = "solrCloudCollectionConfig";
final File configDir = new File(TEST_HOME() + File.separator + "collection1" + File.separator + "conf");
configureCluster(NUM_SERVERS).addConfig(configName, configDir.toPath()).configure();
assertSpinLoopAllJettyAreRunning(cluster);
CLOUD_CLIENT = cluster.getSolrClient();
CLOUD_CLIENT.setDefaultCollection(COLLECTION_NAME);
CollectionAdminRequest.createCollection(COLLECTION_NAME, configName, NUM_SHARDS, REPLICATION_FACTOR)
    .withProperty("config", "solrconfig-distrib-update-processor-chains.xml")
    .withProperty("schema", "schema15.xml") // string id for doc routing prefix
    .process(CLOUD_CLIENT);
ZkStateReader zkStateReader = CLOUD_CLIENT.getZkStateReader();
AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION_NAME, zkStateReader, true, true, 330);
// really hackish way to get a URL for specific nodes based on shard/replica hosting
// inspired by TestMiniSolrCloudCluster
HashMap<String, String> urlMap = new HashMap<>();
for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
URL jettyURL = jetty.getBaseUrl();
String nodeKey = jettyURL.getHost() + ":" + jettyURL.getPort() + jettyURL.getPath().replace("/", "_");
urlMap.put(nodeKey, jettyURL.toString());
}
zkStateReader.updateClusterState();
ClusterState clusterState = zkStateReader.getClusterState();
for (Slice slice : clusterState.getSlices(COLLECTION_NAME)) {
String shardName = slice.getName();
Replica leader = slice.getLeader();
assertNotNull("slice has null leader: " + slice.toString(), leader);
assertNotNull("slice leader has null node name: " + slice.toString(), leader.getNodeName());
String leaderUrl = urlMap.remove(leader.getNodeName());
assertNotNull("could not find URL for " + shardName + " leader: " + leader.getNodeName(), leaderUrl);
assertEquals("expected two total replicas for: " + slice.getName(), 2, slice.getReplicas().size());
String passiveUrl = null;
for (Replica replica : slice.getReplicas()) {
if (!replica.equals(leader)) {
passiveUrl = urlMap.remove(replica.getNodeName());
assertNotNull("could not find URL for " + shardName + " replica: " + replica.getNodeName(), passiveUrl);
}
}
assertNotNull("could not find URL for " + shardName + " replica", passiveUrl);
if (shardName.equals("shard1")) {
S_ONE_LEADER_CLIENT = getHttpSolrClient(leaderUrl + "/" + COLLECTION_NAME + "/");
S_ONE_NON_LEADER_CLIENT = getHttpSolrClient(passiveUrl + "/" + COLLECTION_NAME + "/");
} else if (shardName.equals("shard2")) {
S_TWO_LEADER_CLIENT = getHttpSolrClient(leaderUrl + "/" + COLLECTION_NAME + "/");
S_TWO_NON_LEADER_CLIENT = getHttpSolrClient(passiveUrl + "/" + COLLECTION_NAME + "/");
} else {
fail("unexpected shard: " + shardName);
}
}
assertEquals("Should be exactly one server left (nost hosting either shard)", 1, urlMap.size());
NO_COLLECTION_CLIENT = getHttpSolrClient(urlMap.values().iterator().next() + "/" + COLLECTION_NAME + "/");
assertNotNull(S_ONE_LEADER_CLIENT);
assertNotNull(S_TWO_LEADER_CLIENT);
assertNotNull(S_ONE_NON_LEADER_CLIENT);
assertNotNull(S_TWO_NON_LEADER_CLIENT);
assertNotNull(NO_COLLECTION_CLIENT);
// sanity check that our S_ONE_PRE & S_TWO_PRE really do map to shard1 & shard2 with default routing
assertEquals(0, CLOUD_CLIENT.add(doc(f("id", S_ONE_PRE + random().nextInt()), f("expected_shard_s", "shard1"))).getStatus());
assertEquals(0, CLOUD_CLIENT.add(doc(f("id", S_TWO_PRE + random().nextInt()), f("expected_shard_s", "shard2"))).getStatus());
assertEquals(0, CLOUD_CLIENT.commit().getStatus());
SolrDocumentList docs = CLOUD_CLIENT.query(params("q", "*:*", "fl", "id,expected_shard_s,[shard]")).getResults();
assertEquals(2, docs.getNumFound());
assertEquals(2, docs.size());
for (SolrDocument doc : docs) {
String expected = COLLECTION_NAME + "_" + doc.getFirstValue("expected_shard_s") + "_replica";
String docShard = doc.getFirstValue("[shard]").toString();
assertTrue("shard routing prefixes don't seem to be aligned anymore, " + "did someone change the default routing rules? " + "and/or the the default core name rules? " + "and/or the numShards used by this test? ... " + "couldn't find " + expected + " as substring of [shard] == '" + docShard + "' ... for docId == " + doc.getFirstValue("id"), docShard.contains(expected));
}
}
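The doc()/f() helpers above are utilities from the Solr test framework. As a point of comparison, here is a minimal sketch of the same composite-id routing sanity check in plain SolrJ; the ZooKeeper address, collection name, and id prefix are assumptions for illustration.
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;

public class ShardRoutingCheck {
    public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
                .withZkHost("localhost:9983") // assumed ZooKeeper address
                .build()) {
            client.setDefaultCollection("test_col"); // assumed collection name
            // composite-id routing: everything before "!" picks the shard
            SolrInputDocument doc = new SolrInputDocument();
            doc.addField("id", "a!1");
            doc.addField("expected_shard_s", "shard1");
            client.add(doc);
            client.commit();
            // [shard] is a doc transformer that reports which shard served each doc
            ModifiableSolrParams params = new ModifiableSolrParams();
            params.set("q", "*:*");
            params.set("fl", "id,expected_shard_s,[shard]");
            SolrDocumentList results = client.query(params).getResults();
            for (SolrDocument d : results) {
                System.out.println(d.getFirstValue("id") + " -> " + d.getFirstValue("[shard]"));
            }
        }
    }
}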
Use of org.apache.solr.common.SolrDocumentList in project lucene-solr by apache.
The class TestRandomFlRTGCloud, method assertRTG.
/**
* Does one or more RTG requests for the specified docIds with randomized fl &amp; fq params, asserting
* that the returned documents (if any) make sense given the expected SolrInputDocuments
*/
private void assertRTG(final SolrInputDocument[] knownDocs, final int[] docIds) throws IOException, SolrServerException {
final SolrClient client = getRandClient(random());
// NOTE: not using SolrClient.getById or getByIds because we want to force choice of "id" vs "ids" params
final ModifiableSolrParams params = params("qt", "/get");
// random fq -- nothing fancy, secondary concern for our test
final Integer FQ_MAX = usually() ? null : random().nextInt();
if (null != FQ_MAX) {
params.add("fq", "aaa_i:[* TO " + FQ_MAX + "]");
}
final Set<FlValidator> validators = new LinkedHashSet<>();
// always include id so we can be confident which doc we're looking at
validators.add(ID_VALIDATOR);
addRandomFlValidators(random(), validators);
FlValidator.addParams(validators, params);
final List<String> idsToRequest = new ArrayList<>(docIds.length);
final List<SolrInputDocument> docsToExpect = new ArrayList<>(docIds.length);
for (int docId : docIds) {
// every docId will be included in the request
idsToRequest.add("" + docId);
// only docs that should actually exist and match our (optional) filter will be expected in response
if (null != knownDocs[docId]) {
Integer filterVal = (Integer) knownDocs[docId].getFieldValue("aaa_i");
if (null == FQ_MAX || ((null != filterVal) && filterVal.intValue() <= FQ_MAX.intValue())) {
docsToExpect.add(knownDocs[docId]);
}
}
}
// even w/only 1 docId requested, the response format can vary depending on how we request it
final boolean askForList = random().nextBoolean() || (1 != idsToRequest.size());
if (askForList) {
if (1 == idsToRequest.size()) {
// have to be careful not to try to use "multi" 'id' params with only 1 docId
// with a single docId, the only way to ask for a list is with the "ids" param
params.add("ids", idsToRequest.get(0));
} else {
if (random().nextBoolean()) {
// each id in its own param
for (String id : idsToRequest) {
params.add("id", id);
}
} else {
// add one or more comma separated ids params
params.add(buildCommaSepParams(random(), "ids", idsToRequest));
}
}
} else {
assert 1 == idsToRequest.size();
params.add("id", idsToRequest.get(0));
}
final QueryResponse rsp = client.query(params);
assertNotNull(params.toString(), rsp);
final SolrDocumentList docs = getDocsFromRTGResponse(askForList, rsp);
assertNotNull(params + " => " + rsp, docs);
assertEquals("num docs mismatch: " + params + " => " + docsToExpect + " vs " + docs, docsToExpect.size(), docs.size());
// NOTE: RTG makes no guarantees about the order docs will be returned in when multiple docs are requested
for (SolrDocument actual : docs) {
try {
int actualId = assertParseInt("id", actual.getFirstValue("id"));
final SolrInputDocument expected = knownDocs[actualId];
assertNotNull("expected null doc but RTG returned: " + actual, expected);
Set<String> expectedFieldNames = new TreeSet<>();
for (FlValidator v : validators) {
expectedFieldNames.addAll(v.assertRTGResults(validators, expected, actual));
}
// ensure only expected field names are in the actual document
Set<String> actualFieldNames = new TreeSet<>(actual.getFieldNames());
assertEquals("Actual field names returned differs from expected", expectedFieldNames, actualFieldNames);
} catch (AssertionError ae) {
throw new AssertionError(params + " => " + actual + ": " + ae.getMessage(), ae);
}
}
}
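One detail the branching above encodes: the response shape differs by parameter style. With "ids" (or repeated "id" params) the handler returns a SolrDocumentList under the "response" key, while a lone "id" param returns a single document under "doc", which is what the getDocsFromRTGResponse helper normalizes. A minimal sketch of the list-style request, assuming an existing SolrClient and documents with ids 42 and 99:
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.params.ModifiableSolrParams;

public class RtgListExample {
    static void rtgList(SolrClient client) throws Exception {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("qt", "/get"); // route to the real-time get handler
        params.set("ids", "42,99"); // comma-separated ids force the list response shape
        params.set("fl", "id,aaa_i"); // restrict returned fields, like the random fl above
        QueryResponse rsp = client.query(params);
        // list-style RTG responses carry a SolrDocumentList under "response"
        SolrDocumentList docs = (SolrDocumentList) rsp.getResponse().get("response");
        for (SolrDocument doc : docs) {
            System.out.println(doc.getFirstValue("id"));
        }
    }
}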
Use of org.apache.solr.common.SolrDocumentList in project lucene-solr by apache.
The class TestSegmentSorting, method testAtomicUpdateOfSegmentSortField.
/**
* Verify that atomic updates against our (DVO) segment sort field don't cause errors.
* In this situation, the updates should *NOT* be done in place, because that would
* break the index sorting.
*/
public void testAtomicUpdateOfSegmentSortField() throws Exception {
final CloudSolrClient cloudSolrClient = cluster.getSolrClient();
final String updateField = SegmentTerminateEarlyTestState.TIMESTAMP_FIELD;
// sanity check that updateField is in fact a DocValues-only field, meaning it
// would normally be eligible for in-place updates -- if it weren't also used for merge sorting
final Map<String, Object> schemaOpts = new Field(updateField, params("includeDynamic", "true", "showDefaults", "true")).process(cloudSolrClient).getField();
assertEquals(true, schemaOpts.get("docValues"));
assertEquals(false, schemaOpts.get("indexed"));
assertEquals(false, schemaOpts.get("stored"));
// add some documents
final int numDocs = atLeast(1000);
for (int id = 1; id <= numDocs; id++) {
cloudSolrClient.add(sdoc("id", id, updateField, random().nextInt(60)));
}
cloudSolrClient.commit();
// (at this point we're just sanity checking no serious failures)
for (int iter = 0; iter < 20; iter++) {
final int iterSize = atLeast(20);
for (int i = 0; i < iterSize; i++) {
// replace
cloudSolrClient.add(sdoc("id", TestUtil.nextInt(random(), 1, numDocs), updateField, random().nextInt(60)));
// atomic update
cloudSolrClient.add(sdoc("id", TestUtil.nextInt(random(), 1, numDocs), updateField, map("set", random().nextInt(60))));
}
cloudSolrClient.commit();
}
// pick a random doc, and verify that doing an atomic update causes the docid to change
// i.e. not an in-place update
final int id = TestUtil.nextInt(random(), 1, numDocs);
final int oldDocId = (Integer) cloudSolrClient.getById("" + id, params("fl", "[docid]")).get("[docid]");
cloudSolrClient.add(sdoc("id", id, updateField, map("inc", "666")));
cloudSolrClient.commit();
// loop in case we're waiting for a newSearcher to be opened
int newDocId = -1;
int attempts = 10;
while ((newDocId < 0) && (0 < attempts--)) {
SolrDocumentList docs = cloudSolrClient.query(params("q", "id:" + id, "fl", "[docid]", "fq", updateField + ":[666 TO *]")).getResults();
if (0 < docs.size()) {
newDocId = (Integer) docs.get(0).get("[docid]");
} else {
Thread.sleep(50);
}
}
assertTrue(oldDocId != newDocId);
}
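The sdoc()/map() helpers above are test shorthand. In plain SolrJ, an atomic update is simply a SolrInputDocument whose field value is a Map from operation ("set", "inc", ...) to operand, as in this minimal sketch; the field name and values are assumptions.
import java.util.Collections;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.common.SolrInputDocument;

public class AtomicUpdateExample {
    static void replaceThenAtomicSet(SolrClient client) throws Exception {
        // full replace: plain field values overwrite the whole document
        SolrInputDocument replace = new SolrInputDocument();
        replace.addField("id", "7");
        replace.addField("timestamp_i_dvo", 30); // assumed field name
        client.add(replace);
        // atomic update: a Map value like {set=31} patches the stored document
        SolrInputDocument atomic = new SolrInputDocument();
        atomic.addField("id", "7");
        atomic.addField("timestamp_i_dvo", Collections.singletonMap("set", 31));
        client.add(atomic);
        client.commit();
    }
}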
Use of org.apache.solr.common.SolrDocumentList in project lucene-solr by apache.
The class QueryResponse, method setResponse.
@Override
public void setResponse(NamedList<Object> res) {
super.setResponse(res);
// Look for known things
for (int i = 0; i < res.size(); i++) {
String n = res.getName(i);
if ("responseHeader".equals(n)) {
_header = (NamedList<Object>) res.getVal(i);
} else if ("response".equals(n)) {
_results = (SolrDocumentList) res.getVal(i);
} else if ("sort_values".equals(n)) {
_sortvalues = (NamedList<ArrayList>) res.getVal(i);
} else if ("facet_counts".equals(n)) {
_facetInfo = (NamedList<Object>) res.getVal(i);
// extractFacetInfo inspects _results, so defer calling it
// in case it hasn't been populated yet.
} else if ("debug".equals(n)) {
_debugInfo = (NamedList<Object>) res.getVal(i);
extractDebugInfo(_debugInfo);
} else if ("grouped".equals(n)) {
_groupedInfo = (NamedList<Object>) res.getVal(i);
extractGroupedInfo(_groupedInfo);
} else if ("expanded".equals(n)) {
NamedList map = (NamedList) res.getVal(i);
_expandedResults = map.asMap(1);
} else if ("highlighting".equals(n)) {
_highlightingInfo = (NamedList<Object>) res.getVal(i);
extractHighlightingInfo(_highlightingInfo);
} else if ("spellcheck".equals(n)) {
_spellInfo = (NamedList<Object>) res.getVal(i);
extractSpellCheckInfo(_spellInfo);
} else if ("clusters".equals(n)) {
_clusterInfo = (ArrayList<NamedList<Object>>) res.getVal(i);
extractClusteringInfo(_clusterInfo);
} else if ("suggest".equals(n)) {
_suggestInfo = (Map<String, NamedList<Object>>) res.getVal(i);
extractSuggesterInfo(_suggestInfo);
} else if ("stats".equals(n)) {
_statsInfo = (NamedList<Object>) res.getVal(i);
extractStatsInfo(_statsInfo);
} else if ("terms".equals(n)) {
_termsInfo = (NamedList<NamedList<Object>>) res.getVal(i);
extractTermsInfo(_termsInfo);
} else if ("moreLikeThis".equals(n)) {
_moreLikeThisInfo = (NamedList<SolrDocumentList>) res.getVal(i);
} else if (CursorMarkParams.CURSOR_MARK_NEXT.equals(n)) {
_cursorMarkNext = (String) res.getVal(i);
}
}
if (_facetInfo != null)
extractFacetInfo(_facetInfo);
}
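Callers never invoke setResponse directly; SolrJ runs it while processing a request, after which the typed getters expose the sections parsed above. A minimal sketch of a consumer, assuming a collection with a "cat" facet field:
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.response.FacetField;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocumentList;

public class ResponseConsumer {
    static void consume(SolrClient client) throws Exception {
        SolrQuery query = new SolrQuery("*:*");
        query.setFacet(true);
        query.addFacetField("cat"); // assumed facet field
        QueryResponse rsp = client.query(query);
        SolrDocumentList docs = rsp.getResults(); // the "response" section
        System.out.println("hits: " + docs.getNumFound());
        for (FacetField ff : rsp.getFacetFields()) { // parsed from "facet_counts"
            for (FacetField.Count c : ff.getValues()) {
                System.out.println(ff.getName() + ": " + c.getName() + " = " + c.getCount());
            }
        }
    }
}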
Use of org.apache.solr.common.SolrDocumentList in project lucene-solr by apache.
The class QueryResponse, method extractGroupedInfo.
private void extractGroupedInfo(NamedList<Object> info) {
if (info != null) {
_groupResponse = new GroupResponse();
int size = info.size();
for (int i = 0; i < size; i++) {
String fieldName = info.getName(i);
Object fieldGroups = info.getVal(i);
SimpleOrderedMap<Object> simpleOrderedMap = (SimpleOrderedMap<Object>) fieldGroups;
Object oMatches = simpleOrderedMap.get("matches");
Object oNGroups = simpleOrderedMap.get("ngroups");
Object oGroups = simpleOrderedMap.get("groups");
Object queryCommand = simpleOrderedMap.get("doclist");
if (oMatches == null) {
continue;
}
if (oGroups != null) {
Integer iMatches = (Integer) oMatches;
ArrayList<Object> groupsArr = (ArrayList<Object>) oGroups;
GroupCommand groupedCommand;
if (oNGroups != null) {
Integer iNGroups = (Integer) oNGroups;
groupedCommand = new GroupCommand(fieldName, iMatches, iNGroups);
} else {
groupedCommand = new GroupCommand(fieldName, iMatches);
}
for (Object oGrp : groupsArr) {
SimpleOrderedMap grpMap = (SimpleOrderedMap) oGrp;
Object sGroupValue = grpMap.get("groupValue");
SolrDocumentList doclist = (SolrDocumentList) grpMap.get("doclist");
Group group = new Group(sGroupValue != null ? sGroupValue.toString() : null, doclist);
groupedCommand.add(group);
}
_groupResponse.add(groupedCommand);
} else if (queryCommand != null) {
Integer iMatches = (Integer) oMatches;
GroupCommand groupCommand;
if (oNGroups != null) {
Integer iNGroups = (Integer) oNGroups;
groupCommand = new GroupCommand(fieldName, iMatches, iNGroups);
} else {
groupCommand = new GroupCommand(fieldName, iMatches);
}
SolrDocumentList docList = (SolrDocumentList) queryCommand;
groupCommand.add(new Group(fieldName, docList));
_groupResponse.add(groupCommand);
}
}
}
}
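The GroupResponse assembled above is what callers walk to reach the per-group SolrDocumentLists. A minimal sketch, assuming a query grouped on a "cat" field:
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.response.Group;
import org.apache.solr.client.solrj.response.GroupCommand;
import org.apache.solr.client.solrj.response.GroupResponse;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocumentList;

public class GroupWalkExample {
    static void walkGroups(SolrClient client) throws Exception {
        SolrQuery query = new SolrQuery("*:*");
        query.set("group", true);
        query.set("group.field", "cat"); // assumed grouping field
        QueryResponse rsp = client.query(query);
        GroupResponse groups = rsp.getGroupResponse();
        for (GroupCommand cmd : groups.getValues()) {
            System.out.println(cmd.getName() + " matches=" + cmd.getMatches());
            for (Group g : cmd.getValues()) {
                SolrDocumentList doclist = g.getResult(); // per-group SolrDocumentList
                System.out.println("  " + g.getGroupValue() + " -> " + doclist.size() + " docs");
            }
        }
    }
}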