Use of org.apache.solr.common.SolrDocument in the project lucene-solr by apache:
class SegmentTerminateEarlyTestState, method queryTimestampAscendingSegmentTerminateEarlyYes.
/**
 * Queries with an ascending timestamp sort — deliberately incompatible with the
 * (descending) merge sort order — while requesting segmentTerminateEarly=true,
 * then verifies that the results are still fully correct and that the
 * segmentTerminatedEarly response-header flag is present but {@code false}
 * (early termination cannot be honored for an incompatible sort).
 */
void queryTimestampAscendingSegmentTerminateEarlyYes(CloudSolrClient cloudSolrClient) throws Exception {
  TestMiniSolrCloudCluster.assertFalse(minTimestampDocKeys.isEmpty());
  TestMiniSolrCloudCluster.assertTrue("numDocs=" + numDocs + " is not even", (numDocs % 2) == 0);
  // Long.valueOf instead of the deprecated Long(long) constructor (uses the boxed cache).
  final Long oddFieldValue = Long.valueOf(minTimestampDocKeys.iterator().next().intValue() % 2);
  final SolrQuery query = new SolrQuery(ODD_FIELD + ":" + oddFieldValue);
  // a sort order that is _not_ compatible with the merge sort order
  query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.asc);
  query.setFields(KEY_FIELD, ODD_FIELD, TIMESTAMP_FIELD);
  query.setRows(1);
  query.set(CommonParams.SEGMENT_TERMINATE_EARLY, true);
  final QueryResponse rsp = cloudSolrClient.query(query);
  // check correctness of the results count
  TestMiniSolrCloudCluster.assertEquals("numFound", numDocs / 2, rsp.getResults().getNumFound());
  // check correctness of the first result
  if (rsp.getResults().getNumFound() > 0) {
    final SolrDocument solrDocument0 = rsp.getResults().get(0);
    TestMiniSolrCloudCluster.assertTrue(KEY_FIELD + " of (" + solrDocument0 + ") is not in minTimestampDocKeys(" + minTimestampDocKeys + ")", minTimestampDocKeys.contains(solrDocument0.getFieldValue(KEY_FIELD)));
    TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, solrDocument0.getFieldValue(ODD_FIELD));
  }
  // check segmentTerminatedEarly flag
  TestMiniSolrCloudCluster.assertNotNull("responseHeader.segmentTerminatedEarly missing in " + rsp.getResponseHeader(), rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY));
  // segmentTerminateEarly cannot be used with incompatible sort orders
  TestMiniSolrCloudCluster.assertTrue("responseHeader.segmentTerminatedEarly missing/true in " + rsp.getResponseHeader(), Boolean.FALSE.equals(rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY)));
}
Use of org.apache.solr.common.SolrDocument in the project lucene-solr by apache:
class SegmentTerminateEarlyTestState, method queryTimestampDescending.
/**
 * Queries with a descending timestamp sort WITHOUT requesting
 * segmentTerminateEarly, then verifies that the results are correct and that
 * the segmentTerminatedEarly response-header flag is absent (the parameter was
 * never sent, so no flag should be reported).
 */
void queryTimestampDescending(CloudSolrClient cloudSolrClient) throws Exception {
  TestMiniSolrCloudCluster.assertFalse(maxTimestampDocKeys.isEmpty());
  TestMiniSolrCloudCluster.assertTrue("numDocs=" + numDocs + " is not even", (numDocs % 2) == 0);
  // Long.valueOf instead of the deprecated Long(long) constructor (uses the boxed cache).
  final Long oddFieldValue = Long.valueOf(maxTimestampDocKeys.iterator().next().intValue() % 2);
  final SolrQuery query = new SolrQuery(ODD_FIELD + ":" + oddFieldValue);
  query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.desc);
  query.setFields(KEY_FIELD, ODD_FIELD, TIMESTAMP_FIELD);
  query.setRows(1);
  // CommonParams.SEGMENT_TERMINATE_EARLY parameter intentionally absent
  final QueryResponse rsp = cloudSolrClient.query(query);
  // check correctness of the results count
  TestMiniSolrCloudCluster.assertEquals("numFound", numDocs / 2, rsp.getResults().getNumFound());
  // check correctness of the first result
  if (rsp.getResults().getNumFound() > 0) {
    final SolrDocument solrDocument0 = rsp.getResults().get(0);
    TestMiniSolrCloudCluster.assertTrue(KEY_FIELD + " of (" + solrDocument0 + ") is not in maxTimestampDocKeys(" + maxTimestampDocKeys + ")", maxTimestampDocKeys.contains(solrDocument0.getFieldValue(KEY_FIELD)));
    TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, solrDocument0.getFieldValue(ODD_FIELD));
  }
  // check segmentTerminatedEarly flag
  TestMiniSolrCloudCluster.assertNull("responseHeader.segmentTerminatedEarly present in " + rsp.getResponseHeader(), rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY));
}
Use of org.apache.solr.common.SolrDocument in the project lucene-solr by apache:
class TestTolerantUpdateProcessorRandomCloud, method allDocs.
/**
 * Uses a cursor to iterate over every doc in the index, recording the 'id_i'
 * value of each in a {@link BitSet}.
 *
 * @param c                client to query
 * @param maxDocIdExpected largest 'id_i' value expected in the index; anything
 *                         bigger fails the test
 * @return a BitSet with one bit set per 'id_i' value found
 */
private static final BitSet allDocs(final SolrClient c, final int maxDocIdExpected) throws Exception {
  final BitSet docs = new BitSet(maxDocIdExpected + 1);
  String cursorMark = CURSOR_MARK_START;
  // Sentinel so the loop runs at least once; cursor paging ends when a page is empty.
  int docsOnThisPage = Integer.MAX_VALUE;
  while (0 < docsOnThisPage) {
    final SolrParams p = params("q", "*:*", "rows", "100", // note: not numeric, but we don't actually care about the order
        "sort", "id asc", CURSOR_MARK_PARAM, cursorMark);
    final QueryResponse rsp = c.query(p);
    // Advance the cursor exactly once per page (the original assigned it twice; the
    // second assignment was a redundant dead store).
    cursorMark = rsp.getNextCursorMark();
    docsOnThisPage = 0;
    for (SolrDocument doc : rsp.getResults()) {
      docsOnThisPage++;
      final int id_i = ((Integer) doc.get("id_i")).intValue();
      assertTrue("found id_i bigger then expected " + maxDocIdExpected + ": " + id_i, id_i <= maxDocIdExpected);
      docs.set(id_i);
    }
  }
  return docs;
}
Use of org.apache.solr.common.SolrDocument in the project lucene-solr by apache:
class TriLevelCompositeIdRoutingTest, method doQueryGetUniqueIdKeys.
/**
 * Runs a query with the given parameters and collects the unique routing key of
 * each result document's "id" field.
 *
 * @param queryParams alternating key/value query parameters
 * @return the set of distinct id keys present in the results
 */
Set<String> doQueryGetUniqueIdKeys(String... queryParams) throws Exception {
  final Set<String> idKeys = new HashSet<>();
  final QueryResponse response = cloudClient.query(params(queryParams));
  for (SolrDocument document : response.getResults()) {
    idKeys.add(getKey((String) document.get("id")));
  }
  return idKeys;
}
Use of org.apache.solr.common.SolrDocument in the project lucene-solr by apache:
class TestReplicationHandler, method doTestIndexAndConfigReplication.
// End-to-end master/slave replication test: first verifies plain index replication,
// then verifies replication of config files (a schema change plus an xslt subdirectory)
// after restarting both nodes. Statement order matters throughout (jetty restarts,
// client re-creation, config copies), so the flow is annotated rather than restructured.
@Test
public void doTestIndexAndConfigReplication() throws Exception {
clearIndexWithReplication();
// Index nDocs-1 documents on the master and commit them.
nDocs--;
for (int i = 0; i < nDocs; i++) index(masterClient, "id", i, "name", "name = " + i);
masterClient.commit();
// rQuery presumably retries until the expected count is visible — TODO confirm.
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp.get("response");
assertEquals(nDocs, numFound(masterQueryRsp));
//get docs from slave and check if number is equal to master
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response");
assertEquals(nDocs, numFound(slaveQueryRsp));
//compare results
String cmp = BaseDistributedSearchTestCase.compare(masterQueryResult, slaveQueryResult, 0, null);
assertEquals(null, cmp);
assertVersions(masterClient, slaveClient);
//start config files replication test
// Wipe the index so the config-replication phase starts clean.
masterClient.deleteByQuery("*:*");
masterClient.commit();
//change the schema on master
master.copyConfigFile(CONF_DIR + "schema-replication2.xml", "schema.xml");
// Restart the master so the new schema takes effect; the old client is bound to the
// old jetty, so it must be closed and re-created against the new port.
masterJetty.stop();
masterJetty = createJetty(master);
masterClient.close();
masterClient = createNewSolrClient(masterJetty.getLocalPort());
// Point the slave's config at the (possibly changed) master port before restarting it.
slave.setTestPort(masterJetty.getLocalPort());
slave.copyConfigFile(slave.getSolrConfigFile(), "solrconfig.xml");
slaveJetty.stop();
// setup an xslt dir to force subdir file replication
File masterXsltDir = new File(master.getConfDir() + File.separator + "xslt");
File masterXsl = new File(masterXsltDir, "dummy.xsl");
assertTrue("could not make dir " + masterXsltDir, masterXsltDir.mkdirs());
assertTrue(masterXsl.createNewFile());
File slaveXsltDir = new File(slave.getConfDir() + File.separator + "xslt");
File slaveXsl = new File(slaveXsltDir, "dummy.xsl");
// Precondition: the xslt dir must NOT exist on the slave yet, or the later
// replication check would be meaningless.
assertFalse(slaveXsltDir.exists());
slaveJetty = createJetty(slave);
slaveClient.close();
slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
//add a doc with new field and commit on master to trigger index fetch from slave.
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
masterClient.commit();
assertEquals(1, numFound(rQuery(1, "*:*", masterClient)));
slaveQueryRsp = rQuery(1, "*:*", slaveClient);
assertVersions(masterClient, slaveClient);
// The new field only exists under the replicated schema, so a successful read
// proves the schema replicated along with the index.
SolrDocument d = ((SolrDocumentList) slaveQueryRsp.get("response")).get(0);
assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
// Verify the xslt subdirectory and its file were replicated to the slave.
assertTrue(slaveXsltDir.isDirectory());
assertTrue(slaveXsl.exists());
checkForSingleIndex(masterJetty);
checkForSingleIndex(slaveJetty, true);
}
Aggregations