Example usage of org.apache.solr.common.SolrDocument in the Apache lucene-solr project:
class UpdateLogTest, method testApplyPartialUpdatesOnMultipleInPlaceUpdatesInSequence.
/**
 * Adds a full document followed by two in-place updates, then verifies that
 * applying the partial updates from the update log reconstructs the full
 * document. The same check is repeated after one (and sometimes a second)
 * commit, since the previous tlogs are retained after a commit.
 *
 * @see org.apache.solr.update.UpdateLog#applyPartialUpdates(BytesRef,long,long,SolrDocumentBase)
 */
@Test
public void testApplyPartialUpdatesOnMultipleInPlaceUpdatesInSequence() {
  // Add a full update, two in-place updates and verify applying partial updates is working
  ulogAdd(ulog, null, sdoc("id", "1", "title_s", "title1", "val1_i_dvo", "1", "_version_", "100"));
  ulogAdd(ulog, 100L, sdoc("id", "1", "price", "1000", "val1_i_dvo", "2", "_version_", "101"));
  ulogAdd(ulog, 101L, sdoc("id", "1", "val1_i_dvo", "3", "_version_", "102"));

  // lookup() returns a tlog entry list; index 4 holds the (partial) document,
  // index 3 the previous version, index 2 the previous tlog pointer
  Object partialUpdate = ulog.lookup(DOC_1_INDEXED_ID);
  SolrDocument partialDoc = RealTimeGetComponent.toSolrDoc((SolrInputDocument) ((List) partialUpdate).get(4), h.getCore().getLatestSchema());
  long prevVersion = (Long) ((List) partialUpdate).get(3);
  long prevPointer = (Long) ((List) partialUpdate).get(2);

  // latest entry is the last in-place update: it has val1_i_dvo but not title_s
  assertEquals(3L, ((NumericDocValuesField) partialDoc.getFieldValue("val1_i_dvo")).numericValue());
  assertFalse(partialDoc.containsKey("title_s"));

  long returnVal = ulog.applyPartialUpdates(DOC_1_INDEXED_ID, prevPointer, prevVersion, null, partialDoc);

  assertEquals(0, returnVal); // 0 indicates the partial doc was fully resolved
  assertEquals(1000, Integer.parseInt(partialDoc.getFieldValue("price").toString()));
  assertEquals(3L, ((NumericDocValuesField) partialDoc.getFieldValue("val1_i_dvo")).numericValue());
  assertEquals("title1", partialDoc.getFieldValue("title_s"));

  // Add a full update, commit, then two in-place updates, and verify that applying partial
  // updates is working (since the prevTlog and prevTlog2 are retained after a commit).
  ulogCommit(ulog);
  if (random().nextBoolean()) {
    // sometimes also try a second commit
    ulogCommit(ulog);
  }
  ulogAdd(ulog, 102L, sdoc("id", "1", "price", "2000", "val1_i_dvo", "4", "_version_", "200"));
  ulogAdd(ulog, 200L, sdoc("id", "1", "val1_i_dvo", "5", "_version_", "201"));

  partialUpdate = ulog.lookup(DOC_1_INDEXED_ID);
  partialDoc = RealTimeGetComponent.toSolrDoc((SolrInputDocument) ((List) partialUpdate).get(4), h.getCore().getLatestSchema());
  prevVersion = (Long) ((List) partialUpdate).get(3);
  prevPointer = (Long) ((List) partialUpdate).get(2);

  assertEquals(5L, ((NumericDocValuesField) partialDoc.getFieldValue("val1_i_dvo")).numericValue());
  assertFalse(partialDoc.containsKey("title_s"));

  returnVal = ulog.applyPartialUpdates(DOC_1_INDEXED_ID, prevPointer, prevVersion, null, partialDoc);

  assertEquals(0, returnVal);
  assertEquals(2000, Integer.parseInt(partialDoc.getFieldValue("price").toString()));
  assertEquals(5L, ((NumericDocValuesField) partialDoc.getFieldValue("val1_i_dvo")).numericValue());
  assertEquals("title1", partialDoc.getFieldValue("title_s"));
}
Example usage of org.apache.solr.common.SolrDocument in the Apache lucene-solr project:
class UpdateLogTest, method testApplyPartialUpdatesAfterMultipleCommits.
/**
 * After several commits, the older tlogs containing the full document are no longer
 * reachable, so resolving a later in-place update back to a full document must fail:
 * {@code applyPartialUpdates} is expected to return -1.
 */
@Test
public void testApplyPartialUpdatesAfterMultipleCommits() {
  ulogAdd(ulog, null, sdoc("id", "1", "title_s", "title1", "val1_i_dvo", "1", "_version_", "100"));
  ulogAdd(ulog, 100L, sdoc("id", "1", "price", "1000", "val1_i_dvo", "2", "_version_", "101"));
  ulogAdd(ulog, 101L, sdoc("id", "1", "val1_i_dvo", "3", "_version_", "102"));

  // Do 3 commits, then in-place update, and verify that applying partial updates can't find full doc
  for (int commitRound = 0; commitRound < 3; commitRound++) {
    ulogCommit(ulog);
  }
  ulogAdd(ulog, 101L, sdoc("id", "1", "val1_i_dvo", "6", "_version_", "300"));

  // entry layout as read below: index 4 = partial doc, 3 = prev version, 2 = prev pointer
  List logEntry = (List) ulog.lookup(DOC_1_INDEXED_ID);
  SolrDocument resolvedDoc = RealTimeGetComponent.toSolrDoc((SolrInputDocument) logEntry.get(4), h.getCore().getLatestSchema());
  long previousVersion = (Long) logEntry.get(3);
  long previousPointer = (Long) logEntry.get(2);

  // the partial doc carries only the in-place updated field, not the title
  assertEquals(6L, ((NumericDocValuesField) resolvedDoc.getFieldValue("val1_i_dvo")).numericValue());
  assertFalse(resolvedDoc.containsKey("title_s"));

  long result = ulog.applyPartialUpdates(DOC_1_INDEXED_ID, previousPointer, previousVersion, null, resolvedDoc);
  assertEquals(-1, result);
}
Example usage of org.apache.solr.common.SolrDocument in the Apache lucene-solr project:
class UpdateLogTest, method testApplyPartialUpdatesDependingOnNonAddShouldThrowException.
/**
 * An in-place update whose dependency chain leads back to a delete (rather than an ADD
 * or another UPDATE_INPLACE) cannot be resolved; this must surface as a
 * {@link SolrException} carrying the INVALID_STATE error code.
 */
@Test
public void testApplyPartialUpdatesDependingOnNonAddShouldThrowException() {
  ulogAdd(ulog, null, sdoc("id", "1", "title_s", "title1", "val1_i_dvo", "1", "_version_", "100"));
  ulogDelete(ulog, "1", 500L, false); // dbi
  ulogAdd(ulog, 500L, sdoc("id", "1", "val1_i_dvo", "2", "_version_", "501"));
  ulogAdd(ulog, 501L, sdoc("id", "1", "val1_i_dvo", "3", "_version_", "502"));

  // entry layout as read below: index 4 = partial doc, 3 = prev version, 2 = prev pointer
  List logEntry = (List) ulog.lookup(DOC_1_INDEXED_ID);
  final SolrDocument partialDoc = RealTimeGetComponent.toSolrDoc((SolrInputDocument) logEntry.get(4), h.getCore().getLatestSchema());
  final long prevVersion = (Long) logEntry.get(3);
  final long prevPointer = (Long) logEntry.get(2);

  assertEquals(3L, ((NumericDocValuesField) partialDoc.getFieldValue("val1_i_dvo")).numericValue());
  assertEquals(502L, ((NumericDocValuesField) partialDoc.getFieldValue("_version_")).numericValue());
  assertFalse(partialDoc.containsKey("title_s"));

  // If an in-place update depends on a non-add (i.e. DBI), assert that an exception is thrown.
  SolrException ex = expectThrows(SolrException.class, () -> {
    long returnVal = ulog.applyPartialUpdates(DOC_1_INDEXED_ID, prevPointer, prevVersion, null, partialDoc);
    fail("502 depends on 501, 501 depends on 500, but 500 is a" + " DELETE. This should've generated an exception. returnVal is: " + returnVal);
  });

  assertEquals(ex.toString(), SolrException.ErrorCode.INVALID_STATE.code, ex.code());
  assertThat(ex.getMessage(), containsString("should've been either ADD or UPDATE_INPLACE"));
  assertThat(ex.getMessage(), containsString("looking for id=1"));
}
Example usage of org.apache.solr.common.SolrDocument in the Apache lucene-solr project:
class TestInPlaceUpdatesDistrib, method reorderedDBQsResurrectionTest.
/* Test for a situation when a document requiring in-place update cannot be "resurrected"
 * when the original full indexed document has been deleted by an out of order DBQ.
 * Expected behaviour in this case should be to throw the replica into LIR (since this will
 * be rare). Here's an example of the situation:
 *     ADD(id=x, val=5, ver=1)
 *     UPD(id=x, val=10, ver=2)
 *     DBQ(q=val:10, v=4)
 *     DV(id=x, val=5, ver=3)
 */
private void reorderedDBQsResurrectionTest() throws Exception {
  if (onlyLeaderIndexes) {
    log.info("RTG with DBQs are not working in tlog replicas");
    return;
  }
  clearIndex();
  commit();

  buildRandomIndex(0);

  // RTG straight from the index
  SolrDocument sdoc = LEADER.getById("0");
  assertEquals("title0", sdoc.get("title_s"));
  long version0 = (long) sdoc.get("_version_");

  String field = "inplace_updatable_int";

  // put replica out of sync
  List<UpdateRequest> updates = new ArrayList<>();
  // full update
  updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", field, 5, "_version_", version0 + 1));
  // in-place update: inplace_updatable_int=10 (depends on version0 + 1)
  updates.add(simulatedUpdateRequest(version0 + 1, "id", 0, field, 10, "_version_", version0 + 2));
  // in-place update: inplace_updatable_int=5 (depends on version0 + 2)
  updates.add(simulatedUpdateRequest(version0 + 2, "id", 0, field, 5, "_version_", version0 + 3));
  // DBQ against the already-superseded value; supposed to not delete anything
  updates.add(simulatedDeleteRequest(field + ":10", version0 + 4));

  // order the updates correctly for NONLEADER 1
  for (UpdateRequest update : updates) {
    log.info("Issuing well ordered update: {}", update.getDocuments());
    NONLEADERS.get(1).request(update);
  }

  // Reordering needs to happen using parallel threads
  ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));

  // re-order the last two updates for NONLEADER 0
  List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
  Collections.swap(reorderedUpdates, 2, 3);

  List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
  for (UpdateRequest update : reorderedUpdates) {
    // pretend as this update is coming from the other non-leader, so that
    // the resurrection can happen from there (instead of the leader)
    update.setParam(DistributedUpdateProcessor.DISTRIB_FROM, ((HttpSolrClient) NONLEADERS.get(1)).getBaseURL());
    AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
    updateResponses.add(threadpool.submit(task));
    // while we can't guarantee/trust what order the updates are executed in, since multiple threads
    // are involved, but we're trying to bias the thread scheduling to run them in the order submitted
    Thread.sleep(10);
  }

  threadpool.shutdown();
  assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));

  int successful = 0;
  for (Future<UpdateResponse> resp : updateResponses) {
    try {
      UpdateResponse r = resp.get();
      if (r.getStatus() == 0) {
        successful++;
      }
    } catch (Exception ex) {
      // guard against a null message (would previously NPE); rethrow in that case too
      String msg = ex.getMessage();
      if (msg == null || !msg.contains("Tried to fetch missing update" + " from the leader, but missing wasn't present at leader.")) {
        throw ex;
      }
    }
  }

  // All should succeed, i.e. no LIR
  assertEquals(updateResponses.size(), successful);

  log.info("Non leader 0: {}", ((HttpSolrClient) NONLEADERS.get(0)).getBaseURL());
  log.info("Non leader 1: {}", ((HttpSolrClient) NONLEADERS.get(1)).getBaseURL());

  SolrDocument doc0 = NONLEADERS.get(0).getById(String.valueOf(0), params("distrib", "false"));
  SolrDocument doc1 = NONLEADERS.get(1).getById(String.valueOf(0), params("distrib", "false"));

  log.info("Doc in both replica 0: {}", doc0);
  log.info("Doc in both replica 1: {}", doc1);

  // assert both replicas have same effect
  for (int i = 0; i < NONLEADERS.size(); i++) {
    // 0th is re-ordered replica, 1st is well-ordered replica
    SolrClient client = NONLEADERS.get(i);
    SolrDocument doc = client.getById(String.valueOf(0), params("distrib", "false"));
    assertNotNull("Client: " + ((HttpSolrClient) client).getBaseURL(), doc);
    assertEquals("Client: " + ((HttpSolrClient) client).getBaseURL(), 5, doc.getFieldValue(field));
  }
  log.info("reorderedDBQsResurrectionTest: This test passed fine...");
  clearIndex();
  commit();
}
Example usage of org.apache.solr.common.SolrDocument in the Apache lucene-solr project:
class SolrExampleTests, method testChildDoctransformer.
/**
 * Verifies the {@code [child]} doc transformer: indexes a random tree of nested
 * documents, then checks that returned child documents correspond to originally
 * indexed descendants, that the {@code limit} parameter caps the number of children
 * returned, and that {@code childFilter} restricts children to the requested levels.
 */
@Test
public void testChildDoctransformer() throws IOException, SolrServerException {
  SolrClient client = getSolrClient();
  client.deleteByQuery("*:*");
  client.commit();

  int numRootDocs = TestUtil.nextInt(random(), 10, 100);
  int maxDepth = TestUtil.nextInt(random(), 2, 5);

  Map<String, SolrInputDocument> allDocs = new HashMap<>();
  for (int i = 0; i < numRootDocs; i++) {
    client.add(genNestedDocuments(allDocs, 0, maxDepth));
  }
  client.commit();

  // sanity check
  SolrQuery q = new SolrQuery("*:*");
  QueryResponse resp = client.query(q);
  assertEquals("Doc count does not match", allDocs.size(), resp.getResults().getNumFound());

  // base check - we know there is an exact number of these root docs
  q = new SolrQuery("level_i:0");
  q.setFields("*", "[child parentFilter=\"level_i:0\"]");
  resp = client.query(q);
  assertEquals("topLevel count does not match", numRootDocs, resp.getResults().getNumFound());
  for (SolrDocument outDoc : resp.getResults()) {
    String docId = (String) outDoc.getFieldValue("id");
    SolrInputDocument origDoc = allDocs.get(docId);
    assertNotNull("docId not found: " + docId, origDoc);
    assertEquals("kids mismatch", origDoc.hasChildDocuments(), outDoc.hasChildDocuments());
    if (outDoc.hasChildDocuments()) {
      for (SolrDocument kid : outDoc.getChildDocuments()) {
        String kidId = (String) kid.getFieldValue("id");
        SolrInputDocument origChild = findDecendent(origDoc, kidId);
        assertNotNull(docId + " doesn't have decendent " + kidId, origChild);
      }
    }
  }

  // simple check: direct verification of direct children on random docs
  {
    int parentLevel = TestUtil.nextInt(random(), 0, maxDepth);
    int kidLevel = parentLevel + 1;
    String parentFilter = "level_i:" + parentLevel;
    String childFilter = "level_i:" + kidLevel;
    int maxKidCount = TestUtil.nextInt(random(), 1, 37);

    q = new SolrQuery("*:*");
    q.setFilterQueries(parentFilter);
    q.setFields("id,[child parentFilter=\"" + parentFilter + "\" childFilter=\"" + childFilter + "\" limit=\"" + maxKidCount + "\"]");
    resp = client.query(q);
    for (SolrDocument outDoc : resp.getResults()) {
      String docId = (String) outDoc.getFieldValue("id");
      SolrInputDocument origDoc = allDocs.get(docId);
      assertNotNull("docId not found: " + docId, origDoc);
      assertEquals("kids mismatch", origDoc.hasChildDocuments(), outDoc.hasChildDocuments());
      if (outDoc.hasChildDocuments()) {
        // since we know we are looking at our direct children
        // we can verify the count; the transformer's limit caps the returned kids
        int numOrigKids = origDoc.getChildDocuments().size();
        int numOutKids = outDoc.getChildDocuments().size();
        assertEquals("Num kids mismatch: " + numOrigKids + "/" + maxKidCount, Math.min(maxKidCount, numOrigKids), numOutKids);
        for (SolrDocument kid : outDoc.getChildDocuments()) {
          String kidId = (String) kid.getFieldValue("id");
          assertEquals("kid is the wrong level", kidLevel, (int) kid.getFieldValue("level_i"));
          SolrInputDocument origChild = findDecendent(origDoc, kidId);
          assertNotNull(docId + " doesn't have decendent " + kidId, origChild);
        }
      }
    }
  }

  // fully randomized
  // verifications are driven only by the results
  {
    int parentLevel = TestUtil.nextInt(random(), 0, maxDepth - 1);
    int kidLevelMin = TestUtil.nextInt(random(), parentLevel + 1, maxDepth);
    int kidLevelMax = TestUtil.nextInt(random(), kidLevelMin, maxDepth);

    String parentFilter = "level_i:" + parentLevel;
    String childFilter = "level_i:[" + kidLevelMin + " TO " + kidLevelMax + "]";
    int maxKidCount = TestUtil.nextInt(random(), 1, 7);

    q = new SolrQuery("*:*");
    if (random().nextBoolean()) {
      String name = names[TestUtil.nextInt(random(), 0, names.length - 1)];
      q = new SolrQuery("name:" + name);
    }
    q.setFilterQueries(parentFilter);
    q.setFields("id,[child parentFilter=\"" + parentFilter + "\" childFilter=\"" + childFilter + "\" limit=\"" + maxKidCount + "\"]");
    resp = client.query(q);
    for (SolrDocument outDoc : resp.getResults()) {
      String docId = (String) outDoc.getFieldValue("id");
      SolrInputDocument origDoc = allDocs.get(docId);
      assertNotNull("docId not found: " + docId, origDoc);
      // might not go deep enough for childFilter...
      if (outDoc.hasChildDocuments()) {
        // ...however if there are out kids, there *have* to be orig kids
        assertTrue("orig doc had no kids at all", origDoc.hasChildDocuments());
        for (SolrDocument kid : outDoc.getChildDocuments()) {
          String kidId = (String) kid.getFieldValue("id");
          int kidLevel = (int) kid.getFieldValue("level_i");
          assertTrue("kid level too high: " + kidLevelMax + "<" + kidLevel, kidLevel <= kidLevelMax);
          assertTrue("kid level too low: " + kidLevelMin + ">" + kidLevel, kidLevelMin <= kidLevel);
          SolrInputDocument origChild = findDecendent(origDoc, kidId);
          assertNotNull(docId + " doesn't have decendent " + kidId, origChild);
        }
      }
    }
  }
}
Aggregations