Use of org.apache.solr.cloud.TestTolerantUpdateProcessorCloud.ExpectedErr in the apache/lucene-solr project.
From the class TestTolerantUpdateProcessorRandomCloud, method testRandomUpdates:
public void testRandomUpdates() throws Exception {
  final int maxDocId = atLeast(10000);
  final BitSet expectedDocIds = new BitSet(maxDocId + 1);
  final int numIters = atLeast(50);
  for (int i = 0; i < numIters; i++) {
    log.info("BEGIN ITER #{}", i);
    final UpdateRequest req = update(params("maxErrors", "-1", "update.chain", "tolerant-chain-max-errors-10"));
    final int numCmds = TestUtil.nextInt(random(), 1, 20);
    final List<ExpectedErr> expectedErrors = new ArrayList<ExpectedErr>(numCmds);
    int expectedErrorsCount = 0;
    // it's ambiguous/confusing in which order mixed DELQ + ADD (or ADD and DELI for the same id)
    // in the same request will be processed by various clients, so we keep things simple
    // and ensure that no single doc id is affected by more than one command in the same request
    final BitSet docsAffectedThisRequest = new BitSet(maxDocId + 1);
    for (int cmdIter = 0; cmdIter < numCmds; cmdIter++) {
      if ((maxDocId / 2) < docsAffectedThisRequest.cardinality()) {
        // we're already mucking with more than half the docs in the index
        break;
      }
      final boolean causeError = random().nextBoolean();
      if (causeError) {
        expectedErrorsCount++;
      }
      if (random().nextBoolean()) {
        // add a doc
        String id = null;
        SolrInputDocument doc = null;
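        // a small fraction (~1/22) of the error cases: omit the uniqueKey field entirely,
        // so the whole add is rejected and reported against "(unknown)"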
        if (causeError && (0 == TestUtil.nextInt(random(), 0, 21))) {
          doc = doc(f("foo_s", "no unique key"));
          expectedErrors.add(addErr("(unknown)"));
        } else {
          final int id_i = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
          docsAffectedThisRequest.set(id_i);
          id = "id_" + id_i;
          if (causeError) {
            expectedErrors.add(addErr(id));
          } else {
            expectedDocIds.set(id_i);
          }
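          // "bogus_val" can't be parsed by the (presumably numeric) foo_i field, producing a per-doc error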
          final String val = causeError ? "bogus_val" : ("" + TestUtil.nextInt(random(), 42, 666));
          doc = doc(f("id", id), f("id_i", id_i), f("foo_i", val));
        }
        req.add(doc);
        log.info("ADD: {} = {}", id, doc);
      } else {
        // delete something
        if (random().nextBoolean()) {
          // delete by id
          final int id_i = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
          final String id = "id_" + id_i;
          final boolean docExists = expectedDocIds.get(id_i);
          docsAffectedThisRequest.set(id_i);
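          // optimistic concurrency: a version constraint of 1 asserts the doc exists,
          // while -1 asserts it does not; flipping the sign guarantees a version conflict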
          long versionConstraint = docExists ? 1 : -1;
          if (causeError) {
            versionConstraint = -1 * versionConstraint;
            expectedErrors.add(delIErr(id));
          } else {
            // if doc exists it will legitimately be deleted
            expectedDocIds.clear(id_i);
          }
          req.deleteById(id, versionConstraint);
          log.info("DEL: {} = {}", id, causeError ? "ERR" : "OK");
        } else {
          // delete by query
          final String q;
          if (causeError) {
            // even though our DBQ is gibberish that's guaranteed to fail, record a docId as affected
            // so that we don't generate the same random DBQ twice and get redundant errors
            // (problematic because of how DUP forwarded DBQs have to have their errors deduped by TUP)
            final int id_i = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
            docsAffectedThisRequest.set(id_i);
            q = "foo_i:[" + id_i + " TO ....giberish";
            expectedErrors.add(delQErr(q));
          } else {
            // ensure our DBQ only covers a range of docs not already affected
            // by any other cmds in this request
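            // pick an untouched id as the range's axis, then bound the range by the
            // nearest already-affected ids on either side of it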
            final int rangeAxis = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
            final int loBound = docsAffectedThisRequest.previousSetBit(rangeAxis);
            final int hiBound = docsAffectedThisRequest.nextSetBit(rangeAxis);
            final int lo = TestUtil.nextInt(random(), loBound + 1, rangeAxis);
            // hiBound might be negative if there are no set bits above the axis
            final int hi = TestUtil.nextInt(random(), rangeAxis, (hiBound < 0) ? maxDocId : hiBound - 1);
            if (lo != hi) {
              assert lo < hi : "lo=" + lo + " hi=" + hi;
              // NOTE: clear & set are exclusive of hi, so we use "}" in the range query accordingly
              q = "id_i:[" + lo + " TO " + hi + "}";
              expectedDocIds.clear(lo, hi);
              docsAffectedThisRequest.set(lo, hi);
            } else {
              // edge case: DBQ of exactly one doc
              assert (lo == rangeAxis && hi == rangeAxis) : "lo=" + lo + " axis=" + rangeAxis + " hi=" + hi;
              // have to be inclusive of both ends
              q = "id_i:[" + lo + " TO " + lo + "]";
              expectedDocIds.clear(lo);
              docsAffectedThisRequest.set(lo);
            }
          }
          req.deleteByQuery(q);
          log.info("DEL: {}", q);
        }
      }
    }
assertEquals("expected error count sanity check: " + req.toString(), expectedErrorsCount, expectedErrors.size());
final SolrClient client = random().nextBoolean() ? CLOUD_CLIENT : NODE_CLIENTS.get(TestUtil.nextInt(random(), 0, NODE_CLIENTS.size() - 1));
final UpdateResponse rsp = req.process(client);
assertUpdateTolerantErrors(client.toString() + " => " + expectedErrors.toString(), rsp, expectedErrors.toArray(new ExpectedErr[expectedErrors.size()]));
log.info("END ITER #{}, expecting #docs: {}", i, expectedDocIds.cardinality());
assertEquals("post update commit failed?", 0, CLOUD_CLIENT.commit().getStatus());
    for (int j = 0; j < 5; j++) {
      if (expectedDocIds.cardinality() == countDocs(CLOUD_CLIENT)) {
        break;
      }
      log.info("sleeping to give searchers a chance to re-open #{}", j);
      Thread.sleep(200);
    }
    // check the index contents against our expectations
    final BitSet actualDocIds = allDocs(CLOUD_CLIENT, maxDocId);
    if (expectedDocIds.cardinality() != actualDocIds.cardinality()) {
      log.error("cardinality mismatch: expected {} BUT actual {}", expectedDocIds.cardinality(), actualDocIds.cardinality());
    }
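    // XOR leaves a bit set at exactly the docIds where expectations and the index disagree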
    final BitSet x = (BitSet) actualDocIds.clone();
    x.xor(expectedDocIds);
    for (int b = x.nextSetBit(0); 0 <= b; b = x.nextSetBit(b + 1)) {
      final boolean expectedBit = expectedDocIds.get(b);
      final boolean actualBit = actualDocIds.get(b);
      log.error("bit #{} mismatch: expected {} BUT actual {}", b, expectedBit, actualBit);
    }
    assertEquals(x.cardinality() + " mismatched bits", 0, x.cardinality());
  }
}
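The addErr, delIErr, and delQErr calls above construct ExpectedErr instances recording which command should have failed. The real helpers are defined in TestTolerantUpdateProcessorCloud; the sketch below is only an illustrative approximation of their shape, and the field and label names are assumptions rather than the exact source.

// Illustrative sketch only -- see TestTolerantUpdateProcessorCloud for the real class;
// field names and type labels here are assumptions
public static final class ExpectedErr {
  final String type; // e.g. "ADD", "DELID", "DELQ"
  final String id;   // doc id, or the query string for a delete-by-query
  public ExpectedErr(String type, String id) {
    this.type = type;
    this.id = id;
  }
  @Override
  public String toString() {
    return type + "=" + id;
  }
}
public static ExpectedErr addErr(String id) { return new ExpectedErr("ADD", id); }
public static ExpectedErr delIErr(String id) { return new ExpectedErr("DELID", id); }
public static ExpectedErr delQErr(String q) { return new ExpectedErr("DELQ", q); }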
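The test also leans on a randomUnsetBit helper to pick a doc id untouched by any other command in the request. A minimal sketch of what such a helper might look like, assuming at least one bit in [0, max] is always unset:

import java.util.BitSet;
import java.util.Random;

// Sketch of a randomUnsetBit-style helper: returns a random index in [0, max]
// whose bit is clear. Assumes the BitSet is not completely full in that range.
public static int randomUnsetBit(Random r, BitSet bits, final int max) {
  assert bits.cardinality() <= max : "no unset bits left in [0, " + max + "]";
  final int candidate = r.nextInt(max + 1);
  if (!bits.get(candidate)) {
    return candidate;
  }
  // scan forward for the next clear bit, wrapping around to the start if needed
  int result = bits.nextClearBit(candidate);
  if (max < result) {
    result = bits.nextClearBit(0);
  }
  return result;
}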