Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by Apache.
From the class TestAddIndexes, method testMergeAfterCopy.
// case 4: tail segments, invariants hold, copy, invariants not hold
public void testMergeAfterCopy() throws IOException {
  // main directory
  Directory dir = newDirectory();
  // auxiliary directory
  Directory aux = newDirectory();
  setUpDirs(dir, aux, true);
  IndexWriterConfig dontMergeConfig = new IndexWriterConfig(new MockAnalyzer(random()))
      .setMergePolicy(NoMergePolicy.INSTANCE);
  IndexWriter writer = new IndexWriter(aux, dontMergeConfig);
  for (int i = 0; i < 20; i++) {
    writer.deleteDocuments(new Term("id", "" + i));
  }
  writer.close();
  IndexReader reader = DirectoryReader.open(aux);
  assertEquals(10, reader.numDocs());
  reader.close();
  writer = newWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
      .setOpenMode(OpenMode.APPEND)
      .setMaxBufferedDocs(4)
      .setMergePolicy(newLogMergePolicy(4)));
  if (VERBOSE) {
    System.out.println("\nTEST: now addIndexes");
  }
  writer.addIndexes(aux, new MockDirectoryWrapper(random(), TestUtil.ramCopyOf(aux)));
  assertEquals(1020, writer.maxDoc());
  assertEquals(1000, writer.maxDoc(0));
  writer.close();
  dir.close();
  aux.close();
}
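The interesting call above is new MockDirectoryWrapper(random(), TestUtil.ramCopyOf(aux)): addIndexes is handed an independent in-memory copy of aux, wrapped so the test framework can detect unclosed files and inject faults. A minimal sketch of just that wrapping step, using only the two test-framework calls shown above (the class and helper names are hypothetical):

import java.io.IOException;
import java.util.Random;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.TestUtil;

class RamCopySketch {
  // Copy every file of `source` into a fresh in-memory directory, then
  // decorate it with MockDirectoryWrapper so close() can verify that no
  // files were leaked and faults can be injected.
  static Directory wrapRamCopy(Directory source, Random random) throws IOException {
    return new MockDirectoryWrapper(random, TestUtil.ramCopyOf(source));
  }
}

Passing both aux and such a copy to addIndexes in one call, as the test does, verifies that the same segments can be added from two distinct Directory instances at once.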
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by Apache.
From the class TestPerSegmentDeletes, method testDeletes1.
public void testDeletes1() throws Exception {
  //IndexWriter.debug2 = System.out;
  Directory dir = new MockDirectoryWrapper(new Random(random().nextLong()), new RAMDirectory());
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  iwc.setMergeScheduler(new SerialMergeScheduler());
  iwc.setMaxBufferedDocs(5000);
  iwc.setRAMBufferSizeMB(100);
  RangeMergePolicy fsmp = new RangeMergePolicy(false);
  iwc.setMergePolicy(fsmp);
  IndexWriter writer = new IndexWriter(dir, iwc);
  for (int x = 0; x < 5; x++) {
    writer.addDocument(DocHelper.createDocument(x, "1", 2));
    //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
  }
  //System.out.println("commit1");
  writer.commit();
  assertEquals(1, writer.segmentInfos.size());
  for (int x = 5; x < 10; x++) {
    writer.addDocument(DocHelper.createDocument(x, "2", 2));
    //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
  }
  //System.out.println("commit2");
  writer.commit();
  assertEquals(2, writer.segmentInfos.size());
  for (int x = 10; x < 15; x++) {
    writer.addDocument(DocHelper.createDocument(x, "3", 2));
    //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
  }
  writer.deleteDocuments(new Term("id", "1"));
  writer.deleteDocuments(new Term("id", "11"));
  // flushing without applying deletes means
  // there will still be deletes in the segment infos
  writer.flush(false, false);
  assertTrue(writer.bufferedUpdatesStream.any());
  // getting a reader flushes pending deletes,
  // so there should not be any more
  IndexReader r1 = writer.getReader();
  assertFalse(writer.bufferedUpdatesStream.any());
  r1.close();
  // delete id:2 from the first segment,
  // then merge segments 0 and 1,
  // which should apply the delete of id:2
  writer.deleteDocuments(new Term("id", "2"));
  writer.flush(false, false);
  fsmp = (RangeMergePolicy) writer.getConfig().getMergePolicy();
  fsmp.doMerge = true;
  fsmp.start = 0;
  fsmp.length = 2;
  writer.maybeMerge();
  assertEquals(2, writer.segmentInfos.size());
  // id:2 shouldn't exist anymore because
  // it was applied in the merge and is now gone
  IndexReader r2 = writer.getReader();
  int[] id2docs = toDocsArray(new Term("id", "2"), null, r2);
  assertTrue(id2docs == null);
  r2.close();
  /**
  // added docs are in the ram buffer
  for (int x = 15; x < 20; x++) {
    writer.addDocument(TestIndexWriterReader.createDocument(x, "4", 2));
    System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
  }
  assertTrue(writer.numRamDocs() > 0);
  // delete from the ram buffer
  writer.deleteDocuments(new Term("id", Integer.toString(13)));
  Term id3 = new Term("id", Integer.toString(3));
  // delete from the 1st segment
  writer.deleteDocuments(id3);
  assertTrue(writer.numRamDocs() > 0);
  //System.out
  //    .println("segdels1:" + writer.docWriter.deletesToString());
  //assertTrue(writer.docWriter.segmentDeletes.size() > 0);
  // we cause a merge to happen
  fsmp.doMerge = true;
  fsmp.start = 0;
  fsmp.length = 2;
  System.out.println("maybeMerge " + writer.segmentInfos);
  SegmentInfo info0 = writer.segmentInfos.info(0);
  SegmentInfo info1 = writer.segmentInfos.info(1);
  writer.maybeMerge();
  System.out.println("maybeMerge after " + writer.segmentInfos);
  // there should be docs in RAM
  assertTrue(writer.numRamDocs() > 0);
  // assert we've merged the 1 and 2 segments
  // and still have a segment leftover == 2
  assertEquals(2, writer.segmentInfos.size());
  assertFalse(segThere(info0, writer.segmentInfos));
  assertFalse(segThere(info1, writer.segmentInfos));
  //System.out.println("segdels2:" + writer.docWriter.deletesToString());
  //assertTrue(writer.docWriter.segmentDeletes.size() > 0);
  IndexReader r = writer.getReader();
  IndexReader r1 = r.getSequentialSubReaders()[0];
  printDelDocs(r1.getLiveDocs());
  int[] docs = toDocsArray(id3, null, r);
  System.out.println("id3 docs:" + Arrays.toString(docs));
  // there shouldn't be any docs for id:3
  assertTrue(docs == null);
  r.close();
  part2(writer, fsmp);
  **/
  // System.out.println("segdels2:" + writer.docWriter.segmentDeletes.toString());
  //System.out.println("close");
  writer.close();
  dir.close();
}
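Most of the moving parts here (RangeMergePolicy, writer.flush(false, false), writer.bufferedUpdatesStream) are internal hooks visible only to tests in the same package; they let the test observe exactly when buffered deletes are applied. The same observable behavior can be sketched with public API alone. Everything below is a simplified illustration, not code from the test:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;

class BufferedDeleteSketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
    Document doc = new Document();
    doc.add(new StringField("id", "1", Field.Store.NO));
    writer.addDocument(doc);
    writer.commit();
    writer.deleteDocuments(new Term("id", "1")); // buffered, not yet applied
    DirectoryReader r = DirectoryReader.open(writer); // NRT open applies pending deletes
    System.out.println("numDocs=" + r.numDocs()); // prints 0
    r.close();
    writer.close();
    dir.close();
  }
}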
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by Apache.
From the class TestPersistentSnapshotDeletionPolicy, method testExistingSnapshots.
@Test
public void testExistingSnapshots() throws Exception {
  int numSnapshots = 3;
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriter writer = new IndexWriter(dir, getConfig(random(), getDeletionPolicy(dir)));
  PersistentSnapshotDeletionPolicy psdp =
      (PersistentSnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
  assertNull(psdp.getLastSaveFile());
  prepareIndexAndSnapshots(psdp, writer, numSnapshots);
  assertNotNull(psdp.getLastSaveFile());
  writer.close();
  // Make sure only 1 save file exists:
  int count = 0;
  for (String file : dir.listAll()) {
    if (file.startsWith(PersistentSnapshotDeletionPolicy.SNAPSHOTS_PREFIX)) {
      count++;
    }
  }
  assertEquals(1, count);
  // Make sure we fsync:
  dir.crash();
  dir.clearCrash();
  // Re-initialize and verify snapshots were persisted
  psdp = new PersistentSnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy(), dir, OpenMode.APPEND);
  writer = new IndexWriter(dir, getConfig(random(), psdp));
  psdp = (PersistentSnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
  assertEquals(numSnapshots, psdp.getSnapshots().size());
  assertEquals(numSnapshots, psdp.getSnapshotCount());
  assertSnapshotExists(dir, psdp, numSnapshots, false);
  writer.addDocument(new Document());
  writer.commit();
  snapshots.add(psdp.snapshot());
  assertEquals(numSnapshots + 1, psdp.getSnapshots().size());
  assertEquals(numSnapshots + 1, psdp.getSnapshotCount());
  assertSnapshotExists(dir, psdp, numSnapshots + 1, false);
  writer.close();
  dir.close();
}
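The crash()/clearCrash() pair is what makes the "Make sure we fsync" comment meaningful: crash() simulates power loss by corrupting or discarding anything that was not fsync'ed, so the snapshots file must have been durably persisted for the reopen to succeed. A stripped-down sketch of the same idiom, assuming a plain commit in place of the snapshot machinery:

import java.util.Random;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;

class CrashSketch {
  public static void main(String[] args) throws Exception {
    MockDirectoryWrapper dir = new MockDirectoryWrapper(new Random(0), new RAMDirectory());
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
    writer.addDocument(new Document());
    writer.commit(); // commit fsyncs the new index files
    writer.close();
    dir.crash();      // simulate power loss: unsynced writes are lost
    dir.clearCrash(); // allow the directory to be used again
    DirectoryReader r = DirectoryReader.open(dir);
    System.out.println("docs after crash: " + r.numDocs()); // 1: the committed doc survived
    r.close();
    dir.close();
  }
}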
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by Apache.
From the class TestPersistentSnapshotDeletionPolicy, method testExceptionDuringSave.
public void testExceptionDuringSave() throws Exception {
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(new MockDirectoryWrapper.Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      StackTraceElement[] trace = Thread.currentThread().getStackTrace();
      for (int i = 0; i < trace.length; i++) {
        if (PersistentSnapshotDeletionPolicy.class.getName().equals(trace[i].getClassName())
            && "persist".equals(trace[i].getMethodName())) {
          throw new IOException("now fail on purpose");
        }
      }
    }
  });
  IndexWriter writer = new IndexWriter(dir, getConfig(random(),
      new PersistentSnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy(), dir, OpenMode.CREATE_OR_APPEND)));
  writer.addDocument(new Document());
  writer.commit();
  PersistentSnapshotDeletionPolicy psdp =
      (PersistentSnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
  try {
    psdp.snapshot();
  } catch (IOException ioe) {
    if (ioe.getMessage().equals("now fail on purpose")) {
      // ok
    } else {
      throw ioe;
    }
  }
  assertEquals(0, psdp.getSnapshotCount());
  writer.close();
  assertEquals(1, DirectoryReader.listCommits(dir).size());
  dir.close();
}
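The anonymous Failure above is MockDirectoryWrapper's generic fault-injection hook: eval() is invoked on the file operations the wrapped directory sees, and this implementation walks the current stack so the IOException fires only while PersistentSnapshotDeletionPolicy.persist is executing. The same idiom, factored into a reusable class (the name FailOnMethod is hypothetical):

import java.io.IOException;
import org.apache.lucene.store.MockDirectoryWrapper;

// Fault injector: throw only when a chosen class and method are on the stack.
final class FailOnMethod extends MockDirectoryWrapper.Failure {
  private final String className;
  private final String methodName;

  FailOnMethod(String className, String methodName) {
    this.className = className;
    this.methodName = methodName;
  }

  @Override
  public void eval(MockDirectoryWrapper dir) throws IOException {
    for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
      if (className.equals(frame.getClassName()) && methodName.equals(frame.getMethodName())) {
        throw new IOException("now fail on purpose");
      }
    }
  }
}

With that class, dir.failOn(new FailOnMethod(PersistentSnapshotDeletionPolicy.class.getName(), "persist")) would behave like the inline version in the test.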
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by Apache.
From the class TestStressNRTReplication, method test.
@Nightly
public void test() throws Exception {
  Node.globalStartNS = System.nanoTime();
  message("change thread name from " + Thread.currentThread().getName());
  Thread.currentThread().setName("main");
  childTempDir = createTempDir("child");
  // We are parent process:
  // Silly bootstrapping:
  versionToTransLogLocation.put(0L, 0L);
  versionToMarker.put(0L, 0);
  int numNodes;
  if (NUM_NODES == null) {
    numNodes = TestUtil.nextInt(random(), 2, 10);
  } else {
    numNodes = NUM_NODES.intValue();
  }
  System.out.println("TEST: using " + numNodes + " nodes");
  transLogPath = createTempDir("NRTReplication").resolve("translog");
  transLog = new SimpleTransLog(transLogPath);
  //state.rateLimiters = new RateLimiter[numNodes];
  indexPaths = new Path[numNodes];
  nodes = new NodeProcess[numNodes];
  nodeTimeStamps = new long[numNodes];
  Arrays.fill(nodeTimeStamps, Node.globalStartNS);
  starting = new boolean[numNodes];
  for (int i = 0; i < numNodes; i++) {
    indexPaths[i] = createTempDir("index" + i);
  }
  Thread[] indexers = new Thread[TestUtil.nextInt(random(), 1, 3)];
  System.out.println("TEST: launch " + indexers.length + " indexer threads");
  for (int i = 0; i < indexers.length; i++) {
    indexers[i] = new IndexThread();
    indexers[i].setName("indexer" + i);
    indexers[i].setDaemon(true);
    indexers[i].start();
  }
  Thread[] searchers = new Thread[TestUtil.nextInt(random(), 1, 3)];
  System.out.println("TEST: launch " + searchers.length + " searcher threads");
  for (int i = 0; i < searchers.length; i++) {
    searchers[i] = new SearchThread();
    searchers[i].setName("searcher" + i);
    searchers[i].setDaemon(true);
    searchers[i].start();
  }
  Thread restarter = new RestartThread();
  restarter.setName("restarter");
  restarter.setDaemon(true);
  restarter.start();
  int runTimeSec;
  if (TEST_NIGHTLY) {
    runTimeSec = RANDOM_MULTIPLIER * TestUtil.nextInt(random(), 120, 240);
  } else {
    runTimeSec = RANDOM_MULTIPLIER * TestUtil.nextInt(random(), 45, 120);
  }
  System.out.println("TEST: will run for " + runTimeSec + " sec");
  long endTime = System.nanoTime() + runTimeSec * 1000000000L;
  sendReplicasToPrimary();
  while (failed.get() == false && System.nanoTime() < endTime) {
    // Wait a bit:
    Thread.sleep(TestUtil.nextInt(random(), Math.min(runTimeSec * 4, 200), runTimeSec * 4));
    if (primary != null && random().nextBoolean()) {
      NodeProcess curPrimary = primary;
      if (curPrimary != null) {
        // Save these before we start flush:
        long nextTransLogLoc = transLog.getNextLocation();
        int markerUptoSav = markerUpto.get();
        message("top: now flush primary; at least marker count=" + markerUptoSav);
        long result;
        try {
          result = primary.flush(markerUptoSav);
        } catch (Throwable t) {
          message("top: flush failed; skipping: " + t.getMessage());
          result = -1;
        }
        if (result > 0) {
          // There were changes
          message("top: flush finished with changes; new primary version=" + result);
          lastPrimaryVersion = result;
          addTransLogLoc(lastPrimaryVersion, nextTransLogLoc);
          addVersionMarker(lastPrimaryVersion, markerUptoSav);
        }
      }
    }
    StringBuilder sb = new StringBuilder();
    int liveCount = 0;
    for (int i = 0; i < nodes.length; i++) {
      NodeProcess node = nodes[i];
      if (node != null) {
        if (sb.length() != 0) {
          sb.append(" ");
        }
        liveCount++;
        if (node.isPrimary) {
          sb.append('P');
        } else {
          sb.append('R');
        }
        sb.append(i);
      }
    }
    message("PG=" + (primary == null ? "X" : primaryGen) + " " + liveCount + " (of " + nodes.length + ") nodes running: " + sb);
    if (random().nextInt(10) == 1) {
      NodeProcess node = nodes[random().nextInt(nodes.length)];
      if (node != null && node.nodeIsClosing.get() == false) {
        // TODO: if this node is primary, it means we committed an unpublished version (not exposed as an NRT point)... not sure it matters.
        // maybe we somehow allow IW to commit a specific sis (the one we just flushed)?
        message("top: now commit node=" + node);
        try {
          node.commitAsync();
        } catch (Throwable t) {
          message("top: hit exception during commit with R" + node.id + "; skipping");
          t.printStackTrace(System.out);
        }
      }
    }
  }
  message("TEST: top: test done, now close");
  stop.set(true);
  for (Thread thread : indexers) {
    thread.join();
  }
  for (Thread thread : searchers) {
    thread.join();
  }
  restarter.join();
  // Close replicas before primary so we cancel any in-progress replications:
  System.out.println("TEST: top: now close replicas");
  List<Closeable> toClose = new ArrayList<>();
  for (NodeProcess node : nodes) {
    if (node != primary && node != null) {
      toClose.add(node);
    }
  }
  IOUtils.close(toClose);
  IOUtils.close(primary);
  IOUtils.close(transLog);
  if (failed.get() == false) {
    message("TEST: top: now checkIndex");
    for (Path path : indexPaths) {
      message("TEST: check " + path);
      MockDirectoryWrapper dir = newMockFSDirectory(path);
      // Just too slow otherwise
      dir.setCrossCheckTermVectorsOnClose(false);
      dir.close();
    }
  } else {
    message("TEST: failed; skip checkIndex");
  }
}
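The closing loop looks like it does nothing, but MockDirectoryWrapper runs CheckIndex over the wrapped index when the directory is closed, so open-then-close amounts to a full integrity check of each node's index; the term-vector cross-check is disabled only because it is slow. A sketch of those knobs, assuming a LuceneTestCase context where newMockFSDirectory is available:

import java.nio.file.Path;
import org.apache.lucene.store.MockDirectoryWrapper;

// Open-and-close integrity check: close() runs CheckIndex on the index.
void checkIndexAt(Path path) throws Exception {
  MockDirectoryWrapper dir = newMockFSDirectory(path); // LuceneTestCase helper
  dir.setCheckIndexOnClose(true);             // the default: verify the index on close
  dir.setCrossCheckTermVectorsOnClose(false); // skip the slow term-vector cross-check
  dir.close();                                // fails the test if the index is corrupt
}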