Use of org.apache.lucene.index.SnapshotDeletionPolicy in project lucene-solr by apache.
From the class IndexRevisionTest, method testOpen:
@Test
public void testOpen() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter writer = new IndexWriter(dir, conf);
  try {
    writer.addDocument(new Document());
    writer.commit();
    Revision rev = new IndexRevision(writer);
    @SuppressWarnings("unchecked")
    Map<String, List<RevisionFile>> sourceFiles = rev.getSourceFiles();
    String source = sourceFiles.keySet().iterator().next();
    for (RevisionFile file : sourceFiles.values().iterator().next()) {
      IndexInput src = dir.openInput(file.fileName, IOContext.READONCE);
      InputStream in = rev.open(source, file.fileName);
      assertEquals(src.length(), in.available());
      byte[] srcBytes = new byte[(int) src.length()];
      byte[] inBytes = new byte[(int) src.length()];
      int offset = 0;
      if (random().nextBoolean()) {
        int skip = random().nextInt(10);
        if (skip >= src.length()) {
          skip = 0;
        }
        in.skip(skip);
        src.seek(skip);
        offset = skip;
      }
      src.readBytes(srcBytes, offset, srcBytes.length - offset);
      in.read(inBytes, offset, inBytes.length - offset);
      assertArrayEquals(srcBytes, inBytes);
      IOUtils.close(src, in);
    }
    writer.close();
  } finally {
    IOUtils.close(dir);
  }
}
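For context, IndexRevision is a thin wrapper over the SnapshotDeletionPolicy that the test installs on the IndexWriterConfig: creating a revision snapshots the latest commit, and opening a source file reads one of the snapshotted files. Below is a minimal sketch of that underlying snapshot/read/release cycle without the replicator classes; the method name and its comments are illustrative, and the usual Lucene imports (org.apache.lucene.index.*, org.apache.lucene.store.*) are assumed.

static void copyLatestCommit(Directory dir) throws Exception {
  SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(sdp);
  try (IndexWriter writer = new IndexWriter(dir, conf)) {
    writer.addDocument(new Document());
    writer.commit();
    IndexCommit commit = sdp.snapshot(); // pins the commit so its files cannot be deleted
    try {
      for (String fileName : commit.getFileNames()) {
        try (IndexInput in = dir.openInput(fileName, IOContext.READONCE)) {
          long length = in.length(); // stream 'length' bytes of 'fileName' to a replica here
        }
      }
    } finally {
      sdp.release(commit);        // un-pin the commit
      writer.deleteUnusedFiles(); // let the writer delete the now-unreferenced files
    }
  }
}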
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project lucene-solr by apache.
From the class IndexRevisionTest, method testNoCommit:
@Test
public void testNoCommit() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter writer = new IndexWriter(dir, conf);
  // should fail when there are no commits to snapshot
  expectThrows(IllegalStateException.class, () -> {
    new IndexRevision(writer);
  });
  writer.close();
  IOUtils.close(dir);
}
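The failure comes from the wrapped policy itself: SnapshotDeletionPolicy.snapshot() throws IllegalStateException when the index has no commits yet, and IndexRevision calls it in its constructor. A hedged sketch of the same check expressed directly at the deletion-policy level (not part of the test; newDirectory() and expectThrows() are the usual LuceneTestCase helpers):

SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriterConfig conf = new IndexWriterConfig(null);
conf.setIndexDeletionPolicy(sdp);
try (Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, conf)) {
  // no writer.commit() has happened, so there is no commit point to pin
  expectThrows(IllegalStateException.class, sdp::snapshot);
}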
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project lucene-solr by apache.
From the class IndexRevisionTest, method testRevisionRelease:
@Test
public void testRevisionRelease() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter writer = new IndexWriter(dir, conf);
  try {
    writer.addDocument(new Document());
    writer.commit();
    Revision rev1 = new IndexRevision(writer);
    // releasing that revision should not delete the files
    rev1.release();
    assertTrue(slowFileExists(dir, IndexFileNames.SEGMENTS + "_1"));
    // create revision again, so the files are snapshotted
    rev1 = new IndexRevision(writer);
    writer.addDocument(new Document());
    writer.commit();
    assertNotNull(new IndexRevision(writer));
    // this release should trigger the delete of segments_1
    rev1.release();
    assertFalse(slowFileExists(dir, IndexFileNames.SEGMENTS + "_1"));
  } finally {
    IOUtils.close(writer, dir);
  }
}
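What makes the second release() drop segments_1 is SnapshotDeletionPolicy's per-commit reference counting combined with IndexWriter.deleteUnusedFiles(), which IndexRevision.release() invokes internally. A hedged sketch of the same mechanism without the Revision wrapper:

// assuming 'writer' was opened with a SnapshotDeletionPolicy 'sdp' installed, as in the test above
writer.addDocument(new Document());
writer.commit();                      // creates segments_1
IndexCommit first = sdp.snapshot();   // pin segments_1
writer.addDocument(new Document());
writer.commit();                      // segments_2 becomes the live commit
sdp.release(first);                   // drop the only reference to segments_1
writer.deleteUnusedFiles();           // the writer may now delete segments_1 and its files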
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project lucene-solr by apache.
From the class SessionTokenTest, method testSerialization:
@Test
public void testSerialization() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter writer = new IndexWriter(dir, conf);
  writer.addDocument(new Document());
  writer.commit();
  Revision rev = new IndexRevision(writer);
  SessionToken session1 = new SessionToken("17", rev);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  session1.serialize(new DataOutputStream(baos));
  byte[] b = baos.toByteArray();
  SessionToken session2 = new SessionToken(new DataInputStream(new ByteArrayInputStream(b)));
  assertEquals(session1.id, session2.id);
  assertEquals(session1.version, session2.version);
  assertEquals(1, session2.sourceFiles.size());
  assertEquals(session1.sourceFiles.size(), session2.sourceFiles.size());
  assertEquals(session1.sourceFiles.keySet(), session2.sourceFiles.keySet());
  List<RevisionFile> files1 = session1.sourceFiles.values().iterator().next();
  List<RevisionFile> files2 = session2.sourceFiles.values().iterator().next();
  assertEquals(files1, files2);
  writer.close();
  IOUtils.close(dir);
}
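In the replicator module these session tokens are normally produced by a Replicator rather than constructed by hand: the server publishes an IndexRevision, and a client asks for an update and then pulls each listed file. A hedged sketch of that flow with LocalReplicator (variable names are illustrative; the writer is assumed to be configured with a SnapshotDeletionPolicy as in the tests above):

Replicator replicator = new LocalReplicator();
replicator.publish(new IndexRevision(writer));          // snapshot and expose the current commit

SessionToken session = replicator.checkForUpdate(null); // null = client has no version yet
for (Map.Entry<String, List<RevisionFile>> e : session.sourceFiles.entrySet()) {
  for (RevisionFile file : e.getValue()) {
    try (InputStream in = replicator.obtainFile(session.id, e.getKey(), file.fileName)) {
      // copy the stream into the replica's Directory
    }
  }
}
replicator.release(session.id); // tell the server this session no longer needs the revision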
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project elasticsearch by elastic.
From the class InternalEngineTests, method testConcurrentWritesAndCommits:
// this test writes documents to the engine while concurrently flushing/committing,
// and ensures that the commit points contain the correct sequence number data
public void testConcurrentWritesAndCommits() throws Exception {
    try (Store store = createStore();
         InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(),
             new SnapshotDeletionPolicy(NoDeletionPolicy.INSTANCE), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
        final int numIndexingThreads = scaledRandomIntBetween(3, 6);
        final int numDocsPerThread = randomIntBetween(500, 1000);
        final CyclicBarrier barrier = new CyclicBarrier(numIndexingThreads + 1);
        final List<Thread> indexingThreads = new ArrayList<>();
        // create N indexing threads to index documents simultaneously
        for (int threadNum = 0; threadNum < numIndexingThreads; threadNum++) {
            final int threadIdx = threadNum;
            Thread indexingThread = new Thread(() -> {
                try {
                    // wait for all threads to start at the same time
                    barrier.await();
                    // index random number of docs
                    for (int i = 0; i < numDocsPerThread; i++) {
                        final String id = "thread" + threadIdx + "#" + i;
                        ParsedDocument doc = testParsedDocument(id, "test", null, testDocument(), B_1, null);
                        engine.index(indexForDoc(doc));
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
            indexingThreads.add(indexingThread);
        }
        // start the indexing threads
        for (Thread thread : indexingThreads) {
            thread.start();
        }
        // wait for indexing threads to all be ready to start
        barrier.await();
        // create random commit points
        boolean doneIndexing;
        do {
            doneIndexing = indexingThreads.stream().filter(Thread::isAlive).count() == 0;
            //engine.flush(); // flush and commit
        } while (doneIndexing == false);
        // now, verify all the commits have the correct docs according to the user commit data
        long prevLocalCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED;
        long prevMaxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
        for (IndexCommit commit : DirectoryReader.listCommits(store.directory())) {
            Map<String, String> userData = commit.getUserData();
            long localCheckpoint = userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY)
                ? Long.parseLong(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY))
                : SequenceNumbersService.NO_OPS_PERFORMED;
            long maxSeqNo = userData.containsKey(SequenceNumbers.MAX_SEQ_NO)
                ? Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO))
                : SequenceNumbersService.UNASSIGNED_SEQ_NO;
            // local checkpoint and max seq no shouldn't go backwards
            assertThat(localCheckpoint, greaterThanOrEqualTo(prevLocalCheckpoint));
            assertThat(maxSeqNo, greaterThanOrEqualTo(prevMaxSeqNo));
            try (IndexReader reader = DirectoryReader.open(commit)) {
                FieldStats stats = SeqNoFieldMapper.SeqNoDefaults.FIELD_TYPE.stats(reader);
                final long highestSeqNo;
                if (stats != null) {
                    highestSeqNo = (long) stats.getMaxValue();
                } else {
                    highestSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
                }
                // make sure localCheckpoint <= highest seq no found <= maxSeqNo
                assertThat(highestSeqNo, greaterThanOrEqualTo(localCheckpoint));
                assertThat(highestSeqNo, lessThanOrEqualTo(maxSeqNo));
                // make sure all sequence numbers up to and including the local checkpoint are in the index
                FixedBitSet seqNosBitSet = getSeqNosSet(reader, highestSeqNo);
                for (int i = 0; i <= localCheckpoint; i++) {
                    assertTrue("local checkpoint [" + localCheckpoint + "], _seq_no [" + i + "] should be indexed",
                        seqNosBitSet.get(i));
                }
            }
            prevLocalCheckpoint = localCheckpoint;
            prevMaxSeqNo = maxSeqNo;
        }
    }
}
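The engine config here wraps NoDeletionPolicy.INSTANCE in the SnapshotDeletionPolicy so that every commit point is retained; with the default KeepOnlyLastCommitDeletionPolicy only the newest commit would survive, and the loop over DirectoryReader.listCommits could not compare checkpoints across successive commits. A hedged sketch of that retention behavior on a plain Lucene writer (the "gen" user-data key and the commit count are illustrative, not from the test):

SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(NoDeletionPolicy.INSTANCE);
IndexWriterConfig conf = new IndexWriterConfig(null);
conf.setIndexDeletionPolicy(sdp);
try (Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, conf)) {
    for (int i = 0; i < 3; i++) {
        writer.addDocument(new Document());
        // attach per-commit user data, analogous to the engine's sequence-number keys
        writer.setLiveCommitData(Collections.singletonMap("gen", Integer.toString(i)).entrySet());
        writer.commit();
    }
    // NoDeletionPolicy keeps every commit point, so all three are still listed
    List<IndexCommit> commits = DirectoryReader.listCommits(dir);
    assertEquals(3, commits.size());
    for (IndexCommit commit : commits) {
        String gen = commit.getUserData().get("gen"); // each commit's user data survives intact
    }
}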