Use of org.apache.lucene.index.SnapshotDeletionPolicy in project elasticsearch by elastic.
From the class StoreTests, method testUserDataRead:
public void testUserDataRead() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
    IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
    SnapshotDeletionPolicy deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    config.setIndexDeletionPolicy(deletionPolicy);
    IndexWriter writer = new IndexWriter(store.directory(), config);
    Document doc = new Document();
    doc.add(new TextField("id", "1", Field.Store.NO));
    writer.addDocument(doc);
    Map<String, String> commitData = new HashMap<>(2);
    String syncId = "a sync id";
    String translogId = "a translog id";
    commitData.put(Engine.SYNC_COMMIT_ID, syncId);
    commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogId);
    writer.setCommitData(commitData);
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(randomBoolean() ? null : deletionPolicy.snapshot());
    assertFalse(metadata.asMap().isEmpty());
    // do not check for correct files, we have enough tests for that above
    assertThat(metadata.getCommitUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId));
    assertThat(metadata.getCommitUserData().get(Translog.TRANSLOG_GENERATION_KEY), equalTo(translogId));
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, directoryService);
    IOUtils.close(store);
}
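The test snapshots a commit but relies on store teardown to clean up; in production code a snapshot must be paired with an explicit release, or the pinned files remain on disk indefinitely. A minimal sketch of the full lifecycle in plain Lucene (the index path and the class name are our own illustrative choices, not taken from the test):

import java.nio.file.Paths;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SnapshotLifecycleSketch {
    public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/index"))) {
            SnapshotDeletionPolicy policy =
                new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
            IndexWriterConfig config = new IndexWriterConfig(null).setIndexDeletionPolicy(policy);
            try (IndexWriter writer = new IndexWriter(dir, config)) {
                writer.commit(); // snapshot() throws IllegalStateException without a commit
                IndexCommit commit = policy.snapshot(); // pins this commit's files on disk
                try {
                    for (String fileName : commit.getFileNames()) {
                        System.out.println("pinned: " + fileName); // copy these somewhere safe
                    }
                } finally {
                    policy.release(commit); // un-pin; the files become deletable again
                }
            }
        }
    }
}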
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project neo4j by neo4j.
From the class LuceneDataSource, method listStoreFiles:
public ResourceIterator<File> listStoreFiles(boolean includeLogicalLogs) throws IOException {
    // Never include logical logs since they are of little importance
    final Collection<File> files = new ArrayList<>();
    final Collection<Pair<SnapshotDeletionPolicy, IndexCommit>> snapshots = new ArrayList<>();
    makeSureAllIndexesAreInstantiated();
    for (IndexReference writer : getAllIndexes()) {
        SnapshotDeletionPolicy deletionPolicy = (SnapshotDeletionPolicy) writer.getWriter().getConfig().getIndexDeletionPolicy();
        File indexDirectory = getFileDirectory(baseStorePath, writer.getIdentifier());
        IndexCommit commit;
        try {
            // Throws IllegalStateException if no commits yet
            commit = deletionPolicy.snapshot();
        } catch (IllegalStateException e) {
            /*
             * This happens when we try to snapshot an existing index that has
             * no commits yet: snapshot() throws instead of returning null,
             * even though the situation is not exceptional.
             *
             * For the time being we just do a commit and try again.
             */
            writer.getWriter().commit();
            commit = deletionPolicy.snapshot();
        }
        for (String fileName : commit.getFileNames()) {
            files.add(new File(indexDirectory, fileName));
        }
        snapshots.add(Pair.of(deletionPolicy, commit));
    }
    return new PrefetchingResourceIterator<File>() {
        private final Iterator<File> filesIterator = files.iterator();

        @Override
        protected File fetchNextOrNull() {
            return filesIterator.hasNext() ? filesIterator.next() : null;
        }

        @Override
        public void close() {
            for (Pair<SnapshotDeletionPolicy, IndexCommit> policyAndCommit : snapshots) {
                try {
                    policyAndCommit.first().release(policyAndCommit.other());
                } catch (IOException e) {
                    // TODO What to do?
                    e.printStackTrace();
                }
            }
        }
    };
}
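Two details here are worth calling out. First, the snapshots are released only in the iterator's close(), so the commit files stay pinned for as long as the caller is still copying them. Second, the catch block works around snapshot() throwing IllegalStateException on an index with no commits. A hedged sketch of that retry idiom as a standalone helper (the class and method names are ours, not Neo4j's):

import java.io.IOException;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SnapshotDeletionPolicy;

final class SnapshotUtil {
    // Snapshot the latest commit, forcing an initial commit if none exists yet.
    static IndexCommit snapshotOrCommitFirst(IndexWriter writer, SnapshotDeletionPolicy policy)
            throws IOException {
        try {
            return policy.snapshot();
        } catch (IllegalStateException e) {
            // snapshot() rejects an index with no commits, so create one and retry once.
            writer.commit();
            return policy.snapshot();
        }
    }
}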
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project lucene-solr by apache.
From the class IndexAndTaxonomyRevisionTest, method testSegmentsFileLast:
@Test
public void testSegmentsFileLast() throws Exception {
    Directory indexDir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(null);
    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
    IndexWriter indexWriter = new IndexWriter(indexDir, conf);
    Directory taxoDir = newDirectory();
    SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
    try {
        indexWriter.addDocument(newDocument(taxoWriter));
        indexWriter.commit();
        taxoWriter.commit();
        Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
        Map<String, List<RevisionFile>> sourceFiles = rev.getSourceFiles();
        assertEquals(2, sourceFiles.size());
        for (List<RevisionFile> files : sourceFiles.values()) {
            String lastFile = files.get(files.size() - 1).fileName;
            assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS));
        }
        indexWriter.close();
    } finally {
        IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
    }
}
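The ordering matters for replication: a consumer that copies the revision's files in list order writes every data file before the segments_N file, so a reader on the destination never sees a commit point that references files that have not arrived yet. A hedged sketch of that consumer-side invariant (copyToReplica is a hypothetical transport helper, not part of the library):

import java.util.List;
import org.apache.lucene.replicator.RevisionFile;

final class OrderedCopySketch {
    // Copy data files first; segments_N, last in the list, only becomes
    // visible once everything it references already exists on the replica.
    static void copyInOrder(List<RevisionFile> files) {
        for (RevisionFile file : files) {
            copyToReplica(file.fileName);
        }
    }

    static void copyToReplica(String fileName) {
        // hypothetical transport; a real replica would fetch and write the file here
    }
}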
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project lucene-solr by apache.
From the class IndexAndTaxonomyRevisionTest, method testOpen:
@Test
public void testOpen() throws Exception {
    Directory indexDir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(null);
    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
    IndexWriter indexWriter = new IndexWriter(indexDir, conf);
    Directory taxoDir = newDirectory();
    SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
    try {
        indexWriter.addDocument(newDocument(taxoWriter));
        indexWriter.commit();
        taxoWriter.commit();
        Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
        for (Entry<String, List<RevisionFile>> e : rev.getSourceFiles().entrySet()) {
            String source = e.getKey();
            // silly, both directories are closed in the end
            @SuppressWarnings("resource")
            Directory dir = source.equals(IndexAndTaxonomyRevision.INDEX_SOURCE) ? indexDir : taxoDir;
            for (RevisionFile file : e.getValue()) {
                IndexInput src = dir.openInput(file.fileName, IOContext.READONCE);
                InputStream in = rev.open(source, file.fileName);
                assertEquals(src.length(), in.available());
                byte[] srcBytes = new byte[(int) src.length()];
                byte[] inBytes = new byte[(int) src.length()];
                int offset = 0;
                if (random().nextBoolean()) {
                    int skip = random().nextInt(10);
                    if (skip >= src.length()) {
                        skip = 0;
                    }
                    in.skip(skip);
                    src.seek(skip);
                    offset = skip;
                }
                src.readBytes(srcBytes, offset, srcBytes.length - offset);
                in.read(inBytes, offset, inBytes.length - offset);
                assertArrayEquals(srcBytes, inBytes);
                IOUtils.close(src, in);
            }
        }
        indexWriter.close();
    } finally {
        IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
    }
}
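One caveat about the comparison above: the single in.read call happens to work against these revision streams, but in general InputStream.read may return fewer bytes than requested. A defensive sketch that loops until the requested range is filled (the class and helper names are ours):

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

final class StreamUtil {
    // Fill buf[offset .. offset+length) completely, looping because a single
    // InputStream.read call may return fewer bytes than requested.
    static void readFully(InputStream in, byte[] buf, int offset, int length)
            throws IOException {
        int done = 0;
        while (done < length) {
            int n = in.read(buf, offset + done, length - done);
            if (n < 0) {
                throw new EOFException("stream ended after " + done + " of " + length + " bytes");
            }
            done += n;
        }
    }
}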
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project lucene-solr by apache.
From the class IndexRevisionTest, method testOpen:
@Test
public void testOpen() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(null);
    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
    IndexWriter writer = new IndexWriter(dir, conf);
    try {
        writer.addDocument(new Document());
        writer.commit();
        Revision rev = new IndexRevision(writer);
        @SuppressWarnings("unchecked")
        Map<String, List<RevisionFile>> sourceFiles = rev.getSourceFiles();
        String source = sourceFiles.keySet().iterator().next();
        for (RevisionFile file : sourceFiles.values().iterator().next()) {
            IndexInput src = dir.openInput(file.fileName, IOContext.READONCE);
            InputStream in = rev.open(source, file.fileName);
            assertEquals(src.length(), in.available());
            byte[] srcBytes = new byte[(int) src.length()];
            byte[] inBytes = new byte[(int) src.length()];
            int offset = 0;
            if (random().nextBoolean()) {
                int skip = random().nextInt(10);
                if (skip >= src.length()) {
                    skip = 0;
                }
                in.skip(skip);
                src.seek(skip);
                offset = skip;
            }
            src.readBytes(srcBytes, offset, srcBytes.length - offset);
            in.read(inBytes, offset, inBytes.length - offset);
            assertArrayEquals(srcBytes, inBytes);
            IOUtils.close(src, in);
        }
        writer.close();
    } finally {
        IOUtils.close(dir);
    }
}
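Outside of tests, an IndexRevision is normally handed to the replicator framework rather than opened by hand. A hedged sketch of publishing one through a LocalReplicator (assumes the lucene-replicator module; the writer must be configured with a SnapshotDeletionPolicy, as in the tests above, or the IndexRevision constructor rejects it):

import java.io.IOException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.replicator.IndexRevision;
import org.apache.lucene.replicator.LocalReplicator;
import org.apache.lucene.replicator.Replicator;

final class PublishSketch {
    // Publish a snapshot of the writer's latest commit; replication clients
    // can then poll the replicator for updates and pull the revision's files.
    static void publishCurrentCommit(IndexWriter writer) throws IOException {
        Replicator replicator = new LocalReplicator();
        replicator.publish(new IndexRevision(writer));
    }
}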