Use of org.apache.lucene.index.SnapshotDeletionPolicy in project elasticsearch by elastic.
The class RefreshListenersTests, method setupListeners.
@Before
public void setupListeners() throws Exception {
    // Setup dependencies of the listeners
    maxListeners = randomIntBetween(1, 1000);
    // Immediately run listeners rather than adding them to the listener thread pool like IndexShard does to simplify the test.
    listeners = new RefreshListeners(() -> maxListeners, () -> engine.refresh("too-many-listeners"),
            Runnable::run, logger);
    // Now setup the InternalEngine which is much more complicated because we aren't mocking anything
    threadPool = new TestThreadPool(getTestName());
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY);
    ShardId shardId = new ShardId(new Index("index", "_na_"), 1);
    Directory directory = newDirectory();
    DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
        @Override
        public Directory newDirectory() throws IOException {
            return directory;
        }
    };
    store = new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
    IndexWriterConfig iwc = newIndexWriterConfig();
    TranslogConfig translogConfig = new TranslogConfig(shardId, createTempDir("translog"), indexSettings,
            BigArrays.NON_RECYCLING_INSTANCE);
    Engine.EventListener eventListener = new Engine.EventListener() {
        @Override
        public void onFailedEngine(String reason, @Nullable Exception e) {
            // we don't need to notify anybody in this test
        }
    };
    TranslogHandler translogHandler = new TranslogHandler(xContentRegistry(), shardId.getIndexName(), logger);
    EngineConfig config = new EngineConfig(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, shardId, threadPool,
            indexSettings, null, store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()),
            newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener,
            translogHandler, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(),
            translogConfig, TimeValue.timeValueMinutes(5), listeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
    engine = new InternalEngine(config);
    listeners.setTranslog(engine.getTranslog());
}
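This configuration wraps the default KeepOnlyLastCommitDeletionPolicy in a SnapshotDeletionPolicy, which lets the engine pin a commit point while its files are read (for recovery or backup) and release it afterwards. A minimal, self-contained sketch of that core API, assuming a Lucene 6.x-era setup close to what this Elasticsearch version ships (SnapshotPolicyDemo and the temp directory are illustrative only, not Elasticsearch code):

import java.nio.file.Files;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SnapshotPolicyDemo {
    public static void main(String[] args) throws Exception {
        // Same wrapping as in the EngineConfig above: the base policy decides which commits
        // to keep, the SnapshotDeletionPolicy adds the ability to pin one temporarily.
        SnapshotDeletionPolicy snapshotPolicy =
                new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
        try (Directory dir = FSDirectory.open(Files.createTempDirectory("snapshot-demo"));
             IndexWriter writer = new IndexWriter(dir,
                     new IndexWriterConfig(new StandardAnalyzer()).setIndexDeletionPolicy(snapshotPolicy))) {
            Document doc = new Document();
            doc.add(new StringField("id", "1", Field.Store.YES));
            writer.addDocument(doc);
            writer.commit();

            IndexCommit commit = snapshotPolicy.snapshot();  // pin the latest commit point
            try {
                System.out.println(commit.getFileNames());   // these files are safe to copy while pinned
            } finally {
                snapshotPolicy.release(commit);              // allow the commit to be deleted again
            }
        }
    }
}

Until release(commit) is called, Lucene will not delete any file belonging to the pinned commit, even as the writer keeps indexing, merging, and committing.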
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project graphdb by neo4j-attic.
The class LuceneDataSource, method getIndexSearcher.
synchronized IndexSearcherRef getIndexSearcher(IndexIdentifier identifier, boolean incRef) {
    try {
        IndexSearcherRef searcher = indexSearchers.get(identifier);
        if (searcher == null) {
            IndexWriter writer = getIndexWriter(identifier);
            IndexReader reader = writer.getReader();
            IndexSearcher indexSearcher = new IndexSearcher(reader);
            searcher = new IndexSearcherRef(identifier, indexSearcher);
            indexSearchers.put(identifier, searcher);
        } else {
            Triplet<IndexWriter, AtomicBoolean, SnapshotDeletionPolicy> writer = indexWriters.get(identifier);
            if (writer != null && writer.second().compareAndSet(true, false)) {
                searcher = refreshSearcher(searcher);
                if (searcher != null) {
                    indexSearchers.put(identifier, searcher);
                }
            }
        }
        if (incRef) {
            searcher.incRef();
        }
        return searcher;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
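writer.getReader() here is the old Lucene 2.9/3.x near-real-time reader API; the surrounding logic re-opens the searcher only when the dirty flag in the Triplet has been set by a write. In later Lucene versions the same refresh-on-demand pattern is usually written against DirectoryReader, roughly as in this sketch (NrtSearcherHolder is an invented name, not neo4j code):

import java.io.IOException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;

class NrtSearcherHolder {
    private DirectoryReader reader;
    private IndexSearcher searcher;

    NrtSearcherHolder(IndexWriter writer) throws IOException {
        reader = DirectoryReader.open(writer);    // near-real-time reader: sees uncommitted changes
        searcher = new IndexSearcher(reader);
    }

    synchronized IndexSearcher refreshIfChanged(IndexWriter writer) throws IOException {
        DirectoryReader newReader = DirectoryReader.openIfChanged(reader, writer);
        if (newReader != null) {                  // null means nothing has changed since the last open
            reader.close();
            reader = newReader;
            searcher = new IndexSearcher(reader);
        }
        return searcher;
    }
}

In current Lucene, SearcherManager does this reopen-and-swap bookkeeping (including reference counting of the old searcher), so the sketch is only meant to show the shape of the logic above.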
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project graphdb by neo4j-attic.
The class LuceneDataSource, method getIndexWriter.
synchronized IndexWriter getIndexWriter(IndexIdentifier identifier) {
    Triplet<IndexWriter, AtomicBoolean, SnapshotDeletionPolicy> writer = indexWriters.get(identifier);
    if (writer != null) {
        return writer.first();
    }
    try {
        Directory dir = getDirectory(baseStorePath, identifier);
        directoryExists(dir);
        IndexType type = getType(identifier);
        SnapshotDeletionPolicy deletionPolicy = new MultipleBackupDeletionPolicy();
        IndexWriter indexWriter = new IndexWriter(dir, type.analyzer, deletionPolicy, MaxFieldLength.UNLIMITED);
        writer = Triplet.of(indexWriter, new AtomicBoolean(), deletionPolicy);
        Similarity similarity = type.getSimilarity();
        if (similarity != null) {
            writer.first().setSimilarity(similarity);
        }
        // TODO We should tamper with this value and see how it affects the
        // general performance. Lucene docs says rather <10 for mixed
        // reads/writes
        // writer.setMergeFactor( 8 );
        indexWriters.put(identifier, writer);
        return writer.first();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
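The four-argument IndexWriter constructor and MaxFieldLength are Lucene 3.0-era API, and MultipleBackupDeletionPolicy is neo4j's own SnapshotDeletionPolicy subclass. For orientation, a rough equivalent using the IndexWriterConfig API of Lucene 5+ (a plain SnapshotDeletionPolicy stands in for the neo4j subclass; WriterFactorySketch and openWriter are illustrative names, not part of either project):

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.store.Directory;

final class WriterFactorySketch {
    static IndexWriter openWriter(Directory dir, Analyzer analyzer, Similarity similarityOrNull) throws IOException {
        // Snapshot-capable deletion policy, playing the role of MultipleBackupDeletionPolicy above.
        SnapshotDeletionPolicy deletionPolicy =
                new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
        IndexWriterConfig config = new IndexWriterConfig(analyzer)
                .setIndexDeletionPolicy(deletionPolicy);
        if (similarityOrNull != null) {
            config.setSimilarity(similarityOrNull);  // optional per-index scoring, like type.getSimilarity()
        }
        return new IndexWriter(dir, config);
    }
}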
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project graphdb by neo4j-attic.
The class LuceneDataSource, method listStoreFiles.
@Override
public ClosableIterable<File> listStoreFiles() throws IOException {
    final Collection<File> files = new ArrayList<File>();
    final Collection<SnapshotDeletionPolicy> snapshots = new ArrayList<SnapshotDeletionPolicy>();
    for (Map.Entry<IndexIdentifier, Triplet<IndexWriter, AtomicBoolean, SnapshotDeletionPolicy>> writer : indexWriters.entrySet()) {
        SnapshotDeletionPolicy deletionPolicy = writer.getValue().third();
        File indexDirectory = getFileDirectory(baseStorePath, writer.getKey());
        for (String fileName : deletionPolicy.snapshot().getFileNames()) {
            files.add(new File(indexDirectory, fileName));
        }
        snapshots.add(deletionPolicy);
    }
    files.add(providerStore.getFile());
    return new ClosableIterable<File>() {
        public Iterator<File> iterator() {
            return files.iterator();
        }

        public void close() {
            for (SnapshotDeletionPolicy deletionPolicy : snapshots) {
                deletionPolicy.release();
            }
        }
    };
}
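The ClosableIterable contract is what makes this safe: each index's snapshot is only released in close(), so a caller has to finish copying before closing the iterable. A hypothetical caller could look like the sketch below (StoreBackupSketch and backupTo are invented names; imports of neo4j's own LuceneDataSource and ClosableIterable types are omitted, and the copy flattens the per-index directory layout for brevity):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

final class StoreBackupSketch {
    static void backupTo(LuceneDataSource dataSource, Path backupDir) throws IOException {
        Files.createDirectories(backupDir);
        // listStoreFiles() holds a SnapshotDeletionPolicy snapshot per index until close(),
        // so none of the listed files can be deleted by merges while this loop runs.
        ClosableIterable<File> storeFiles = dataSource.listStoreFiles();
        try {
            for (File file : storeFiles) {
                Files.copy(file.toPath(), backupDir.resolve(file.getName()),
                        StandardCopyOption.REPLACE_EXISTING);
            }
        } finally {
            storeFiles.close();  // releases the snapshots, letting Lucene delete old commits again
        }
    }
}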
Use of org.apache.lucene.index.SnapshotDeletionPolicy in project neo4j-mobile-android by neo4j-contrib.
The class LuceneDataSource, method listStoreFiles.
@Override
public ClosableIterable<File> listStoreFiles(boolean includeLogicalLogs) throws IOException {
    // Never include logical logs since they are of little importance
    final Collection<File> files = new ArrayList<File>();
    final Collection<SnapshotDeletionPolicy> snapshots = new ArrayList<SnapshotDeletionPolicy>();
    makeSureAllIndexesAreInstantiated();
    for (Map.Entry<IndexIdentifier, Pair<IndexWriter, AtomicBoolean>> writer : indexWriters.entrySet()) {
        SnapshotDeletionPolicy deletionPolicy =
                (SnapshotDeletionPolicy) writer.getValue().first().getConfig().getIndexDeletionPolicy();
        File indexDirectory = getFileDirectory(baseStorePath, writer.getKey());
        try {
            // Throws IllegalStateException if no commits yet
            IndexCommit commit = deletionPolicy.snapshot(SNAPSHOT_ID);
            for (String fileName : commit.getFileNames()) {
                files.add(new File(indexDirectory, fileName));
            }
            snapshots.add(deletionPolicy);
        } catch (IllegalStateException e) {
            // TODO Review this
            /*
             * This is insane but happens if we try to snapshot an existing index
             * that has no commits. This is a bad API design - it should return null
             * or something. This is not exceptional.
             */
        }
    }
    files.add(providerStore.getFile());
    return new ClosableIterable<File>() {
        public Iterator<File> iterator() {
            return files.iterator();
        }

        public void close() {
            for (SnapshotDeletionPolicy deletionPolicy : snapshots) {
                try {
                    deletionPolicy.release(SNAPSHOT_ID);
                } catch (IOException e) {
                    // TODO What to do?
                    e.printStackTrace();
                }
            }
        }
    };
}
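Compared with the previous variant, this one uses the named-snapshot methods snapshot(String) and release(String) of Lucene 3.1-3.4 and fetches the policy back from the writer's IndexWriterConfig. The essential shape is hold, use, release; condensed into a reusable sketch under those same API assumptions (NamedSnapshotSketch and CommitAction are invented for illustration):

import java.io.IOException;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SnapshotDeletionPolicy;

final class NamedSnapshotSketch {
    interface CommitAction {
        void apply(IndexCommit commit) throws IOException;
    }

    // Pins the writer's current commit under the given id, runs the action, then releases the pin.
    static void withNamedSnapshot(IndexWriter writer, String snapshotId, CommitAction action) throws IOException {
        SnapshotDeletionPolicy deletionPolicy =
                (SnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        IndexCommit commit = deletionPolicy.snapshot(snapshotId);  // IllegalStateException if the index has no commit yet
        try {
            action.apply(commit);  // e.g. copy commit.getFileNames() out of the index directory
        } finally {
            deletionPolicy.release(snapshotId);  // must be released, or the old commit stays on disk
        }
    }
}

As in the code above, anything that needs the pinned files has to happen inside the try block; once release(snapshotId) runs, Lucene is free to delete the commit.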