Use of org.apache.lucene.store.NRTCachingDirectory in project jackrabbit-oak by apache.
The class NRTIndex, method createWriter:
private synchronized NRTIndexWriter createWriter() throws IOException {
    String dirName = generateDirName();
    indexDir = indexCopier.getIndexDir(definition, definition.getIndexPath(), dirName);
    Directory fsdir = FSDirectory.open(indexDir);
    //TODO make these configurable
    directory = new NRTCachingDirectory(fsdir, 1, 1);
    IndexWriterConfig config = IndexWriterUtils.getIndexWriterConfig(definition, false);
    //TODO Explore following for optimizing indexing speed
    //config.setUseCompoundFile(false);
    //config.setRAMBufferSizeMB(1024*1024*25);
    indexWriter = new IndexWriter(directory, config);
    return new NRTIndexWriter(indexWriter);
}
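For context, a minimal standalone sketch of the same wiring outside Oak (the class name, index path, and the Path-based FSDirectory.open signature are illustrative assumptions; the 1.0 MB thresholds mirror the hard-coded values above). The two double arguments to NRTCachingDirectory are maxMergeSizeMB and maxCachedMB: segments expected to stay under the first, within a RAM budget given by the second, are buffered in memory instead of hitting disk.

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.NRTCachingDirectory;

public class NrtCachingSetupSketch {
    public static void main(String[] args) throws Exception {
        // Open the on-disk directory (Lucene 5+ signature; Oak's version may take a File).
        Directory fsDir = FSDirectory.open(Paths.get("/tmp/nrt-index"));
        // Cache new segments up to 1.0 MB each, capping total cache RAM at 1.0 MB,
        // matching the hard-coded values flagged by the TODO above.
        Directory dir = new NRTCachingDirectory(fsDir, 1.0, 1.0);
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
        try (IndexWriter writer = new IndexWriter(dir, config)) {
            writer.commit();
        }
        dir.close();
    }
}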
Use of org.apache.lucene.store.NRTCachingDirectory in project lucene-solr by apache.
The class TestControlledRealTimeReopenThread, method testCRTReopen:
// Relies on wall clock time, so it can easily false-fail when the machine is otherwise busy:
@AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-5737")
// LUCENE-5461
public void testCRTReopen() throws Exception {
    //test behaving badly

    //should be high enough
    int maxStaleSecs = 20;

    //build crap data just to store it.
    String s = " abcdefghijklmnopqrstuvwxyz ";
    char[] chars = s.toCharArray();
    StringBuilder builder = new StringBuilder(2048);
    for (int i = 0; i < 2048; i++) {
        builder.append(chars[random().nextInt(chars.length)]);
    }
    String content = builder.toString();
    final SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    final Directory dir = new NRTCachingDirectory(newFSDirectory(createTempDir("nrt")), 5, 128);
    IndexWriterConfig config = new IndexWriterConfig(new MockAnalyzer(random()));
    config.setCommitOnClose(true);
    config.setIndexDeletionPolicy(sdp);
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    final IndexWriter iw = new IndexWriter(dir, config);
    SearcherManager sm = new SearcherManager(iw, new SearcherFactory());
    ControlledRealTimeReopenThread<IndexSearcher> controlledRealTimeReopenThread =
        new ControlledRealTimeReopenThread<>(iw, sm, maxStaleSecs, 0);
    controlledRealTimeReopenThread.setDaemon(true);
    controlledRealTimeReopenThread.start();
    List<Thread> commitThreads = new ArrayList<>();
    for (int i = 0; i < 500; i++) {
        if (i > 0 && i % 50 == 0) {
            Thread commitThread = new Thread(new Runnable() {
                @Override
                public void run() {
                    try {
                        iw.commit();
                        IndexCommit ic = sdp.snapshot();
                        for (String name : ic.getFileNames()) {
                            //distribute, and backup
                            //System.out.println(names);
                            assertTrue(slowFileExists(dir, name));
                        }
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            });
            commitThread.start();
            commitThreads.add(commitThread);
        }
        Document d = new Document();
        d.add(new TextField("count", i + "", Field.Store.NO));
        d.add(new TextField("content", content, Field.Store.YES));
        long start = System.currentTimeMillis();
        long l = iw.addDocument(d);
        controlledRealTimeReopenThread.waitForGeneration(l);
        long wait = System.currentTimeMillis() - start;
        assertTrue("waited too long for generation " + wait, wait < (maxStaleSecs * 1000));
        IndexSearcher searcher = sm.acquire();
        TopDocs td = searcher.search(new TermQuery(new Term("count", i + "")), 10);
        sm.release(searcher);
        assertEquals(1, td.totalHits);
    }
    for (Thread commitThread : commitThreads) {
        commitThread.join();
    }
    controlledRealTimeReopenThread.close();
    sm.close();
    iw.close();
    dir.close();
}
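The test above stresses timing under concurrent commits; the core pattern it relies on is much simpler. A stripped-down sketch (StandardAnalyzer, the field names, and the 1.0/0.1 s staleness targets are illustrative substitutions for the test's values): addDocument returns a generation number, and waitForGeneration blocks until the reopen thread has published a searcher that covers it.

import java.nio.file.Files;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.ControlledRealTimeReopenThread;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SearcherFactory;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.NRTCachingDirectory;

public class WaitForGenerationSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new NRTCachingDirectory(
            FSDirectory.open(Files.createTempDirectory("nrt")), 5.0, 128.0);
        IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        SearcherManager sm = new SearcherManager(iw, new SearcherFactory());
        // Reopen at most every 1.0 s when idle, every 0.1 s when a caller is waiting.
        ControlledRealTimeReopenThread<IndexSearcher> crt =
            new ControlledRealTimeReopenThread<>(iw, sm, 1.0, 0.1);
        crt.setDaemon(true);
        crt.start();

        Document doc = new Document();
        doc.add(new TextField("id", "42", Field.Store.NO));
        long gen = iw.addDocument(doc);  // generation that covers this document
        crt.waitForGeneration(gen);      // block until a searcher sees that generation

        IndexSearcher searcher = sm.acquire();
        try {
            // expected: 1 hit (totalHits' type varies by Lucene version, so just print it)
            System.out.println("hits = " + searcher.search(new TermQuery(new Term("id", "42")), 1).totalHits);
        } finally {
            sm.release(searcher);
        }
        crt.close();
        sm.close();
        iw.close();
        dir.close();
    }
}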
Use of org.apache.lucene.store.NRTCachingDirectory in project lucene-solr by apache.
The class HdfsWriteToMultipleCollectionsTest, method test:
@Test
public void test() throws Exception {
    int docCount = random().nextInt(1313) + 1;
    int cnt = random().nextInt(4) + 1;
    for (int i = 0; i < cnt; i++) {
        createCollection(ACOLLECTION + i, 2, 2, 9);
    }
    for (int i = 0; i < cnt; i++) {
        waitForRecoveriesToFinish(ACOLLECTION + i, false);
    }
    List<CloudSolrClient> cloudClients = new ArrayList<>();
    List<StoppableIndexingThread> threads = new ArrayList<>();
    for (int i = 0; i < cnt; i++) {
        CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress());
        client.setDefaultCollection(ACOLLECTION + i);
        cloudClients.add(client);
        StoppableIndexingThread indexThread = new StoppableIndexingThread(null, client, "1", true, docCount, 1, true);
        threads.add(indexThread);
        indexThread.start();
    }
    int addCnt = 0;
    for (StoppableIndexingThread thread : threads) {
        thread.join();
        addCnt += thread.getNumAdds() - thread.getNumDeletes();
    }
    long collectionsCount = 0;
    for (CloudSolrClient client : cloudClients) {
        client.commit();
        collectionsCount += client.query(new SolrQuery("*:*")).getResults().getNumFound();
    }
    IOUtils.close(cloudClients);
    assertEquals(addCnt, collectionsCount);
    BlockCache lastBlockCache = null;
    // assert that we are using the block directory and that read caching is enabled
    // (write caching was disabled by SOLR-6424; see the assertFalse below)
    for (JettySolrRunner jetty : jettys) {
        CoreContainer cores = jetty.getCoreContainer();
        Collection<SolrCore> solrCores = cores.getCores();
        for (SolrCore core : solrCores) {
            if (core.getCoreDescriptor().getCloudDescriptor().getCollectionName().startsWith(ACOLLECTION)) {
                DirectoryFactory factory = core.getDirectoryFactory();
                assertTrue("Found: " + core.getDirectoryFactory().getClass().getName(), factory instanceof HdfsDirectoryFactory);
                Directory dir = factory.get(core.getDataDir(), null, null);
                try {
                    long dataDirSize = factory.size(dir);
                    FileSystem fileSystem = FileSystem.newInstance(new Path(core.getDataDir()).toUri(), new Configuration());
                    long size = fileSystem.getContentSummary(new Path(core.getDataDir())).getLength();
                    assertEquals(size, dataDirSize);
                } finally {
                    core.getDirectoryFactory().release(dir);
                }
                RefCounted<IndexWriter> iwRef = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
                try {
                    IndexWriter iw = iwRef.get();
                    NRTCachingDirectory directory = (NRTCachingDirectory) iw.getDirectory();
                    BlockDirectory blockDirectory = (BlockDirectory) directory.getDelegate();
                    assertTrue(blockDirectory.isBlockCacheReadEnabled());
                    // see SOLR-6424
                    assertFalse(blockDirectory.isBlockCacheWriteEnabled());
                    Cache cache = blockDirectory.getCache();
                    // we know it's a BlockDirectoryCache, but future proof
                    assertTrue(cache instanceof BlockDirectoryCache);
                    BlockCache blockCache = ((BlockDirectoryCache) cache).getBlockCache();
                    if (lastBlockCache != null) {
                        if (Boolean.getBoolean(SOLR_HDFS_BLOCKCACHE_GLOBAL)) {
                            assertEquals(lastBlockCache, blockCache);
                        } else {
                            assertNotSame(lastBlockCache, blockCache);
                        }
                    }
                    lastBlockCache = blockCache;
                } finally {
                    iwRef.decref();
                }
            }
        }
    }
}
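The unwrap chain in the middle of that test — iw.getDirectory() cast to NRTCachingDirectory, then getDelegate() — works for any wrapped directory, not just Solr's BlockDirectory. A minimal sketch with a plain FSDirectory standing in for the HDFS stack (class and variable names illustrative):

import java.nio.file.Files;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.NRTCachingDirectory;

public class UnwrapDelegateSketch {
    public static void main(String[] args) throws Exception {
        Directory fsDir = FSDirectory.open(Files.createTempDirectory("unwrap"));
        NRTCachingDirectory nrtDir = new NRTCachingDirectory(fsDir, 4.0, 48.0);
        IndexWriter iw = new IndexWriter(nrtDir, new IndexWriterConfig(new StandardAnalyzer()));
        // IndexWriter hands back the directory it was opened on...
        NRTCachingDirectory fromWriter = (NRTCachingDirectory) iw.getDirectory();
        // ...and getDelegate() exposes whatever the caching layer wraps.
        System.out.println(fromWriter.getDelegate() == fsDir); // expected: true
        iw.close();
        nrtDir.close();
    }
}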
Use of org.apache.lucene.store.NRTCachingDirectory in project lucene-solr by apache.
The class LuceneTestCase, method wrapDirectory:
private static BaseDirectoryWrapper wrapDirectory(Random random, Directory directory, boolean bare) {
    if (rarely(random) && !bare) {
        directory = new NRTCachingDirectory(directory, random.nextDouble(), random.nextDouble());
    }
    if (bare) {
        BaseDirectoryWrapper base = new RawDirectoryWrapper(directory);
        closeAfterSuite(new CloseableDirectory(base, suiteFailureMarker));
        return base;
    } else {
        MockDirectoryWrapper mock = new MockDirectoryWrapper(random, directory);
        mock.setThrottling(TEST_THROTTLING);
        closeAfterSuite(new CloseableDirectory(mock, suiteFailureMarker));
        return mock;
    }
}
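Both random.nextDouble() calls land in [0, 1), so the wrapper occasionally runs with sub-megabyte maxMergeSizeMB and maxCachedMB thresholds, exercising the paths where writes skip or are quickly evicted from the RAM cache. A simplified standalone version of the same idea (a hypothetical helper with an assumed ~10% wrap rate, not LuceneTestCase's actual code):

import java.util.Random;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.NRTCachingDirectory;

public final class DirWrapUtil {
    /** Occasionally wrap a directory in NRTCachingDirectory with random thresholds. */
    static Directory maybeWrap(Random random, Directory dir) {
        if (random.nextInt(10) == 0) { // stand-in for LuceneTestCase's rarely()
            // nextDouble() is in [0, 1): tiny maxMergeSizeMB/maxCachedMB values
            // stress the cache's size-based bypass and eviction logic.
            dir = new NRTCachingDirectory(dir, random.nextDouble(), random.nextDouble());
        }
        return dir;
    }
}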
Use of org.apache.lucene.store.NRTCachingDirectory in project lucene-solr by apache.
The class BaseCompoundFormatTestCase, method testLargeCFS:
// LUCENE-5724: actually test we play nice with NRTCachingDir and massive file
public void testLargeCFS() throws IOException {
    final String testfile = "_123.test";
    IOContext context = new IOContext(new FlushInfo(0, 512 * 1024 * 1024));
    Directory dir = new NRTCachingDirectory(newFSDirectory(createTempDir()), 2.0, 25.0);
    SegmentInfo si = newSegmentInfo(dir, "_123");
    try (IndexOutput out = dir.createOutput(testfile, context)) {
        CodecUtil.writeIndexHeader(out, "Foo", 0, si.getId(), "suffix");
        byte[] bytes = new byte[512];
        for (int i = 0; i < 1024 * 1024; i++) {
            out.writeBytes(bytes, 0, bytes.length);
        }
        CodecUtil.writeFooter(out);
    }
    si.setFiles(Collections.singleton(testfile));
    si.getCodec().compoundFormat().write(dir, si, context);
    dir.close();
}
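What the test guards: NRTCachingDirectory decides per file, based on the IOContext's FlushInfo/MergeInfo size estimate, whether to buffer in RAM or write straight through, so a ~512 MB file must bypass a 2 MB/25 MB cache without breaking compound-file writing. A minimal sketch of that bypass expectation (file name and temp path illustrative, and the direct-to-delegate behavior is an assumption drawn from the thresholds, not from the test's code):

import java.nio.file.Files;
import java.util.Arrays;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.NRTCachingDirectory;

public class LargeWriteBypassSketch {
    public static void main(String[] args) throws Exception {
        FSDirectory fsDir = FSDirectory.open(Files.createTempDirectory("cfs"));
        // Cache only files expected to stay under 2 MB, with a 25 MB RAM budget.
        NRTCachingDirectory dir = new NRTCachingDirectory(fsDir, 2.0, 25.0);
        // Advertise a ~512 MB flush: far over maxMergeSizeMB, so the write
        // should go straight to the delegate instead of the RAM cache.
        IOContext bigFlush = new IOContext(new FlushInfo(0, 512L * 1024 * 1024));
        try (IndexOutput out = dir.createOutput("_123.test", bigFlush)) {
            byte[] block = new byte[512];
            out.writeBytes(block, 0, block.length);
        }
        // The file should already be on disk in the delegate, not held in cache.
        System.out.println(Arrays.asList(fsDir.listAll()).contains("_123.test"));
        dir.close();
    }
}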