Use of org.apache.lucene.store.NRTCachingDirectory in project lucene-solr by apache.
The class HdfsDirectoryFactory, method create:
@Override
protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
  assert params != null : "init must be called before create";
  LOG.info("creating directory factory for path {}", path);
  Configuration conf = getConf();

  if (metrics == null) {
    metrics = MetricsHolder.metrics;
  }

  boolean blockCacheEnabled = getConfig(BLOCKCACHE_ENABLED, true);
  boolean blockCacheGlobal = getConfig(BLOCKCACHE_GLOBAL, true);
  boolean blockCacheReadEnabled = getConfig(BLOCKCACHE_READ_ENABLED, true);

  final HdfsDirectory hdfsDir;
  final Directory dir;
  if (blockCacheEnabled && dirContext != DirContext.META_DATA) {
    int numberOfBlocksPerBank = getConfig(NUMBEROFBLOCKSPERBANK, 16384);
    int blockSize = BlockDirectory.BLOCK_SIZE;
    int bankCount = getConfig(BLOCKCACHE_SLAB_COUNT, 1);
    boolean directAllocation = getConfig(BLOCKCACHE_DIRECT_MEMORY_ALLOCATION, true);
    int slabSize = numberOfBlocksPerBank * blockSize;
    LOG.info("Number of slabs of block cache [{}] with direct memory allocation set to [{}]", bankCount, directAllocation);
    LOG.info("Block cache target memory usage, slab size of [{}] will allocate [{}] slabs and use ~[{}] bytes", new Object[] { slabSize, bankCount, ((long) bankCount * (long) slabSize) });

    int bsBufferSize = params.getInt("solr.hdfs.blockcache.bufferstore.buffersize", blockSize);
    // this is actually total size
    int bsBufferCount = params.getInt("solr.hdfs.blockcache.bufferstore.buffercount", 0);

    BlockCache blockCache = getBlockDirectoryCache(numberOfBlocksPerBank, blockSize, bankCount, directAllocation, slabSize, bsBufferSize, bsBufferCount, blockCacheGlobal);
    Cache cache = new BlockDirectoryCache(blockCache, path, metrics, blockCacheGlobal);
    int readBufferSize = params.getInt("solr.hdfs.blockcache.read.buffersize", blockSize);
    hdfsDir = new HdfsDirectory(new Path(path), lockFactory, conf, readBufferSize);
    dir = new BlockDirectory(path, hdfsDir, cache, null, blockCacheReadEnabled, false, cacheMerges, cacheReadOnce);
  } else {
    hdfsDir = new HdfsDirectory(new Path(path), conf);
    dir = hdfsDir;
  }
  if (params.getBool(LOCALITYMETRICS_ENABLED, false)) {
    LocalityHolder.reporter.registerDirectory(hdfsDir);
  }

  boolean nrtCachingDirectory = getConfig(NRTCACHINGDIRECTORY_ENABLE, true);
  if (nrtCachingDirectory) {
    double nrtCacheMaxMergeSizeMB = getConfig(NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 16);
    double nrtCacheMaxCacheMB = getConfig(NRTCACHINGDIRECTORY_MAXCACHEMB, 192);
    return new NRTCachingDirectory(dir, nrtCacheMaxMergeSizeMB, nrtCacheMaxCacheMB);
  }
  return dir;
}
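For reference, this wrap-at-the-end pattern matches the canonical use of NRTCachingDirectory: the delegate Directory is handed to the cache wrapper, and the wrapper is what IndexWriter sees, so small files written by near-real-time flushes stay in RAM until a merge or commit pushes them to the delegate. A minimal standalone sketch, adapted from the pattern shown in the NRTCachingDirectory javadoc (the path, analyzer, and thresholds here are illustrative, not from the Solr source):

import java.io.IOException;
import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.NRTCachingDirectory;

public class NrtCachingExample {
  public static void main(String[] args) throws IOException {
    // Wrap an on-disk directory: cache newly written files whose expected
    // size is at most 5 MB, and devote at most 60 MB of RAM to the cache.
    Directory fsDir = FSDirectory.open(Paths.get("/path/to/index"));
    NRTCachingDirectory cachedDir = new NRTCachingDirectory(fsDir, 5.0, 60.0);
    try (IndexWriter writer =
        new IndexWriter(cachedDir, new IndexWriterConfig(new StandardAnalyzer()))) {
      // ... index documents; NRT flushes now land in RAM first ...
      writer.commit();
    }
    cachedDir.close();
  }
}

In the factory above, the same two thresholds are read from configuration instead, via NRTCACHINGDIRECTORY_MAXMERGESIZEMB (default 16) and NRTCACHINGDIRECTORY_MAXCACHEMB (default 192).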
Use of org.apache.lucene.store.NRTCachingDirectory in project lucene-solr by apache.
The class TestBinaryDocValuesUpdates, method testIOContext:
@Test
public void testIOContext() throws Exception {
  // LUCENE-5591: make sure we pass an IOContext with an approximate
  // segmentSize in FlushInfo
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // we want a single large enough segment so that a doc-values update writes a large file
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  // manually flush
  conf.setMaxBufferedDocs(Integer.MAX_VALUE);
  conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
  IndexWriter writer = new IndexWriter(dir, conf);
  for (int i = 0; i < 100; i++) {
    writer.addDocument(doc(i));
  }
  writer.commit();
  writer.close();

  NRTCachingDirectory cachingDir = new NRTCachingDirectory(dir, 100, 1 / (1024. * 1024.));
  conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // we want a single large enough segment so that a doc-values update writes a large file
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  // manually flush
  conf.setMaxBufferedDocs(Integer.MAX_VALUE);
  conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
  writer = new IndexWriter(cachingDir, conf);
  writer.updateBinaryDocValue(new Term("id", "doc-0"), "val", toBytes(100L));
  // flush
  DirectoryReader reader = DirectoryReader.open(writer);
  assertEquals(0, cachingDir.listCachedFiles().length);
  IOUtils.close(reader, writer, cachingDir);
}
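The second writer is opened over a cache whose maxCachedMB is 1 / (1024 * 1024) MB, i.e. about one byte, while maxMergeSizeMB is a generous 100. As long as the flush passes an IOContext whose FlushInfo reports a realistic estimated segment size, the doc-values update file cannot fit in the cache and is written straight through to the delegate, which is why listCachedFiles() must come back empty. If the FlushInfo were missing, the estimated size would be treated as zero, the file would be cached, and the assertion would fail. A hedged sketch of the per-write decision, with NRTCachingDirectory's internal bookkeeping simplified into constructor parameters (an outline of the logic, not the exact Lucene source):

import org.apache.lucene.store.IOContext;

// Sketch of the decision a caching directory makes for each write: keep the
// file in RAM only if its estimated size fits under both configured limits.
final class CacheWriteDecision {
  private final long maxMergeSizeBytes;
  private final long maxCachedBytes;
  private final long cachedBytesInUse;

  CacheWriteDecision(long maxMergeSizeBytes, long maxCachedBytes, long cachedBytesInUse) {
    this.maxMergeSizeBytes = maxMergeSizeBytes;
    this.maxCachedBytes = maxCachedBytes;
    this.cachedBytesInUse = cachedBytesInUse;
  }

  boolean shouldCache(IOContext context) {
    long bytes = 0;
    if (context.mergeInfo != null) {
      bytes = context.mergeInfo.estimatedMergeBytes;   // size hint from a merge
    } else if (context.flushInfo != null) {
      bytes = context.flushInfo.estimatedSegmentSize;  // size hint from a flush
    }
    // With maxCachedBytes of ~1 byte, as in the test, any nonzero estimate fails here.
    return bytes <= maxMergeSizeBytes && bytes + cachedBytesInUse <= maxCachedBytes;
  }
}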
Use of org.apache.lucene.store.NRTCachingDirectory in project lucene-solr by apache.
The class TestNumericDocValuesUpdates, method testIOContext:
@Test
public void testIOContext() throws Exception {
  // LUCENE-5591: make sure we pass an IOContext with an approximate
  // segmentSize in FlushInfo
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // we want a single large enough segment so that a doc-values update writes a large file
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  // manually flush
  conf.setMaxBufferedDocs(Integer.MAX_VALUE);
  conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
  IndexWriter writer = new IndexWriter(dir, conf);
  for (int i = 0; i < 100; i++) {
    writer.addDocument(doc(i));
  }
  writer.commit();
  writer.close();

  NRTCachingDirectory cachingDir = new NRTCachingDirectory(dir, 100, 1 / (1024. * 1024.));
  conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // we want a single large enough segment so that a doc-values update writes a large file
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  // manually flush
  conf.setMaxBufferedDocs(Integer.MAX_VALUE);
  conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
  writer = new IndexWriter(cachingDir, conf);
  writer.updateNumericDocValue(new Term("id", "doc-0"), "val", 100L);
  // flush
  DirectoryReader reader = DirectoryReader.open(writer);
  assertEquals(0, cachingDir.listCachedFiles().length);
  IOUtils.close(reader, writer, cachingDir);
}
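Both tests pin down the same LUCENE-5591 contract from the outside, by observing what the cache did. A hypothetical wrapper that would enforce the contract directly at write time (the class name and assertion are ours, purely for illustration, not part of Lucene):

import java.io.IOException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;

// Hypothetical directory wrapper that asserts the LUCENE-5591 contract:
// writes issued during a flush must carry an IOContext whose FlushInfo
// gives an approximate segment size.
class FlushInfoAssertingDirectory extends FilterDirectory {
  FlushInfoAssertingDirectory(Directory in) {
    super(in);
  }

  @Override
  public IndexOutput createOutput(String name, IOContext context) throws IOException {
    if (context.context == IOContext.Context.FLUSH) {
      assert context.flushInfo != null && context.flushInfo.estimatedSegmentSize > 0
          : "flush wrote " + name + " without an estimated segment size";
    }
    return super.createOutput(name, context);
  }
}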