Use of org.apache.solr.store.hdfs.HdfsDirectory in project lucene-solr by apache.
Class BlockDirectory, method getFileModified.
private long getFileModified(String name) throws IOException {
  if (in instanceof FSDirectory) {
    File directory = ((FSDirectory) in).getDirectory().toFile();
    File file = new File(directory, name);
    if (!file.exists()) {
      throw new FileNotFoundException("File [" + name + "] not found");
    }
    return file.lastModified();
  } else if (in instanceof HdfsDirectory) {
    return ((HdfsDirectory) in).fileModified(name);
  } else {
    throw new UnsupportedOperationException();
  }
}
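For context, BlockDirectory consults this timestamp when building cache keys, so a file that is deleted and re-created under the same name cannot serve stale cached blocks. A minimal sketch of that pattern (the exact key format here is an assumption, not verbatim source):

private String getFileCacheName(String name) throws IOException {
  // Hypothetical key format: embedding the modification time versions the
  // key, invalidating cached blocks whenever the file is rewritten.
  return name + ":" + getFileModified(name);
}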
Use of org.apache.solr.store.hdfs.HdfsDirectory in project lucene-solr by apache.
Class CheckHdfsIndexTest, method setUp.
@Override
@Before
public void setUp() throws Exception {
  super.setUp();
  Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
  conf.setBoolean("fs.hdfs.impl.disable.cache", true);
  directory = new HdfsDirectory(path, conf);
}
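Setting fs.hdfs.impl.disable.cache to true forces each test to open a fresh FileSystem instance rather than sharing the JVM-wide cached one, which keeps tests isolated across mini-cluster restarts. A hedged sketch of a matching tearDown (the directory field and super call follow the setUp above; the body is an assumption, not verbatim source):

@Override
@After
public void tearDown() throws Exception {
  try {
    directory.close(); // releases the uncached FileSystem opened in setUp
  } finally {
    super.tearDown();
  }
}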
Use of org.apache.solr.store.hdfs.HdfsDirectory in project lucene-solr by apache.
Class CheckHdfsIndex, method doMain.
// actual main: returns exit code instead of terminating JVM (for easy testing)
@SuppressForbidden(reason = "System.out required: command line tool")
protected static int doMain(String[] args) throws IOException, InterruptedException {
  CheckIndex.Options opts;
  try {
    opts = CheckIndex.parseOptions(args);
  } catch (IllegalArgumentException e) {
    System.out.println(e.getMessage());
    return 1;
  }
  if (!CheckIndex.assertsOn()) {
    System.out.println("\nNOTE: testing will be more thorough if you run java with '-ea:org.apache.lucene...', so assertions are enabled");
  }
  if (opts.getDirImpl() != null) {
    System.out.println("\nIgnoring specified -dir-impl, instead using " + HdfsDirectory.class.getSimpleName());
  }
  System.out.println("\nOpening index @ " + opts.getIndexPath() + "\n");
  Directory directory;
  try {
    directory = new HdfsDirectory(new Path(opts.getIndexPath()), getConf());
  } catch (IOException e) {
    System.out.println("ERROR: could not open hdfs directory \"" + opts.getIndexPath() + "\"; exiting");
    e.printStackTrace(System.out);
    return 1;
  }
  try (Directory dir = directory;
       CheckIndex checker = new CheckIndex(dir)) {
    opts.setOut(System.out);
    return checker.doCheck(opts);
  }
}
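The comment on doMain implies a thin main() wrapper that is the only place System.exit is called, so tests can invoke doMain directly and assert on the returned code. A sketch of that wrapper (an assumption based on the comment, not verbatim source):

@SuppressForbidden(reason = "System.exit required: command line tool")
public static void main(String[] args) throws IOException, InterruptedException {
  // Delegate to doMain and turn its return value into the process exit code.
  System.exit(doMain(args));
}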
Use of org.apache.solr.store.hdfs.HdfsDirectory in project lucene-solr by apache.
Class HdfsDirectoryFactory, method move.
@Override
public void move(Directory fromDir, Directory toDir, String fileName, IOContext ioContext) throws IOException {
  Directory baseFromDir = getBaseDir(fromDir);
  Directory baseToDir = getBaseDir(toDir);
  if (baseFromDir instanceof HdfsDirectory && baseToDir instanceof HdfsDirectory) {
    Path dir1 = ((HdfsDirectory) baseFromDir).getHdfsDirPath();
    Path dir2 = ((HdfsDirectory) baseToDir).getHdfsDirPath();
    Path file1 = new Path(dir1, fileName);
    Path file2 = new Path(dir2, fileName);
    FileContext fileContext = FileContext.getFileContext(getConf());
    fileContext.rename(file1, file2);
    return;
  }
  super.move(fromDir, toDir, fileName, ioContext);
}
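When both directories are HDFS-backed, FileContext.rename moves the file as a single metadata operation with no data copy. The super.move fallback for mixed directory types generally reduces to a copy followed by a delete; a hedged sketch using Lucene's Directory API (assumed to mirror the generic fallback, not verbatim source):

// Generic fallback when a cheap rename is impossible: copy the file across
// directories, then delete the original.
toDir.copyFrom(fromDir, fileName, fileName, ioContext);
fromDir.deleteFile(fileName);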
Use of org.apache.solr.store.hdfs.HdfsDirectory in project lucene-solr by apache.
Class HdfsDirectoryFactory, method create.
@Override
protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
  assert params != null : "init must be called before create";
  LOG.info("creating directory factory for path {}", path);
  Configuration conf = getConf();
  if (metrics == null) {
    metrics = MetricsHolder.metrics;
  }
  boolean blockCacheEnabled = getConfig(BLOCKCACHE_ENABLED, true);
  boolean blockCacheGlobal = getConfig(BLOCKCACHE_GLOBAL, true);
  boolean blockCacheReadEnabled = getConfig(BLOCKCACHE_READ_ENABLED, true);
  final HdfsDirectory hdfsDir;
  final Directory dir;
  if (blockCacheEnabled && dirContext != DirContext.META_DATA) {
    int numberOfBlocksPerBank = getConfig(NUMBEROFBLOCKSPERBANK, 16384);
    int blockSize = BlockDirectory.BLOCK_SIZE;
    int bankCount = getConfig(BLOCKCACHE_SLAB_COUNT, 1);
    boolean directAllocation = getConfig(BLOCKCACHE_DIRECT_MEMORY_ALLOCATION, true);
    int slabSize = numberOfBlocksPerBank * blockSize;
    LOG.info("Number of slabs of block cache [{}] with direct memory allocation set to [{}]", bankCount, directAllocation);
    LOG.info("Block cache target memory usage, slab size of [{}] will allocate [{}] slabs and use ~[{}] bytes", new Object[] { slabSize, bankCount, ((long) bankCount * (long) slabSize) });
    int bsBufferSize = params.getInt("solr.hdfs.blockcache.bufferstore.buffersize", blockSize);
    // this is actually total size
    int bsBufferCount = params.getInt("solr.hdfs.blockcache.bufferstore.buffercount", 0);
    BlockCache blockCache = getBlockDirectoryCache(numberOfBlocksPerBank, blockSize, bankCount, directAllocation, slabSize, bsBufferSize, bsBufferCount, blockCacheGlobal);
    Cache cache = new BlockDirectoryCache(blockCache, path, metrics, blockCacheGlobal);
    int readBufferSize = params.getInt("solr.hdfs.blockcache.read.buffersize", blockSize);
    hdfsDir = new HdfsDirectory(new Path(path), lockFactory, conf, readBufferSize);
    dir = new BlockDirectory(path, hdfsDir, cache, null, blockCacheReadEnabled, false, cacheMerges, cacheReadOnce);
  } else {
    hdfsDir = new HdfsDirectory(new Path(path), conf);
    dir = hdfsDir;
  }
  if (params.getBool(LOCALITYMETRICS_ENABLED, false)) {
    LocalityHolder.reporter.registerDirectory(hdfsDir);
  }
  boolean nrtCachingDirectory = getConfig(NRTCACHINGDIRECTORY_ENABLE, true);
  if (nrtCachingDirectory) {
    double nrtCacheMaxMergeSizeMB = getConfig(NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 16);
    double nrtCacheMaxCacheMB = getConfig(NRTCACHINGDIRECTORY_MAXCACHEMB, 192);
    return new NRTCachingDirectory(dir, nrtCacheMaxMergeSizeMB, nrtCacheMaxCacheMB);
  }
  return dir;
}
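The block cache's memory footprint follows directly from the values above: slab size is blocks-per-bank times block size, and total usage is slab size times slab count. Assuming BlockDirectory.BLOCK_SIZE is 8192 bytes (8 KB), the defaults yield a single 128 MB slab of (by default direct) memory; a worked sketch:

// Worked example of the slab-sizing arithmetic with the defaults above.
// Assumption: BlockDirectory.BLOCK_SIZE is 8192 bytes (8 KB).
int numberOfBlocksPerBank = 16384;                        // NUMBEROFBLOCKSPERBANK default
int blockSize = 8192;                                     // assumed BlockDirectory.BLOCK_SIZE
int bankCount = 1;                                        // BLOCKCACHE_SLAB_COUNT default
long slabSize = (long) numberOfBlocksPerBank * blockSize; // 134,217,728 bytes = 128 MB
long totalBytes = bankCount * slabSize;                   // ~128 MB of cache memory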