Use of org.apache.solr.store.hdfs.HdfsDirectory in the Apache CarbonData project.
Example: the init method of the LuceneCoarseGrainDataMap class.
/**
 * Loads the data map into memory, i.e. opens the Lucene index so it can be searched.
 *
 * @param dataMapModel model carrying the file-system path of the Lucene index directory
 * @throws IOException if the index path is missing, is not a directory, or cannot be opened
 */
@Override
public void init(DataMapModel dataMapModel) throws MemoryException, IOException {
  // Resolve the Lucene index location from the data map model's file path.
  Path indexDirPath = FileFactory.getPath(dataMapModel.getFilePath());
  LOGGER.info("Lucene index read path " + indexDirPath.toString());
  // File system for this path; the HDFS-backed implementation comes from the Solr project.
  FileSystem fileSystem = FileFactory.getFileSystem(indexDirPath);
  // The index location must exist ...
  if (!fileSystem.exists(indexDirPath)) {
    String msg = String.format("index directory %s not exists.", indexDirPath);
    LOGGER.error(msg);
    throw new IOException(msg);
  }
  // ... and must be a directory, not a plain file.
  if (!fileSystem.isDirectory(indexDirPath)) {
    String msg = String.format("error index path %s, must be directory", indexDirPath);
    LOGGER.error(msg);
    throw new IOException(msg);
  }
  // Open the index through the HDFS Directory implementation with the default configuration.
  Directory directory = new HdfsDirectory(indexDirPath, FileFactory.getConfiguration());
  IndexReader reader = DirectoryReader.open(directory);
  // NOTE(review): DirectoryReader.open throws on failure rather than returning null,
  // so this guard is defensive only; kept to preserve original behavior.
  if (reader == null) {
    throw new RuntimeException("failed to create index reader object");
  }
  // Searcher used by this data map's query methods.
  indexSearcher = new IndexSearcher(reader);
}
Use of org.apache.solr.store.hdfs.HdfsDirectory in the Apache CarbonData project.
Example: the onBlockStart method of the LuceneDataMapWriter class.
/**
 * Start of new block notification: prepares a Lucene {@link IndexWriter} that writes
 * index data into the segment's index directory.
 *
 * @param blockId id of the block that is starting; stored for use when pages are added
 * @throws IOException if the index directory cannot be created or the writer cannot be opened
 */
public void onBlockStart(String blockId) throws IOException {
  // save this block id for lucene index , used in onPageAdd function
  this.blockId = blockId;
  // get index path, put index data into segment's path
  String strIndexPath = getIndexPath();
  Path indexPath = FileFactory.getPath(strIndexPath);
  FileSystem fs = FileFactory.getFileSystem(indexPath);
  // BUG FIX: the original condition was inverted (it called mkdirs only when the
  // directory ALREADY existed). Create the index directory when it does NOT exist,
  // matching the intent "if index path not exists, create it".
  if (!fs.exists(indexPath)) {
    fs.mkdirs(indexPath);
  }
  // Lazily create the analyzer shared across blocks.
  if (null == analyzer) {
    analyzer = new StandardAnalyzer();
  }
  // Open the index directory via the HDFS Directory implementation and create the writer.
  Directory indexDir = new HdfsDirectory(indexPath, FileFactory.getConfiguration());
  indexWriter = new IndexWriter(indexDir, new IndexWriterConfig(analyzer));
}
Use of org.apache.solr.store.hdfs.HdfsDirectory in the Apache CarbonData project.
Example: the init method of the LuceneFineGrainDataMap class.
/**
 * It is called to load the data map to memory or to initialize it.
 * Opens the Lucene index directory identified by the model's file path and builds
 * an IndexSearcher over it.
 *
 * @param dataMapModel model carrying the file-system path of the Lucene index directory
 * @throws IOException if the index path does not exist, is not a directory,
 *         or the index cannot be opened
 */
public void init(DataMapModel dataMapModel) throws MemoryException, IOException {
// get this path from file path
Path indexPath = FileFactory.getPath(dataMapModel.getFilePath());
LOGGER.info("Lucene index read path " + indexPath.toString());
// get file system , use hdfs file system , realized in solr project
FileSystem fs = FileFactory.getFileSystem(indexPath);
// check this path valid: the index location must exist ...
if (!fs.exists(indexPath)) {
String errorMessage = String.format("index directory %s not exists.", indexPath);
LOGGER.error(errorMessage);
throw new IOException(errorMessage);
}
// ... and must be a directory, not a plain file
if (!fs.isDirectory(indexPath)) {
String errorMessage = String.format("error index path %s, must be directory", indexPath);
LOGGER.error(errorMessage);
throw new IOException(errorMessage);
}
// open this index path , use HDFS default configuration
Directory indexDir = new HdfsDirectory(indexPath, FileFactory.getConfiguration());
IndexReader indexReader = DirectoryReader.open(indexDir);
// NOTE(review): DirectoryReader.open throws on failure rather than returning null,
// so this guard appears to be defensive dead code — confirm before removing.
if (indexReader == null) {
throw new RuntimeException("failed to create index reader object");
}
// create a index searcher object used by this data map's query methods
indexSearcher = new IndexSearcher(indexReader);
}
Aggregations