Use of org.opengrok.indexer.configuration.RuntimeEnvironment in the OpenGrok project: class IndexDatabase, method xrefExistsFor.
/**
 * Tells whether an xref file is present on disk for the given path.
 * @param path path to a file under the source root
 * @return {@code true} if the corresponding xref file exists
 */
private boolean xrefExistsFor(String path) {
    final RuntimeEnvironment environment = RuntimeEnvironment.getInstance();
    final File xref = whatXrefFile(path, environment.isCompressXref());
    if (xref.exists()) {
        return true;
    }
    LOGGER.log(Level.FINEST, "Missing {0}", xref);
    return false;
}
Use of org.opengrok.indexer.configuration.RuntimeEnvironment in the OpenGrok project: class IndexDatabase, method updateAll.
/**
 * Update the index database for all of the projects.
 *
 * <p>Creates one {@code IndexDatabase} per project (or a single one if
 * projects are not enabled) and submits each update to the fixed executor.
 *
 * @param listener where to signal the changes to the database (may be {@code null})
 * @return a latch that reaches zero once every submitted update task has finished
 * @throws IOException if an error occurs
 */
static CountDownLatch updateAll(IndexChangedListener listener) throws IOException {
    RuntimeEnvironment env = RuntimeEnvironment.getInstance();
    List<IndexDatabase> dbs = new ArrayList<>();
    // With projects enabled there is one index database per project;
    // otherwise a single database covers the whole source root.
    if (env.hasProjects()) {
        for (Project project : env.getProjectList()) {
            dbs.add(new IndexDatabase(project));
        }
    } else {
        dbs.add(new IndexDatabase());
    }
    // Reuse the environment instance fetched above instead of calling
    // RuntimeEnvironment.getInstance() a second time (consistent with optimizeAll()).
    IndexerParallelizer parallelizer = env.getIndexerParallelizer();
    CountDownLatch latch = new CountDownLatch(dbs.size());
    for (IndexDatabase d : dbs) {
        final IndexDatabase db = d;
        if (listener != null) {
            db.addIndexChangedListener(listener);
        }
        parallelizer.getFixedExecutor().submit(() -> {
            try {
                db.update();
            } catch (Throwable e) {
                // Catch Throwable so a failure in one database cannot skip countDown()
                // and leave callers of latch.await() blocked forever.
                LOGGER.log(Level.SEVERE, String.format("Problem updating index database in directory %s: ", db.indexDirectory.getDirectory()), e);
            } finally {
                latch.countDown();
            }
        });
    }
    return latch;
}
Use of org.opengrok.indexer.configuration.RuntimeEnvironment in the OpenGrok project: class IndexDatabase, method update.
/**
 * Update the content of this index database.
 *
 * <p>Performs a full pass over the configured directories: traverses the
 * source tree, indexes changed files in parallel, removes index documents
 * for files that no longer exist, and stores aggregated #Lines/LOC data.
 * Only one update may run at a time for this database.
 *
 * @throws IOException if an error occurs, or if an update is already running
 */
public void update() throws IOException {
// Guard against concurrent updates of the same database instance.
synchronized (lock) {
if (running) {
throw new IOException("Indexer already running!");
}
running = true;
interrupted = false;
}
RuntimeEnvironment env = RuntimeEnvironment.getInstance();
// Reset per-run state before opening any Lucene resources.
reader = null;
writer = null;
settings = null;
uidIter = null;
postsIter = null;
indexedSymlinks.clear();
// Holds an exception from finishWriting()/writer.close() so it can be
// rethrown after cleanup completes.
IOException finishingException = null;
try {
Analyzer analyzer = AnalyzerGuru.getAnalyzer();
IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
iwc.setRAMBufferSizeMB(env.getRamBufferSize());
writer = new IndexWriter(indexDirectory, iwc);
// to make sure index exists on the disk
writer.commit();
completer = new PendingFileCompleter();
// Default the directory list: whole source root without projects,
// otherwise the project's own path.
if (directories.isEmpty()) {
if (project == null) {
directories.add("");
} else {
directories.add(project.getPath());
}
}
for (String dir : directories) {
File sourceRoot;
if ("".equals(dir)) {
sourceRoot = env.getSourceRootFile();
} else {
sourceRoot = new File(env.getSourceRootFile(), dir);
}
dir = Util.fixPathIfWindows(dir);
// uid prefix that bounds the terms belonging to this directory.
String startuid = Util.path2uid(dir, "");
// open existing index
reader = DirectoryReader.open(indexDirectory);
countsAggregator = new NumLinesLOCAggregator();
settings = readAnalysisSettings();
if (settings == null) {
settings = new IndexAnalysisSettings3();
}
Terms terms = null;
if (reader.numDocs() > 0) {
terms = MultiTerms.getTerms(reader, QueryBuilder.U);
NumLinesLOCAccessor countsAccessor = new NumLinesLOCAccessor();
if (countsAccessor.hasStored(reader)) {
// Counts were stored comprehensively before, so this run
// only needs to apply deltas.
isWithDirectoryCounts = true;
isCountingDeltas = true;
} else {
boolean foundCounts = countsAccessor.register(countsAggregator, reader);
isWithDirectoryCounts = false;
isCountingDeltas = foundCounts;
if (!isCountingDeltas) {
LOGGER.info("Forcing reindexing to fully compute directory counts");
}
}
} else {
// Empty index: counts will be computed from scratch.
isWithDirectoryCounts = false;
isCountingDeltas = false;
}
try {
if (terms != null) {
uidIter = terms.iterator();
// init uid
TermsEnum.SeekStatus stat = uidIter.seekCeil(new BytesRef(startuid));
if (stat == TermsEnum.SeekStatus.END) {
uidIter = null;
LOGGER.log(Level.WARNING, "Couldn''t find a start term for {0}, empty u field?", startuid);
}
}
// The actual indexing happens in indexParallel().
IndexDownArgs args = new IndexDownArgs();
Statistics elapsed = new Statistics();
LOGGER.log(Level.INFO, "Starting traversal of directory {0}", dir);
indexDown(sourceRoot, dir, args);
elapsed.report(LOGGER, String.format("Done traversal of directory %s", dir), "indexer.db.directory.traversal");
showFileCount(dir, args);
args.cur_count = 0;
elapsed = new Statistics();
LOGGER.log(Level.INFO, "Starting indexing of directory {0}", dir);
indexParallel(dir, args);
elapsed.report(LOGGER, String.format("Done indexing of directory %s", dir), "indexer.db.directory.index");
// Any remaining uids in this directory's range belong to files that were
// removed and have higher ordering than any present files.
while (uidIter != null && uidIter.term() != null && uidIter.term().utf8ToString().startsWith(startuid)) {
removeFile(true);
BytesRef next = uidIter.next();
if (next == null) {
uidIter = null;
}
}
/*
 * As a signifier that #Lines/LOC are comprehensively
 * stored so that later calculation is in deltas mode, we
 * need at least one D-document saved. For a repo with only
 * non-code files, however, no true #Lines/LOC will have
 * been saved. Subsequent re-indexing will do more work
 * than necessary (until a source code file is placed). We
 * can record zeroes for a fake file under the root to get
 * a D-document even for this special repo situation.
 *
 * Metrics are aggregated for directories up to the root,
 * so it suffices to put the fake directly under the root.
 */
if (!isWithDirectoryCounts) {
final String ROOT_FAKE_FILE = "/.OpenGrok_fake_file";
countsAggregator.register(new NumLinesLOC(ROOT_FAKE_FILE, 0, 0));
}
NumLinesLOCAccessor countsAccessor = new NumLinesLOCAccessor();
countsAccessor.store(writer, reader, countsAggregator, isWithDirectoryCounts && isCountingDeltas);
markProjectIndexed(project);
} finally {
// Always release the per-directory reader, even on failure.
reader.close();
}
}
// This is deliberate.
// NOTE(review): presumably "deliberate" refers to finishing the write
// outside the per-directory loop while capturing (not immediately
// rethrowing) the IOException — confirm original intent.
try {
finishWriting();
} catch (IOException e) {
finishingException = e;
}
} catch (RuntimeException ex) {
LOGGER.log(Level.SEVERE, "Failed with unexpected RuntimeException", ex);
throw ex;
} finally {
completer = null;
try {
if (writer != null) {
writer.close();
}
} catch (IOException e) {
// Keep the first failure; later ones are only logged.
if (finishingException == null) {
finishingException = e;
}
LOGGER.log(Level.WARNING, "An error occurred while closing writer", e);
} finally {
writer = null;
// Allow a subsequent update() to run.
synchronized (lock) {
running = false;
}
}
}
// Surface any error deferred from finishWriting()/writer.close().
if (finishingException != null) {
throw finishingException;
}
if (!isInterrupted() && isDirty()) {
if (env.isOptimizeDatabase()) {
optimize();
}
env.setIndexTimestamp();
}
}
Use of org.opengrok.indexer.configuration.RuntimeEnvironment in the OpenGrok project: class IndexDatabase, method optimizeAll.
/**
 * Optimize all index databases.
 *
 * <p>Submits an update for every dirty database to the fixed executor.
 *
 * @return a latch that reaches zero once all databases have been processed
 * @throws IOException if an error occurs
 */
static CountDownLatch optimizeAll() throws IOException {
    List<IndexDatabase> dbs = new ArrayList<>();
    RuntimeEnvironment env = RuntimeEnvironment.getInstance();
    IndexerParallelizer parallelizer = env.getIndexerParallelizer();
    if (env.hasProjects()) {
        for (Project project : env.getProjectList()) {
            dbs.add(new IndexDatabase(project));
        }
    } else {
        dbs.add(new IndexDatabase());
    }
    CountDownLatch latch = new CountDownLatch(dbs.size());
    for (IndexDatabase d : dbs) {
        final IndexDatabase db = d;
        if (db.isDirty()) {
            parallelizer.getFixedExecutor().submit(() -> {
                try {
                    db.update();
                } catch (Throwable e) {
                    // Catch Throwable so countDown() always runs.
                    LOGGER.log(Level.SEVERE, "Problem updating lucene index database: ", e);
                } finally {
                    latch.countDown();
                }
            });
        } else {
            // BUG FIX: the latch is sized for all databases, but previously only
            // dirty ones counted down, so await() could block forever whenever
            // at least one database was clean. Count clean ones down immediately.
            latch.countDown();
        }
    }
    return latch;
}
Use of org.opengrok.indexer.configuration.RuntimeEnvironment in the OpenGrok project: class IndexDatabase, method removeXrefFile.
/**
 * Queue the removal of the xref file corresponding to the given path.
 * The deletion is registered with the pending-file completer rather than
 * performed immediately.
 * @param path path to file under source root
 */
private void removeXrefFile(String path) {
    final boolean compressed = RuntimeEnvironment.getInstance().isCompressXref();
    final File xref = whatXrefFile(path, compressed);
    completer.add(new PendingFileDeletion(xref.getAbsolutePath()));
}
Aggregations