Use of org.opengrok.indexer.util.Statistics in project OpenGrok by OpenGrok.
The class BoundaryChangesets, method getBoundaryChangesetIDs.
/**
 * @param sinceRevision start revision ID
 * @return immutable list of revision IDs denoting the intervals
 * @throws HistoryException if there is a problem traversing the changesets in the repository
 */
public synchronized List<String> getBoundaryChangesetIDs(String sinceRevision) throws HistoryException {
    reset();

    LOGGER.log(Level.FINE, "getting boundary changesets for ''{0}''", repository.getDirectoryName());
    Statistics stat = new Statistics();
    repository.accept(sinceRevision, this::visit);

    // The changesets need to go from oldest to newest.
    Collections.reverse(result);
    stat.report(LOGGER, Level.FINE, String.format("Done getting boundary changesets for '%s' (%d entries)",
            repository.getDirectoryName(), result.size()));

    return List.copyOf(result);
}
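Every snippet on this page follows the same stopwatch idiom: construct a Statistics object just before the work starts and call report() once it is done. As a rough mental model, here is a minimal sketch of that idiom; it is not the actual org.opengrok.indexer.util.Statistics implementation, which among other things also accepts a meter name (see the three-argument report() call in createCacheReal() below).

import java.time.Duration;
import java.time.Instant;
import java.util.logging.Level;
import java.util.logging.Logger;

// Minimal sketch of the construct-then-report stopwatch idiom.
// Not the real org.opengrok.indexer.util.Statistics class.
public class StatisticsSketch {
    private final Instant start = Instant.now(); // construction starts the clock

    public void report(Logger logger, Level level, String message) {
        Duration elapsed = Duration.between(start, Instant.now());
        logger.log(level, "{0} (took {1} ms)",
                new Object[] {message, elapsed.toMillis()});
    }

    public void report(Logger logger, String message) {
        report(logger, Level.INFO, message); // assumed default level
    }
}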
Use of org.opengrok.indexer.util.Statistics in project OpenGrok by OpenGrok.
The class RepositoryWithPerPartesHistory, method doCreateCache.
@Override
protected void doCreateCache(HistoryCache cache, String sinceRevision, File directory) throws HistoryException {
    if (!RuntimeEnvironment.getInstance().isHistoryCachePerPartesEnabled()) {
        LOGGER.log(Level.INFO, "repository {0} supports per partes history cache creation, "
                + "however it is disabled in the configuration. Generating history cache as a whole.", this);
        finishCreateCache(cache, getHistory(directory, sinceRevision), null);
        return;
    }

    // For repositories that support this, avoid storing the complete History in memory
    // (which can be sizeable, at least for the initial indexing, esp. if merge changeset support is enabled),
    // by splitting the work into multiple chunks.
    BoundaryChangesets boundaryChangesets = new BoundaryChangesets(this);
    List<String> boundaryChangesetList = new ArrayList<>(boundaryChangesets.getBoundaryChangesetIDs(sinceRevision));
    // Add a null sentinel to finish the last step in the cycle below.
    boundaryChangesetList.add(null);
    LOGGER.log(Level.FINE, "boundary changesets: {0}", boundaryChangesetList);

    int cnt = 0;
    for (String tillRevision : boundaryChangesetList) {
        Statistics stat = new Statistics();
        LOGGER.log(Level.FINEST, "storing history cache for revision range ({0}, {1})",
                new Object[] {sinceRevision, tillRevision});
        finishCreateCache(cache, getHistory(directory, sinceRevision, tillRevision), tillRevision);
        sinceRevision = tillRevision;
        stat.report(LOGGER, Level.FINE, String.format("finished chunk %d/%d of history cache for repository '%s'",
                ++cnt, boundaryChangesetList.size(), this.getDirectoryName()));
    }
}
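The loop above consumes the boundary list as a sliding pair of (sinceRevision, tillRevision] windows, with the appended null meaning "from the last boundary to the repository tip". A standalone illustration with hypothetical revision IDs:

import java.util.ArrayList;
import java.util.List;

// Standalone illustration (hypothetical data) of how doCreateCache() turns a
// boundary list into (sinceRevision, tillRevision] windows. The trailing null
// stands for "everything after the last boundary".
public class PerPartesWindows {
    public static void main(String[] args) {
        String sinceRevision = null; // null = from the beginning of history
        List<String> boundaries = new ArrayList<>(List.of("r100", "r200", "r300"));
        boundaries.add(null); // final chunk: from r300 to the repository tip

        for (String tillRevision : boundaries) {
            System.out.printf("chunk (%s, %s]%n", sinceRevision, tillRevision);
            sinceRevision = tillRevision; // next chunk starts where this one ended
        }
        // Prints: (null, r100], (r100, r200], (r200, r300], (r300, null]
    }
}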
Use of org.opengrok.indexer.util.Statistics in project OpenGrok by OpenGrok.
The class HistoryGuru, method createCacheReal.
private void createCacheReal(Collection<Repository> repositories) {
    Statistics elapsed = new Statistics();
    ExecutorService executor = env.getIndexerParallelizer().getHistoryExecutor();

    // Since we know each repository object from the repositories
    // collection is unique, we can abuse HashMap to create a list of
    // repository,revision tuples with repository as key (as the revision
    // string does not have to be unique - surely it is not unique
    // for the initial index case).
    HashMap<Repository, String> repos2process = new HashMap<>();

    // Collect the repository and latest cached revision pairs first so that we
    // do not have to deal with latch decrementing in the cycle below.
    for (final Repository repo : repositories) {
        final String latestRev;
        try {
            latestRev = historyCache.getLatestCachedRevision(repo);
            repos2process.put(repo, latestRev);
        } catch (HistoryException he) {
            LOGGER.log(Level.WARNING,
                    String.format("Failed to retrieve latest cached revision for %s", repo.getDirectoryName()), he);
        }
    }

    LOGGER.log(Level.INFO, "Creating history cache for {0} repositories", repos2process.size());
    final CountDownLatch latch = new CountDownLatch(repos2process.size());
    for (final Map.Entry<Repository, String> entry : repos2process.entrySet()) {
        executor.submit(() -> {
            try {
                createCache(entry.getKey(), entry.getValue());
            } catch (Exception ex) {
                // We want to catch any exception since we are in a thread.
                LOGGER.log(Level.WARNING, "createCacheReal() got exception", ex);
            } finally {
                latch.countDown();
            }
        });
    }

    /*
     * Wait until the history of all repositories is done. This is necessary
     * since the next phase of generating the index will need the history to
     * be ready, as it is recorded in the Lucene index.
     */
    try {
        latch.await();
    } catch (InterruptedException ex) {
        LOGGER.log(Level.SEVERE, "latch exception", ex);
        return;
    }

    // The cache has been populated; now optimize how it is stored on
    // disk to enhance performance and save space.
    try {
        historyCache.optimize();
    } catch (HistoryException he) {
        LOGGER.log(Level.WARNING, "Failed optimizing the history cache database", he);
    }

    elapsed.report(LOGGER, "Done history cache for all repositories", "indexer.history.cache");
    historyCache.setHistoryIndexDone();
}
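The coordination here is the classic executor fan-out with a CountDownLatch: one permit per repository, counted down in a finally block so that a failing task cannot leave the main thread waiting forever. A self-contained demo of the same pattern, with hypothetical work items in place of repositories:

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Demo of the fan-out pattern used by createCacheReal(): submit one task per
// work item, count the latch down in finally, then await all of them.
public class LatchFanOutDemo {
    public static void main(String[] args) throws InterruptedException {
        List<String> repos = List.of("repoA", "repoB", "repoC"); // hypothetical work items
        ExecutorService executor = Executors.newFixedThreadPool(2);
        CountDownLatch latch = new CountDownLatch(repos.size());

        for (String repo : repos) {
            executor.submit(() -> {
                try {
                    System.out.println("creating cache for " + repo);
                } catch (Exception ex) {
                    // swallow: a single failure must not block the latch
                } finally {
                    latch.countDown();
                }
            });
        }
        latch.await(); // the indexing phase needs all histories ready first
        executor.shutdown();
    }
}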
Use of org.opengrok.indexer.util.Statistics in project OpenGrok by OpenGrok.
The class HistoryGuru, method createCache.
private void createCache(Repository repository, String sinceRevision) {
    String path = repository.getDirectoryName();
    String type = repository.getClass().getSimpleName();

    if (!repository.isHistoryEnabled()) {
        LOGGER.log(Level.INFO, "Skipping history cache creation of {0} repository in {1} and its subdirectories",
                new Object[] {type, path});
        return;
    }

    if (repository.isWorking()) {
        Statistics elapsed = new Statistics();
        LOGGER.log(Level.INFO, "Creating history cache for {0} ({1}) {2} renamed file handling",
                new Object[] {path, type, repository.isHandleRenamedFiles() ? "with" : "without"});
        try {
            repository.createCache(historyCache, sinceRevision);
        } catch (Exception e) {
            LOGGER.log(Level.WARNING, "An error occurred while creating cache for " + path + " (" + type + ")", e);
        }
        elapsed.report(LOGGER, "Done history cache for " + path);
    } else {
        LOGGER.log(Level.WARNING, "Skipping creation of history cache of {0} repository in {1}: Missing SCM dependencies?",
                new Object[] {type, path});
    }
}
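Note the structure: the Statistics object is created only on the path that does real work, and the broad catch means report() runs even when cache creation fails, so partial work still gets timed. A compact sketch of that shape, with all names illustrative (none of them are OpenGrok APIs):

import java.util.logging.Level;
import java.util.logging.Logger;

// Sketch of the guard-then-time structure used by createCache() above.
public class GuardedTiming {
    private static final Logger LOGGER = Logger.getLogger(GuardedTiming.class.getName());

    static void createCacheFor(FakeRepository repo) {
        if (!repo.isHistoryEnabled() || !repo.isWorking()) {
            return; // skipped repositories produce no timing entry
        }
        long start = System.nanoTime(); // stand-in for new Statistics()
        try {
            repo.createCache();
        } catch (Exception e) {
            LOGGER.log(Level.WARNING, "cache creation failed", e);
        }
        // the report still runs after a caught failure, so partial work is timed too
        LOGGER.log(Level.INFO, "done in {0} ms", (System.nanoTime() - start) / 1_000_000);
    }

    interface FakeRepository { // hypothetical stand-in for Repository
        boolean isHistoryEnabled();
        boolean isWorking();
        void createCache() throws Exception;
    }
}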
Use of org.opengrok.indexer.util.Statistics in project OpenGrok by OpenGrok.
The class RuntimeEnvironment, method maybeRefreshIndexSearchers.
public void maybeRefreshIndexSearchers() {
    LOGGER.log(Level.INFO, "refreshing searcher managers");
    Statistics stat = new Statistics();
    for (SearcherManager searcherManager : searcherManagerMap.values()) {
        maybeRefreshSearcherManager(searcherManager);
    }
    stat.report(LOGGER, "Done refreshing searcher managers");
}
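The body of maybeRefreshSearcherManager() is not shown on this page. A plausible sketch, assuming it delegates to Lucene's SearcherManager.maybeRefresh() (a real Lucene API that is cheap when nothing changed and skips the refresh if another thread is already refreshing); the actual OpenGrok method may handle additional cases:

import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.lucene.search.SearcherManager;

// Plausible sketch of the helper called by maybeRefreshIndexSearchers() above.
public class SearcherRefresh {
    private static final Logger LOGGER = Logger.getLogger(SearcherRefresh.class.getName());

    static void maybeRefreshSearcherManager(SearcherManager sm) {
        try {
            sm.maybeRefresh(); // returns quickly if the index is unchanged
        } catch (IOException ex) {
            LOGGER.log(Level.SEVERE, "maybeRefresh failed", ex);
        }
    }
}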