Use of org.opengrok.indexer.util.Progress in project OpenGrok:
class FileHistoryCache, method store.
/**
 * Store history for the whole repository in a directory hierarchy resembling
 * the original repository structure. History of individual files will be
 * stored under this hierarchy, each file containing history of the
 * corresponding source file.
 *
 * @param history history object to process into per-file histories
 * @param repository repository object
 * @param tillRevision end revision (can be null)
 * @throws HistoryException on failure to store the per-file histories
 */
@Override
public void store(History history, Repository repository, String tillRevision) throws HistoryException {
    final boolean handleRenamedFiles = repository.isHandleRenamedFiles();

    // Return immediately when there is nothing to do.
    List<HistoryEntry> entries = history.getHistoryEntries();
    if (entries.isEmpty()) {
        return;
    }

    HashMap<String, List<HistoryEntry>> map = new HashMap<>();
    // createFileMap() populates the per-file map and reports the newest revision seen.
    final String latestRev = createFileMap(history, map);

    // File based history cache does not store files for individual changesets so strip them.
    history.strip();

    File histDataDir = new File(getRepositoryHistDataDirname(repository));
    // Check the directory again in case of races (might happen in the presence of sub-repositories).
    if (!histDataDir.isDirectory() && !histDataDir.mkdirs() && !histDataDir.isDirectory()) {
        LOGGER.log(Level.WARNING, "cannot create history cache directory for ''{0}''", histDataDir);
    }

    // Renamed files are handled separately below; only regular files go through this path.
    Set<String> regularFiles = map.keySet().stream()
            .filter(e -> !history.isRenamed(e))
            .collect(Collectors.toSet());
    createDirectoriesForFiles(regularFiles, repository,
            "regular files for history till " + getRevisionString(tillRevision));

    /*
     * Now traverse the list of files from the hash map built above and for each file store its history
     * (saved in the value of the hash map entry for the file) in a file.
     * The renamed files will be handled separately.
     */
    LOGGER.log(Level.FINE, "Storing history for {0} regular files in repository ''{1}'' till {2}",
            new Object[] {regularFiles.size(), repository, getRevisionString(tillRevision)});
    final File root = env.getSourceRootFile();

    // The latch lets this thread wait until every submitted per-file task has run.
    final CountDownLatch latch = new CountDownLatch(regularFiles.size());
    AtomicInteger fileHistoryCount = new AtomicInteger();
    try (Progress progress = new Progress(LOGGER,
            String.format("history cache for regular files of %s till %s",
                    repository, getRevisionString(tillRevision)),
            regularFiles.size())) {
        for (String file : regularFiles) {
            env.getIndexerParallelizer().getHistoryFileExecutor().submit(() -> {
                try {
                    doFileHistory(file, new History(map.get(file)), repository, root, false);
                    fileHistoryCount.getAndIncrement();
                } catch (Exception ex) {
                    // We want to catch any exception since we are in thread.
                    LOGGER.log(Level.WARNING, "doFileHistory() got exception ", ex);
                } finally {
                    latch.countDown();
                    progress.increment();
                }
            });
        }

        // Wait for the executors to finish.
        try {
            latch.await();
        } catch (InterruptedException ex) {
            LOGGER.log(Level.SEVERE, "latch exception", ex);
            // Restore the interrupt status so callers can observe the interruption.
            Thread.currentThread().interrupt();
        }
        LOGGER.log(Level.FINE, "Stored history for {0} regular files in repository ''{1}''",
                new Object[] {fileHistoryCount, repository});
    }

    if (!handleRenamedFiles) {
        finishStore(repository, latestRev);
        return;
    }

    storeRenamed(history.getRenamedFiles(), repository, tillRevision);
    finishStore(repository, latestRev);
}
Use of org.opengrok.indexer.util.Progress in project OpenGrok:
class PendingFileCompleter, method completeRenamings.
/**
 * Attempts to rename all the tracked elements, catching any failures, and
 * throwing an exception if any failed.
 * @return the number of successful renamings
 * @throws IOException if any renaming failed; the first recorded failure is the cause
 */
private int completeRenamings() throws IOException {
    int numPending = renames.size();
    if (numPending < 1) {
        return 0;
    }

    List<PendingFileRenamingExec> pendingExecs = renames.parallelStream()
            .map(f -> new PendingFileRenamingExec(f.getTransientPath(), f.getAbsolutePath()))
            .collect(Collectors.toList());

    Map<Boolean, List<PendingFileRenamingExec>> bySuccess;
    try (Progress progress = new Progress(LOGGER, "pending renames", numPending)) {
        // Partition the executions by rename outcome; a failure's cause is
        // recorded on the exec object so it can be reported below.
        bySuccess = pendingExecs.parallelStream().collect(Collectors.groupingByConcurrent((x) -> {
            progress.increment();
            try {
                doRename(x);
                return true;
            } catch (IOException e) {
                x.exception = e;
                return false;
            }
        }));
    }
    renames.clear();

    int numFailures = 0;
    List<PendingFileRenamingExec> failures = bySuccess.get(Boolean.FALSE);
    if (failures != null && !failures.isEmpty()) {
        numFailures = failures.size();
        double pctFailed = 100.0 * numFailures / numPending;
        String exmsg = String.format("%d failures (%.1f%%) while renaming pending files",
                numFailures, pctFailed);
        throw new IOException(exmsg, failures.get(0).exception);
    }
    return numPending - numFailures;
}
Use of org.opengrok.indexer.util.Progress in project OpenGrok:
class IndexDatabase, method indexParallel.
/**
 * Executes the second, parallel stage of indexing.
 * @param dir the parent directory (when appended to SOURCE_ROOT)
 * @param args contains a list of files to index, found during the earlier
 * stage
 */
private void indexParallel(String dir, IndexDownArgs args) {
    int worksCount = args.works.size();
    if (worksCount < 1) {
        return;
    }
    AtomicInteger successCounter = new AtomicInteger();
    // NOTE(review): currentCounter is never incremented in this method, so
    // args.cur_count below is set from its initial value — confirm intent.
    AtomicInteger currentCounter = new AtomicInteger();
    AtomicInteger alreadyClosedCounter = new AtomicInteger();
    IndexerParallelizer parallelizer = RuntimeEnvironment.getInstance().getIndexerParallelizer();
    ObjectPool<Ctags> ctagsPool = parallelizer.getCtagsPool();
    Map<Boolean, List<IndexFileWork>> bySuccess = null;
    try (Progress progress = new Progress(LOGGER, dir, worksCount)) {
        // Submit to the indexer's own ForkJoinPool so the parallel stream runs
        // on managed threads; works are partitioned by per-file success.
        bySuccess = parallelizer.getForkJoinPool().submit(() -> args.works.parallelStream().collect(Collectors.groupingByConcurrent((x) -> {
            int tries = 0;
            Ctags pctags = null;
            boolean ret;
            Statistics stats = new Statistics();
            // The loop exists solely to allow a single retry after an
            // InterruptedException; every other path returns on first pass.
            while (true) {
                try {
                    if (alreadyClosedCounter.get() > 0) {
                        // The index writer was already seen closed; skip further work.
                        ret = false;
                    } else {
                        pctags = ctagsPool.get();
                        addFile(x.file, x.path, pctags);
                        successCounter.incrementAndGet();
                        ret = true;
                    }
                } catch (AlreadyClosedException e) {
                    alreadyClosedCounter.incrementAndGet();
                    String errmsg = String.format("ERROR addFile(): %s", x.file);
                    LOGGER.log(Level.SEVERE, errmsg, e);
                    x.exception = e;
                    ret = false;
                } catch (InterruptedException e) {
                    // Allow one retry if interrupted
                    if (++tries <= 1) {
                        continue;
                    }
                    LOGGER.log(Level.WARNING, "No retry: {0}", x.file);
                    x.exception = e;
                    ret = false;
                } catch (RuntimeException | IOException e) {
                    String errmsg = String.format("ERROR addFile(): %s", x.file);
                    LOGGER.log(Level.WARNING, errmsg, e);
                    x.exception = e;
                    ret = false;
                } finally {
                    // Always return the ctags handle to the pool, even on the
                    // retry path where it may be re-acquired.
                    if (pctags != null) {
                        pctags.reset();
                        ctagsPool.release(pctags);
                    }
                }
                progress.increment();
                stats.report(LOGGER, Level.FINEST, String.format("file ''%s'' %s", x.file, ret ? "indexed" : "failed indexing"));
                return ret;
            }
        }))).get();
    } catch (InterruptedException | ExecutionException e) {
        int successCount = successCounter.intValue();
        double successPct = 100.0 * successCount / worksCount;
        String exmsg = String.format("%d successes (%.1f%%) after aborting parallel-indexing", successCount, successPct);
        LOGGER.log(Level.SEVERE, exmsg, e);
    }
    args.cur_count = currentCounter.intValue();
    // Start with failureCount=worksCount, and then subtract successes.
    int failureCount = worksCount;
    if (bySuccess != null) {
        List<IndexFileWork> successes = bySuccess.getOrDefault(Boolean.TRUE, null);
        if (successes != null) {
            failureCount -= successes.size();
        }
    }
    if (failureCount > 0) {
        double pctFailed = 100.0 * failureCount / worksCount;
        String exmsg = String.format("%d failures (%.1f%%) while parallel-indexing", failureCount, pctFailed);
        LOGGER.log(Level.WARNING, exmsg);
    }
    /*
     * Encountering an AlreadyClosedException is severe enough to abort the
     * run, since it will fail anyway later upon trying to commit().
     */
    int numAlreadyClosed = alreadyClosedCounter.get();
    if (numAlreadyClosed > 0) {
        throw new AlreadyClosedException(String.format("count=%d", numAlreadyClosed));
    }
}
Use of org.opengrok.indexer.util.Progress in project OpenGrok:
class PendingFileCompleter, method completeDeletions.
/**
 * Attempts to delete all the tracked elements, catching any failures, and
 * throwing an exception if any failed.
 * @return the number of successful deletions
 * @throws IOException if any deletion failed; the first recorded failure is the cause
 */
private int completeDeletions() throws IOException {
    int numPending = deletions.size();
    if (numPending < 1) {
        return 0;
    }

    List<PendingFileDeletionExec> pendingExecs = deletions.parallelStream()
            .map(f -> new PendingFileDeletionExec(f.getAbsolutePath()))
            .collect(Collectors.toList());

    Map<Boolean, List<PendingFileDeletionExec>> bySuccess;
    try (Progress progress = new Progress(LOGGER, "pending deletions", numPending)) {
        // NOTE(review): unlike the rename/linkage counterparts there is no
        // try/catch around doDelete(), so from this method's view every entry
        // lands in the TRUE bucket — confirm how failures are recorded.
        bySuccess = pendingExecs.parallelStream().collect(Collectors.groupingByConcurrent((x) -> {
            progress.increment();
            doDelete(x);
            return true;
        }));
    }
    deletions.clear();

    List<PendingFileDeletionExec> successes = bySuccess.get(Boolean.TRUE);
    if (successes != null) {
        // Prune now-empty parent directories of the deleted files.
        tryDeleteParents(successes);
    }

    int numFailures = 0;
    List<PendingFileDeletionExec> failures = bySuccess.get(Boolean.FALSE);
    if (failures != null && !failures.isEmpty()) {
        numFailures = failures.size();
        double pctFailed = 100.0 * numFailures / numPending;
        String exmsg = String.format("%d failures (%.1f%%) while deleting pending files",
                numFailures, pctFailed);
        throw new IOException(exmsg, failures.get(0).exception);
    }
    return numPending - numFailures;
}
Use of org.opengrok.indexer.util.Progress in project OpenGrok:
class PendingFileCompleter, method completeLinkages.
/**
 * Attempts to link the tracked elements, catching any failures, and
 * throwing an exception if any failed.
 * @return the number of successful linkages
 * @throws IOException if any linkage failed; the first recorded failure is the cause
 */
private int completeLinkages() throws IOException {
    int numPending = linkages.size();
    if (numPending < 1) {
        return 0;
    }

    List<PendingSymlinkageExec> pendingExecs = linkages.parallelStream()
            .map(f -> new PendingSymlinkageExec(f.getSourcePath(), f.getTargetRelPath()))
            .collect(Collectors.toList());

    Map<Boolean, List<PendingSymlinkageExec>> bySuccess;
    try (Progress progress = new Progress(LOGGER, "pending linkages", numPending)) {
        // Partition the executions by linkage outcome; a failure's cause is
        // recorded on the exec object so it can be reported below.
        bySuccess = pendingExecs.parallelStream().collect(Collectors.groupingByConcurrent((x) -> {
            progress.increment();
            try {
                doLink(x);
                return true;
            } catch (IOException e) {
                x.exception = e;
                return false;
            }
        }));
    }
    linkages.clear();

    int numFailures = 0;
    List<PendingSymlinkageExec> failures = bySuccess.get(Boolean.FALSE);
    if (failures != null && !failures.isEmpty()) {
        numFailures = failures.size();
        double pctFailed = 100.0 * numFailures / numPending;
        String exmsg = String.format("%d failures (%.1f%%) while linking pending paths",
                numFailures, pctFailed);
        throw new IOException(exmsg, failures.get(0).exception);
    }
    return numPending - numFailures;
}
Aggregations