Use of org.archive.util.ObjectIdentityCache in project heritrix3 by internetarchive.

From the class BdbModule, the method doCheckpoint. Before taking a checkpoint, every ObjectIdentityCache is synced so cached objects reach their backing BDB databases; the method then forces a BDB-JE checkpoint, snapshots the environment's log files with DbBackup, and optionally prunes older checkpoint directories.
public void doCheckpoint(final Checkpoint checkpointInProgress) throws IOException {
    // First sync objectCaches
    for (@SuppressWarnings("rawtypes") ObjectIdentityCache oic : oiCaches.values()) {
        oic.sync();
    }
    try {
        // sync all databases
        for (DatabasePlusConfig dbc : databases.values()) {
            dbc.database.sync();
        }
        // Do a force checkpoint. That's what a sync does (i.e. doSync).
        CheckpointConfig chkptConfig = new CheckpointConfig();
        chkptConfig.setForce(true);
        // Mark Hayes of Sleepycat says:
        // "The default for this property is false, which gives the current
        // behavior (allow deltas). If this property is true, deltas are
        // prohibited -- full versions of internal nodes are always logged
        // during the checkpoint. When a full version of an internal node
        // is logged during a checkpoint, recovery does not need to process
        // it at all. It is only fetched if needed by the application,
        // during normal DB operations after recovery. When a delta of an
        // internal node is logged during a checkpoint, recovery must
        // process it by fetching the full version of the node from earlier
        // in the log, and then applying the delta to it. This can be
        // pretty slow, since it is potentially a large amount of
        // random I/O."
        // chkptConfig.setMinimizeRecoveryTime(true);
        bdbEnvironment.checkpoint(chkptConfig);
        LOGGER.fine("Finished bdb checkpoint.");

        DbBackup dbBackup = new DbBackup(bdbEnvironment);
        try {
            dbBackup.startBackup();
            File envCpDir = new File(dir.getFile(), checkpointInProgress.getName());
            org.archive.util.FileUtils.ensureWriteableDirectory(envCpDir);
            File logfilesList = new File(envCpDir, "jdbfiles.manifest");
            String[] filedata = dbBackup.getLogFilesInBackupSet();
            for (int i = 0; i < filedata.length; i++) {
                File f = new File(dir.getFile(), filedata[i]);
                filedata[i] += "," + f.length();
                if (getUseHardLinkCheckpoints()) {
                    File hardLink = new File(envCpDir, filedata[i]);
                    try {
                        Files.createLink(hardLink.toPath(), f.toPath().toAbsolutePath());
                    } catch (IOException | UnsupportedOperationException e) {
                        LOGGER.log(Level.SEVERE, "unable to create required checkpoint link " + hardLink, e);
                    }
                }
            }
            FileUtils.writeLines(logfilesList, Arrays.asList(filedata));
            LOGGER.fine("Finished processing bdb log files.");
        } finally {
            dbBackup.endBackup();
        }
    } catch (DatabaseException e) {
        throw new IOException(e);
    }
    if (checkpointInProgress.getForgetAllButLatest()) {
        File[] oldEnvCpDirs = dir.getFile().listFiles(new FilenameFilter() {
            @Override
            public boolean accept(File dir, String name) {
                return !name.equals(checkpointInProgress.getName())
                        && TextUtils.matches("cp\\d{5}-\\d{14}", name);
            }
        });
        for (File d : oldEnvCpDirs) {
            FileUtils.deleteDirectory(d);
        }
    }
}
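The checkpoint body above is an instance of BDB-JE's standard hot-backup protocol: force a checkpoint, freeze the log-file set with DbBackup, copy or link the frozen files out, then release the freeze. Below is a minimal standalone sketch of that protocol under stated assumptions: the environment home (bdb-env) and backup directory (backup) are hypothetical names, while the com.sleepycat.je classes are the same ones doCheckpoint relies on.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import com.sleepycat.je.CheckpointConfig;
import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentConfig;
import com.sleepycat.je.util.DbBackup;

public class JeHotBackupSketch {
    public static void main(String[] args) throws Exception {
        EnvironmentConfig config = new EnvironmentConfig();
        config.setAllowCreate(true);
        Environment env = new Environment(new File("bdb-env"), config);
        try {
            // Force a checkpoint so the backup starts from a clean state.
            CheckpointConfig chkpt = new CheckpointConfig();
            chkpt.setForce(true);
            env.checkpoint(chkpt);

            // DbBackup freezes the current set of log files: JE will not
            // delete any of them between startBackup() and endBackup().
            DbBackup backup = new DbBackup(env);
            backup.startBackup();
            try {
                Path backupDir = Files.createDirectories(Path.of("backup"));
                for (String name : backup.getLogFilesInBackupSet()) {
                    Path src = Path.of("bdb-env", name);
                    Path dst = backupDir.resolve(name);
                    try {
                        // Hard links are cheap but require filesystem support,
                        // mirroring the getUseHardLinkCheckpoints() path above.
                        Files.createLink(dst, src.toAbsolutePath());
                    } catch (IOException | UnsupportedOperationException e) {
                        Files.copy(src, dst); // fall back to a plain copy
                    }
                }
            } finally {
                backup.endBackup(); // always release the file-deletion freeze
            }
        } finally {
            env.close();
        }
    }
}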
Use of org.archive.util.ObjectIdentityCache in project heritrix3 by internetarchive.

From the class BdbModule, the method close. Teardown runs in dependency order: each ObjectIdentityCache is closed first, then every named database, and finally the shared BDB environment is synced and closed. Failures at each step are logged rather than propagated, so the rest of the shutdown still runs.
public void close() {
    if (classCatalog == null) {
        return;
    }
    for (@SuppressWarnings("rawtypes") ObjectIdentityCache cache : oiCaches.values()) {
        try {
            cache.close();
        } catch (Exception e) {
            LOGGER.log(Level.SEVERE, "Error closing oiCache " + cache, e);
        }
    }
    List<String> dbNames = new ArrayList<String>(databases.keySet());
    for (String dbName : dbNames) {
        try {
            closeDatabase(dbName);
        } catch (Exception e) {
            LOGGER.log(Level.SEVERE, "Error closing db " + dbName, e);
        }
    }
    try {
        this.bdbEnvironment.sync();
        this.bdbEnvironment.close();
    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, "Error closing environment.", e);
    }
}
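The same pattern generalizes to any stack of resources layered over a shared environment: close dependents first, log each failure independently, and close the environment last. A minimal sketch, assuming generic java.io.Closeable resources (the class and method names here are hypothetical stand-ins, not Heritrix classes):

import java.io.Closeable;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;

public class OrderedShutdownSketch {
    private static final Logger LOGGER =
            Logger.getLogger(OrderedShutdownSketch.class.getName());

    /**
     * Close each resource independently: a failure in one close() is
     * logged and must not prevent the remaining resources (and finally
     * the shared environment) from being closed.
     */
    static void closeAll(List<? extends Closeable> caches,
                         List<? extends Closeable> databases,
                         Closeable environment) {
        for (Closeable cache : caches) {
            try {
                cache.close();
            } catch (Exception e) {
                LOGGER.log(Level.SEVERE, "Error closing cache " + cache, e);
            }
        }
        for (Closeable db : databases) {
            try {
                db.close();
            } catch (Exception e) {
                LOGGER.log(Level.SEVERE, "Error closing db " + db, e);
            }
        }
        try {
            environment.close(); // last: everything above depends on it
        } catch (Exception e) {
            LOGGER.log(Level.SEVERE, "Error closing environment.", e);
        }
    }
}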