use of org.apache.derby.io.StorageFile in project derby by apache.
the class RawStore method backup.
/*
* Backup the database.
* Online backup copies all the database files (log, seg0, etc.) to the
* specified backup location without blocking any user operation for the
* duration of the backup. A stable copy is made of each page using
* page-level latches and, in some cases, with the help of monitors.
* The transaction log is also backed up; it is used to bring the database
* back to a consistent state on restore.
*
* <P> MT- only one thread is allowed to perform backup at any given time.
* Synchronized on this. Parallel backups are not supported.
*/
public synchronized void backup(Transaction t, File backupDir) throws StandardException {
if (!privExists(backupDir)) {
// if backup dir does not exist, go ahead and create it.
createBackupDirectory(backupDir);
} else {
if (!privIsDirectory(backupDir)) {
throw StandardException.newException(SQLState.RAWSTORE_CANNOT_BACKUP_TO_NONDIRECTORY, (File) backupDir);
}
if (privExists(new File(backupDir, PersistentService.PROPERTIES_NAME))) {
throw StandardException.newException(SQLState.RAWSTORE_CANNOT_BACKUP_INTO_DATABASE_DIRECTORY, (File) backupDir);
}
}
boolean error = true;
boolean renamed = false;
boolean renameFailed = false;
File oldbackup = null;
File backupcopy = null;
OutputStreamWriter historyFile = null;
StorageFile dbHistoryFile = null;
File backupHistoryFile = null;
LogInstant backupInstant = logFactory.getFirstUnflushedInstant();
try {
// get the name of the current db, i.e. the database directory of the current db.
StorageFile dbase = storageFactory.newStorageFile(null);
String canonicalDbName = storageFactory.getCanonicalName();
String dbname = StringUtil.shortDBName(canonicalDbName, storageFactory.getSeparator());
// append to end of history file
historyFile = privFileWriter(storageFactory.newStorageFile(BACKUP_HISTORY), true);
backupcopy = new File(backupDir, dbname);
logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_BACKUP_STARTED, canonicalDbName, getFilePath(backupcopy)));
// check if a backup copy of this database already exists,
if (privExists(backupcopy)) {
// first make a backup of the backup
oldbackup = new File(backupDir, dbname + ".OLD");
if (privExists(oldbackup)) {
if (privIsDirectory(oldbackup))
privRemoveDirectory(oldbackup);
else
privDelete(oldbackup);
}
if (!privRenameTo(backupcopy, oldbackup)) {
renameFailed = true;
throw StandardException.newException(SQLState.RAWSTORE_ERROR_RENAMING_FILE, backupcopy, oldbackup);
} else {
logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_MOVED_BACKUP, getFilePath(backupcopy), getFilePath(oldbackup)));
renamed = true;
}
}
// create the backup database directory
createBackupDirectory(backupcopy);
dbHistoryFile = storageFactory.newStorageFile(BACKUP_HISTORY);
backupHistoryFile = new File(backupcopy, BACKUP_HISTORY);
// copy the history file into the backup.
if (!privCopyFile(dbHistoryFile, backupHistoryFile))
throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, dbHistoryFile, backupHistoryFile);
// if there are any jar files stored in the database, copy them into
// the backup.
StorageFile jarDir = storageFactory.newStorageFile(FileResource.JAR_DIRECTORY_NAME);
if (privExists(jarDir)) {
// find the list of schema directories under the jar dir and
// then copy only the plain files under those directories. One
// could just recursively copy the whole jar dir, but if a user
// gives the jar directory as the backup path by mistake, such a
// recursive copy would fail when it tried to copy the backup
// dir onto itself.
String[] jarDirContents = privList(jarDir);
File backupJarDir = new File(backupcopy, FileResource.JAR_DIRECTORY_NAME);
// Create the backup jar directory
createBackupDirectory(backupJarDir);
LanguageConnectionContext lcc = (LanguageConnectionContext) getContextOrNull(LanguageConnectionContext.CONTEXT_ID);
// DERBY-5357 UUIDs introduced in jar file names in 10.9
boolean uuidSupported = lcc.getDataDictionary().checkVersion(DataDictionary.DD_VERSION_DERBY_10_9, null);
if (uuidSupported) {
// no subdirectories
for (int i = 0; i < jarDirContents.length; i++) {
StorageFile jar = storageFactory.newStorageFile(jarDir, jarDirContents[i]);
File backupJar = new File(backupJarDir, jarDirContents[i]);
if (privIsDirectory(new File(jar.getPath()))) {
// We no longer expect directories inside 'jar'. The check is
// needed to make the weird test #2 in BackupPathTests.java
// work: it does a backup of the db into its own(!) jar file
// directory, so trying to copy that db file into itself two
// levels down would fail.
continue;
}
if (!privCopyFile(jar, backupJar)) {
throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, jar, backupJar);
}
}
} else {
for (int i = 0; i < jarDirContents.length; i++) {
StorageFile jarSchemaDir = storageFactory.newStorageFile(jarDir, jarDirContents[i]);
File backupJarSchemaDir = new File(backupJarDir, jarDirContents[i]);
if (!privCopyDirectory(jarSchemaDir, backupJarSchemaDir, (byte[]) null, null, false)) {
throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, jarSchemaDir, backupJarSchemaDir);
}
}
}
}
// save service properties into the backup: read in the properties
// from the service.properties file, remove logDevice from them,
// then write them to the backup.
StorageFile logdir = logFactory.getLogDirectory();
try {
String name = getServiceName(this);
PersistentService ps = getMonitor().getServiceType(this);
String fullName = ps.getCanonicalServiceName(name);
Properties prop = ps.getServiceProperties(fullName, (Properties) null);
StorageFile defaultLogDir = storageFactory.newStorageFile(LogFactory.LOG_DIRECTORY_NAME);
if (!logdir.equals(defaultLogDir)) {
prop.remove(Attribute.LOG_DEVICE);
if (SanityManager.DEBUG) {
SanityManager.ASSERT(prop.getProperty(Attribute.LOG_DEVICE) == null, "cannot get rid of logDevice property");
}
logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_EDITED_SERVICEPROPS));
}
// save the service properties into the backup.
ps.saveServiceProperties(backupcopy.getPath(), prop);
} catch (StandardException se) {
logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_ERROR_EDIT_SERVICEPROPS) + se);
// skip the rest and let finally block clean up
return;
}
// In case of an encrypted database where the key is an external
// encryption key, there is an extra file named
// Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE; this file should be
// copied into the backup.
StorageFile verifyKeyFile = storageFactory.newStorageFile(Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE);
if (privExists(verifyKeyFile)) {
File backupVerifyKeyFile = new File(backupcopy, Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE);
if (!privCopyFile(verifyKeyFile, backupVerifyKeyFile))
throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, verifyKeyFile, backupVerifyKeyFile);
}
File logBackup = new File(backupcopy, LogFactory.LOG_DIRECTORY_NAME);
// this is weird, delete it
if (privExists(logBackup)) {
privRemoveDirectory(logBackup);
}
// Create the log directory
createBackupDirectory(logBackup);
// do a checkpoint to get the persistent store up to date.
logFactory.checkpoint(this, dataFactory, xactFactory, true);
// start the transaction log backup.
logFactory.startLogBackup(logBackup);
File segBackup = new File(backupcopy, "seg0");
// Create the data segment directory
createBackupDirectory(segBackup);
// backup all the information in the data segment.
dataFactory.backupDataFiles(t, segBackup);
logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_DATA_SEG_BACKUP_COMPLETED, getFilePath(segBackup)));
// copy the log that was generated after the backup started to the
// backup location and tell the log factory that the backup has
// come to an end.
logFactory.endLogBackup(logBackup);
logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_COPIED_LOG, getFilePath(logdir), getFilePath(logBackup)));
error = false;
} catch (IOException ioe) {
throw StandardException.newException(SQLState.RAWSTORE_UNEXPECTED_EXCEPTION, ioe);
} finally {
try {
if (error) {
// Abort all activity related to backup in the log factory.
logFactory.abortLogBackup();
// remove the partially created backup copy, unless the rename
// failed, in which case backupcopy still refers to the existing
// old backup that we must not delete.
if (!renameFailed)
privRemoveDirectory(backupcopy);
if (renamed)
// recover the old backup
privRenameTo(oldbackup, backupcopy);
logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_BACKUP_ABORTED));
} else {
// success, remove the old backup copy
if (renamed && privExists(oldbackup)) {
// get rid of the old backup
privRemoveDirectory(oldbackup);
logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_REMOVED_BACKUP, getFilePath(oldbackup)));
}
logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_BACKUP_COMPLETED, backupInstant));
// copy the updated history file, with the complete backup information, into the backup.
if (!privCopyFile(dbHistoryFile, backupHistoryFile))
throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, dbHistoryFile, backupHistoryFile);
}
historyFile.close();
} catch (IOException ioe) {
try {
historyFile.close();
} catch (IOException ioe2) {
// ignore failures while closing the history file
}
throw StandardException.newException(SQLState.RAWSTORE_UNEXPECTED_EXCEPTION, ioe);
}
}
}
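For context, RawStore.backup is normally reached through Derby's online backup system procedure rather than being called directly. Below is a minimal JDBC sketch of triggering such a backup; the database name myDB and the backup path /backups are placeholder assumptions.
import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DriverManager;

public class OnlineBackupExample {
    public static void main(String[] args) throws Exception {
        // connect to an already-booted database (placeholder JDBC URL)
        try (Connection conn = DriverManager.getConnection("jdbc:derby:myDB")) {
            // SYSCS_UTIL.SYSCS_BACKUP_DATABASE ultimately drives the raw store
            // backup shown above: checkpoint, copy of seg0, jar and verify-key
            // files, and archiving of the transaction log into the backup dir.
            try (CallableStatement cs =
                     conn.prepareCall("CALL SYSCS_UTIL.SYSCS_BACKUP_DATABASE(?)")) {
                cs.setString(1, "/backups");
                cs.execute();
            }
        }
    }
}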
use of org.apache.derby.io.StorageFile in project derby by apache.
the class VirtualFileTest method testGetParentAbsolute.
public void testGetParentAbsolute() {
DataStore store = getStore();
VirtualFile vFile = new VirtualFile(PathUtilTest.joinAbs(NON_EXISTING_DIRS), store);
int count = 0;
StorageFile parent = vFile.getParentDir();
while (parent != null) {
count++;
parent = parent.getParentDir();
}
assertEquals(5, count);
}
use of org.apache.derby.io.StorageFile in project derby by apache.
the class LogToFile method truncateLog.
/**
* Get rid of old and unnecessary log files
* @param firstLogNeeded The log file number of the oldest log file
* needed for recovery.
*/
private void truncateLog(long firstLogNeeded) {
long oldFirstLog;
if (keepAllLogs)
return;
// While a backup is in progress, log files that are yet to be
// copied to the backup must not be deleted, even if they are not
// required for crash recovery.
if (backupInProgress) {
long logFileNeededForBackup = logFileToBackup;
// if the log file that is yet to be copied to the backup is older
// than the first log file needed for crash recovery, don't let
// truncation go past it.
if (logFileNeededForBackup < firstLogNeeded)
firstLogNeeded = logFileNeededForBackup;
}
oldFirstLog = firstLogFileNumber;
firstLogFileNumber = firstLogNeeded;
while (oldFirstLog < firstLogNeeded) {
StorageFile uselessLogFile = null;
try {
uselessLogFile = getLogFileName(oldFirstLog);
if (privDelete(uselessLogFile)) {
if (SanityManager.DEBUG) {
if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
SanityManager.DEBUG(DBG_FLAG, "truncating useless log file " + uselessLogFile.getPath());
}
} else {
if (SanityManager.DEBUG) {
if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
SanityManager.DEBUG(DBG_FLAG, "Fail to truncate useless log file " + uselessLogFile.getPath());
}
}
} catch (StandardException se) {
if (SanityManager.DEBUG)
SanityManager.THROWASSERT("error opening log segment while deleting " + uselessLogFile.getPath(), se);
// if insane, just leave it be
}
oldFirstLog++;
}
}
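For context, the firstLogNeeded value passed to truncateLog is a plain log file number, typically derived from the undo low-water mark of the most recent checkpoint. A minimal sketch of that derivation using LogCounter's static decoding helper follows; the helper class and method names are illustrative assumptions, not Derby's actual call chain.
import org.apache.derby.impl.store.raw.log.LogCounter;

final class LogTruncationHelper {
    // A log instant packs a (log file number, byte offset) pair into a long;
    // the file-number half of the checkpoint's undo instant identifies the
    // oldest log file that crash recovery still needs.
    static long firstLogFileNeeded(long checkpointUndoInstant) {
        return LogCounter.getLogFileNumber(checkpointUndoInstant);
    }
}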
use of org.apache.derby.io.StorageFile in project derby by apache.
the class LogToFile method getLogFileToSimulateCorruption.
/**
* Get the log file to simulate a log corruption.
* FOR UNIT TESTING USAGE ONLY
*/
public StorageRandomAccessFile getLogFileToSimulateCorruption(long filenum) throws IOException, StandardException {
if (SanityManager.DEBUG) {
// long filenum = LogCounter.getLogFileNumber(logInstant);
// long filepos = LogCounter.getLogFilePosition(logInstant);
StorageFile fileName = getLogFileName(filenum);
StorageRandomAccessFile log = null;
return privRandomAccessFile(fileName, "rw");
}
return null;
}
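Since the method above only returns a file handle in SanityManager.DEBUG builds (and null otherwise), a test helper has to cope with the null case. Below is a hedged sketch of how a unit test might use the returned StorageRandomAccessFile to clobber a few bytes of a log file; the helper class, the chosen offset, and the bytes written are illustrative assumptions, not actual Derby test code.
import org.apache.derby.impl.store.raw.log.LogToFile;
import org.apache.derby.io.StorageRandomAccessFile;

final class LogCorruptionTestHelper {
    static void corruptLogFile(LogToFile logFactory, long logFileNumber, long offset)
            throws Exception {
        StorageRandomAccessFile log =
                logFactory.getLogFileToSimulateCorruption(logFileNumber);
        if (log == null) {
            // only available in SanityManager.DEBUG builds
            return;
        }
        try {
            // overwrite four bytes at the given offset; real tests pick an
            // offset that lands inside a log record body
            log.seek(offset);
            log.write(new byte[] { 0, 0, 0, 0 });
        } finally {
            log.close();
        }
    }
}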
use of org.apache.derby.io.StorageFile in project derby by apache.
the class LogToFile method startLogBackup.
/*
* Start the transaction log backup.
*
* The transaction log is required to bring the database to a consistent
* state on restore.
*
* All the log files that are created after the backup starts
* must be kept around until they are copied into the backup,
* even if there are checkpoints when backup is in progress.
*
* Copy the log control files to the backup (the checkpoint recorded in the
* control files is the backup checkpoint). Restore will use the checkpoint
* info in these control files to perform recovery and bring
* the database to a consistent state.
*
* Find first log file that needs to be copied into the backup to bring
* the database to the consistent state on restore.
*
* In the end, existing log files that are needed to recover from the backup
* checkpoint are copied into the backup; any log that is generated after
* this call is also copied into the backup after all the information
* in the data containers has been written to the backup, when endLogBackup()
* is called.
*
* @param toDir - location where the log files should be copied to.
* @exception StandardException Standard Derby error policy
*
*/
public void startLogBackup(File toDir) throws StandardException {
synchronized (this) {
// wait until the thread that is doing the checkpoint completes it.
while (inCheckpoint) {
try {
wait();
} catch (InterruptedException ie) {
InterruptStatus.setInterrupted();
}
}
backupInProgress = true;
// copy the control files.
StorageFile fromFile;
File toFile;
// copy the log control file
fromFile = getControlFileName();
toFile = new File(toDir, fromFile.getName());
if (!privCopyFile(fromFile, toFile)) {
throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile, toFile);
}
// copy the log mirror control file
fromFile = getMirrorControlFileName();
toFile = new File(toDir, fromFile.getName());
if (!privCopyFile(fromFile, toFile)) {
throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile, toFile);
}
// find the first log file number that is active
logFileToBackup = getFirstLogNeeded(currentCheckpoint);
}
// copy all the log files that have to go into the backup
backupLogFiles(toDir, getLogFileNumber() - 1);
}
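The backupLogFiles(toDir, getLogFileNumber() - 1) call at the end copies every log file from logFileToBackup up to the given last file number into the backup. Its body is not shown here; the sketch below illustrates the same idea using only the public StorageFile/StorageFactory API, with the "log" directory name and the logN.dat file-name pattern treated as assumptions rather than guaranteed constants.
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.derby.io.StorageFactory;
import org.apache.derby.io.StorageFile;

final class LogBackupSketch {
    // Copy log files "logN.dat" for N in [firstLogFile, lastLogFile] from the
    // database's log directory into toDir. Derby's own backupLogFiles() uses
    // its privileged copy helpers; plain stream copying is used here to keep
    // the sketch self-contained.
    static void copyLogFiles(StorageFactory storageFactory, File toDir,
                             long firstLogFile, long lastLogFile) throws IOException {
        for (long n = firstLogFile; n <= lastLogFile; n++) {
            StorageFile from = storageFactory.newStorageFile("log", "log" + n + ".dat");
            File to = new File(toDir, from.getName());
            try (InputStream in = from.getInputStream();
                 OutputStream out = new FileOutputStream(to)) {
                byte[] buf = new byte[8192];
                int read;
                while ((read = in.read(buf)) != -1) {
                    out.write(buf, 0, read);
                }
            }
        }
    }
}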