Use of org.apache.derby.iapi.store.raw.log.LogInstant in the Apache Derby project:
the class BaseDataFileFactory, method dropContainer.
/**
 * Drop a container.
 *
 * <P><B>Synchronisation</B>
 * <P>
 * This call will mark the container as dropped and then obtain a CX lock
 * (table level exclusive lock) on the container. Once a container has
 * been marked as dropped it cannot be retrieved by an openContainer()
 * call unless explicitly with droppedOK.
 * <P>
 * Once the exclusive lock has been obtained the container is removed
 * and all its pages deallocated. The container will be fully removed
 * at the commit time of the transaction.
 *
 * @param t    the transaction performing the drop
 * @param ckey identity (segment id, container id) of the container to drop
 *
 * @exception StandardException Standard Derby error policy
 */
public void dropContainer(RawTransaction t, ContainerKey ckey) throws StandardException {
    // Containers in the temporary segment are private to the transaction:
    // they are neither locked nor logged below.
    boolean tmpContainer = (ckey.getSegmentId() == ContainerHandle.TEMPORARY_SEGMENT);
    LockingPolicy cl = null;
    if (!tmpContainer) {
        // A persistent drop writes a log record, which is impossible on a
        // read-only database.
        if (isReadOnly()) {
            throw StandardException.newException(SQLState.DATA_CONTAINER_READ_ONLY);
        }
        // Container-level locking at serializable isolation; the final
        // argument requests a policy whose locks are held to end of
        // transaction.
        cl = t.newLockingPolicy(LockingPolicy.MODE_CONTAINER, TransactionController.ISOLATION_SERIALIZABLE, true);
        if (SanityManager.DEBUG)
            SanityManager.ASSERT(cl != null);
    }
    // close all open containers and 'onCommit' objects of this container
    t.notifyObservers(ckey);
    // Open for update under the (possibly null, for temp) locking policy so
    // that no change to this container can be happening thru some means
    // other than the lock we are getting here.
    RawContainerHandle containerHdl = (RawContainerHandle) t.openContainer(ckey, cl, ContainerHandle.MODE_FORUPDATE);
    try {
        if (containerHdl == null || containerHdl.getContainerStatus() != RawContainerHandle.NORMAL) {
            // If we are a temp container, don't worry about it.
            if (tmpContainer) {
                // Temp container: unlogged removal (null LogInstant) if a
                // handle was obtained at all; otherwise nothing to do.
                if (containerHdl != null)
                    containerHdl.removeContainer((LogInstant) null);
                return;
            } else {
                // Persistent container is missing or already dropped.
                throw StandardException.newException(SQLState.DATA_CONTAINER_VANISHED, ckey);
            }
        }
        // Container exist, is updatable and we got the lock.
        if (tmpContainer) {
            // Unlogged drop + removal; the 'true' marks the drop state.
            containerHdl.dropContainer((LogInstant) null, true);
            containerHdl.removeContainer((LogInstant) null);
        } else {
            ContainerOperation lop = new ContainerOperation(containerHdl, ContainerOperation.DROP);
            // mark the container as pre-dirtied so that if a checkpoint
            // happens after the log record is sent to the log stream, the
            // cache cleaning will wait for this change.
            containerHdl.preDirty(true);
            try {
                t.logAndDo(lop);
            } finally {
                // in case logAndDo fail, make sure the container is not
                // stuck in preDirty state.
                containerHdl.preDirty(false);
            }
            // remember this as a post commit work item: the space is
            // actually reclaimed only after the transaction commits.
            Serviceable p = new ReclaimSpace(ReclaimSpace.CONTAINER, ckey, this, true);
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace)) {
                    SanityManager.DEBUG(DaemonService.DaemonTrace, "Add post commit work " + p);
                }
            }
            t.addPostCommitWork(p);
        }
    } finally {
        // Always release the handle; the CX lock itself is held to end of
        // transaction by the locking policy.
        if (containerHdl != null)
            containerHdl.close();
    }
}
Use of org.apache.derby.iapi.store.raw.log.LogInstant in the Apache Derby project:
the class RawStore, method backup.
/**
 * Backup the database.
 * Online backup copies all the database files (log, seg0 ...Etc) to the
 * specified backup location without blocking any user operation for the
 * duration of the backup. Stable copy is made of each page using
 * page level latches and in some cases with the help of monitors.
 * Transaction log is also backed up, this is used to bring the database to
 * the consistent state on restore.
 *
 * <P> MT- only one thread is allowed to perform backup at any given time.
 * Synchronized on this. Parallel backups are not supported.
 *
 * @param t         the transaction under which the backup runs
 * @param backupDir the directory the backup copy is written into
 * @exception StandardException Standard Derby error policy
 */
public synchronized void backup(Transaction t, File backupDir) throws StandardException {
    if (!privExists(backupDir)) {
        // if backup dir does not exist, go ahead and create it.
        createBackupDirectory(backupDir);
    } else {
        // Refuse to back up onto a plain file, or into the live database
        // directory itself (detected by the presence of service.properties).
        if (!privIsDirectory(backupDir)) {
            throw StandardException.newException(SQLState.RAWSTORE_CANNOT_BACKUP_TO_NONDIRECTORY, (File) backupDir);
        }
        if (privExists(new File(backupDir, PersistentService.PROPERTIES_NAME))) {
            throw StandardException.newException(SQLState.RAWSTORE_CANNOT_BACKUP_INTO_DATABASE_DIRECTORY, (File) backupDir);
        }
    }
    // State flags driving the cleanup logic in the finally block:
    // error       - assume failure until the very last step succeeds
    // renamed     - an existing backup was moved aside to <dbname>.OLD
    // renameFailed - the move-aside itself failed (so don't delete backupcopy)
    boolean error = true;
    boolean renamed = false;
    boolean renameFailed = false;
    File oldbackup = null;
    File backupcopy = null;
    OutputStreamWriter historyFile = null;
    StorageFile dbHistoryFile = null;
    File backupHistoryFile = null;
    // Everything flushed up to this instant is guaranteed to be in the
    // backup; recorded now, reported in the history file on success.
    LogInstant backupInstant = logFactory.getFirstUnflushedInstant();
    try {
        // get name of the current db, ie. database directory of current db.
        StorageFile dbase = storageFactory.newStorageFile(null);
        String canonicalDbName = storageFactory.getCanonicalName();
        String dbname = StringUtil.shortDBName(canonicalDbName, storageFactory.getSeparator());
        // append to end of history file
        historyFile = privFileWriter(storageFactory.newStorageFile(BACKUP_HISTORY), true);
        backupcopy = new File(backupDir, dbname);
        logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_BACKUP_STARTED, canonicalDbName, getFilePath(backupcopy)));
        // check if a backup copy of this database already exists,
        if (privExists(backupcopy)) {
            // first make a backup of the backup
            oldbackup = new File(backupDir, dbname + ".OLD");
            if (privExists(oldbackup)) {
                if (privIsDirectory(oldbackup))
                    privRemoveDirectory(oldbackup);
                else
                    privDelete(oldbackup);
            }
            if (!privRenameTo(backupcopy, oldbackup)) {
                renameFailed = true;
                throw StandardException.newException(SQLState.RAWSTORE_ERROR_RENAMING_FILE, backupcopy, oldbackup);
            } else {
                logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_MOVED_BACKUP, getFilePath(backupcopy), getFilePath(oldbackup)));
                renamed = true;
            }
        }
        // create the backup database directory
        createBackupDirectory(backupcopy);
        dbHistoryFile = storageFactory.newStorageFile(BACKUP_HISTORY);
        backupHistoryFile = new File(backupcopy, BACKUP_HISTORY);
        // copy the history file into the backup.
        if (!privCopyFile(dbHistoryFile, backupHistoryFile))
            throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, dbHistoryFile, backupHistoryFile);
        // if they are any jar file stored in the database, copy them into
        // the backup.
        StorageFile jarDir = storageFactory.newStorageFile(FileResource.JAR_DIRECTORY_NAME);
        if (privExists(jarDir)) {
            // find the list of schema directories under the jar dir and
            // then copy only the plain files under those directories. One
            // could just use the recursive copy of directory to copy all
            // the files under the jar dir, but the problem with that is if
            // a user gives jar directory as the backup path by mistake,
            // copy will fail while copying the backup dir onto itself in
            // recursion
            String[] jarDirContents = privList(jarDir);
            File backupJarDir = new File(backupcopy, FileResource.JAR_DIRECTORY_NAME);
            // Create the backup jar directory
            createBackupDirectory(backupJarDir);
            LanguageConnectionContext lcc = (LanguageConnectionContext) getContextOrNull(LanguageConnectionContext.CONTEXT_ID);
            // DERBY-5357 UUIDs introduced in jar file names in 10.9;
            // pre-10.9 dictionaries store jars one level deeper, per schema.
            boolean uuidSupported = lcc.getDataDictionary().checkVersion(DataDictionary.DD_VERSION_DERBY_10_9, null);
            if (uuidSupported) {
                // 10.9+ layout: flat jar files, no subdirectories expected
                for (int i = 0; i < jarDirContents.length; i++) {
                    StorageFile jar = storageFactory.newStorageFile(jarDir, jarDirContents[i]);
                    File backupJar = new File(backupJarDir, jarDirContents[i]);
                    if (privIsDirectory(new File(jar.getPath()))) {
                        // We no longer expect directories inside
                        // 'jar'. Need check to make the weird
                        // test #2 in BackupPathTests.java work:
                        // it does a backup of the db into its
                        // own(!) jar file directory, so trying
                        // to copy that db file into itself two
                        // levels down would fail.
                        continue;
                    }
                    if (!privCopyFile(jar, backupJar)) {
                        throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, jar, backupJar);
                    }
                }
            } else {
                // pre-10.9 layout: one subdirectory per schema
                for (int i = 0; i < jarDirContents.length; i++) {
                    StorageFile jarSchemaDir = storageFactory.newStorageFile(jarDir, jarDirContents[i]);
                    File backupJarSchemaDir = new File(backupJarDir, jarDirContents[i]);
                    if (!privCopyDirectory(jarSchemaDir, backupJarSchemaDir, (byte[]) null, null, false)) {
                        throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, jarSchemaDir, backupJarSchemaDir);
                    }
                }
            }
        }
        // save service properties into the backup, Read in property
        // from service.properties file, remove logDevice from it,
        // then write it to the backup.
        StorageFile logdir = logFactory.getLogDirectory();
        try {
            String name = getServiceName(this);
            PersistentService ps = getMonitor().getServiceType(this);
            String fullName = ps.getCanonicalServiceName(name);
            Properties prop = ps.getServiceProperties(fullName, (Properties) null);
            StorageFile defaultLogDir = storageFactory.newStorageFile(LogFactory.LOG_DIRECTORY_NAME);
            // A non-default logDevice points at the source machine's disk
            // layout; strip it so the backup is self-contained.
            if (!logdir.equals(defaultLogDir)) {
                prop.remove(Attribute.LOG_DEVICE);
                if (SanityManager.DEBUG) {
                    SanityManager.ASSERT(prop.getProperty(Attribute.LOG_DEVICE) == null, "cannot get rid of logDevice property");
                }
                logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_EDITED_SERVICEPROPS));
            }
            // save the service properties into the backup.
            ps.saveServiceProperties(backupcopy.getPath(), prop);
        } catch (StandardException se) {
            logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_ERROR_EDIT_SERVICEPROPS) + se);
            // skip the rest and let finally block clean up.
            // NOTE(review): this returns normally after logging the failure,
            // so the caller sees success while 'error' stays true and the
            // finally block aborts the backup - confirm this silent return
            // is intended rather than a rethrow.
            return;
        }
        // Incase of encrypted database and the key is an external
        // encryption key, there is an extra file with name
        // Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE, this file should be
        // copied in to the backup.
        StorageFile verifyKeyFile = storageFactory.newStorageFile(Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE);
        if (privExists(verifyKeyFile)) {
            File backupVerifyKeyFile = new File(backupcopy, Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE);
            if (!privCopyFile(verifyKeyFile, backupVerifyKeyFile))
                throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, verifyKeyFile, backupVerifyKeyFile);
        }
        File logBackup = new File(backupcopy, LogFactory.LOG_DIRECTORY_NAME);
        // this is wierd (a log dir should not pre-exist in a directory we
        // just created), delete it
        if (privExists(logBackup)) {
            privRemoveDirectory(logBackup);
        }
        // Create the log directory
        createBackupDirectory(logBackup);
        // do a checkpoint to get the persistent store up to date.
        logFactory.checkpoint(this, dataFactory, xactFactory, true);
        // start the transaction log backup.
        logFactory.startLogBackup(logBackup);
        File segBackup = new File(backupcopy, "seg0");
        // Create the data segment directory
        createBackupDirectory(segBackup);
        // backup all the information in the data segment.
        dataFactory.backupDataFiles(t, segBackup);
        logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_DATA_SEG_BACKUP_COMPLETED, getFilePath(segBackup)));
        // copy the log that got generated after the backup started to
        // backup location and tell the logfactory that backup has come
        // to end.
        logFactory.endLogBackup(logBackup);
        logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_COPIED_LOG, getFilePath(logdir), getFilePath(logBackup)));
        // Only now is the backup known to be complete.
        error = false;
    } catch (IOException ioe) {
        throw StandardException.newException(SQLState.RAWSTORE_UNEXPECTED_EXCEPTION, ioe);
    } finally {
        // NOTE(review): historyFile may still be null here if privFileWriter
        // threw before it was assigned - confirm the cleanup paths below can
        // tolerate that, or guard them.
        try {
            if (error) {
                // Abort all activity related to backup in the log factory.
                logFactory.abortLogBackup();
                // Remove the partial copy so the user is left with either a
                // complete backup or none, not an half backed one (unless
                // the initial rename failed, in which case backupcopy is
                // still the previous good backup).
                if (!renameFailed)
                    privRemoveDirectory(backupcopy);
                if (renamed)
                    // recover the old backup
                    privRenameTo(oldbackup, backupcopy);
                logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_BACKUP_ABORTED));
            } else {
                // success, remove the old backup copy
                if (renamed && privExists(oldbackup)) {
                    // get rid of the old backup
                    privRemoveDirectory(oldbackup);
                    logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_REMOVED_BACKUP, getFilePath(oldbackup)));
                }
                logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_BACKUP_COMPLETED, backupInstant));
                // Re-copy the history file so the completion record makes it
                // into the backup as well.
                if (!privCopyFile(dbHistoryFile, backupHistoryFile))
                    throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, dbHistoryFile, backupHistoryFile);
            }
            historyFile.close();
        } catch (IOException ioe) {
            // Best-effort close; an earlier, more informative exception (if
            // any) would otherwise be masked by a close failure.
            try {
                historyFile.close();
            } catch (IOException ioe2) {
            }
            ;
            throw StandardException.newException(SQLState.RAWSTORE_UNEXPECTED_EXCEPTION, ioe);
        }
    }
}
Use of org.apache.derby.iapi.store.raw.log.LogInstant in the Apache Derby project:
the class LogicalUndoOperation, method doMe.
/**
 * Loggable methods
 */
/**
 * Apply the undo operation. In this implementation of the RawStore this
 * can only delegate to the undoMe method of undoOp.
 *
 * @param xact the Transaction that is doing the rollback
 * @param instant the log instant of this undo operation
 * @param in optional data
 *
 * @exception IOException Can be thrown by any of the methods of ObjectInput.
 * @exception StandardException Standard Derby policy.
 */
public final void doMe(Transaction xact, LogInstant instant, LimitObjectInput in) throws StandardException, IOException {
    // Snapshot the page state before the undo is applied so the debug
    // assertions below can verify that the undo really advanced the page.
    long versionBefore = 0;
    LogInstant instantBefore = null;
    if (SanityManager.DEBUG) {
        instantBefore = this.page.getLastLogInstant();
        versionBefore = this.page.getPageVersion();
        // The page must be at exactly the version this operation was
        // created against, and any prior log instant must precede ours.
        SanityManager.ASSERT(versionBefore == this.getPageVersion());
        SanityManager.ASSERT(instantBefore == null || instant == null || instantBefore.lessThan(instant));
    }
    // During runtime rollback, PageOp.generateUndo located this.page and
    // latched it there; during recovery redo, this.needsRedo located and
    // latched it here. Either way this.page is the correct, latched page.
    //
    // recordId was produced by generateUndo and stored on this operation.
    // For a physical undo it matches the recordId in undoOp; for a logical
    // undo it may differ when this.page is not the page the roll-forward
    // change went to (the record may have moved since).
    undoOp.undoMe(xact, this.page, recordId, instant, in);
    if (SanityManager.DEBUG) {
        // The undo must have bumped the page version and stamped our
        // instant as the page's last log instant.
        SanityManager.ASSERT(versionBefore < this.page.getPageVersion());
        SanityManager.ASSERT(instant == null || instant.equals(this.page.getLastLogInstant()));
    }
    releaseResource(xact);
}
Use of org.apache.derby.iapi.store.raw.log.LogInstant in the Apache Derby project:
the class PhysicalUndoOperation, method doMe.
/**
 * Loggable methods
 */
/**
 * Apply the undo operation. In this implementation of the RawStore this
 * can only delegate to the undoMe method of undoOp.
 *
 * @param xact the Transaction that is doing the rollback
 * @param instant the log instant of this undo operation
 * @param in optional data
 *
 * @exception IOException Can be thrown by any of the methods of InputStream.
 * @exception StandardException Standard Derby policy.
 */
public final void doMe(Transaction xact, LogInstant instant, LimitObjectInput in) throws StandardException, IOException {
    // Capture the pre-undo page state for the consistency checks below.
    long versionBefore = 0;
    LogInstant instantBefore = null;
    if (SanityManager.DEBUG) {
        instantBefore = this.page.getLastLogInstant();
        versionBefore = this.page.getPageVersion();
        // The page must be at the version this operation was created
        // against, and its last log instant must precede ours.
        SanityManager.ASSERT(versionBefore == this.getPageVersion());
        SanityManager.ASSERT(instantBefore == null || instant == null || instantBefore.lessThan(instant));
    }
    // During runtime rollback, PageOp.generateUndo located this.page and
    // latched it there; during recovery redo, this.needsRedo located and
    // latched it here. Either way this.page is the correct, latched page.
    undoOp.undoMe(xact, this.page, instant, in);
    if (SanityManager.DEBUG) {
        // The undo must have advanced the page version ...
        if (versionBefore >= this.page.getPageVersion()) {
            SanityManager.THROWASSERT("oldversion = " + versionBefore + ";page version = " + this.page.getPageVersion() + "page = " + page + "; my class name is " + getClass().getName() + " undoOp is " + undoOp.getClass().getName());
        }
        SanityManager.ASSERT(versionBefore < this.page.getPageVersion());
        // ... and stamped our instant as the page's last log instant.
        if (instant != null && !instant.equals(this.page.getLastLogInstant()))
            SanityManager.THROWASSERT("my class name is " + getClass().getName() + " undoOp is " + undoOp.getClass().getName());
    }
    releaseResource(xact);
}
Use of org.apache.derby.iapi.store.raw.log.LogInstant in the Apache Derby project:
the class FileLogger, method logAndUndo.
/**
 * Writes out a compensation log record to the log stream, and call its
 * doMe method to undo the change of a previous log operation.
 *
 * <P>MT - Not needed. A transaction must be single threaded thru undo, each
 * RawTransaction has its own logger, therefore no need to synchronize.
 * The RawTransaction must handle synchronizing with multiple threads
 * during rollback.
 *
 * @param xact the transaction logging the change
 * @param compensation the compensation log operation
 * @param undoInstant the log instant of the operation that is to be
 * rolled back
 * @param in optional data input for the compenastion doMe method
 *
 * @return the instant in the log that can be used to identify the log
 * record
 *
 * @exception StandardException Derby Standard error policy
 */
public LogInstant logAndUndo(RawTransaction xact, Compensation compensation, LogInstant undoInstant, LimitObjectInput in) throws StandardException {
    // Tracks whether an IOException originated inside user serialization
    // code (writeObject) rather than in the log buffer itself; the catch
    // block at the bottom maps the two cases to different SQLStates.
    boolean inUserCode = false;
    try {
        logOutputBuffer.reset();
        TransactionId transactionId = xact.getId();
        // write out the log header with the operation embedded
        logRecord.setValue(transactionId, compensation);
        inUserCode = true;
        logicalOut.writeObject(logRecord);
        inUserCode = false;
        // write out the undoInstant
        logicalOut.writeLong(((LogCounter) undoInstant).getValueAsLong());
        // in this implemetaion, there is no optional data for the
        // compensation operation. Optional data for the rollback comes
        // from the undoable operation - and is passed into this call.
        int completeLength = logOutputBuffer.getPosition();
        long instant = 0;
        if (logFactory.databaseEncrypted()) {
            // we must pad the encryption data to be multiple of block
            // size, which is logFactory.getEncryptionBlockSize()
            int encryptedLength = completeLength;
            if ((encryptedLength % logFactory.getEncryptionBlockSize()) != 0)
                encryptedLength = encryptedLength + logFactory.getEncryptionBlockSize() - (encryptedLength % logFactory.getEncryptionBlockSize());
            // Grow the reusable encryption buffer only when needed.
            if (encryptionBuffer == null || encryptionBuffer.length < encryptedLength)
                encryptionBuffer = new byte[encryptedLength];
            System.arraycopy(logOutputBuffer.getByteArray(), 0, encryptionBuffer, 0, completeLength);
            // do not bother to clear out the padding area
            int len = logFactory.encrypt(encryptionBuffer, 0, encryptedLength, encryptionBuffer, 0);
            if (SanityManager.DEBUG)
                SanityManager.ASSERT(len == encryptedLength, "encrypted log buffer length != log buffer len");
            instant = logFactory.appendLogRecord(encryptionBuffer, 0, encryptedLength, null, 0, 0);
        } else {
            instant = logFactory.appendLogRecord(logOutputBuffer.getByteArray(), 0, completeLength, null, 0, 0);
        }
        LogInstant logInstant = new LogCounter(instant);
        if (SanityManager.DEBUG) {
            if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                SanityManager.DEBUG(LogToFile.DBG_FLAG, "Write CLR: Xact: " + transactionId.toString() + "clrinstant: " + logInstant.toString() + " undoinstant " + undoInstant + "\n");
            }
        }
        try {
            // in and dataLength contains optional data that was written
            // to the log during a previous call to logAndDo.
            compensation.doMe(xact, logInstant, in);
        } catch (StandardException se) {
            // A failed doMe leaves the store inconsistent with the log:
            // mark the whole log corrupt rather than continue.
            throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_DO_ME_FAIL, se, compensation));
        } catch (IOException ioe) {
            throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_DO_ME_FAIL, ioe, compensation));
        }
        return logInstant;
    } catch (IOException ioe) {
        if (inUserCode) {
            // Serialization of the log record itself failed.
            throw StandardException.newException(SQLState.LOG_WRITE_LOG_RECORD, ioe, compensation);
        } else {
            // The log buffer could not accept the record.
            throw StandardException.newException(SQLState.LOG_BUFFER_FULL, ioe, compensation);
        }
    }
}
Aggregations