Use of org.apache.derby.shared.common.error.StandardException in the Apache Derby project.
From the class InternalTriggerExecutionContext, method getOldRowSet.
/**
 * Returns a result set containing the before (old) images of the
 * changed rows. For a row trigger, the result set will have a single
 * row. For a statement trigger, this result set has every row that
 * has changed or will change. If a statement trigger does not affect
 * a row, then the result set will be empty (i.e. ResultSet.next()
 * will return false).
 *
 * @return the ResultSet containing before images of the rows
 * changed by the triggering event.
 *
 * @exception SQLException if called after the triggering event has
 * completed
 */
public java.sql.ResultSet getOldRowSet() throws SQLException {
    ensureProperContext();
    if (beforeResultSet == null) {
        return null;
    }
    try {
        /* Shallow clone the result set where possible: it may be used
         * at multiple places independently in the trigger action
         * (bug found during the fix of beetle 4373).
         */
        CursorResultSet sourceRS = beforeResultSet;
        if (sourceRS instanceof TemporaryRowHolderResultSet) {
            sourceRS = (CursorResultSet) ((TemporaryRowHolderResultSet) sourceRS).clone();
        } else if (sourceRS instanceof TableScanResultSet) {
            sourceRS = (CursorResultSet) ((TableScanResultSet) sourceRS).clone();
        }
        sourceRS.open();
        // Wrap the internal cursor in a JDBC ResultSet and track it so it
        // can be closed when the triggering event completes.
        java.sql.ResultSet wrappedRS = cc.getResultSet(sourceRS);
        resultSetVector.addElement(wrappedRS);
        return wrappedRS;
    } catch (StandardException se) {
        throw PublicAPI.wrapStandardException(se);
    }
}
Use of org.apache.derby.shared.common.error.StandardException in the Apache Derby project.
From the class InternalTriggerExecutionContext, method getNewRowSet.
/**
 * Returns a result set containing the after (new) images of the
 * changed rows. For a row trigger, the result set will have a single
 * row. For a statement trigger, this result set has every row that
 * has changed or will change. If a statement trigger does not affect
 * a row, then the result set will be empty (i.e. ResultSet.next()
 * will return false).
 *
 * @return the ResultSet containing after images of the rows
 * changed by the triggering event.
 *
 * @exception SQLException if called after the triggering event has
 * completed
 */
public java.sql.ResultSet getNewRowSet() throws SQLException {
    ensureProperContext();
    if (afterResultSet == null) {
        return null;
    }
    try {
        /* Shallow clone where possible, since the result set may be
         * consumed independently at several points in the trigger
         * action (bug found during the fix of beetle 4373).
         */
        CursorResultSet source = afterResultSet;
        if (source instanceof TemporaryRowHolderResultSet) {
            source = (CursorResultSet) ((TemporaryRowHolderResultSet) source).clone();
        } else if (source instanceof TableScanResultSet) {
            source = (CursorResultSet) ((TableScanResultSet) source).clone();
        }
        source.open();
        // Expose the internal cursor as a JDBC ResultSet; remember it so
        // it can be cleaned up when the triggering event finishes.
        java.sql.ResultSet result = cc.getResultSet(source);
        resultSetVector.addElement(result);
        return result;
    } catch (StandardException se) {
        throw PublicAPI.wrapStandardException(se);
    }
}
Use of org.apache.derby.shared.common.error.StandardException in the Apache Derby project.
From the class RowChangerImpl, method openForUpdate.
/**
 * Open this RowChanger to avoid fixing indexes that do not change
 * during update operations.
 *
 * @param fixOnUpdate fixOnUpdate[ix] == true ==> fix index 'ix' on
 * an update operation.
 * @param lockMode The lock mode to use
 * (row or table, see TransactionController)
 * @param wait If true, then the caller wants to wait for locks. False will be
 * when we are using a nested user xaction - we want to timeout right away
 * if the parent holds the lock. (bug 4821)
 *
 * @exception StandardException thrown on failure to convert
 */
public void openForUpdate(boolean[] fixOnUpdate, int lockMode, boolean wait) throws StandardException {
    LanguageConnectionContext lcc = null;
    if (SanityManager.DEBUG)
        SanityManager.ASSERT(!isOpen, "RowChanger already open");
    // activation is null when we are invoked by the DataDictionary
    // rather than by query execution.
    if (activation != null) {
        lcc = activation.getLanguageConnectionContext();
    }
    /* Isolation level - translate from language to store.
     * With no language context available, default to READ COMMITTED. */
    int isolationLevel;
    if (lcc == null) {
        isolationLevel = TransactionControl.READ_COMMITTED_ISOLATION_LEVEL;
    } else {
        isolationLevel = lcc.getCurrentIsolationLevel();
    }
    switch(isolationLevel) {
        // Store will overwrite it to READ COMMITTED for update.
        case TransactionControl.READ_UNCOMMITTED_ISOLATION_LEVEL:
            isolationLevel = TransactionController.ISOLATION_READ_UNCOMMITTED;
            break;
        case TransactionControl.READ_COMMITTED_ISOLATION_LEVEL:
            isolationLevel = TransactionController.ISOLATION_READ_COMMITTED;
            break;
        case TransactionControl.REPEATABLE_READ_ISOLATION_LEVEL:
            isolationLevel = TransactionController.ISOLATION_REPEATABLE_READ;
            break;
        case TransactionControl.SERIALIZABLE_ISOLATION_LEVEL:
            isolationLevel = TransactionController.ISOLATION_SERIALIZABLE;
            break;
        default:
            // NOTE(review): in non-debug builds an unrecognized level falls
            // through here and the untranslated language-level value is
            // passed to the store — confirm this is intended.
            if (SanityManager.DEBUG) {
                SanityManager.THROWASSERT("Invalid isolation level - " + isolationLevel);
            }
    }
    try {
        /* We can get called by either an activation or
         * the DataDictionary. The DD cannot use the
         * CompiledInfo while the activation can.
         */
        if (heapSCOCI != null) {
            baseCC = tc.openCompiledConglomerate(false, (TransactionController.OPENMODE_FORUPDATE | ((wait) ? 0 : TransactionController.OPENMODE_LOCK_NOWAIT)), lockMode, isolationLevel, heapSCOCI, heapDCOCI);
        } else {
            baseCC = tc.openConglomerate(heapConglom, false, (TransactionController.OPENMODE_FORUPDATE | ((wait) ? 0 : TransactionController.OPENMODE_LOCK_NOWAIT)), lockMode, isolationLevel);
        }
    } catch (StandardException se) {
        // If the open failed, check whether the underlying statement has
        // been invalidated (which would be the real cause) before
        // propagating the store error.
        if (activation != null)
            activation.checkStatementValidity();
        throw se;
    }
    /* Save the ConglomerateController off in the activation
     * to eliminate the need to open it a 2nd time if we are doing
     * and index to base row for the search as part of an update or
     * delete below us.
     * NOTE: activation can be null. (We don't have it in
     * the DataDictionary.)
     */
    if (activation != null) {
        activation.checkStatementValidity();
        activation.setHeapConglomerateController(baseCC);
    }
    /* Only worry about indexes if there are indexes to worry about */
    if (indexCIDS.length != 0) {
        /* IndexSetChanger re-used across executions. */
        if (isc == null) {
            isc = new IndexSetChanger(irgs, indexCIDS, indexSCOCIs, indexDCOCIs, indexNames, baseCC, tc, lockMode, baseRowReadList, isolationLevel, activation);
            isc.setRowHolder(rowHolder);
        } else {
            /* Propagate the heap's ConglomerateController to
             * all of the underlying index changers.
             */
            isc.setBaseCC(baseCC);
        }
        isc.open(fixOnUpdate);
        // Lazily allocate the row location template used to address base
        // rows; reused across executions once created.
        if (baseRowLocation == null)
            baseRowLocation = baseCC.newRowLocationTemplate();
    }
    isOpen = true;
}
Use of org.apache.derby.shared.common.error.StandardException in the Apache Derby project.
From the class RawStore, method failover.
/**
 * Starts failover for this database: the slave takes over as master
 * after the current master has stopped replicating.
 *
 * @param dbname the name of the replicated database
 * @exception StandardException if the database is read-only (the log
 * module then cannot support replication), or if this database is not
 * currently booted in master replication mode
 *
 * @see org.apache.derby.iapi.store.raw.RawStoreFactory#failover(String dbname).
 */
public void failover(String dbname) throws StandardException {
    if (isReadOnly()) {
        throw StandardException.newException(SQLState.LOGMODULE_DOES_NOT_SUPPORT_REPLICATION);
    }
    MasterFactory masterFactory;
    try {
        masterFactory = (MasterFactory) findServiceModule(this, getMasterFactoryModule());
    } catch (StandardException se) {
        // The master factory module is only booted when this database is
        // in master mode. Preserve the lookup failure as the cause so the
        // underlying reason is not lost (the original code dropped it).
        throw StandardException.newException(SQLState.REPLICATION_NOT_IN_MASTER_MODE, se);
    }
    masterFactory.startFailover();
}
Use of org.apache.derby.shared.common.error.StandardException in the Apache Derby project.
From the class RawStore, method backup.
/**
 * Backup the database.
 * Online backup copies all the database files (log, seg0 ...Etc) to the
 * specified backup location without blocking any user operation for the
 * duration of the backup. Stable copy is made of each page using
 * page level latches and in some cases with the help of monitors.
 * Transaction log is also backed up, this is used to bring the database to
 * the consistent state on restore.
 *
 * <P> MT- only one thread is allowed to perform backup at any given time.
 * Synchronized on this. Parallel backups are not supported.
 *
 * @param t the transaction under which the backup is performed
 * @param backupDir the directory the backup copy is written to
 * @exception StandardException if the backup directory is invalid, a
 * file copy or rename fails, or an unexpected I/O error occurs
 */
public synchronized void backup(Transaction t, File backupDir) throws StandardException {
    if (!privExists(backupDir)) {
        // if backup dir does not exist, go ahead and create it.
        createBackupDirectory(backupDir);
    } else {
        // Refuse to back up onto a plain file, or into a directory that
        // is itself a database (it contains service.properties).
        if (!privIsDirectory(backupDir)) {
            throw StandardException.newException(SQLState.RAWSTORE_CANNOT_BACKUP_TO_NONDIRECTORY, (File) backupDir);
        }
        if (privExists(new File(backupDir, PersistentService.PROPERTIES_NAME))) {
            throw StandardException.newException(SQLState.RAWSTORE_CANNOT_BACKUP_INTO_DATABASE_DIRECTORY, (File) backupDir);
        }
    }
    // 'error' stays true until the whole backup has succeeded; the
    // finally block uses it to choose between cleanup (abort/restore the
    // previous backup) and finalization (drop the old copy, record
    // completion).
    boolean error = true;
    boolean renamed = false;
    boolean renameFailed = false;
    File oldbackup = null;
    File backupcopy = null;
    OutputStreamWriter historyFile = null;
    StorageFile dbHistoryFile = null;
    File backupHistoryFile = null;
    // Log position recorded before the backup starts; everything up to
    // this instant is captured by the checkpoint/data copy below.
    LogInstant backupInstant = logFactory.getFirstUnflushedInstant();
    try {
        // get name of the current db, ie. database directory of current db.
        // NOTE(review): 'dbase' appears unused — confirm before removing.
        StorageFile dbase = storageFactory.newStorageFile(null);
        String canonicalDbName = storageFactory.getCanonicalName();
        String dbname = StringUtil.shortDBName(canonicalDbName, storageFactory.getSeparator());
        // append to end of history file
        historyFile = privFileWriter(storageFactory.newStorageFile(BACKUP_HISTORY), true);
        backupcopy = new File(backupDir, dbname);
        logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_BACKUP_STARTED, canonicalDbName, getFilePath(backupcopy)));
        // check if a backup copy of this database already exists,
        if (privExists(backupcopy)) {
            // first make a backup of the backup by renaming it to
            // <dbname>.OLD; it is restored if this backup fails.
            oldbackup = new File(backupDir, dbname + ".OLD");
            if (privExists(oldbackup)) {
                if (privIsDirectory(oldbackup))
                    privRemoveDirectory(oldbackup);
                else
                    privDelete(oldbackup);
            }
            if (!privRenameTo(backupcopy, oldbackup)) {
                renameFailed = true;
                throw StandardException.newException(SQLState.RAWSTORE_ERROR_RENAMING_FILE, backupcopy, oldbackup);
            } else {
                logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_MOVED_BACKUP, getFilePath(backupcopy), getFilePath(oldbackup)));
                renamed = true;
            }
        }
        // create the backup database directory
        createBackupDirectory(backupcopy);
        dbHistoryFile = storageFactory.newStorageFile(BACKUP_HISTORY);
        backupHistoryFile = new File(backupcopy, BACKUP_HISTORY);
        // copy the history file into the backup.
        if (!privCopyFile(dbHistoryFile, backupHistoryFile))
            throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, dbHistoryFile, backupHistoryFile);
        // if there are any jar files stored in the database, copy them
        // into the backup.
        StorageFile jarDir = storageFactory.newStorageFile(FileResource.JAR_DIRECTORY_NAME);
        if (privExists(jarDir)) {
            // find the list of schema directories under the jar dir and
            // then copy only the plain files under those directories. One
            // could just use the recursive copy of directory to copy all
            // the files under the jar dir, but the problem with that is if
            // a user gives jar directory as the backup path by mistake,
            // copy will fail while copying the backup dir onto itself in
            // recursion
            String[] jarDirContents = privList(jarDir);
            File backupJarDir = new File(backupcopy, FileResource.JAR_DIRECTORY_NAME);
            // Create the backup jar directory
            createBackupDirectory(backupJarDir);
            // NOTE(review): getContextOrNull may return null and lcc is
            // dereferenced just below — confirm an LCC always exists on
            // this code path.
            LanguageConnectionContext lcc = (LanguageConnectionContext) getContextOrNull(LanguageConnectionContext.CONTEXT_ID);
            // DERBY-5357 UUIDs introduced in jar file names in 10.9
            boolean uuidSupported = lcc.getDataDictionary().checkVersion(DataDictionary.DD_VERSION_DERBY_10_9, null);
            if (uuidSupported) {
                // With UUID naming the jar dir holds plain files only —
                // no schema subdirectories.
                for (int i = 0; i < jarDirContents.length; i++) {
                    StorageFile jar = storageFactory.newStorageFile(jarDir, jarDirContents[i]);
                    File backupJar = new File(backupJarDir, jarDirContents[i]);
                    if (privIsDirectory(new File(jar.getPath()))) {
                        // We no longer expect directories inside 'jar'.
                        // Need this check to make the weird test #2 in
                        // BackupPathTests.java work: it does a backup of
                        // the db into its own(!) jar file directory, so
                        // trying to copy that db file into itself two
                        // levels down would fail.
                        continue;
                    }
                    if (!privCopyFile(jar, backupJar)) {
                        throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, jar, backupJar);
                    }
                }
            } else {
                // Pre-10.9 layout: one subdirectory per schema; copy each
                // schema directory (plain files only, no recursion).
                for (int i = 0; i < jarDirContents.length; i++) {
                    StorageFile jarSchemaDir = storageFactory.newStorageFile(jarDir, jarDirContents[i]);
                    File backupJarSchemaDir = new File(backupJarDir, jarDirContents[i]);
                    if (!privCopyDirectory(jarSchemaDir, backupJarSchemaDir, (byte[]) null, null, false)) {
                        throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, jarSchemaDir, backupJarSchemaDir);
                    }
                }
            }
        }
        // save service properties into the backup: read the properties
        // from the service.properties file, remove logDevice from them,
        // then write them to the backup (the backup's log always lives in
        // the default location).
        StorageFile logdir = logFactory.getLogDirectory();
        try {
            String name = getServiceName(this);
            PersistentService ps = getMonitor().getServiceType(this);
            String fullName = ps.getCanonicalServiceName(name);
            Properties prop = ps.getServiceProperties(fullName, (Properties) null);
            StorageFile defaultLogDir = storageFactory.newStorageFile(LogFactory.LOG_DIRECTORY_NAME);
            if (!logdir.equals(defaultLogDir)) {
                prop.remove(Attribute.LOG_DEVICE);
                if (SanityManager.DEBUG) {
                    SanityManager.ASSERT(prop.getProperty(Attribute.LOG_DEVICE) == null, "cannot get rid of logDevice property");
                }
                logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_EDITED_SERVICEPROPS));
            }
            // save the service properties into the backup.
            ps.saveServiceProperties(backupcopy.getPath(), prop);
        } catch (StandardException se) {
            logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_ERROR_EDIT_SERVICEPROPS) + se);
            // skip the rest and let finally block clean up
            return;
        }
        // In case of an encrypted database where the key is an external
        // encryption key, there is an extra file with name
        // Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE; this file should be
        // copied in to the backup.
        StorageFile verifyKeyFile = storageFactory.newStorageFile(Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE);
        if (privExists(verifyKeyFile)) {
            File backupVerifyKeyFile = new File(backupcopy, Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE);
            if (!privCopyFile(verifyKeyFile, backupVerifyKeyFile))
                throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, verifyKeyFile, backupVerifyKeyFile);
        }
        File logBackup = new File(backupcopy, LogFactory.LOG_DIRECTORY_NAME);
        // A leftover log directory here is unexpected (this is weird);
        // delete it before creating a fresh one.
        if (privExists(logBackup)) {
            privRemoveDirectory(logBackup);
        }
        // Create the log directory
        createBackupDirectory(logBackup);
        // do a checkpoint to get the persistent store up to date.
        logFactory.checkpoint(this, dataFactory, xactFactory, true);
        // start the transaction log backup.
        logFactory.startLogBackup(logBackup);
        File segBackup = new File(backupcopy, "seg0");
        // Create the data segment directory
        createBackupDirectory(segBackup);
        // backup all the information in the data segment.
        dataFactory.backupDataFiles(t, segBackup);
        logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_DATA_SEG_BACKUP_COMPLETED, getFilePath(segBackup)));
        // copy the log that got generated after the backup started to
        // backup location and tell the logfactory that backup has come
        // to end.
        logFactory.endLogBackup(logBackup);
        logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_COPIED_LOG, getFilePath(logdir), getFilePath(logBackup)));
        // Mark success so the finally block finalizes rather than aborts.
        error = false;
    } catch (IOException ioe) {
        throw StandardException.newException(SQLState.RAWSTORE_UNEXPECTED_EXCEPTION, ioe);
    } finally {
        try {
            // NOTE(review): if privFileWriter failed before assigning
            // historyFile, it is still null here and the calls below
            // would NPE — confirm that path is unreachable or add a
            // null guard.
            if (error) {
                // Abort all activity related to backup in the log factory.
                logFactory.abortLogBackup();
                // remove the half-completed backup copy so we do not
                // leave a half-baked one behind.
                if (!renameFailed)
                    privRemoveDirectory(backupcopy);
                if (renamed)
                    // recover the old backup
                    privRenameTo(oldbackup, backupcopy);
                logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_BACKUP_ABORTED));
            } else {
                // success, remove the old backup copy
                if (renamed && privExists(oldbackup)) {
                    // get rid of the old backup
                    privRemoveDirectory(oldbackup);
                    logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_REMOVED_BACKUP, getFilePath(oldbackup)));
                }
                logHistory(historyFile, MessageService.getTextMessage(MessageId.STORE_BACKUP_COMPLETED, backupInstant));
                // copy the updated history file (now containing the
                // completion record) into the backup.
                if (!privCopyFile(dbHistoryFile, backupHistoryFile))
                    throw StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE, dbHistoryFile, backupHistoryFile);
            }
            historyFile.close();
        } catch (IOException ioe) {
            try {
                historyFile.close();
            } catch (IOException ioe2) {
            }
            ;
            throw StandardException.newException(SQLState.RAWSTORE_UNEXPECTED_EXCEPTION, ioe);
        }
    }
}
Aggregations