use of org.apache.derby.io.StorageFile in project derby by apache.
the class LogToFile method backupLogFiles.
/**
 * Copy the log files into the given backup location.
 *
 * @param toDir               location to copy the log files to
 * @param lastLogFileToBackup last log file that needs to be copied
 * @exception StandardException Standard Error Policy
 */
private void backupLogFiles(File toDir, long lastLogFileToBackup) throws StandardException {
    // logFileToBackup is an instance field tracking the next log file
    // to copy; it advances as each file is copied successfully.
    while (logFileToBackup <= lastLogFileToBackup) {
        StorageFile fromFile = getLogFileName(logFileToBackup);
        File toFile = new File(toDir, fromFile.getName());
        if (!privCopyFile(fromFile, toFile)) {
            throw StandardException.newException(
                    SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile, toFile);
        }
        logFileToBackup++;
    }
}
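
privCopyFile is Derby's privileged wrapper around a byte-for-byte file copy. For readers outside the Derby codebase, a minimal sketch of a helper with the same boolean-return contract, written against plain java.nio (the CopySketch class name is made up for illustration):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

final class CopySketch {
    // Returns false instead of throwing, mirroring privCopyFile's contract
    // of letting the caller raise the StandardException.
    static boolean copyFile(File from, File to) {
        try {
            Files.copy(from.toPath(), to.toPath(),
                       StandardCopyOption.REPLACE_EXISTING);
            return true;
        } catch (IOException ioe) {
            return false;
        }
    }
}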
use of org.apache.derby.io.StorageFile in project derby by apache.
the class LogToFile method createLogDirectory.
/**
 * Create the directory where the transaction log should go.
 * @exception StandardException Standard Error Policy
 */
private void createLogDirectory() throws StandardException {
    StorageFile logDir =
        logStorageFactory.newStorageFile(LogFactory.LOG_DIRECTORY_NAME);
    if (privExists(logDir)) {
        // make sure the log directory is empty.
        String[] logfiles = privList(logDir);
        if (logfiles != null && logfiles.length != 0) {
            throw StandardException.newException(
                    SQLState.LOG_SEGMENT_EXIST, logDir.getPath());
        }
    } else {
        // create the log directory.
        IOException ex = null;
        boolean created = false;
        try {
            created = privMkdirs(logDir);
        } catch (IOException ioe) {
            ex = ioe;
        }
        if (!created) {
            throw StandardException.newException(
                    SQLState.LOG_SEGMENT_NOT_EXIST, ex, logDir.getPath());
        }
        createDataWarningFile();
    }
}
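
For comparison, the same check-then-create logic can be expressed against plain java.nio.file, without Derby's StorageFactory abstraction or privileged blocks; a minimal sketch (class name and exception wording are illustrative):

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;

final class LogDirSketch {
    static void createLogDirectory(Path logDir) throws IOException {
        if (Files.exists(logDir)) {
            // an existing log directory must be empty
            try (DirectoryStream<Path> ds = Files.newDirectoryStream(logDir)) {
                if (ds.iterator().hasNext()) {
                    throw new IOException("log segment already exists: " + logDir);
                }
            }
        } else {
            // create the directory (and any missing parents)
            Files.createDirectories(logDir);
        }
    }
}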
use of org.apache.derby.io.StorageFile in project derby by apache.
the class RAFContainer method backupContainer.
/**
 * Back up the container.
 *
 * The container is written to the backup by reading its pages
 * through the page cache and writing them into the backup container.
 * If the container has been dropped (a committed drop), only the
 * container stub is copied to the backup, using a simple file copy.
 *
 * MT -
 * At any given time only one backup thread is allowed, but while a backup
 * is in progress DML/DDL operations can run in parallel. Pages are latched
 * while being written to the backup to avoid copying partial changes.
 * Online backup does not acquire any user-level locks, so users can drop
 * tables while a backup is in progress. It is therefore possible for a
 * container-removal request to arrive while the container backup is in
 * progress. This case is handled by synchronizing on this object's monitor
 * and using the inRemove and inBackup flags. Container removal checks
 * whether a backup is in progress and waits for the backup to yield before
 * continuing the removal. The basic idea is to give preference to the
 * remove by temporarily stopping the backup of the container when another
 * thread requests the removal. Backing up a regular container generally
 * takes longer than backing up a stub, because a stub is just one page.
 * After each page copy, a check is made for a pending remove request; if
 * one is found, the backup of the container is aborted and the backup
 * thread waits until the removing thread signals that the remove is
 * complete. Once the remove completes, the stub is copied into the backup.
 *
 * Compress is blocked while a backup is in progress, so truncation of the
 * container cannot happen during a backup. There is no need to synchronize
 * backup of the container with truncation.
 *
 * @param handle the container handle.
 * @param backupLocation location of the backup container.
 * @exception StandardException Derby Standard error policy
 */
protected void backupContainer(BaseContainerHandle handle, String backupLocation)
        throws StandardException {
    boolean backupCompleted = false;
    File backupFile = null;
    RandomAccessFile backupRaf = null;
    boolean isStub = false;
    BasePage page = null;
    while (!backupCompleted) {
        try {
            synchronized (this) {
                // wait if someone is removing the
                // container because of a drop.
                while (inRemove) {
                    try {
                        wait();
                    } catch (InterruptedException ie) {
                        InterruptStatus.setInterrupted();
                    }
                }
                if (getCommittedDropState())
                    isStub = true;
                inBackup = true;
            }
            // create the container at the backup location.
            if (isStub) {
                // get the stub (it is a committed-drop table container)
                StorageFile file =
                    getFileName((ContainerKey) getIdentity(), true, false, true);
                backupFile = new File(backupLocation, file.getName());
                // directly copy the stub to the backup
                copyFile(file, backupFile);
            } else {
                // regular container file
                long lastPageNumber = getLastPageNumber(handle);
                if (lastPageNumber == ContainerHandle.INVALID_PAGE_NUMBER) {
                    // The container has no pages yet; its creation has not
                    // completed, so there is nothing to back up.
                    return;
                }
                StorageFile file =
                    getFileName((ContainerKey) getIdentity(), false, false, true);
                backupFile = new File(backupLocation, file.getName());
                backupRaf = getRandomAccessFile(backupFile);
                byte[] encryptionBuf = null;
                if (dataFactory.databaseEncrypted()) {
                    // Backup uses a separate encryption buffer to encrypt the
                    // page instead of the encryption buffer used by regular
                    // container writes. Otherwise writes to the backup would
                    // have to be synchronized with regular database writes,
                    // because the backup can run in parallel with container
                    // writes.
                    encryptionBuf = new byte[pageSize];
                }
                // copy all the pages of the container from the database
                // to the backup location by reading through the page cache.
                for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER;
                        pageNumber <= lastPageNumber; pageNumber++) {
                    page = getLatchedPage(handle, pageNumber);
                    // update the page array before writing to the disk
                    // with the container header, and encrypt it if the
                    // database is encrypted.
                    byte[] dataToWrite = updatePageArray(
                            pageNumber, page.getPageArray(), encryptionBuf, false);
                    backupRaf.write(dataToWrite, 0, pageSize);
                    // unlatch releases the page from the cache, see
                    // StoredPage.releaseExclusive()
                    page.unlatch();
                    page = null;
                    // stop copying if a remove (committed drop) has been
                    // requested while the container is being backed up.
                    synchronized (this) {
                        if (inRemove) {
                            break;
                        }
                    }
                }
            }
            // sync and close the backup container; in the case of a stub,
            // it is already synced and closed while doing the copy.
            if (!isStub) {
                backupRaf.getFD().sync();
                backupRaf.close();
                backupRaf = null;
            }
            // backup of the container is complete.
            backupCompleted = true;
        } catch (IOException ioe) {
            throw StandardException.newException(
                    SQLState.BACKUP_FILE_IO_ERROR, ioe, backupFile);
        } finally {
            synchronized (this) {
                inBackup = false;
                notifyAll();
            }
            if (page != null) {
                page.unlatch();
                page = null;
            }
            // if the backup of the container did not complete, close the
            // file handle and remove the container file from the backup
            // if it exists
            if (!backupCompleted && backupFile != null) {
                if (backupRaf != null) {
                    try {
                        backupRaf.close();
                        backupRaf = null;
                    } catch (IOException ioe) {
                        throw StandardException.newException(
                                SQLState.BACKUP_FILE_IO_ERROR, ioe, backupFile);
                    }
                }
                removeFile(backupFile);
            }
        }
    }
}
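
The remove/backup handshake described in the comment above is a classic two-flag monitor pattern. A self-contained sketch with the Derby-specific work stripped out (class and method names here are illustrative, not Derby's API):

final class BackupRemoveMonitor {
    private boolean inBackup;
    private boolean inRemove;

    // backup thread: called before starting to copy the container
    synchronized void beginBackup() throws InterruptedException {
        while (inRemove) {
            wait();              // yield until the remover is done
        }
        inBackup = true;
    }

    // backup thread: called when the copy finishes or aborts
    synchronized void endBackup() {
        inBackup = false;
        notifyAll();             // wake a remover waiting for the backup
    }

    // remover thread: drop the container, taking priority over backup
    synchronized void remove() throws InterruptedException {
        inRemove = true;         // signal the backup to yield
        while (inBackup) {
            wait();              // wait for the backup thread to stop
        }
        try {
            // ... remove the container files here ...
        } finally {
            inRemove = false;
            notifyAll();         // let the backup restart (as a stub copy)
        }
    }
}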
use of org.apache.derby.io.StorageFile in project derby by apache.
the class RemoveFile method add.
/**
 * @see FileResource#add
 * @exception StandardException Oops
 */
public long add(String name, InputStream source) throws StandardException {
    OutputStream os = null;
    if (factory.isReadOnly()) {
        throw StandardException.newException(SQLState.FILE_READ_ONLY);
    }
    long generationId = factory.getNextId();
    try {
        StorageFile file = getAsFile(name, generationId);
        if (file.exists()) {
            throw StandardException.newException(SQLState.FILE_EXISTS, file);
        }
        ContextManager cm =
            FileContainer.getContextService().getCurrentContextManager();
        RawTransaction tran =
            factory.getRawStoreFactory().getXactFactory().findUserTransaction(
                factory.getRawStoreFactory(), cm,
                AccessFactoryGlobals.USER_TRANS_NAME);
        // Block the backup; if a backup is already in progress, wait for
        // it to finish. Jar files are unlogged, but the changes to the
        // references to the jar file in the catalogs are logged. A
        // consistent backup cannot be made while a jar file is being
        // added.
        tran.blockBackup(true);
        StorageFile directory = file.getParentDir();
        StorageFile parentDir = directory.getParentDir();
        boolean pdExisted = parentDir.exists();
        if (!directory.exists()) {
            if (!directory.mkdirs()) {
                throw StandardException.newException(
                        SQLState.FILE_CANNOT_CREATE_SEGMENT, directory);
            }
            directory.limitAccessToOwner();
            if (!pdExisted) {
                parentDir.limitAccessToOwner();
            }
        }
        os = file.getOutputStream();
        byte[] data = new byte[4096];
        int len;
        factory.writeInProgress();
        try {
            while ((len = source.read(data)) != -1) {
                os.write(data, 0, len);
            }
            factory.writableStorageFactory.sync(os, false);
        } finally {
            factory.writeFinished();
        }
    } catch (IOException ioe) {
        throw StandardException.newException(
                SQLState.FILE_UNEXPECTED_EXCEPTION, ioe);
    } finally {
        try {
            if (os != null) {
                os.close();
            }
        } catch (IOException ioe2) {
            /* RESOLVE: Why ignore this? */
        }
        try {
            if (source != null)
                source.close();
        } catch (IOException ioe2) {
            /* RESOLVE: Why ignore this? */
        }
    }
    return generationId;
}
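
Stripped of Derby's StorageFactory indirection, the copy-and-sync portion of add is an ordinary durable write; a sketch using only java.io (the helper name is made up):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;

final class DurableCopySketch {
    // Copies source to target and forces the bytes to stable storage.
    static void copyAndSync(InputStream source, File target) throws IOException {
        try (FileOutputStream os = new FileOutputStream(target)) {
            byte[] buf = new byte[4096];
            int len;
            while ((len = source.read(buf)) != -1) {
                os.write(buf, 0, len);
            }
            // rough equivalent of writableStorageFactory.sync(os, false)
            os.getFD().sync();
        }
    }
}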
use of org.apache.derby.io.StorageFile in project derby by apache.
the class RemoveFile method removeJarDir.
/**
 * @see FileResource#removeJarDir
 */
public void removeJarDir(String f) throws StandardException {
    if (factory.isReadOnly())
        throw StandardException.newException(SQLState.FILE_READ_ONLY);
    ContextManager cm =
        FileContainer.getContextService().getCurrentContextManager();
    RawTransaction tran =
        factory.getRawStoreFactory().getXactFactory().findUserTransaction(
            factory.getRawStoreFactory(), cm,
            AccessFactoryGlobals.USER_TRANS_NAME);
    StorageFile ff = factory.storageFactory.newStorageFile(f);
    Serviceable s = new RemoveFile(ff);
    // Since this code is only used during upgrade to post-10.8 databases,
    // we do not bother to build a special RemoveDirOperation and do
    // tran.logAndDo (cf. the logic in #remove). If the post-commit removal
    // does not get completed, that is no big issue; the dirs can be removed
    // by hand if need be. A prudent DBA will rerun the upgrade from a
    // backup if something crashes anyway.
    tran.addPostCommitWork(s);
}
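
addPostCommitWork queues the Serviceable so the directory is deleted only after the transaction commits. The deferral pattern itself can be sketched without Derby's transaction machinery (the PostCommitQueue type below is hypothetical, not Derby's API):

import java.util.ArrayDeque;
import java.util.Deque;

final class PostCommitQueue {
    private final Deque<Runnable> work = new ArrayDeque<>();

    // work items are queued during the transaction ...
    void addPostCommitWork(Runnable r) {
        work.add(r);
    }

    // ... and drained only once the commit is durable
    void onCommit() {
        Runnable r;
        while ((r = work.poll()) != null) {
            r.run();   // e.g. delete the jar directory
        }
    }
}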