Example usage of org.apache.derby.io.StorageFile in the Apache Derby project:
class BaseDataFileFactory, method run().
// PrivilegedExceptionAction method
/**
 * Dispatches the privileged file-system operation selected by the
 * {@code actionCode} field.  This object implements
 * {@code PrivilegedExceptionAction} so that all storage access runs
 * inside a {@code doPrivileged} block.
 *
 * @return an action-specific result: a {@code StorageFile} for the
 *         path actions, a boxed {@code Long} for
 *         FIND_MAX_CONTAINER_ID_ACTION, a {@code String} for
 *         GET_PATH_ACTION, a {@code String[]} for
 *         GET_CONTAINER_NAMES_ACTION, {@code this} as a success flag
 *         for DELETE_IF_EXISTS_ACTION, or {@code null}
 * @exception IOException if the underlying storage access fails
 * @exception StandardException standard Derby error policy
 */
public final Object run() throws IOException, StandardException {
    switch(actionCode) {
    case BOOT_ACTION:
        // Cache the storage factory's capabilities at boot time.
        readOnly = storageFactory.isReadOnlyDatabase();
        supportsRandomAccess = storageFactory.supportsRandomAccess();
        return null;
    case REMOVE_TEMP_DIRECTORY_ACTION:
        // Best effort: the temp directory may not exist.
        StorageFile tempDir = storageFactory.getTempDir();
        if (tempDir != null)
            tempDir.deleteAll();
        return null;
    case GET_CONTAINER_PATH_ACTION:
    case GET_ALTERNATE_CONTAINER_PATH_ACTION:
        {
            // Build "segN/<prefix><hex container id>.dat".  The primary
            // path uses lower case ('c' for containers, 'd' for stubs);
            // the alternate path uses the upper-case variants.
            StringBuilder sb = new StringBuilder("seg");
            sb.append(containerId.getSegmentId());
            sb.append(storageFactory.getSeparator());
            if (actionCode == GET_CONTAINER_PATH_ACTION) {
                sb.append(stub ? 'd' : 'c');
                sb.append(Long.toHexString(containerId.getContainerId()));
                sb.append(".dat");
            } else {
                sb.append(stub ? 'D' : 'C');
                sb.append(Long.toHexString(containerId.getContainerId()));
                sb.append(".DAT");
            }
            return storageFactory.newStorageFile(sb.toString());
        }
    case REMOVE_STUBS_ACTION:
        {
            char separator = storageFactory.getSeparator();
            StorageFile root = storageFactory.newStorageFile(null);
            // Get all the non-temporary data segments; they start with "seg".
            // list() returns null when the directory cannot be read; guard
            // against that instead of risking a NullPointerException.
            String[] segs = root.list();
            if (segs != null) {
                for (int s = segs.length - 1; s >= 0; s--) {
                    if (segs[s].startsWith("seg")) {
                        StorageFile seg = storageFactory.newStorageFile(root, segs[s]);
                        if (seg.exists() && seg.isDirectory()) {
                            String[] files = seg.list();
                            if (files == null)
                                continue;
                            for (int f = files.length - 1; f >= 0; f--) {
                                // Stub files start with 'D' or 'd'.
                                if (files[f].startsWith("D") || files[f].startsWith("d")) {
                                    StorageFile stub = storageFactory.newStorageFile(root, segs[s] + separator + files[f]);
                                    boolean delete_status = stub.delete();
                                    if (SanityManager.DEBUG) {
                                        // Existence was checked above, so the delete
                                        // is expected to succeed.
                                        if (!delete_status) {
                                            SanityManager.THROWASSERT("delete of stub (" + stub + ") failed.");
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
            break;
        }
    case FIND_MAX_CONTAINER_ID_ACTION:
        {
            long maxnum = 1;
            StorageFile seg = storageFactory.newStorageFile("seg0");
            if (seg.exists() && seg.isDirectory()) {
                // Names of all files in seg0; may be null if unreadable.
                String[] files = seg.list();
                if (files != null) {
                    // Scan for the maximum container id.  Container file
                    // names have the form <type char><hex id>.dat, so a
                    // candidate needs at least 5 characters.
                    for (int f = files.length - 1; f >= 0; f--) {
                        String name = files[f];
                        if (name.length() < 5)
                            continue;
                        try {
                            long fileNumber = Long.parseLong(name.substring(1, name.length() - 4), 16);
                            if (fileNumber > maxnum)
                                maxnum = fileNumber;
                        } catch (NumberFormatException nfe) {
                            // Ignore errors from parse; it just means that
                            // someone put an unexpected file in seg0.
                            // Continue with the next one.
                        }
                    }
                }
            }
            return maxnum;
        }
    case DELETE_IF_EXISTS_ACTION:
        {
            // Delete actionFile if present; signal success by returning this.
            boolean ret = actionFile.exists() && actionFile.delete();
            actionFile = null;
            return ret ? this : null;
        }
    case GET_PATH_ACTION:
        {
            String path = actionFile.getPath();
            actionFile = null;
            return path;
        }
    case POST_RECOVERY_REMOVE_ACTION:
        {
            // Remove stub files that recovery determined are no longer needed.
            for (Enumeration<StorageFile> e = postRecoveryRemovedFiles.elements(); e.hasMoreElements(); ) {
                StorageFile f = e.nextElement();
                if (f.exists()) {
                    boolean delete_status = f.delete();
                    if (SanityManager.DEBUG) {
                        // Existence was checked above, so the delete is
                        // expected to succeed.
                        if (!delete_status) {
                            // Fixed: report the file being deleted ("f") rather
                            // than the unrelated boolean field "stub", which
                            // previously printed "true"/"false" in the message.
                            SanityManager.THROWASSERT("delete of stub (" + f + ") failed.");
                        }
                    }
                }
            }
            return null;
        }
    case GET_LOCK_ON_DB_ACTION:
        privGetJBMSLockOnDB();
        return null;
    case RELEASE_LOCK_ON_DB_ACTION:
        privReleaseJBMSLockOnDB();
        return null;
    case RESTORE_DATA_DIRECTORY_ACTION:
        privRestoreDataDirectory();
        return null;
    case GET_CONTAINER_NAMES_ACTION:
        {
            StorageFile seg = storageFactory.newStorageFile("seg0");
            if (seg.exists() && seg.isDirectory()) {
                // Return the names of all files in seg0.
                return seg.list();
            }
            return null;
        }
    }
    return null;
}
Example usage of org.apache.derby.io.StorageFile in the Apache Derby project:
class BaseDataFileFactory, method luceneLoaded().
/**
 * Check whether the Lucene plugin is loaded by probing, inside a
 * privileged block, for the Lucene directory in the database.
 *
 * @return true if the Lucene directory exists
 * @exception StandardException wrapping any exception raised by the
 *            privileged action
 */
public boolean luceneLoaded() throws StandardException {
    // Name the action up front; the doPrivileged call stays minimal.
    PrivilegedExceptionAction<Boolean> probeLuceneDir = new PrivilegedExceptionAction<Boolean>() {
        public Boolean run() {
            StorageFile luceneDir = getStorageFactory().newStorageFile(Database.LUCENE_DIR);
            return luceneDir.exists();
        }
    };
    try {
        return AccessController.doPrivileged(probeLuceneDir).booleanValue();
    } catch (PrivilegedActionException pae) {
        throw StandardException.plainWrapException(pae);
    }
}
Example usage of org.apache.derby.io.StorageFile in the Apache Derby project:
class RAFContainer, method encryptOrDecryptContainer().
/**
* Creates encrypted or decrypted version of the container.
*
* Reads all the pages of the container from the original container
* through the page cache, then either encrypts page data with the new
* encryption mechanism or leaves the page data un-encrypted, and finally
* writes the data to the specified new container file.
* <p>
* The encryption and decryption engines used to carry out the
* cryptographic operation(s) are configured through the raw store, and
* accessed via the data factory. Note that the pages have already been
* decrypted before being put into the page cache.
*
* @param handle the container handle
* @param newFilePath file to store the new version of the container in
* @param doEncrypt tells whether to encrypt or not
* @exception StandardException Derby Standard error policy
*/
protected void encryptOrDecryptContainer(BaseContainerHandle handle, String newFilePath, boolean doEncrypt) throws StandardException {
    // Tracks the currently latched page so the finally clause can
    // release it if an exception escapes mid-loop.
    BasePage page = null;
    StorageFile newFile = dataFactory.getStorageFactory().newStorageFile(newFilePath);
    StorageRandomAccessFile newRaf = null;
    try {
        long lastPageNumber = getLastPageNumber(handle);
        newRaf = getRandomAccessFile(newFile);
        byte[] encryptionBuf = null;
        if (doEncrypt) {
            // Scratch buffer reused for each page's encrypted image.
            encryptionBuf = new byte[pageSize];
        }
        // Copy every page, in page-number order starting at the first
        // allocation page, from the page cache into the new container file.
        for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER; pageNumber <= lastPageNumber; pageNumber++) {
            page = getLatchedPage(handle, pageNumber);
            // Update the page array before writing to the disk.
            // An update consists of adding the container header, or
            // (re-)encrypting the data.
            byte[] dataToWrite = updatePageArray(pageNumber, page.getPageArray(), encryptionBuf, true);
            newRaf.write(dataToWrite, 0, pageSize);
            // unlatch releases page from cache.
            page.unlatch();
            page = null;
        }
        // Sync the new version of the container to disk, close it, and
        // null the reference so the finally clause does not close it again.
        newRaf.sync();
        newRaf.close();
        newRaf = null;
    } catch (IOException ioe) {
        throw StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", doEncrypt ? "encrypt" : "decrypt", newFilePath);
    } finally {
        // Release any page still latched when an exception escaped the loop.
        if (page != null) {
            page.unlatch();
            page = null;
        }
        // Close the new file if the try block did not get to do it.
        if (newRaf != null) {
            try {
                newRaf.close();
            } catch (IOException ioe) {
                // NOTE(review): throwing from finally masks any exception
                // already propagating from the try block — confirm this is
                // the intended precedence.
                newRaf = null;
                throw StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", doEncrypt ? "encrypt-close" : "decrypt-close", newFilePath);
            }
        }
    }
}
Example usage of org.apache.derby.io.StorageFile in the Apache Derby project:
class RAFContainer, method run().
// PrivilegedExceptionAction method
/**
 * Dispatches the privileged container-file operation selected by the
 * {@code actionCode} field.  Runs inside a {@code doPrivileged} block so
 * all file-system access is performed with the engine's privileges.
 *
 * @return an action-specific result: a {@code StorageFile} for
 *         GET_FILE_NAME_ACTION, a random-access file object for
 *         GET_RANDOM_ACCESS_FILE_ACTION, {@code this} as a success
 *         flag for the remove/open/reopen actions, or {@code null}
 * @exception StandardException standard Derby error policy
 */
public Object run() throws StandardException {
    switch(actionCode) {
    case GET_FILE_NAME_ACTION:
        // Resolve the container (or stub) file for actionIdentity.
        return privGetFileName(actionIdentity, actionStub, actionErrorOK, actionTryAlternatePath);
    case CREATE_CONTAINER_ACTION:
        {
            StorageFile file = privGetFileName(actionIdentity, false, false, false);
            try {
                if (file.exists()) {
                    // A file for this container already exists; refuse to
                    // overwrite it.
                    throw StandardException.newException(SQLState.FILE_EXISTS, file);
                }
            } catch (SecurityException se) {
                throw StandardException.newException(SQLState.FILE_CREATE, se, file);
            }
            try {
                // OK not to force WAL here, in fact, this operation
                // preceeds the creation of the log record to ensure
                // sufficient space.
                dataFactory.writeInProgress();
                try {
                    fileData = file.getRandomAccessFile("rw");
                    file.limitAccessToOwner();
                } finally {
                    dataFactory.writeFinished();
                }
                // This container format specifies that the first page is
                // an allocation page and the container information is
                // stored within it. The allocation page needs to be
                // somewhat formatted because if the system crashed after
                // the create container log operation is written, it needs
                // to be well formed enough to get the container
                // information back out of it.
                //
                // Don't try to go thru the page cache here because the
                // container object cannot be found in the container cache
                // at this point yet. However, if we use the page cache
                // to store the first allocation page, then in order to
                // write itself out, it needs to ask the container to do
                // so, which is going to create a deadlock. The
                // allocation page cannot write itself out without going
                // thru the container because it doesn't know where its
                // offset is. Here we effectively hardwire page 0 at
                // offset 0 of the container file to be the first
                // allocation page.
                // create an embryonic page - if this is not a temporary
                // container, synchronously write out the file header.
                // Need to set it now. After writeRAFHeader
                canUpdate = true;
                // may be too late in case that method's IO
                // is interrupted and container needs
                // reopening. To get the correct "rw" mode
                // we need canUpdate to be true.
                writeRAFHeader(actionIdentity, fileData, true, (actionIdentity.getSegmentId() != ContainerHandle.TEMPORARY_SEGMENT));
            } catch (IOException ioe) {
                // Creation failed: try to clean up the partially created file.
                canUpdate = false;
                boolean fileDeleted;
                try {
                    fileDeleted = privRemoveFile(file);
                } catch (SecurityException se) {
                    throw StandardException.newException(SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, se.toString());
                }
                if (!fileDeleted) {
                    throw StandardException.newException(SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, ioe.toString());
                }
                throw StandardException.newException(SQLState.FILE_CREATE, ioe, file);
            }
            return null;
        }
    case REMOVE_FILE_ACTION:
        // Success is signalled by returning this; failure by null.
        return privRemoveFile(actionFile) ? this : null;
    case OPEN_CONTAINER_ACTION:
        {
            // is this a stub?
            boolean isStub = false;
            StorageFile file = privGetFileName(actionIdentity, false, true, true);
            if (file == null)
                return null;
            try {
                if (!file.exists()) {
                    // file does not exist, may be it has been stubbified
                    file = privGetFileName(actionIdentity, true, true, true);
                    if (!file.exists())
                        return null;
                    isStub = true;
                }
            } catch (SecurityException se) {
                throw StandardException.newException(SQLState.DATA_UNEXPECTED_EXCEPTION, se);
            }
            canUpdate = false;
            try {
                if (!dataFactory.isReadOnly() && file.canWrite())
                    canUpdate = true;
            } catch (SecurityException se) {
                // just means we can't write to it.
            }
            fileName = file.toString();
            try {
                fileData = file.getRandomAccessFile(canUpdate ? "rw" : "r");
                // Read the container header from the embryonic first page.
                readHeader(getEmbryonicPage(fileData, FIRST_ALLOC_PAGE_OFFSET));
                if (SanityManager.DEBUG) {
                    if (isStub)
                        SanityManager.ASSERT(getDroppedState() && getCommittedDropState(), "a stub failed to set drop state");
                }
            } catch (IOException ioe) {
                if (isStub) {
                    // Already fell back to the stub and still cannot read:
                    // the database is corrupt.
                    throw dataFactory.markCorrupt(StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", "read", fileName));
                }
                // maybe it is being stubbified... try that
                StorageFile stub = privGetFileName(actionIdentity, true, true, true);
                if (stub.exists()) {
                    try {
                        // The stub exists: remove the unreadable container
                        // file and open the stub instead.
                        boolean delete_status = privRemoveFile(file);
                        if (SanityManager.DEBUG) {
                            if (!delete_status) {
                                SanityManager.THROWASSERT("delete of file (" + file + ") failed.");
                            }
                        }
                        fileData = stub.getRandomAccessFile(canUpdate ? "rw" : "r");
                        readHeader(getEmbryonicPage(fileData, FIRST_ALLOC_PAGE_OFFSET));
                    } catch (IOException ioe2) {
                        throw dataFactory.markCorrupt(StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe2, getIdentity() != null ? getIdentity().toString() : "unknown", "delete-stub", fileName));
                    }
                    // RESOLVE: this is a temporary hack
                } else
                    throw dataFactory.markCorrupt(StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", "read", fileName));
            }
            return this;
        }
    // end of case OPEN_CONTAINER_ACTION
    case REOPEN_CONTAINER_ACTION:
        {
            StorageFile file = privGetFileName(actionIdentity, false, true, true);
            // Serialize reopen against other users of fileData on this container.
            synchronized (this) {
                try {
                    fileData = file.getRandomAccessFile(canUpdate ? "rw" : "r");
                } catch (FileNotFoundException ioe) {
                    throw dataFactory.markCorrupt(StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe, (getIdentity() != null ? getIdentity().toString() : "unknown"), "read", fileName));
                }
            }
            return this;
        }
    case STUBBIFY_ACTION:
        {
            StorageFile file = privGetFileName(actionIdentity, false, false, true);
            StorageFile stub = privGetFileName(actionIdentity, true, false, false);
            StorageRandomAccessFile stubData = null;
            try {
                if (!stub.exists()) {
                    // write the header to the stub
                    stubData = stub.getRandomAccessFile("rw");
                    stub.limitAccessToOwner();
                    writeRAFHeader(actionIdentity, stubData, true, /* create */
                    true);
                    /* sync */
                    stubData.close();
                    stubData = null;
                }
                // Force WAL and check for database corruption before removing file.
                // This is one operation where the container is changed on disk
                // directly without going thru the container cache, which otherwise
                // would have force WAL. Take care of it here.
                dataFactory.flush(actionInstant);
                // try to remove the container file
                // fileDate is not null only if we are redoing a removeContainer
                // (stubbify) operation. Then fileData acutally is opened against
                // the stub and the original container file does not exist.
                // Then we need to close it here because this method is called by
                // cache.remove and nobody will be able to see fileData after this.
                privRemoveFile(file);
            } catch (SecurityException se) {
                throw StandardException.newException(SQLState.FILE_CANNOT_REMOVE_FILE, se, file, se.toString());
            } catch (IOException ioe) {
                // Clean up the (half-baked) stub and close the container.
                // NOTE(review): if this cleanup succeeds, the original
                // IOException is not rethrown and stubbify proceeds to
                // register the stub below — confirm this best-effort
                // behavior is intentional.
                try {
                    if (stubData != null) {
                        stubData.close();
                        stub.delete();
                        stubData = null;
                    }
                    if (fileData != null) {
                        fileData.close();
                        fileData = null;
                    }
                } catch (IOException ioe2) {
                    throw StandardException.newException(SQLState.FILE_CANNOT_REMOVE_FILE, ioe2, file, ioe.toString());
                } catch (SecurityException se) {
                    throw StandardException.newException(SQLState.FILE_CANNOT_REMOVE_FILE, se, file, se.toString());
                }
            }
            // let the data factory know about this the stub file;It
            // could remove when next checkpoint occurs if it's not necessary for recovery
            dataFactory.stubFileToRemoveAfterCheckPoint(stub, actionInstant, getIdentity());
            return null;
        }
    case GET_RANDOM_ACCESS_FILE_ACTION:
        {
            try {
                // Remember whether the file existed before opening it "rw"
                // (which creates it), so access is restricted to the owner
                // only on first creation.
                boolean exists = actionFile.exists();
                Object result = actionFile.getRandomAccessFile("rw");
                if (!exists) {
                    actionFile.limitAccessToOwner();
                }
                return result;
            } catch (IOException ioe) {
                throw StandardException.newException(SQLState.FILE_CREATE, ioe, actionFile.getPath());
            }
        }
    }
    // end of switch
    return null;
}
Example usage of org.apache.derby.io.StorageFile in the Apache Derby project:
class StreamFileContainer, method getFileName().
/**
 * Return a file name for the identity.
 * <p>
 * Return a valid file name for the identity, or null if the data
 * directory for this segment cannot be created (and errorOK is true,
 * or the caller is not creating the container).
 *
 * @param identity  the container key to resolve
 * @param forCreate true if the caller intends to create the container
 * @param errorOK   if true, return null instead of throwing when the
 *                  segment directory cannot be created
 * @exception StandardException Segment directory cannot be created
 */
protected StorageFile getFileName(ContainerKey identity, boolean forCreate, boolean errorOK) throws StandardException {
    // Temporary containers live in the temp directory as "T<id>.tmp".
    if (identity.getSegmentId() == StreamContainerHandle.TEMPORARY_SEGMENT) {
        return dataFactory.storageFactory.newStorageFile(dataFactory.storageFactory.getTempDir(), "T" + identity.getContainerId() + ".tmp");
    }
    if (SanityManager.DEBUG)
        SanityManager.THROWASSERT("cannot create stream container in non-temp segments yet.");
    StorageFile container = dataFactory.getContainerPath(identity, false);
    if (privExists(container))
        return container;
    if (!forCreate)
        return null;
    StorageFile directory = container.getParentDir();
    if (!privExists(directory)) {
        // Make sure only one thread can create a segment at a time;
        // re-check existence once inside the lock.
        synchronized (dataFactory) {
            if (!privExists(directory)) {
                boolean created = false;
                IOException ex = null;
                try {
                    created = privMkdirs(directory);
                } catch (IOException ioe) {
                    ex = ioe;
                }
                if (!created) {
                    if (errorOK)
                        return null;
                    throw StandardException.newException(SQLState.FILE_CANNOT_CREATE_SEGMENT, ex, directory);
                }
            }
        }
    }
    return container;
}
Aggregations