Use of org.apache.derby.io.StorageFile in project derby by apache.
The class LuceneSupport, method createOrRecreateIndex.
/**
 * Create or re-create a Lucene index on the specified column.
 *
 * @param conn Connection to the database holding the indexed table
 * @param schema The schema of the column to index
 * @param table The table of the column to index
 * @param textcol The column to create the Lucene index on
 * @param indexDescriptorMaker Name of a static method which instantiates the index configuration. May be null.
 * @param create True if the index is to be created, false if it is to be recreated
 * @param keyColumns Names of the key columns used to join index results back to Derby rows. May be empty.
 * @throws SQLException
 * @throws IOException
 */
private static void createOrRecreateIndex(
        Connection conn,
        String schema,
        String table,
        String textcol,
        String indexDescriptorMaker,
        boolean create,
        String... keyColumns)
    throws SQLException, IOException, PrivilegedActionException {

    VTITemplate.ColumnDescriptor[] primaryKeys = new VTITemplate.ColumnDescriptor[0];
    // can't override keys when the index is updated
    if (!create) {
        primaryKeys = getKeys(conn, schema, table, textcol);
    } else if ((keyColumns != null) && (keyColumns.length > 0)) {
        // use the supplied keys if possible
        primaryKeys = getKeys(conn, schema, table, keyColumns);
    } else {
        primaryKeys = getPrimaryKeys(conn, schema, table);
    }
    // can't create an index without specifying keys for joining it back to Derby data
    if (primaryKeys.length == 0) {
        throw ToolUtilities.newSQLException(SQLState.LUCENE_NO_PRIMARY_KEY);
    }
    // don't let the user create a table function with duplicate column names
    vetColumnName(ToolUtilities.derbyIdentifier(textcol));
    for (VTITemplate.ColumnDescriptor key : primaryKeys) {
        vetColumnName(key.columnName);
    }
    int keyCount = 0;
    StorageFile propertiesFile = getIndexPropertiesFile(conn, schema, table, textcol);
    // drop the old index directories if we are re-creating the index
    if (!create) {
        dropIndexDirectories(schema, table, textcol);
    }
    Version luceneVersion = LuceneUtils.currentVersion();
    // create the new directory
    DerbyLuceneDir derbyLuceneDir = getDerbyLuceneDir(conn, schema, table, textcol);
    // get the Analyzer and the field names; use the default if the user didn't specify an override
    if (indexDescriptorMaker == null) {
        indexDescriptorMaker = LuceneUtils.class.getName() + ".defaultIndexDescriptor";
    }
    LuceneIndexDescriptor indexDescriptor = getIndexDescriptor(indexDescriptorMaker);
    String[] fieldNames = indexDescriptor.getFieldNames();
    Analyzer analyzer = indexDescriptor.getAnalyzer();
    // make sure the field names don't overlap with the key names
    sortAndVetFieldNames(fieldNames, primaryKeys);
    Properties indexProperties = new Properties();
    indexProperties.setProperty(LUCENE_VERSION, luceneVersion.toString());
    indexProperties.setProperty(UPDATE_TIMESTAMP, Long.toString(System.currentTimeMillis()));
    indexProperties.setProperty(INDEX_DESCRIPTOR_MAKER, indexDescriptorMaker);
    indexProperties.setProperty(ANALYZER, analyzer.getClass().getName());
    StringBuilder tableFunction = new StringBuilder();
    tableFunction.append("create function " + makeTableFunctionName(schema, table, textcol) + "\n");
    tableFunction.append("( query varchar( 32672 ), windowSize int, scoreCeiling real )\n");
    tableFunction.append("returns table\n(");
    writeIndexProperties(propertiesFile, indexProperties);
    PreparedStatement ps = null;
    ResultSet rs = null;
    IndexWriter iw = null;
    try {
        iw = getIndexWriter(luceneVersion, analyzer, derbyLuceneDir);
        // select all keys and the textcol from this table, add to lucene index
        StringBuilder query = new StringBuilder("select ");
        for (VTITemplate.ColumnDescriptor keyDesc : primaryKeys) {
            String keyName = delimitID(keyDesc.columnName);
            if (keyCount > 0) {
                query.append(", ");
            }
            query.append(keyName);
            String keyType = mapType(keyDesc);
            if (keyCount > 0) {
                tableFunction.append(",");
            }
            tableFunction.append("\n\t" + keyName + " " + keyType);
            keyCount++;
        }
        tableFunction.append(",\n\t" + DOCUMENT_ID + " int");
        tableFunction.append(",\n\t" + SCORE + " real");
        tableFunction.append("\n)\nlanguage java parameter style derby_jdbc_result_set contains sql\n");
        tableFunction.append("external name '" + LuceneSupport.class.getName() + ".luceneQuery'");
        // now create the table function for this text column
        if (create) {
            conn.prepareStatement(tableFunction.toString()).execute();
        }
        query.append(", ");
        query.append(delimitID(ToolUtilities.derbyIdentifier(textcol)));
        query.append(" from " + makeTableName(schema, table));
        ps = conn.prepareStatement(query.toString());
        rs = ps.executeQuery();
        while (rs.next()) {
            Document doc = new Document();
            for (int i = 0; i < keyCount; i++) {
                VTITemplate.ColumnDescriptor keyDescriptor = primaryKeys[i];
                addValue(doc, keyDescriptor, rs, i + 1);
            }
            String textcolValue = rs.getString(keyCount + 1);
            if (textcolValue != null) {
                for (String fieldName : fieldNames) {
                    doc.add(new TextField(fieldName, textcolValue, Store.NO));
                }
            }
            addDocument(iw, doc);
        }
    } finally {
        try {
            if (iw != null) {
                close(iw);
            }
        } finally {
            try {
                if (rs != null) {
                    rs.close();
                }
            } finally {
                if (ps != null) {
                    ps.close();
                }
            }
        }
    }
}
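The create function statement assembled above produces the table function that callers later query; its name follows the table__column pattern built by makeTableFunctionName. A minimal JDBC sketch of the round trip, assuming a hypothetical POEMS table keyed by POEMID with the text in a POEMTEXT column, and assuming the Lucene tool is already loaded:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class LuceneIndexDemo {
    public static void main(String[] args) throws Exception {
        // Embedded connection; the database name is a placeholder.
        Connection conn = DriverManager.getConnection("jdbc:derby:demoDB");
        Statement st = conn.createStatement();
        // createIndex drives createOrRecreateIndex() with create == true;
        // the trailing null selects the default index descriptor.
        st.execute("call LuceneSupport.createIndex('APP', 'POEMS', 'POEMTEXT', null)");
        // Query the generated table function. Its signature matches the one
        // built above: ( query varchar, windowSize int, scoreCeiling real ),
        // and it returns the key columns plus documentID and score.
        ResultSet rs = st.executeQuery(
            "select * from table(POEMS__POEMTEXT('star', 100, null)) t");
        while (rs.next()) {
            System.out.println(rs.getInt("POEMID") + " -> " + rs.getFloat("SCORE"));
        }
        rs.close();
        st.close();
        conn.close();
    }
}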
Use of org.apache.derby.io.StorageFile in project derby by apache.
The class LuceneSupport, method unloadTool.
/**
 * Removes the functions and procedures registered by loadTool and created by
 * createIndex, drops the LuceneSupport schema, and deletes the lucene subdirectory.
 */
public void unloadTool(String... configurationParameters) throws SQLException {
    forbidReadOnlyConnections();
    Connection conn = getDefaultConnection();
    ToolUtilities.mustBeDBO(conn);
    if (!luceneSchemaExists(conn)) {
        throw ToolUtilities.newSQLException(SQLState.LUCENE_ALREADY_UNLOADED);
    }
    //
    // Drop all of the functions and procedures bound to methods in this package.
    //
    String className = getClass().getName();
    int endPackageIdx = className.lastIndexOf(".");
    String packageName = className.substring(0, endPackageIdx);
    PreparedStatement ps = conn.prepareStatement(
        "select s.schemaName, a.alias, a.aliastype\n" +
        "from sys.sysschemas s, sys.sysaliases a\n" +
        "where s.schemaID = a.schemaID\n" +
        "and substr( cast( a.javaclassname as varchar( 32672 ) ), 1, ? ) = ?\n");
    ps.setInt(1, packageName.length());
    ps.setString(2, packageName);
    ResultSet routines = ps.executeQuery();
    try {
        while (routines.next()) {
            String schema = routines.getString(1);
            String routineName = routines.getString(2);
            String routineType = ("P".equals(routines.getString(3))) ? "procedure" : "function";
            conn.prepareStatement("drop " + routineType + " " + makeTableName(schema, routineName)).execute();
        }
    } finally {
        routines.close();
    }
    //
    // Drop the LuceneSupport schema.
    //
    conn.prepareStatement("drop schema " + LUCENE_SCHEMA + " restrict").execute();
    //
    // Now delete the Lucene subdirectory.
    //
    StorageFactory storageFactory = getStorageFactory(conn);
    StorageFile luceneDir = storageFactory.newStorageFile(Database.LUCENE_DIR);
    if (exists(luceneDir)) {
        deleteFile(luceneDir);
    }
}
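In practice loadTool and unloadTool are not called directly; they run when the tool is registered or unregistered. A short sketch, assuming an embedded database named demoDB and a connection made as the database owner:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class LuceneToolLifecycle {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:derby:demoDB");
        Statement st = conn.createStatement();
        // Registering the tool runs loadTool(), creating the LuceneSupport
        // schema and its routines.
        st.execute("call syscs_util.syscs_register_tool('luceneSupport', true)");
        // ... create and query Lucene indexes here ...
        // Unregistering runs unloadTool() above: the routines, the schema,
        // and the lucene subdirectory are all dropped.
        st.execute("call syscs_util.syscs_register_tool('luceneSupport', false)");
        st.close();
        conn.close();
    }
}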
Use of org.apache.derby.io.StorageFile in project derby by apache.
The class BasicDatabase, method backupLucene.
/**
 * <p>
 * Backup Lucene indexes to the backup directory. This assumes
 * that the rest of the database has been backed up and sanity
 * checks have been run.
 * </p>
 */
private void backupLucene(String backupDir) throws StandardException {
    try {
        File backupRoot = new File(backupDir);
        StorageFactory storageFactory = getStorageFactory();
        String canonicalDbName = storageFactory.getCanonicalName();
        String dbname = StringUtil.shortDBName(canonicalDbName, storageFactory.getSeparator());
        File backupDB = new File(backupRoot, dbname);
        final File targetDir = new File(backupDB, Database.LUCENE_DIR);
        final StorageFile sourceDir = getLuceneDir();
        AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
            public Boolean run() throws StandardException {
                if (!FileUtil.copyDirectory(getStorageFactory(), sourceDir, targetDir, null, null, true)) {
                    throw StandardException.newException(
                        SQLState.UNABLE_TO_COPY_FILE_FROM_BACKUP,
                        sourceDir.getPath(), targetDir.getAbsolutePath());
                }
                return null;
            }
        });
    } catch (IOException ioe) {
        throw StandardException.plainWrapException(ioe);
    } catch (PrivilegedActionException pae) {
        throw StandardException.plainWrapException(pae);
    }
}
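This method runs as part of a full database backup. A caller-side sketch using the standard backup procedure; the database name and backup path are placeholders:

import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DriverManager;

public class BackupDemo {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:derby:demoDB");
        // SYSCS_BACKUP_DATABASE copies the database files; when Lucene
        // indexes exist, backupLucene() above copies the lucene
        // subdirectory into the same backup tree.
        CallableStatement cs = conn.prepareCall(
            "call SYSCS_UTIL.SYSCS_BACKUP_DATABASE(?)");
        cs.setString(1, "/var/backups/derby");
        cs.execute();
        cs.close();
        conn.close();
    }
}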
Use of org.apache.derby.io.StorageFile in project derby by apache.
The class RAFContainer, method run.
// PrivilegedExceptionAction method
public Object run() throws StandardException {
    switch (actionCode) {
        case GET_FILE_NAME_ACTION:
            return privGetFileName(actionIdentity, actionStub, actionErrorOK,
                                   actionTryAlternatePath);
        case CREATE_CONTAINER_ACTION:
        {
            StorageFile file = privGetFileName(actionIdentity, false, false, false);
            try {
                if (file.exists()) {
                    // the container file already exists, so this create
                    // must not overwrite it
                    throw StandardException.newException(SQLState.FILE_EXISTS, file);
                }
            } catch (SecurityException se) {
                throw StandardException.newException(SQLState.FILE_CREATE, se, file);
            }
            try {
                // OK not to force WAL here; in fact, this operation
                // precedes the creation of the log record to ensure
                // sufficient space.
                dataFactory.writeInProgress();
                try {
                    fileData = file.getRandomAccessFile("rw");
                    file.limitAccessToOwner();
                } finally {
                    dataFactory.writeFinished();
                }
                // This container format specifies that the first page is
                // an allocation page and the container information is
                // stored within it. The allocation page needs to be
                // somewhat formatted because if the system crashed after
                // the create container log operation is written, it needs
                // to be well formed enough to get the container
                // information back out of it.
                //
                // Don't try to go thru the page cache here because the
                // container object cannot be found in the container cache
                // at this point yet. However, if we use the page cache
                // to store the first allocation page, then in order to
                // write itself out, it needs to ask the container to do
                // so, which is going to create a deadlock. The
                // allocation page cannot write itself out without going
                // thru the container because it doesn't know where its
                // offset is. Here we effectively hardwire page 0 at
                // offset 0 of the container file to be the first
                // allocation page.

                // Create an embryonic page - if this is not a temporary
                // container, synchronously write out the file header.
                // Need to set canUpdate now; after writeRAFHeader it may
                // be too late in case that method's IO is interrupted and
                // the container needs reopening. To get the correct "rw"
                // mode we need canUpdate to be true.
                canUpdate = true;
                writeRAFHeader(actionIdentity, fileData, true,
                               (actionIdentity.getSegmentId() != ContainerHandle.TEMPORARY_SEGMENT));
            } catch (IOException ioe) {
                canUpdate = false;
                boolean fileDeleted;
                try {
                    fileDeleted = privRemoveFile(file);
                } catch (SecurityException se) {
                    throw StandardException.newException(SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, se.toString());
                }
                if (!fileDeleted) {
                    throw StandardException.newException(SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, ioe.toString());
                }
                throw StandardException.newException(SQLState.FILE_CREATE, ioe, file);
            }
            return null;
        }
        case REMOVE_FILE_ACTION:
            return privRemoveFile(actionFile) ? this : null;
        case OPEN_CONTAINER_ACTION:
        {
            // is this a stub?
            boolean isStub = false;
            StorageFile file = privGetFileName(actionIdentity, false, true, true);
            if (file == null)
                return null;
            try {
                if (!file.exists()) {
                    // file does not exist; maybe it has been stubbified
                    file = privGetFileName(actionIdentity, true, true, true);
                    if (!file.exists())
                        return null;
                    isStub = true;
                }
            } catch (SecurityException se) {
                throw StandardException.newException(SQLState.DATA_UNEXPECTED_EXCEPTION, se);
            }
            canUpdate = false;
            try {
                if (!dataFactory.isReadOnly() && file.canWrite())
                    canUpdate = true;
            } catch (SecurityException se) {
                // just means we can't write to it.
            }
            fileName = file.toString();
            try {
                fileData = file.getRandomAccessFile(canUpdate ? "rw" : "r");
                readHeader(getEmbryonicPage(fileData, FIRST_ALLOC_PAGE_OFFSET));
                if (SanityManager.DEBUG) {
                    if (isStub)
                        SanityManager.ASSERT(getDroppedState() && getCommittedDropState(),
                                             "a stub failed to set drop state");
                }
            } catch (IOException ioe) {
                if (isStub) {
                    throw dataFactory.markCorrupt(
                        StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe,
                            getIdentity() != null ? getIdentity().toString() : "unknown",
                            "read", fileName));
                }
                // maybe it is being stubbified... try that
                StorageFile stub = privGetFileName(actionIdentity, true, true, true);
                if (stub.exists()) {
                    try {
                        boolean delete_status = privRemoveFile(file);
                        if (SanityManager.DEBUG) {
                            if (!delete_status) {
                                SanityManager.THROWASSERT("delete of file (" + file + ") failed.");
                            }
                        }
                        fileData = stub.getRandomAccessFile(canUpdate ? "rw" : "r");
                        readHeader(getEmbryonicPage(fileData, FIRST_ALLOC_PAGE_OFFSET));
                    } catch (IOException ioe2) {
                        throw dataFactory.markCorrupt(
                            StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe2,
                                getIdentity() != null ? getIdentity().toString() : "unknown",
                                "delete-stub", fileName));
                    }
                    // RESOLVE: this is a temporary hack
                } else {
                    throw dataFactory.markCorrupt(
                        StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe,
                            getIdentity() != null ? getIdentity().toString() : "unknown",
                            "read", fileName));
                }
            }
            return this;
        }
        // end of case OPEN_CONTAINER_ACTION
        case REOPEN_CONTAINER_ACTION:
        {
            StorageFile file = privGetFileName(actionIdentity, false, true, true);
            synchronized (this) {
                try {
                    fileData = file.getRandomAccessFile(canUpdate ? "rw" : "r");
                } catch (FileNotFoundException ioe) {
                    throw dataFactory.markCorrupt(
                        StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe,
                            (getIdentity() != null ? getIdentity().toString() : "unknown"),
                            "read", fileName));
                }
            }
            return this;
        }
        case STUBBIFY_ACTION:
        {
            StorageFile file = privGetFileName(actionIdentity, false, false, true);
            StorageFile stub = privGetFileName(actionIdentity, true, false, false);
            StorageRandomAccessFile stubData = null;
            try {
                if (!stub.exists()) {
                    // write the header to the stub
                    stubData = stub.getRandomAccessFile("rw");
                    stub.limitAccessToOwner();
                    writeRAFHeader(actionIdentity, stubData,
                                   true,  /* create */
                                   true); /* sync */
                    stubData.close();
                    stubData = null;
                }
                // Force WAL and check for database corruption before removing file.
                // This is one operation where the container is changed on disk
                // directly without going thru the container cache, which otherwise
                // would have forced WAL. Take care of it here.
                dataFactory.flush(actionInstant);
                // Try to remove the container file.
                // fileData is not null only if we are redoing a removeContainer
                // (stubbify) operation. Then fileData actually is opened against
                // the stub and the original container file does not exist.
                // Then we need to close it here because this method is called by
                // cache.remove and nobody will be able to see fileData after this.
                privRemoveFile(file);
            } catch (SecurityException se) {
                throw StandardException.newException(SQLState.FILE_CANNOT_REMOVE_FILE, se, file, se.toString());
            } catch (IOException ioe) {
                // clean up the (possibly half-baked) stub and the container file
                try {
                    if (stubData != null) {
                        stubData.close();
                        stub.delete();
                        stubData = null;
                    }
                    if (fileData != null) {
                        fileData.close();
                        fileData = null;
                    }
                } catch (IOException ioe2) {
                    throw StandardException.newException(SQLState.FILE_CANNOT_REMOVE_FILE, ioe2, file, ioe.toString());
                } catch (SecurityException se) {
                    throw StandardException.newException(SQLState.FILE_CANNOT_REMOVE_FILE, se, file, se.toString());
                }
            }
            // Let the data factory know about this stub file; it can be removed
            // at the next checkpoint if it's not necessary for recovery.
            dataFactory.stubFileToRemoveAfterCheckPoint(stub, actionInstant, getIdentity());
            return null;
        }
        case GET_RANDOM_ACCESS_FILE_ACTION:
        {
            try {
                boolean exists = actionFile.exists();
                Object result = actionFile.getRandomAccessFile("rw");
                if (!exists) {
                    actionFile.limitAccessToOwner();
                }
                return result;
            } catch (IOException ioe) {
                throw StandardException.newException(SQLState.FILE_CREATE, ioe, actionFile.getPath());
            }
        }
    }
    // end of switch
    return null;
}
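The run() method above is the target half of a recurring Derby pattern: the container object itself implements PrivilegedExceptionAction, callers stash an action code plus arguments in instance fields, and the object is handed to AccessController.doPrivileged. A stripped-down sketch of that dispatch pattern; the class, method, and field names here are illustrative, not Derby's:

import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

class PrivilegedDispatcher implements PrivilegedExceptionAction<Object> {
    private static final int REMOVE_FILE_ACTION = 1;   // illustrative action codes
    private static final int GET_FILE_NAME_ACTION = 2;

    private int actionCode;

    // Callers select an operation by setting the action code, then run the
    // whole object under doPrivileged, just as RAFContainer does.
    Object doAction(int code) throws Exception {
        actionCode = code;
        try {
            return AccessController.doPrivileged(this);
        } catch (PrivilegedActionException pae) {
            // unwrap the checked exception thrown inside run()
            throw (Exception) pae.getCause();
        } finally {
            actionCode = 0; // don't leak state between calls
        }
    }

    public Object run() throws Exception {
        switch (actionCode) {
            case REMOVE_FILE_ACTION:
                // privileged file removal would go here
                return Boolean.TRUE;
            case GET_FILE_NAME_ACTION:
                // privileged name resolution would go here
                return "someFileName";
            default:
                return null;
        }
    }
}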
Use of org.apache.derby.io.StorageFile in project derby by apache.
The class RAFContainer, method encryptOrDecryptContainer.
/**
 * Creates an encrypted or decrypted version of the container.
*
* Reads all the pages of the container from the original container
* through the page cache, then either encrypts page data with the new
* encryption mechanism or leaves the page data un-encrypted, and finally
* writes the data to the specified new container file.
* <p>
* The encryption and decryption engines used to carry out the
* cryptographic operation(s) are configured through the raw store, and
* accessed via the data factory. Note that the pages have already been
* decrypted before being put into the page cache.
*
* @param handle the container handle
* @param newFilePath file to store the new version of the container in
* @param doEncrypt tells whether to encrypt or not
* @exception StandardException Derby Standard error policy
*/
protected void encryptOrDecryptContainer(BaseContainerHandle handle, String newFilePath, boolean doEncrypt)
    throws StandardException {

    BasePage page = null;
    StorageFile newFile = dataFactory.getStorageFactory().newStorageFile(newFilePath);
    StorageRandomAccessFile newRaf = null;
    try {
        long lastPageNumber = getLastPageNumber(handle);
        newRaf = getRandomAccessFile(newFile);
        byte[] encryptionBuf = null;
        if (doEncrypt) {
            encryptionBuf = new byte[pageSize];
        }
        // Copy all the pages from the current container to the new
        // container file after processing the pages.
        for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER; pageNumber <= lastPageNumber; pageNumber++) {
            page = getLatchedPage(handle, pageNumber);
            // Update the page array before writing to the disk.
            // An update consists of adding the container header, or
            // (re-)encrypting the data.
            byte[] dataToWrite = updatePageArray(pageNumber, page.getPageArray(), encryptionBuf, true);
            newRaf.write(dataToWrite, 0, pageSize);
            // unlatch releases page from cache.
            page.unlatch();
            page = null;
        }
        // sync the new version of the container.
        newRaf.sync();
        newRaf.close();
        newRaf = null;
    } catch (IOException ioe) {
        throw StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe,
            getIdentity() != null ? getIdentity().toString() : "unknown",
            doEncrypt ? "encrypt" : "decrypt", newFilePath);
    } finally {
        if (page != null) {
            page.unlatch();
            page = null;
        }
        if (newRaf != null) {
            try {
                newRaf.close();
            } catch (IOException ioe) {
                newRaf = null;
                throw StandardException.newException(SQLState.FILE_CONTAINER_EXCEPTION, ioe,
                    getIdentity() != null ? getIdentity().toString() : "unknown",
                    doEncrypt ? "encrypt-close" : "decrypt-close", newFilePath);
            }
        }
    }
}
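encryptOrDecryptContainer is reached when a database is encrypted or decrypted at boot time via connection attributes. A minimal caller-side sketch; the database name and password are placeholders, the database must not already be booted when these attributes are supplied, and the decryptDatabase attribute assumes a Derby release that supports it:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class EncryptionDemo {
    public static void main(String[] args) throws Exception {
        // Encrypt an existing, unencrypted database at boot time. Each
        // container file is rewritten, with doEncrypt == true above.
        Connection enc = DriverManager.getConnection(
            "jdbc:derby:demoDB;dataEncryption=true;bootPassword=Thursday1234");
        enc.close();
        // Shut the database down before reconfiguring it.
        try {
            DriverManager.getConnection("jdbc:derby:demoDB;shutdown=true");
        } catch (SQLException expected) {
            // a successful single-database shutdown always raises an SQLException
        }
        // Decrypt it again: every container is rewritten with doEncrypt == false.
        Connection dec = DriverManager.getConnection(
            "jdbc:derby:demoDB;decryptDatabase=true;bootPassword=Thursday1234");
        dec.close();
    }
}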