Use of org.apache.jackrabbit.core.data.DataStoreException in the Apache jackrabbit project:
class DbDataStore, method getAllIdentifiers.
/**
 * Lists all identifiers currently stored in the DATASTORE table,
 * skipping entries that represent in-progress (temporary) uploads.
 *
 * @return an iterator over the identifiers found
 * @throws DataStoreException if the table cannot be read
 */
public Iterator<DataIdentifier> getAllIdentifiers() throws DataStoreException {
    ArrayList<DataIdentifier> list = new ArrayList<DataIdentifier>();
    ResultSet rs = null;
    try {
        // SELECT ID FROM DATASTORE
        rs = conHelper.query(selectAllSQL);
        while (rs.next()) {
            String id = rs.getString(1);
            // Rows with the temporary prefix are uploads still in flight; skip them.
            if (!id.startsWith(TEMP_PREFIX)) {
                list.add(new DataIdentifier(id));
            }
        }
        // Parameterized logging avoids building the message when debug is disabled.
        log.debug("Found {} identifiers.", list.size());
        return list.iterator();
    } catch (Exception e) {
        throw convert("Can not read records", e);
    } finally {
        // Always release the result set, even on failure.
        DbUtility.close(rs);
    }
}
Use of org.apache.jackrabbit.core.data.DataStoreException in the Apache jackrabbit project:
class DbDataStore, method openStream.
/**
 * Open the input stream for the given identifier. This method sets those
 * fields of the caller (the {@code DbInputStream}) that need to be closed
 * once the input stream is read: when the data is streamed directly from
 * the database, ownership of the open {@code ResultSet} is transferred to
 * {@code inputStream} so it can close it when reading completes.
 *
 * @param inputStream the database input stream object
 * @param identifier data identifier
 * @return the stream to read the record data from; never {@code null}
 * @throws DataStoreException if the data store could not be accessed,
 * or if the given identifier is invalid
 */
InputStream openStream(DbInputStream inputStream, DataIdentifier identifier) throws DataStoreException {
    ResultSet rs = null;
    try {
        // SELECT ID, DATA FROM DATASTORE WHERE ID = ?
        rs = conHelper.query(selectDataSQL, identifier.toString());
        if (!rs.next()) {
            throw new DataStoreException("Record not found: " + identifier);
        }
        InputStream stream = rs.getBinaryStream(2);
        if (stream == null) {
            // Zero-length record: no database stream to keep open, close rs now.
            stream = new ByteArrayInputStream(new byte[0]);
            DbUtility.close(rs);
        } else if (copyWhenReading) {
            // If we copy while reading, create a temp file and close the stream
            // so the database resources are released immediately.
            File temp = moveToTempFile(stream);
            stream = new BufferedInputStream(new TempFileInputStream(temp));
            DbUtility.close(rs);
        } else {
            // Streaming directly from the database: the caller now owns rs
            // and must close it when the stream is fully consumed.
            stream = new BufferedInputStream(stream);
            inputStream.setResultSet(rs);
        }
        return stream;
    } catch (Exception e) {
        // On any failure, rs has not been handed off; close it here.
        DbUtility.close(rs);
        throw convert("Retrieving database resource ", e);
    }
}
Use of org.apache.jackrabbit.core.data.DataStoreException in the Apache jackrabbit-oak project:
class AzureBlobStoreBackend, method write.
@Override
/**
 * Uploads the content of {@code file} under the blob key derived from
 * {@code identifier}. If a blob with that key already exists, verifies the
 * stored length matches and refreshes its lastModified via a self-copy
 * instead of re-uploading.
 *
 * @param identifier the data identifier (must not be null)
 * @param file the file whose content is written (must not be null)
 * @throws DataStoreException on storage errors, length mismatch, or if the
 *         lastModified update copy fails
 */
@Override
public void write(DataIdentifier identifier, File file) throws DataStoreException {
    if (null == identifier) {
        throw new NullPointerException("identifier");
    }
    if (null == file) {
        throw new NullPointerException("file");
    }
    String key = getKeyName(identifier);
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        // Azure SDK loads classes via the context class loader; point it at ours.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        long len = file.length();
        LOG.debug("Blob write started. identifier={} length={}", key, len);
        CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key);
        if (!blob.exists()) {
            BlobRequestOptions options = new BlobRequestOptions();
            options.setConcurrentRequestCount(concurrentRequestCount);
            boolean useBufferedStream = len < BUFFERED_STREAM_THRESHHOLD;
            // try-with-resources guarantees the stream is closed even if the upload fails.
            try (InputStream in = useBufferedStream
                    ? new BufferedInputStream(new FileInputStream(file))
                    : new FileInputStream(file)) {
                blob.upload(in, len, null, options, null);
                LOG.debug("Blob created. identifier={} length={} duration={} buffered={}", key, len, (System.currentTimeMillis() - start), useBufferedStream);
            }
            return;
        }
        blob.downloadAttributes();
        if (blob.getProperties().getLength() != len) {
            throw new DataStoreException("Length Collision. identifier=" + key + " new length=" + len + " old length=" + blob.getProperties().getLength());
        }
        LOG.trace("Blob already exists. identifier={} lastModified={}", key, blob.getProperties().getLastModified().getTime());
        blob.startCopy(blob);
        //TODO: better way of updating lastModified (use custom metadata?)
        if (!waitForCopy(blob)) {
            throw new DataStoreException(String.format("Cannot update lastModified for blob. identifier=%s status=%s", key, blob.getCopyState().getStatusDescription()));
        }
        LOG.debug("Blob updated. identifier={} lastModified={} duration={}", key, blob.getProperties().getLastModified().getTime(), (System.currentTimeMillis() - start));
    } catch (StorageException e) {
        LOG.info("Error writing blob. identifier={}", key, e);
        throw new DataStoreException(String.format("Cannot write blob. identifier=%s", key), e);
    } catch (URISyntaxException | IOException e) {
        LOG.debug("Error writing blob. identifier={}", key, e);
        throw new DataStoreException(String.format("Cannot write blob. identifier=%s", key), e);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers can observe the interruption.
        Thread.currentThread().interrupt();
        LOG.debug("Error writing blob. identifier={}", key, e);
        throw new DataStoreException(String.format("Cannot copy blob. identifier=%s", key), e);
    } finally {
        if (null != contextClassLoader) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
Use of org.apache.jackrabbit.core.data.DataStoreException in the Apache jackrabbit-oak project:
class AzureBlobStoreBackend, method init.
@Override
/**
 * Initializes the backend: loads configuration (from the default config file
 * when no properties were injected), reads connection settings, and creates
 * the target container if it does not already exist.
 *
 * @throws DataStoreException if configuration cannot be read or the
 *         container cannot be accessed/created
 */
@Override
public void init() throws DataStoreException {
    long initStart = System.currentTimeMillis();
    ClassLoader previousClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        // The Azure SDK resolves classes through the context class loader.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        LOG.debug("Started backend initialization");

        if (properties == null) {
            try {
                properties = Utils.readConfig(Utils.DEFAULT_CONFIG_FILE);
            } catch (IOException e) {
                throw new DataStoreException(
                        "Unable to initialize Azure Data Store from " + Utils.DEFAULT_CONFIG_FILE, e);
            }
        }

        secret = properties.getProperty("secret");
        try {
            Utils.setProxyIfNeeded(properties);
            containerName = (String) properties.get(AzureConstants.AZURE_BLOB_CONTAINER_NAME);
            connectionString = Utils.getConnectionStringFromProperties(properties);

            concurrentRequestCount = PropertiesUtil.toInteger(
                    properties.get(AzureConstants.AZURE_BLOB_CONCURRENT_REQUESTS_PER_OPERATION), 1);
            LOG.info("Using concurrentRequestsPerOperation={}", concurrentRequestCount);

            retryPolicy = Utils.getRetryPolicy(
                    (String) properties.get(AzureConstants.AZURE_BLOB_MAX_REQUEST_RETRY));
            if (properties.getProperty(AzureConstants.AZURE_BLOB_REQUEST_TIMEOUT) != null) {
                requestTimeout = PropertiesUtil.toInteger(
                        properties.getProperty(AzureConstants.AZURE_BLOB_REQUEST_TIMEOUT),
                        RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT);
            }

            CloudBlobContainer container = getAzureContainer();
            if (container.createIfNotExists()) {
                LOG.info("New container created. containerName={}", containerName);
            } else {
                LOG.info("Reusing existing container. containerName={}", containerName);
            }
            LOG.debug("Backend initialized. duration={}", (System.currentTimeMillis() - initStart));
        } catch (StorageException e) {
            throw new DataStoreException(e);
        }
    } finally {
        Thread.currentThread().setContextClassLoader(previousClassLoader);
    }
}
Use of org.apache.jackrabbit.core.data.DataStoreException in the Apache jackrabbit-oak project:
class AzureBlobStoreBackend, method deleteMetadataRecord.
@Override
/**
 * Deletes the metadata record with the given name, if present.
 * Failures are logged and reported as {@code false} rather than thrown.
 *
 * @param name the metadata record name (without the meta key prefix)
 * @return true if the record existed and was deleted, false otherwise
 */
@Override
public boolean deleteMetadataRecord(String name) {
    long begin = System.currentTimeMillis();
    ClassLoader previousLoader = Thread.currentThread().getContextClassLoader();
    try {
        // The Azure SDK resolves classes through the context class loader.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        CloudBlockBlob metadataBlob =
                getAzureContainer().getBlockBlobReference(addMetaKeyPrefix(name));
        boolean deleted = metadataBlob.deleteIfExists();
        LOG.debug("Metadata record {}. metadataName={} duration={}",
                deleted ? "deleted" : "delete requested, but it does not exist (perhaps already deleted)",
                name, (System.currentTimeMillis() - begin));
        return deleted;
    } catch (StorageException e) {
        LOG.info("Error deleting metadata record. metadataName={}", name, e);
    } catch (DataStoreException | URISyntaxException e) {
        LOG.debug("Error deleting metadata record. metadataName={}", name, e);
    } finally {
        if (previousLoader != null) {
            Thread.currentThread().setContextClassLoader(previousLoader);
        }
    }
    return false;
}
Aggregations