Use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit by apache.
The class DbDataStore, method deleteAllOlderThan.
public synchronized int deleteAllOlderThan(long min) throws DataStoreException {
    try {
        ArrayList<String> touch = new ArrayList<String>();
        ArrayList<DataIdentifier> ids = new ArrayList<DataIdentifier>(inUse.keySet());
        for (DataIdentifier identifier : ids) {
            if (identifier != null) {
                touch.add(identifier.toString());
            }
        }
        touch.addAll(temporaryInUse);
        for (String key : touch) {
            updateLastModifiedDate(key, 0);
        }
        // DELETE FROM DATASTORE WHERE LAST_MODIFIED<?
        return conHelper.update(deleteOlderSQL, min);
    } catch (Exception e) {
        throw convert("Can not delete records", e);
    }
}
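For context, a minimal sketch of how a caller might drive this method during data store garbage collection; the store reference and the 24-hour retention window are assumptions for illustration, not part of the Jackrabbit code above.

// Hypothetical caller: purge records whose last-modified time is older than the retention window.
long retention = TimeUnit.HOURS.toMillis(24);        // assumed retention period
long min = System.currentTimeMillis() - retention;   // cutoff passed to deleteAllOlderThan
int deleted = store.deleteAllOlderThan(min);          // store is an initialized DbDataStore
LOG.info("Purged [{}] records older than [{}]", deleted, min);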
Use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit by apache.
The class S3Backend, method getAllIdentifiers.
@Override
public Iterator<DataIdentifier> getAllIdentifiers() throws DataStoreException {
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Set<DataIdentifier> ids = new HashSet<DataIdentifier>();
        ObjectListing prevObjectListing = s3service.listObjects(bucket);
        while (true) {
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                String id = getIdentifierName(s3ObjSumm.getKey());
                if (id != null) {
                    ids.add(new DataIdentifier(id));
                }
            }
            if (!prevObjectListing.isTruncated()) {
                break;
            }
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
        LOG.debug("getAllIdentifiers returned size [{}] took [{}] ms.", ids.size(), (System.currentTimeMillis() - start));
        return ids.iterator();
    } catch (AmazonServiceException e) {
        throw new DataStoreException("Could not list objects", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
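A minimal usage sketch for the method above; the backend reference is an assumption for illustration.

// Hypothetical caller: count every identifier the S3 backend knows about.
Iterator<DataIdentifier> it = backend.getAllIdentifiers(); // backend is an initialized S3Backend
int count = 0;
while (it.hasNext()) {
    LOG.debug("Backend contains [{}]", it.next());
    count++;
}
LOG.info("Backend reports [{}] identifiers", count);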
Use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit by apache.
The class S3Backend, method deleteAllOlderThan.
@Override
public Set<DataIdentifier> deleteAllOlderThan(long min) throws DataStoreException {
    long start = System.currentTimeMillis();
    // S3 stores lastModified to lower boundary of timestamp in ms.
    // and hence min is reduced by 1000ms.
    min = min - 1000;
    Set<DataIdentifier> deleteIdSet = new HashSet<DataIdentifier>(30);
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectListing prevObjectListing = s3service.listObjects(bucket);
        while (true) {
            List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                DataIdentifier identifier = new DataIdentifier(getIdentifierName(s3ObjSumm.getKey()));
                long lastModified = s3ObjSumm.getLastModified().getTime();
                LOG.debug("Identifier [{}]'s lastModified = [{}]", identifier, lastModified);
                if (lastModified < min && getDataStore().confirmDelete(identifier) && // order is important here
                        s3service.getObjectMetadata(bucket, s3ObjSumm.getKey()).getLastModified().getTime() < min) {
                    getDataStore().deleteFromCache(identifier);
                    LOG.debug("add id [{}] to delete lists", s3ObjSumm.getKey());
                    deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
                    deleteIdSet.add(identifier);
                }
            }
            if (deleteList.size() > 0) {
                DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
                delObjsReq.setKeys(deleteList);
                DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
                if (dobjs.getDeletedObjects().size() != deleteList.size()) {
                    throw new DataStoreException("Incomplete delete object request. only " + dobjs.getDeletedObjects().size() + " out of " + deleteList.size() + " are deleted");
                } else {
                    LOG.debug("[{}] records deleted from datastore", deleteList);
                }
            }
            if (!prevObjectListing.isTruncated()) {
                break;
            }
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.info("deleteAllOlderThan: min=[{}] exit. Deleted[{}] records. Number of records deleted [{}] took [{}]ms", new Object[] { min, deleteIdSet, deleteIdSet.size(), (System.currentTimeMillis() - start) });
    return deleteIdSet;
}
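A minimal sketch of exercising the two S3Backend methods above together; the backend reference and the one-day cutoff are assumptions for illustration.

// Hypothetical check: identifiers reported deleted should no longer be listed.
long min = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(1); // assumed one-day retention
Set<DataIdentifier> deleted = backend.deleteAllOlderThan(min);

Set<DataIdentifier> remaining = new HashSet<DataIdentifier>();
Iterator<DataIdentifier> all = backend.getAllIdentifiers();
while (all.hasNext()) {
    remaining.add(all.next());
}
for (DataIdentifier id : deleted) {
    if (remaining.contains(id)) {
        throw new IllegalStateException("Identifier " + id + " was deleted but is still listed");
    }
}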
Use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit-oak by apache.
The class AbstractSharedCachingDataStore, method init.
public void init(String homeDir) throws DataStoreException {
    if (path == null) {
        path = homeDir + "/repository/datastore";
    }
    path = FilenameUtils.normalizeNoEndSeparator(new File(path).getAbsolutePath());
    checkArgument(stagingSplitPercentage >= 0 && stagingSplitPercentage <= 50, "Staging percentage cache should be between 0 and 50");
    this.rootDirectory = new File(path);
    this.tmp = new File(rootDirectory, "tmp");
    LOG.trace("Temporary file created [{}]", tmp.mkdirs());
    this.backend = createBackend();
    backend.init();
    String home = FilenameUtils.normalizeNoEndSeparator(new File(homeDir).getAbsolutePath());
    this.cache = new CompositeDataStoreCache(path, new File(home), cacheSize, stagingSplitPercentage, uploadThreads,
        new CacheLoader<String, InputStream>() {
            @Override
            public InputStream load(String key) throws Exception {
                return backend.read(new DataIdentifier(key));
            }
        },
        new StagingUploader() {
            @Override
            public void write(String id, File file) throws DataStoreException {
                backend.write(new DataIdentifier(id), file);
            }
        },
        statisticsProvider, listeningExecutor, schedulerExecutor, executor, stagingPurgeInterval, stagingRetryInterval);
}
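The two anonymous classes above adapt the backend to the composite cache: a Guava CacheLoader fetches a blob from the backend on a download-cache miss, and the StagingUploader flushes staged files back to the backend. Below is a standalone sketch of the same loader pattern with a plain Guava LoadingCache, assuming a backend reference; it is illustrative only, since CompositeDataStoreCache wires the equivalent up internally.

// Standalone sketch of the loader pattern used above (illustrative, not Oak's cache).
LoadingCache<String, byte[]> downloadCache = CacheBuilder.newBuilder()
        .maximumSize(1000)
        .build(new CacheLoader<String, byte[]>() {
            @Override
            public byte[] load(String key) throws Exception {
                // on a cache miss, stream the blob from the backend (assumed reference)
                InputStream in = backend.read(new DataIdentifier(key));
                try {
                    return IOUtils.toByteArray(in);
                } finally {
                    in.close();
                }
            }
        });
byte[] blob = downloadCache.getUnchecked("0123456789abcdef"); // first access loads through the backend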
Use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit-oak by apache.
The class AbstractSharedCachingDataStore, method addRecord.
@Override
public DataRecord addRecord(InputStream inputStream, BlobOptions blobOptions) throws DataStoreException {
    Stopwatch watch = Stopwatch.createStarted();
    try {
        TransientFileFactory fileFactory = TransientFileFactory.getInstance();
        File tmpFile = fileFactory.createTransientFile("upload", null, tmp);
        // Copy the stream to the temporary file and calculate the
        // stream length and the message digest of the stream
        MessageDigest digest = MessageDigest.getInstance(DIGEST);
        OutputStream output = new DigestOutputStream(new FileOutputStream(tmpFile), digest);
        long length = 0;
        try {
            length = IOUtils.copyLarge(inputStream, output);
        } finally {
            output.close();
        }
        DataIdentifier identifier = new DataIdentifier(encodeHexString(digest.digest()));
        LOG.debug("SHA-256 of [{}], length =[{}] took [{}] ms ", identifier, length, watch.elapsed(TimeUnit.MILLISECONDS));
        // stage the file for asynchronous upload if the staging cache permits,
        // otherwise add to backend
        if (blobOptions.getUpload() == SYNCHRONOUS || !cache.stage(identifier.toString(), tmpFile)) {
            backend.write(identifier, tmpFile);
            LOG.info("Added blob [{}] to backend", identifier);
            // offer to download cache
            cache.getDownloadCache().put(identifier.toString(), tmpFile);
        }
        return getRecordIfStored(identifier);
    } catch (Exception e) {
        LOG.error("Error in adding record");
        throw new DataStoreException("Error in adding record ", e);
    }
}
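A minimal sketch of calling the method above; the store reference, the file path, and the default BlobOptions construction are assumptions for illustration.

// Hypothetical caller: store a local file as a blob and log the resulting record.
InputStream in = new FileInputStream("/tmp/report.pdf"); // assumed input file
try {
    DataRecord record = store.addRecord(in, new BlobOptions()); // store is an initialized caching data store
    LOG.info("Stored blob [{}] of length [{}]", record.getIdentifier(), record.getLength());
} finally {
    in.close();
}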