Use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit-oak by apache.
The class AbstractSharedCachingDataStore, method init.
public void init(String homeDir) throws DataStoreException {
    if (path == null) {
        path = homeDir + "/repository/datastore";
    }
    path = FilenameUtils.normalizeNoEndSeparator(new File(path).getAbsolutePath());
    checkArgument(stagingSplitPercentage >= 0 && stagingSplitPercentage <= 50,
        "Staging cache percentage should be between 0 and 50");
    this.rootDirectory = new File(path);
    this.tmp = new File(rootDirectory, "tmp");
    LOG.trace("Temporary directory created [{}]", tmp.mkdirs());
    this.backend = createBackend();
    backend.init();
    String home = FilenameUtils.normalizeNoEndSeparator(new File(homeDir).getAbsolutePath());
    this.cache = new CompositeDataStoreCache(path, new File(home), cacheSize,
        stagingSplitPercentage, uploadThreads,
        // on a cache miss, load the blob straight from the backend by identifier
        new CacheLoader<String, InputStream>() {
            @Override
            public InputStream load(String key) throws Exception {
                return backend.read(new DataIdentifier(key));
            }
        },
        // staged files (asynchronous uploads) are written through to the backend
        new StagingUploader() {
            @Override
            public void write(String id, File file) throws DataStoreException {
                backend.write(new DataIdentifier(id), file);
            }
        }, statisticsProvider, listeningExecutor, schedulerExecutor, executor,
        stagingPurgeInterval, stagingRetryInterval);
}
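The CacheLoader handed to CompositeDataStoreCache follows the standard Guava contract: on a cache miss it falls through to the backend. A minimal sketch of that contract in isolation, using a plain Guava LoadingCache; readFromBackend is a hypothetical stand-in for backend.read(new DataIdentifier(key)), and the entry-count bound is an assumption (Oak's cache is sized in bytes, not entries):

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

class BlobCacheSketch {
    // hypothetical stand-in for backend.read(new DataIdentifier(key))
    static byte[] readFromBackend(String key) {
        return new byte[0];
    }

    static final LoadingCache<String, byte[]> CACHE = CacheBuilder.newBuilder()
        .maximumSize(1000) // assumption: bounded by entries here, by bytes in Oak
        .build(new CacheLoader<String, byte[]>() {
            @Override
            public byte[] load(String key) {
                // on a miss, fall through to the backend, as init() wires it
                return readFromBackend(key);
            }
        });

    public static void main(String[] args) {
        byte[] blob = CACHE.getUnchecked("0a1b2c"); // first access triggers load()
        System.out.println(blob.length);
    }
}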
Use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit-oak by apache.
The class AbstractSharedCachingDataStore, method addRecord.
@Override
public DataRecord addRecord(InputStream inputStream, BlobOptions blobOptions) throws DataStoreException {
    Stopwatch watch = Stopwatch.createStarted();
    try {
        TransientFileFactory fileFactory = TransientFileFactory.getInstance();
        File tmpFile = fileFactory.createTransientFile("upload", null, tmp);
        // Copy the stream to the temporary file and calculate the
        // stream length and the message digest of the stream
        MessageDigest digest = MessageDigest.getInstance(DIGEST);
        OutputStream output = new DigestOutputStream(new FileOutputStream(tmpFile), digest);
        long length = 0;
        try {
            length = IOUtils.copyLarge(inputStream, output);
        } finally {
            output.close();
        }
        DataIdentifier identifier = new DataIdentifier(encodeHexString(digest.digest()));
        LOG.debug("SHA-256 of [{}], length=[{}], took [{}] ms", identifier, length,
            watch.elapsed(TimeUnit.MILLISECONDS));
        // Try to stage the file for asynchronous upload; if the upload is
        // explicitly synchronous or staging fails, add it to the backend directly
        if (blobOptions.getUpload() == SYNCHRONOUS || !cache.stage(identifier.toString(), tmpFile)) {
            backend.write(identifier, tmpFile);
            LOG.info("Added blob [{}] to backend", identifier);
            // offer the file to the download cache as well
            cache.getDownloadCache().put(identifier.toString(), tmpFile);
        }
        return getRecordIfStored(identifier);
    } catch (Exception e) {
        LOG.error("Error in adding record", e);
        throw new DataStoreException("Error in adding record", e);
    }
}
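The identifier is derived purely from the content: the stream is digested while it is copied, and the hex digest becomes the DataIdentifier. A minimal self-contained sketch of that content-addressing step (contentId is a hypothetical helper; SHA-256 matches the DIGEST constant used above):

import java.io.InputStream;
import java.io.OutputStream;
import java.security.DigestOutputStream;
import java.security.MessageDigest;

static String contentId(InputStream in, OutputStream sink) throws Exception {
    MessageDigest digest = MessageDigest.getInstance("SHA-256");
    try (DigestOutputStream out = new DigestOutputStream(sink, digest)) {
        in.transferTo(out); // Java 9+; IOUtils.copyLarge is the pre-9 equivalent
    }
    // hex-encode the digest; this is the string a DataIdentifier is built from
    StringBuilder hex = new StringBuilder();
    for (byte b : digest.digest()) {
        hex.append(String.format("%02x", b));
    }
    return hex.toString();
}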
Use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit-oak by apache.
The class S3Backend, method getMetadataRecord.
@Override
public DataRecord getMetadataRecord(String name) {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        // run the S3 call with this bundle's class loader so the AWS SDK
        // can resolve its own classes and resources
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectMetadata meta = s3service.getObjectMetadata(bucket, addMetaKeyPrefix(name));
        return new S3DataRecord(this, s3service, bucket, new DataIdentifier(name),
            meta.getLastModified().getTime(), meta.getContentLength(), true);
    } finally {
        // restore the original context class loader
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
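The save/swap/restore of the thread context class loader recurs in every S3Backend call. A minimal sketch of the same pattern extracted into a reusable helper (withContextClassLoader is a hypothetical name, not part of Oak):

import java.util.concurrent.Callable;

static <T> T withContextClassLoader(ClassLoader cl, Callable<T> action) throws Exception {
    Thread current = Thread.currentThread();
    ClassLoader previous = current.getContextClassLoader();
    current.setContextClassLoader(cl);
    try {
        return action.call();
    } finally {
        // always restore the previous loader, even when the action throws
        current.setContextClassLoader(previous);
    }
}

Usage, for example: ObjectMetadata meta = withContextClassLoader(getClass().getClassLoader(), () -> s3service.getObjectMetadata(bucket, addMetaKeyPrefix(name)));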
Use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit-oak by apache.
The class S3Backend, method getAllMetadataRecords.
@Override
public List<DataRecord> getAllMetadataRecords(String prefix) {
    List<DataRecord> metadataList = new ArrayList<DataRecord>();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ListObjectsRequest listObjectsRequest = new ListObjectsRequest()
            .withBucketName(bucket)
            .withPrefix(addMetaKeyPrefix(prefix));
        ObjectListing prevObjectListing = s3service.listObjects(listObjectsRequest);
        for (final S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
            metadataList.add(new S3DataRecord(this, s3service, bucket,
                new DataIdentifier(stripMetaKeyPrefix(s3ObjSumm.getKey())),
                s3ObjSumm.getLastModified().getTime(), s3ObjSumm.getSize(), true));
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    return metadataList;
}
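Note that this method reads only the first page of the listing, which is adequate for the handful of metadata records involved. For prefixes that could yield a truncated listing, the AWS SDK v1 supports a pagination loop; a hedged sketch assuming the same s3service and bucket fields:

ListObjectsRequest request = new ListObjectsRequest()
    .withBucketName(bucket)
    .withPrefix(addMetaKeyPrefix(prefix));
ObjectListing listing = s3service.listObjects(request);
while (true) {
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        // consume summary.getKey(), summary.getLastModified(), summary.getSize()
    }
    if (!listing.isTruncated()) {
        break;
    }
    // fetch the next page using the marker carried in the previous listing
    listing = s3service.listNextBatchOfObjects(listing);
}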
Use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit-oak by apache.
The class AbstractDataStoreTest, method doDeleteAllOlderThan.
/**
 * Asserts that {@link DataStore#deleteAllOlderThan(long)} deletes only
 * records older than the argument passed.
 */
protected void doDeleteAllOlderThan() throws Exception {
    Random random = randomGen;
    byte[] data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec1 = ds.addRecord(new ByteArrayInputStream(data));
    data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec2 = ds.addRecord(new ByteArrayInputStream(data));
    // sleep for some time to ensure that any async upload completes in the backend
    sleep(10000);
    long updateTime = System.currentTimeMillis();
    ds.updateModifiedDateOnAccess(updateTime);
    // sleep to work around System.currentTimeMillis granularity
    sleep(3000);
    data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec3 = ds.addRecord(new ByteArrayInputStream(data));
    data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec4 = ds.addRecord(new ByteArrayInputStream(data));
    // accessing rec1 after updateModifiedDateOnAccess touches it, so it survives
    rec1 = ds.getRecord(rec1.getIdentifier());
    ds.clearInUse();
    Assert.assertEquals("only rec2 should be deleted", 1, ds.deleteAllOlderThan(updateTime));
    assertNull("rec2 should be null", ds.getRecordIfStored(rec2.getIdentifier()));
    Iterator<DataIdentifier> itr = ds.getAllIdentifiers();
    List<DataIdentifier> list = new ArrayList<DataIdentifier>();
    list.add(rec1.getIdentifier());
    list.add(rec3.getIdentifier());
    list.add(rec4.getIdentifier());
    while (itr.hasNext()) {
        assertTrue("record found on list", list.remove(itr.next()));
    }
    Assert.assertEquals("touched records found", 0, list.size());
    assertTrue("rec1 touched", rec1.getLastModified() > updateTime);
    assertTrue("rec3 touched", rec3.getLastModified() > updateTime);
    assertTrue("rec4 touched", rec4.getLastModified() > updateTime);
}
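The sequence the test exercises condenses to a small garbage-collection idiom on the DataStore API. A minimal sketch, assuming ds is any initialized DataStore and keepId is a hypothetical identifier of a record that should survive:

import org.apache.jackrabbit.core.data.DataIdentifier;
import org.apache.jackrabbit.core.data.DataStore;
import org.apache.jackrabbit.core.data.DataStoreException;

static int collectOlderThan(DataStore ds, DataIdentifier keepId) throws DataStoreException {
    long cutoff = System.currentTimeMillis();
    ds.updateModifiedDateOnAccess(cutoff); // from now on, reads touch records
    ds.getRecord(keepId);                  // touch: lastModified moves past cutoff
    ds.clearInUse();                       // drop in-use markers before collecting
    return ds.deleteAllOlderThan(cutoff);  // deletes only untouched, older records
}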