use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit-oak by apache.
the class CachingDataStoreTest method lazyLoadStream.
/**
 * Add a record to the datastore, invalidate it from the cache, and lazily load the record stream.
 */
@Test
public void lazyLoadStream() throws Exception {
    LOG.info("Starting lazyLoadStream");
    File f = copyToFile(randomStream(0, 4 * 1024), folder.newFile());
    String id = getIdForInputStream(f);
    FileInputStream fin = new FileInputStream(f);
    closer.register(fin);
    DataRecord rec = dataStore.addRecord(fin);
    assertEquals(id, rec.getIdentifier().toString());
    // start & finish
    taskLatch.countDown();
    callbackLatch.countDown();
    waitFinish();
    // invalidate the record in the local cache
    dataStore.getCache().invalidate(id);
    // retrieve the record from the datastore
    rec = dataStore.getRecordIfStored(new DataIdentifier(id));
    assertNotNull(rec);
    assertEquals(id, rec.getIdentifier().toString());
    // the file should not be in the cache yet
    File cached = dataStore.getCache().getIfPresent(id);
    assertNull(cached);
    // assert the stream contents
    assertFile(rec.getStream(), f, folder);
    // reading the stream should have populated the cache
    cached = dataStore.getCache().getIfPresent(id);
    assertNotNull(cached);
    assertTrue(Files.equal(f, cached));
    dataStore.deleteRecord(new DataIdentifier(id));
    rec = dataStore.getRecordIfStored(new DataIdentifier(id));
    assertNull(rec);
    LOG.info("Finished lazyLoadStream");
}
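The helper getIdForInputStream is not shown above. A minimal sketch of what it plausibly does, assuming the record identifier is the hex-encoded content hash of the file (the exact digest algorithm used by the test is an assumption here):

// Sketch (assumption): derive the expected record id by hashing the
// file contents, since blob ids in the data store are content hashes.
private static String getIdForInputStream(File f) throws Exception {
    MessageDigest digest = MessageDigest.getInstance("SHA-256");
    try (InputStream in = new FileInputStream(f)) {
        byte[] buffer = new byte[8192];
        int read;
        while ((read = in.read(buffer)) != -1) {
            digest.update(buffer, 0, read);
        }
    }
    StringBuilder hex = new StringBuilder();
    for (byte b : digest.digest()) {
        hex.append(String.format("%02x", b));
    }
    return hex.toString();
}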
use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit-oak by apache.
the class CachingDataStoreTest method zeroCacheAddGetDelete.
/**
 * Add, get and delete a record when the cache size is zero.
 */
@Test
public void zeroCacheAddGetDelete() throws Exception {
    LOG.info("Starting zeroCacheAddGetDelete");
    dataStore.close();
    init(1, 0, 0);
    File f = copyToFile(randomStream(0, 4 * 1024), folder.newFile());
    String id = getIdForInputStream(f);
    FileInputStream fin = new FileInputStream(f);
    closer.register(fin);
    DataRecord rec = dataStore.addRecord(fin);
    assertEquals(id, rec.getIdentifier().toString());
    assertFile(rec.getStream(), f, folder);
    rec = dataStore.getRecordIfStored(new DataIdentifier(id));
    assertEquals(id, rec.getIdentifier().toString());
    assertFile(rec.getStream(), f, folder);
    assertEquals(1, Iterators.size(dataStore.getAllIdentifiers()));
    dataStore.deleteRecord(new DataIdentifier(id));
    rec = dataStore.getRecordIfStored(new DataIdentifier(id));
    assertNull(rec);
    LOG.info("Finished zeroCacheAddGetDelete");
}
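Since the cache size is zero here, the record is always streamed from the backend. A hypothetical extra assertion, mirroring the cache check in lazyLoadStream above, would make that explicit:

// Hypothetical addition: with a zero-sized cache, the file should not
// appear in the local cache even after the stream has been read.
File cached = dataStore.getCache().getIfPresent(id);
assertNull(cached);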
use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit-oak by apache.
the class SafeDataStoreBlobStore method getReference.
@Override
public String getReference(@Nonnull String encodedBlobId) {
    checkNotNull(encodedBlobId);
    String blobId = extractBlobId(encodedBlobId);
    // references are not created for in-memory records
    if (InMemoryDataRecord.isInstance(blobId)) {
        return null;
    }
    DataRecord record;
    try {
        record = delegate.getRecordIfStored(new DataIdentifier(blobId));
        if (record != null) {
            return record.getReference();
        } else {
            log.debug("No blob found for id [{}]", blobId);
        }
    } catch (DataStoreException e) {
        log.warn("Unable to access the blobId for [{}]", blobId, e);
    }
    return null;
}
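A sketch of how a caller might use getReference; the variable names are illustrative and blobStore stands for a SafeDataStoreBlobStore instance:

// Illustrative caller: a null return means the blob is either an
// in-memory record, missing, or the data store failed to resolve it.
String reference = blobStore.getReference(encodedBlobId);
if (reference == null) {
    // fall back, e.g. serve the binary directly instead of by reference
}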
use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit by apache.
the class S3Backend method getAllIdentifiers.
@Override
public Iterator<DataIdentifier> getAllIdentifiers() throws DataStoreException {
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Set<DataIdentifier> ids = new HashSet<DataIdentifier>();
        ObjectListing prevObjectListing = s3service.listObjects(bucket);
        while (true) {
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                String id = getIdentifierName(s3ObjSumm.getKey());
                if (id != null) {
                    ids.add(new DataIdentifier(id));
                }
            }
            if (!prevObjectListing.isTruncated()) {
                break;
            }
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
        LOG.debug("getAllIdentifiers returned size [{}] took [{}] ms.", ids.size(), (System.currentTimeMillis() - start));
        return ids.iterator();
    } catch (AmazonServiceException e) {
        throw new DataStoreException("Could not list objects", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
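The getIdentifierName helper is not shown. A sketch under the assumption that this backend stores a record id by splitting off its first four characters with a dash (a common S3 key layout for spreading keys across partitions); keys without a dash are treated as non-record objects such as metadata:

// Sketch (assumption): reverse the "abcd-rest" key layout back into
// the record id "abcdrest"; return null for keys that are not records.
private static String getIdentifierName(String key) {
    if (!key.contains("-")) {
        // not a data record key (e.g. a metadata object)
        return null;
    }
    return key.substring(0, 4) + key.substring(5);
}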
use of org.apache.jackrabbit.core.data.DataIdentifier in project jackrabbit by apache.
the class S3Backend method deleteAllOlderThan.
@Override
public Set<DataIdentifier> deleteAllOlderThan(long min) throws DataStoreException {
    long start = System.currentTimeMillis();
    // S3 stores lastModified rounded down to the lower boundary of the
    // timestamp in ms, hence min is reduced by 1000 ms.
    min = min - 1000;
    Set<DataIdentifier> deleteIdSet = new HashSet<DataIdentifier>(30);
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectListing prevObjectListing = s3service.listObjects(bucket);
        while (true) {
            List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                DataIdentifier identifier = new DataIdentifier(getIdentifierName(s3ObjSumm.getKey()));
                long lastModified = s3ObjSumm.getLastModified().getTime();
                LOG.debug("Identifier [{}]'s lastModified = [{}]", identifier, lastModified);
                if (lastModified < min && getDataStore().confirmDelete(identifier) // order is important here
                    && s3service.getObjectMetadata(bucket, s3ObjSumm.getKey()).getLastModified().getTime() < min) {
                    getDataStore().deleteFromCache(identifier);
                    LOG.debug("add id [{}] to delete lists", s3ObjSumm.getKey());
                    deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
                    deleteIdSet.add(identifier);
                }
            }
            if (deleteList.size() > 0) {
                DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
                delObjsReq.setKeys(deleteList);
                DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
                if (dobjs.getDeletedObjects().size() != deleteList.size()) {
                    throw new DataStoreException("Incomplete delete object request. Only "
                        + dobjs.getDeletedObjects().size() + " out of " + deleteList.size() + " were deleted");
                } else {
                    LOG.debug("[{}] records deleted from datastore", deleteList);
                }
            }
            if (!prevObjectListing.isTruncated()) {
                break;
            }
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.info("deleteAllOlderThan: min=[{}] exit. Deleted[{}] records. Number of records deleted [{}] took [{}]ms",
        new Object[] { min, deleteIdSet, deleteIdSet.size(), (System.currentTimeMillis() - start) });
    return deleteIdSet;
}
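A hypothetical caller, e.g. a garbage-collection pass that removes records untouched for a week; the cutoff value and the backend variable are illustrative, not part of the original code:

// Illustrative GC driver: delete everything last modified before the cutoff.
long cutoff = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(7);
Set<DataIdentifier> deleted = backend.deleteAllOlderThan(cutoff);
LOG.info("Garbage collection removed [{}] records", deleted.size());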