Usage of org.apache.jackrabbit.core.data.DataRecord in the Apache jackrabbit-oak project: the writeBlob method of the DataStoreBlobStore class.
@Override
public String writeBlob(InputStream stream, BlobOptions options) throws IOException {
    boolean failed = true;
    try {
        long startNanos = System.nanoTime();
        checkNotNull(stream);

        DataRecord record = writeStream(stream, options);
        String blobId = getBlobId(record);

        // Register the new id with the tracker, unless the record is an
        // in-memory one. Tracking failures are logged but never fatal.
        if (tracker != null && !InMemoryDataRecord.isInstance(blobId)) {
            try {
                tracker.add(blobId);
                log.trace("Tracked Id {}", blobId);
            } catch (Exception e) {
                log.warn("Could not add track id", e);
            }
        }

        failed = false;
        stats.uploaded(System.nanoTime() - startNanos, TimeUnit.NANOSECONDS, record.getLength());
        stats.uploadCompleted(blobId);
        return blobId;
    } catch (DataStoreException e) {
        throw new IOException(e);
    } finally {
        // The DataStore does not close the stream internally,
        // so close it explicitly here (swallowing close errors on failure).
        Closeables.close(stream, failed);
    }
}
Usage of org.apache.jackrabbit.core.data.DataRecord in the Apache jackrabbit-oak project: the getBlobId method of the DataStoreBlobStore class.
@Override
public String getBlobId(@Nonnull String reference) {
    checkNotNull(reference);
    try {
        // Resolve the reference to a record; a null record means the
        // reference is unknown to the delegate data store.
        DataRecord record = delegate.getRecordFromReference(reference);
        return (record == null) ? null : getBlobId(record);
    } catch (DataStoreException e) {
        log.warn("Unable to access the blobId for [{}]", reference, e);
        return null;
    }
}
Usage of org.apache.jackrabbit.core.data.DataRecord in the Apache jackrabbit-oak project: the testDataMigration method of the TestS3DSWithSSES3 class.
/**
 * Test data migration enabling SSE_S3 encryption.
 */
@Test
public void testDataMigration() {
    try {
        // Close the data store created by setup and disable encryption.
        ds.close();
        props.remove(S3Constants.S3_ENCRYPTION);
        ds = createDataStore();

        // Write an unencrypted record and verify its contents.
        byte[] payload = new byte[dataLength];
        randomGen.nextBytes(payload);
        DataRecord record = ds.addRecord(new ByteArrayInputStream(payload));
        Assert.assertEquals(payload.length, record.getLength());
        assertRecord(payload, record);
        ds.close();

        // Turn SSE_S3 encryption on and recreate the data store instance,
        // asking it to migrate (rename) the existing keys.
        props.setProperty(S3Constants.S3_ENCRYPTION, S3Constants.S3_ENCRYPTION_SSE_S3);
        props.setProperty(S3Constants.S3_RENAME_KEYS, "true");
        ds = createDataStore();

        // The migrated record must still be readable and intact.
        record = ds.getRecord(record.getIdentifier());
        Assert.assertEquals(payload.length, record.getLength());
        assertRecord(payload, record);

        // New writes should also succeed with encryption enabled.
        randomGen.nextBytes(payload);
        ds.addRecord(new ByteArrayInputStream(payload));
        ds.close();
    } catch (Exception e) {
        LOG.error("error:", e);
        fail(e.getMessage());
    }
}
Usage of org.apache.jackrabbit.core.data.DataRecord in the Apache jackrabbit-oak project: the testSecretDefined method of the TestS3DataStore class.
@Test
public void testSecretDefined() throws Exception {
    assumeTrue(isS3Configured());

    // Configure the data store with an explicit reference secret.
    props = S3DataStoreUtils.getS3Config();
    props.setProperty("secret", "123456");
    ds = getS3DataStore(s3Class, props, dataStoreDir.getAbsolutePath());

    // Add a 4 KiB random record and check it round-trips.
    Random randomGen = new Random();
    byte[] payload = new byte[4096];
    randomGen.nextBytes(payload);
    DataRecord record = ds.addRecord(new ByteArrayInputStream(payload));
    assertEquals(payload.length, record.getLength());

    String reference = record.getReference();
    assertNotNull(reference);

    // Recompute the expected reference: "<id>:<hex(HMAC-SHA1(id, secret))>".
    String identifier = record.getIdentifier().toString();
    Mac mac = Mac.getInstance("HmacSHA1");
    mac.init(new SecretKeySpec("123456".getBytes("UTF-8"), "HmacSHA1"));
    byte[] digest = mac.doFinal(identifier.getBytes("UTF-8"));
    assertEquals(identifier + ':' + encodeHexString(digest), reference);
}
Usage of org.apache.jackrabbit.core.data.DataRecord in the Apache jackrabbit-oak project: the countDeleteChunks method of the DataStoreBlobStore class.
@Override
public long countDeleteChunks(List<String> chunkIds, long maxLastModifiedTime) throws Exception {
    int deletedCount = 0;
    // Only delegates that expose per-record deletion can be used here.
    if (!(delegate instanceof MultiDataStoreAware)) {
        return deletedCount;
    }
    List<String> batch = Lists.newArrayListWithExpectedSize(512);
    for (String chunkId : chunkIds) {
        String blobId = extractBlobId(chunkId);
        DataIdentifier identifier = new DataIdentifier(blobId);
        DataRecord record = getRecordForId(identifier);
        // Delete when no cutoff was given or the record is old enough.
        boolean eligible = (maxLastModifiedTime <= 0) || record.getLastModified() <= maxLastModifiedTime;
        log.trace("Deleting blob [{}] with last modified date [{}] : [{}]", blobId, record.getLastModified(), eligible);
        if (!eligible) {
            continue;
        }
        ((MultiDataStoreAware) delegate).deleteRecord(identifier);
        batch.add(blobId);
        deletedCount++;
        // Log and reset the batch every 512 deletions to bound memory use.
        if (deletedCount % 512 == 0) {
            log.info("Deleted blobs {}", batch);
            batch.clear();
        }
    }
    // Flush whatever remains in the final, partially-filled batch.
    if (!batch.isEmpty()) {
        log.info("Deleted blobs {}", batch);
    }
    return deletedCount;
}
Aggregations