Example usage of com.github.ambry.notification.NotificationBlobType in the project ambry by LinkedIn.
Source: class MockReadableStreamChannel, method verifyBlob.
/**
 * Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
 * data as the original object that was put.
 * @param blobId the blobId of the blob that is to be verified.
 * @param properties the {@link BlobProperties} of the blob that is to be verified
 * @param originalPutContent original content of the blob
 * @param originalUserMetadata original user-metadata of the blob
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
 * @throws Exception if the serialized request cannot be deserialized or the blob stream cannot be read.
 */
private void verifyBlob(String blobId, BlobProperties properties, byte[] originalPutContent,
    byte[] originalUserMetadata, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
  ByteBuffer serializedRequest = serializedRequests.get(blobId);
  PutRequest.ReceivedPutRequest request = deserializePutRequest(serializedRequest);
  NotificationBlobType notificationBlobType;
  if (request.getBlobType() == BlobType.MetadataBlob) {
    notificationBlobType = NotificationBlobType.Composite;
    // The stored payload is a metadata record: deserialize it and validate the chunk layout.
    byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    CompositeBlobInfo compositeBlobInfo =
        MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data), new BlobIdFactory(mockClusterMap));
    Assert.assertEquals("Wrong max chunk size in metadata", chunkSize, compositeBlobInfo.getChunkSize());
    Assert.assertEquals("Wrong total size in metadata", originalPutContent.length, compositeBlobInfo.getTotalSize());
    List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
    Assert.assertEquals("Number of chunks is not as expected",
        RouterUtils.getNumChunksForBlobAndChunkSize(originalPutContent.length, chunkSize), dataBlobIds.size());
    // verify user-metadata
    if (properties.isEncrypted()) {
      ByteBuffer userMetadata = request.getUsermetadata();
      BlobId origBlobId = new BlobId(blobId, mockClusterMap);
      // run() is invoked directly instead of start()ing a thread because exceptions or assertion
      // failures raised on a non-main thread would not fail the test.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms,
          new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
            Assert.assertNull("Exception should not be thrown", exception);
            Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
            Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata,
                result.getDecryptedUserMetadata().array());
          }).run();
    } else {
      Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, request.getUsermetadata().array());
    }
    verifyCompositeBlob(properties, originalPutContent, originalUserMetadata, dataBlobIds, request, serializedRequests);
  } else {
    notificationBlobType = NotificationBlobType.Simple;
    byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    if (!properties.isEncrypted()) {
      Assert.assertArrayEquals("Input blob and written blob should be the same", originalPutContent, content);
      Assert.assertArrayEquals("UserMetadata mismatch for simple blob", originalUserMetadata,
          request.getUsermetadata().array());
      // Fix: a redundant notificationSystem.verifyNotification(...) call was removed from this branch;
      // the single call at the end of the method already covers every branch, and the duplicate caused
      // the same notification to be verified twice for non-encrypted simple blobs.
    } else {
      ByteBuffer userMetadata = request.getUsermetadata();
      BlobId origBlobId = new BlobId(blobId, mockClusterMap);
      // run() is invoked directly instead of start()ing a thread because exceptions or assertion
      // failures raised on a non-main thread would not fail the test.
      // Callback expressed as a lambda, consistent with the composite branch above.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), ByteBuffer.wrap(content), userMetadata,
          cryptoService, kms, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
            Assert.assertNull("Exception should not be thrown", exception);
            Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
            Assert.assertArrayEquals("Content mismatch", originalPutContent,
                result.getDecryptedBlobContent().array());
            Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata,
                result.getDecryptedUserMetadata().array());
          }).run();
    }
  }
  // Single notification check shared by all branches (composite, simple, encrypted or not).
  notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
Example usage of com.github.ambry.notification.NotificationBlobType in the project ambry by LinkedIn.
Source: class PutOperation, method maybeNotifyForBlobCreation.
/**
 * Notify for overall blob creation if the operation is complete and the blob was put successfully. Also ensure that
 * notifications have been sent out for all successfully put data chunks.
 */
void maybeNotifyForBlobCreation() {
  if (!isOperationComplete()) {
    return;
  }
  // only notify for data chunk creation on direct uploads.
  if (isComposite() && !isStitchOperation()) {
    metadataPutChunk.maybeNotifyForFirstChunkCreation();
  }
  if (blobId == null) {
    // The put did not succeed; there is no blob to announce.
    return;
  }
  BlobProperties blobProperties = getBlobProperties();
  Pair<Account, Container> accountContainer =
      RouterUtils.getAccountContainer(accountService, blobProperties.getAccountId(), blobProperties.getContainerId());
  // Classify the notification: composite wins; otherwise chunk uploads are data chunks, the rest are simple blobs.
  NotificationBlobType blobType;
  if (isComposite()) {
    blobType = NotificationBlobType.Composite;
  } else if (options.isChunkUpload()) {
    blobType = NotificationBlobType.DataChunk;
  } else {
    blobType = NotificationBlobType.Simple;
  }
  notificationSystem.onBlobCreated(getBlobIdString(), blobProperties, accountContainer.getFirst(),
      accountContainer.getSecond(), blobType);
}
Example usage of com.github.ambry.notification.NotificationBlobType in the project ambry by LinkedIn.
Source: class MockReadableStreamChannel, method verifyBlob (RequestAndResult variant).
/**
 * Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
 * data as the original object that was put.
 * @param requestAndResult the {@link RequestAndResult} to use for verification.
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
 * @throws Exception if the serialized request cannot be deserialized or the blob stream cannot be read.
 */
private void verifyBlob(RequestAndResult requestAndResult, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
String blobId = requestAndResult.result.result();
ByteBuffer serializedRequest = serializedRequests.get(blobId);
PutRequest request = deserializePutRequest(serializedRequest);
NotificationBlobType notificationBlobType;
BlobId origBlobId = new BlobId(blobId, mockClusterMap);
// A non-null chunksToStitch marks a stitch operation, which must always yield a metadata blob.
boolean stitchOperation = requestAndResult.chunksToStitch != null;
if (stitchOperation) {
assertEquals("Stitch operations should always produce metadata blobs", BlobType.MetadataBlob, request.getBlobType());
}
if (request.getBlobType() == BlobType.MetadataBlob) {
notificationBlobType = NotificationBlobType.Composite;
assertEquals("Expected metadata", BlobDataType.METADATA, origBlobId.getBlobDataType());
// Deserialize the metadata record stored as the blob payload and validate the chunk layout.
byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data), new BlobIdFactory(mockClusterMap));
List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
long expectedMaxChunkSize;
long expectedTotalSize;
int expectedNumChunks;
if (stitchOperation) {
// Stitched blob: expectations derive from the caller-supplied chunk list, not the router chunk size.
expectedMaxChunkSize = requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).max().orElse(0);
expectedTotalSize = requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).sum();
expectedNumChunks = requestAndResult.chunksToStitch.size();
} else {
// Direct upload: the router splits the content into fixed-size chunks.
expectedMaxChunkSize = chunkSize;
expectedTotalSize = requestAndResult.putContent.length;
expectedNumChunks = RouterUtils.getNumChunksForBlobAndChunkSize(requestAndResult.putContent.length, chunkSize);
}
// Per-record chunk size is only present up to metadata content V2.
if (metadataContentVersion <= MessageFormatRecord.Metadata_Content_Version_V2) {
assertEquals("Wrong max chunk size in metadata", expectedMaxChunkSize, compositeBlobInfo.getChunkSize());
}
assertEquals("Wrong total size in metadata", expectedTotalSize, compositeBlobInfo.getTotalSize());
assertEquals("Number of chunks is not as expected", expectedNumChunks, dataBlobIds.size());
// Verify all dataBlobIds are DataChunk
for (StoreKey key : dataBlobIds) {
BlobId origDataBlobId = (BlobId) key;
assertEquals("Expected datachunk", BlobDataType.DATACHUNK, origDataBlobId.getBlobDataType());
}
// verify user-metadata
if (requestAndResult.putBlobProperties.isEncrypted()) {
ByteBuffer userMetadata = request.getUsermetadata();
// run() is invoked directly instead of start()ing a thread because exceptions or assertion
// failures raised on a non-main thread would not fail the test.
new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms, null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
assertNull("Exception should not be thrown", exception);
assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, result.getDecryptedUserMetadata().array());
}).run();
} else {
assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, request.getUsermetadata().array());
}
// Content verification of the individual data chunks only applies to direct uploads; for stitch
// operations the chunks were put by earlier operations.
if (!stitchOperation) {
verifyCompositeBlob(requestAndResult.putBlobProperties, requestAndResult.putContent, requestAndResult.putUserMetadata, dataBlobIds, request, serializedRequests);
}
} else {
notificationBlobType = requestAndResult.options.isChunkUpload() ? NotificationBlobType.DataChunk : NotificationBlobType.Simple;
// TODO: Currently, we don't have the logic to distinguish Simple vs DataChunk for the first chunk
// Once the logic is fixed we should assert Simple.
BlobDataType dataType = origBlobId.getBlobDataType();
assertTrue("Invalid blob data type", dataType == BlobDataType.DATACHUNK || dataType == BlobDataType.SIMPLE);
byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
if (!requestAndResult.putBlobProperties.isEncrypted()) {
assertArrayEquals("Input blob and written blob should be the same", requestAndResult.putContent, content);
assertArrayEquals("UserMetadata mismatch for simple blob", requestAndResult.putUserMetadata, request.getUsermetadata().array());
} else {
ByteBuffer userMetadata = request.getUsermetadata();
// run() is invoked directly instead of start()ing a thread because exceptions or assertion
// failures raised on a non-main thread would not fail the test.
new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), Unpooled.wrappedBuffer(content), userMetadata, cryptoService, kms, null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
assertNull("Exception should not be thrown", exception);
assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
// Copy the decrypted bytes out of the ByteBuf before releasing it.
ByteBuf decryptedBlobContent = result.getDecryptedBlobContent();
byte[] blobContent = new byte[decryptedBlobContent.readableBytes()];
decryptedBlobContent.readBytes(blobContent);
assertArrayEquals("Content mismatch", requestAndResult.putContent, blobContent);
assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, result.getDecryptedUserMetadata().array());
// Release the reference-counted buffer to avoid a leak in the Netty allocator.
decryptedBlobContent.release();
}).run();
}
}
// Single notification check shared by all branches.
notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
Aggregations