Use of com.github.ambry.commons.BlobId.BlobDataType in project ambry by LinkedIn.
The following is the verifyBlob method of the class MockReadableStreamChannel.
/**
 * Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
 * data as the original object that was put.
 * @param requestAndResult the {@link RequestAndResult} to use for verification.
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
 * @throws Exception if the put request cannot be deserialized or its blob stream cannot be read.
 */
private void verifyBlob(RequestAndResult requestAndResult, HashMap<String, ByteBuffer> serializedRequests)
    throws Exception {
  String blobId = requestAndResult.result.result();
  ByteBuffer serializedRequest = serializedRequests.get(blobId);
  PutRequest request = deserializePutRequest(serializedRequest);
  NotificationBlobType notificationBlobType;
  BlobId origBlobId = new BlobId(blobId, mockClusterMap);
  // A stitch operation is identified by the presence of pre-uploaded chunks to stitch together.
  boolean stitchOperation = requestAndResult.chunksToStitch != null;
  if (stitchOperation) {
    assertEquals("Stitch operations should always produce metadata blobs", BlobType.MetadataBlob,
        request.getBlobType());
  }
  if (request.getBlobType() == BlobType.MetadataBlob) {
    notificationBlobType = NotificationBlobType.Composite;
    verifyMetadataBlob(requestAndResult, serializedRequests, request, origBlobId, stitchOperation);
  } else {
    notificationBlobType =
        requestAndResult.options.isChunkUpload() ? NotificationBlobType.DataChunk : NotificationBlobType.Simple;
    verifySimpleBlob(requestAndResult, request, origBlobId);
  }
  notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}

/**
 * Verifies a composite (metadata) blob: the serialized metadata content record, the expected chunk sizing and
 * count, the data type of every data-chunk blob id, the user metadata, and — for direct (non-stitch) puts —
 * the content of each data chunk.
 * @param requestAndResult the {@link RequestAndResult} to use for verification.
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
 * @param request the deserialized {@link PutRequest} for the metadata blob.
 * @param origBlobId the {@link BlobId} of the metadata blob.
 * @param stitchOperation {@code true} if this put was a stitch operation.
 * @throws Exception if the blob stream cannot be read or the metadata record cannot be deserialized.
 */
private void verifyMetadataBlob(RequestAndResult requestAndResult, HashMap<String, ByteBuffer> serializedRequests,
    PutRequest request, BlobId origBlobId, boolean stitchOperation) throws Exception {
  assertEquals("Expected metadata", BlobDataType.METADATA, origBlobId.getBlobDataType());
  byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
  CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data),
      new BlobIdFactory(mockClusterMap));
  List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
  long expectedMaxChunkSize;
  long expectedTotalSize;
  int expectedNumChunks;
  if (stitchOperation) {
    // For stitch operations the expectations come from the pre-uploaded chunks, not from putContent.
    expectedMaxChunkSize =
        requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).max().orElse(0);
    expectedTotalSize = requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).sum();
    expectedNumChunks = requestAndResult.chunksToStitch.size();
  } else {
    expectedMaxChunkSize = chunkSize;
    expectedTotalSize = requestAndResult.putContent.length;
    expectedNumChunks = RouterUtils.getNumChunksForBlobAndChunkSize(requestAndResult.putContent.length, chunkSize);
  }
  if (metadataContentVersion <= MessageFormatRecord.Metadata_Content_Version_V2) {
    // Only metadata content versions up to V2 carry a single uniform chunk size to check.
    assertEquals("Wrong max chunk size in metadata", expectedMaxChunkSize, compositeBlobInfo.getChunkSize());
  }
  assertEquals("Wrong total size in metadata", expectedTotalSize, compositeBlobInfo.getTotalSize());
  assertEquals("Number of chunks is not as expected", expectedNumChunks, dataBlobIds.size());
  // Verify all dataBlobIds are DataChunk
  for (StoreKey key : dataBlobIds) {
    BlobId origDataBlobId = (BlobId) key;
    assertEquals("Expected datachunk", BlobDataType.DATACHUNK, origDataBlobId.getBlobDataType());
  }
  // verify user-metadata
  if (requestAndResult.putBlobProperties.isEncrypted()) {
    decryptAndVerify(requestAndResult, request, origBlobId, null);
  } else {
    assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, request.getUsermetadata().array());
  }
  if (!stitchOperation) {
    verifyCompositeBlob(requestAndResult.putBlobProperties, requestAndResult.putContent,
        requestAndResult.putUserMetadata, dataBlobIds, request, serializedRequests);
  }
}

/**
 * Verifies a simple (single-chunk) blob: its blob data type, its content, and its user metadata (decrypting
 * first if the blob was put encrypted).
 * @param requestAndResult the {@link RequestAndResult} to use for verification.
 * @param request the deserialized {@link PutRequest} for the blob.
 * @param origBlobId the {@link BlobId} of the blob.
 * @throws Exception if the blob stream cannot be read.
 */
private void verifySimpleBlob(RequestAndResult requestAndResult, PutRequest request, BlobId origBlobId)
    throws Exception {
  // TODO: Currently, we don't have the logic to distinguish Simple vs DataChunk for the first chunk
  // Once the logic is fixed we should assert Simple.
  BlobDataType dataType = origBlobId.getBlobDataType();
  assertTrue("Invalid blob data type", dataType == BlobDataType.DATACHUNK || dataType == BlobDataType.SIMPLE);
  byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
  if (!requestAndResult.putBlobProperties.isEncrypted()) {
    assertArrayEquals("Input blob and written blob should be the same", requestAndResult.putContent, content);
    assertArrayEquals("UserMetadata mismatch for simple blob", requestAndResult.putUserMetadata,
        request.getUsermetadata().array());
  } else {
    decryptAndVerify(requestAndResult, request, origBlobId, content);
  }
}

/**
 * Runs a {@link DecryptJob} on the encrypted user metadata (and, when {@code encryptedContent} is non-null,
 * the encrypted blob content) of {@code request} and asserts that the decrypted bytes match the originally
 * put data.
 * @param requestAndResult the {@link RequestAndResult} holding the original put content and user metadata.
 * @param request the deserialized {@link PutRequest} carrying the encrypted data and encryption key.
 * @param origBlobId the {@link BlobId} of the blob being verified.
 * @param encryptedContent the encrypted blob content to decrypt and verify, or {@code null} to verify only
 *                         the user metadata (the composite/metadata blob case).
 */
private void decryptAndVerify(RequestAndResult requestAndResult, PutRequest request, BlobId origBlobId,
    byte[] encryptedContent) {
  ByteBuffer userMetadata = request.getUsermetadata();
  // The reason to directly call run() instead of spinning up a thread and calling start() is that any
  // exception or assertion failure in a non-main thread would not fail the test.
  new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(),
      encryptedContent == null ? null : Unpooled.wrappedBuffer(encryptedContent), userMetadata, cryptoService, kms,
      null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
    assertNull("Exception should not be thrown", exception);
    assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
    if (encryptedContent == null) {
      assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata,
          result.getDecryptedUserMetadata().array());
    } else {
      ByteBuf decryptedBlobContent = result.getDecryptedBlobContent();
      byte[] blobContent = new byte[decryptedBlobContent.readableBytes()];
      decryptedBlobContent.readBytes(blobContent);
      assertArrayEquals("Content mismatch", requestAndResult.putContent, blobContent);
      assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata,
          result.getDecryptedUserMetadata().array());
      decryptedBlobContent.release();
    }
  }).run();
}
Aggregations