Use of com.github.ambry.messageformat.CompositeBlobInfo in project ambry by linkedin.
From the class MockReadableStreamChannel, method verifyBlob.
/**
* Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
* data as the original object that was put.
* @param blobId the blobId of the blob that is to be verified.
* @param properties the {@link BlobProperties} of the blob that is to be verified
* @param originalPutContent original content of the blob
* @param originalUserMetadata original user-metadata of the blob
* @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
*/
private void verifyBlob(String blobId, BlobProperties properties, byte[] originalPutContent,
    byte[] originalUserMetadata, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
  ByteBuffer serializedRequest = serializedRequests.get(blobId);
  PutRequest.ReceivedPutRequest request = deserializePutRequest(serializedRequest);
  NotificationBlobType notificationBlobType;
  if (request.getBlobType() == BlobType.MetadataBlob) {
    notificationBlobType = NotificationBlobType.Composite;
    byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    CompositeBlobInfo compositeBlobInfo =
        MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data), new BlobIdFactory(mockClusterMap));
    Assert.assertEquals("Wrong max chunk size in metadata", chunkSize, compositeBlobInfo.getChunkSize());
    Assert.assertEquals("Wrong total size in metadata", originalPutContent.length, compositeBlobInfo.getTotalSize());
    List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
    Assert.assertEquals("Number of chunks is not as expected",
        RouterUtils.getNumChunksForBlobAndChunkSize(originalPutContent.length, chunkSize), dataBlobIds.size());
    // verify user-metadata
    if (properties.isEncrypted()) {
      ByteBuffer userMetadata = request.getUsermetadata();
      BlobId origBlobId = new BlobId(blobId, mockClusterMap);
      // run() is called directly (rather than spinning up a thread via start()) because exceptions or
      // assertion failures on a non-main thread would not fail the test.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms,
          new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
            Assert.assertNull("Exception should not be thrown", exception);
            Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
            Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata,
                result.getDecryptedUserMetadata().array());
          }).run();
    } else {
      Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, request.getUsermetadata().array());
    }
    verifyCompositeBlob(properties, originalPutContent, originalUserMetadata, dataBlobIds, request, serializedRequests);
  } else {
    notificationBlobType = NotificationBlobType.Simple;
    byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    if (!properties.isEncrypted()) {
      Assert.assertArrayEquals("Input blob and written blob should be the same", originalPutContent, content);
      Assert.assertArrayEquals("UserMetadata mismatch for simple blob", originalUserMetadata,
          request.getUsermetadata().array());
      notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
    } else {
      ByteBuffer userMetadata = request.getUsermetadata();
      BlobId origBlobId = new BlobId(blobId, mockClusterMap);
      // run() is called directly (rather than spinning up a thread via start()) because exceptions or
      // assertion failures on a non-main thread would not fail the test.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), ByteBuffer.wrap(content), userMetadata,
          cryptoService, kms, new CryptoJobMetricsTracker(metrics.decryptJobMetrics),
          new Callback<DecryptJob.DecryptJobResult>() {
            @Override
            public void onCompletion(DecryptJob.DecryptJobResult result, Exception exception) {
              Assert.assertNull("Exception should not be thrown", exception);
              Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
              Assert.assertArrayEquals("Content mismatch", originalPutContent,
                  result.getDecryptedBlobContent().array());
              Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata,
                  result.getDecryptedUserMetadata().array());
            }
          }).run();
    }
  }
  notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
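The run()-instead-of-start() comments above hinge on a JUnit behavior worth spelling out: an AssertionError thrown on a spawned thread is delivered to that thread's uncaught-exception handler, not to the test runner, so the test would pass despite the failed assertion. A minimal illustration (a hypothetical test, not part of ambry):

@Test
public void assertionOnSpawnedThreadDoesNotFailTest() throws Exception {
  // The AssertionError surfaces only in the spawned thread's uncaught-exception
  // handler (a printed stack trace by default); the test runner never observes it.
  Thread t = new Thread(() -> Assert.fail("swallowed on the spawned thread"));
  t.start();
  t.join();
  // Execution reaches this point and the test passes. Invoking the same logic
  // inline on the test thread (as verifyBlob does via DecryptJob.run()) would
  // throw on the test thread and fail the test as intended.
}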
Use of com.github.ambry.messageformat.CompositeBlobInfo in project ambry by linkedin.
From the class BlobIdTransformer, method newMessage.
/**
 * Creates a new Message from the old Message input stream, replacing the old store key and
 * account/container IDs with the new store key and its account/container IDs.
 * @param inputStream the input stream of the Message
 * @param newKey the new StoreKey
 * @param oldMessageInfo the {@link MessageInfo} of the message being transformed
 * @return the new Message
 * @throws Exception if the message cannot be read or transformed
 */
private Message newMessage(InputStream inputStream, StoreKey newKey, MessageInfo oldMessageInfo) throws Exception {
  MessageHeader_Format headerFormat = getMessageHeader(inputStream);
  storeKeyFactory.getStoreKey(new DataInputStream(inputStream));
  BlobId newBlobId = (BlobId) newKey;
  if (headerFormat.isPutRecord()) {
    if (headerFormat.hasLifeVersion() && headerFormat.getLifeVersion() != oldMessageInfo.getLifeVersion()) {
      // The original put buffer might have lifeVersion 0 while the message info carries a higher lifeVersion.
      logger.trace("LifeVersion in stream: {} failed to match lifeVersion from Index: {} for key {}",
          headerFormat.getLifeVersion(), oldMessageInfo.getLifeVersion(), oldMessageInfo.getStoreKey());
    }
    ByteBuffer blobEncryptionKey = null;
    if (headerFormat.hasEncryptionKeyRecord()) {
      blobEncryptionKey = deserializeBlobEncryptionKey(inputStream);
    }
    BlobProperties oldProperties = deserializeBlobProperties(inputStream);
    ByteBuffer userMetaData = deserializeUserMetadata(inputStream);
    BlobData blobData = deserializeBlob(inputStream);
    ByteBuf blobDataBytes = blobData.content();
    long blobPropertiesSize = oldProperties.getBlobSize();
    // Metadata blob content will be rewritten with the transformed data chunk IDs.
    if (blobData.getBlobType().equals(BlobType.MetadataBlob)) {
      ByteBuffer serializedMetadataContent = blobDataBytes.nioBuffer();
      CompositeBlobInfo compositeBlobInfo =
          MetadataContentSerDe.deserializeMetadataContentRecord(serializedMetadataContent, storeKeyFactory);
      Map<StoreKey, StoreKey> convertedKeys = storeKeyConverter.convert(compositeBlobInfo.getKeys());
      List<StoreKey> newKeys = new ArrayList<>();
      boolean isOldMetadataKeyDifferentFromNew = !oldMessageInfo.getStoreKey().getID().equals(newKey.getID());
      short metadataAccountId = newBlobId.getAccountId();
      short metadataContainerId = newBlobId.getContainerId();
      for (StoreKey oldDataChunkKey : compositeBlobInfo.getKeys()) {
        StoreKey newDataChunkKey = convertedKeys.get(oldDataChunkKey);
        if (newDataChunkKey == null) {
          throw new IllegalStateException("Found metadata chunk with a deprecated data chunk. " + " Old MetadataID: "
              + oldMessageInfo.getStoreKey().getID() + " New MetadataID: " + newKey.getID() + " Old Datachunk ID: "
              + oldDataChunkKey.getID());
        }
        if (isOldMetadataKeyDifferentFromNew && newDataChunkKey.getID().equals(oldDataChunkKey.getID())) {
          throw new IllegalStateException("Found changed metadata chunk with an unchanged data chunk"
              + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID() + " New MetadataID: " + newKey.getID()
              + " Old Datachunk ID: " + oldDataChunkKey.getID());
        }
        if (!isOldMetadataKeyDifferentFromNew && !newDataChunkKey.getID().equals(oldDataChunkKey.getID())) {
          throw new IllegalStateException("Found unchanged metadata chunk with a changed data chunk"
              + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID() + " New MetadataID: " + newKey.getID()
              + " Old Datachunk ID: " + oldDataChunkKey.getID() + " New Datachunk ID: " + newDataChunkKey.getID());
        }
        BlobId newDataChunkBlobId = (BlobId) newDataChunkKey;
        if (newDataChunkBlobId.getAccountId() != metadataAccountId
            || newDataChunkBlobId.getContainerId() != metadataContainerId) {
          throw new IllegalStateException(
              "Found changed metadata chunk with a datachunk with a different account/container" + " Old MetadataID: "
                  + oldMessageInfo.getStoreKey().getID() + " New MetadataID: " + newKey.getID()
                  + " Old Datachunk ID: " + oldDataChunkKey.getID() + " New Datachunk ID: "
                  + newDataChunkBlobId.getID() + " Metadata AccountId: " + metadataAccountId
                  + " Metadata ContainerId: " + metadataContainerId + " Datachunk AccountId: "
                  + newDataChunkBlobId.getAccountId() + " Datachunk ContainerId: "
                  + newDataChunkBlobId.getContainerId());
        }
        newKeys.add(newDataChunkKey);
      }
      ByteBuffer metadataContent;
      if (compositeBlobInfo.getMetadataContentVersion() == Metadata_Content_Version_V2) {
        metadataContent = MetadataContentSerDe.serializeMetadataContentV2(compositeBlobInfo.getChunkSize(),
            compositeBlobInfo.getTotalSize(), newKeys);
      } else if (compositeBlobInfo.getMetadataContentVersion() == Metadata_Content_Version_V3) {
        List<Pair<StoreKey, Long>> keyAndSizeList = new ArrayList<>();
        List<CompositeBlobInfo.ChunkMetadata> chunkMetadataList = compositeBlobInfo.getChunkMetadataList();
        for (int i = 0; i < newKeys.size(); i++) {
          keyAndSizeList.add(new Pair<>(newKeys.get(i), chunkMetadataList.get(i).getSize()));
        }
        metadataContent = MetadataContentSerDe.serializeMetadataContentV3(compositeBlobInfo.getTotalSize(), keyAndSizeList);
      } else {
        throw new IllegalStateException(
            "Unexpected metadata content version from composite blob: " + compositeBlobInfo.getMetadataContentVersion());
      }
      blobPropertiesSize = compositeBlobInfo.getTotalSize();
      metadataContent.flip();
      blobDataBytes.release();
      blobDataBytes = Unpooled.wrappedBuffer(metadataContent);
      blobData = new BlobData(blobData.getBlobType(), metadataContent.remaining(), blobDataBytes);
    }
    BlobProperties newProperties = new BlobProperties(blobPropertiesSize, oldProperties.getServiceId(),
        oldProperties.getOwnerId(), oldProperties.getContentType(), oldProperties.isPrivate(),
        oldProperties.getTimeToLiveInSeconds(), oldProperties.getCreationTimeInMs(), newBlobId.getAccountId(),
        newBlobId.getContainerId(), oldProperties.isEncrypted(), oldProperties.getExternalAssetTag(),
        oldProperties.getContentEncoding(), oldProperties.getFilename());
    // BlobIdTransformer only exists on ambry-server, and replication between servers relies on the blocking
    // channel, which still uses java ByteBuffer, so there is nothing to release here.
    // TODO: when netty ByteBuf is adopted for the blocking channel on ambry-server, remember to release this ByteBuf.
    PutMessageFormatInputStream putMessageFormatInputStream =
        new PutMessageFormatInputStream(newKey, blobEncryptionKey, newProperties, userMetaData,
            new ByteBufInputStream(blobDataBytes, true), blobData.getSize(), blobData.getBlobType(),
            oldMessageInfo.getLifeVersion());
    // Reuse the original CRC if present in the oldMessageInfo. This is important to ensure that messages received
    // via replication are sent to the store with proper CRCs (which the store needs to detect duplicate messages).
    // As an additional guard, the original CRC is only reused if the key's ID in string form is the same after
    // conversion.
    Long originalCrc = oldMessageInfo.getStoreKey().getID().equals(newKey.getID()) ? oldMessageInfo.getCrc() : null;
    MessageInfo info = new MessageInfo.Builder(newKey, putMessageFormatInputStream.getSize(),
        newProperties.getAccountId(), newProperties.getContainerId(), oldMessageInfo.getOperationTimeMs())
        .isTtlUpdated(oldMessageInfo.isTtlUpdated())
        .expirationTimeInMs(oldMessageInfo.getExpirationTimeInMs())
        .crc(originalCrc)
        .lifeVersion(oldMessageInfo.getLifeVersion())
        .build();
    return new Message(info, putMessageFormatInputStream);
  } else {
    throw new IllegalArgumentException("Only 'put' records are valid");
  }
}
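The version branch above is the crux of the rewrite: V2 metadata content stores one uniform chunk size, while V3 stores an explicit (key, size) pair per chunk. As a quick orientation to the SerDe pair used here, below is a minimal round-trip sketch of the V2 path. This is a hedged sketch rather than ambry code: the chunkSize, totalSize, chunkKeys, and storeKeyFactory parameters are assumed to be supplied by the caller, and (as confirmed by the metadataContent.flip() call above) the serializer returns an unflipped buffer.

// Hedged sketch (not ambry code): a V2 metadata-content serialize/deserialize round trip,
// assuming the caller supplies the chunk size, total size, a List<StoreKey> of chunk keys,
// and a StoreKeyFactory such as BlobIdFactory.
static void roundTripV2(int chunkSize, long totalSize, List<StoreKey> chunkKeys, StoreKeyFactory storeKeyFactory)
    throws Exception {
  ByteBuffer serialized = MetadataContentSerDe.serializeMetadataContentV2(chunkSize, totalSize, chunkKeys);
  serialized.flip(); // the serializer returns an unflipped buffer (cf. metadataContent.flip() above)
  CompositeBlobInfo deserialized = MetadataContentSerDe.deserializeMetadataContentRecord(serialized, storeKeyFactory);
  assert deserialized.getChunkSize() == chunkSize;
  assert deserialized.getTotalSize() == totalSize;
  assert deserialized.getKeys().equals(chunkKeys);
}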
Use of com.github.ambry.messageformat.CompositeBlobInfo in project ambry by linkedin.
From the class MockReadableStreamChannel, method verifyBlob.
/**
* Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
* data as the original object that was put.
* @param requestAndResult the {@link RequestAndResult} to use for verification.
* @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
*/
private void verifyBlob(RequestAndResult requestAndResult, HashMap<String, ByteBuffer> serializedRequests)
    throws Exception {
  String blobId = requestAndResult.result.result();
  ByteBuffer serializedRequest = serializedRequests.get(blobId);
  PutRequest request = deserializePutRequest(serializedRequest);
  NotificationBlobType notificationBlobType;
  BlobId origBlobId = new BlobId(blobId, mockClusterMap);
  boolean stitchOperation = requestAndResult.chunksToStitch != null;
  if (stitchOperation) {
    assertEquals("Stitch operations should always produce metadata blobs", BlobType.MetadataBlob,
        request.getBlobType());
  }
  if (request.getBlobType() == BlobType.MetadataBlob) {
    notificationBlobType = NotificationBlobType.Composite;
    assertEquals("Expected metadata", BlobDataType.METADATA, origBlobId.getBlobDataType());
    byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    CompositeBlobInfo compositeBlobInfo =
        MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data), new BlobIdFactory(mockClusterMap));
    List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
    long expectedMaxChunkSize;
    long expectedTotalSize;
    int expectedNumChunks;
    if (stitchOperation) {
      expectedMaxChunkSize =
          requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).max().orElse(0);
      expectedTotalSize = requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).sum();
      expectedNumChunks = requestAndResult.chunksToStitch.size();
    } else {
      expectedMaxChunkSize = chunkSize;
      expectedTotalSize = requestAndResult.putContent.length;
      expectedNumChunks = RouterUtils.getNumChunksForBlobAndChunkSize(requestAndResult.putContent.length, chunkSize);
    }
    if (metadataContentVersion <= MessageFormatRecord.Metadata_Content_Version_V2) {
      assertEquals("Wrong max chunk size in metadata", expectedMaxChunkSize, compositeBlobInfo.getChunkSize());
    }
    assertEquals("Wrong total size in metadata", expectedTotalSize, compositeBlobInfo.getTotalSize());
    assertEquals("Number of chunks is not as expected", expectedNumChunks, dataBlobIds.size());
    // Verify that all dataBlobIds are data chunks.
    for (StoreKey key : dataBlobIds) {
      BlobId origDataBlobId = (BlobId) key;
      assertEquals("Expected datachunk", BlobDataType.DATACHUNK, origDataBlobId.getBlobDataType());
    }
    // verify user-metadata
    if (requestAndResult.putBlobProperties.isEncrypted()) {
      ByteBuffer userMetadata = request.getUsermetadata();
      // run() is called directly (rather than spinning up a thread via start()) because exceptions or
      // assertion failures on a non-main thread would not fail the test.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms,
          null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
            assertNull("Exception should not be thrown", exception);
            assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
            assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata,
                result.getDecryptedUserMetadata().array());
          }).run();
    } else {
      assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, request.getUsermetadata().array());
    }
    if (!stitchOperation) {
      verifyCompositeBlob(requestAndResult.putBlobProperties, requestAndResult.putContent,
          requestAndResult.putUserMetadata, dataBlobIds, request, serializedRequests);
    }
  } else {
    notificationBlobType =
        requestAndResult.options.isChunkUpload() ? NotificationBlobType.DataChunk : NotificationBlobType.Simple;
    // TODO: Currently there is no logic to distinguish Simple vs DataChunk for the first chunk.
    // Once that logic is fixed, this should assert Simple.
    BlobDataType dataType = origBlobId.getBlobDataType();
    assertTrue("Invalid blob data type", dataType == BlobDataType.DATACHUNK || dataType == BlobDataType.SIMPLE);
    byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    if (!requestAndResult.putBlobProperties.isEncrypted()) {
      assertArrayEquals("Input blob and written blob should be the same", requestAndResult.putContent, content);
      assertArrayEquals("UserMetadata mismatch for simple blob", requestAndResult.putUserMetadata,
          request.getUsermetadata().array());
    } else {
      ByteBuffer userMetadata = request.getUsermetadata();
      // run() is called directly (rather than spinning up a thread via start()) because exceptions or
      // assertion failures on a non-main thread would not fail the test.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), Unpooled.wrappedBuffer(content),
          userMetadata, cryptoService, kms, null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics),
          (result, exception) -> {
            assertNull("Exception should not be thrown", exception);
            assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
            ByteBuf decryptedBlobContent = result.getDecryptedBlobContent();
            byte[] blobContent = new byte[decryptedBlobContent.readableBytes()];
            decryptedBlobContent.readBytes(blobContent);
            assertArrayEquals("Content mismatch", requestAndResult.putContent, blobContent);
            assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata,
                result.getDecryptedUserMetadata().array());
            decryptedBlobContent.release();
          }).run();
    }
  }
  notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
Use of com.github.ambry.messageformat.CompositeBlobInfo in project ambry by linkedin.
From the class GetBlobOperationTest, method testCompositeBlobRawMode.
/**
* Test gets of composite blob in raw mode.
*/
@Test
public void testCompositeBlobRawMode() throws Exception {
  options = new GetBlobOptionsInternal(
      new GetBlobOptionsBuilder().operationType(GetBlobOptions.OperationType.All).rawMode(true).build(), false,
      routerMetrics.ageAtGet);
  for (int numChunks = 2; numChunks < 10; numChunks++) {
    blobSize = numChunks * maxChunkSize;
    doPut();
    if (testEncryption) {
      getAndAssertSuccess();
      ByteBuffer payload = getBlobBuffer();
      // extract chunk ids
      BlobAll blobAll =
          MessageFormatRecord.deserializeBlobAll(new ByteArrayInputStream(payload.array()), blobIdFactory);
      ByteBuf metadataBuffer = blobAll.getBlobData().content();
      try {
        CompositeBlobInfo compositeBlobInfo =
            MetadataContentSerDe.deserializeMetadataContentRecord(metadataBuffer.nioBuffer(), blobIdFactory);
        Assert.assertEquals("Total size didn't match", blobSize, compositeBlobInfo.getTotalSize());
        Assert.assertEquals("Chunk count didn't match", numChunks, compositeBlobInfo.getKeys().size());
      } finally {
        metadataBuffer.release();
      }
      // TODO: test raw get on each chunk (needs changes to the test framework)
    } else {
      // Raw mode is only supported for encrypted blobs for now.
    }
  }
}