use of com.github.ambry.messageformat.PutMessageFormatInputStream in project ambry by linkedin.
the class ReplicationTest method getPutMessage.
/**
* Constructs an entire message with header, blob properties, user metadata and blob content.
* @param id id for which the message has to be constructed.
* @param accountId accountId of the blob
* @param containerId containerId of the blob
* @param enableEncryption {@code true} if encryption needs to be enabled, {@code false} otherwise
* @return a {@link Pair} of {@link ByteBuffer} and {@link MessageInfo} representing the entire message and the
* associated {@link MessageInfo}
* @throws MessageFormatException
* @throws IOException
*/
private Pair<ByteBuffer, MessageInfo> getPutMessage(StoreKey id, short accountId, short containerId,
    boolean enableEncryption) throws MessageFormatException, IOException {
  int blobSize = TestUtils.RANDOM.nextInt(500) + 501;
  int userMetadataSize = TestUtils.RANDOM.nextInt(blobSize / 2);
  int encryptionKeySize = TestUtils.RANDOM.nextInt(blobSize / 4);
  byte[] blob = new byte[blobSize];
  byte[] usermetadata = new byte[userMetadataSize];
  byte[] encryptionKey = enableEncryption ? new byte[encryptionKeySize] : null;
  TestUtils.RANDOM.nextBytes(blob);
  TestUtils.RANDOM.nextBytes(usermetadata);
  BlobProperties blobProperties = new BlobProperties(blobSize, "test", accountId, containerId, encryptionKey != null);
  MessageFormatInputStream stream =
      new PutMessageFormatInputStream(id, encryptionKey == null ? null : ByteBuffer.wrap(encryptionKey),
          blobProperties, ByteBuffer.wrap(usermetadata), new ByteBufferInputStream(ByteBuffer.wrap(blob)), blobSize);
  byte[] message = Utils.readBytesFromStream(stream, (int) stream.getSize());
  return new Pair<>(ByteBuffer.wrap(message),
      new MessageInfo(id, message.length, Utils.Infinite_Time, accountId, containerId,
          blobProperties.getCreationTimeInMs()));
}
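A minimal usage sketch (hedged; `storeKey` is a placeholder key and the JUnit assertEquals import is assumed from the surrounding test class): the returned buffer holds the fully serialized message, and the accompanying MessageInfo mirrors its length and key.

Pair<ByteBuffer, MessageInfo> msgAndInfo = getPutMessage(storeKey, (short) 1, (short) 2, true);
// The MessageInfo size must equal the serialized message length...
assertEquals(msgAndInfo.getFirst().remaining(), msgAndInfo.getSecond().getSize());
// ...and it carries the same store key that was passed in.
assertEquals(storeKey, msgAndInfo.getSecond().getStoreKey());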
use of com.github.ambry.messageformat.PutMessageFormatInputStream in project ambry by linkedin.
the class BlobIdTransformer method newMessage.
/**
* Creates a Message from the old Message
* input stream, replacing the old store key and account/container IDs
* with a new store key and account/container IDs
* @param inputStream the input stream of the Message
* @param newKey the new StoreKey
* @param oldMessageInfo the {@link MessageInfo} of the message being transformed
* @return the new Message
* @throws Exception
*/
private Message newMessage(InputStream inputStream, StoreKey newKey, MessageInfo oldMessageInfo) throws Exception {
  MessageHeader_Format headerFormat = getMessageHeader(inputStream);
  storeKeyFactory.getStoreKey(new DataInputStream(inputStream));
  BlobId newBlobId = (BlobId) newKey;
  if (headerFormat.isPutRecord()) {
    if (headerFormat.hasLifeVersion() && headerFormat.getLifeVersion() != oldMessageInfo.getLifeVersion()) {
      // The original Put buffer might have lifeVersion as 0, but the message info might have a higher lifeVersion.
      logger.trace("LifeVersion in stream: {} failed to match lifeVersion from Index: {} for key {}",
          headerFormat.getLifeVersion(), oldMessageInfo.getLifeVersion(), oldMessageInfo.getStoreKey());
    }
    ByteBuffer blobEncryptionKey = null;
    if (headerFormat.hasEncryptionKeyRecord()) {
      blobEncryptionKey = deserializeBlobEncryptionKey(inputStream);
    }
    BlobProperties oldProperties = deserializeBlobProperties(inputStream);
    ByteBuffer userMetaData = deserializeUserMetadata(inputStream);
    BlobData blobData = deserializeBlob(inputStream);
    ByteBuf blobDataBytes = blobData.content();
    long blobPropertiesSize = oldProperties.getBlobSize();
    // will be rewritten with transformed IDs
    if (blobData.getBlobType().equals(BlobType.MetadataBlob)) {
      ByteBuffer serializedMetadataContent = blobDataBytes.nioBuffer();
      CompositeBlobInfo compositeBlobInfo =
          MetadataContentSerDe.deserializeMetadataContentRecord(serializedMetadataContent, storeKeyFactory);
      Map<StoreKey, StoreKey> convertedKeys = storeKeyConverter.convert(compositeBlobInfo.getKeys());
      List<StoreKey> newKeys = new ArrayList<>();
      boolean isOldMetadataKeyDifferentFromNew = !oldMessageInfo.getStoreKey().getID().equals(newKey.getID());
      short metadataAccountId = newBlobId.getAccountId();
      short metadataContainerId = newBlobId.getContainerId();
      for (StoreKey oldDataChunkKey : compositeBlobInfo.getKeys()) {
        StoreKey newDataChunkKey = convertedKeys.get(oldDataChunkKey);
        if (newDataChunkKey == null) {
          throw new IllegalStateException("Found metadata chunk with a deprecated data chunk."
              + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID()
              + " New MetadataID: " + newKey.getID()
              + " Old Datachunk ID: " + oldDataChunkKey.getID());
        }
        if (isOldMetadataKeyDifferentFromNew && newDataChunkKey.getID().equals(oldDataChunkKey.getID())) {
          throw new IllegalStateException("Found changed metadata chunk with an unchanged data chunk."
              + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID()
              + " New MetadataID: " + newKey.getID()
              + " Old Datachunk ID: " + oldDataChunkKey.getID());
        }
        if (!isOldMetadataKeyDifferentFromNew && !newDataChunkKey.getID().equals(oldDataChunkKey.getID())) {
          throw new IllegalStateException("Found unchanged metadata chunk with a changed data chunk."
              + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID()
              + " New MetadataID: " + newKey.getID()
              + " Old Datachunk ID: " + oldDataChunkKey.getID()
              + " New Datachunk ID: " + newDataChunkKey.getID());
        }
        BlobId newDataChunkBlobId = (BlobId) newDataChunkKey;
        if (newDataChunkBlobId.getAccountId() != metadataAccountId
            || newDataChunkBlobId.getContainerId() != metadataContainerId) {
          throw new IllegalStateException("Found changed metadata chunk with a datachunk with a different account/container."
              + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID()
              + " New MetadataID: " + newKey.getID()
              + " Old Datachunk ID: " + oldDataChunkKey.getID()
              + " New Datachunk ID: " + newDataChunkBlobId.getID()
              + " Metadata AccountId: " + metadataAccountId
              + " Metadata ContainerId: " + metadataContainerId
              + " Datachunk AccountId: " + newDataChunkBlobId.getAccountId()
              + " Datachunk ContainerId: " + newDataChunkBlobId.getContainerId());
        }
        newKeys.add(newDataChunkKey);
      }
      ByteBuffer metadataContent;
      if (compositeBlobInfo.getMetadataContentVersion() == Metadata_Content_Version_V2) {
        metadataContent = MetadataContentSerDe.serializeMetadataContentV2(compositeBlobInfo.getChunkSize(),
            compositeBlobInfo.getTotalSize(), newKeys);
      } else if (compositeBlobInfo.getMetadataContentVersion() == Metadata_Content_Version_V3) {
        List<Pair<StoreKey, Long>> keyAndSizeList = new ArrayList<>();
        List<CompositeBlobInfo.ChunkMetadata> chunkMetadataList = compositeBlobInfo.getChunkMetadataList();
        for (int i = 0; i < newKeys.size(); i++) {
          keyAndSizeList.add(new Pair<>(newKeys.get(i), chunkMetadataList.get(i).getSize()));
        }
        metadataContent =
            MetadataContentSerDe.serializeMetadataContentV3(compositeBlobInfo.getTotalSize(), keyAndSizeList);
      } else {
        throw new IllegalStateException("Unexpected metadata content version from composite blob: "
            + compositeBlobInfo.getMetadataContentVersion());
      }
      blobPropertiesSize = compositeBlobInfo.getTotalSize();
      metadataContent.flip();
      blobDataBytes.release();
      blobDataBytes = Unpooled.wrappedBuffer(metadataContent);
      blobData = new BlobData(blobData.getBlobType(), metadataContent.remaining(), blobDataBytes);
    }
    BlobProperties newProperties = new BlobProperties(blobPropertiesSize, oldProperties.getServiceId(),
        oldProperties.getOwnerId(), oldProperties.getContentType(), oldProperties.isPrivate(),
        oldProperties.getTimeToLiveInSeconds(), oldProperties.getCreationTimeInMs(), newBlobId.getAccountId(),
        newBlobId.getContainerId(), oldProperties.isEncrypted(), oldProperties.getExternalAssetTag(),
        oldProperties.getContentEncoding(), oldProperties.getFilename());
    // BlobIdTransformer only exists on ambry-server, and replication between servers relies on a blocking channel
    // that still uses java ByteBuffer, so there is no need to release anything here.
    // TODO: when netty ByteBuf is adopted for the blocking channel on ambry-server, remember to release this ByteBuf.
    PutMessageFormatInputStream putMessageFormatInputStream =
        new PutMessageFormatInputStream(newKey, blobEncryptionKey, newProperties, userMetaData,
            new ByteBufInputStream(blobDataBytes, true), blobData.getSize(), blobData.getBlobType(),
            oldMessageInfo.getLifeVersion());
    // Reuse the original CRC if present in the oldMessageInfo. This is important to ensure that messages that are
    // received via replication are sent to the store with proper CRCs (which the store needs to detect duplicate
    // messages). As an additional guard, the original CRC is only reused if the key's ID in string form is the
    // same after conversion.
    Long originalCrc = oldMessageInfo.getStoreKey().getID().equals(newKey.getID()) ? oldMessageInfo.getCrc() : null;
    MessageInfo info = new MessageInfo.Builder(newKey, putMessageFormatInputStream.getSize(),
        newProperties.getAccountId(), newProperties.getContainerId(), oldMessageInfo.getOperationTimeMs())
        .isTtlUpdated(oldMessageInfo.isTtlUpdated())
        .expirationTimeInMs(oldMessageInfo.getExpirationTimeInMs())
        .crc(originalCrc)
        .lifeVersion(oldMessageInfo.getLifeVersion())
        .build();
    return new Message(info, putMessageFormatInputStream);
  } else {
    throw new IllegalArgumentException("Only 'put' records are valid");
  }
}
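A hedged sketch of consuming the result (assuming ambry's store Message exposes getMessageInfo() and getStream(), as its use in replication suggests): the transformed message can be drained back into bytes the same way the test helpers on this page drain a PutMessageFormatInputStream.

// Sketch only; `originalStream` is a placeholder for the source message stream,
// and Message#getStream()/#getMessageInfo() are assumed accessors.
Message transformed = newMessage(originalStream, newKey, oldMessageInfo);
byte[] serialized =
    Utils.readBytesFromStream(transformed.getStream(), (int) transformed.getMessageInfo().getSize());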
use of com.github.ambry.messageformat.PutMessageFormatInputStream in project ambry by linkedin.
the class ReplicationTestHelper method createPutMessage.
/**
* Constructs an entire message with header, blob properties, user metadata and blob content.
* @param id id for which the message has to be constructed.
* @param accountId accountId of the blob
* @param containerId containerId of the blob
* @param enableEncryption {@code true} if encryption needs to be enabled, {@code false} otherwise
* @param lifeVersion lifeVersion with which the message has to be constructed.
* @return a {@link PutMsgInfoAndBuffer} wrapping a {@link ByteBuffer} holding the entire message and the
* associated {@link MessageInfo}
* @throws MessageFormatException
* @throws IOException
*/
public static PutMsgInfoAndBuffer createPutMessage(StoreKey id, short accountId, short containerId,
    boolean enableEncryption, short lifeVersion) throws MessageFormatException, IOException {
  Random blobIdRandom = new Random(id.getID().hashCode());
  int blobSize = blobIdRandom.nextInt(500) + 501;
  int userMetadataSize = blobIdRandom.nextInt(blobSize / 2);
  int encryptionKeySize = blobIdRandom.nextInt(blobSize / 4);
  byte[] blob = new byte[blobSize];
  byte[] usermetadata = new byte[userMetadataSize];
  byte[] encryptionKey = enableEncryption ? new byte[encryptionKeySize] : null;
  blobIdRandom.nextBytes(blob);
  blobIdRandom.nextBytes(usermetadata);
  BlobProperties blobProperties = new BlobProperties(blobSize, "test", null, null, false,
      EXPIRY_TIME_MS - CONSTANT_TIME_MS, CONSTANT_TIME_MS, accountId, containerId, encryptionKey != null,
      null, null, null);
  MessageFormatInputStream stream =
      new PutMessageFormatInputStream(id, encryptionKey == null ? null : ByteBuffer.wrap(encryptionKey),
          blobProperties, ByteBuffer.wrap(usermetadata), new ByteBufferInputStream(ByteBuffer.wrap(blob)), blobSize,
          BlobType.DataBlob, lifeVersion);
  byte[] message = Utils.readBytesFromStream(stream, (int) stream.getSize());
  return new PutMsgInfoAndBuffer(ByteBuffer.wrap(message),
      new MessageInfo(id, message.length, false, false, false, EXPIRY_TIME_MS, null, accountId, containerId,
          CONSTANT_TIME_MS, lifeVersion));
}
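Note that the Random here is seeded with id.getID().hashCode(), so the helper is deterministic per store key. A sketch of a test exploiting that (the public byteBuffer field on PutMsgInfoAndBuffer is an assumption):

// Two calls with the same key should produce byte-identical messages;
// ByteBuffer.equals compares the remaining content of both buffers.
PutMsgInfoAndBuffer first = createPutMessage(id, accountId, containerId, false, (short) 0);
PutMsgInfoAndBuffer second = createPutMessage(id, accountId, containerId, false, (short) 0);
assertEquals(first.byteBuffer, second.byteBuffer);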
use of com.github.ambry.messageformat.PutMessageFormatInputStream in project ambry by linkedin.
the class VcrRecoveryTest method cloudRecoveryTestForLargeBlob.
/**
* Test recovery from one VCR node to one disk-based data node for large blobs.
* Creates a VCR node and a disk-based data node, uploads data to the VCR node, and verifies that the blobs have been downloaded.
* @throws Exception If an exception happens.
*/
@Test
public void cloudRecoveryTestForLargeBlob() throws Exception {
  // Create blobs and upload to cloud destination.
  int userMetaDataSize = 100;
  byte[] userMetadata = new byte[userMetaDataSize];
  TestUtils.RANDOM.nextBytes(userMetadata);
  Map<BlobId, Integer> blobIdToSizeMap = new HashMap<>();
  // Currently ambry supports max size of 4MB for blobs.
  int blobSize = FOUR_MB_SZ;
  for (BlobId blobId : blobIds) {
    PutMessageFormatInputStream putMessageFormatInputStream =
        ServerTestUtil.getPutMessageInputStreamForBlob(blobId, blobSize, blobIdToSizeMap, accountId, containerId);
    long time = System.currentTimeMillis();
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, time, Utils.Infinite_Time,
        putMessageFormatInputStream.getSize(), CloudBlobMetadata.EncryptionOrigin.NONE);
    latchBasedInMemoryCloudDestination.uploadBlob(blobId, putMessageFormatInputStream.getSize(), cloudBlobMetadata,
        putMessageFormatInputStream);
  }
  // Waiting for download attempt
  assertTrue("Did not recover all blobs in 1 minute",
      latchBasedInMemoryCloudDestination.awaitDownload(1, TimeUnit.MINUTES));
  // Waiting for replication to complete
  Thread.sleep(10000);
  // Test recovery by sending get request to recovery node
  testGetOnRecoveryNode(blobIdToSizeMap);
}
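ServerTestUtil.getPutMessageInputStreamForBlob is not shown on this page; a plausible sketch follows, assuming it mirrors the PutMessageFormatInputStream construction in getPutMessage/createPutMessage above and records each blob's raw size for later verification. This is a hedged reconstruction, not the actual ambry source.

// Hypothetical reconstruction of the helper used above.
static PutMessageFormatInputStream getPutMessageInputStreamForBlob(BlobId blobId, int blobSize,
    Map<BlobId, Integer> blobIdToSizeMap, short accountId, short containerId)
    throws MessageFormatException, IOException {
  byte[] blob = new byte[blobSize];
  byte[] userMetadata = new byte[100];
  TestUtils.RANDOM.nextBytes(blob);
  TestUtils.RANDOM.nextBytes(userMetadata);
  BlobProperties properties = new BlobProperties(blobSize, "test", accountId, containerId, false);
  // Remember the raw blob size so the test can verify it after recovery.
  blobIdToSizeMap.put(blobId, blobSize);
  return new PutMessageFormatInputStream(blobId, null, properties, ByteBuffer.wrap(userMetadata),
      new ByteBufferInputStream(ByteBuffer.wrap(blob)), blobSize);
}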
use of com.github.ambry.messageformat.PutMessageFormatInputStream in project ambry by linkedin.
the class VcrRecoveryTest method basicCloudRecoveryTest.
/**
* Test recovery from one VCR node to one disk-based data node.
* Creates a VCR node and a disk-based data node, uploads data to the VCR node, and verifies that the blobs have been downloaded.
* @throws Exception If an exception happens.
*/
@Test
public void basicCloudRecoveryTest() throws Exception {
  // Create blobs and upload to cloud destination.
  int userMetaDataSize = 100;
  byte[] userMetadata = new byte[userMetaDataSize];
  TestUtils.RANDOM.nextBytes(userMetadata);
  Map<BlobId, Integer> blobIdToSizeMap = new HashMap<>();
  for (BlobId blobId : blobIds) {
    int blobSize = Utils.getRandomShort(TestUtils.RANDOM);
    PutMessageFormatInputStream putMessageFormatInputStream =
        ServerTestUtil.getPutMessageInputStreamForBlob(blobId, blobSize, blobIdToSizeMap, accountId, containerId);
    long time = System.currentTimeMillis();
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, time, Utils.Infinite_Time,
        putMessageFormatInputStream.getSize(), CloudBlobMetadata.EncryptionOrigin.NONE);
    latchBasedInMemoryCloudDestination.uploadBlob(blobId, putMessageFormatInputStream.getSize(), cloudBlobMetadata,
        putMessageFormatInputStream);
  }
  // Waiting for download attempt
  assertTrue("Did not recover all blobs in 1 minute",
      latchBasedInMemoryCloudDestination.awaitDownload(1, TimeUnit.MINUTES));
  // Waiting for replication to complete
  Thread.sleep(10000);
  // Test recovery by sending get request to recovery node
  testGetOnRecoveryNode(blobIdToSizeMap);
}