use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class CloudOperationTest method doDirectPut.
/**
* Do a put directly to the mock servers. This allows blobs with malformed properties to be constructed.
* @return the blob id
* @throws Exception Any unexpected exception
*/
private String doDirectPut() throws Exception {
// a blob size that is never an exact multiple of maxChunkSize and, whenever random.nextInt(10) is non-zero, greater than it, in which case the put results in a composite blob.
int blobSize = maxChunkSize * random.nextInt(10) + random.nextInt(maxChunkSize - 1) + 1;
BlobProperties blobProperties = new BlobProperties(blobSize, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, Utils.getRandomShort(random), Utils.getRandomShort(random), false, null, null, null);
byte[] userMetadata = new byte[10];
random.nextBytes(userMetadata);
byte[] putContent = new byte[blobSize];
random.nextBytes(putContent);
ByteBuf putContentBuf = PooledByteBufAllocator.DEFAULT.heapBuffer(blobSize);
putContentBuf.writeBytes(putContent);
BlobId blobId = doDirectPut(blobProperties, userMetadata, putContentBuf.retainedDuplicate());
putContentBuf.release();
return blobId.getID();
}
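The 12-argument BlobProperties constructor is opaque at call sites like the one above. Below is a minimal annotated sketch of the same call, assuming the positional parameters map to Ambry's long-form constructor; the parameter names in the comments are assumptions, not taken from this page:
BlobProperties blobProperties = new BlobProperties(
blobSize, // size recorded in the properties; tests may deliberately set this wrong
"serviceId", // serviceId
"memberId", // ownerId (assumed name)
"contentType", // contentType
false, // isPrivate (assumed name)
Utils.Infinite_Time, // time-to-live; Infinite_Time means the blob never expires
Utils.getRandomShort(random), // accountId
Utils.getRandomShort(random), // containerId
false, // isEncrypted
null, // externalAssetTag (assumed name)
null, // contentEncoding (assumed name)
null); // filename (assumed name)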
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class GetBlobOperationTest method doPut.
/**
* Generates random content, does a single put of that content, and saves the returned blob id string. The tests
* use this blob id string to perform gets. Tests asserting success compare the contents of the returned blob
* with the content generated within this method.
* @throws Exception Any unexpected exception
*/
private void doPut() throws Exception {
blobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, Utils.getRandomShort(random), Utils.getRandomShort(random), testEncryption, null, null, null);
userMetadata = new byte[10];
random.nextBytes(userMetadata);
putContent = new byte[blobSize];
random.nextBytes(putContent);
ReadableStreamChannel putChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(putContent));
// TODO fix null quota charge event listener
blobIdStr = router.putBlob(blobProperties, userMetadata, putChannel, new PutBlobOptionsBuilder().build()).get();
blobId = RouterUtils.getBlobIdFromString(blobIdStr, mockClusterMap);
}
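For context, a test typically pairs doPut() with the getAndAssertSuccess(...) helper shown next. A minimal sketch of that flow, assuming the test's options field is already initialized and that a freshly put blob carries lifeVersion 0:
blobSize = maxChunkSize * 2 + 1; // not a multiple of maxChunkSize, so the put produces a composite blob
doPut(); // stores random content and records blobIdStr/blobId
getAndAssertSuccess(false, false, (short) 0); // read the blob back and verify its contents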
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class GetBlobOperationTest method getAndAssertSuccess.
/**
* Construct GetBlob operations with appropriate callbacks, then poll those operations until they complete,
* and ensure that the whole blob data is read out and the contents match.
* @param getChunksBeforeRead {@code true} if all chunks should be cached by the router before reading from the
* stream.
* @param initiateReadBeforeChunkGet Whether readInto() should be initiated before data chunks are
* fetched by the router, in order to simulate chunk arrival delay.
* @param expectedLifeVersion the expected lifeVersion from the get operation.
*/
private void getAndAssertSuccess(final boolean getChunksBeforeRead, final boolean initiateReadBeforeChunkGet, short expectedLifeVersion) throws Exception {
final CountDownLatch readCompleteLatch = new CountDownLatch(1);
final AtomicReference<Throwable> readCompleteThrowable = new AtomicReference<>(null);
final AtomicLong readCompleteResult = new AtomicLong(0);
final AtomicReference<Exception> operationException = new AtomicReference<>(null);
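// chunks the router must fetch: ceil(blobSize / maxChunkSize) data chunks, plus one
// metadata chunk when the blob is composite (blobSize > maxChunkSize).
// e.g. blobSize = 2 * maxChunkSize + 1 -> 3 data chunks + 1 metadata chunk = 4.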
final int numChunks = ((blobSize + maxChunkSize - 1) / maxChunkSize) + (blobSize > maxChunkSize ? 1 : 0);
mockNetworkClient.resetProcessedResponseCount();
Callback<GetBlobResultInternal> callback = (result, exception) -> {
if (exception != null) {
operationException.set(exception);
readCompleteLatch.countDown();
} else {
try {
if (options.getChunkIdsOnly) {
Assert.assertNull("Unexpected blob result when getChunkIdsOnly", result.getBlobResult);
if (blobSize > maxChunkSize) {
// CompositeBlob
Assert.assertNotNull("CompositeBlob should return a list of blob ids when getting chunk ids", result.storeKeys);
Assert.assertEquals("Wrong number of chunk ids for composite blob", (blobSize + maxChunkSize - 1) / maxChunkSize, result.storeKeys.size());
} else {
// SimpleBlob
Assert.assertNull("Unexpected list of blob id when getChunkIdsOnly is true on a simple blob", result.storeKeys);
}
readCompleteLatch.countDown();
return;
}
BlobInfo blobInfo;
switch(options.getBlobOptions.getOperationType()) {
case All:
if (!options.getBlobOptions.isRawMode()) {
blobInfo = result.getBlobResult.getBlobInfo();
Assert.assertTrue("Blob properties must be the same", RouterTestHelpers.arePersistedFieldsEquivalent(blobProperties, blobInfo.getBlobProperties()));
Assert.assertEquals("Blob size should in received blobProperties should be the same as actual", blobSize, blobInfo.getBlobProperties().getBlobSize());
Assert.assertArrayEquals("User metadata must be the same", userMetadata, blobInfo.getUserMetadata());
Assert.assertEquals("LifeVersion mismatch", expectedLifeVersion, blobInfo.getLifeVersion());
}
break;
case Data:
Assert.assertNull("Unexpected blob info in operation result", result.getBlobResult.getBlobInfo());
break;
case BlobInfo:
blobInfo = result.getBlobResult.getBlobInfo();
Assert.assertTrue("Blob properties must be the same", RouterTestHelpers.arePersistedFieldsEquivalent(blobProperties, blobInfo.getBlobProperties()));
Assert.assertEquals("Blob size should in received blobProperties should be the same as actual", blobSize, blobInfo.getBlobProperties().getBlobSize());
Assert.assertNull("Unexpected blob data in operation result", result.getBlobResult.getBlobDataChannel());
Assert.assertEquals("LifeVersion mismatch", expectedLifeVersion, blobInfo.getLifeVersion());
}
} catch (Throwable e) {
readCompleteThrowable.set(e);
}
if (options.getBlobOptions.getOperationType() != GetBlobOptions.OperationType.BlobInfo) {
final ByteBufferAsyncWritableChannel asyncWritableChannel = new ByteBufferAsyncWritableChannel();
final Future<Long> preSetReadIntoFuture = initiateReadBeforeChunkGet ? result.getBlobResult.getBlobDataChannel().readInto(asyncWritableChannel, null) : null;
Utils.newThread(() -> {
if (getChunksBeforeRead) {
// wait for all chunks (data + metadata) to be received
while (mockNetworkClient.getProcessedResponseCount() < numChunks * routerConfig.routerGetRequestParallelism) {
Thread.yield();
}
}
Future<Long> readIntoFuture = initiateReadBeforeChunkGet ? preSetReadIntoFuture : result.getBlobResult.getBlobDataChannel().readInto(asyncWritableChannel, null);
assertBlobReadSuccess(options.getBlobOptions, readIntoFuture, asyncWritableChannel, result.getBlobResult.getBlobDataChannel(), readCompleteLatch, readCompleteResult, readCompleteThrowable);
}, false).start();
} else {
readCompleteLatch.countDown();
}
}
};
GetBlobOperation op = createOperationAndComplete(callback);
readCompleteLatch.await();
Assert.assertTrue("Operation should be complete at this time", op.isOperationComplete());
if (operationException.get() != null) {
throw operationException.get();
}
if (readCompleteThrowable.get() != null) {
throw new IllegalStateException(readCompleteThrowable.get());
}
// Ensure that a ChannelClosed exception is not set when the ReadableStreamChannel is closed correctly.
Assert.assertNull("Callback operation exception should be null", op.getOperationException());
if (options.getBlobOptions.getOperationType() != GetBlobOptions.OperationType.BlobInfo && !options.getBlobOptions.isRawMode() && !options.getChunkIdsOnly) {
int sizeWritten = blobSize;
if (options.getBlobOptions.getRange() != null) {
ByteRange range = options.getBlobOptions.getRange().toResolvedByteRange(blobSize, options.getBlobOptions.resolveRangeOnEmptyBlob());
sizeWritten = (int) range.getRangeSize();
}
Assert.assertEquals("Size read must equal size written", sizeWritten, readCompleteResult.get());
}
}
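When a range is set, the sizeWritten check above shrinks to the resolved range size. A minimal sketch of that arithmetic in plain Java rather than the Ambry ByteRange API; the offsets here are hypothetical:
long blobSize = 100, startOffset = 10, endOffset = 999; // the requested end may overshoot the blob
long resolvedEnd = Math.min(endOffset, blobSize - 1); // resolution clamps the range to the blob
long rangeSize = resolvedEnd - startOffset + 1; // 90 bytes should be written by readInto()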
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class GetBlobOperationTest method testBlobSizeReplacement.
/**
* A past issue with replication logic resulted in the blob size listed in the blob properties reflecting the size
* of a chunk's content buffer instead of the plaintext size of the entire blob. This issue affects composite blobs
* and simple encrypted blobs. This test verifies the router's ability to replace the incorrect blob size field in the
* blob properties with the inferred correct size.
* @throws Exception Any unexpected exception
*/
@Test
public void testBlobSizeReplacement() throws Exception {
userMetadata = new byte[10];
random.nextBytes(userMetadata);
options = new GetBlobOptionsInternal(new GetBlobOptionsBuilder().operationType(GetBlobOptions.OperationType.BlobInfo).build(), false, routerMetrics.ageAtGet);
// test simple blob case
blobSize = maxChunkSize;
putContent = new byte[blobSize];
random.nextBytes(putContent);
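// deliberately inflate the size field by 20 bytes; the router should detect the mismatch,
// replace it with the actual simple-blob size, and bump the mismatch counter checked below.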
blobProperties = new BlobProperties(blobSize + 20, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, Utils.getRandomShort(random), Utils.getRandomShort(random), testEncryption, null, null, null);
ByteBuf putContentBuf = PooledByteBufAllocator.DEFAULT.heapBuffer(blobSize);
putContentBuf.writeBytes(putContent);
doDirectPut(BlobType.DataBlob, putContentBuf.retainedDuplicate());
putContentBuf.release();
Counter sizeMismatchCounter = (testEncryption ? routerMetrics.simpleEncryptedBlobSizeMismatchCount : routerMetrics.simpleUnencryptedBlobSizeMismatchCount);
long startCount = sizeMismatchCounter.getCount();
getAndAssertSuccess();
long endCount = sizeMismatchCounter.getCount();
Assert.assertEquals("Wrong number of blob size mismatches", 1, endCount - startCount);
// test composite blob case
int numChunks = 3;
blobSize = maxChunkSize;
List<StoreKey> storeKeys = new ArrayList<>(numChunks);
for (int i = 0; i < numChunks; i++) {
doPut();
storeKeys.add(blobId);
}
blobSize = maxChunkSize * numChunks;
ByteBuffer metadataContent = MetadataContentSerDe.serializeMetadataContentV2(maxChunkSize, blobSize, storeKeys);
metadataContent.flip();
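// deliberately understate the total size by 20 bytes; the router should replace it with
// the size inferred from the metadata chunk.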
blobProperties = new BlobProperties(blobSize - 20, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, Utils.getRandomShort(random), Utils.getRandomShort(random), testEncryption, null, null, null);
ByteBuf metadataContentBuf = PooledByteBufAllocator.DEFAULT.heapBuffer(metadataContent.remaining());
metadataContentBuf.writeBytes(metadataContent.duplicate());
doDirectPut(BlobType.MetadataBlob, metadataContentBuf.retainedDuplicate());
metadataContentBuf.release();
startCount = routerMetrics.compositeBlobSizeMismatchCount.getCount();
getAndAssertSuccess();
endCount = routerMetrics.compositeBlobSizeMismatchCount.getCount();
Assert.assertEquals("Wrong number of blob size mismatches", 1, endCount - startCount);
}
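For reference, everything the router can later recover from that metadata chunk is exactly what was serialized above; a sketch with illustrative variable names:
int chunkSize = maxChunkSize; // per-data-chunk size
long totalSize = blobSize; // the true size that replaces the bad blob properties field
List<StoreKey> chunkKeys = storeKeys; // the ordered data-chunk ids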
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class StoredBlob method makeGetResponse.
/**
* Make a {@link GetResponse} for the given {@link GetRequest} for which the given {@link ServerErrorCode} was
* encountered. The request could be for BlobInfo or for Blob (the only two options that the router
* requests).
* @param getRequest the {@link GetRequest} for which the response is being constructed.
* @param getError the {@link ServerErrorCode} that was encountered.
* @return the constructed {@link GetResponse}
* @throws IOException if there was an error constructing the response.
*/
GetResponse makeGetResponse(GetRequest getRequest, ServerErrorCode getError) throws IOException {
GetResponse getResponse;
if (getError == ServerErrorCode.No_Error) {
List<PartitionRequestInfo> infos = getRequest.getPartitionInfoList();
if (infos.size() != 1 || infos.get(0).getBlobIds().size() != 1) {
getError = ServerErrorCode.Unknown_Error;
}
}
ServerErrorCode serverError;
ServerErrorCode partitionError;
boolean isDataBlob = false;
try {
String id = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0).getID();
isDataBlob = blobs.get(id).type == BlobType.DataBlob;
} catch (Exception ignored) {
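// the id may be malformed or unknown to this server; fall through with isDataBlob == false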
}
if (!getErrorOnDataBlobOnly || isDataBlob) {
// set it in the partitionResponseInfo
if (getError == ServerErrorCode.No_Error || getError == ServerErrorCode.Blob_Expired || getError == ServerErrorCode.Blob_Deleted || getError == ServerErrorCode.Blob_Not_Found || getError == ServerErrorCode.Blob_Authorization_Failure || getError == ServerErrorCode.Disk_Unavailable) {
partitionError = getError;
serverError = ServerErrorCode.No_Error;
} else {
serverError = getError;
// does not matter - this will not be checked if serverError is not No_Error.
partitionError = ServerErrorCode.No_Error;
}
} else {
serverError = ServerErrorCode.No_Error;
partitionError = ServerErrorCode.No_Error;
}
if (serverError == ServerErrorCode.No_Error) {
int byteBufferSize;
ByteBuffer byteBuffer;
StoreKey key = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0);
short accountId = Account.UNKNOWN_ACCOUNT_ID;
short containerId = Container.UNKNOWN_CONTAINER_ID;
long operationTimeMs = Utils.Infinite_Time;
StoredBlob blob = blobs.get(key.getID());
ServerErrorCode processedError = errorForGet(key.getID(), blob, getRequest);
MessageMetadata msgMetadata = null;
if (processedError == ServerErrorCode.No_Error) {
ByteBuffer buf = blobs.get(key.getID()).serializedSentPutRequest.duplicate();
// read off the size
buf.getLong();
// read off the type.
buf.getShort();
PutRequest originalBlobPutReq = PutRequest.readFrom(new DataInputStream(new ByteBufferInputStream(buf)), clusterMap);
switch(getRequest.getMessageFormatFlag()) {
case BlobInfo:
BlobProperties blobProperties = originalBlobPutReq.getBlobProperties();
accountId = blobProperties.getAccountId();
containerId = blobProperties.getContainerId();
operationTimeMs = blobProperties.getCreationTimeInMs();
ByteBuffer userMetadata = originalBlobPutReq.getUsermetadata();
byteBufferSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties) + MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
byteBuffer = ByteBuffer.allocate(byteBufferSize);
if (originalBlobPutReq.getBlobEncryptionKey() != null) {
msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
}
MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
break;
case Blob:
switch(blobFormatVersion) {
case MessageFormatRecord.Blob_Version_V2:
if (originalBlobPutReq.getBlobEncryptionKey() != null) {
msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
}
byteBufferSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
byteBuffer = ByteBuffer.allocate(byteBufferSize);
MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
break;
case MessageFormatRecord.Blob_Version_V1:
byteBufferSize = (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
byteBuffer = ByteBuffer.allocate(byteBufferSize);
MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize());
break;
default:
throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
}
byteBuffer.put(Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
Crc32 crc = new Crc32();
crc.update(byteBuffer.array(), 0, byteBuffer.position());
byteBuffer.putLong(crc.getValue());
break;
case All:
blobProperties = originalBlobPutReq.getBlobProperties();
accountId = blobProperties.getAccountId();
containerId = blobProperties.getContainerId();
userMetadata = originalBlobPutReq.getUsermetadata();
operationTimeMs = originalBlobPutReq.getBlobProperties().getCreationTimeInMs();
int blobHeaderSize = MessageFormatRecord.MessageHeader_Format_V2.getHeaderSize();
int blobEncryptionRecordSize = originalBlobPutReq.getBlobEncryptionKey() != null ? MessageFormatRecord.BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(originalBlobPutReq.getBlobEncryptionKey().duplicate()) : 0;
int blobPropertiesSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties);
int userMetadataSize = MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
int blobInfoSize = blobPropertiesSize + userMetadataSize;
int blobRecordSize;
switch(blobFormatVersion) {
case MessageFormatRecord.Blob_Version_V2:
blobRecordSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
break;
case MessageFormatRecord.Blob_Version_V1:
blobRecordSize = (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
break;
default:
throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
}
byteBufferSize = blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize + blobRecordSize;
byteBuffer = ByteBuffer.allocate(byteBufferSize);
try {
MessageFormatRecord.MessageHeader_Format_V2.serializeHeader(byteBuffer,
// total size of everything that follows the header and the store key
blobEncryptionRecordSize + blobInfoSize + blobRecordSize,
// relative offset of the blob encryption key record, or "invalid" when the put carried no key
originalBlobPutReq.getBlobEncryptionKey() == null ? Message_Header_Invalid_Relative_Offset : blobHeaderSize + key.sizeInBytes(),
// relative offset of the blob properties record
blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize,
// no update record in a freshly put message
Message_Header_Invalid_Relative_Offset,
// relative offset of the user metadata record
blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobPropertiesSize,
// relative offset of the blob record
blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize);
} catch (MessageFormatException e) {
e.printStackTrace();
}
byteBuffer.put(key.toBytes());
if (originalBlobPutReq.getBlobEncryptionKey() != null) {
MessageFormatRecord.BlobEncryptionKey_Format_V1.serializeBlobEncryptionKeyRecord(byteBuffer, originalBlobPutReq.getBlobEncryptionKey().duplicate());
msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
}
MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
int blobRecordStart = byteBuffer.position();
switch(blobFormatVersion) {
case MessageFormatRecord.Blob_Version_V2:
MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
break;
case MessageFormatRecord.Blob_Version_V1:
MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize());
break;
default:
throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
}
byteBuffer.put(Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
crc = new Crc32();
crc.update(byteBuffer.array(), blobRecordStart, blobRecordSize - MessageFormatRecord.Crc_Size);
byteBuffer.putLong(crc.getValue());
break;
default:
throw new IOException("GetRequest flag is not supported: " + getRequest.getMessageFormatFlag());
}
} else if (processedError == ServerErrorCode.Blob_Deleted) {
if (partitionError == ServerErrorCode.No_Error) {
partitionError = ServerErrorCode.Blob_Deleted;
}
byteBuffer = ByteBuffer.allocate(0);
byteBufferSize = 0;
} else if (processedError == ServerErrorCode.Blob_Expired) {
if (partitionError == ServerErrorCode.No_Error) {
partitionError = ServerErrorCode.Blob_Expired;
}
byteBuffer = ByteBuffer.allocate(0);
byteBufferSize = 0;
} else if (processedError == ServerErrorCode.Blob_Authorization_Failure) {
if (partitionError == ServerErrorCode.No_Error) {
partitionError = ServerErrorCode.Blob_Authorization_Failure;
}
byteBuffer = ByteBuffer.allocate(0);
byteBufferSize = 0;
} else {
if (partitionError == ServerErrorCode.No_Error) {
partitionError = ServerErrorCode.Blob_Not_Found;
}
byteBuffer = ByteBuffer.allocate(0);
byteBufferSize = 0;
}
byteBuffer.flip();
ByteBufferSend responseSend = new ByteBufferSend(byteBuffer);
List<MessageInfo> messageInfoList = new ArrayList<>();
List<MessageMetadata> messageMetadataList = new ArrayList<>();
List<PartitionResponseInfo> partitionResponseInfoList = new ArrayList<PartitionResponseInfo>();
if (partitionError == ServerErrorCode.No_Error) {
messageInfoList.add(new MessageInfo(key, byteBufferSize, false, blob.isTtlUpdated(), blob.isUndeleted(), blob.expiresAt, null, accountId, containerId, operationTimeMs, blob.lifeVersion));
messageMetadataList.add(msgMetadata);
}
PartitionResponseInfo partitionResponseInfo = partitionError == ServerErrorCode.No_Error ? new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), messageInfoList, messageMetadataList) : new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), partitionError);
partitionResponseInfoList.add(partitionResponseInfo);
getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), partitionResponseInfoList, responseSend, serverError);
} else {
getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), new ArrayList<PartitionResponseInfo>(), new ByteBufferSend(ByteBuffer.allocate(0)), serverError);
}
return getResponse;
}
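Read straight off the byteBufferSize arithmetic in the All case above, the assembled message has this layout:
// [ message header (MessageHeader_Format_V2) ][ store key ][ blob encryption key record, only when present ]
// [ blob properties record ][ user metadata record ][ blob record + content + trailing CRC ]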