use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.
the class MockReadableStreamChannel method verifyBlob.
/**
* Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
* data as the original object that was put.
* @param blobId the blobId of the blob that is to be verified.
* @param properties the {@link BlobProperties} of the blob that is to be verified.
* @param originalPutContent the original content of the blob.
* @param originalUserMetadata the original user-metadata of the blob.
* @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
*/
private void verifyBlob(String blobId, BlobProperties properties, byte[] originalPutContent, byte[] originalUserMetadata, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
  ByteBuffer serializedRequest = serializedRequests.get(blobId);
  PutRequest.ReceivedPutRequest request = deserializePutRequest(serializedRequest);
  NotificationBlobType notificationBlobType;
  if (request.getBlobType() == BlobType.MetadataBlob) {
    notificationBlobType = NotificationBlobType.Composite;
    byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data), new BlobIdFactory(mockClusterMap));
    Assert.assertEquals("Wrong max chunk size in metadata", chunkSize, compositeBlobInfo.getChunkSize());
    Assert.assertEquals("Wrong total size in metadata", originalPutContent.length, compositeBlobInfo.getTotalSize());
    List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
    Assert.assertEquals("Number of chunks is not as expected", RouterUtils.getNumChunksForBlobAndChunkSize(originalPutContent.length, chunkSize), dataBlobIds.size());
    // verify user-metadata
    if (properties.isEncrypted()) {
      ByteBuffer userMetadata = request.getUsermetadata();
      BlobId origBlobId = new BlobId(blobId, mockClusterMap);
      // The reason to call run() directly instead of spinning up a thread via start() is that any exceptions or
      // assertion failures in a non-main thread will not fail the test.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
        Assert.assertNull("Exception should not be thrown", exception);
        Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
        Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, result.getDecryptedUserMetadata().array());
      }).run();
    } else {
      Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, request.getUsermetadata().array());
    }
    verifyCompositeBlob(properties, originalPutContent, originalUserMetadata, dataBlobIds, request, serializedRequests);
  } else {
    notificationBlobType = NotificationBlobType.Simple;
    byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    if (!properties.isEncrypted()) {
      Assert.assertArrayEquals("Input blob and written blob should be the same", originalPutContent, content);
      Assert.assertArrayEquals("UserMetadata mismatch for simple blob", originalUserMetadata, request.getUsermetadata().array());
      notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
    } else {
      ByteBuffer userMetadata = request.getUsermetadata();
      BlobId origBlobId = new BlobId(blobId, mockClusterMap);
      // The reason to call run() directly instead of spinning up a thread via start() is that any exceptions or
      // assertion failures in a non-main thread will not fail the test.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), ByteBuffer.wrap(content), userMetadata, cryptoService, kms, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), new Callback<DecryptJob.DecryptJobResult>() {
        @Override
        public void onCompletion(DecryptJob.DecryptJobResult result, Exception exception) {
          Assert.assertNull("Exception should not be thrown", exception);
          Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
          Assert.assertArrayEquals("Content mismatch", originalPutContent, result.getDecryptedBlobContent().array());
          Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, result.getDecryptedUserMetadata().array());
        }
      }).run();
    }
  }
  notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
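The deserializePutRequest helper called above is not part of this snippet. A minimal sketch of what it might look like, assuming the serialized buffer is prefixed by the standard request size (long) and type (short) fields, and that PutRequest.readFrom(DataInputStream, ClusterMap) performs the protocol-level deserialization:

private PutRequest.ReceivedPutRequest deserializePutRequest(ByteBuffer serialized) throws IOException {
  // Skip the size (long) and request type (short) header fields (assumed layout) before
  // handing the remainder of the buffer to the protocol-level deserializer.
  serialized.getLong();
  serialized.getShort();
  return PutRequest.readFrom(new DataInputStream(new ByteBufferInputStream(serialized)), mockClusterMap);
}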
use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.
the class AmbryRequestsTest method sendAndVerifyOperationRequest.
/**
 * Sends and verifies that an operation-specific request works correctly.
 * @param requestType the type of the request to send.
 * @param ids the partitionIds to send requests for.
 * @param expectedErrorCode the {@link ServerErrorCode} expected in the response. For {@link GetResponse} and
 *                          {@link ReplicaMetadataResponse}, this is the error code expected in the constituents of
 *                          the response rather than in the response itself.
 * @throws InterruptedException
 * @throws IOException
 */
private void sendAndVerifyOperationRequest(RequestOrResponseType requestType, List<? extends PartitionId> ids, ServerErrorCode expectedErrorCode) throws InterruptedException, IOException {
  for (PartitionId id : ids) {
    int correlationId = TestUtils.RANDOM.nextInt();
    String clientId = UtilsTest.getRandomString(10);
    BlobId blobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, ClusterMapUtils.UNKNOWN_DATACENTER_ID, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), id, false);
    RequestOrResponse request;
    switch (requestType) {
      case PutRequest:
        BlobProperties properties = new BlobProperties(0, "serviceId", blobId.getAccountId(), blobId.getAccountId(), false);
        request = new PutRequest(correlationId, clientId, blobId, properties, ByteBuffer.allocate(0), ByteBuffer.allocate(0), 0, BlobType.DataBlob, null);
        break;
      case DeleteRequest:
        request = new DeleteRequest(correlationId, clientId, blobId, SystemTime.getInstance().milliseconds());
        break;
      case GetRequest:
        PartitionRequestInfo pRequestInfo = new PartitionRequestInfo(id, Collections.singletonList(blobId));
        request = new GetRequest(correlationId, clientId, MessageFormatFlags.All, Collections.singletonList(pRequestInfo), GetOption.Include_All);
        break;
      case ReplicaMetadataRequest:
        ReplicaMetadataRequestInfo rRequestInfo = new ReplicaMetadataRequestInfo(id, FIND_TOKEN_FACTORY.getNewFindToken(), "localhost", "/tmp");
        request = new ReplicaMetadataRequest(correlationId, clientId, Collections.singletonList(rRequestInfo), Long.MAX_VALUE);
        break;
      default:
        throw new IllegalArgumentException(requestType + " not supported by this function");
    }
    storageManager.resetStore();
    Response response = sendRequestGetResponse(request, requestType == RequestOrResponseType.GetRequest || requestType == RequestOrResponseType.ReplicaMetadataRequest ? ServerErrorCode.No_Error : expectedErrorCode);
    if (expectedErrorCode.equals(ServerErrorCode.No_Error)) {
      assertEquals("Operation received at the store not as expected", requestType, MockStorageManager.operationReceived);
    }
    if (requestType == RequestOrResponseType.GetRequest) {
      GetResponse getResponse = (GetResponse) response;
      for (PartitionResponseInfo info : getResponse.getPartitionResponseInfoList()) {
        assertEquals("Error code does not match expected", expectedErrorCode, info.getErrorCode());
      }
    } else if (requestType == RequestOrResponseType.ReplicaMetadataRequest) {
      ReplicaMetadataResponse replicaMetadataResponse = (ReplicaMetadataResponse) response;
      for (ReplicaMetadataResponseInfo info : replicaMetadataResponse.getReplicaMetadataResponseInfoList()) {
        assertEquals("Error code does not match expected", expectedErrorCode, info.getError());
      }
    }
  }
}
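As a usage sketch (the clusterMap field and the set of request types below are assumptions, not part of the original test), a caller might exercise each supported request type and expect success:

// Hypothetical invocation: run the helper for every supported request type.
List<? extends PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
for (RequestOrResponseType type : new RequestOrResponseType[]{RequestOrResponseType.PutRequest,
    RequestOrResponseType.DeleteRequest, RequestOrResponseType.GetRequest,
    RequestOrResponseType.ReplicaMetadataRequest}) {
  sendAndVerifyOperationRequest(type, partitionIds, ServerErrorCode.No_Error);
}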
use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.
the class GetBlobOperationTest method doDirectPut.
/**
* Do a put directly to the mock servers. This allows for blobs with malformed properties to be constructed.
* @param blobType the {@link BlobType} for the blob to upload.
* @param blobContent the raw content for the blob to upload (i.e. this can be serialized composite blob metadata or
* an encrypted blob).
*/
private void doDirectPut(BlobType blobType, ByteBuf blobContent) throws Exception {
  List<PartitionId> writablePartitionIds = mockClusterMap.getWritablePartitionIds(null);
  PartitionId partitionId = writablePartitionIds.get(random.nextInt(writablePartitionIds.size()));
  blobId = new BlobId(routerConfig.routerBlobidCurrentVersion, BlobId.BlobIdType.NATIVE, mockClusterMap.getLocalDatacenterId(), blobProperties.getAccountId(), blobProperties.getContainerId(), partitionId, blobProperties.isEncrypted(), blobType == BlobType.MetadataBlob ? BlobId.BlobDataType.METADATA : BlobId.BlobDataType.DATACHUNK);
  blobIdStr = blobId.getID();
  Iterator<MockServer> servers = partitionId.getReplicaIds().stream().map(ReplicaId::getDataNodeId).map(dataNodeId -> mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort())).iterator();
  ByteBuffer blobEncryptionKey = null;
  ByteBuffer userMetadataBuf = ByteBuffer.wrap(userMetadata);
  if (blobProperties.isEncrypted()) {
    FutureResult<EncryptJob.EncryptJobResult> futureResult = new FutureResult<>();
    cryptoJobHandler.submitJob(new EncryptJob(blobProperties.getAccountId(), blobProperties.getContainerId(), blobType == BlobType.MetadataBlob ? null : blobContent.retainedDuplicate(), userMetadataBuf.duplicate(), kms.getRandomKey(), cryptoService, kms, null, new CryptoJobMetricsTracker(routerMetrics.encryptJobMetrics), futureResult::done));
    EncryptJob.EncryptJobResult result = futureResult.get(5, TimeUnit.SECONDS);
    blobEncryptionKey = result.getEncryptedKey();
    if (blobType != BlobType.MetadataBlob) {
      blobContent.release();
      blobContent = result.getEncryptedBlobContent();
    }
    userMetadataBuf = result.getEncryptedUserMetadata();
  }
  while (servers.hasNext()) {
    MockServer server = servers.next();
    PutRequest request = new PutRequest(random.nextInt(), "clientId", blobId, blobProperties, userMetadataBuf.duplicate(), blobContent.retainedDuplicate(), blobContent.readableBytes(), blobType, blobEncryptionKey == null ? null : blobEncryptionKey.duplicate());
    // Make sure we release the BoundedNettyByteBufReceive.
    server.send(request).release();
    request.release();
  }
  blobContent.release();
}
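For illustration, a hypothetical caller could plant a plain data chunk on all replicas before running a get operation against it. Unpooled is Netty's unpooled buffer factory; chunkSize and random are the surrounding test's fields:

// Hypothetical caller: write random content directly to the mock servers, then fetch it via the router.
byte[] rawContent = new byte[chunkSize];
random.nextBytes(rawContent);
doDirectPut(BlobType.DataBlob, Unpooled.wrappedBuffer(rawContent));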
use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.
the class PutOperationTest method testSendIncomplete.
/**
* Ensure that if any of the requests associated with the buffer of a PutChunk is not completely read out even
* after the associated chunk is complete, the buffer is not reused even though the PutChunk is reused.
*/
@Test
public void testSendIncomplete() throws Exception {
  int numChunks = routerConfig.routerMaxInMemPutChunks + 1;
  BlobProperties blobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null);
  byte[] userMetadata = new byte[10];
  byte[] content = new byte[chunkSize * numChunks];
  random.nextBytes(content);
  ReadableStreamChannel channel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(content));
  FutureResult<String> future = new FutureResult<>();
  MockNetworkClient mockNetworkClient = new MockNetworkClient();
  PutOperation op = PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap, new LoggingNotificationSystem(), new InMemAccountService(true, false), userMetadata, channel, PutBlobOptions.DEFAULT, future, null, new RouterCallback(mockNetworkClient, new ArrayList<>()), null, null, null, null, time, blobProperties, MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
  op.startOperation();
  List<RequestInfo> requestInfos = new ArrayList<>();
  requestRegistrationCallback.setRequestsToSend(requestInfos);
  // Since this channel is in memory, one call to fill chunks would end up filling the maximum number of PutChunks.
  op.fillChunks();
  Assert.assertTrue("ReadyForPollCallback should have been invoked as chunks were fully filled", mockNetworkClient.getAndClearWokenUpStatus());
  // A poll should therefore return requestParallelism number of requests from each chunk.
  op.poll(requestRegistrationCallback);
  Assert.assertEquals(routerConfig.routerMaxInMemPutChunks * requestParallelism, requestInfos.size());
  // There are routerMaxInMemPutChunks + 1 data chunks for this blob (and a metadata chunk).
  // Once the first chunk is completely sent out, the first PutChunk will be reused. What the test verifies is that
  // the buffer of the first PutChunk does not get reused. It does this as follows:
  // For the first chunk,
  // 1. use the first request to succeed the chunk (the successTarget is set to 1).
  // 2. read and store the contents of the second request for comparison later.
  // 3. read from the third request after the first PutChunk gets reused and ensure that the data from the third is
  //    the same as what was saved off from the second. This means that the buffer was not reused by the first
  //    PutChunk.
  // 1.
  ResponseInfo responseInfo = getResponseInfo(requestInfos.get(0));
  PutResponse putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
  op.handleResponse(responseInfo, putResponse);
  requestInfos.get(0).getRequest().release();
  responseInfo.release();
  // 2.
  PutRequest putRequest = (PutRequest) requestInfos.get(1).getRequest();
  ByteBuffer buf = ByteBuffer.allocate((int) putRequest.sizeInBytes());
  ByteBufferChannel bufChannel = new ByteBufferChannel(buf);
  // read it out (which also marks this request as complete).
  putRequest.writeTo(bufChannel);
  putRequest.release();
  byte[] expectedRequestContent = buf.array();
  // 3.
  // first save the third request.
  PutRequest savedRequest = (PutRequest) requestInfos.get(2).getRequest();
  // succeed all the other requests.
  for (int i = 3; i < requestInfos.size(); i++) {
    responseInfo = getResponseInfo(requestInfos.get(i));
    putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
    op.handleResponse(responseInfo, putResponse);
    requestInfos.get(i).getRequest().release();
    responseInfo.release();
  }
  // fill the first PutChunk with the last chunk.
  op.fillChunks();
  // Verify that the last chunk was filled.
  requestInfos.clear();
  op.poll(requestRegistrationCallback);
  Assert.assertEquals(1 * requestParallelism, requestInfos.size());
  // Verify that the buffer of the third request is not affected.
  buf = ByteBuffer.allocate((int) savedRequest.sizeInBytes());
  bufChannel = new ByteBufferChannel(buf);
  savedRequest.writeTo(bufChannel);
  savedRequest.release();
  byte[] savedRequestContent = buf.array();
  // reset the correlation ids as they will be different between the two requests.
  resetCorrelationId(expectedRequestContent);
  resetCorrelationId(savedRequestContent);
Assert.assertArrayEquals("Underlying buffer should not have be reused", expectedRequestContent, savedRequestContent);
// internal to the chunk (though this can be verified via coverage).
  for (int i = 0; i < requestInfos.size(); i++) {
    responseInfo = getResponseInfo(requestInfos.get(i));
    putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
    op.handleResponse(responseInfo, putResponse);
    requestInfos.get(i).getRequest().release();
    responseInfo.release();
  }
  requestInfos.clear();
  // this should return requests for the metadata chunk.
  op.poll(requestRegistrationCallback);
  Assert.assertEquals(1 * requestParallelism, requestInfos.size());
  Assert.assertFalse("Operation should not be complete yet", op.isOperationComplete());
  // once the metadata request succeeds, it should complete the operation.
  responseInfo = getResponseInfo(requestInfos.get(0));
  putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
  op.handleResponse(responseInfo, putResponse);
  responseInfo.release();
  requestInfos.forEach(info -> info.getRequest().release());
  Assert.assertTrue("Operation should be complete at this time", op.isOperationComplete());
}
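The resetCorrelationId helper used above is not shown in this snippet. A plausible sketch, assuming the serialized request starts with size (long), type (short) and version (short) fields followed by the correlation id (int):

private void resetCorrelationId(byte[] request) {
  // Assumed header layout: size (long) + type (short) + version (short) precede the correlation id.
  int correlationIdOffset = 8 + 2 + 2;
  ByteBuffer wrapped = ByteBuffer.wrap(request);
  // Zero out the correlation id so two otherwise-identical requests compare equal.
  wrapped.putInt(correlationIdOffset, 0);
}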
use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.
the class StoredBlob method send.
/**
* Take in a request in the form of {@link Send} and return a response in the form of a
* {@link BoundedNettyByteBufReceive}.
* @param send the request.
* @return the response.
* @throws IOException if there was an error in interpreting the request.
*/
public BoundedNettyByteBufReceive send(Send send) throws IOException {
  if (!shouldRespond) {
    return null;
  }
  ServerErrorCode serverError = hardError != null ? hardError : serverErrors.size() > 0 ? serverErrors.poll() : ServerErrorCode.No_Error;
  RequestOrResponseType type = ((RequestOrResponse) send).getRequestType();
  RequestOrResponse response;
  requestCounts.computeIfAbsent(type, k -> new LongAdder()).increment();
  switch (type) {
    case PutRequest:
      response = makePutResponse((PutRequest) send, serverError);
      break;
    case GetRequest:
      response = makeGetResponse((GetRequest) send, serverError);
      break;
    case DeleteRequest:
      response = makeDeleteResponse((DeleteRequest) send, serverError);
      break;
    case TtlUpdateRequest:
      response = makeTtlUpdateResponse((TtlUpdateRequest) send, serverError);
      break;
    case UndeleteRequest:
      response = makeUndeleteResponse((UndeleteRequest) send, serverError);
      break;
    default:
      throw new IOException("Unknown request type received");
  }
  ByteBufferChannel channel = new ByteBufferChannel(ByteBuffer.allocate((int) response.sizeInBytes()));
  response.writeTo(channel);
  response.release();
  ByteBuffer payload = channel.getBuffer();
  payload.flip();
  BoundedNettyByteBufReceive receive = new BoundedNettyByteBufReceive(100 * 1024 * 1024);
  receive.readFrom(Channels.newChannel(new ByteBufferInputStream(payload)));
  return receive;
}
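The makePutResponse helper, like its siblings in the switch, is defined elsewhere in the mock. A minimal sketch, assuming PutResponse carries only the correlation id, client id and error code, and that a hypothetical updateBlobMap method records a successfully put blob in this mock server's store:

PutResponse makePutResponse(PutRequest putRequest, ServerErrorCode putError) throws IOException {
  if (putError == ServerErrorCode.No_Error) {
    // Assumed helper: record the blob contents in this mock server's store.
    updateBlobMap(putRequest);
  }
  return new PutResponse(putRequest.getCorrelationId(), putRequest.getClientId(), putError);
}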