Example 1 with PutRequest

use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.

the class MockReadableStreamChannel method verifyBlob.

/**
 * Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
 * data as the original object that was put.
 * @param blobId the blobId of the blob that is to be verified.
 * @param properties the {@link BlobProperties} of the blob that is to be verified
 * @param originalPutContent original content of the blob
 * @param originalUserMetadata original user-metadata of the blob
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
 */
private void verifyBlob(String blobId, BlobProperties properties, byte[] originalPutContent, byte[] originalUserMetadata, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
    ByteBuffer serializedRequest = serializedRequests.get(blobId);
    PutRequest.ReceivedPutRequest request = deserializePutRequest(serializedRequest);
    NotificationBlobType notificationBlobType;
    if (request.getBlobType() == BlobType.MetadataBlob) {
        notificationBlobType = NotificationBlobType.Composite;
        byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
        CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data), new BlobIdFactory(mockClusterMap));
        Assert.assertEquals("Wrong max chunk size in metadata", chunkSize, compositeBlobInfo.getChunkSize());
        Assert.assertEquals("Wrong total size in metadata", originalPutContent.length, compositeBlobInfo.getTotalSize());
        List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
        Assert.assertEquals("Number of chunks is not as expected", RouterUtils.getNumChunksForBlobAndChunkSize(originalPutContent.length, chunkSize), dataBlobIds.size());
        // verify user-metadata
        if (properties.isEncrypted()) {
            ByteBuffer userMetadata = request.getUsermetadata();
            BlobId origBlobId = new BlobId(blobId, mockClusterMap);
            // Call run() directly instead of spinning up a thread via start(), because exceptions or
            // assertion failures in a non-main thread would not fail the test.
            new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
                Assert.assertNull("Exception should not be thrown", exception);
                Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
                Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, result.getDecryptedUserMetadata().array());
            }).run();
        } else {
            Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, request.getUsermetadata().array());
        }
        verifyCompositeBlob(properties, originalPutContent, originalUserMetadata, dataBlobIds, request, serializedRequests);
    } else {
        notificationBlobType = NotificationBlobType.Simple;
        byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
        if (!properties.isEncrypted()) {
            Assert.assertArrayEquals("Input blob and written blob should be the same", originalPutContent, content);
            Assert.assertArrayEquals("UserMetadata mismatch for simple blob", originalUserMetadata, request.getUsermetadata().array());
            notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
        } else {
            ByteBuffer userMetadata = request.getUsermetadata();
            BlobId origBlobId = new BlobId(blobId, mockClusterMap);
            // Call run() directly instead of spinning up a thread via start(), because exceptions or
            // assertion failures in a non-main thread would not fail the test.
            new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), ByteBuffer.wrap(content), userMetadata, cryptoService, kms, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), new Callback<DecryptJob.DecryptJobResult>() {

                @Override
                public void onCompletion(DecryptJob.DecryptJobResult result, Exception exception) {
                    Assert.assertNull("Exception should not be thrown", exception);
                    Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
                    Assert.assertArrayEquals("Content mismatch", originalPutContent, result.getDecryptedBlobContent().array());
                    Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, result.getDecryptedUserMetadata().array());
                }
            }).run();
        }
    }
    notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
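
The deserializePutRequest helper called above is not part of this snippet. A minimal sketch of what such a helper might look like, assuming the captured buffer still begins with the size (long) and request type (short) header fields and that PutRequest.readFrom(DataInputStream, ClusterMap) returns a ReceivedPutRequest (both are assumptions about this version of the protocol classes):

// Hedged sketch of the helper referenced by verifyBlob; the exact readFrom signature is an assumption.
private PutRequest.ReceivedPutRequest deserializePutRequest(ByteBuffer serialized) throws IOException {
    // Skip the size and request-type header fields written during serialization.
    serialized.getLong();
    serialized.getShort();
    return PutRequest.readFrom(new DataInputStream(new ByteBufferInputStream(serialized)), mockClusterMap);
}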

Example 2 with PutRequest

use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.

the class AmbryRequestsTest method sendAndVerifyOperationRequest.

/**
 * Sends and verifies that an operation specific request works correctly.
 * @param requestType the type of the request to send.
 * @param ids the partitionIds to send requests for.
 * @param expectedErrorCode the {@link ServerErrorCode} expected in the response. For some requests this is the
 *                          response in the constituents rather than the actual response ({@link GetResponse} and
 *                          {@link ReplicaMetadataResponse}).
 * @throws InterruptedException
 * @throws IOException
 */
private void sendAndVerifyOperationRequest(RequestOrResponseType requestType, List<? extends PartitionId> ids, ServerErrorCode expectedErrorCode) throws InterruptedException, IOException {
    for (PartitionId id : ids) {
        int correlationId = TestUtils.RANDOM.nextInt();
        String clientId = UtilsTest.getRandomString(10);
        BlobId blobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, ClusterMapUtils.UNKNOWN_DATACENTER_ID, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), id, false);
        RequestOrResponse request;
        switch(requestType) {
            case PutRequest:
                BlobProperties properties = new BlobProperties(0, "serviceId", blobId.getAccountId(), blobId.getContainerId(), false);
                request = new PutRequest(correlationId, clientId, blobId, properties, ByteBuffer.allocate(0), ByteBuffer.allocate(0), 0, BlobType.DataBlob, null);
                break;
            case DeleteRequest:
                request = new DeleteRequest(correlationId, clientId, blobId, SystemTime.getInstance().milliseconds());
                break;
            case GetRequest:
                PartitionRequestInfo pRequestInfo = new PartitionRequestInfo(id, Collections.singletonList(blobId));
                request = new GetRequest(correlationId, clientId, MessageFormatFlags.All, Collections.singletonList(pRequestInfo), GetOption.Include_All);
                break;
            case ReplicaMetadataRequest:
                ReplicaMetadataRequestInfo rRequestInfo = new ReplicaMetadataRequestInfo(id, FIND_TOKEN_FACTORY.getNewFindToken(), "localhost", "/tmp");
                request = new ReplicaMetadataRequest(correlationId, clientId, Collections.singletonList(rRequestInfo), Long.MAX_VALUE);
                break;
            default:
                throw new IllegalArgumentException(requestType + " not supported by this function");
        }
        storageManager.resetStore();
        Response response = sendRequestGetResponse(request, requestType == RequestOrResponseType.GetRequest || requestType == RequestOrResponseType.ReplicaMetadataRequest ? ServerErrorCode.No_Error : expectedErrorCode);
        if (expectedErrorCode.equals(ServerErrorCode.No_Error)) {
            assertEquals("Operation received at the store not as expected", requestType, MockStorageManager.operationReceived);
        }
        if (requestType == RequestOrResponseType.GetRequest) {
            GetResponse getResponse = (GetResponse) response;
            for (PartitionResponseInfo info : getResponse.getPartitionResponseInfoList()) {
                assertEquals("Error code does not match expected", expectedErrorCode, info.getErrorCode());
            }
        } else if (requestType == RequestOrResponseType.ReplicaMetadataRequest) {
            ReplicaMetadataResponse replicaMetadataResponse = (ReplicaMetadataResponse) response;
            for (ReplicaMetadataResponseInfo info : replicaMetadataResponse.getReplicaMetadataResponseInfoList()) {
                assertEquals("Error code does not match expected", expectedErrorCode, info.getError());
            }
        }
    }
}
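
As a usage illustration (the clusterMap field and its getWritablePartitionIds(null) call are assumptions borrowed from the surrounding test setup, not shown here), the helper can be driven across all supported request types in one loop:

// Hypothetical driver: exercise each supported request type against the same partitions.
List<? extends PartitionId> partitions = clusterMap.getWritablePartitionIds(null);
for (RequestOrResponseType type : EnumSet.of(RequestOrResponseType.PutRequest,
        RequestOrResponseType.DeleteRequest, RequestOrResponseType.GetRequest,
        RequestOrResponseType.ReplicaMetadataRequest)) {
    sendAndVerifyOperationRequest(type, partitions, ServerErrorCode.No_Error);
}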

Example 3 with PutRequest

use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.

the class GetBlobOperationTest method doDirectPut.

/**
 * Do a put directly to the mock servers. This allows for blobs with malformed properties to be constructed.
 * @param blobType the {@link BlobType} for the blob to upload.
 * @param blobContent the raw content for the blob to upload (i.e. this can be serialized composite blob metadata or
 *                    an encrypted blob).
 */
private void doDirectPut(BlobType blobType, ByteBuf blobContent) throws Exception {
    List<PartitionId> writablePartitionIds = mockClusterMap.getWritablePartitionIds(null);
    PartitionId partitionId = writablePartitionIds.get(random.nextInt(writablePartitionIds.size()));
    blobId = new BlobId(routerConfig.routerBlobidCurrentVersion, BlobId.BlobIdType.NATIVE, mockClusterMap.getLocalDatacenterId(), blobProperties.getAccountId(), blobProperties.getContainerId(), partitionId, blobProperties.isEncrypted(), blobType == BlobType.MetadataBlob ? BlobId.BlobDataType.METADATA : BlobId.BlobDataType.DATACHUNK);
    blobIdStr = blobId.getID();
    Iterator<MockServer> servers = partitionId.getReplicaIds().stream().map(ReplicaId::getDataNodeId).map(dataNodeId -> mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort())).iterator();
    ByteBuffer blobEncryptionKey = null;
    ByteBuffer userMetadataBuf = ByteBuffer.wrap(userMetadata);
    if (blobProperties.isEncrypted()) {
        FutureResult<EncryptJob.EncryptJobResult> futureResult = new FutureResult<>();
        cryptoJobHandler.submitJob(new EncryptJob(blobProperties.getAccountId(), blobProperties.getContainerId(), blobType == BlobType.MetadataBlob ? null : blobContent.retainedDuplicate(), userMetadataBuf.duplicate(), kms.getRandomKey(), cryptoService, kms, null, new CryptoJobMetricsTracker(routerMetrics.encryptJobMetrics), futureResult::done));
        EncryptJob.EncryptJobResult result = futureResult.get(5, TimeUnit.SECONDS);
        blobEncryptionKey = result.getEncryptedKey();
        if (blobType != BlobType.MetadataBlob) {
            blobContent.release();
            blobContent = result.getEncryptedBlobContent();
        }
        userMetadataBuf = result.getEncryptedUserMetadata();
    }
    while (servers.hasNext()) {
        MockServer server = servers.next();
        PutRequest request = new PutRequest(random.nextInt(), "clientId", blobId, blobProperties, userMetadataBuf.duplicate(), blobContent.retainedDuplicate(), blobContent.readableBytes(), blobType, blobEncryptionKey == null ? null : blobEncryptionKey.duplicate());
        // Make sure we release the BoundedNettyByteBufReceive.
        server.send(request).release();
        request.release();
    }
    blobContent.release();
}
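
The retainedDuplicate()/release() calls above follow Netty's reference-counting rules: each retained duplicate adds a reference that must be released independently of the original buffer, which is why every server gets its own duplicate and the original content is released only after the loop. A minimal standalone illustration using plain Netty (no Ambry types involved):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class RefCountDemo {
    public static void main(String[] args) {
        ByteBuf content = Unpooled.wrappedBuffer(new byte[]{1, 2, 3});
        // refCnt() == 1 after creation.
        ByteBuf perServerCopy = content.retainedDuplicate();
        // refCnt() == 2: the duplicate shares the memory but adds its own reference.
        perServerCopy.release();  // drop the per-server reference (refCnt() back to 1)
        content.release();        // drop the original reference (refCnt() == 0, buffer reclaimed)
        System.out.println("fully released: " + (content.refCnt() == 0));
    }
}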

Example 4 with PutRequest

use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.

the class PutOperationTest method testSendIncomplete.

/**
 * Ensure that if any of the requests associated with the buffer of a PutChunk is not completely read out even
 * after the associated chunk is complete, the buffer is not reused even though the PutChunk is reused.
 */
@Test
public void testSendIncomplete() throws Exception {
    int numChunks = routerConfig.routerMaxInMemPutChunks + 1;
    BlobProperties blobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null);
    byte[] userMetadata = new byte[10];
    byte[] content = new byte[chunkSize * numChunks];
    random.nextBytes(content);
    ReadableStreamChannel channel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(content));
    FutureResult<String> future = new FutureResult<>();
    MockNetworkClient mockNetworkClient = new MockNetworkClient();
    PutOperation op = PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap, new LoggingNotificationSystem(), new InMemAccountService(true, false), userMetadata, channel, PutBlobOptions.DEFAULT, future, null, new RouterCallback(mockNetworkClient, new ArrayList<>()), null, null, null, null, time, blobProperties, MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
    op.startOperation();
    List<RequestInfo> requestInfos = new ArrayList<>();
    requestRegistrationCallback.setRequestsToSend(requestInfos);
    // Since this channel is in memory, one call to fill chunks would end up filling the maximum number of PutChunks.
    op.fillChunks();
    Assert.assertTrue("ReadyForPollCallback should have been invoked as chunks were fully filled", mockNetworkClient.getAndClearWokenUpStatus());
    // A poll should therefore return requestParallelism number of requests from each chunk
    op.poll(requestRegistrationCallback);
    Assert.assertEquals(routerConfig.routerMaxInMemPutChunks * requestParallelism, requestInfos.size());
    // There are routerMaxInMemPutChunks + 1 data chunks for this blob (and a metadata chunk).
    // Once the first chunk is completely sent out, the first PutChunk will be reused. What the test verifies is that
    // the buffer of the first PutChunk does not get reused. It does this as follows:
    // For the first chunk,
    // 1. use first request to succeed the chunk (the successTarget is set to 1).
    // 2. read and store from the second for comparing later.
    // 3. read from the third after the first PutChunk gets reused and ensure that the data from the third is the
    // same as from what was saved off from the second. This means that the buffer was not reused by the first
    // PutChunk.
    // 1.
    ResponseInfo responseInfo = getResponseInfo(requestInfos.get(0));
    PutResponse putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
    op.handleResponse(responseInfo, putResponse);
    requestInfos.get(0).getRequest().release();
    responseInfo.release();
    // 2.
    PutRequest putRequest = (PutRequest) requestInfos.get(1).getRequest();
    ByteBuffer buf = ByteBuffer.allocate((int) putRequest.sizeInBytes());
    ByteBufferChannel bufChannel = new ByteBufferChannel(buf);
    // read it out (which also marks this request as complete).
    putRequest.writeTo(bufChannel);
    putRequest.release();
    byte[] expectedRequestContent = buf.array();
    // 3.
    // first save the third request
    PutRequest savedRequest = (PutRequest) requestInfos.get(2).getRequest();
    // succeed all the other requests.
    for (int i = 3; i < requestInfos.size(); i++) {
        responseInfo = getResponseInfo(requestInfos.get(i));
        putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
        op.handleResponse(responseInfo, putResponse);
        requestInfos.get(i).getRequest().release();
        responseInfo.release();
    }
    // fill the first PutChunk with the last chunk.
    op.fillChunks();
    // Verify that the last chunk was filled.
    requestInfos.clear();
    op.poll(requestRegistrationCallback);
    Assert.assertEquals(1 * requestParallelism, requestInfos.size());
    // Verify that the buffer of the third request is not affected.
    buf = ByteBuffer.allocate((int) savedRequest.sizeInBytes());
    bufChannel = new ByteBufferChannel(buf);
    savedRequest.writeTo(bufChannel);
    savedRequest.release();
    byte[] savedRequestContent = buf.array();
    // reset the correlation id as they will be different between the two requests.
    resetCorrelationId(expectedRequestContent);
    resetCorrelationId(savedRequestContent);
    Assert.assertArrayEquals("Underlying buffer should not have be reused", expectedRequestContent, savedRequestContent);
    // Succeed the requests for the last data chunk. Freeing of the first PutChunk's buffer is internal
    // to the chunk and cannot be asserted on directly (though this can be verified via coverage).
    for (int i = 0; i < requestInfos.size(); i++) {
        responseInfo = getResponseInfo(requestInfos.get(i));
        putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
        op.handleResponse(responseInfo, putResponse);
        requestInfos.get(i).getRequest().release();
        responseInfo.release();
    }
    requestInfos.clear();
    // this should return requests for the metadata chunk
    op.poll(requestRegistrationCallback);
    Assert.assertEquals(1 * requestParallelism, requestInfos.size());
    Assert.assertFalse("Operation should not be complete yet", op.isOperationComplete());
    // once the metadata request succeeds, it should complete the operation.
    responseInfo = getResponseInfo(requestInfos.get(0));
    putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
    op.handleResponse(responseInfo, putResponse);
    responseInfo.release();
    requestInfos.forEach(info -> info.getRequest().release());
    Assert.assertTrue("Operation should be complete at this time", op.isOperationComplete());
}
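
The resetCorrelationId helper used above is not shown here. A minimal sketch, assuming the serialized request starts with the standard header of size (long, 8 bytes) followed by request type (short, 2 bytes) followed by the correlation id (int):

// Hedged sketch: zero out the correlation id so two otherwise-identical serialized requests
// can be compared byte for byte. The header offsets are an assumption about the wire format.
private void resetCorrelationId(byte[] serializedRequest) {
    int correlationIdOffset = 8 + 2;
    ByteBuffer.wrap(serializedRequest).putInt(correlationIdOffset, 0);
}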

Example 5 with PutRequest

use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.

the class StoredBlob method send.

/**
 * Take in a request in the form of {@link Send} and return a response in the form of a
 * {@link BoundedNettyByteBufReceive}.
 * @param send the request.
 * @return the response.
 * @throws IOException if there was an error in interpreting the request.
 */
public BoundedNettyByteBufReceive send(Send send) throws IOException {
    if (!shouldRespond) {
        return null;
    }
    ServerErrorCode serverError = hardError != null ? hardError : serverErrors.size() > 0 ? serverErrors.poll() : ServerErrorCode.No_Error;
    RequestOrResponseType type = ((RequestOrResponse) send).getRequestType();
    RequestOrResponse response;
    requestCounts.computeIfAbsent(type, k -> new LongAdder()).increment();
    switch(type) {
        case PutRequest:
            response = makePutResponse((PutRequest) send, serverError);
            break;
        case GetRequest:
            response = makeGetResponse((GetRequest) send, serverError);
            break;
        case DeleteRequest:
            response = makeDeleteResponse((DeleteRequest) send, serverError);
            break;
        case TtlUpdateRequest:
            response = makeTtlUpdateResponse((TtlUpdateRequest) send, serverError);
            break;
        case UndeleteRequest:
            response = makeUndeleteResponse((UndeleteRequest) send, serverError);
            break;
        default:
            throw new IOException("Unknown request type received");
    }
    ByteBufferChannel channel = new ByteBufferChannel(ByteBuffer.allocate((int) response.sizeInBytes()));
    response.writeTo(channel);
    response.release();
    ByteBuffer payload = channel.getBuffer();
    payload.flip();
    BoundedNettyByteBufReceive receive = new BoundedNettyByteBufReceive(100 * 1024 * 1024);
    receive.readFrom(Channels.newChannel(new ByteBufferInputStream(payload)));
    return receive;
}
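
The makePutResponse helper invoked for the PutRequest case is not part of this snippet. A minimal sketch of what it could look like, assuming a PutResponse(correlationId, clientId, error) constructor and ignoring the bookkeeping the real mock server does to record the stored blob on success:

// Hedged sketch: echo the request's correlation id and client id back together with the injected error.
private PutResponse makePutResponse(PutRequest putRequest, ServerErrorCode serverError) throws IOException {
    return new PutResponse(putRequest.getCorrelationId(), putRequest.getClientId(), serverError);
}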

Aggregations

PutRequest (com.github.ambry.protocol.PutRequest): 27
BlobId (com.github.ambry.commons.BlobId): 19
BlobProperties (com.github.ambry.messageformat.BlobProperties): 19
DataInputStream (java.io.DataInputStream): 18
ArrayList (java.util.ArrayList): 18
PutResponse (com.github.ambry.protocol.PutResponse): 15
ByteBuffer (java.nio.ByteBuffer): 14
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 12
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 12
GetResponse (com.github.ambry.protocol.GetResponse): 12
PartitionRequestInfo (com.github.ambry.protocol.PartitionRequestInfo): 12
IOException (java.io.IOException): 12
Properties (java.util.Properties): 12
BlobIdFactory (com.github.ambry.commons.BlobIdFactory): 11
ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream): 11
NettyByteBufDataInputStream (com.github.ambry.utils.NettyByteBufDataInputStream): 11
InMemAccountService (com.github.ambry.account.InMemAccountService): 10
ByteBufferReadableStreamChannel (com.github.ambry.commons.ByteBufferReadableStreamChannel): 9
BlobType (com.github.ambry.messageformat.BlobType): 9
HashMap (java.util.HashMap): 9