
Example 1 with PutResponse

Use of com.github.ambry.protocol.PutResponse in project ambry by linkedin.

Class PutManager, method extractPutResponseAndNotifyResponseHandler.

/**
 * Extract the {@link PutResponse} from the given {@link ResponseInfo}
 * @param responseInfo the {@link ResponseInfo} from which the {@link PutResponse} is to be extracted.
 * @return the extracted {@link PutResponse} if there is one; null otherwise.
 */
private PutResponse extractPutResponseAndNotifyResponseHandler(ResponseInfo responseInfo) {
    PutResponse putResponse = null;
    ReplicaId replicaId = ((RouterRequestInfo) responseInfo.getRequestInfo()).getReplicaId();
    NetworkClientErrorCode networkClientErrorCode = responseInfo.getError();
    if (networkClientErrorCode == null) {
        try {
            putResponse = PutResponse.readFrom(new DataInputStream(new ByteBufferInputStream(responseInfo.getResponse())));
            responseHandler.onEvent(replicaId, putResponse.getError());
        } catch (Exception e) {
            // Ignore. There is no value in notifying the response handler.
            logger.error("Response deserialization received unexpected error", e);
            routerMetrics.responseDeserializationErrorCount.inc();
        }
    } else {
        responseHandler.onEvent(replicaId, networkClientErrorCode);
    }
    return putResponse;
}
Also used: NetworkClientErrorCode(com.github.ambry.network.NetworkClientErrorCode), ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream), PutResponse(com.github.ambry.protocol.PutResponse), DataInputStream(java.io.DataInputStream), ReplicaId(com.github.ambry.clustermap.ReplicaId)
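
The extraction helper above only deserializes the response and notifies the response handler; each response still has to be routed back to the PutOperation that issued the request. A minimal sketch of how a batch of responses from the network client could be driven through it, assuming a batch entry point like the hypothetical handleResponses below (handleResponse itself is shown in Example 2):

void handleResponses(List<ResponseInfo> responseInfoList) {
    // Hand each response to handleResponse (Example 2), which calls
    // extractPutResponseAndNotifyResponseHandler and then the owning PutOperation.
    for (ResponseInfo responseInfo : responseInfoList) {
        handleResponse(responseInfo);
    }
}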

Example 2 with PutResponse

Use of com.github.ambry.protocol.PutResponse in project ambry by linkedin.

Class PutManager, method handleResponse.

/**
 * Hands over the response to the associated PutOperation that issued the request.
 * @param responseInfo the {@link ResponseInfo} containing the response.
 */
void handleResponse(ResponseInfo responseInfo) {
    long startTime = time.milliseconds();
    PutResponse putResponse = extractPutResponseAndNotifyResponseHandler(responseInfo);
    RouterRequestInfo routerRequestInfo = (RouterRequestInfo) responseInfo.getRequestInfo();
    int correlationId = ((PutRequest) routerRequestInfo.getRequest()).getCorrelationId();
    // Get the PutOperation that generated the request.
    PutOperation putOperation = correlationIdToPutOperation.remove(correlationId);
    // If it is still an active operation, hand over the response. Otherwise, ignore.
    if (putOperations.contains(putOperation)) {
        try {
            putOperation.handleResponse(responseInfo, putResponse);
        } catch (Exception e) {
            putOperation.setOperationExceptionAndComplete(new RouterException("Put handleResponse encountered unexpected error", e, RouterErrorCode.UnexpectedInternalError));
        }
        if (putOperation.isOperationComplete() && putOperations.remove(putOperation)) {
            onComplete(putOperation);
        }
        routerMetrics.putManagerHandleResponseTimeMs.update(time.milliseconds() - startTime);
    } else {
        routerMetrics.ignoredResponseCount.inc();
    }
}
Also used: PutRequest(com.github.ambry.protocol.PutRequest), PutResponse(com.github.ambry.protocol.PutResponse)
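
handleResponse finds the owning PutOperation through correlationIdToPutOperation, which implies that every outgoing request was registered under its correlation id when it was polled. A simplified sketch of that registration step, assuming it runs inside the request registration callback used during poll (the method shown is illustrative, not necessarily Ambry's exact code):

void registerRequestToSend(PutOperation putOperation, RequestInfo requestInfo) {
    // Remember which PutOperation produced this request so that handleResponse can
    // look it up again by correlation id when the response comes back.
    PutRequest putRequest = (PutRequest) requestInfo.getRequest();
    correlationIdToPutOperation.put(putRequest.getCorrelationId(), putOperation);
}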

Example 3 with PutResponse

Use of com.github.ambry.protocol.PutResponse in project ambry by linkedin.

Class PutOperationTest, method testSendIncomplete.

/**
 * Ensure that if any of the requests associated with the buffer of a PutChunk is not completely read out even
 * after the associated chunk is complete, the buffer is not reused even though the PutChunk is reused.
 */
@Test
public void testSendIncomplete() throws Exception {
    int numChunks = NonBlockingRouter.MAX_IN_MEM_CHUNKS + 1;
    BlobProperties blobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false);
    byte[] userMetadata = new byte[10];
    byte[] content = new byte[chunkSize * numChunks];
    random.nextBytes(content);
    ReadableStreamChannel channel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(content));
    FutureResult<String> future = new FutureResult<>();
    MockNetworkClient mockNetworkClient = new MockNetworkClient();
    PutOperation op = new PutOperation(routerConfig, routerMetrics, mockClusterMap, responseHandler, new LoggingNotificationSystem(), userMetadata, channel, future, null, new RouterCallback(mockNetworkClient, new ArrayList<BackgroundDeleteRequest>()), null, null, null, null, time, blobProperties);
    op.startReadingFromChannel();
    List<RequestInfo> requestInfos = new ArrayList<>();
    requestRegistrationCallback.requestListToFill = requestInfos;
    // Since this channel is in memory, one call to fill chunks would end up filling the maximum number of PutChunks.
    op.fillChunks();
    Assert.assertTrue("ReadyForPollCallback should have been invoked as chunks were fully filled", mockNetworkClient.getAndClearWokenUpStatus());
    // A poll should therefore return requestParallelism number of requests from each chunk
    op.poll(requestRegistrationCallback);
    Assert.assertEquals(NonBlockingRouter.MAX_IN_MEM_CHUNKS * requestParallelism, requestInfos.size());
    // There are MAX_IN_MEM_CHUNKS + 1 data chunks for this blob (and a metadata chunk).
    // Once the first chunk is completely sent out, the first PutChunk will be reused. What the test verifies is that
    // the buffer of the first PutChunk does not get reused. It does this as follows:
    // For the first chunk,
    // 1. use first request to succeed the chunk (the successTarget is set to 1).
    // 2. read and store from the second for comparing later.
    // 3. read from the third after the first PutChunk gets reused and ensure that the data from the third is the
    // same as from what was saved off from the second. This means that the buffer was not reused by the first
    // PutChunk.
    // 1.
    ResponseInfo responseInfo = getResponseInfo(requestInfos.get(0));
    PutResponse putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new DataInputStream(new ByteBufferInputStream(responseInfo.getResponse()))) : null;
    op.handleResponse(responseInfo, putResponse);
    // 2.
    PutRequest putRequest = (PutRequest) requestInfos.get(1).getRequest();
    ByteBuffer buf = ByteBuffer.allocate((int) putRequest.sizeInBytes());
    ByteBufferChannel bufChannel = new ByteBufferChannel(buf);
    // read it out (which also marks this request as complete).
    putRequest.writeTo(bufChannel);
    byte[] expectedRequestContent = buf.array();
    // 3.
    // first save the third request
    PutRequest savedRequest = (PutRequest) requestInfos.get(2).getRequest();
    // succeed all the other requests.
    for (int i = 3; i < requestInfos.size(); i++) {
        responseInfo = getResponseInfo(requestInfos.get(i));
        putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new DataInputStream(new ByteBufferInputStream(responseInfo.getResponse()))) : null;
        op.handleResponse(responseInfo, putResponse);
    }
    // fill the first PutChunk with the last chunk.
    op.fillChunks();
    // Verify that the last chunk was filled.
    requestInfos.clear();
    op.poll(requestRegistrationCallback);
    Assert.assertEquals(1 * requestParallelism, requestInfos.size());
    // Verify that the buffer of the third request is not affected.
    buf = ByteBuffer.allocate((int) savedRequest.sizeInBytes());
    bufChannel = new ByteBufferChannel(buf);
    savedRequest.writeTo(bufChannel);
    byte[] savedRequestContent = buf.array();
    // reset the correlation id as they will be different between the two requests.
    resetCorrelationId(expectedRequestContent);
    resetCorrelationId(savedRequestContent);
    Assert.assertArrayEquals("Underlying buffer should not have been reused", expectedRequestContent, savedRequestContent);
    // Succeed the requests for the newly filled chunk; whether the original buffer eventually gets freed is
    // internal to the chunk (though this can be verified via coverage).
    for (int i = 0; i < requestInfos.size(); i++) {
        responseInfo = getResponseInfo(requestInfos.get(i));
        putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new DataInputStream(new ByteBufferInputStream(responseInfo.getResponse()))) : null;
        op.handleResponse(responseInfo, putResponse);
    }
    requestInfos.clear();
    // this should return requests for the metadata chunk
    op.poll(requestRegistrationCallback);
    Assert.assertEquals(1 * requestParallelism, requestInfos.size());
    Assert.assertFalse("Operation should not be complete yet", op.isOperationComplete());
    // once the metadata request succeeds, it should complete the operation.
    responseInfo = getResponseInfo(requestInfos.get(0));
    putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new DataInputStream(new ByteBufferInputStream(responseInfo.getResponse()))) : null;
    op.handleResponse(responseInfo, putResponse);
    Assert.assertTrue("Operation should be complete at this time", op.isOperationComplete());
}
Also used: ResponseInfo(com.github.ambry.network.ResponseInfo), ByteBufferReadableStreamChannel(com.github.ambry.commons.ByteBufferReadableStreamChannel), ArrayList(java.util.ArrayList), ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream), PutRequest(com.github.ambry.protocol.PutRequest), RequestInfo(com.github.ambry.network.RequestInfo), PutResponse(com.github.ambry.protocol.PutResponse), DataInputStream(java.io.DataInputStream), ByteBuffer(java.nio.ByteBuffer), LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem), ByteBufferChannel(com.github.ambry.utils.ByteBufferChannel), BlobProperties(com.github.ambry.messageformat.BlobProperties), Test(org.junit.Test)
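
The test repeats the same deserialization ternary for every response it handles. A small helper that captures the repeated pattern could look like the sketch below; it uses only the calls that already appear in the test, but the helper itself is not part of PutOperationTest:

private static PutResponse deserializePutResponse(ResponseInfo responseInfo) throws IOException {
    // Mirror the ternary used throughout the test: deserialize only when the network
    // client reported no error, otherwise hand a null PutResponse to the operation.
    return responseInfo.getError() == null
        ? PutResponse.readFrom(new DataInputStream(new ByteBufferInputStream(responseInfo.getResponse())))
        : null;
}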

Example 4 with PutResponse

Use of com.github.ambry.protocol.PutResponse in project ambry by linkedin.

Class ServerTestUtil, method endToEndTest.

protected static void endToEndTest(Port targetPort, String routerDatacenter, String sslEnabledDatacenters, MockCluster cluster, SSLConfig clientSSLConfig, SSLSocketFactory clientSSLSocketFactory, Properties routerProps, boolean testEncryption) throws InterruptedException, IOException, InstantiationException {
    try {
        MockClusterMap clusterMap = cluster.getClusterMap();
        BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
        byte[] usermetadata = new byte[1000];
        byte[] data = new byte[31870];
        byte[] encryptionKey = new byte[100];
        short accountId = Utils.getRandomShort(TestUtils.RANDOM);
        short containerId = Utils.getRandomShort(TestUtils.RANDOM);
        BlobProperties properties = new BlobProperties(31870, "serviceid1", accountId, containerId, false);
        TestUtils.RANDOM.nextBytes(usermetadata);
        TestUtils.RANDOM.nextBytes(data);
        if (testEncryption) {
            TestUtils.RANDOM.nextBytes(encryptionKey);
        }
        List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds();
        short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
        BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false);
        BlobId blobId2 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false);
        BlobId blobId3 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false);
        BlobId blobId4 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false);
        // put blob 1
        PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
        BlockingChannel channel = getBlockingChannelBasedOnPortType(targetPort, "localhost", clientSSLSocketFactory, clientSSLConfig);
        channel.connect();
        channel.send(putRequest);
        InputStream putResponseStream = channel.receive().getInputStream();
        PutResponse response = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response.getError());
        // put blob 2
        PutRequest putRequest2 = new PutRequest(1, "client1", blobId2, properties, ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
        channel.send(putRequest2);
        putResponseStream = channel.receive().getInputStream();
        PutResponse response2 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 3
        PutRequest putRequest3 = new PutRequest(1, "client1", blobId3, properties, ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
        channel.send(putRequest3);
        putResponseStream = channel.receive().getInputStream();
        PutResponse response3 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // put blob 4 that is expired
        BlobProperties propertiesExpired = new BlobProperties(31870, "serviceid1", "ownerid", "jpeg", false, 0, accountId, containerId, false);
        PutRequest putRequest4 = new PutRequest(1, "client1", blobId4, propertiesExpired, ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
        channel.send(putRequest4);
        putResponseStream = channel.receive().getInputStream();
        PutResponse response4 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response4.getError());
        // get blob properties
        ArrayList<BlobId> ids = new ArrayList<BlobId>();
        MockPartitionId partition = (MockPartitionId) clusterMap.getWritablePartitionIds().get(0);
        ids.add(blobId1);
        ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
        PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partition, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
        channel.send(getRequest1);
        InputStream stream = channel.receive().getInputStream();
        GetResponse resp1 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        try {
            BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
            assertEquals(31870, propertyOutput.getBlobSize());
            assertEquals("serviceid1", propertyOutput.getServiceId());
            assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
            assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
        } catch (MessageFormatException e) {
            Assert.fail();
        }
        // get blob properties with expired flag set
        ids = new ArrayList<BlobId>();
        partition = (MockPartitionId) clusterMap.getWritablePartitionIds().get(0);
        ids.add(blobId1);
        partitionRequestInfoList = new ArrayList<>();
        partitionRequestInfo = new PartitionRequestInfo(partition, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_Expired_Blobs);
        channel.send(getRequest1);
        stream = channel.receive().getInputStream();
        resp1 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        try {
            BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
            assertEquals(31870, propertyOutput.getBlobSize());
            assertEquals("serviceid1", propertyOutput.getServiceId());
            assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
            assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
        } catch (MessageFormatException e) {
            Assert.fail();
        }
        // get blob properties for expired blob
        // 1. With no flag
        ArrayList<BlobId> idsExpired = new ArrayList<>();
        MockPartitionId partitionExpired = (MockPartitionId) clusterMap.getWritablePartitionIds().get(0);
        idsExpired.add(blobId4);
        ArrayList<PartitionRequestInfo> partitionRequestInfoListExpired = new ArrayList<>();
        PartitionRequestInfo partitionRequestInfoExpired = new PartitionRequestInfo(partitionExpired, idsExpired);
        partitionRequestInfoListExpired.add(partitionRequestInfoExpired);
        GetRequest getRequestExpired = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoListExpired, GetOption.None);
        channel.send(getRequestExpired);
        InputStream streamExpired = channel.receive().getInputStream();
        GetResponse respExpired = GetResponse.readFrom(new DataInputStream(streamExpired), clusterMap);
        assertEquals(ServerErrorCode.Blob_Expired, respExpired.getPartitionResponseInfoList().get(0).getErrorCode());
        // 2. With Include_Expired flag
        idsExpired = new ArrayList<>();
        partitionExpired = (MockPartitionId) clusterMap.getWritablePartitionIds().get(0);
        idsExpired.add(blobId4);
        partitionRequestInfoListExpired = new ArrayList<>();
        partitionRequestInfoExpired = new PartitionRequestInfo(partitionExpired, idsExpired);
        partitionRequestInfoListExpired.add(partitionRequestInfoExpired);
        getRequestExpired = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoListExpired, GetOption.Include_Expired_Blobs);
        channel.send(getRequestExpired);
        streamExpired = channel.receive().getInputStream();
        respExpired = GetResponse.readFrom(new DataInputStream(streamExpired), clusterMap);
        try {
            BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(respExpired.getInputStream());
            assertEquals(31870, propertyOutput.getBlobSize());
            assertEquals("serviceid1", propertyOutput.getServiceId());
            assertEquals("ownerid", propertyOutput.getOwnerId());
            assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
            assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
        } catch (MessageFormatException e) {
            Assert.fail();
        }
        // get user metadata
        GetRequest getRequest2 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
        channel.send(getRequest2);
        stream = channel.receive().getInputStream();
        GetResponse resp2 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        try {
            ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp2.getInputStream());
            Assert.assertArrayEquals(usermetadata, userMetadataOutput.array());
            if (testEncryption) {
                assertNotNull("MessageMetadata should not have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
            } else {
                assertNull("MessageMetadata should have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
            }
        } catch (MessageFormatException e) {
            Assert.fail();
        }
        // get blob info
        GetRequest getRequest3 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobInfo, partitionRequestInfoList, GetOption.None);
        channel.send(getRequest3);
        stream = channel.receive().getInputStream();
        GetResponse resp3 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        InputStream responseStream = resp3.getInputStream();
        // verify blob properties.
        BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(responseStream);
        assertEquals(31870, propertyOutput.getBlobSize());
        assertEquals("serviceid1", propertyOutput.getServiceId());
        assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
        assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
        // verify user metadata
        ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(responseStream);
        Assert.assertArrayEquals(usermetadata, userMetadataOutput.array());
        if (testEncryption) {
            assertNotNull("MessageMetadata should not have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
            assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
        } else {
            assertNull("MessageMetadata should have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
        }
        // get blob all
        GetRequest getRequest4 = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
        channel.send(getRequest4);
        stream = channel.receive().getInputStream();
        GetResponse resp4 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        responseStream = resp4.getInputStream();
        BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
        byte[] actualBlobData = new byte[(int) blobAll.getBlobData().getSize()];
        blobAll.getBlobData().getStream().getByteBuffer().get(actualBlobData);
        // verify content
        Assert.assertArrayEquals("Content mismatch", data, actualBlobData);
        if (testEncryption) {
            Assert.assertNotNull("EncryptionKey should not be null", blobAll.getBlobEncryptionKey());
            Assert.assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
        } else {
            Assert.assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
        }
        // encryptionKey in this test doesn't have any relation to the content. Both are random bytes for test purposes.
        if (!testEncryption) {
            // Use router to get the blob
            Properties routerProperties = getRouterProps(routerDatacenter);
            routerProperties.putAll(routerProps);
            VerifiableProperties routerVerifiableProps = new VerifiableProperties(routerProperties);
            Router router = new NonBlockingRouterFactory(routerVerifiableProps, clusterMap, new MockNotificationSystem(9), getSSLFactoryIfRequired(routerVerifiableProps)).getRouter();
            checkBlobId(router, blobId1, data);
            router.close();
        }
        // fetch blob that does not exist
        // get blob properties
        ids = new ArrayList<BlobId>();
        partition = (MockPartitionId) clusterMap.getWritablePartitionIds().get(0);
        ids.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partition, false));
        partitionRequestInfoList.clear();
        partitionRequestInfo = new PartitionRequestInfo(partition, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest5 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
        channel.send(getRequest5);
        stream = channel.receive().getInputStream();
        GetResponse resp5 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        assertEquals(ServerErrorCode.Blob_Not_Found, resp5.getPartitionResponseInfoList().get(0).getErrorCode());
        channel.disconnect();
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail();
    }
}
Also used: ArrayList(java.util.ArrayList), PutResponse(com.github.ambry.protocol.PutResponse), BlobProperties(com.github.ambry.messageformat.BlobProperties), Properties(java.util.Properties), VerifiableProperties(com.github.ambry.config.VerifiableProperties), SSLBlockingChannel(com.github.ambry.network.SSLBlockingChannel), BlockingChannel(com.github.ambry.network.BlockingChannel), BlobAll(com.github.ambry.messageformat.BlobAll), GetRequest(com.github.ambry.protocol.GetRequest), MessageFormatException(com.github.ambry.messageformat.MessageFormatException), NonBlockingRouterFactory(com.github.ambry.router.NonBlockingRouterFactory), MockPartitionId(com.github.ambry.clustermap.MockPartitionId), DataInputStream(java.io.DataInputStream), CrcInputStream(com.github.ambry.utils.CrcInputStream), FileInputStream(java.io.FileInputStream), InputStream(java.io.InputStream), PutRequest(com.github.ambry.protocol.PutRequest), Router(com.github.ambry.router.Router), PartitionId(com.github.ambry.clustermap.PartitionId), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), GetResponse(com.github.ambry.protocol.GetResponse), ByteBuffer(java.nio.ByteBuffer), GeneralSecurityException(java.security.GeneralSecurityException), IOException(java.io.IOException), BlobIdFactory(com.github.ambry.commons.BlobIdFactory), BlobId(com.github.ambry.commons.BlobId), MockClusterMap(com.github.ambry.clustermap.MockClusterMap)
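
The four puts at the start of endToEndTest all follow the same send/receive/assert sequence over the BlockingChannel. A compact helper along these lines (hypothetical, not part of ServerTestUtil) captures that pattern:

private static void putBlobAndVerify(BlockingChannel channel, PutRequest putRequest) throws IOException {
    // Send the put, block for the response, deserialize it, and assert that the server accepted it.
    channel.send(putRequest);
    DataInputStream putResponseStream = new DataInputStream(channel.receive().getInputStream());
    PutResponse response = PutResponse.readFrom(putResponseStream);
    assertEquals(ServerErrorCode.No_Error, response.getError());
}

With such a helper, the four explicit put blocks above would collapse to one call each.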

Example 5 with PutResponse

Use of com.github.ambry.protocol.PutResponse in project ambry by linkedin.

Class ServerTestUtil, method endToEndReplicationWithMultiNodeSinglePartitionTest.

protected static void endToEndReplicationWithMultiNodeSinglePartitionTest(String routerDatacenter, String sslEnabledDatacenters, int interestedDataNodePortNumber, Port dataNode1Port, Port dataNode2Port, Port dataNode3Port, MockCluster cluster, SSLConfig clientSSLConfig1, SSLSocketFactory clientSSLSocketFactory1, MockNotificationSystem notificationSystem, Properties routerProps, boolean testEncryption) throws InterruptedException, IOException, InstantiationException {
    // interestedDataNodePortNumber is used to locate the datanode and hence has to be the PlainText port
    try {
        int expectedTokenSize = 0;
        MockClusterMap clusterMap = cluster.getClusterMap();
        BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
        ArrayList<BlobProperties> propertyList = new ArrayList<>();
        ArrayList<BlobId> blobIdList = new ArrayList<>();
        ArrayList<byte[]> dataList = new ArrayList<>();
        ArrayList<byte[]> encryptionKeyList = new ArrayList<>();
        byte[] usermetadata = new byte[1000];
        TestUtils.RANDOM.nextBytes(usermetadata);
        PartitionId partition = clusterMap.getWritablePartitionIds().get(0);
        for (int i = 0; i < 11; i++) {
            short accountId = Utils.getRandomShort(TestUtils.RANDOM);
            short containerId = Utils.getRandomShort(TestUtils.RANDOM);
            propertyList.add(new BlobProperties(1000, "serviceid1", accountId, containerId, testEncryption));
            blobIdList.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), accountId, containerId, partition, false));
            dataList.add(TestUtils.getRandomBytes(1000));
            if (testEncryption) {
                encryptionKeyList.add(TestUtils.getRandomBytes(128));
            } else {
                encryptionKeyList.add(null);
            }
        }
        // put blob 1
        PutRequest putRequest = new PutRequest(1, "client1", blobIdList.get(0), propertyList.get(0), ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(dataList.get(0)), propertyList.get(0).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(0) != null ? ByteBuffer.wrap(encryptionKeyList.get(0)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(0), blobIdList.get(0), encryptionKeyList.get(0) != null ? ByteBuffer.wrap(encryptionKeyList.get(0)) : null, ByteBuffer.wrap(usermetadata), dataList.get(0));
        BlockingChannel channel1 = getBlockingChannelBasedOnPortType(dataNode1Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
        BlockingChannel channel2 = getBlockingChannelBasedOnPortType(dataNode2Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
        BlockingChannel channel3 = getBlockingChannelBasedOnPortType(dataNode3Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
        channel1.connect();
        channel2.connect();
        channel3.connect();
        channel1.send(putRequest);
        InputStream putResponseStream = channel1.receive().getInputStream();
        PutResponse response = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response.getError());
        // put blob 2
        PutRequest putRequest2 = new PutRequest(1, "client1", blobIdList.get(1), propertyList.get(1), ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(dataList.get(1)), propertyList.get(1).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(1) != null ? ByteBuffer.wrap(encryptionKeyList.get(1)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(1), blobIdList.get(1), encryptionKeyList.get(1) != null ? ByteBuffer.wrap(encryptionKeyList.get(1)) : null, ByteBuffer.wrap(usermetadata), dataList.get(1));
        channel2.send(putRequest2);
        putResponseStream = channel2.receive().getInputStream();
        PutResponse response2 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 3
        PutRequest putRequest3 = new PutRequest(1, "client1", blobIdList.get(2), propertyList.get(2), ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(dataList.get(2)), propertyList.get(2).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(2) != null ? ByteBuffer.wrap(encryptionKeyList.get(2)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(2), blobIdList.get(2), encryptionKeyList.get(2) != null ? ByteBuffer.wrap(encryptionKeyList.get(2)) : null, ByteBuffer.wrap(usermetadata), dataList.get(2));
        channel3.send(putRequest3);
        putResponseStream = channel3.receive().getInputStream();
        PutResponse response3 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // put blob 4
        putRequest = new PutRequest(1, "client1", blobIdList.get(3), propertyList.get(3), ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(dataList.get(3)), propertyList.get(3).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(3) != null ? ByteBuffer.wrap(encryptionKeyList.get(3)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(3), blobIdList.get(3), encryptionKeyList.get(3) != null ? ByteBuffer.wrap(encryptionKeyList.get(3)) : null, ByteBuffer.wrap(usermetadata), dataList.get(3));
        channel1.send(putRequest);
        putResponseStream = channel1.receive().getInputStream();
        response = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response.getError());
        // put blob 5
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(4), propertyList.get(4), ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(dataList.get(4)), propertyList.get(4).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(4) != null ? ByteBuffer.wrap(encryptionKeyList.get(4)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(4), blobIdList.get(4), encryptionKeyList.get(4) != null ? ByteBuffer.wrap(encryptionKeyList.get(4)) : null, ByteBuffer.wrap(usermetadata), dataList.get(4));
        channel2.send(putRequest2);
        putResponseStream = channel2.receive().getInputStream();
        response2 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 6
        putRequest3 = new PutRequest(1, "client1", blobIdList.get(5), propertyList.get(5), ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(dataList.get(5)), propertyList.get(5).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(5) != null ? ByteBuffer.wrap(encryptionKeyList.get(5)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(5), blobIdList.get(5), encryptionKeyList.get(5) != null ? ByteBuffer.wrap(encryptionKeyList.get(5)) : null, ByteBuffer.wrap(usermetadata), dataList.get(5));
        channel3.send(putRequest3);
        putResponseStream = channel3.receive().getInputStream();
        response3 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // wait till replication can complete
        notificationSystem.awaitBlobCreations(blobIdList.get(0).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(1).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(2).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(3).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(4).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(5).getID());
        // get blob properties
        ArrayList<BlobId> ids = new ArrayList<BlobId>();
        MockPartitionId mockPartitionId = (MockPartitionId) clusterMap.getWritablePartitionIds().get(0);
        ids.add(blobIdList.get(2));
        ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
        PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(mockPartitionId, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
        channel2.send(getRequest1);
        InputStream stream = channel2.receive().getInputStream();
        GetResponse resp1 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp1.getError());
        assertEquals(ServerErrorCode.No_Error, resp1.getPartitionResponseInfoList().get(0).getErrorCode());
        try {
            BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
            assertEquals(1000, propertyOutput.getBlobSize());
            assertEquals("serviceid1", propertyOutput.getServiceId());
            assertEquals("AccountId mismatch", propertyList.get(2).getAccountId(), propertyOutput.getAccountId());
            assertEquals("ContainerId mismatch", propertyList.get(2).getContainerId(), propertyOutput.getContainerId());
            assertEquals("IsEncrypted mismatch", propertyList.get(2).isEncrypted(), propertyOutput.isEncrypted());
        } catch (MessageFormatException e) {
            Assert.fail();
        }
        // get user metadata
        ids.clear();
        ids.add(blobIdList.get(1));
        GetRequest getRequest2 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
        channel1.send(getRequest2);
        stream = channel1.receive().getInputStream();
        GetResponse resp2 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp2.getError());
        assertEquals(ServerErrorCode.No_Error, resp2.getPartitionResponseInfoList().get(0).getErrorCode());
        try {
            ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp2.getInputStream());
            Assert.assertArrayEquals(usermetadata, userMetadataOutput.array());
            if (testEncryption) {
                assertNotNull("MessageMetadata should not have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(1), resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
            } else {
                assertNull("MessageMetadata should have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
            }
        } catch (MessageFormatException e) {
            Assert.fail();
        }
        // get blob
        ids.clear();
        ids.add(blobIdList.get(0));
        GetRequest getRequest3 = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
        channel3.send(getRequest3);
        stream = channel3.receive().getInputStream();
        GetResponse resp3 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        try {
            BlobData blobData = MessageFormatRecord.deserializeBlob(resp3.getInputStream());
            byte[] blobout = new byte[(int) blobData.getSize()];
            int readsize = 0;
            while (readsize < blobData.getSize()) {
                readsize += blobData.getStream().read(blobout, readsize, (int) blobData.getSize() - readsize);
            }
            Assert.assertArrayEquals(dataList.get(0), blobout);
            if (testEncryption) {
                assertNotNull("MessageMetadata should not have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(0), resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
            } else {
                assertNull("MessageMetadata should have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
            }
        } catch (MessageFormatException e) {
            Assert.fail();
        }
        // get blob all
        ids.clear();
        ids.add(blobIdList.get(0));
        GetRequest getRequest4 = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
        channel1.send(getRequest4);
        stream = channel1.receive().getInputStream();
        GetResponse resp4 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        try {
            BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp4.getInputStream(), blobIdFactory);
            byte[] blobout = new byte[(int) blobAll.getBlobData().getSize()];
            int readsize = 0;
            while (readsize < blobAll.getBlobData().getSize()) {
                readsize += blobAll.getBlobData().getStream().read(blobout, readsize, (int) blobAll.getBlobData().getSize() - readsize);
            }
            Assert.assertArrayEquals(dataList.get(0), blobout);
            if (testEncryption) {
                assertNotNull("MessageMetadata should not have been null", blobAll.getBlobEncryptionKey());
                assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(0), blobAll.getBlobEncryptionKey().array());
            } else {
                assertNull("MessageMetadata should have been null", blobAll.getBlobEncryptionKey());
            }
        } catch (MessageFormatException e) {
            Assert.fail();
        }
        if (!testEncryption) {
            // get blob data
            // Use router to get the blob
            Properties routerProperties = getRouterProps(routerDatacenter);
            routerProperties.putAll(routerProps);
            VerifiableProperties routerVerifiableProperties = new VerifiableProperties(routerProperties);
            Router router = new NonBlockingRouterFactory(routerVerifiableProperties, clusterMap, notificationSystem, getSSLFactoryIfRequired(routerVerifiableProperties)).getRouter();
            checkBlobId(router, blobIdList.get(0), dataList.get(0));
            checkBlobId(router, blobIdList.get(1), dataList.get(1));
            checkBlobId(router, blobIdList.get(2), dataList.get(2));
            checkBlobId(router, blobIdList.get(3), dataList.get(3));
            checkBlobId(router, blobIdList.get(4), dataList.get(4));
            checkBlobId(router, blobIdList.get(5), dataList.get(5));
            router.close();
        }
        // fetch blob that does not exist
        // get blob properties
        ids = new ArrayList<BlobId>();
        mockPartitionId = (MockPartitionId) clusterMap.getWritablePartitionIds().get(0);
        ids.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), propertyList.get(0).getAccountId(), propertyList.get(0).getContainerId(), mockPartitionId, false));
        partitionRequestInfoList.clear();
        partitionRequestInfo = new PartitionRequestInfo(mockPartitionId, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest5 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
        channel3.send(getRequest5);
        stream = channel3.receive().getInputStream();
        GetResponse resp5 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp5.getError());
        assertEquals(ServerErrorCode.Blob_Not_Found, resp5.getPartitionResponseInfoList().get(0).getErrorCode());
        // delete a blob and ensure it is propagated
        DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", blobIdList.get(0), System.currentTimeMillis());
        expectedTokenSize += getDeleteRecordSize(blobIdList.get(0));
        channel1.send(deleteRequest);
        InputStream deleteResponseStream = channel1.receive().getInputStream();
        DeleteResponse deleteResponse = DeleteResponse.readFrom(new DataInputStream(deleteResponseStream));
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        notificationSystem.awaitBlobDeletions(blobIdList.get(0).getID());
        ids = new ArrayList<BlobId>();
        ids.add(blobIdList.get(0));
        partitionRequestInfoList.clear();
        partitionRequestInfo = new PartitionRequestInfo(partition, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest6 = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
        channel3.send(getRequest6);
        stream = channel3.receive().getInputStream();
        GetResponse resp6 = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp6.getError());
        assertEquals(ServerErrorCode.Blob_Deleted, resp6.getPartitionResponseInfoList().get(0).getErrorCode());
        // get the data node to inspect replication tokens on
        DataNodeId dataNodeId = clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
        // read the replica file and check correctness
        // The token offset value of 13098 was derived as follows:
        // - Up to this point we have done 6 puts and 1 delete
        // - Each put takes up 2183 bytes in the log (1000 data, 1000 user metadata, 183 ambry metadata)
        // - Each delete takes up 97 bytes in the log
        // - The offset stored in the token will be the position of the last entry in the log (the delete, in this case)
        // - Thus, it will be at the end of the 6 puts: 6 * 2183 = 13098
        checkReplicaTokens(clusterMap, dataNodeId, expectedTokenSize - getDeleteRecordSize(blobIdList.get(0)), "0");
        // Shut down server 1
        cluster.getServers().get(0).shutdown();
        cluster.getServers().get(0).awaitShutdown();
        // Add more data to server 2 and server 3. Recover server 1 and ensure it is completely replicated
        // put blob 7
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(6), propertyList.get(6), ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(dataList.get(6)), propertyList.get(6).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(6) != null ? ByteBuffer.wrap(encryptionKeyList.get(6)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(6), blobIdList.get(6), encryptionKeyList.get(6) != null ? ByteBuffer.wrap(encryptionKeyList.get(6)) : null, ByteBuffer.wrap(usermetadata), dataList.get(6));
        channel2.send(putRequest2);
        putResponseStream = channel2.receive().getInputStream();
        response2 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 8
        putRequest3 = new PutRequest(1, "client1", blobIdList.get(7), propertyList.get(7), ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(dataList.get(7)), propertyList.get(7).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(7) != null ? ByteBuffer.wrap(encryptionKeyList.get(7)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(7), blobIdList.get(7), encryptionKeyList.get(7) != null ? ByteBuffer.wrap(encryptionKeyList.get(7)) : null, ByteBuffer.wrap(usermetadata), dataList.get(7));
        channel3.send(putRequest3);
        putResponseStream = channel3.receive().getInputStream();
        response3 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // put blob 9
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(8), propertyList.get(8), ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(dataList.get(8)), propertyList.get(8).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(8) != null ? ByteBuffer.wrap(encryptionKeyList.get(8)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(8), blobIdList.get(8), encryptionKeyList.get(8) != null ? ByteBuffer.wrap(encryptionKeyList.get(8)) : null, ByteBuffer.wrap(usermetadata), dataList.get(8));
        channel2.send(putRequest2);
        putResponseStream = channel2.receive().getInputStream();
        response2 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 10
        putRequest3 = new PutRequest(1, "client1", blobIdList.get(9), propertyList.get(9), ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(dataList.get(9)), propertyList.get(9).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(9) != null ? ByteBuffer.wrap(encryptionKeyList.get(9)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(9), blobIdList.get(9), encryptionKeyList.get(9) != null ? ByteBuffer.wrap(encryptionKeyList.get(9)) : null, ByteBuffer.wrap(usermetadata), dataList.get(9));
        channel3.send(putRequest3);
        putResponseStream = channel3.receive().getInputStream();
        response3 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // put blob 11
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(10), propertyList.get(10), ByteBuffer.wrap(usermetadata), ByteBuffer.wrap(dataList.get(10)), propertyList.get(10).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(10) != null ? ByteBuffer.wrap(encryptionKeyList.get(10)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(10), blobIdList.get(10), encryptionKeyList.get(10) != null ? ByteBuffer.wrap(encryptionKeyList.get(10)) : null, ByteBuffer.wrap(usermetadata), dataList.get(10));
        channel2.send(putRequest2);
        putResponseStream = channel2.receive().getInputStream();
        response2 = PutResponse.readFrom(new DataInputStream(putResponseStream));
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        cluster.getServers().get(0).startup();
        // wait for server to recover
        notificationSystem.awaitBlobCreations(blobIdList.get(6).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(7).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(8).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(9).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(10).getID());
        channel1.disconnect();
        channel1.connect();
        // get blob
        try {
            checkBlobContent(clusterMap, blobIdList.get(1), channel1, dataList.get(1), encryptionKeyList.get(1));
            checkBlobContent(clusterMap, blobIdList.get(2), channel1, dataList.get(2), encryptionKeyList.get(2));
            checkBlobContent(clusterMap, blobIdList.get(3), channel1, dataList.get(3), encryptionKeyList.get(3));
            checkBlobContent(clusterMap, blobIdList.get(4), channel1, dataList.get(4), encryptionKeyList.get(4));
            checkBlobContent(clusterMap, blobIdList.get(5), channel1, dataList.get(5), encryptionKeyList.get(5));
            checkBlobContent(clusterMap, blobIdList.get(6), channel1, dataList.get(6), encryptionKeyList.get(6));
            checkBlobContent(clusterMap, blobIdList.get(7), channel1, dataList.get(7), encryptionKeyList.get(7));
            checkBlobContent(clusterMap, blobIdList.get(8), channel1, dataList.get(8), encryptionKeyList.get(8));
            checkBlobContent(clusterMap, blobIdList.get(9), channel1, dataList.get(9), encryptionKeyList.get(9));
            checkBlobContent(clusterMap, blobIdList.get(10), channel1, dataList.get(10), encryptionKeyList.get(10));
        } catch (MessageFormatException e) {
            Assert.fail();
        }
        // Shutdown server 1. Remove all its data from all mount path. Recover server 1 and ensure node is built
        cluster.getServers().get(0).shutdown();
        cluster.getServers().get(0).awaitShutdown();
        File mountFile = new File(clusterMap.getReplicaIds(dataNodeId).get(0).getMountPath());
        for (File toDelete : mountFile.listFiles()) {
            deleteFolderContent(toDelete, true);
        }
        notificationSystem.decrementCreatedReplica(blobIdList.get(1).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(2).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(3).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(4).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(5).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(6).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(7).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(8).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(9).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(10).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        cluster.getServers().get(0).startup();
        notificationSystem.awaitBlobCreations(blobIdList.get(1).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(2).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(3).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(4).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(5).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(6).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(7).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(8).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(9).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(10).getID());
        channel1.disconnect();
        channel1.connect();
        // get blob
        try {
            checkBlobContent(clusterMap, blobIdList.get(1), channel1, dataList.get(1), encryptionKeyList.get(1));
            checkBlobContent(clusterMap, blobIdList.get(2), channel1, dataList.get(2), encryptionKeyList.get(2));
            checkBlobContent(clusterMap, blobIdList.get(3), channel1, dataList.get(3), encryptionKeyList.get(3));
            checkBlobContent(clusterMap, blobIdList.get(4), channel1, dataList.get(4), encryptionKeyList.get(4));
            checkBlobContent(clusterMap, blobIdList.get(5), channel1, dataList.get(5), encryptionKeyList.get(5));
            checkBlobContent(clusterMap, blobIdList.get(6), channel1, dataList.get(6), encryptionKeyList.get(6));
            checkBlobContent(clusterMap, blobIdList.get(7), channel1, dataList.get(7), encryptionKeyList.get(7));
            checkBlobContent(clusterMap, blobIdList.get(8), channel1, dataList.get(8), encryptionKeyList.get(8));
            checkBlobContent(clusterMap, blobIdList.get(9), channel1, dataList.get(9), encryptionKeyList.get(9));
            checkBlobContent(clusterMap, blobIdList.get(10), channel1, dataList.get(10), encryptionKeyList.get(10));
        } catch (MessageFormatException e) {
            Assert.fail();
        }
        channel1.disconnect();
        channel2.disconnect();
        channel3.disconnect();
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail();
    }
}
Also used: ArrayList(java.util.ArrayList), PutResponse(com.github.ambry.protocol.PutResponse), BlobProperties(com.github.ambry.messageformat.BlobProperties), Properties(java.util.Properties), VerifiableProperties(com.github.ambry.config.VerifiableProperties), SSLBlockingChannel(com.github.ambry.network.SSLBlockingChannel), BlockingChannel(com.github.ambry.network.BlockingChannel), BlobAll(com.github.ambry.messageformat.BlobAll), GetRequest(com.github.ambry.protocol.GetRequest), BlobData(com.github.ambry.messageformat.BlobData), MessageFormatException(com.github.ambry.messageformat.MessageFormatException), NonBlockingRouterFactory(com.github.ambry.router.NonBlockingRouterFactory), MockPartitionId(com.github.ambry.clustermap.MockPartitionId), DataInputStream(java.io.DataInputStream), CrcInputStream(com.github.ambry.utils.CrcInputStream), FileInputStream(java.io.FileInputStream), InputStream(java.io.InputStream), PutRequest(com.github.ambry.protocol.PutRequest), Router(com.github.ambry.router.Router), PartitionId(com.github.ambry.clustermap.PartitionId), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), GetResponse(com.github.ambry.protocol.GetResponse), ByteBuffer(java.nio.ByteBuffer), GeneralSecurityException(java.security.GeneralSecurityException), IOException(java.io.IOException), BlobIdFactory(com.github.ambry.commons.BlobIdFactory), DeleteResponse(com.github.ambry.protocol.DeleteResponse), BlobId(com.github.ambry.commons.BlobId), DeleteRequest(com.github.ambry.protocol.DeleteRequest), DataNodeId(com.github.ambry.clustermap.DataNodeId), MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId), File(java.io.File), MockClusterMap(com.github.ambry.clustermap.MockClusterMap)
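
Each of the eleven puts in this test builds a PutRequest, accumulates the expected replica-token size via getPutRecordSize, sends the request on one of the three channels, and asserts No_Error. A hedged sketch of a helper that folds those steps together (it reuses the test's own getPutRecordSize utility; the helper itself is illustrative and not part of ServerTestUtil):

private static long putBlobAndTrackTokenSize(BlockingChannel channel, BlobId blobId, BlobProperties properties,
        byte[] usermetadata, byte[] data, byte[] encryptionKey) throws IOException {
    // Build the put exactly as the test does, send it, verify the response, and return the
    // number of bytes this put is expected to add to the replication token offset.
    PutRequest putRequest = new PutRequest(1, "client1", blobId, properties, ByteBuffer.wrap(usermetadata),
        ByteBuffer.wrap(data), properties.getBlobSize(), BlobType.DataBlob,
        encryptionKey != null ? ByteBuffer.wrap(encryptionKey) : null);
    channel.send(putRequest);
    PutResponse response = PutResponse.readFrom(new DataInputStream(channel.receive().getInputStream()));
    assertEquals(ServerErrorCode.No_Error, response.getError());
    return getPutRecordSize(properties, blobId,
        encryptionKey != null ? ByteBuffer.wrap(encryptionKey) : null, ByteBuffer.wrap(usermetadata), data);
}

Calling this once per blob and summing the return values would reproduce the expectedTokenSize bookkeeping done inline in the test.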

Aggregations

PutResponse (com.github.ambry.protocol.PutResponse): 11
PutRequest (com.github.ambry.protocol.PutRequest): 9
DataInputStream (java.io.DataInputStream): 9
InputStream (java.io.InputStream): 6
FileInputStream (java.io.FileInputStream): 5
ArrayList (java.util.ArrayList): 5
ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream): 4
CrcInputStream (com.github.ambry.utils.CrcInputStream): 4
IOException (java.io.IOException): 4
BlobId (com.github.ambry.commons.BlobId): 3
BlobProperties (com.github.ambry.messageformat.BlobProperties): 3
MessageFormatException (com.github.ambry.messageformat.MessageFormatException): 3
ByteBuffer (java.nio.ByteBuffer): 3
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 2
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 2
PartitionId (com.github.ambry.clustermap.PartitionId): 2
BlobIdFactory (com.github.ambry.commons.BlobIdFactory): 2
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 2
BlobAll (com.github.ambry.messageformat.BlobAll): 2
BlockingChannel (com.github.ambry.network.BlockingChannel): 2