
Example 1 with GetRequest

Use of com.github.ambry.protocol.GetRequest in project ambry by linkedin.

From class ReplicaThread, method getMessagesForMissingKeys.

/**
 * Gets the messages for the keys that are missing from the local store by issuing a {@link GetRequest} to the remote
 * node, if there are any missing keys. If there are no missing keys to be fetched, then no request is issued and a
 * null response is returned.
 * @param connectedChannel The connection channel to the remote node
 * @param exchangeMetadataResponseList The list of metadata responses from the remote node
 * @param replicasToReplicatePerNode The list of remote replicas for the remote node
 * @param remoteNode The remote node from which replication needs to happen
 * @return The response that contains the missing messages, or null if no request was issued because there were no
 * missing keys.
 * @throws ReplicationException
 * @throws IOException
 */
private GetResponse getMessagesForMissingKeys(ConnectedChannel connectedChannel, List<ExchangeMetadataResponse> exchangeMetadataResponseList, List<RemoteReplicaInfo> replicasToReplicatePerNode, DataNodeId remoteNode) throws ReplicationException, IOException {
    List<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
    for (int i = 0; i < exchangeMetadataResponseList.size(); i++) {
        ExchangeMetadataResponse exchangeMetadataResponse = exchangeMetadataResponseList.get(i);
        RemoteReplicaInfo remoteReplicaInfo = replicasToReplicatePerNode.get(i);
        if (exchangeMetadataResponse.serverErrorCode == ServerErrorCode.No_Error) {
            Set<StoreKey> missingStoreKeys = exchangeMetadataResponse.missingStoreKeys;
            if (missingStoreKeys.size() > 0) {
                ArrayList<BlobId> keysToFetch = new ArrayList<BlobId>();
                for (StoreKey storeKey : missingStoreKeys) {
                    keysToFetch.add((BlobId) storeKey);
                }
                PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(remoteReplicaInfo.getReplicaId().getPartitionId(), keysToFetch);
                partitionRequestInfoList.add(partitionRequestInfo);
            }
        }
    }
    GetResponse getResponse = null;
    if (!partitionRequestInfoList.isEmpty()) {
        GetRequest getRequest = new GetRequest(correlationIdGenerator.incrementAndGet(), "replication-fetch-" + dataNodeId.getHostname(), MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
        long startTime = SystemTime.getInstance().milliseconds();
        try {
            connectedChannel.send(getRequest);
            ChannelOutput channelOutput = connectedChannel.receive();
            getResponse = GetResponse.readFrom(new DataInputStream(channelOutput.getInputStream()), clusterMap);
            long getRequestTime = SystemTime.getInstance().milliseconds() - startTime;
            replicationMetrics.updateGetRequestTime(getRequestTime, replicatingFromRemoteColo, replicatingOverSsl, datacenterName);
            if (getResponse.getError() != ServerErrorCode.No_Error) {
                logger.error("Remote node: " + remoteNode + " Thread name: " + threadName + " Remote replicas: " + replicasToReplicatePerNode + " GetResponse from replication: " + getResponse.getError());
                throw new ReplicationException(" Get Request returned error when trying to get missing keys " + getResponse.getError());
            }
        } catch (IOException e) {
            responseHandler.onEvent(replicasToReplicatePerNode.get(0).getReplicaId(), e);
            throw e;
        }
    }
    return getResponse;
}
Also used: ChannelOutput(com.github.ambry.network.ChannelOutput), ArrayList(java.util.ArrayList), IOException(java.io.IOException), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), DataInputStream(java.io.DataInputStream), StoreKey(com.github.ambry.store.StoreKey), GetResponse(com.github.ambry.protocol.GetResponse), GetRequest(com.github.ambry.protocol.GetRequest), BlobId(com.github.ambry.commons.BlobId)
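
The fetch above follows a simple pattern: build one PartitionRequestInfo per partition that has missing keys, wrap them in a GetRequest with MessageFormatFlags.All, send it over the connected channel, and deserialize the GetResponse from the returned stream. A minimal client-side sketch of that pattern follows; the class name, the fixed correlation id and the client id are illustrative, and the import packages for GetOption and MessageFormatFlags are assumed from their usual Ambry locations rather than taken from this example's import list.

import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.commons.BlobId;
import com.github.ambry.commons.ServerErrorCode;
import com.github.ambry.messageformat.MessageFormatFlags;
import com.github.ambry.network.ChannelOutput;
import com.github.ambry.network.ConnectedChannel;
import com.github.ambry.protocol.GetOption;
import com.github.ambry.protocol.GetRequest;
import com.github.ambry.protocol.GetResponse;
import com.github.ambry.protocol.PartitionRequestInfo;

import java.io.DataInputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.List;

final class GetRequestSketch {
    /**
     * Fetches the given blobs (all from one partition) over an already connected channel and returns the
     * deserialized response. Mirrors the fetch half of getMessagesForMissingKeys without the replication
     * bookkeeping.
     */
    static GetResponse fetchBlobs(ConnectedChannel channel, ClusterMap clusterMap, List<BlobId> blobIds)
            throws IOException {
        // One PartitionRequestInfo per partition; here all ids belong to the same partition.
        PartitionRequestInfo partitionRequestInfo =
                new PartitionRequestInfo(blobIds.get(0).getPartition(), blobIds);
        GetRequest getRequest = new GetRequest(1, "sketch-client", MessageFormatFlags.All,
                Collections.singletonList(partitionRequestInfo), GetOption.None);
        channel.send(getRequest);
        ChannelOutput channelOutput = channel.receive();
        GetResponse getResponse =
                GetResponse.readFrom(new DataInputStream(channelOutput.getInputStream()), clusterMap);
        if (getResponse.getError() != ServerErrorCode.No_Error) {
            // The original throws ReplicationException here; a plain IOException keeps the sketch self-contained.
            throw new IOException("GetRequest failed with error " + getResponse.getError());
        }
        return getResponse;
    }
}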

Example 2 with GetRequest

Use of com.github.ambry.protocol.GetRequest in project ambry by linkedin.

From class ServerTestUtil, method checkBlobContent.

private static void checkBlobContent(MockClusterMap clusterMap, BlobId blobId, BlockingChannel channel, byte[] dataToCheck, byte[] encryptionKey) throws IOException, MessageFormatException {
    ArrayList<BlobId> listIds = new ArrayList<BlobId>();
    listIds.add(blobId);
    ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
    partitionRequestInfoList.clear();
    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), listIds);
    partitionRequestInfoList.add(partitionRequestInfo);
    GetRequest getRequest3 = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
    channel.send(getRequest3);
    InputStream stream = channel.receive().getInputStream();
    GetResponse resp = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
    assertEquals(ServerErrorCode.No_Error, resp.getError());
    BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
    byte[] blobout = new byte[(int) blobData.getSize()];
    int readsize = 0;
    while (readsize < blobData.getSize()) {
        readsize += blobData.getStream().read(blobout, readsize, (int) blobData.getSize() - readsize);
    }
    Assert.assertArrayEquals(dataToCheck, blobout);
    if (encryptionKey != null) {
        Assert.assertNotNull("EncryptionKey should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
        Assert.assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
    } else {
        Assert.assertNull("EncryptionKey should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
    }
}
Also used: DataInputStream(java.io.DataInputStream), CrcInputStream(com.github.ambry.utils.CrcInputStream), FileInputStream(java.io.FileInputStream), InputStream(java.io.InputStream), GetRequest(com.github.ambry.protocol.GetRequest), ArrayList(java.util.ArrayList), BlobData(com.github.ambry.messageformat.BlobData), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), BlobId(com.github.ambry.commons.BlobId), GetResponse(com.github.ambry.protocol.GetResponse)
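
The read loop above has to keep calling read() until the whole blob has been consumed, because a single read may return fewer bytes than requested. A small sketch of that loop as a reusable helper, with an extra guard against a premature end of stream; the class and method names are illustrative.

import com.github.ambry.messageformat.BlobData;

import java.io.IOException;

final class BlobReadSketch {
    /**
     * Drains a deserialized BlobData stream into a byte array, the same loop checkBlobContent uses.
     */
    static byte[] readFully(BlobData blobData) throws IOException {
        byte[] content = new byte[(int) blobData.getSize()];
        int read = 0;
        while (read < content.length) {
            int count = blobData.getStream().read(content, read, content.length - read);
            if (count < 0) {
                throw new IOException("Blob stream ended after " + read + " of " + content.length + " bytes");
            }
            read += count;
        }
        return content;
    }
}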

Example 3 with GetRequest

Use of com.github.ambry.protocol.GetRequest in project ambry by linkedin.

From class AmbryRequests, method handleGetRequest.

public void handleGetRequest(Request request) throws IOException, InterruptedException {
    GetRequest getRequest = GetRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
    Histogram responseQueueTime = null;
    Histogram responseSendTime = null;
    Histogram responseTotalTime = null;
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    if (getRequest.getMessageFormatFlag() == MessageFormatFlags.Blob) {
        metrics.getBlobRequestQueueTimeInMs.update(requestQueueTime);
        metrics.getBlobRequestRate.mark();
        responseQueueTime = metrics.getBlobResponseQueueTimeInMs;
        responseSendTime = metrics.getBlobSendTimeInMs;
        responseTotalTime = metrics.getBlobTotalTimeInMs;
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobProperties) {
        metrics.getBlobPropertiesRequestQueueTimeInMs.update(requestQueueTime);
        metrics.getBlobPropertiesRequestRate.mark();
        responseQueueTime = metrics.getBlobPropertiesResponseQueueTimeInMs;
        responseSendTime = metrics.getBlobPropertiesSendTimeInMs;
        responseTotalTime = metrics.getBlobPropertiesTotalTimeInMs;
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobUserMetadata) {
        metrics.getBlobUserMetadataRequestQueueTimeInMs.update(requestQueueTime);
        metrics.getBlobUserMetadataRequestRate.mark();
        responseQueueTime = metrics.getBlobUserMetadataResponseQueueTimeInMs;
        responseSendTime = metrics.getBlobUserMetadataSendTimeInMs;
        responseTotalTime = metrics.getBlobUserMetadataTotalTimeInMs;
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobInfo) {
        metrics.getBlobInfoRequestQueueTimeInMs.update(requestQueueTime);
        metrics.getBlobInfoRequestRate.mark();
        responseQueueTime = metrics.getBlobInfoResponseQueueTimeInMs;
        responseSendTime = metrics.getBlobInfoSendTimeInMs;
        responseTotalTime = metrics.getBlobInfoTotalTimeInMs;
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.All) {
        metrics.getBlobAllRequestQueueTimeInMs.update(requestQueueTime);
        metrics.getBlobAllRequestRate.mark();
        responseQueueTime = metrics.getBlobAllResponseQueueTimeInMs;
        responseSendTime = metrics.getBlobAllSendTimeInMs;
        responseTotalTime = metrics.getBlobAllTotalTimeInMs;
    }
    long startTime = SystemTime.getInstance().milliseconds();
    GetResponse response = null;
    try {
        List<Send> messagesToSendList = new ArrayList<Send>(getRequest.getPartitionInfoList().size());
        List<PartitionResponseInfo> partitionResponseInfoList = new ArrayList<PartitionResponseInfo>(getRequest.getPartitionInfoList().size());
        for (PartitionRequestInfo partitionRequestInfo : getRequest.getPartitionInfoList()) {
            ServerErrorCode error = validateRequest(partitionRequestInfo.getPartition(), RequestOrResponseType.GetRequest);
            if (error != ServerErrorCode.No_Error) {
                logger.error("Validating get request failed for partition {} with error {}", partitionRequestInfo.getPartition(), error);
                PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(partitionRequestInfo.getPartition(), error);
                partitionResponseInfoList.add(partitionResponseInfo);
            } else {
                try {
                    Store storeToGet = storageManager.getStore(partitionRequestInfo.getPartition());
                    EnumSet<StoreGetOptions> storeGetOptions = EnumSet.noneOf(StoreGetOptions.class);
                    // Currently only one option is supported.
                    if (getRequest.getGetOption() == GetOption.Include_Expired_Blobs) {
                        storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Expired);
                    }
                    if (getRequest.getGetOption() == GetOption.Include_Deleted_Blobs) {
                        storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Deleted);
                    }
                    if (getRequest.getGetOption() == GetOption.Include_All) {
                        storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Deleted, StoreGetOptions.Store_Include_Expired);
                    }
                    StoreInfo info = storeToGet.get(partitionRequestInfo.getBlobIds(), storeGetOptions);
                    MessageFormatSend blobsToSend = new MessageFormatSend(info.getMessageReadSet(), getRequest.getMessageFormatFlag(), messageFormatMetrics, storeKeyFactory);
                    PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(partitionRequestInfo.getPartition(), info.getMessageReadSetInfo(), blobsToSend.getMessageMetadataList());
                    messagesToSendList.add(blobsToSend);
                    partitionResponseInfoList.add(partitionResponseInfo);
                } catch (StoreException e) {
                    boolean logInErrorLevel = false;
                    if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
                        metrics.idNotFoundError.inc();
                    } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
                        metrics.ttlExpiredError.inc();
                    } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
                        metrics.idDeletedError.inc();
                    } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
                        metrics.getAuthorizationFailure.inc();
                    } else {
                        metrics.unExpectedStoreGetError.inc();
                        logInErrorLevel = true;
                    }
                    if (logInErrorLevel) {
                        logger.error("Store exception on a get with error code {} for partition {}", e.getErrorCode(), partitionRequestInfo.getPartition(), e);
                    } else {
                        logger.trace("Store exception on a get with error code {} for partition {}", e.getErrorCode(), partitionRequestInfo.getPartition(), e);
                    }
                    PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(partitionRequestInfo.getPartition(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
                    partitionResponseInfoList.add(partitionResponseInfo);
                } catch (MessageFormatException e) {
                    logger.error("Message format exception on a get with error code " + e.getErrorCode() + " for partitionRequestInfo " + partitionRequestInfo, e);
                    if (e.getErrorCode() == MessageFormatErrorCodes.Data_Corrupt) {
                        metrics.dataCorruptError.inc();
                    } else if (e.getErrorCode() == MessageFormatErrorCodes.Unknown_Format_Version) {
                        metrics.unknownFormatError.inc();
                    }
                    PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(partitionRequestInfo.getPartition(), ErrorMapping.getMessageFormatErrorMapping(e.getErrorCode()));
                    partitionResponseInfoList.add(partitionResponseInfo);
                }
            }
        }
        CompositeSend compositeSend = new CompositeSend(messagesToSendList);
        response = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), partitionResponseInfoList, compositeSend, ServerErrorCode.No_Error);
    } catch (Exception e) {
        logger.error("Unknown exception for request " + getRequest, e);
        response = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), ServerErrorCode.Unknown_Error);
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", getRequest, response, processingTime);
        if (getRequest.getMessageFormatFlag() == MessageFormatFlags.Blob) {
            metrics.getBlobProcessingTimeInMs.update(processingTime);
            metrics.updateGetBlobProcessingTimeBySize(response.sizeInBytes(), processingTime);
        } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobProperties) {
            metrics.getBlobPropertiesProcessingTimeInMs.update(processingTime);
        } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobUserMetadata) {
            metrics.getBlobUserMetadataProcessingTimeInMs.update(processingTime);
        } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobInfo) {
            metrics.getBlobInfoProcessingTimeInMs.update(processingTime);
        } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.All) {
            metrics.getBlobAllProcessingTimeInMs.update(processingTime);
            metrics.updateGetBlobProcessingTimeBySize(response.sizeInBytes(), processingTime);
        }
    }
    sendGetResponse(requestResponseChannel, response, request, responseQueueTime, responseSendTime, responseTotalTime, totalTimeSpent, response.sizeInBytes(), getRequest.getMessageFormatFlag(), metrics);
}
Also used: MessageFormatException(com.github.ambry.messageformat.MessageFormatException), Histogram(com.codahale.metrics.Histogram), StoreGetOptions(com.github.ambry.store.StoreGetOptions), ArrayList(java.util.ArrayList), MessageFormatSend(com.github.ambry.messageformat.MessageFormatSend), Store(com.github.ambry.store.Store), StoreInfo(com.github.ambry.store.StoreInfo), DataInputStream(java.io.DataInputStream), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), GetResponse(com.github.ambry.protocol.GetResponse), ServerErrorCode(com.github.ambry.commons.ServerErrorCode), StoreException(com.github.ambry.store.StoreException), IOException(java.io.IOException), CompositeSend(com.github.ambry.network.CompositeSend), Send(com.github.ambry.network.Send), GetRequest(com.github.ambry.protocol.GetRequest), PartitionResponseInfo(com.github.ambry.protocol.PartitionResponseInfo)
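
The three if-blocks that translate the request's GetOption into StoreGetOptions are the only per-option logic in the handler. A sketch of the same mapping extracted into a helper; the class and method names are illustrative, and GetOption is assumed to live in com.github.ambry.protocol.

import com.github.ambry.protocol.GetOption;
import com.github.ambry.store.StoreGetOptions;

import java.util.EnumSet;

final class GetOptionSketch {
    /**
     * Maps the request-level GetOption to store-level options the same way handleGetRequest does inline.
     */
    static EnumSet<StoreGetOptions> toStoreGetOptions(GetOption getOption) {
        switch (getOption) {
            case Include_Expired_Blobs:
                return EnumSet.of(StoreGetOptions.Store_Include_Expired);
            case Include_Deleted_Blobs:
                return EnumSet.of(StoreGetOptions.Store_Include_Deleted);
            case Include_All:
                return EnumSet.of(StoreGetOptions.Store_Include_Deleted, StoreGetOptions.Store_Include_Expired);
            default:
                // GetOption.None (and anything the store does not understand) maps to no extra options.
                return EnumSet.noneOf(StoreGetOptions.class);
        }
    }
}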

Example 4 with GetRequest

Use of com.github.ambry.protocol.GetRequest in project ambry by linkedin.

From class AmbryRequestsTest, method sendAndVerifyOperationRequest.

/**
 * Sends and verifies that an operation-specific request works correctly.
 * @param requestType the type of the request to send.
 * @param ids the partitionIds to send requests for.
 * @param expectedErrorCode the {@link ServerErrorCode} expected in the response. For some requests ({@link GetResponse}
 *                          and {@link ReplicaMetadataResponse}) this is the error code expected in the constituent
 *                          responses rather than in the overall response.
 * @throws InterruptedException
 * @throws IOException
 */
private void sendAndVerifyOperationRequest(RequestOrResponseType requestType, List<? extends PartitionId> ids, ServerErrorCode expectedErrorCode) throws InterruptedException, IOException {
    for (PartitionId id : ids) {
        int correlationId = TestUtils.RANDOM.nextInt();
        String clientId = UtilsTest.getRandomString(10);
        BlobId blobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, ClusterMapUtils.UNKNOWN_DATACENTER_ID, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), id, false);
        RequestOrResponse request;
        switch(requestType) {
            case PutRequest:
                BlobProperties properties = new BlobProperties(0, "serviceId", blobId.getAccountId(), blobId.getAccountId(), false);
                request = new PutRequest(correlationId, clientId, blobId, properties, ByteBuffer.allocate(0), ByteBuffer.allocate(0), 0, BlobType.DataBlob, null);
                break;
            case DeleteRequest:
                request = new DeleteRequest(correlationId, clientId, blobId, SystemTime.getInstance().milliseconds());
                break;
            case GetRequest:
                PartitionRequestInfo pRequestInfo = new PartitionRequestInfo(id, Collections.singletonList(blobId));
                request = new GetRequest(correlationId, clientId, MessageFormatFlags.All, Collections.singletonList(pRequestInfo), GetOption.Include_All);
                break;
            case ReplicaMetadataRequest:
                ReplicaMetadataRequestInfo rRequestInfo = new ReplicaMetadataRequestInfo(id, FIND_TOKEN_FACTORY.getNewFindToken(), "localhost", "/tmp");
                request = new ReplicaMetadataRequest(correlationId, clientId, Collections.singletonList(rRequestInfo), Long.MAX_VALUE);
                break;
            default:
                throw new IllegalArgumentException(requestType + " not supported by this function");
        }
        storageManager.resetStore();
        Response response = sendRequestGetResponse(request, requestType == RequestOrResponseType.GetRequest || requestType == RequestOrResponseType.ReplicaMetadataRequest ? ServerErrorCode.No_Error : expectedErrorCode);
        if (expectedErrorCode.equals(ServerErrorCode.No_Error)) {
            assertEquals("Operation received at the store not as expected", requestType, MockStorageManager.operationReceived);
        }
        if (requestType == RequestOrResponseType.GetRequest) {
            GetResponse getResponse = (GetResponse) response;
            for (PartitionResponseInfo info : getResponse.getPartitionResponseInfoList()) {
                assertEquals("Error code does not match expected", expectedErrorCode, info.getErrorCode());
            }
        } else if (requestType == RequestOrResponseType.ReplicaMetadataRequest) {
            ReplicaMetadataResponse replicaMetadataResponse = (ReplicaMetadataResponse) response;
            for (ReplicaMetadataResponseInfo info : replicaMetadataResponse.getReplicaMetadataResponseInfoList()) {
                assertEquals("Error code does not match expected", expectedErrorCode, info.getError());
            }
        }
    }
}
Also used: ReplicaMetadataResponse(com.github.ambry.protocol.ReplicaMetadataResponse), ReplicaMetadataResponseInfo(com.github.ambry.protocol.ReplicaMetadataResponseInfo), PutRequest(com.github.ambry.protocol.PutRequest), PartitionId(com.github.ambry.clustermap.PartitionId), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), GetResponse(com.github.ambry.protocol.GetResponse), CatchupStatusAdminResponse(com.github.ambry.protocol.CatchupStatusAdminResponse), AdminResponse(com.github.ambry.protocol.AdminResponse), RequestOrResponse(com.github.ambry.protocol.RequestOrResponse), Response(com.github.ambry.protocol.Response), ReplicaMetadataRequest(com.github.ambry.protocol.ReplicaMetadataRequest), ReplicaMetadataRequestInfo(com.github.ambry.protocol.ReplicaMetadataRequestInfo), BlobProperties(com.github.ambry.messageformat.BlobProperties), GetRequest(com.github.ambry.protocol.GetRequest), PartitionResponseInfo(com.github.ambry.protocol.PartitionResponseInfo), BlobId(com.github.ambry.commons.BlobId), DeleteRequest(com.github.ambry.protocol.DeleteRequest)
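
For GetRequest the expected error code is checked on each PartitionResponseInfo rather than on the top-level response. A sketch of that verification step as a standalone helper; the class and method names are illustrative.

import com.github.ambry.commons.ServerErrorCode;
import com.github.ambry.protocol.GetResponse;
import com.github.ambry.protocol.PartitionResponseInfo;

import org.junit.Assert;

final class GetResponseAssertSketch {
    /**
     * Asserts that every partition in a GetResponse carries the expected error code, the same check
     * sendAndVerifyOperationRequest performs for GetRequest.
     */
    static void assertPartitionErrors(GetResponse response, ServerErrorCode expected) {
        for (PartitionResponseInfo info : response.getPartitionResponseInfoList()) {
            Assert.assertEquals("Error code does not match expected", expected, info.getErrorCode());
        }
    }
}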

Example 5 with GetRequest

Use of com.github.ambry.protocol.GetRequest in project ambry by linkedin.

From class ServerTestUtil, method endToEndReplicationWithMultiNodeSinglePartitionTest.

static void endToEndReplicationWithMultiNodeSinglePartitionTest(String routerDatacenter, int interestedDataNodePortNumber, Port dataNode1Port, Port dataNode2Port, Port dataNode3Port, MockCluster cluster, SSLConfig clientSSLConfig1, SSLSocketFactory clientSSLSocketFactory1, MockNotificationSystem notificationSystem, Properties routerProps, boolean testEncryption) {
    // interestedDataNodePortNumber is used to locate the datanode and hence has to be PlainText port
    try {
        // The header size of a LogSegment. This shouldn't be here since it breaks the interface of Log, but we use
        // this number to satisfy the test cases.
        // This also means there is only one log segment for this partition. If more operations are added to the
        // partition and it exceeds the log segment capacity, this number will have to be increased.
        int expectedTokenSize = 18;
        MockClusterMap clusterMap = cluster.getClusterMap();
        BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
        ArrayList<BlobProperties> propertyList = new ArrayList<>();
        ArrayList<BlobId> blobIdList = new ArrayList<>();
        ArrayList<byte[]> dataList = new ArrayList<>();
        ArrayList<byte[]> encryptionKeyList = new ArrayList<>();
        byte[] usermetadata = new byte[1000];
        TestUtils.RANDOM.nextBytes(usermetadata);
        PartitionId partition = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
        for (int i = 0; i < 11; i++) {
            short accountId = Utils.getRandomShort(TestUtils.RANDOM);
            short containerId = Utils.getRandomShort(TestUtils.RANDOM);
            propertyList.add(new BlobProperties(1000, "serviceid1", null, null, false, TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, testEncryption, null, null, null));
            blobIdList.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), accountId, containerId, partition, false, BlobId.BlobDataType.DATACHUNK));
            dataList.add(TestUtils.getRandomBytes(1000));
            if (testEncryption) {
                encryptionKeyList.add(TestUtils.getRandomBytes(128));
            } else {
                encryptionKeyList.add(null);
            }
        }
        // put blob 1
        PutRequest putRequest = new PutRequest(1, "client1", blobIdList.get(0), propertyList.get(0), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(0)), propertyList.get(0).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(0) != null ? ByteBuffer.wrap(encryptionKeyList.get(0)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(0), blobIdList.get(0), encryptionKeyList.get(0) != null ? ByteBuffer.wrap(encryptionKeyList.get(0)) : null, ByteBuffer.wrap(usermetadata), dataList.get(0));
        ConnectedChannel channel1 = getBlockingChannelBasedOnPortType(dataNode1Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
        ConnectedChannel channel2 = getBlockingChannelBasedOnPortType(dataNode2Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
        ConnectedChannel channel3 = getBlockingChannelBasedOnPortType(dataNode3Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
        channel1.connect();
        channel2.connect();
        channel3.connect();
        DataInputStream putResponseStream = channel1.sendAndReceive(putRequest).getInputStream();
        PutResponse response = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response.getError());
        // put blob 2
        PutRequest putRequest2 = new PutRequest(1, "client1", blobIdList.get(1), propertyList.get(1), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(1)), propertyList.get(1).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(1) != null ? ByteBuffer.wrap(encryptionKeyList.get(1)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(1), blobIdList.get(1), encryptionKeyList.get(1) != null ? ByteBuffer.wrap(encryptionKeyList.get(1)) : null, ByteBuffer.wrap(usermetadata), dataList.get(1));
        putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
        PutResponse response2 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 3
        PutRequest putRequest3 = new PutRequest(1, "client1", blobIdList.get(2), propertyList.get(2), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(2)), propertyList.get(2).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(2) != null ? ByteBuffer.wrap(encryptionKeyList.get(2)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(2), blobIdList.get(2), encryptionKeyList.get(2) != null ? ByteBuffer.wrap(encryptionKeyList.get(2)) : null, ByteBuffer.wrap(usermetadata), dataList.get(2));
        putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
        PutResponse response3 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // put blob 4
        putRequest = new PutRequest(1, "client1", blobIdList.get(3), propertyList.get(3), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(3)), propertyList.get(3).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(3) != null ? ByteBuffer.wrap(encryptionKeyList.get(3)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(3), blobIdList.get(3), encryptionKeyList.get(3) != null ? ByteBuffer.wrap(encryptionKeyList.get(3)) : null, ByteBuffer.wrap(usermetadata), dataList.get(3));
        putResponseStream = channel1.sendAndReceive(putRequest).getInputStream();
        response = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response.getError());
        // put blob 5
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(4), propertyList.get(4), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(4)), propertyList.get(4).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(4) != null ? ByteBuffer.wrap(encryptionKeyList.get(4)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(4), blobIdList.get(4), encryptionKeyList.get(4) != null ? ByteBuffer.wrap(encryptionKeyList.get(4)) : null, ByteBuffer.wrap(usermetadata), dataList.get(4));
        putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
        response2 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 6
        putRequest3 = new PutRequest(1, "client1", blobIdList.get(5), propertyList.get(5), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(5)), propertyList.get(5).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(5) != null ? ByteBuffer.wrap(encryptionKeyList.get(5)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(5), blobIdList.get(5), encryptionKeyList.get(5) != null ? ByteBuffer.wrap(encryptionKeyList.get(5)) : null, ByteBuffer.wrap(usermetadata), dataList.get(5));
        putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
        response3 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // wait till replication can complete
        notificationSystem.awaitBlobCreations(blobIdList.get(0).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(1).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(2).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(3).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(4).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(5).getID());
        checkTtlUpdateStatus(channel3, clusterMap, blobIdFactory, blobIdList.get(5), dataList.get(5), false, getExpiryTimeMs(propertyList.get(5)));
        updateBlobTtl(channel3, blobIdList.get(5), cluster.time.milliseconds());
        expectedTokenSize += getUpdateRecordSize(blobIdList.get(5), SubRecord.Type.TTL_UPDATE);
        checkTtlUpdateStatus(channel3, clusterMap, blobIdFactory, blobIdList.get(5), dataList.get(5), true, Utils.Infinite_Time);
        notificationSystem.awaitBlobUpdates(blobIdList.get(5).getID(), UpdateType.TTL_UPDATE);
        // get blob properties
        ArrayList<BlobId> ids = new ArrayList<BlobId>();
        MockPartitionId mockPartitionId = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
        ids.add(blobIdList.get(2));
        ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
        PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(mockPartitionId, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
        DataInputStream stream = channel2.sendAndReceive(getRequest1).getInputStream();
        GetResponse resp1 = GetResponse.readFrom(stream, clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp1.getError());
        assertEquals(ServerErrorCode.No_Error, resp1.getPartitionResponseInfoList().get(0).getErrorCode());
        try {
            BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
            assertEquals(1000, propertyOutput.getBlobSize());
            assertEquals("serviceid1", propertyOutput.getServiceId());
            assertEquals("AccountId mismatch", propertyList.get(2).getAccountId(), propertyOutput.getAccountId());
            assertEquals("ContainerId mismatch", propertyList.get(2).getContainerId(), propertyOutput.getContainerId());
            assertEquals("IsEncrypted mismatch", propertyList.get(2).isEncrypted(), propertyOutput.isEncrypted());
        } catch (MessageFormatException e) {
            fail();
        }
        // get user metadata
        ids.clear();
        ids.add(blobIdList.get(1));
        GetRequest getRequest2 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
        stream = channel1.sendAndReceive(getRequest2).getInputStream();
        GetResponse resp2 = GetResponse.readFrom(stream, clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp2.getError());
        assertEquals(ServerErrorCode.No_Error, resp2.getPartitionResponseInfoList().get(0).getErrorCode());
        try {
            ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp2.getInputStream());
            assertArrayEquals(usermetadata, userMetadataOutput.array());
            if (testEncryption) {
                assertNotNull("MessageMetadata should not have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(1), resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
            } else {
                assertNull("MessageMetadata should have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
            }
        } catch (MessageFormatException e) {
            fail();
        }
        releaseNettyBufUnderneathStream(stream);
        // get blob
        ids.clear();
        ids.add(blobIdList.get(0));
        GetRequest getRequest3 = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
        stream = channel3.sendAndReceive(getRequest3).getInputStream();
        GetResponse resp3 = GetResponse.readFrom(stream, clusterMap);
        try {
            BlobData blobData = MessageFormatRecord.deserializeBlob(resp3.getInputStream());
            byte[] blobout = getBlobDataAndRelease(blobData);
            assertArrayEquals(dataList.get(0), blobout);
            if (testEncryption) {
                assertNotNull("MessageMetadata should not have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(0), resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
            } else {
                assertNull("MessageMetadata should have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
            }
        } catch (MessageFormatException e) {
            fail();
        }
        releaseNettyBufUnderneathStream(stream);
        // get blob all
        ids.clear();
        ids.add(blobIdList.get(0));
        GetRequest getRequest4 = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
        stream = channel1.sendAndReceive(getRequest4).getInputStream();
        GetResponse resp4 = GetResponse.readFrom(stream, clusterMap);
        try {
            BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp4.getInputStream(), blobIdFactory);
            byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
            assertArrayEquals(dataList.get(0), blobout);
            if (testEncryption) {
                assertNotNull("MessageMetadata should not have been null", blobAll.getBlobEncryptionKey());
                assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(0), blobAll.getBlobEncryptionKey().array());
            } else {
                assertNull("MessageMetadata should have been null", blobAll.getBlobEncryptionKey());
            }
        } catch (MessageFormatException e) {
            fail();
        }
        releaseNettyBufUnderneathStream(stream);
        if (!testEncryption) {
            // get blob data
            // Use router to get the blob
            Properties routerProperties = getRouterProps(routerDatacenter);
            routerProperties.putAll(routerProps);
            VerifiableProperties routerVerifiableProperties = new VerifiableProperties(routerProperties);
            AccountService accountService = new InMemAccountService(false, true);
            Router router = new NonBlockingRouterFactory(routerVerifiableProperties, clusterMap, notificationSystem, getSSLFactoryIfRequired(routerVerifiableProperties), accountService).getRouter();
            checkBlobId(router, blobIdList.get(0), dataList.get(0));
            checkBlobId(router, blobIdList.get(1), dataList.get(1));
            checkBlobId(router, blobIdList.get(2), dataList.get(2));
            checkBlobId(router, blobIdList.get(3), dataList.get(3));
            checkBlobId(router, blobIdList.get(4), dataList.get(4));
            checkBlobId(router, blobIdList.get(5), dataList.get(5));
            router.close();
        }
        // fetch blob that does not exist
        // get blob properties
        ids = new ArrayList<BlobId>();
        mockPartitionId = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
        ids.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), propertyList.get(0).getAccountId(), propertyList.get(0).getContainerId(), mockPartitionId, false, BlobId.BlobDataType.DATACHUNK));
        partitionRequestInfoList.clear();
        partitionRequestInfo = new PartitionRequestInfo(mockPartitionId, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest5 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
        stream = channel3.sendAndReceive(getRequest5).getInputStream();
        GetResponse resp5 = GetResponse.readFrom(stream, clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp5.getError());
        assertEquals(ServerErrorCode.Blob_Not_Found, resp5.getPartitionResponseInfoList().get(0).getErrorCode());
        releaseNettyBufUnderneathStream(stream);
        // delete a blob and ensure it is propagated
        DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", blobIdList.get(0), System.currentTimeMillis());
        expectedTokenSize += getUpdateRecordSize(blobIdList.get(0), SubRecord.Type.DELETE);
        DataInputStream deleteResponseStream = channel1.sendAndReceive(deleteRequest).getInputStream();
        DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteResponseStream);
        releaseNettyBufUnderneathStream(deleteResponseStream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        notificationSystem.awaitBlobDeletions(blobIdList.get(0).getID());
        ids = new ArrayList<BlobId>();
        ids.add(blobIdList.get(0));
        partitionRequestInfoList.clear();
        partitionRequestInfo = new PartitionRequestInfo(partition, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest6 = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
        stream = channel3.sendAndReceive(getRequest6).getInputStream();
        GetResponse resp6 = GetResponse.readFrom(stream, clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp6.getError());
        assertEquals(ServerErrorCode.Blob_Deleted, resp6.getPartitionResponseInfoList().get(0).getErrorCode());
        releaseNettyBufUnderneathStream(stream);
        // get the data node to inspect replication tokens on
        DataNodeId dataNodeId = clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
        checkReplicaTokens(clusterMap, dataNodeId, expectedTokenSize - getUpdateRecordSize(blobIdList.get(0), SubRecord.Type.DELETE), "0");
        // Shut down server 1
        cluster.getServers().get(0).shutdown();
        cluster.getServers().get(0).awaitShutdown();
        // Add more data to server 2 and server 3. Recover server 1 and ensure it is completely replicated
        // put blob 7
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(6), propertyList.get(6), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(6)), propertyList.get(6).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(6) != null ? ByteBuffer.wrap(encryptionKeyList.get(6)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(6), blobIdList.get(6), encryptionKeyList.get(6) != null ? ByteBuffer.wrap(encryptionKeyList.get(6)) : null, ByteBuffer.wrap(usermetadata), dataList.get(6));
        putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
        response2 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 8
        putRequest3 = new PutRequest(1, "client1", blobIdList.get(7), propertyList.get(7), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(7)), propertyList.get(7).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(7) != null ? ByteBuffer.wrap(encryptionKeyList.get(7)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(7), blobIdList.get(7), encryptionKeyList.get(7) != null ? ByteBuffer.wrap(encryptionKeyList.get(7)) : null, ByteBuffer.wrap(usermetadata), dataList.get(7));
        putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
        response3 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // put blob 9
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(8), propertyList.get(8), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(8)), propertyList.get(8).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(8) != null ? ByteBuffer.wrap(encryptionKeyList.get(8)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(8), blobIdList.get(8), encryptionKeyList.get(8) != null ? ByteBuffer.wrap(encryptionKeyList.get(8)) : null, ByteBuffer.wrap(usermetadata), dataList.get(8));
        putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
        response2 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 10
        putRequest3 = new PutRequest(1, "client1", blobIdList.get(9), propertyList.get(9), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(9)), propertyList.get(9).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(9) != null ? ByteBuffer.wrap(encryptionKeyList.get(9)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(9), blobIdList.get(9), encryptionKeyList.get(9) != null ? ByteBuffer.wrap(encryptionKeyList.get(9)) : null, ByteBuffer.wrap(usermetadata), dataList.get(9));
        putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
        response3 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // put blob 11
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(10), propertyList.get(10), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(10)), propertyList.get(10).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(10) != null ? ByteBuffer.wrap(encryptionKeyList.get(10)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(10), blobIdList.get(10), encryptionKeyList.get(10) != null ? ByteBuffer.wrap(encryptionKeyList.get(10)) : null, ByteBuffer.wrap(usermetadata), dataList.get(10));
        putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
        response2 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        checkTtlUpdateStatus(channel2, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), false, getExpiryTimeMs(propertyList.get(10)));
        updateBlobTtl(channel2, blobIdList.get(10), cluster.time.milliseconds());
        expectedTokenSize += getUpdateRecordSize(blobIdList.get(10), SubRecord.Type.TTL_UPDATE);
        checkTtlUpdateStatus(channel2, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), true, Utils.Infinite_Time);
        cluster.reinitServer(0);
        // wait for server to recover
        notificationSystem.awaitBlobCreations(blobIdList.get(6).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(7).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(8).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(9).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(10).getID());
        notificationSystem.awaitBlobUpdates(blobIdList.get(10).getID(), UpdateType.TTL_UPDATE);
        channel1.disconnect();
        channel1.connect();
        // get blob
        try {
            checkBlobContent(clusterMap, blobIdList.get(1), channel1, dataList.get(1), encryptionKeyList.get(1));
            checkBlobContent(clusterMap, blobIdList.get(2), channel1, dataList.get(2), encryptionKeyList.get(2));
            checkBlobContent(clusterMap, blobIdList.get(3), channel1, dataList.get(3), encryptionKeyList.get(3));
            checkBlobContent(clusterMap, blobIdList.get(4), channel1, dataList.get(4), encryptionKeyList.get(4));
            checkBlobContent(clusterMap, blobIdList.get(5), channel1, dataList.get(5), encryptionKeyList.get(5));
            checkBlobContent(clusterMap, blobIdList.get(6), channel1, dataList.get(6), encryptionKeyList.get(6));
            checkBlobContent(clusterMap, blobIdList.get(7), channel1, dataList.get(7), encryptionKeyList.get(7));
            checkBlobContent(clusterMap, blobIdList.get(8), channel1, dataList.get(8), encryptionKeyList.get(8));
            checkBlobContent(clusterMap, blobIdList.get(9), channel1, dataList.get(9), encryptionKeyList.get(9));
            checkBlobContent(clusterMap, blobIdList.get(10), channel1, dataList.get(10), encryptionKeyList.get(10));
        } catch (MessageFormatException e) {
            fail();
        }
        // check that the ttl update went through
        checkTtlUpdateStatus(channel1, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), true, Utils.Infinite_Time);
        // Shutdown server 1. Remove all its data from all mount path. Recover server 1 and ensure node is built
        cluster.getServers().get(0).shutdown();
        cluster.getServers().get(0).awaitShutdown();
        File mountFile = new File(clusterMap.getReplicaIds(dataNodeId).get(0).getMountPath());
        for (File toDelete : Objects.requireNonNull(mountFile.listFiles())) {
            deleteFolderContent(toDelete, true);
        }
        notificationSystem.decrementCreatedReplica(blobIdList.get(1).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(2).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(3).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(4).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(5).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementUpdatedReplica(blobIdList.get(5).getID(), dataNodeId.getHostname(), dataNodeId.getPort(), UpdateType.TTL_UPDATE);
        notificationSystem.decrementCreatedReplica(blobIdList.get(6).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(7).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(8).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(9).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(10).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementUpdatedReplica(blobIdList.get(10).getID(), dataNodeId.getHostname(), dataNodeId.getPort(), UpdateType.TTL_UPDATE);
        cluster.reinitServer(0);
        notificationSystem.awaitBlobCreations(blobIdList.get(1).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(2).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(3).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(4).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(5).getID());
        notificationSystem.awaitBlobUpdates(blobIdList.get(5).getID(), UpdateType.TTL_UPDATE);
        notificationSystem.awaitBlobCreations(blobIdList.get(6).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(7).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(8).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(9).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(10).getID());
        notificationSystem.awaitBlobUpdates(blobIdList.get(10).getID(), UpdateType.TTL_UPDATE);
        channel1.disconnect();
        channel1.connect();
        // get blob
        try {
            checkBlobContent(clusterMap, blobIdList.get(1), channel1, dataList.get(1), encryptionKeyList.get(1));
            checkBlobContent(clusterMap, blobIdList.get(2), channel1, dataList.get(2), encryptionKeyList.get(2));
            checkBlobContent(clusterMap, blobIdList.get(3), channel1, dataList.get(3), encryptionKeyList.get(3));
            checkBlobContent(clusterMap, blobIdList.get(4), channel1, dataList.get(4), encryptionKeyList.get(4));
            checkBlobContent(clusterMap, blobIdList.get(5), channel1, dataList.get(5), encryptionKeyList.get(5));
            checkBlobContent(clusterMap, blobIdList.get(6), channel1, dataList.get(6), encryptionKeyList.get(6));
            checkBlobContent(clusterMap, blobIdList.get(7), channel1, dataList.get(7), encryptionKeyList.get(7));
            checkBlobContent(clusterMap, blobIdList.get(8), channel1, dataList.get(8), encryptionKeyList.get(8));
            checkBlobContent(clusterMap, blobIdList.get(9), channel1, dataList.get(9), encryptionKeyList.get(9));
            checkBlobContent(clusterMap, blobIdList.get(10), channel1, dataList.get(10), encryptionKeyList.get(10));
        } catch (MessageFormatException e) {
            fail();
        }
        // check that the ttl updates are present
        checkTtlUpdateStatus(channel1, clusterMap, blobIdFactory, blobIdList.get(5), dataList.get(5), true, Utils.Infinite_Time);
        checkTtlUpdateStatus(channel1, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), true, Utils.Infinite_Time);
        channel1.disconnect();
        channel2.disconnect();
        channel3.disconnect();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
Also used: ArrayList(java.util.ArrayList), PutResponse(com.github.ambry.protocol.PutResponse), BlobProperties(com.github.ambry.messageformat.BlobProperties), Properties(java.util.Properties), VerifiableProperties(com.github.ambry.config.VerifiableProperties), InMemAccountService(com.github.ambry.account.InMemAccountService), BlobAll(com.github.ambry.messageformat.BlobAll), GetRequest(com.github.ambry.protocol.GetRequest), BlobData(com.github.ambry.messageformat.BlobData), MessageFormatException(com.github.ambry.messageformat.MessageFormatException), NonBlockingRouterFactory(com.github.ambry.router.NonBlockingRouterFactory), MockPartitionId(com.github.ambry.clustermap.MockPartitionId), PutRequest(com.github.ambry.protocol.PutRequest), Router(com.github.ambry.router.Router), ConnectedChannel(com.github.ambry.network.ConnectedChannel), PartitionId(com.github.ambry.clustermap.PartitionId), NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream), DataInputStream(java.io.DataInputStream), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), GetResponse(com.github.ambry.protocol.GetResponse), ByteBuffer(java.nio.ByteBuffer), IOException(java.io.IOException), RouterException(com.github.ambry.router.RouterException), ExecutionException(java.util.concurrent.ExecutionException), TimeoutException(java.util.concurrent.TimeoutException), BlobIdFactory(com.github.ambry.commons.BlobIdFactory), DeleteResponse(com.github.ambry.protocol.DeleteResponse), BlobId(com.github.ambry.commons.BlobId), AccountService(com.github.ambry.account.AccountService), DeleteRequest(com.github.ambry.protocol.DeleteRequest), DataNodeId(com.github.ambry.clustermap.DataNodeId), MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId), File(java.io.File), MockClusterMap(com.github.ambry.clustermap.MockClusterMap)
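
The delete-then-get sequence in the middle of this test (delete blob 1, wait for propagation, then expect Blob_Deleted on a plain get) is a pattern worth seeing on its own. A condensed sketch using the same APIs as the test above; the names and the fixed correlation id are illustrative, the GetOption and MessageFormatFlags package names are assumed, and the releaseNettyBufUnderneathStream cleanup the test performs is omitted for brevity.

import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.commons.BlobId;
import com.github.ambry.commons.ServerErrorCode;
import com.github.ambry.messageformat.MessageFormatFlags;
import com.github.ambry.network.ConnectedChannel;
import com.github.ambry.protocol.DeleteRequest;
import com.github.ambry.protocol.DeleteResponse;
import com.github.ambry.protocol.GetOption;
import com.github.ambry.protocol.GetRequest;
import com.github.ambry.protocol.GetResponse;
import com.github.ambry.protocol.PartitionRequestInfo;

import java.io.DataInputStream;
import java.util.Collections;

import static org.junit.Assert.assertEquals;

final class DeleteThenGetSketch {
    /**
     * Deletes a blob and then confirms that a plain get reports Blob_Deleted, the same sequence the
     * test above uses after deleting its first blob.
     */
    static void deleteAndVerify(ConnectedChannel channel, ClusterMap clusterMap, BlobId blobId) throws Exception {
        DeleteRequest deleteRequest = new DeleteRequest(1, "sketch-client", blobId, System.currentTimeMillis());
        DataInputStream deleteStream = channel.sendAndReceive(deleteRequest).getInputStream();
        DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteStream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());

        PartitionRequestInfo requestInfo =
                new PartitionRequestInfo(blobId.getPartition(), Collections.singletonList(blobId));
        GetRequest getRequest = new GetRequest(1, "sketch-client", MessageFormatFlags.Blob,
                Collections.singletonList(requestInfo), GetOption.None);
        DataInputStream getStream = channel.sendAndReceive(getRequest).getInputStream();
        GetResponse getResponse = GetResponse.readFrom(getStream, clusterMap);
        // The overall response succeeds; the per-partition error code reports the deleted blob.
        assertEquals(ServerErrorCode.No_Error, getResponse.getError());
        assertEquals(ServerErrorCode.Blob_Deleted, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
    }
}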

Aggregations

GetRequest (com.github.ambry.protocol.GetRequest): 31
GetResponse (com.github.ambry.protocol.GetResponse): 29
PartitionRequestInfo (com.github.ambry.protocol.PartitionRequestInfo): 29
BlobId (com.github.ambry.commons.BlobId): 22
ArrayList (java.util.ArrayList): 22
DataInputStream (java.io.DataInputStream): 19
BlobProperties (com.github.ambry.messageformat.BlobProperties): 16
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 11
ConnectedChannel (com.github.ambry.network.ConnectedChannel): 11
NettyByteBufDataInputStream (com.github.ambry.utils.NettyByteBufDataInputStream): 11
IOException (java.io.IOException): 11
DeleteResponse (com.github.ambry.protocol.DeleteResponse): 10
PutRequest (com.github.ambry.protocol.PutRequest): 10
PutResponse (com.github.ambry.protocol.PutResponse): 10
ByteBuffer (java.nio.ByteBuffer): 10
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 9
PartitionId (com.github.ambry.clustermap.PartitionId): 9
BlobData (com.github.ambry.messageformat.BlobData): 9
MessageFormatException (com.github.ambry.messageformat.MessageFormatException): 9
DeleteRequest (com.github.ambry.protocol.DeleteRequest): 9