Example 36 with DataNodeId

use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.

the class MockReadableStreamChannel method testSlippedPutsSuccess.

/**
 * Tests slipped puts scenario. That is, the first attempt at putting a chunk ends up in a failure,
 * but a second attempt is made by the PutManager which results in a success.
 */
@Test
public void testSlippedPutsSuccess() throws Exception {
    // Choose a simple blob that results in a single chunk, so server state can be set precisely.
    // Slipped puts concern a chunk rather than a whole operation (every chunk is slipped-put on
    // its own), so testing with simple blobs is sufficient.
    requestAndResultsList.clear();
    requestAndResultsList.add(new RequestAndResult(chunkSize));
    List<DataNodeId> dataNodeIds = mockClusterMap.getDataNodeIds();
    // Set the state of the mock servers so that the first request each receives fails and
    // later ones succeed. With 3 nodes per DC, slipped puts for a partition go to the same
    // set of nodes, so this arrangement makes the first request received by every node fail.
    // Note: the assumption is that there are 3 nodes per DC.
    List<ServerErrorCode> serverErrorList = new ArrayList<>();
    serverErrorList.add(ServerErrorCode.Unknown_Error);
    serverErrorList.add(ServerErrorCode.No_Error);
    for (DataNodeId dataNodeId : dataNodeIds) {
        MockServer server = mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort());
        server.setServerErrors(serverErrorList);
    }
    submitPutsAndAssertSuccess(true);
}
Also used : ArrayList(java.util.ArrayList) DataNodeId(com.github.ambry.clustermap.DataNodeId) ServerErrorCode(com.github.ambry.server.ServerErrorCode) Test(org.junit.Test)
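
The retry hinges on each mock server consuming its configured errors in FIFO order: the first request hits Unknown_Error, the slipped put hits No_Error. Below is a minimal sketch of that queue behavior; ErrorQueueDemo and nextResponseError are hypothetical stand-ins, and the real MockServer API may differ.

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;

public class ErrorQueueDemo {
    // Hypothetical stand-in for MockServer's per-node error queue.
    enum ServerErrorCode { Unknown_Error, No_Error }

    private final Queue<ServerErrorCode> errors = new ArrayDeque<>();

    void setServerErrors(List<ServerErrorCode> list) {
        errors.clear();
        errors.addAll(list);
    }

    // Each incoming request consumes the next error; an empty queue means success.
    ServerErrorCode nextResponseError() {
        ServerErrorCode e = errors.poll();
        return e == null ? ServerErrorCode.No_Error : e;
    }

    public static void main(String[] args) {
        ErrorQueueDemo server = new ErrorQueueDemo();
        server.setServerErrors(List.of(ServerErrorCode.Unknown_Error, ServerErrorCode.No_Error));
        System.out.println(server.nextResponseError()); // Unknown_Error: first put attempt fails
        System.out.println(server.nextResponseError()); // No_Error: the slipped put succeeds
        System.out.println(server.nextResponseError()); // queue drained: later requests succeed
    }
}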

Example 37 with DataNodeId

use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.

the class ServerTestUtil method undeleteCornerCasesTest.

static void undeleteCornerCasesTest(MockCluster cluster, PortType portType, SSLConfig clientSSLConfig1, SSLConfig clientSSLConfig2, SSLConfig clientSSLConfig3, SSLSocketFactory clientSSLSocketFactory1, SSLSocketFactory clientSSLSocketFactory2, SSLSocketFactory clientSSLSocketFactory3, MockNotificationSystem notificationSystem, Properties routerProps, boolean testEncryption) {
    MockClusterMap clusterMap = cluster.getClusterMap();
    byte[] userMetadata = new byte[1000];
    byte[] data = new byte[31870];
    byte[] encryptionKey = new byte[100];
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    BlobProperties properties = new BlobProperties(31870, "serviceid1", accountId, containerId, testEncryption, cluster.time.milliseconds());
    TestUtils.RANDOM.nextBytes(userMetadata);
    TestUtils.RANDOM.nextBytes(data);
    if (testEncryption) {
        TestUtils.RANDOM.nextBytes(encryptionKey);
    }
    short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
    Map<String, List<DataNodeId>> dataNodesPerDC = clusterMap.getDataNodes().stream().collect(Collectors.groupingBy(DataNodeId::getDatacenterName));
    Map<String, Pair<SSLConfig, SSLSocketFactory>> sslSettingPerDC = new HashMap<>();
    sslSettingPerDC.put("DC1", new Pair<>(clientSSLConfig1, clientSSLSocketFactory1));
    sslSettingPerDC.put("DC2", new Pair<>(clientSSLConfig2, clientSSLSocketFactory2));
    sslSettingPerDC.put("DC3", new Pair<>(clientSSLConfig3, clientSSLSocketFactory3));
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
    DataNodeId dataNodeId = dataNodesPerDC.get("DC1").get(0);
    Router router = null;
    try {
        Properties routerProperties = getRouterProps("DC1");
        routerProperties.putAll(routerProps);
        VerifiableProperties routerVerifiableProps = new VerifiableProperties(routerProperties);
        AccountService accountService = new InMemAccountService(false, true);
        router = new NonBlockingRouterFactory(routerVerifiableProps, clusterMap, new MockNotificationSystem(clusterMap), getSSLFactoryIfRequired(routerVerifiableProps), accountService).getRouter();
        // channels to all datanodes
        List<ConnectedChannel> channels = new ArrayList<>();
        for (Map.Entry<String, List<DataNodeId>> entry : dataNodesPerDC.entrySet()) {
            Pair<SSLConfig, SSLSocketFactory> pair = sslSettingPerDC.get(entry.getKey());
            for (DataNodeId node : entry.getValue()) {
                ConnectedChannel connectedChannel = getBlockingChannelBasedOnPortType(portType, node, pair.getSecond(), pair.getFirst());
                connectedChannel.connect();
                channels.add(connectedChannel);
            }
        }
        // ////////////////////////////////////////////////////
        // Corner case 1: When only one datacenter has delete
        // ////////////////////////////////////////////////////
        BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
        ConnectedChannel channel = getBlockingChannelBasedOnPortType(portType, dataNodeId, clientSSLSocketFactory1, clientSSLConfig1);
        channel.connect();
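        // Put blobId1 directly on one data node; the notification system below confirms the creation before proceeding.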
        PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
        DataInputStream putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
        PutResponse response = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response.getError());
        notificationSystem.awaitBlobCreations(blobId1.toString());
        // Now stop replication for this partition.
        PartitionId partitionId = blobId1.getPartition();
        controlReplicationForPartition(channels, partitionId, false);
        // Now send the delete to two data nodes in the same DC
        List<DataNodeId> toBeDeleteDataNodes = dataNodesPerDC.values().stream().findFirst().get();
        Pair<SSLConfig, SSLSocketFactory> pair = sslSettingPerDC.get(toBeDeleteDataNodes.get(0).getDatacenterName());
        ConnectedChannel channel1 = getBlockingChannelBasedOnPortType(portType, toBeDeleteDataNodes.get(0), pair.getSecond(), pair.getFirst());
        channel1.connect();
        ConnectedChannel channel2 = getBlockingChannelBasedOnPortType(portType, toBeDeleteDataNodes.get(1), pair.getSecond(), pair.getFirst());
        channel2.connect();
        DeleteRequest deleteRequest1 = new DeleteRequest(1, "deleteClient", blobId1, System.currentTimeMillis());
        DataInputStream stream = channel1.sendAndReceive(deleteRequest1).getInputStream();
        DeleteResponse deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        DeleteRequest deleteRequest2 = new DeleteRequest(1, "deleteClient", blobId1, deleteRequest1.getDeletionTimeInMs());
        stream = channel2.sendAndReceive(deleteRequest2).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        // Now send the undelete operation through the router; it should fail with a BlobNotDeleted error.
        Future<Void> future = router.undeleteBlob(blobId1.toString(), "service");
        try {
            future.get();
            fail("Undelete blob " + blobId1.toString() + " should fail");
        } catch (ExecutionException e) {
            assertTrue(e.getCause() instanceof RouterException);
            assertEquals(RouterErrorCode.BlobNotDeleted, ((RouterException) e.getCause()).getErrorCode());
        }
        // Now check whether data node 1 or data node 2 has an undelete. If one does, the undelete
        // will replicate; otherwise, the delete will replicate.
        List<PartitionRequestInfo> partitionRequestInfoList = getPartitionRequestInfoListFromBlobId(blobId1);
        boolean hasUndelete = false;
        for (ConnectedChannel connectedChannel : new ConnectedChannel[] { channel1, channel2 }) {
            GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
            stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
            GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
            assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
            MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
            hasUndelete = getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion() == (short) 1;
            if (hasUndelete) {
                break;
            }
        }
        releaseNettyBufUnderneathStream(stream);
        // Now restart the replication
        controlReplicationForPartition(channels, partitionId, true);
        if (hasUndelete) {
            notificationSystem.awaitBlobUndeletes(blobId1.toString());
        } else {
            notificationSystem.awaitBlobDeletions(blobId1.toString());
        }
        for (ConnectedChannel connectedChannel : channels) {
            GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
            stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
            GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
            releaseNettyBufUnderneathStream(stream);
            assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
            MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
            if (hasUndelete) {
                assertEquals((short) 1, getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion());
                assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
                assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
            } else {
                assertEquals((short) 0, getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion());
                assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
                assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
            }
        }
        // ///////////////////////////////////////////////////////////
        // Corner case 2: two data nodes have different life versions
        // //////////////////////////////////////////////////////////
        BlobId blobId2 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
        putRequest = new PutRequest(1, "client1", blobId2, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
        putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
        response = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response.getError());
        notificationSystem.awaitBlobCreations(blobId2.toString());
        // Now delete this blob on all servers.
        DeleteRequest deleteRequest = new DeleteRequest(1, "deleteClient", blobId2, System.currentTimeMillis());
        stream = channel.sendAndReceive(deleteRequest).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        notificationSystem.awaitBlobDeletions(blobId2.toString());
        // Now stop the replication
        partitionId = blobId2.getPartition();
        controlReplicationForPartition(channels, partitionId, false);
        // Now send the undelete to two data nodes in the same DC and then send deletes
        UndeleteRequest undeleteRequest = new UndeleteRequest(1, "undeleteClient", blobId2, System.currentTimeMillis());
        stream = channel1.sendAndReceive(undeleteRequest).getInputStream();
        UndeleteResponse undeleteResponse = UndeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
        assertEquals((short) 1, undeleteResponse.getLifeVersion());
        undeleteRequest = new UndeleteRequest(1, "undeleteClient", blobId2, undeleteRequest.getOperationTimeMs());
        stream = channel2.sendAndReceive(undeleteRequest).getInputStream();
        undeleteResponse = UndeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
        assertEquals((short) 1, undeleteResponse.getLifeVersion());
        deleteRequest1 = new DeleteRequest(1, "deleteClient", blobId2, System.currentTimeMillis());
        stream = channel1.sendAndReceive(deleteRequest1).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        deleteRequest2 = new DeleteRequest(1, "deleteClient", blobId2, deleteRequest1.getDeletionTimeInMs());
        stream = channel2.sendAndReceive(deleteRequest2).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        // Now send the undelete operation through the router; it should fail with a LifeVersionConflict error.
        future = router.undeleteBlob(blobId2.toString(), "service");
        try {
            future.get();
            fail("Undelete blob " + blobId2.toString() + " should fail");
        } catch (ExecutionException e) {
            assertTrue(e.getCause() instanceof RouterException);
            assertEquals(RouterErrorCode.LifeVersionConflict, ((RouterException) e.getCause()).getErrorCode());
        }
        // Now restart the replication
        controlReplicationForPartition(channels, partitionId, true);
        notificationSystem.awaitBlobUndeletes(blobId2.toString());
        // Now that replication has resumed, the undelete at lifeVersion 2 will eventually replicate to all servers.
        partitionRequestInfoList = getPartitionRequestInfoListFromBlobId(blobId2);
        for (ConnectedChannel connectedChannel : channels) {
            // Even if the notificationSystem acknowledged an undelete, the acknowledgment might have
            // been triggered by the undelete at lifeVersion 1, so poll in a loop with a timeout.
            long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
            while (true) {
                GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
                stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
                GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
                assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
                MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
                if (getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion() == 2) {
                    assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
                    assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
                    break;
                } else {
                    Thread.sleep(1000);
                    if (System.currentTimeMillis() > deadline) {
                        throw new TimeoutException("Failed to get blob " + blobId2 + " at lifeVersion 2 from " + connectedChannel.getRemoteHost());
                    }
                }
            }
        }
        releaseNettyBufUnderneathStream(stream);
        for (ConnectedChannel connectedChannel : channels) {
            connectedChannel.disconnect();
        }
        channel1.disconnect();
        channel2.disconnect();
        channel.disconnect();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    } finally {
        if (router != null) {
            try {
                router.close();
            } catch (Exception e) {
                // ignore errors while closing the router
            }
        }
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) UndeleteRequest(com.github.ambry.protocol.UndeleteRequest) UndeleteResponse(com.github.ambry.protocol.UndeleteResponse) InMemAccountService(com.github.ambry.account.InMemAccountService) GetRequest(com.github.ambry.protocol.GetRequest) List(java.util.List) SSLSocketFactory(javax.net.ssl.SSLSocketFactory) TimeoutException(java.util.concurrent.TimeoutException) VerifiableProperties(com.github.ambry.config.VerifiableProperties) Router(com.github.ambry.router.Router) ConnectedChannel(com.github.ambry.network.ConnectedChannel) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) DeleteResponse(com.github.ambry.protocol.DeleteResponse) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) AccountService(com.github.ambry.account.AccountService) Map(java.util.Map) ClusterMap(com.github.ambry.clustermap.ClusterMap) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) PutResponse(com.github.ambry.protocol.PutResponse) ExecutionException(java.util.concurrent.ExecutionException) Pair(com.github.ambry.utils.Pair) NonBlockingRouterFactory(com.github.ambry.router.NonBlockingRouterFactory) SSLConfig(com.github.ambry.config.SSLConfig) RouterException(com.github.ambry.router.RouterException) PutRequest(com.github.ambry.protocol.PutRequest) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) DataInputStream(java.io.DataInputStream) PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo) GetResponse(com.github.ambry.protocol.GetResponse) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) BlobId(com.github.ambry.commons.BlobId) DeleteRequest(com.github.ambry.protocol.DeleteRequest)
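
Both corner cases turn on Ambry's lifeVersion bookkeeping: a put creates the blob at lifeVersion 0, and each undelete moves it to the next life version. A minimal illustrative model of that arithmetic follows; LifeVersionDemo is a hypothetical class for this sketch, not an Ambry API.

public class LifeVersionDemo {
    private short lifeVersion = 0; // a put creates the blob at lifeVersion 0
    private boolean deleted = false;

    void delete() {
        deleted = true; // delete keeps the current lifeVersion
    }

    void undelete() {
        deleted = false;
        lifeVersion++; // each undelete bumps the blob to the next life version
    }

    public static void main(String[] args) {
        LifeVersionDemo blob = new LifeVersionDemo();
        blob.delete();
        blob.undelete(); // lifeVersion 1, as the direct undeletes above assert
        blob.delete();
        blob.undelete(); // lifeVersion 2, the value the final polling loop waits for
        System.out.println(blob.lifeVersion); // prints 2
    }
}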

Example 38 with DataNodeId

use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.

the class ServerTestUtil method endToEndReplicationWithMultiNodeSinglePartitionTest.

static void endToEndReplicationWithMultiNodeSinglePartitionTest(String routerDatacenter, int interestedDataNodePortNumber, Port dataNode1Port, Port dataNode2Port, Port dataNode3Port, MockCluster cluster, SSLConfig clientSSLConfig1, SSLSocketFactory clientSSLSocketFactory1, MockNotificationSystem notificationSystem, Properties routerProps, boolean testEncryption) {
    // interestedDataNodePortNumber is used to locate the datanode and hence has to be the plaintext port
    try {
        // The header size of a LogSegment. Hard-coding it here breaks the Log abstraction, but the
        // test cases need the number. It also assumes this partition has only one log segment: if
        // more operations are added and the log segment capacity is exceeded, this number will have
        // to be increased.
        int expectedTokenSize = 18;
        MockClusterMap clusterMap = cluster.getClusterMap();
        BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
        ArrayList<BlobProperties> propertyList = new ArrayList<>();
        ArrayList<BlobId> blobIdList = new ArrayList<>();
        ArrayList<byte[]> dataList = new ArrayList<>();
        ArrayList<byte[]> encryptionKeyList = new ArrayList<>();
        byte[] usermetadata = new byte[1000];
        TestUtils.RANDOM.nextBytes(usermetadata);
        PartitionId partition = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
        for (int i = 0; i < 11; i++) {
            short accountId = Utils.getRandomShort(TestUtils.RANDOM);
            short containerId = Utils.getRandomShort(TestUtils.RANDOM);
            propertyList.add(new BlobProperties(1000, "serviceid1", null, null, false, TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, testEncryption, null, null, null));
            blobIdList.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), accountId, containerId, partition, false, BlobId.BlobDataType.DATACHUNK));
            dataList.add(TestUtils.getRandomBytes(1000));
            if (testEncryption) {
                encryptionKeyList.add(TestUtils.getRandomBytes(128));
            } else {
                encryptionKeyList.add(null);
            }
        }
        // put blob 1
        PutRequest putRequest = new PutRequest(1, "client1", blobIdList.get(0), propertyList.get(0), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(0)), propertyList.get(0).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(0) != null ? ByteBuffer.wrap(encryptionKeyList.get(0)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(0), blobIdList.get(0), encryptionKeyList.get(0) != null ? ByteBuffer.wrap(encryptionKeyList.get(0)) : null, ByteBuffer.wrap(usermetadata), dataList.get(0));
        ConnectedChannel channel1 = getBlockingChannelBasedOnPortType(dataNode1Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
        ConnectedChannel channel2 = getBlockingChannelBasedOnPortType(dataNode2Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
        ConnectedChannel channel3 = getBlockingChannelBasedOnPortType(dataNode3Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
        channel1.connect();
        channel2.connect();
        channel3.connect();
        DataInputStream putResponseStream = channel1.sendAndReceive(putRequest).getInputStream();
        PutResponse response = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response.getError());
        // put blob 2
        PutRequest putRequest2 = new PutRequest(1, "client1", blobIdList.get(1), propertyList.get(1), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(1)), propertyList.get(1).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(1) != null ? ByteBuffer.wrap(encryptionKeyList.get(1)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(1), blobIdList.get(1), encryptionKeyList.get(1) != null ? ByteBuffer.wrap(encryptionKeyList.get(1)) : null, ByteBuffer.wrap(usermetadata), dataList.get(1));
        putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
        PutResponse response2 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 3
        PutRequest putRequest3 = new PutRequest(1, "client1", blobIdList.get(2), propertyList.get(2), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(2)), propertyList.get(2).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(2) != null ? ByteBuffer.wrap(encryptionKeyList.get(2)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(2), blobIdList.get(2), encryptionKeyList.get(2) != null ? ByteBuffer.wrap(encryptionKeyList.get(2)) : null, ByteBuffer.wrap(usermetadata), dataList.get(2));
        putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
        PutResponse response3 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // put blob 4
        putRequest = new PutRequest(1, "client1", blobIdList.get(3), propertyList.get(3), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(3)), propertyList.get(3).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(3) != null ? ByteBuffer.wrap(encryptionKeyList.get(3)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(3), blobIdList.get(3), encryptionKeyList.get(3) != null ? ByteBuffer.wrap(encryptionKeyList.get(3)) : null, ByteBuffer.wrap(usermetadata), dataList.get(3));
        putResponseStream = channel1.sendAndReceive(putRequest).getInputStream();
        response = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response.getError());
        // put blob 5
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(4), propertyList.get(4), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(4)), propertyList.get(4).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(4) != null ? ByteBuffer.wrap(encryptionKeyList.get(4)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(4), blobIdList.get(4), encryptionKeyList.get(4) != null ? ByteBuffer.wrap(encryptionKeyList.get(4)) : null, ByteBuffer.wrap(usermetadata), dataList.get(4));
        putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
        response2 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 6
        putRequest3 = new PutRequest(1, "client1", blobIdList.get(5), propertyList.get(5), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(5)), propertyList.get(5).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(5) != null ? ByteBuffer.wrap(encryptionKeyList.get(5)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(5), blobIdList.get(5), encryptionKeyList.get(5) != null ? ByteBuffer.wrap(encryptionKeyList.get(5)) : null, ByteBuffer.wrap(usermetadata), dataList.get(5));
        putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
        response3 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // wait until replication completes
        notificationSystem.awaitBlobCreations(blobIdList.get(0).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(1).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(2).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(3).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(4).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(5).getID());
        checkTtlUpdateStatus(channel3, clusterMap, blobIdFactory, blobIdList.get(5), dataList.get(5), false, getExpiryTimeMs(propertyList.get(5)));
        updateBlobTtl(channel3, blobIdList.get(5), cluster.time.milliseconds());
        expectedTokenSize += getUpdateRecordSize(blobIdList.get(5), SubRecord.Type.TTL_UPDATE);
        checkTtlUpdateStatus(channel3, clusterMap, blobIdFactory, blobIdList.get(5), dataList.get(5), true, Utils.Infinite_Time);
        notificationSystem.awaitBlobUpdates(blobIdList.get(5).getID(), UpdateType.TTL_UPDATE);
        // get blob properties
        ArrayList<BlobId> ids = new ArrayList<BlobId>();
        MockPartitionId mockPartitionId = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
        ids.add(blobIdList.get(2));
        ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
        PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(mockPartitionId, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
        DataInputStream stream = channel2.sendAndReceive(getRequest1).getInputStream();
        GetResponse resp1 = GetResponse.readFrom(stream, clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp1.getError());
        assertEquals(ServerErrorCode.No_Error, resp1.getPartitionResponseInfoList().get(0).getErrorCode());
        try {
            BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
            assertEquals(1000, propertyOutput.getBlobSize());
            assertEquals("serviceid1", propertyOutput.getServiceId());
            assertEquals("AccountId mismatch", propertyList.get(2).getAccountId(), propertyOutput.getAccountId());
            assertEquals("ContainerId mismatch", propertyList.get(2).getContainerId(), propertyOutput.getContainerId());
            assertEquals("IsEncrypted mismatch", propertyList.get(2).isEncrypted(), propertyOutput.isEncrypted());
        } catch (MessageFormatException e) {
            fail();
        }
        // get user metadata
        ids.clear();
        ids.add(blobIdList.get(1));
        GetRequest getRequest2 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
        stream = channel1.sendAndReceive(getRequest2).getInputStream();
        GetResponse resp2 = GetResponse.readFrom(stream, clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp2.getError());
        assertEquals(ServerErrorCode.No_Error, resp2.getPartitionResponseInfoList().get(0).getErrorCode());
        try {
            ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp2.getInputStream());
            assertArrayEquals(usermetadata, userMetadataOutput.array());
            if (testEncryption) {
                assertNotNull("MessageMetadata should not have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(1), resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
            } else {
                assertNull("MessageMetadata should have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
            }
        } catch (MessageFormatException e) {
            fail();
        }
        releaseNettyBufUnderneathStream(stream);
        // get blob
        ids.clear();
        ids.add(blobIdList.get(0));
        GetRequest getRequest3 = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
        stream = channel3.sendAndReceive(getRequest3).getInputStream();
        GetResponse resp3 = GetResponse.readFrom(stream, clusterMap);
        try {
            BlobData blobData = MessageFormatRecord.deserializeBlob(resp3.getInputStream());
            byte[] blobout = getBlobDataAndRelease(blobData);
            assertArrayEquals(dataList.get(0), blobout);
            if (testEncryption) {
                assertNotNull("MessageMetadata should not have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(0), resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
            } else {
                assertNull("MessageMetadata should have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
            }
        } catch (MessageFormatException e) {
            fail();
        }
        releaseNettyBufUnderneathStream(stream);
        // get blob all
        ids.clear();
        ids.add(blobIdList.get(0));
        GetRequest getRequest4 = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
        stream = channel1.sendAndReceive(getRequest4).getInputStream();
        GetResponse resp4 = GetResponse.readFrom(stream, clusterMap);
        try {
            BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp4.getInputStream(), blobIdFactory);
            byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
            assertArrayEquals(dataList.get(0), blobout);
            if (testEncryption) {
                assertNotNull("MessageMetadata should not have been null", blobAll.getBlobEncryptionKey());
                assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(0), blobAll.getBlobEncryptionKey().array());
            } else {
                assertNull("MessageMetadata should have been null", blobAll.getBlobEncryptionKey());
            }
        } catch (MessageFormatException e) {
            fail();
        }
        releaseNettyBufUnderneathStream(stream);
        if (!testEncryption) {
            // get blob data
            // Use router to get the blob
            Properties routerProperties = getRouterProps(routerDatacenter);
            routerProperties.putAll(routerProps);
            VerifiableProperties routerVerifiableProperties = new VerifiableProperties(routerProperties);
            AccountService accountService = new InMemAccountService(false, true);
            Router router = new NonBlockingRouterFactory(routerVerifiableProperties, clusterMap, notificationSystem, getSSLFactoryIfRequired(routerVerifiableProperties), accountService).getRouter();
            checkBlobId(router, blobIdList.get(0), dataList.get(0));
            checkBlobId(router, blobIdList.get(1), dataList.get(1));
            checkBlobId(router, blobIdList.get(2), dataList.get(2));
            checkBlobId(router, blobIdList.get(3), dataList.get(3));
            checkBlobId(router, blobIdList.get(4), dataList.get(4));
            checkBlobId(router, blobIdList.get(5), dataList.get(5));
            router.close();
        }
        // fetch blob that does not exist
        // get blob properties
        ids = new ArrayList<BlobId>();
        mockPartitionId = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
        ids.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), propertyList.get(0).getAccountId(), propertyList.get(0).getContainerId(), mockPartitionId, false, BlobId.BlobDataType.DATACHUNK));
        partitionRequestInfoList.clear();
        partitionRequestInfo = new PartitionRequestInfo(mockPartitionId, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest5 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
        stream = channel3.sendAndReceive(getRequest5).getInputStream();
        GetResponse resp5 = GetResponse.readFrom(stream, clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp5.getError());
        assertEquals(ServerErrorCode.Blob_Not_Found, resp5.getPartitionResponseInfoList().get(0).getErrorCode());
        releaseNettyBufUnderneathStream(stream);
        // delete a blob and ensure it is propagated
        DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", blobIdList.get(0), System.currentTimeMillis());
        expectedTokenSize += getUpdateRecordSize(blobIdList.get(0), SubRecord.Type.DELETE);
        DataInputStream deleteResponseStream = channel1.sendAndReceive(deleteRequest).getInputStream();
        DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteResponseStream);
        releaseNettyBufUnderneathStream(deleteResponseStream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        notificationSystem.awaitBlobDeletions(blobIdList.get(0).getID());
        ids = new ArrayList<BlobId>();
        ids.add(blobIdList.get(0));
        partitionRequestInfoList.clear();
        partitionRequestInfo = new PartitionRequestInfo(partition, ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest6 = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
        stream = channel3.sendAndReceive(getRequest6).getInputStream();
        GetResponse resp6 = GetResponse.readFrom(stream, clusterMap);
        assertEquals(ServerErrorCode.No_Error, resp6.getError());
        assertEquals(ServerErrorCode.Blob_Deleted, resp6.getPartitionResponseInfoList().get(0).getErrorCode());
        releaseNettyBufUnderneathStream(stream);
        // get the data node to inspect replication tokens on
        DataNodeId dataNodeId = clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
        checkReplicaTokens(clusterMap, dataNodeId, expectedTokenSize - getUpdateRecordSize(blobIdList.get(0), SubRecord.Type.DELETE), "0");
        // Shut down server 1
        cluster.getServers().get(0).shutdown();
        cluster.getServers().get(0).awaitShutdown();
        // Add more data to server 2 and server 3. Recover server 1 and ensure it is completely replicated
        // put blob 7
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(6), propertyList.get(6), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(6)), propertyList.get(6).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(6) != null ? ByteBuffer.wrap(encryptionKeyList.get(6)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(6), blobIdList.get(6), encryptionKeyList.get(6) != null ? ByteBuffer.wrap(encryptionKeyList.get(6)) : null, ByteBuffer.wrap(usermetadata), dataList.get(6));
        putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
        response2 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 8
        putRequest3 = new PutRequest(1, "client1", blobIdList.get(7), propertyList.get(7), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(7)), propertyList.get(7).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(7) != null ? ByteBuffer.wrap(encryptionKeyList.get(7)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(7), blobIdList.get(7), encryptionKeyList.get(7) != null ? ByteBuffer.wrap(encryptionKeyList.get(7)) : null, ByteBuffer.wrap(usermetadata), dataList.get(7));
        putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
        response3 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // put blob 9
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(8), propertyList.get(8), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(8)), propertyList.get(8).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(8) != null ? ByteBuffer.wrap(encryptionKeyList.get(8)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(8), blobIdList.get(8), encryptionKeyList.get(8) != null ? ByteBuffer.wrap(encryptionKeyList.get(8)) : null, ByteBuffer.wrap(usermetadata), dataList.get(8));
        putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
        response2 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        // put blob 10
        putRequest3 = new PutRequest(1, "client1", blobIdList.get(9), propertyList.get(9), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(9)), propertyList.get(9).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(9) != null ? ByteBuffer.wrap(encryptionKeyList.get(9)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(9), blobIdList.get(9), encryptionKeyList.get(9) != null ? ByteBuffer.wrap(encryptionKeyList.get(9)) : null, ByteBuffer.wrap(usermetadata), dataList.get(9));
        putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
        response3 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response3.getError());
        // put blob 11
        putRequest2 = new PutRequest(1, "client1", blobIdList.get(10), propertyList.get(10), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(10)), propertyList.get(10).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(10) != null ? ByteBuffer.wrap(encryptionKeyList.get(10)) : null);
        expectedTokenSize += getPutRecordSize(propertyList.get(10), blobIdList.get(10), encryptionKeyList.get(10) != null ? ByteBuffer.wrap(encryptionKeyList.get(10)) : null, ByteBuffer.wrap(usermetadata), dataList.get(10));
        putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
        response2 = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response2.getError());
        checkTtlUpdateStatus(channel2, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), false, getExpiryTimeMs(propertyList.get(10)));
        updateBlobTtl(channel2, blobIdList.get(10), cluster.time.milliseconds());
        expectedTokenSize += getUpdateRecordSize(blobIdList.get(10), SubRecord.Type.TTL_UPDATE);
        checkTtlUpdateStatus(channel2, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), true, Utils.Infinite_Time);
        cluster.reinitServer(0);
        // wait for server to recover
        notificationSystem.awaitBlobCreations(blobIdList.get(6).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(7).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(8).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(9).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(10).getID());
        notificationSystem.awaitBlobUpdates(blobIdList.get(10).getID(), UpdateType.TTL_UPDATE);
        channel1.disconnect();
        channel1.connect();
        // get blob
        try {
            checkBlobContent(clusterMap, blobIdList.get(1), channel1, dataList.get(1), encryptionKeyList.get(1));
            checkBlobContent(clusterMap, blobIdList.get(2), channel1, dataList.get(2), encryptionKeyList.get(2));
            checkBlobContent(clusterMap, blobIdList.get(3), channel1, dataList.get(3), encryptionKeyList.get(3));
            checkBlobContent(clusterMap, blobIdList.get(4), channel1, dataList.get(4), encryptionKeyList.get(4));
            checkBlobContent(clusterMap, blobIdList.get(5), channel1, dataList.get(5), encryptionKeyList.get(5));
            checkBlobContent(clusterMap, blobIdList.get(6), channel1, dataList.get(6), encryptionKeyList.get(6));
            checkBlobContent(clusterMap, blobIdList.get(7), channel1, dataList.get(7), encryptionKeyList.get(7));
            checkBlobContent(clusterMap, blobIdList.get(8), channel1, dataList.get(8), encryptionKeyList.get(8));
            checkBlobContent(clusterMap, blobIdList.get(9), channel1, dataList.get(9), encryptionKeyList.get(9));
            checkBlobContent(clusterMap, blobIdList.get(10), channel1, dataList.get(10), encryptionKeyList.get(10));
        } catch (MessageFormatException e) {
            fail();
        }
        // check that the ttl update went through
        checkTtlUpdateStatus(channel1, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), true, Utils.Infinite_Time);
        // Shut down server 1, remove all of its data from every mount path, then recover it and ensure the node is rebuilt
        cluster.getServers().get(0).shutdown();
        cluster.getServers().get(0).awaitShutdown();
        File mountFile = new File(clusterMap.getReplicaIds(dataNodeId).get(0).getMountPath());
        for (File toDelete : Objects.requireNonNull(mountFile.listFiles())) {
            deleteFolderContent(toDelete, true);
        }
        notificationSystem.decrementCreatedReplica(blobIdList.get(1).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(2).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(3).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(4).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(5).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementUpdatedReplica(blobIdList.get(5).getID(), dataNodeId.getHostname(), dataNodeId.getPort(), UpdateType.TTL_UPDATE);
        notificationSystem.decrementCreatedReplica(blobIdList.get(6).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(7).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(8).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(9).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementCreatedReplica(blobIdList.get(10).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
        notificationSystem.decrementUpdatedReplica(blobIdList.get(10).getID(), dataNodeId.getHostname(), dataNodeId.getPort(), UpdateType.TTL_UPDATE);
        cluster.reinitServer(0);
        notificationSystem.awaitBlobCreations(blobIdList.get(1).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(2).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(3).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(4).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(5).getID());
        notificationSystem.awaitBlobUpdates(blobIdList.get(5).getID(), UpdateType.TTL_UPDATE);
        notificationSystem.awaitBlobCreations(blobIdList.get(6).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(7).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(8).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(9).getID());
        notificationSystem.awaitBlobCreations(blobIdList.get(10).getID());
        notificationSystem.awaitBlobUpdates(blobIdList.get(10).getID(), UpdateType.TTL_UPDATE);
        channel1.disconnect();
        channel1.connect();
        // get blob
        try {
            checkBlobContent(clusterMap, blobIdList.get(1), channel1, dataList.get(1), encryptionKeyList.get(1));
            checkBlobContent(clusterMap, blobIdList.get(2), channel1, dataList.get(2), encryptionKeyList.get(2));
            checkBlobContent(clusterMap, blobIdList.get(3), channel1, dataList.get(3), encryptionKeyList.get(3));
            checkBlobContent(clusterMap, blobIdList.get(4), channel1, dataList.get(4), encryptionKeyList.get(4));
            checkBlobContent(clusterMap, blobIdList.get(5), channel1, dataList.get(5), encryptionKeyList.get(5));
            checkBlobContent(clusterMap, blobIdList.get(6), channel1, dataList.get(6), encryptionKeyList.get(6));
            checkBlobContent(clusterMap, blobIdList.get(7), channel1, dataList.get(7), encryptionKeyList.get(7));
            checkBlobContent(clusterMap, blobIdList.get(8), channel1, dataList.get(8), encryptionKeyList.get(8));
            checkBlobContent(clusterMap, blobIdList.get(9), channel1, dataList.get(9), encryptionKeyList.get(9));
            checkBlobContent(clusterMap, blobIdList.get(10), channel1, dataList.get(10), encryptionKeyList.get(10));
        } catch (MessageFormatException e) {
            fail();
        }
        // check that the ttl updates are present
        checkTtlUpdateStatus(channel1, clusterMap, blobIdFactory, blobIdList.get(5), dataList.get(5), true, Utils.Infinite_Time);
        checkTtlUpdateStatus(channel1, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), true, Utils.Infinite_Time);
        channel1.disconnect();
        channel2.disconnect();
        channel3.disconnect();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
Also used : ArrayList(java.util.ArrayList) PutResponse(com.github.ambry.protocol.PutResponse) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) InMemAccountService(com.github.ambry.account.InMemAccountService) BlobAll(com.github.ambry.messageformat.BlobAll) GetRequest(com.github.ambry.protocol.GetRequest) BlobData(com.github.ambry.messageformat.BlobData) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) NonBlockingRouterFactory(com.github.ambry.router.NonBlockingRouterFactory) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PutRequest(com.github.ambry.protocol.PutRequest) Router(com.github.ambry.router.Router) ConnectedChannel(com.github.ambry.network.ConnectedChannel) PartitionId(com.github.ambry.clustermap.PartitionId) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) DataInputStream(java.io.DataInputStream) PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo) GetResponse(com.github.ambry.protocol.GetResponse) ByteBuffer(java.nio.ByteBuffer) IOException(java.io.IOException) RouterException(com.github.ambry.router.RouterException) ExecutionException(java.util.concurrent.ExecutionException) TimeoutException(java.util.concurrent.TimeoutException) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) DeleteResponse(com.github.ambry.protocol.DeleteResponse) BlobId(com.github.ambry.commons.BlobId) AccountService(com.github.ambry.account.AccountService) DeleteRequest(com.github.ambry.protocol.DeleteRequest) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) File(java.io.File) MockClusterMap(com.github.ambry.clustermap.MockClusterMap)
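
The expectedTokenSize bookkeeping above treats the replica token as a byte offset into the log: it starts at the log-segment header and advances by the size of every record written. A small sketch of that arithmetic follows; the constant 18 comes from the test's own comment, while the record sizes are made-up placeholders rather than Ambry's real on-disk sizes.

public class TokenOffsetDemo {
    // Header size of a LogSegment, per the comment in the test above.
    static final int LOG_SEGMENT_HEADER_SIZE = 18;

    public static void main(String[] args) {
        long expectedToken = LOG_SEGMENT_HEADER_SIZE;
        long[] putRecordSizes = {1200, 1200, 1200}; // hypothetical put record sizes
        long ttlUpdateRecordSize = 120;             // hypothetical TTL_UPDATE record size
        for (long size : putRecordSizes) {
            expectedToken += size; // every put advances the token by its record size
        }
        expectedToken += ttlUpdateRecordSize; // updates advance it too
        System.out.println("expected token offset: " + expectedToken); // 3738
    }
}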

Example 39 with DataNodeId

use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.

the class AmbryServer method startup.

public void startup() throws InstantiationException {
    try {
        logger.info("starting");
        clusterParticipants = clusterAgentsFactory.getClusterParticipants();
        logger.info("Setting up JMX.");
        long startTime = SystemTime.getInstance().milliseconds();
        reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
        reporter.start();
        logger.info("creating configs");
        NetworkConfig networkConfig = new NetworkConfig(properties);
        StoreConfig storeConfig = new StoreConfig(properties);
        DiskManagerConfig diskManagerConfig = new DiskManagerConfig(properties);
        ServerConfig serverConfig = new ServerConfig(properties);
        ReplicationConfig replicationConfig = new ReplicationConfig(properties);
        ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
        SSLConfig sslConfig = new SSLConfig(properties);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
        StatsManagerConfig statsConfig = new StatsManagerConfig(properties);
        CloudConfig cloudConfig = new CloudConfig(properties);
        // verify the configs
        properties.verify();
        scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
        // If more than one participant is configured, schedule a consistency checker to detect any
        // mismatch in the sealed/stopped replica lists maintained by each participant.
        if (clusterParticipants != null && clusterParticipants.size() > 1 && serverConfig.serverParticipantsConsistencyCheckerPeriodSec > 0) {
            consistencyChecker = new ParticipantsConsistencyChecker(clusterParticipants, metrics);
            logger.info("Scheduling participants consistency checker with a period of {} secs", serverConfig.serverParticipantsConsistencyCheckerPeriodSec);
            consistencyCheckerScheduler = Utils.newScheduler(1, "consistency-checker-", false);
            consistencyCheckerTask = consistencyCheckerScheduler.scheduleAtFixedRate(consistencyChecker, 0, serverConfig.serverParticipantsConsistencyCheckerPeriodSec, TimeUnit.SECONDS);
        }
        logger.info("checking if node exists in clustermap host {} port {}", networkConfig.hostName, networkConfig.port);
        DataNodeId nodeId = clusterMap.getDataNodeId(networkConfig.hostName, networkConfig.port);
        if (nodeId == null) {
            throw new IllegalArgumentException("The node " + networkConfig.hostName + ":" + networkConfig.port + " is not present in the clustermap. Failing to start the datanode");
        }
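        // Resolve the configured AccountService; it is handed to the storage manager and to the replication skip predicate below.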
        AccountServiceFactory accountServiceFactory = Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
        AccountService accountService = accountServiceFactory.getAccountService();
        StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
        // In most cases there is only one participant in the clusterParticipants list. If there are
        // several and a component requires a sole participant, the first one in the list is treated
        // as the primary participant.
        storageManager = new StorageManager(storeConfig, diskManagerConfig, scheduler, registry, storeKeyFactory, clusterMap, nodeId, new BlobStoreHardDelete(), clusterParticipants, time, new BlobStoreRecovery(), accountService);
        storageManager.start();
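        // Choose the inter-node connection pool implementation based on whether HTTP/2 replication is enabled.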
        SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
        if (clusterMapConfig.clusterMapEnableHttp2Replication) {
            connectionPool = new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
        } else {
            connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
        }
        connectionPool.start();
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
        Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
        replicationManager = new ReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, clusterParticipants.get(0), skipPredicate);
        replicationManager.start();
        if (replicationConfig.replicationEnabledWithVcrCluster) {
            logger.info("Creating Helix cluster spectator for cloud to store replication.");
            vcrClusterSpectator = _vcrClusterAgentsFactory.getVcrClusterSpectator(cloudConfig, clusterMapConfig);
            cloudToStoreReplicationManager = new CloudToStoreReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, vcrClusterSpectator, clusterParticipants.get(0));
            cloudToStoreReplicationManager.start();
        }
        logger.info("Creating StatsManager to publish stats");
        accountStatsMySqlStore = statsConfig.enableMysqlReport ? (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(properties, clusterMapConfig, registry).getAccountStatsStore() : null;
        statsManager = new StatsManager(storageManager, clusterMap.getReplicaIds(nodeId), registry, statsConfig, time, clusterParticipants.get(0), accountStatsMySqlStore, accountService);
        if (serverConfig.serverStatsPublishLocalEnabled) {
            statsManager.start();
        }
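        // The socket server binds the plaintext port, plus the SSL port when the node has one.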
        ArrayList<Port> ports = new ArrayList<Port>();
        ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
        if (nodeId.hasSSLPort()) {
            ports.add(new Port(nodeId.getSSLPort(), PortType.SSL));
        }
        networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
        FindTokenHelper findTokenHelper = new FindTokenHelper(storeKeyFactory, replicationConfig);
        requests = new AmbryServerRequests(storageManager, networkServer.getRequestResponseChannel(), clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
        requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, networkServer.getRequestResponseChannel(), requests);
        networkServer.start();
        // Start netty http2 server
        if (nodeId.hasHttp2Port()) {
            NettyConfig nettyConfig = new NettyConfig(properties);
            NettyMetrics nettyMetrics = new NettyMetrics(registry);
            Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
            Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
            logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", nodeId.getHttp2Port());
            NettyServerRequestResponseChannel requestResponseChannel = new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
            AmbryServerRequests ambryServerRequestsForHttp2 = new AmbryServerRequests(storageManager, requestResponseChannel, clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
            requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, requestResponseChannel, ambryServerRequestsForHttp2);
            NioServerFactory nioServerFactory = new StorageServerNettyFactory(nodeId.getHttp2Port(), requestResponseChannel, sslFactory, nettyConfig, http2ClientConfig, metrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
            nettyHttp2Server = nioServerFactory.getNioServer();
            nettyHttp2Server.start();
        }
        // Other code
        List<AmbryStatsReport> ambryStatsReports = new ArrayList<>();
        Set<String> validStatsTypes = new HashSet<>();
        for (StatsReportType type : StatsReportType.values()) {
            validStatsTypes.add(type.toString());
        }
        if (serverConfig.serverStatsPublishReportEnabled) {
            serverConfig.serverStatsReportsToPublish.forEach(e -> {
                if (validStatsTypes.contains(e)) {
                    ambryStatsReports.add(new AmbryStatsReportImpl(serverConfig.serverQuotaStatsAggregateIntervalInMinutes, StatsReportType.valueOf(e)));
                }
            });
        }
        if (vcrClusterSpectator != null) {
            vcrClusterSpectator.spectate();
        }
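        // Register the stats reports, the account stats store, and the account service callback with each cluster participant.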
        Callback<StatsSnapshot> accountServiceCallback = new AccountServiceCallback(accountService);
        for (ClusterParticipant clusterParticipant : clusterParticipants) {
            clusterParticipant.participate(ambryStatsReports, accountStatsMySqlStore, accountServiceCallback);
        }
        if (nettyInternalMetrics != null) {
            nettyInternalMetrics.start();
            logger.info("NettyInternalMetric starts");
        }
        logger.info("started");
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        metrics.serverStartTimeInMs.update(processingTime);
        logger.info("Server startup time in Ms {}", processingTime);
    } catch (Exception e) {
        logger.error("Error during startup", e);
        throw new InstantiationException("failure during startup " + e);
    }
}
Also used : DiskManagerConfig(com.github.ambry.config.DiskManagerConfig) SSLFactory(com.github.ambry.commons.SSLFactory) Http2ClientMetrics(com.github.ambry.network.http2.Http2ClientMetrics) Port(com.github.ambry.network.Port) StorageManager(com.github.ambry.store.StorageManager) ReplicationSkipPredicate(com.github.ambry.replication.ReplicationSkipPredicate) ArrayList(java.util.ArrayList) NettySslHttp2Factory(com.github.ambry.commons.NettySslHttp2Factory) NettyConfig(com.github.ambry.config.NettyConfig) ServerConfig(com.github.ambry.config.ServerConfig) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) StoreKeyConverterFactory(com.github.ambry.store.StoreKeyConverterFactory) AccountStatsMySqlStoreFactory(com.github.ambry.accountstats.AccountStatsMySqlStoreFactory) HashSet(java.util.HashSet) CloudToStoreReplicationManager(com.github.ambry.replication.CloudToStoreReplicationManager) ReplicationManager(com.github.ambry.replication.ReplicationManager) ReplicationConfig(com.github.ambry.config.ReplicationConfig) Http2BlockingChannelPool(com.github.ambry.network.http2.Http2BlockingChannelPool) Http2ClientConfig(com.github.ambry.config.Http2ClientConfig) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) BlockingChannelConnectionPool(com.github.ambry.network.BlockingChannelConnectionPool) AccountServiceCallback(com.github.ambry.account.AccountServiceCallback) Http2ServerMetrics(com.github.ambry.network.http2.Http2ServerMetrics) BlobStoreRecovery(com.github.ambry.messageformat.BlobStoreRecovery) RequestHandlerPool(com.github.ambry.protocol.RequestHandlerPool) StoreConfig(com.github.ambry.config.StoreConfig) DataNodeId(com.github.ambry.clustermap.DataNodeId) AccountService(com.github.ambry.account.AccountService) ConnectionPoolConfig(com.github.ambry.config.ConnectionPoolConfig) CloudConfig(com.github.ambry.config.CloudConfig) StorageServerNettyFactory(com.github.ambry.rest.StorageServerNettyFactory) AccountStatsMySqlStore(com.github.ambry.accountstats.AccountStatsMySqlStore) SSLConfig(com.github.ambry.config.SSLConfig) StatsManagerConfig(com.github.ambry.config.StatsManagerConfig) SocketServer(com.github.ambry.network.SocketServer) NettyMetrics(com.github.ambry.rest.NettyMetrics) FindTokenHelper(com.github.ambry.replication.FindTokenHelper) NettyServerRequestResponseChannel(com.github.ambry.network.NettyServerRequestResponseChannel) NetworkConfig(com.github.ambry.config.NetworkConfig) BlobStoreHardDelete(com.github.ambry.messageformat.BlobStoreHardDelete) IOException(java.io.IOException) MessageInfo(com.github.ambry.store.MessageInfo) NioServerFactory(com.github.ambry.rest.NioServerFactory) CloudToStoreReplicationManager(com.github.ambry.replication.CloudToStoreReplicationManager) ClusterParticipant(com.github.ambry.clustermap.ClusterParticipant) AccountServiceFactory(com.github.ambry.account.AccountServiceFactory)
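
The node lookup near the top of the startup code above is the standard fail-fast use of DataNodeId: resolve the local node from the ClusterMap and refuse to start if the cluster map does not list it. A minimal sketch of that pattern in isolation; the helper method and its parameters are illustrative assumptions, not Ambry API:

DataNodeId resolveLocalNode(ClusterMap clusterMap, String hostname, int port) {
    // ClusterMap.getDataNodeId returns null when the host/port pair is absent from the cluster map.
    DataNodeId nodeId = clusterMap.getDataNodeId(hostname, port);
    if (nodeId == null) {
        // Fail fast: starting a node the cluster map does not know about would leave it unroutable.
        throw new IllegalArgumentException("The node " + hostname + ":" + port + " is not present in the clustermap");
    }
    return nodeId;
}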

Example 40 with DataNodeId

use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.

the class AmbryServerRequestsTest method addBlobStoreFailureTest.

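/**
 * Tests failure cases for adding a BlobStore via a store control admin request. Covers: a partition
 * with no replica on the current node, a StorageManager that rejects the new store, a replica that is
 * already present in the ReplicationManager, and a StatsManager that rejects the new replica.
 */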
@Test
public void addBlobStoreFailureTest() throws Exception {
    // create newPartition1 such that no replica sits on the current node.
    List<MockDataNodeId> dataNodes = clusterMap.getDataNodes().stream().filter(node -> !node.getHostname().equals(dataNodeId.getHostname()) || node.getPort() != dataNodeId.getPort()).collect(Collectors.toList());
    PartitionId newPartition1 = clusterMap.createNewPartition(dataNodes);
    // test that getting new replica from cluster map fails
    sendAndVerifyStoreControlRequest(newPartition1, BlobStoreControlAction.AddStore, (short) 0, ServerErrorCode.Replica_Unavailable);
    // create newPartition2 that has one replica on the current node
    PartitionId newPartition2 = clusterMap.createNewPartition(clusterMap.getDataNodes());
    // test that adding store into StorageManager fails
    storageManager.returnValueOfAddBlobStore = false;
    sendAndVerifyStoreControlRequest(newPartition2, BlobStoreControlAction.AddStore, (short) 0, ServerErrorCode.Unknown_Error);
    storageManager.returnValueOfAddBlobStore = true;
    // test that adding the replica into ReplicationManager fails (the replica is registered up front so the AddStore request's own add attempt fails)
    ReplicaId replicaToAdd = clusterMap.getBootstrapReplica(newPartition2.toPathString(), dataNodeId);
    replicationManager.addReplica(replicaToAdd);
    sendAndVerifyStoreControlRequest(newPartition2, BlobStoreControlAction.AddStore, (short) 0, ServerErrorCode.Unknown_Error);
    assertTrue("Remove replica from replication manager should succeed.", replicationManager.removeReplica(replicaToAdd));
    // test that adding replica into StatsManager fails
    statsManager.returnValOfAddReplica = false;
    sendAndVerifyStoreControlRequest(newPartition2, BlobStoreControlAction.AddStore, (short) 0, ServerErrorCode.Unknown_Error);
    statsManager.returnValOfAddReplica = true;
}
Also used : NetworkRequest(com.github.ambry.network.NetworkRequest) AdminRequestOrResponseType(com.github.ambry.protocol.AdminRequestOrResponseType) GetOption(com.github.ambry.protocol.GetOption) Arrays(java.util.Arrays) BlobProperties(com.github.ambry.messageformat.BlobProperties) CatchupStatusAdminResponse(com.github.ambry.protocol.CatchupStatusAdminResponse) StoreErrorCodes(com.github.ambry.store.StoreErrorCodes) TestUtils(com.github.ambry.utils.TestUtils) Map(java.util.Map) ServerMetrics(com.github.ambry.commons.ServerMetrics) RequestOrResponseType(com.github.ambry.protocol.RequestOrResponseType) UndeleteRequest(com.github.ambry.protocol.UndeleteRequest) ReplicaMetadataRequest(com.github.ambry.protocol.ReplicaMetadataRequest) EnumSet(java.util.EnumSet) ReplicationConfig(com.github.ambry.config.ReplicationConfig) FindTokenHelper(com.github.ambry.replication.FindTokenHelper) StatsManagerConfig(com.github.ambry.config.StatsManagerConfig) Set(java.util.Set) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) ReplicaMetadataRequestInfo(com.github.ambry.protocol.ReplicaMetadataRequestInfo) AmbryRequests(com.github.ambry.protocol.AmbryRequests) StoreKey(com.github.ambry.store.StoreKey) PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo) MockFindTokenHelper(com.github.ambry.replication.MockFindTokenHelper) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) RunWith(org.junit.runner.RunWith) ArrayList(java.util.ArrayList) TestUtils(com.github.ambry.clustermap.TestUtils) SystemTime(com.github.ambry.utils.SystemTime) StoreException(com.github.ambry.store.StoreException) ReplicationControlAdminRequest(com.github.ambry.protocol.ReplicationControlAdminRequest) MockStoreKeyConverterFactory(com.github.ambry.store.MockStoreKeyConverterFactory) Before(org.junit.Before) ReplicaState(com.github.ambry.clustermap.ReplicaState) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) IOException(java.io.IOException) Test(org.junit.Test) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) Store(com.github.ambry.store.Store) ReplicaId(com.github.ambry.clustermap.ReplicaId) MockReplicationManager(com.github.ambry.replication.MockReplicationManager) ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) BlobStoreControlAdminRequest(com.github.ambry.protocol.BlobStoreControlAdminRequest) BlobStore(com.github.ambry.store.BlobStore) Assert(org.junit.Assert) Response(com.github.ambry.protocol.Response) ByteBufferDataInputStream(com.github.ambry.utils.ByteBufferDataInputStream) MessageInfoTest(com.github.ambry.store.MessageInfoTest) DataNodeId(com.github.ambry.clustermap.DataNodeId) ByteBuffer(java.nio.ByteBuffer) Unpooled(io.netty.buffer.Unpooled) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) GetResponse(com.github.ambry.protocol.GetResponse) ErrorMapping(com.github.ambry.commons.ErrorMapping) BlobStoreControlAction(com.github.ambry.protocol.BlobStoreControlAction) JSONObject(org.json.JSONObject) After(org.junit.After) SocketRequestResponseChannel(com.github.ambry.network.SocketRequestResponseChannel) NettyByteBufLeakHelper(com.github.ambry.utils.NettyByteBufLeakHelper) GetRequest(com.github.ambry.protocol.GetRequest) Parameterized(org.junit.runners.Parameterized) ServerNetworkResponseMetrics(com.github.ambry.network.ServerNetworkResponseMetrics) 
PartitionResponseInfo(com.github.ambry.protocol.PartitionResponseInfo) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) ServerConfig(com.github.ambry.config.ServerConfig) Utils(com.github.ambry.utils.Utils) Collectors(java.util.stream.Collectors) List(java.util.List) ReplicaMetadataResponse(com.github.ambry.protocol.ReplicaMetadataResponse) TtlUpdateRequest(com.github.ambry.protocol.TtlUpdateRequest) MessageFormatFlags(com.github.ambry.messageformat.MessageFormatFlags) BlobType(com.github.ambry.messageformat.BlobType) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) CatchupStatusAdminRequest(com.github.ambry.protocol.CatchupStatusAdminRequest) ByteBufferChannel(com.github.ambry.utils.ByteBufferChannel) HashMap(java.util.HashMap) AdminResponse(com.github.ambry.protocol.AdminResponse) MessageWriteSet(com.github.ambry.store.MessageWriteSet) HashSet(java.util.HashSet) AdminRequest(com.github.ambry.protocol.AdminRequest) MockHelixParticipant(com.github.ambry.clustermap.MockHelixParticipant) CommonTestUtils(com.github.ambry.commons.CommonTestUtils) RequestOrResponse(com.github.ambry.protocol.RequestOrResponse) Assume(org.junit.Assume) ReplicaMetadataResponseInfo(com.github.ambry.protocol.ReplicaMetadataResponseInfo) PutRequest(com.github.ambry.protocol.PutRequest) RequestControlAdminRequest(com.github.ambry.protocol.RequestControlAdminRequest) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) MessageFormatInputStreamTest(com.github.ambry.messageformat.MessageFormatInputStreamTest) ReplicationManager(com.github.ambry.replication.ReplicationManager) DeleteRequest(com.github.ambry.protocol.DeleteRequest) ReplicaStatusDelegate(com.github.ambry.clustermap.ReplicaStatusDelegate) ReplicaType(com.github.ambry.clustermap.ReplicaType) MockWrite(com.github.ambry.store.MockWrite) ClusterMap(com.github.ambry.clustermap.ClusterMap) Mockito(org.mockito.Mockito) ReplicationException(com.github.ambry.replication.ReplicationException) MessageInfo(com.github.ambry.store.MessageInfo) ReplicaEventType(com.github.ambry.clustermap.ReplicaEventType) Send(com.github.ambry.network.Send) MessageFormatRecord(com.github.ambry.messageformat.MessageFormatRecord) Collections(java.util.Collections) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) InputStream(java.io.InputStream) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) ReplicaId(com.github.ambry.clustermap.ReplicaId) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) Test(org.junit.Test) MessageInfoTest(com.github.ambry.store.MessageInfoTest) MessageFormatInputStreamTest(com.github.ambry.messageformat.MessageFormatInputStreamTest)
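
One detail of the test above worth noting: the ReplicationManager failure case works by registering the bootstrap replica ahead of time, so the AddStore request's own attempt to add it fails, and the test then removes the replica to restore a clean state. A minimal sketch of that add/exercise/remove lifecycle, assuming the same mock harness (newPartition2, dataNodeId, and the managers come from the test fixture):

ReplicaId bootstrapReplica = clusterMap.getBootstrapReplica(newPartition2.toPathString(), dataNodeId);
replicationManager.addReplica(bootstrapReplica);
try {
    // exercise the code path that expects the replica to already be registered
    // (here: a store control AddStore request that should now fail)
} finally {
    // removeReplica returns true on success; clean up so later steps see consistent replication state
    replicationManager.removeReplica(bootstrapReplica);
}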

Aggregations

DataNodeId (com.github.ambry.clustermap.DataNodeId) 92
ArrayList (java.util.ArrayList) 45
Test (org.junit.Test) 45
HashMap (java.util.HashMap) 29
PartitionId (com.github.ambry.clustermap.PartitionId) 28
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId) 27
ReplicaId (com.github.ambry.clustermap.ReplicaId) 25
MockClusterMap (com.github.ambry.clustermap.MockClusterMap) 23
VerifiableProperties (com.github.ambry.config.VerifiableProperties) 23
MetricRegistry (com.codahale.metrics.MetricRegistry) 22
MockPartitionId (com.github.ambry.clustermap.MockPartitionId) 22
List (java.util.List) 22
Map (java.util.Map) 22
Port (com.github.ambry.network.Port) 21
ClusterMap (com.github.ambry.clustermap.ClusterMap) 20
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig) 19
StoreKeyFactory (com.github.ambry.store.StoreKeyFactory) 18
BlobIdFactory (com.github.ambry.commons.BlobIdFactory) 17
HashSet (java.util.HashSet) 16
Properties (java.util.Properties) 16