Example 51 with MessageInfo

Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.

The class ReplicationTestHelper, method addDeleteMessagesToReplicasOfPartition.

/**
 * For the given partitionId, constructs delete messages and adds them to the given lists.
 * @param partitionId the {@link PartitionId} the delete message belongs to.
 * @param id the {@link StoreKey} to create a delete message for.
 * @param hosts the list of {@link MockHost} all of which will be populated with the messages.
 * @throws MessageFormatException
 * @throws IOException
 */
public static void addDeleteMessagesToReplicasOfPartition(PartitionId partitionId, StoreKey id, List<MockHost> hosts) throws MessageFormatException, IOException {
    MessageInfo putMsg = getMessageInfo(id, hosts.get(0).infosByPartition.get(partitionId), false, false, false);
    short aid;
    short cid;
    short lifeVersion;
    if (putMsg == null) {
        // the StoreKey must be a BlobId in this case (to get the account and container id)
        aid = ((BlobId) id).getAccountId();
        cid = ((BlobId) id).getContainerId();
        lifeVersion = 0;
    } else {
        aid = putMsg.getAccountId();
        cid = putMsg.getContainerId();
        lifeVersion = putMsg.getLifeVersion();
    }
    ByteBuffer buffer = getDeleteMessage(id, aid, cid, CONSTANT_TIME_MS, lifeVersion);
    for (MockHost host : hosts) {
        // ok to send false for ttlUpdated
        host.addMessage(partitionId, new MessageInfo(id, buffer.remaining(), true, false, false, Utils.Infinite_Time, null, aid, cid, CONSTANT_TIME_MS, lifeVersion), buffer.duplicate());
    }
}
Also used : ByteBuffer(java.nio.ByteBuffer) MessageInfo(com.github.ambry.store.MessageInfo)
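
For context, a minimal usage sketch of this helper follows. It assumes the companion fixtures in the same test package (MockClusterMap, MockHost, and an addPutMessagesToReplicasOfPartition helper); the signatures are inferred from context, so treat this as an illustration rather than verified test code.

// Illustrative sketch: seed two mock hosts with a put, then add a delete for the
// same StoreKey, and verify both hosts recorded both messages.
public static void deleteMessageUsageSketch() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    PartitionId partitionId = clusterMap.getWritablePartitionIds(null).get(0);
    List<MockHost> hosts = new ArrayList<>();
    for (int i = 0; i < 2; i++) {
        hosts.add(new MockHost(clusterMap.getDataNodeIds().get(i), clusterMap));
    }
    // a put must already exist (or the id must be a BlobId) so account/container ids resolve
    StoreKey id = addPutMessagesToReplicasOfPartition(partitionId, hosts, 1).get(0);
    addDeleteMessagesToReplicasOfPartition(partitionId, id, hosts);
    for (MockHost host : hosts) {
        // each host now holds the put MessageInfo followed by the delete MessageInfo
        assertEquals(2, host.infosByPartition.get(partitionId).size());
    }
}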

Example 52 with MessageInfo

Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.

The class ReplicationTestHelper, method addTtlUpdateMessagesToReplicasOfPartition.

/**
 * For the given partitionId, constructs ttl update messages and adds them to the given lists.
 * @param partitionId the {@link PartitionId} the ttl update message belongs to.
 * @param id the {@link StoreKey} to create a ttl update message for.
 * @param hosts the list of {@link MockHost} all of which will be populated with the messages.
 * @param expirationTime the new expiration time (in ms) to record in the ttl update message.
 * @throws MessageFormatException
 * @throws IOException
 */
public static void addTtlUpdateMessagesToReplicasOfPartition(PartitionId partitionId, StoreKey id, List<MockHost> hosts, long expirationTime) throws MessageFormatException, IOException {
    MessageInfo putMsg = getMessageInfo(id, hosts.get(0).infosByPartition.get(partitionId), false, false, false);
    short aid;
    short cid;
    short lifeVersion;
    if (putMsg == null) {
        // the StoreKey must be a BlobId in this case (to get the account and container id)
        aid = ((BlobId) id).getAccountId();
        cid = ((BlobId) id).getContainerId();
        lifeVersion = 0;
    } else {
        aid = putMsg.getAccountId();
        cid = putMsg.getContainerId();
        lifeVersion = putMsg.getLifeVersion();
    }
    ByteBuffer buffer = getTtlUpdateMessage(id, aid, cid, expirationTime, CONSTANT_TIME_MS);
    for (MockHost host : hosts) {
        host.addMessage(partitionId, new MessageInfo(id, buffer.remaining(), false, true, false, expirationTime, null, aid, cid, CONSTANT_TIME_MS, lifeVersion), buffer.duplicate());
    }
}
Also used : ByteBuffer(java.nio.ByteBuffer) MessageInfo(com.github.ambry.store.MessageInfo)
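
The two helpers differ only in the MessageInfo flags and the expiry they record. The constructor calls can be read side by side as below; the parameter-name mapping is inferred from the argument order in the calls above, not from the MessageInfo source.

// Inferred layout: MessageInfo(key, size, deleted, ttlUpdated, undeleted, expiresAtMs,
//                              crc, accountId, containerId, operationTimeMs, lifeVersion)
static MessageInfo deleteRecordInfo(StoreKey id, long size, short aid, short cid, long opTimeMs, short lifeVersion) {
    // delete record: deleted=true, ttlUpdated=false; expiry is irrelevant, so Infinite_Time
    return new MessageInfo(id, size, true, false, false, Utils.Infinite_Time, null, aid, cid, opTimeMs, lifeVersion);
}

static MessageInfo ttlUpdateRecordInfo(StoreKey id, long size, short aid, short cid, long opTimeMs, short lifeVersion, long expirationTime) {
    // ttl update record: deleted=false, ttlUpdated=true; carries the new expiry
    return new MessageInfo(id, size, false, true, false, expirationTime, null, aid, cid, opTimeMs, lifeVersion);
}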

Example 53 with MessageInfo

Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.

The class ServerTestUtil, method endToEndReplicationWithMultiNodeMultiPartitionTest.

static void endToEndReplicationWithMultiNodeMultiPartitionTest(int interestedDataNodePortNumber, Port dataNode1Port, Port dataNode2Port, Port dataNode3Port, MockCluster cluster, SSLConfig clientSSLConfig1, SSLConfig clientSSLConfig2, SSLConfig clientSSLConfig3, SSLSocketFactory clientSSLSocketFactory1, SSLSocketFactory clientSSLSocketFactory2, SSLSocketFactory clientSSLSocketFactory3, MockNotificationSystem notificationSystem, boolean testEncryption) throws Exception {
    // interestedDataNodePortNumber is used to locate the datanode and hence has to be PlainTextPort
    MockClusterMap clusterMap = cluster.getClusterMap();
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    List<AmbryServer> serverList = cluster.getServers();
    byte[] usermetadata = new byte[100];
    byte[] data = new byte[100];
    byte[] encryptionKey = null;
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    BlobProperties properties = new BlobProperties(100, "serviceid1", null, null, false, TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
    long expectedExpiryTimeMs = getExpiryTimeMs(properties);
    TestUtils.RANDOM.nextBytes(usermetadata);
    TestUtils.RANDOM.nextBytes(data);
    if (testEncryption) {
        encryptionKey = new byte[100];
        TestUtils.RANDOM.nextBytes(encryptionKey);
    }
    // connect to all the servers
    ConnectedChannel channel1 = getBlockingChannelBasedOnPortType(dataNode1Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
    ConnectedChannel channel2 = getBlockingChannelBasedOnPortType(dataNode2Port, "localhost", clientSSLSocketFactory2, clientSSLConfig2);
    ConnectedChannel channel3 = getBlockingChannelBasedOnPortType(dataNode3Port, "localhost", clientSSLSocketFactory3, clientSSLConfig3);
    // put all the blobs to random servers
    channel1.connect();
    channel2.connect();
    channel3.connect();
    int noOfParallelThreads = 3;
    int totalBlobsToPut = 50;
    CountDownLatch latch = new CountDownLatch(noOfParallelThreads);
    List<DirectSender> runnables = new ArrayList<DirectSender>(noOfParallelThreads);
    ConnectedChannel channel = null;
    for (int i = 0; i < noOfParallelThreads; i++) {
        if (i % noOfParallelThreads == 0) {
            channel = channel1;
        } else if (i % noOfParallelThreads == 1) {
            channel = channel2;
        } else if (i % noOfParallelThreads == 2) {
            channel = channel3;
        }
        DirectSender runnable = new DirectSender(cluster, channel, totalBlobsToPut, data, usermetadata, properties, encryptionKey, latch);
        runnables.add(runnable);
        Thread threadToRun = new Thread(runnable);
        threadToRun.start();
    }
    assertTrue("Did not put all blobs in 2 minutes", latch.await(2, TimeUnit.MINUTES));
    // wait till replication can complete
    List<BlobId> blobIds = new ArrayList<BlobId>();
    for (int i = 0; i < runnables.size(); i++) {
        blobIds.addAll(runnables.get(i).getBlobIds());
    }
    for (BlobId blobId : blobIds) {
        notificationSystem.awaitBlobCreations(blobId.getID());
    }
    // Now that the blob is created and replicated, test the cases where a put request arrives for the same blob id
    // later than replication.
    testLatePutRequest(blobIds.get(0), properties, usermetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.No_Error);
    // Test the case where a put arrives with the same id as one in the server, but the blob is not identical.
    BlobProperties differentProperties = new BlobProperties(properties.getBlobSize(), properties.getServiceId(), accountId, containerId, testEncryption, cluster.time.milliseconds());
    testLatePutRequest(blobIds.get(0), differentProperties, usermetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
    byte[] differentUserMetadata = Arrays.copyOf(usermetadata, usermetadata.length);
    differentUserMetadata[0] = (byte) ~differentUserMetadata[0];
    testLatePutRequest(blobIds.get(0), properties, differentUserMetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
    byte[] differentData = Arrays.copyOf(data, data.length);
    differentData[0] = (byte) ~differentData[0];
    testLatePutRequest(blobIds.get(0), properties, usermetadata, differentData, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
    // verify blob properties, metadata and blob across all nodes
    for (int i = 0; i < 3; i++) {
        channel = null;
        if (i == 0) {
            channel = channel1;
        } else if (i == 1) {
            channel = channel2;
        } else if (i == 2) {
            channel = channel3;
        }
        ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
        for (int j = 0; j < blobIds.size(); j++) {
            ArrayList<BlobId> ids = new ArrayList<BlobId>();
            ids.add(blobIds.get(j));
            partitionRequestInfoList.clear();
            PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
            partitionRequestInfoList.add(partitionRequestInfo);
            GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
            DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
            GetResponse resp = GetResponse.readFrom(stream, clusterMap);
            try {
                BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
                assertEquals(100, propertyOutput.getBlobSize());
                assertEquals("serviceid1", propertyOutput.getServiceId());
                assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
                assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
                assertEquals("Expiration time mismatch (props)", expectedExpiryTimeMs, getExpiryTimeMs(propertyOutput));
                assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
                releaseNettyBufUnderneathStream(stream);
            } catch (MessageFormatException e) {
                fail();
            }
            // get user metadata
            getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
            stream = channel.sendAndReceive(getRequest).getInputStream();
            resp = GetResponse.readFrom(stream, clusterMap);
            try {
                ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
                assertArrayEquals(usermetadata, userMetadataOutput.array());
                if (testEncryption) {
                    assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                    assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
                } else {
                    assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                }
                assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
            } catch (MessageFormatException e) {
                e.printStackTrace();
                fail();
            }
            releaseNettyBufUnderneathStream(stream);
            // get blob
            getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
            stream = channel.sendAndReceive(getRequest).getInputStream();
            resp = GetResponse.readFrom(stream, clusterMap);
            try {
                BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
                byte[] blobout = getBlobDataAndRelease(blobData);
                assertArrayEquals(data, blobout);
                if (testEncryption) {
                    assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                    assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
                } else {
                    assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                }
                assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
            } catch (MessageFormatException e) {
                e.printStackTrace();
                fail();
            }
            releaseNettyBufUnderneathStream(stream);
            // get blob all
            getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
            stream = channel.sendAndReceive(getRequest).getInputStream();
            resp = GetResponse.readFrom(stream, clusterMap);
            try {
                BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
                assertEquals("Expiration time mismatch (props)", expectedExpiryTimeMs, getExpiryTimeMs(blobAll.getBlobInfo().getBlobProperties()));
                assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
                byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
                assertArrayEquals(data, blobout);
                if (testEncryption) {
                    assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
                    assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
                } else {
                    assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
                }
                releaseNettyBufUnderneathStream(stream);
            } catch (MessageFormatException e) {
                e.printStackTrace();
                fail();
            }
        }
    }
    // ttl update all blobs and wait for replication
    Map<ConnectedChannel, List<BlobId>> channelToBlobIds = new HashMap<>();
    for (int i = 0; i < blobIds.size(); i++) {
        final BlobId blobId = blobIds.get(i);
        if (i % 3 == 0) {
            channelToBlobIds.computeIfAbsent(channel1, updateChannel -> new ArrayList<>()).add(blobId);
        } else if (i % 3 == 1) {
            channelToBlobIds.computeIfAbsent(channel2, updateChannel -> new ArrayList<>()).add(blobId);
        } else {
            channelToBlobIds.computeIfAbsent(channel3, updateChannel -> new ArrayList<>()).add(blobId);
        }
    }
    channelToBlobIds.entrySet().stream().map(entry -> CompletableFuture.supplyAsync(() -> {
        try {
            for (BlobId blobId : entry.getValue()) {
                updateBlobTtl(entry.getKey(), blobId, cluster.time.milliseconds());
            }
            return null;
        } catch (Throwable e) {
            throw new RuntimeException("Exception updating ttl for: " + entry, e);
        }
    })).forEach(CompletableFuture::join);
    // check that the TTL update has propagated
    blobIds.forEach(blobId -> notificationSystem.awaitBlobUpdates(blobId.getID(), UpdateType.TTL_UPDATE));
    // check all servers
    for (ConnectedChannel channelToUse : new ConnectedChannel[] { channel1, channel2, channel3 }) {
        for (BlobId blobId : blobIds) {
            checkTtlUpdateStatus(channelToUse, clusterMap, blobIdFactory, blobId, data, true, Utils.Infinite_Time);
        }
    }
    // delete random blobs, wait for replication and ensure it is deleted in all nodes
    Set<BlobId> blobsDeleted = new HashSet<BlobId>();
    Set<BlobId> blobsChecked = new HashSet<BlobId>();
    for (int i = 0; i < blobIds.size(); i++) {
        int j = new Random().nextInt(3);
        if (j == 0) {
            j = new Random().nextInt(3);
            if (j == 0) {
                channel = channel1;
            } else if (j == 1) {
                channel = channel2;
            } else if (j == 2) {
                channel = channel3;
            }
            DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", blobIds.get(i), System.currentTimeMillis());
            DataInputStream deleteResponseStream = channel.sendAndReceive(deleteRequest).getInputStream();
            DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteResponseStream);
            releaseNettyBufUnderneathStream(deleteResponseStream);
            assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
            blobsDeleted.add(blobIds.get(i));
        }
    }
    Iterator<BlobId> iterator = blobsDeleted.iterator();
    ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
    while (iterator.hasNext()) {
        BlobId deletedId = iterator.next();
        notificationSystem.awaitBlobDeletions(deletedId.getID());
        for (int j = 0; j < 3; j++) {
            if (j == 0) {
                channel = channel1;
            } else if (j == 1) {
                channel = channel2;
            } else if (j == 2) {
                channel = channel3;
            }
            ArrayList<BlobId> ids = new ArrayList<BlobId>();
            ids.add(deletedId);
            partitionRequestInfoList.clear();
            PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(deletedId.getPartition(), ids);
            partitionRequestInfoList.add(partitionRequestInfo);
            GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
            DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
            GetResponse resp = GetResponse.readFrom(stream, clusterMap);
            assertEquals(ServerErrorCode.Blob_Deleted, resp.getPartitionResponseInfoList().get(0).getErrorCode());
            releaseNettyBufUnderneathStream(stream);
        }
    }
    // take a server down, clean up a mount path, start and ensure replication fixes it
    serverList.get(0).shutdown();
    serverList.get(0).awaitShutdown();
    MockDataNodeId dataNode = (MockDataNodeId) clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
    System.out.println("Cleaning mount path " + dataNode.getMountPaths().get(0));
    for (ReplicaId replicaId : clusterMap.getReplicaIds(dataNode)) {
        if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(0)) == 0) {
            System.out.println("Cleaning partition " + replicaId.getPartitionId());
        }
    }
    deleteFolderContent(new File(dataNode.getMountPaths().get(0)), false);
    for (int i = 0; i < blobIds.size(); i++) {
        for (ReplicaId replicaId : blobIds.get(i).getPartition().getReplicaIds()) {
            if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(0)) == 0) {
                if (blobsDeleted.contains(blobIds.get(i))) {
                    notificationSystem.decrementDeletedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
                } else {
                    notificationSystem.decrementCreatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
                    notificationSystem.decrementUpdatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort(), UpdateType.TTL_UPDATE);
                }
            }
        }
    }
    cluster.reinitServer(0);
    channel1.disconnect();
    channel1.connect();
    for (int j = 0; j < blobIds.size(); j++) {
        if (blobsDeleted.contains(blobIds.get(j))) {
            notificationSystem.awaitBlobDeletions(blobIds.get(j).getID());
        } else {
            notificationSystem.awaitBlobCreations(blobIds.get(j).getID());
            notificationSystem.awaitBlobUpdates(blobIds.get(j).getID(), UpdateType.TTL_UPDATE);
        }
        ArrayList<BlobId> ids = new ArrayList<BlobId>();
        ids.add(blobIds.get(j));
        partitionRequestInfoList.clear();
        PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
        DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
        GetResponse resp = GetResponse.readFrom(stream, clusterMap);
        if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
            assertTrue(blobsDeleted.contains(blobIds.get(j)));
        } else {
            try {
                BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
                assertEquals(100, propertyOutput.getBlobSize());
                assertEquals("serviceid1", propertyOutput.getServiceId());
                assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
                assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
                assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
            } catch (MessageFormatException e) {
                fail();
            }
        }
        releaseNettyBufUnderneathStream(stream);
        // get user metadata
        getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
        stream = channel1.sendAndReceive(getRequest).getInputStream();
        resp = GetResponse.readFrom(stream, clusterMap);
        if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
            assertTrue(blobsDeleted.contains(blobIds.get(j)));
        } else {
            try {
                ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
                assertArrayEquals(usermetadata, userMetadataOutput.array());
                if (testEncryption) {
                    assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                    assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
                } else {
                    assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                }
                assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
            } catch (MessageFormatException e) {
                fail();
            }
        }
        releaseNettyBufUnderneathStream(stream);
        // get blob
        getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
        stream = channel1.sendAndReceive(getRequest).getInputStream();
        resp = GetResponse.readFrom(stream, clusterMap);
        if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
            assertTrue(blobsDeleted.contains(blobIds.get(j)));
        } else {
            try {
                BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
                byte[] blobout = getBlobDataAndRelease(blobData);
                assertArrayEquals(data, blobout);
                if (testEncryption) {
                    assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                    assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
                } else {
                    assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                }
                assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
            } catch (MessageFormatException e) {
                fail();
            }
        }
        releaseNettyBufUnderneathStream(stream);
        // get blob all
        getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
        stream = channel1.sendAndReceive(getRequest).getInputStream();
        resp = GetResponse.readFrom(stream, clusterMap);
        if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
            assertTrue(blobsDeleted.contains(blobIds.get(j)));
            blobsDeleted.remove(blobIds.get(j));
            blobsChecked.add(blobIds.get(j));
        } else {
            try {
                BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
                byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
                assertArrayEquals(data, blobout);
                if (testEncryption) {
                    assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
                    assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
                } else {
                    assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
                }
                assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
            } catch (MessageFormatException e) {
                fail();
            }
        }
        releaseNettyBufUnderneathStream(stream);
    }
    assertEquals(0, blobsDeleted.size());
    // take a server down, clean all contents, start and ensure replication fixes it
    serverList.get(0).shutdown();
    serverList.get(0).awaitShutdown();
    dataNode = (MockDataNodeId) clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
    for (int i = 0; i < dataNode.getMountPaths().size(); i++) {
        System.out.println("Cleaning mount path " + dataNode.getMountPaths().get(i));
        for (ReplicaId replicaId : clusterMap.getReplicaIds(dataNode)) {
            if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(i)) == 0) {
                System.out.println("Cleaning partition " + replicaId.getPartitionId());
            }
        }
        deleteFolderContent(new File(dataNode.getMountPaths().get(i)), false);
    }
    for (int i = 0; i < blobIds.size(); i++) {
        if (blobsChecked.contains(blobIds.get(i))) {
            notificationSystem.decrementDeletedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
        } else {
            notificationSystem.decrementCreatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
        }
    }
    cluster.reinitServer(0);
    channel1.disconnect();
    channel1.connect();
    for (int j = 0; j < blobIds.size(); j++) {
        if (blobsChecked.contains(blobIds.get(j))) {
            notificationSystem.awaitBlobDeletions(blobIds.get(j).getID());
        } else {
            notificationSystem.awaitBlobCreations(blobIds.get(j).getID());
        }
        ArrayList<BlobId> ids = new ArrayList<BlobId>();
        ids.add(blobIds.get(j));
        partitionRequestInfoList.clear();
        PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
        partitionRequestInfoList.add(partitionRequestInfo);
        GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
        DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
        GetResponse resp = GetResponse.readFrom(stream, clusterMap);
        if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
            assertTrue(blobsChecked.contains(blobIds.get(j)));
        } else {
            try {
                BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
                assertEquals(100, propertyOutput.getBlobSize());
                assertEquals("serviceid1", propertyOutput.getServiceId());
                assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
                assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
            } catch (MessageFormatException e) {
                fail();
            }
        }
        releaseNettyBufUnderneathStream(stream);
        // get user metadata
        getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
        stream = channel1.sendAndReceive(getRequest).getInputStream();
        resp = GetResponse.readFrom(stream, clusterMap);
        if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
            assertTrue(blobsChecked.contains(blobIds.get(j)));
        } else {
            try {
                ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
                assertArrayEquals(usermetadata, userMetadataOutput.array());
                if (testEncryption) {
                    assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                    assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
                } else {
                    assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                }
            } catch (MessageFormatException e) {
                fail();
            }
        }
        releaseNettyBufUnderneathStream(stream);
        // get blob
        getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
        stream = channel1.sendAndReceive(getRequest).getInputStream();
        resp = GetResponse.readFrom(stream, clusterMap);
        if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
            assertTrue(blobsChecked.contains(blobIds.get(j)));
        } else {
            try {
                BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
                byte[] blobout = getBlobDataAndRelease(blobData);
                assertArrayEquals(data, blobout);
                if (testEncryption) {
                    assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                    assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
                } else {
                    assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
                }
            } catch (MessageFormatException e) {
                fail();
            }
        }
        releaseNettyBufUnderneathStream(stream);
        // get blob all
        getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
        stream = channel1.sendAndReceive(getRequest).getInputStream();
        resp = GetResponse.readFrom(stream, clusterMap);
        if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
            assertTrue(blobsChecked.contains(blobIds.get(j)));
            blobsChecked.remove(blobIds.get(j));
            blobsDeleted.add(blobIds.get(j));
        } else {
            try {
                BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
                byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
                assertArrayEquals(data, blobout);
                if (testEncryption) {
                    assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
                    assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
                } else {
                    assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
                }
            } catch (MessageFormatException e) {
                fail();
            }
        }
        releaseNettyBufUnderneathStream(stream);
    }
    assertEquals(0, blobsChecked.size());
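    // two undelete/delete rounds follow: expectedLifeVersion += i keeps the expected
    // lifeVersion at 1 in the first round (i == 0) and bumps it to 2 in the second (i == 1)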
    short expectedLifeVersion = 1;
    for (int i = 0; i < 2; i++) {
        expectedLifeVersion += i;
        // First undelete all deleted blobs
        for (BlobId deletedId : blobsDeleted) {
            UndeleteRequest undeleteRequest = new UndeleteRequest(2, "reptest", deletedId, System.currentTimeMillis());
            DataInputStream undeleteResponseStream = channel3.sendAndReceive(undeleteRequest).getInputStream();
            UndeleteResponse undeleteResponse = UndeleteResponse.readFrom(undeleteResponseStream);
            releaseNettyBufUnderneathStream(undeleteResponseStream);
            assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
            assertEquals(expectedLifeVersion, undeleteResponse.getLifeVersion());
        }
        Thread.sleep(5000);
        // Then use get request to get all the data back and make sure the lifeVersion is correct
        for (BlobId id : blobsDeleted) {
            // We don't need to wait for blob undeletes, since one of the hosts has Put Record deleted
            // from disk, so undelete this blob would end up replicating Put Record instead of undelete.
            // notificationSystem.awaitBlobUndeletes(id.toString());
            ArrayList<BlobId> ids = new ArrayList<>();
            ids.add(id);
            partitionRequestInfoList.clear();
            PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(id.getPartition(), ids);
            partitionRequestInfoList.add(partitionRequestInfo);
            // get blob all
            GetRequest getRequest = new GetRequest(1, "clientid20", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
            DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
            GetResponse resp = GetResponse.readFrom(stream, clusterMap);
            assertEquals(ServerErrorCode.No_Error, resp.getError());
            assertEquals(1, resp.getPartitionResponseInfoList().size());
            assertEquals(ServerErrorCode.No_Error, resp.getPartitionResponseInfoList().get(0).getErrorCode());
            assertEquals(1, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().size());
            MessageInfo info = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
            assertEquals(expectedLifeVersion, info.getLifeVersion());
            assertFalse(info.isDeleted());
            try {
                BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
                byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
                assertArrayEquals(data, blobout);
                if (testEncryption) {
                    assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
                    assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
                } else {
                    assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
                }
                assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
                releaseNettyBufUnderneathStream(stream);
            } catch (MessageFormatException e) {
                fail();
            }
        }
        for (BlobId id : blobsDeleted) {
            DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", id, System.currentTimeMillis());
            DataInputStream deleteResponseStream = channel.sendAndReceive(deleteRequest).getInputStream();
            DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteResponseStream);
            releaseNettyBufUnderneathStream(deleteResponseStream);
            assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        }
        Thread.sleep(1000);
        for (BlobId id : blobsDeleted) {
            ArrayList<BlobId> ids = new ArrayList<>();
            ids.add(id);
            partitionRequestInfoList.clear();
            PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(id.getPartition(), ids);
            partitionRequestInfoList.add(partitionRequestInfo);
            // get blob all
            GetRequest getRequest = new GetRequest(1, "clientid200", MessageFormatFlags.All, partitionRequestInfoList, GetOption.Include_All);
            DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
            GetResponse resp = GetResponse.readFrom(stream, clusterMap);
            assertEquals(ServerErrorCode.No_Error, resp.getError());
            assertEquals(1, resp.getPartitionResponseInfoList().size());
            assertEquals(ServerErrorCode.No_Error, resp.getPartitionResponseInfoList().get(0).getErrorCode());
            assertEquals(1, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().size());
            MessageInfo info = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
            assertEquals(expectedLifeVersion, info.getLifeVersion());
            assertTrue(info.isDeleted());
            try {
                BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
                byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
                assertArrayEquals(data, blobout);
                if (testEncryption) {
                    assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
                    assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
                } else {
                    assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
                }
                assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
                releaseNettyBufUnderneathStream(stream);
            } catch (MessageFormatException e) {
                fail();
            }
        }
    }
    channel1.disconnect();
    channel2.disconnect();
    channel3.disconnect();
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) List(java.util.List) HashSet(java.util.HashSet) Random(java.util.Random) CountDownLatch(java.util.concurrent.CountDownLatch) CompletableFuture(java.util.concurrent.CompletableFuture) ByteBuffer(java.nio.ByteBuffer) DataInputStream(java.io.DataInputStream) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) File(java.io.File) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) BlobProperties(com.github.ambry.messageformat.BlobProperties) BlobData(com.github.ambry.messageformat.BlobData) BlobAll(com.github.ambry.messageformat.BlobAll) MessageInfo(com.github.ambry.store.MessageInfo) BlobId(com.github.ambry.commons.BlobId) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) ConnectedChannel(com.github.ambry.network.ConnectedChannel) PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo) GetRequest(com.github.ambry.protocol.GetRequest) GetResponse(com.github.ambry.protocol.GetResponse) DeleteRequest(com.github.ambry.protocol.DeleteRequest) DeleteResponse(com.github.ambry.protocol.DeleteResponse) UndeleteRequest(com.github.ambry.protocol.UndeleteRequest) UndeleteResponse(com.github.ambry.protocol.UndeleteResponse) ReplicaId(com.github.ambry.clustermap.ReplicaId) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) MockClusterMap(com.github.ambry.clustermap.MockClusterMap)
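
The test above repeats one GET round-trip for every MessageFormatFlags value. The sketch below distills that pattern into a single hypothetical helper, assumed to live in ServerTestUtil so it can reuse releaseNettyBufUnderneathStream; everything else is lifted directly from the code above.

// Hypothetical helper distilling the repeated pattern: fetch one blob's properties
// from a server over the given channel and return the deserialized BlobProperties.
static BlobProperties fetchBlobProperties(ConnectedChannel channel, ClusterMap clusterMap, BlobId blobId) throws IOException, MessageFormatException {
    ArrayList<BlobId> ids = new ArrayList<>();
    ids.add(blobId);
    List<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
    partitionRequestInfoList.add(new PartitionRequestInfo(blobId.getPartition(), ids));
    GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
    DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
    try {
        GetResponse resp = GetResponse.readFrom(stream, clusterMap);
        return MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
    } finally {
        // the response stream wraps a Netty buffer that must be released after use
        releaseNettyBufUnderneathStream(stream);
    }
}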

Example 54 with MessageInfo

Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.

The class AmbryServer, method startup.

public void startup() throws InstantiationException {
    try {
        logger.info("starting");
        clusterParticipants = clusterAgentsFactory.getClusterParticipants();
        logger.info("Setting up JMX.");
        long startTime = SystemTime.getInstance().milliseconds();
        reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
        reporter.start();
        logger.info("creating configs");
        NetworkConfig networkConfig = new NetworkConfig(properties);
        StoreConfig storeConfig = new StoreConfig(properties);
        DiskManagerConfig diskManagerConfig = new DiskManagerConfig(properties);
        ServerConfig serverConfig = new ServerConfig(properties);
        ReplicationConfig replicationConfig = new ReplicationConfig(properties);
        ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
        SSLConfig sslConfig = new SSLConfig(properties);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
        StatsManagerConfig statsConfig = new StatsManagerConfig(properties);
        CloudConfig cloudConfig = new CloudConfig(properties);
        // verify the configs
        properties.verify();
        scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
        // schedule a consistency checker to detect any mismatch in the sealed/stopped replica lists maintained by each participant
        if (clusterParticipants != null && clusterParticipants.size() > 1 && serverConfig.serverParticipantsConsistencyCheckerPeriodSec > 0) {
            consistencyChecker = new ParticipantsConsistencyChecker(clusterParticipants, metrics);
            logger.info("Scheduling participants consistency checker with a period of {} secs", serverConfig.serverParticipantsConsistencyCheckerPeriodSec);
            consistencyCheckerScheduler = Utils.newScheduler(1, "consistency-checker-", false);
            consistencyCheckerTask = consistencyCheckerScheduler.scheduleAtFixedRate(consistencyChecker, 0, serverConfig.serverParticipantsConsistencyCheckerPeriodSec, TimeUnit.SECONDS);
        }
        logger.info("checking if node exists in clustermap host {} port {}", networkConfig.hostName, networkConfig.port);
        DataNodeId nodeId = clusterMap.getDataNodeId(networkConfig.hostName, networkConfig.port);
        if (nodeId == null) {
            throw new IllegalArgumentException("The node " + networkConfig.hostName + ":" + networkConfig.port + " is not present in the clustermap. Failing to start the datanode");
        }
        AccountServiceFactory accountServiceFactory = Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
        AccountService accountService = accountServiceFactory.getAccountService();
        StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
        // In most cases, there should be only one participant in the clusterParticipants list. If there are more than one
        // and some components require sole participant, the first one in the list will be primary participant.
        storageManager = new StorageManager(storeConfig, diskManagerConfig, scheduler, registry, storeKeyFactory, clusterMap, nodeId, new BlobStoreHardDelete(), clusterParticipants, time, new BlobStoreRecovery(), accountService);
        storageManager.start();
        SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
        if (clusterMapConfig.clusterMapEnableHttp2Replication) {
            connectionPool = new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
        } else {
            connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
        }
        connectionPool.start();
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
        Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
        replicationManager = new ReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, clusterParticipants.get(0), skipPredicate);
        replicationManager.start();
        if (replicationConfig.replicationEnabledWithVcrCluster) {
            logger.info("Creating Helix cluster spectator for cloud to store replication.");
            vcrClusterSpectator = _vcrClusterAgentsFactory.getVcrClusterSpectator(cloudConfig, clusterMapConfig);
            cloudToStoreReplicationManager = new CloudToStoreReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, vcrClusterSpectator, clusterParticipants.get(0));
            cloudToStoreReplicationManager.start();
        }
        logger.info("Creating StatsManager to publish stats");
        accountStatsMySqlStore = statsConfig.enableMysqlReport ? (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(properties, clusterMapConfig, registry).getAccountStatsStore() : null;
        statsManager = new StatsManager(storageManager, clusterMap.getReplicaIds(nodeId), registry, statsConfig, time, clusterParticipants.get(0), accountStatsMySqlStore, accountService);
        if (serverConfig.serverStatsPublishLocalEnabled) {
            statsManager.start();
        }
        ArrayList<Port> ports = new ArrayList<Port>();
        ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
        if (nodeId.hasSSLPort()) {
            ports.add(new Port(nodeId.getSSLPort(), PortType.SSL));
        }
        networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
        FindTokenHelper findTokenHelper = new FindTokenHelper(storeKeyFactory, replicationConfig);
        requests = new AmbryServerRequests(storageManager, networkServer.getRequestResponseChannel(), clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
        requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, networkServer.getRequestResponseChannel(), requests);
        networkServer.start();
        // Start netty http2 server
        if (nodeId.hasHttp2Port()) {
            NettyConfig nettyConfig = new NettyConfig(properties);
            NettyMetrics nettyMetrics = new NettyMetrics(registry);
            Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
            Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
            logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", nodeId.getHttp2Port());
            NettyServerRequestResponseChannel requestResponseChannel =
                new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
            AmbryServerRequests ambryServerRequestsForHttp2 =
                new AmbryServerRequests(storageManager, requestResponseChannel, clusterMap, nodeId, registry, metrics,
                    findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig,
                    storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
            requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads,
                requestResponseChannel, ambryServerRequestsForHttp2);
            NioServerFactory nioServerFactory =
                new StorageServerNettyFactory(nodeId.getHttp2Port(), requestResponseChannel, sslFactory, nettyConfig,
                    http2ClientConfig, metrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
            nettyHttp2Server = nioServerFactory.getNioServer();
            nettyHttp2Server.start();
        }
        // Other code
        List<AmbryStatsReport> ambryStatsReports = new ArrayList<>();
        Set<String> validStatsTypes = new HashSet<>();
        for (StatsReportType type : StatsReportType.values()) {
            validStatsTypes.add(type.toString());
        }
        if (serverConfig.serverStatsPublishReportEnabled) {
            serverConfig.serverStatsReportsToPublish.forEach(e -> {
                if (validStatsTypes.contains(e)) {
                    ambryStatsReports.add(new AmbryStatsReportImpl(serverConfig.serverQuotaStatsAggregateIntervalInMinutes, StatsReportType.valueOf(e)));
                }
            });
        }
        if (vcrClusterSpectator != null) {
            vcrClusterSpectator.spectate();
        }
        Callback<StatsSnapshot> accountServiceCallback = new AccountServiceCallback(accountService);
        for (ClusterParticipant clusterParticipant : clusterParticipants) {
            clusterParticipant.participate(ambryStatsReports, accountStatsMySqlStore, accountServiceCallback);
        }
        if (nettyInternalMetrics != null) {
            nettyInternalMetrics.start();
            logger.info("NettyInternalMetric starts");
        }
        logger.info("started");
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        metrics.serverStartTimeInMs.update(processingTime);
        logger.info("Server startup time in Ms {}", processingTime);
    } catch (Exception e) {
        logger.error("Error during startup", e);
        throw new InstantiationException("failure during startup " + e);
    }
}
Also used : DiskManagerConfig(com.github.ambry.config.DiskManagerConfig) SSLFactory(com.github.ambry.commons.SSLFactory) Http2ClientMetrics(com.github.ambry.network.http2.Http2ClientMetrics) Port(com.github.ambry.network.Port) StorageManager(com.github.ambry.store.StorageManager) ReplicationSkipPredicate(com.github.ambry.replication.ReplicationSkipPredicate) ArrayList(java.util.ArrayList) NettySslHttp2Factory(com.github.ambry.commons.NettySslHttp2Factory) NettyConfig(com.github.ambry.config.NettyConfig) ServerConfig(com.github.ambry.config.ServerConfig) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) StoreKeyConverterFactory(com.github.ambry.store.StoreKeyConverterFactory) AccountStatsMySqlStoreFactory(com.github.ambry.accountstats.AccountStatsMySqlStoreFactory) HashSet(java.util.HashSet) CloudToStoreReplicationManager(com.github.ambry.replication.CloudToStoreReplicationManager) ReplicationManager(com.github.ambry.replication.ReplicationManager) ReplicationConfig(com.github.ambry.config.ReplicationConfig) Http2BlockingChannelPool(com.github.ambry.network.http2.Http2BlockingChannelPool) Http2ClientConfig(com.github.ambry.config.Http2ClientConfig) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) BlockingChannelConnectionPool(com.github.ambry.network.BlockingChannelConnectionPool) AccountServiceCallback(com.github.ambry.account.AccountServiceCallback) Http2ServerMetrics(com.github.ambry.network.http2.Http2ServerMetrics) BlobStoreRecovery(com.github.ambry.messageformat.BlobStoreRecovery) RequestHandlerPool(com.github.ambry.protocol.RequestHandlerPool) StoreConfig(com.github.ambry.config.StoreConfig) DataNodeId(com.github.ambry.clustermap.DataNodeId) AccountService(com.github.ambry.account.AccountService) ConnectionPoolConfig(com.github.ambry.config.ConnectionPoolConfig) CloudConfig(com.github.ambry.config.CloudConfig) StorageServerNettyFactory(com.github.ambry.rest.StorageServerNettyFactory) AccountStatsMySqlStore(com.github.ambry.accountstats.AccountStatsMySqlStore) SSLConfig(com.github.ambry.config.SSLConfig) StatsManagerConfig(com.github.ambry.config.StatsManagerConfig) SocketServer(com.github.ambry.network.SocketServer) NettyMetrics(com.github.ambry.rest.NettyMetrics) FindTokenHelper(com.github.ambry.replication.FindTokenHelper) NettyServerRequestResponseChannel(com.github.ambry.network.NettyServerRequestResponseChannel) NetworkConfig(com.github.ambry.config.NetworkConfig) BlobStoreHardDelete(com.github.ambry.messageformat.BlobStoreHardDelete) IOException(java.io.IOException) MessageInfo(com.github.ambry.store.MessageInfo) NioServerFactory(com.github.ambry.rest.NioServerFactory) CloudToStoreReplicationManager(com.github.ambry.replication.CloudToStoreReplicationManager) ClusterParticipant(com.github.ambry.clustermap.ClusterParticipant) AccountServiceFactory(com.github.ambry.account.AccountServiceFactory)
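The startup sequence above hands ReplicationManager a Predicate<MessageInfo> so that replication threads can decide, per message, whether a message needs to be copied. Below is a minimal sketch of a hand-rolled predicate; it is not the actual ReplicationSkipPredicate logic, and the isDeleted()/isExpired() accessors on MessageInfo are assumptions rather than calls taken from the examples above.

import com.github.ambry.store.MessageInfo;
import java.util.function.Predicate;

/**
 * A sketch of a replication skip predicate: returning true is taken here to
 * mean the replication thread may skip the message. It skips messages that
 * are already deleted or expired (assumed MessageInfo accessors).
 */
public class DeletedOrExpiredSkipPredicate implements Predicate<MessageInfo> {
    @Override
    public boolean test(MessageInfo messageInfo) {
        // Deleted or expired messages carry no live data worth replicating in this sketch.
        return messageInfo.isDeleted() || messageInfo.isExpired();
    }
}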

Example 55 with MessageInfo

use of com.github.ambry.store.MessageInfo in project ambry by linkedin.

the class VcrRecoveryTest method testGetOnRecoveryNode.

/**
 * Do a get on the recovery node to verify that all the blob IDs uploaded to the VCR node have been recovered there.
 * @param blobIdToSizeMap {@link Map} of blob ID to the size of the blob uploaded to the VCR node.
 * @throws IOException if the request cannot be sent or the response cannot be read.
 */
private void testGetOnRecoveryNode(Map<BlobId, Integer> blobIdToSizeMap) throws IOException {
    ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(recoveryNodePort, "localhost", null, null);
    channel.connect();
    AtomicInteger correlationIdGenerator = new AtomicInteger(0);
    List<PartitionRequestInfo> partitionRequestInfoList = Collections.singletonList(new PartitionRequestInfo(partitionId, blobIds));
    GetRequest getRequest = new GetRequest(correlationIdGenerator.incrementAndGet(),
        GetRequest.Replication_Client_Id_Prefix + recoveryNode.getHostname(), MessageFormatFlags.All,
        partitionRequestInfoList,
        new ReplicationConfig(new VerifiableProperties(recoveryProperties)).replicationIncludeAll
            ? GetOption.Include_All : GetOption.None);
    channel.send(getRequest);
    GetResponse getResponse = GetResponse.readFrom(channel.receive().getInputStream(), recoveryCluster.getClusterMap());
    for (PartitionResponseInfo partitionResponseInfo : getResponse.getPartitionResponseInfoList()) {
        assertEquals("Error in getting the recovered blobs", ServerErrorCode.No_Error, partitionResponseInfo.getErrorCode());
        // The per-blob message overhead used to be 272 bytes; two new 4-byte blob property fields add 8 bytes, for 280 total.
        for (MessageInfo messageInfo : partitionResponseInfo.getMessageInfoList()) {
            assertEquals(blobIdToSizeMap.get(messageInfo.getStoreKey()) + 280, messageInfo.getSize());
        }
    }
}
Also used : ReplicationConfig(com.github.ambry.config.ReplicationConfig) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) VerifiableProperties(com.github.ambry.config.VerifiableProperties) GetRequest(com.github.ambry.protocol.GetRequest) ConnectedChannel(com.github.ambry.network.ConnectedChannel) PartitionResponseInfo(com.github.ambry.protocol.PartitionResponseInfo) PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo) GetResponse(com.github.ambry.protocol.GetResponse) MessageInfo(com.github.ambry.store.MessageInfo)
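The assertion in testGetOnRecoveryNode encodes a fixed per-message overhead on top of the uploaded blob size: the overhead used to be 272 bytes and grew to 280 after two 4-byte blob property fields were added. A small sketch of that bookkeeping follows; the class, constant, and method names are hypothetical, introduced only to make the arithmetic explicit.

import static org.junit.Assert.assertEquals;

import com.github.ambry.store.MessageInfo;
import java.util.Map;

public final class RecoverySizeCheck {
    // Hypothetical constants mirroring the comment in the test above.
    private static final int OLD_OVERHEAD_BYTES = 272;
    private static final int ADDED_BLOB_PROPERTY_BYTES = 4 + 4;
    static final int MESSAGE_OVERHEAD_BYTES = OLD_OVERHEAD_BYTES + ADDED_BLOB_PROPERTY_BYTES; // 280

    /** Asserts that a recovered MessageInfo reports the uploaded blob size plus the message overhead. */
    static void assertRecoveredSize(Map<?, Integer> blobIdToSizeMap, MessageInfo messageInfo) {
        int expected = blobIdToSizeMap.get(messageInfo.getStoreKey()) + MESSAGE_OVERHEAD_BYTES;
        assertEquals("Unexpected recovered blob size", expected, messageInfo.getSize());
    }
}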

Aggregations

MessageInfo (com.github.ambry.store.MessageInfo) 109
ArrayList (java.util.ArrayList) 49
StoreKey (com.github.ambry.store.StoreKey) 42
ByteBuffer (java.nio.ByteBuffer) 38
BlobId (com.github.ambry.commons.BlobId) 36
StoreException (com.github.ambry.store.StoreException) 30
DataInputStream (java.io.DataInputStream) 23
Test (org.junit.Test) 22
HashMap (java.util.HashMap) 21
MockPartitionId (com.github.ambry.clustermap.MockPartitionId) 19
PartitionId (com.github.ambry.clustermap.PartitionId) 19
IOException (java.io.IOException) 19
MockClusterMap (com.github.ambry.clustermap.MockClusterMap) 18
ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream) 18
InputStream (java.io.InputStream) 17
List (java.util.List) 16
ClusterMap (com.github.ambry.clustermap.ClusterMap) 15
Map (java.util.Map) 15
MockMessageWriteSet (com.github.ambry.store.MockMessageWriteSet) 13
HashSet (java.util.HashSet) 13