
Example 41 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.

From the class BlobIdTest, method generateAndAssertBadBlobId.

/**
 * Generates bad blobId strings and asserts that deserializing each of them fails.
 * @param version The version of BlobId.
 * @throws Exception Any unexpected exception.
 */
private void generateAndAssertBadBlobId(short version) throws Exception {
    List<String> invalidBlobIdLikeList = new ArrayList<>();
    PartitionId badPartitionId = new MockPartitionId(200000, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.EMPTY_LIST, 0);
    String goodUUID = UUID.randomUUID().toString();
    // Partition ID not in cluster map
    invalidBlobIdLikeList.add(buildBadBlobId(version, referenceType, referenceDatacenterId, referenceAccountId, referenceContainerId, badPartitionId, goodUUID.length(), goodUUID, ""));
    // UUID length too long
    invalidBlobIdLikeList.add(buildBadBlobId(version, referenceType, referenceDatacenterId, referenceAccountId, referenceContainerId, referencePartitionId, goodUUID.length() + 1, goodUUID, ""));
    // UUID length too short
    invalidBlobIdLikeList.add(buildBadBlobId(version, referenceType, referenceDatacenterId, referenceAccountId, referenceContainerId, referencePartitionId, goodUUID.length() - 1, goodUUID, ""));
    // UUID length is negative. Only matters for blob IDs with the older UUID serialization format
    if (version < BLOB_ID_V6) {
        invalidBlobIdLikeList.add(buildBadBlobId(version, referenceType, referenceDatacenterId, referenceAccountId, referenceContainerId, referencePartitionId, -1, goodUUID, ""));
    }
    // Extra characters after UUID
    invalidBlobIdLikeList.add(buildBadBlobId(version, referenceType, referenceDatacenterId, referenceAccountId, referenceContainerId, referencePartitionId, goodUUID.length(), goodUUID, "EXTRA"));
    // Invalid version number
    invalidBlobIdLikeList.add(buildBadBlobId((short) (-1), referenceType, referenceDatacenterId, referenceAccountId, referenceContainerId, referencePartitionId, goodUUID.length(), goodUUID, ""));
    // Empty blobId
    invalidBlobIdLikeList.add("");
    // Blob ID string too short
    invalidBlobIdLikeList.add("AA");
    for (String blobIdLike : invalidBlobIdLikeList) {
        try {
            new BlobId(blobIdLike, referenceClusterMap);
            fail("Expected blobId creation to fail with blobId string " + blobIdLike);
        } catch (Exception e) {
            // expected
        }
    }
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) ArrayList(java.util.ArrayList) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) IOException(java.io.IOException)
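
For contrast with the invalid cases above, a minimal sketch of the happy path: a blob ID built against a partition the cluster map actually contains should round-trip through the string constructor without throwing. This is a standalone illustration (the class name and the short account/container values are arbitrary), not part of BlobIdTest.

import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.clustermap.MockClusterMap;
import com.github.ambry.clustermap.PartitionId;
import com.github.ambry.commons.BlobId;
import com.github.ambry.commons.CommonTestUtils;

public class BlobIdRoundTripSketch {
    public static void main(String[] args) throws Exception {
        MockClusterMap clusterMap = new MockClusterMap();
        // Unlike badPartitionId above, this partition is known to the cluster map.
        PartitionId partitionId = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
        BlobId original = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE,
            ClusterMap.UNKNOWN_DATACENTER_ID, (short) 101, (short) 5, partitionId, false, BlobId.BlobDataType.DATACHUNK);
        // A well-formed ID string deserializes cleanly; the bad strings above all throw here.
        BlobId reconstructed = new BlobId(original.getID(), clusterMap);
        System.out.println(original.getID().equals(reconstructed.getID()));
    }
}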

Example 42 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.

From the class AmbryIdConverterFactoryTest, method ambryIdConverterNamedBlobTest.

@Test
public void ambryIdConverterNamedBlobTest() throws Exception {
    Properties properties = new Properties();
    VerifiableProperties verifiableProperties = new VerifiableProperties(properties);
    IdSigningService idSigningService = mock(IdSigningService.class);
    NamedBlobDb namedBlobDb = mock(NamedBlobDb.class);
    AmbryIdConverterFactory ambryIdConverterFactory = new AmbryIdConverterFactory(verifiableProperties, new MetricRegistry(), idSigningService, namedBlobDb);
    IdConverter idConverter = ambryIdConverterFactory.getIdConverter();
    assertNotNull("No IdConverter returned", idConverter);
    PartitionId partitionId = new MockPartitionId(partition, MockClusterMap.DEFAULT_PARTITION_CLASS);
    BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    List<String> idList = new ArrayList<>();
    idList.add(blobId.getID());
    for (String id : idList) {
        reset(idSigningService);
        reset(namedBlobDb);
        when(namedBlobDb.put(any())).thenReturn(CompletableFuture.completedFuture(new PutResult(new NamedBlobRecord("", "", "", id, Utils.Infinite_Time))));
        testConversionForNamedBlob(idConverter, RestMethod.PUT, null, id, id);
        verify(idSigningService, never()).getSignedId(any(), any());
        verify(namedBlobDb).put(any());
    }
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) NamedBlobDb(com.github.ambry.named.NamedBlobDb) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) MetricRegistry(com.codahale.metrics.MetricRegistry) ArrayList(java.util.ArrayList) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) PutResult(com.github.ambry.named.PutResult) NamedBlobRecord(com.github.ambry.named.NamedBlobRecord) Test(org.junit.Test)
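
The loop above leans on Mockito's stub-then-verify pattern (reset, when/thenReturn, verify with never()). Below is a self-contained sketch of that pattern, using a hypothetical Lookup interface in place of ambry's NamedBlobDb and IdSigningService.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class StubAndVerifySketch {
    // Hypothetical collaborator standing in for NamedBlobDb / IdSigningService.
    interface Lookup {
        String resolve(String key);
    }

    public static void main(String[] args) {
        Lookup lookup = mock(Lookup.class);
        // Stub: any argument returns a canned value, mirroring namedBlobDb.put(any()) above.
        when(lookup.resolve(any())).thenReturn("blobId-123");
        System.out.println(lookup.resolve("named/blob/path")); // prints blobId-123
        // Verify the interaction happened exactly once...
        verify(lookup).resolve(any());
        // ...then reset clears stubbing and recorded interactions, as at the top of each loop iteration above.
        reset(lookup);
        verify(lookup, never()).resolve(any());
    }
}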

Example 43 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.

From the class RequestResponseTest, method doReplicaMetadataRequestTest.

private void doReplicaMetadataRequestTest(short responseVersionToUse, short requestVersionToUse, short messageInfoToUse, ReplicaType replicaType) throws IOException {
    MessageInfoAndMetadataListSerde.AUTO_VERSION = messageInfoToUse;
    MockClusterMap clusterMap = new MockClusterMap();
    List<ReplicaMetadataRequestInfo> replicaMetadataRequestInfoList = new ArrayList<ReplicaMetadataRequestInfo>();
    ReplicaMetadataRequestInfo replicaMetadataRequestInfo = new ReplicaMetadataRequestInfo(new MockPartitionId(), new MockFindToken(0, 1000), "localhost", "path", replicaType, requestVersionToUse);
    replicaMetadataRequestInfoList.add(replicaMetadataRequestInfo);
    ReplicaMetadataRequest request = new ReplicaMetadataRequest(1, "id", replicaMetadataRequestInfoList, 1000, requestVersionToUse);
    DataInputStream requestStream = serAndPrepForRead(request, -1, true);
    ReplicaMetadataRequest replicaMetadataRequestFromBytes = ReplicaMetadataRequest.readFrom(requestStream, new MockClusterMap(), new MockFindTokenHelper());
    Assert.assertEquals(replicaMetadataRequestFromBytes.getMaxTotalSizeOfEntriesInBytes(), 1000);
    Assert.assertEquals(replicaMetadataRequestFromBytes.getReplicaMetadataRequestInfoList().size(), 1);
    request.release();
    try {
        new ReplicaMetadataRequest(1, "id", null, 12, requestVersionToUse);
        Assert.fail("Serializing should have failed");
    } catch (IllegalArgumentException e) {
        // expected. Nothing to do
    }
    try {
        new ReplicaMetadataRequestInfo(new MockPartitionId(), null, "localhost", "path", replicaType, requestVersionToUse);
        Assert.fail("Construction should have failed");
    } catch (IllegalArgumentException e) {
        // expected. Nothing to do
    }
    long operationTimeMs = SystemTime.getInstance().milliseconds() + TestUtils.RANDOM.nextInt();
    int numResponseInfos = 5;
    int numMessagesInEachResponseInfo = 200;
    List<ReplicaMetadataResponseInfo> replicaMetadataResponseInfoList = new ArrayList<>();
    for (int j = 0; j < numResponseInfos; j++) {
        List<MessageInfo> messageInfoList = new ArrayList<MessageInfo>();
        int totalSizeOfAllMessages = 0;
        for (int i = 0; i < numMessagesInEachResponseInfo; i++) {
            int msgSize = TestUtils.RANDOM.nextInt(1000) + 1;
            short accountId = Utils.getRandomShort(TestUtils.RANDOM);
            short containerId = Utils.getRandomShort(TestUtils.RANDOM);
            BlobId id = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), false, BlobId.BlobDataType.DATACHUNK);
            MessageInfo messageInfo = new MessageInfo(id, msgSize, false, false, true, Utils.Infinite_Time, null, accountId, containerId, operationTimeMs, (short) 1);
            messageInfoList.add(messageInfo);
            totalSizeOfAllMessages += msgSize;
        }
        ReplicaMetadataResponseInfo responseInfo = new ReplicaMetadataResponseInfo(clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), replicaType, new MockFindToken(0, 1000), messageInfoList, 1000, responseVersionToUse);
        Assert.assertEquals("Total size of messages not as expected", totalSizeOfAllMessages, responseInfo.getTotalSizeOfAllMessages());
        replicaMetadataResponseInfoList.add(responseInfo);
    }
    ReplicaMetadataResponse response = new ReplicaMetadataResponse(1234, "clientId", ServerErrorCode.No_Error, replicaMetadataResponseInfoList, responseVersionToUse);
    requestStream = serAndPrepForRead(response, -1, false);
    ReplicaMetadataResponse deserializedReplicaMetadataResponse = ReplicaMetadataResponse.readFrom(requestStream, new MockFindTokenHelper(), clusterMap);
    Assert.assertEquals(deserializedReplicaMetadataResponse.getCorrelationId(), 1234);
    Assert.assertEquals(deserializedReplicaMetadataResponse.getError(), ServerErrorCode.No_Error);
    Assert.assertEquals("ReplicaMetadataResponse list size mismatch ", numResponseInfos, deserializedReplicaMetadataResponse.getReplicaMetadataResponseInfoList().size());
    for (int j = 0; j < replicaMetadataResponseInfoList.size(); j++) {
        ReplicaMetadataResponseInfo originalMetadataResponse = replicaMetadataResponseInfoList.get(j);
        ReplicaMetadataResponseInfo replicaMetadataResponseInfo = deserializedReplicaMetadataResponse.getReplicaMetadataResponseInfoList().get(j);
        Assert.assertEquals("MsgInfo list size in ReplicaMetadataResponse mismatch ", numMessagesInEachResponseInfo, replicaMetadataResponseInfo.getMessageInfoList().size());
        Assert.assertEquals("Total size of messages not as expected", originalMetadataResponse.getTotalSizeOfAllMessages(), replicaMetadataResponseInfo.getTotalSizeOfAllMessages());
        List<MessageInfo> deserializedMsgInfoList = replicaMetadataResponseInfo.getMessageInfoList();
        for (int i = 0; i < originalMetadataResponse.getMessageInfoList().size(); i++) {
            MessageInfo originalMsgInfo = originalMetadataResponse.getMessageInfoList().get(i);
            MessageInfo msgInfo = deserializedMsgInfoList.get(i);
            Assert.assertEquals("MsgInfo size mismatch ", originalMsgInfo.getSize(), msgInfo.getSize());
            Assert.assertEquals("MsgInfo key mismatch ", originalMsgInfo.getStoreKey(), msgInfo.getStoreKey());
            Assert.assertEquals("MsgInfo expiration value mismatch ", Utils.Infinite_Time, msgInfo.getExpirationTimeInMs());
            if (response.getVersionId() >= ReplicaMetadataResponse.REPLICA_METADATA_RESPONSE_VERSION_V_3) {
                Assert.assertEquals("AccountId mismatch ", originalMsgInfo.getAccountId(), msgInfo.getAccountId());
                Assert.assertEquals("ContainerId mismatch ", originalMsgInfo.getContainerId(), msgInfo.getContainerId());
                Assert.assertEquals("OperationTime mismatch ", operationTimeMs, msgInfo.getOperationTimeMs());
            } else {
                Assert.assertEquals("AccountId mismatch ", UNKNOWN_ACCOUNT_ID, msgInfo.getAccountId());
                Assert.assertEquals("ContainerId mismatch ", UNKNOWN_CONTAINER_ID, msgInfo.getContainerId());
                Assert.assertEquals("OperationTime mismatch ", Utils.Infinite_Time, msgInfo.getOperationTimeMs());
            }
            if (messageInfoToUse >= MessageInfoAndMetadataListSerde.VERSION_6) {
                Assert.assertTrue(msgInfo.isUndeleted());
                Assert.assertEquals("LifeVersion mismatch", (short) 1, msgInfo.getLifeVersion());
            } else {
                Assert.assertFalse(msgInfo.isUndeleted());
                Assert.assertEquals("LifeVersion mismatch", (short) 0, msgInfo.getLifeVersion());
            }
        }
    }
    response.release();
    // to ensure that the toString() representation does not go overboard, a random bound check is executed here.
    // a rough estimate is that each response info should contribute about 500 chars to the toString() representation
    int maxLength = 100 + numResponseInfos * 500;
    Assert.assertTrue("toString() representation longer than " + maxLength + " characters", response.toString().length() < maxLength);
    // test toString() of a ReplicaMetadataResponseInfo without any messages
    ReplicaMetadataResponseInfo responseInfo = new ReplicaMetadataResponseInfo(clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), replicaType, new MockFindToken(0, 1000), Collections.emptyList(), 1000, responseVersionToUse);
    Assert.assertTrue("Length of toString() should be > 0", responseInfo.toString().length() > 0);
    // test toString() of a ReplicaMetadataResponse without any ReplicaMetadataResponseInfo
    response = new ReplicaMetadataResponse(1234, "clientId", ServerErrorCode.No_Error, Collections.emptyList(), responseVersionToUse);
    Assert.assertTrue("Length of toString() should be > 0", response.toString().length() > 0);
    response.release();
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) ArrayList(java.util.ArrayList) DataInputStream(java.io.DataInputStream) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) MessageInfo(com.github.ambry.store.MessageInfo) BlobId(com.github.ambry.commons.BlobId) MockClusterMap(com.github.ambry.clustermap.MockClusterMap)
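
The two try/fail/catch blocks above hand-roll the expected-exception idiom. On JUnit 4.13 and later the same checks can be expressed with Assert.assertThrows; the sketch below shows the pattern against a hypothetical validating constructor rather than ambry's ReplicaMetadataRequestInfo.

import org.junit.Assert;

public class AssertThrowsSketch {
    // Hypothetical value class with a validating constructor, standing in for ReplicaMetadataRequestInfo.
    static class RequestInfo {
        RequestInfo(String host, String path) {
            if (host == null || path == null) {
                throw new IllegalArgumentException("host and path must be non-null");
            }
        }
    }

    public static void main(String[] args) {
        // Equivalent to the manual try { ... fail(...) } catch (IllegalArgumentException e) { } blocks above.
        IllegalArgumentException e =
            Assert.assertThrows(IllegalArgumentException.class, () -> new RequestInfo(null, "path"));
        System.out.println(e.getMessage());
    }
}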

Example 44 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.

From the class RequestResponseTest, method testGetRequestResponse.

private void testGetRequestResponse(short getVersionToUse, short messageInfoAutoVersion) throws IOException {
    GetResponse.CURRENT_VERSION = getVersionToUse;
    MessageInfoAndMetadataListSerde.AUTO_VERSION = messageInfoAutoVersion;
    MockClusterMap clusterMap = new MockClusterMap();
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    BlobId id1 = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), false, BlobId.BlobDataType.DATACHUNK);
    ArrayList<BlobId> blobIdList = new ArrayList<BlobId>();
    blobIdList.add(id1);
    PartitionRequestInfo partitionRequestInfo1 = new PartitionRequestInfo(new MockPartitionId(), blobIdList);
    ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
    partitionRequestInfoList.add(partitionRequestInfo1);
    GetRequest getRequest = new GetRequest(1234, "clientId", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
    DataInputStream requestStream = serAndPrepForRead(getRequest, -1, true);
    GetRequest deserializedGetRequest = GetRequest.readFrom(requestStream, clusterMap);
    Assert.assertEquals(deserializedGetRequest.getClientId(), "clientId");
    Assert.assertEquals(deserializedGetRequest.getPartitionInfoList().size(), 1);
    Assert.assertEquals(deserializedGetRequest.getPartitionInfoList().get(0).getBlobIds().size(), 1);
    Assert.assertEquals(deserializedGetRequest.getPartitionInfoList().get(0).getBlobIds().get(0), id1);
    getRequest.release();
    // Test GetResponse with InputStream
    long operationTimeMs = SystemTime.getInstance().milliseconds() + TestUtils.RANDOM.nextInt();
    byte[] encryptionKey = TestUtils.getRandomBytes(256);
    MessageInfo messageInfo = new MessageInfo(id1, 1000, false, false, true, 1000, null, accountId, containerId, operationTimeMs, (short) 1);
    MessageMetadata messageMetadata = new MessageMetadata(ByteBuffer.wrap(encryptionKey));
    ArrayList<MessageInfo> messageInfoList = new ArrayList<>();
    ArrayList<MessageMetadata> messageMetadataList = new ArrayList<>();
    messageInfoList.add(messageInfo);
    messageMetadataList.add(messageMetadata);
    PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), messageInfoList, messageMetadataList);
    List<PartitionResponseInfo> partitionResponseInfoList = new ArrayList<PartitionResponseInfo>();
    partitionResponseInfoList.add(partitionResponseInfo);
    byte[] buf = TestUtils.getRandomBytes(1000);
    ByteArrayInputStream byteStream = new ByteArrayInputStream(buf);
    GetResponse response = new GetResponse(1234, "clientId", partitionResponseInfoList, byteStream, ServerErrorCode.No_Error);
    requestStream = serAndPrepForRead(response, -1, false);
    GetResponse deserializedGetResponse = GetResponse.readFrom(requestStream, clusterMap);
    Assert.assertEquals(deserializedGetResponse.getCorrelationId(), 1234);
    Assert.assertEquals(deserializedGetResponse.getError(), ServerErrorCode.No_Error);
    Assert.assertEquals(deserializedGetResponse.getPartitionResponseInfoList().size(), 1);
    Assert.assertEquals(deserializedGetResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().size(), 1);
    MessageInfo msgInfo = deserializedGetResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
    Assert.assertEquals(msgInfo.getSize(), 1000);
    Assert.assertEquals(msgInfo.getStoreKey(), id1);
    Assert.assertEquals(msgInfo.getExpirationTimeInMs(), 1000);
    Assert.assertEquals(deserializedGetResponse.getPartitionResponseInfoList().get(0).getMessageMetadataList().size(), 1);
    if (GetResponse.getCurrentVersion() >= GetResponse.GET_RESPONSE_VERSION_V_4) {
        MessageMetadata messageMetadataInResponse = deserializedGetResponse.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0);
        Assert.assertEquals(messageMetadata.getEncryptionKey().rewind(), messageMetadataInResponse.getEncryptionKey());
    } else {
        Assert.assertNull(deserializedGetResponse.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
    }
    if (GetResponse.getCurrentVersion() >= GetResponse.GET_RESPONSE_VERSION_V_3) {
        Assert.assertEquals("AccountId mismatch ", accountId, msgInfo.getAccountId());
        Assert.assertEquals("ConatinerId mismatch ", containerId, msgInfo.getContainerId());
        Assert.assertEquals("OperationTime mismatch ", operationTimeMs, msgInfo.getOperationTimeMs());
    } else {
        Assert.assertEquals("AccountId mismatch ", UNKNOWN_ACCOUNT_ID, msgInfo.getAccountId());
        Assert.assertEquals("ConatinerId mismatch ", UNKNOWN_CONTAINER_ID, msgInfo.getContainerId());
        Assert.assertEquals("OperationTime mismatch ", Utils.Infinite_Time, msgInfo.getOperationTimeMs());
    }
    if (messageInfoAutoVersion >= MessageInfoAndMetadataListSerde.VERSION_6) {
        Assert.assertTrue(msgInfo.isUndeleted());
        Assert.assertEquals("LifeVersion mismatch", (short) 1, msgInfo.getLifeVersion());
    } else {
        Assert.assertFalse(msgInfo.isUndeleted());
        Assert.assertEquals("LifeVersion mismatch", (short) 0, msgInfo.getLifeVersion());
    }
    response.release();
    // Test GetResponse with Send
    for (boolean useComposite : new boolean[] { false, true }) {
        for (boolean withContent : new boolean[] { false, true }) {
            operationTimeMs = SystemTime.getInstance().milliseconds() + TestUtils.RANDOM.nextInt();
            encryptionKey = TestUtils.getRandomBytes(256);
            messageInfo = new MessageInfo(id1, 1000, false, false, true, 1000, null, accountId, containerId, operationTimeMs, (short) 1);
            messageMetadata = new MessageMetadata(ByteBuffer.wrap(encryptionKey));
            messageInfoList.clear();
            messageMetadataList.clear();
            messageInfoList.add(messageInfo);
            messageMetadataList.add(messageMetadata);
            partitionResponseInfo = new PartitionResponseInfo(clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), messageInfoList, messageMetadataList);
            partitionResponseInfoList.clear();
            partitionResponseInfoList.add(partitionResponseInfo);
            Send send;
            if (withContent) {
                send = new SendWithContent(1000, useComposite);
            } else {
                send = new SendWithoutContent(1000, useComposite);
            }
            response = new GetResponse(1234, "clientId", partitionResponseInfoList, send, ServerErrorCode.No_Error);
            requestStream = serAndPrepForRead(response, -1, false);
            deserializedGetResponse = GetResponse.readFrom(requestStream, clusterMap);
            Assert.assertEquals(deserializedGetResponse.getCorrelationId(), 1234);
            Assert.assertEquals(deserializedGetResponse.getError(), ServerErrorCode.No_Error);
            Assert.assertEquals(deserializedGetResponse.getPartitionResponseInfoList().size(), 1);
            Assert.assertEquals(deserializedGetResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().size(), 1);
            msgInfo = deserializedGetResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
            Assert.assertEquals(msgInfo.getSize(), 1000);
            Assert.assertEquals(msgInfo.getStoreKey(), id1);
            Assert.assertEquals(msgInfo.getExpirationTimeInMs(), 1000);
            Assert.assertEquals(deserializedGetResponse.getPartitionResponseInfoList().get(0).getMessageMetadataList().size(), 1);
            response.release();
        }
    }
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) ArrayList(java.util.ArrayList) DataInputStream(java.io.DataInputStream) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) MessageInfo(com.github.ambry.store.MessageInfo) Send(com.github.ambry.network.Send) MessageMetadata(com.github.ambry.messageformat.MessageMetadata) ByteArrayInputStream(java.io.ByteArrayInputStream) BlobId(com.github.ambry.commons.BlobId) MockClusterMap(com.github.ambry.clustermap.MockClusterMap)
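
One detail worth noting in the V4 branch above: the expected encryption key is rewound before the comparison because ByteBuffer equality only considers the bytes between the current position and the limit, so a buffer whose position has advanced (for example after being written out during serialization) would compare unequal even with identical contents. A small standalone sketch of that behavior:

import java.nio.ByteBuffer;

public class ByteBufferEqualitySketch {
    public static void main(String[] args) {
        ByteBuffer a = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
        ByteBuffer b = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
        System.out.println(a.equals(b)); // true: same remaining bytes
        a.get();                         // advance a's position past the first byte
        System.out.println(a.equals(b)); // false: equality only looks at remaining bytes
        a.rewind();                      // reset position to 0
        System.out.println(a.equals(b)); // true again
    }
}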

Example 45 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.

From the class ReplicationTest, method onReplicaAddedOrRemovedCallbackTest.

/**
 * Test cluster map change callback in {@link ReplicationManager} when any remote replicas are added or removed.
 * Test setup: attempt to add 3 replicas and remove 3 replicas respectively. The three replicas are picked as follows:
 *   (1) 1st replica is on the current node (should be skipped)
 *   (2) 2nd replica is on a remote node and shares a partition with the current node (should be added or removed)
 *   (3) 3rd replica is on a remote node but does not share a partition with the current node (should be skipped)
 * @throws Exception
 */
@Test
public void onReplicaAddedOrRemovedCallbackTest() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    StoreConfig storeConfig = new StoreConfig(verifiableProperties);
    // pick a node with no special partition as current node
    Set<DataNodeId> specialPartitionNodes = clusterMap.getSpecialPartition().getReplicaIds().stream().map(ReplicaId::getDataNodeId).collect(Collectors.toSet());
    DataNodeId currentNode = clusterMap.getDataNodes().stream().filter(d -> !specialPartitionNodes.contains(d)).findFirst().get();
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    StorageManager storageManager = new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true), new MetricRegistry(), null, clusterMap, currentNode, null, null, new MockTime(), null, new InMemAccountService(false, false));
    storageManager.start();
    MockReplicationManager replicationManager = new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap, currentNode, storeKeyConverterFactory, null);
    ClusterMapChangeListener clusterMapChangeListener = clusterMap.getClusterMapChangeListener();
    // find the special partition (not on current node) and get an irrelevant replica from it
    PartitionId absentPartition = clusterMap.getSpecialPartition();
    ReplicaId irrelevantReplica = absentPartition.getReplicaIds().get(0);
    // find an existing replica on current node and one of its peer replicas on remote node
    ReplicaId existingReplica = clusterMap.getReplicaIds(currentNode).get(0);
    ReplicaId peerReplicaToRemove = existingReplica.getPartitionId().getReplicaIds().stream().filter(r -> r != existingReplica).findFirst().get();
    // create a new node and place a peer of existing replica on it.
    MockDataNodeId remoteNode = createDataNode(getListOfPorts(PLAIN_TEXT_PORT_START_NUMBER + 10, SSL_PORT_START_NUMBER + 10, HTTP2_PORT_START_NUMBER + 10), clusterMap.getDatacenterName((byte) 0), 3);
    ReplicaId addedReplica = new MockReplicaId(remoteNode.getPort(), (MockPartitionId) existingReplica.getPartitionId(), remoteNode, 0);
    // populate added replica and removed replica lists
    List<ReplicaId> replicasToAdd = new ArrayList<>(Arrays.asList(existingReplica, addedReplica, irrelevantReplica));
    List<ReplicaId> replicasToRemove = new ArrayList<>(Arrays.asList(existingReplica, peerReplicaToRemove, irrelevantReplica));
    PartitionInfo partitionInfo = replicationManager.getPartitionToPartitionInfoMap().get(existingReplica.getPartitionId());
    assertNotNull("PartitionInfo is not found", partitionInfo);
    RemoteReplicaInfo peerReplicaInfo = partitionInfo.getRemoteReplicaInfos().stream().filter(info -> info.getReplicaId() == peerReplicaToRemove).findFirst().get();
    // get the replica-thread for this peer replica
    ReplicaThread peerReplicaThread = peerReplicaInfo.getReplicaThread();
    // Test Case 1: replication manager encountered exception during startup (remote replica addition/removal will be skipped)
    replicationManager.startWithException();
    clusterMapChangeListener.onReplicaAddedOrRemoved(replicasToAdd, replicasToRemove);
    // verify that PartitionInfo stays unchanged
    verifyRemoteReplicaInfo(partitionInfo, addedReplica, false);
    verifyRemoteReplicaInfo(partitionInfo, peerReplicaToRemove, true);
    // Test Case 2: startup latch is interrupted
    CountDownLatch initialLatch = replicationManager.startupLatch;
    CountDownLatch mockLatch = Mockito.mock(CountDownLatch.class);
    doThrow(new InterruptedException()).when(mockLatch).await();
    replicationManager.startupLatch = mockLatch;
    try {
        clusterMapChangeListener.onReplicaAddedOrRemoved(replicasToAdd, replicasToRemove);
        fail("should fail because startup latch is interrupted");
    } catch (IllegalStateException e) {
        // expected
    }
    replicationManager.startupLatch = initialLatch;
    // Test Case 3: replication manager is successfully started
    replicationManager.start();
    clusterMapChangeListener.onReplicaAddedOrRemoved(replicasToAdd, replicasToRemove);
    // verify that PartitionInfo has latest remote replica infos
    verifyRemoteReplicaInfo(partitionInfo, addedReplica, true);
    verifyRemoteReplicaInfo(partitionInfo, peerReplicaToRemove, false);
    verifyRemoteReplicaInfo(partitionInfo, irrelevantReplica, false);
    // verify new added replica is assigned to a certain thread
    ReplicaThread replicaThread = replicationManager.getDataNodeIdToReplicaThreadMap().get(addedReplica.getDataNodeId());
    assertNotNull("There is no ReplicaThread assocated with new replica", replicaThread);
    Optional<RemoteReplicaInfo> findResult = replicaThread.getRemoteReplicaInfos().get(remoteNode).stream().filter(info -> info.getReplicaId() == addedReplica).findAny();
    assertTrue("New added remote replica info should exist in corresponding thread", findResult.isPresent());
    // verify the removed replica info's thread is null
    assertNull("Thread in removed replica info should be null", peerReplicaInfo.getReplicaThread());
    findResult = peerReplicaThread.getRemoteReplicaInfos().get(peerReplicaToRemove.getDataNodeId()).stream().filter(info -> info.getReplicaId() == peerReplicaToRemove).findAny();
    assertFalse("Previous replica thread should not contain RemoteReplicaInfo that is already removed", findResult.isPresent());
    storageManager.shutdown();
}
Also used : DiskManagerConfig(com.github.ambry.config.DiskManagerConfig) CoreMatchers(org.hamcrest.CoreMatchers) Arrays(java.util.Arrays) StorageManager(com.github.ambry.store.StorageManager) StoreKeyConverter(com.github.ambry.store.StoreKeyConverter) DataNodeId(com.github.ambry.clustermap.DataNodeId) Random(java.util.Random) ByteBuffer(java.nio.ByteBuffer) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) PortType(com.github.ambry.network.PortType) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TestUtils(com.github.ambry.utils.TestUtils) Map(java.util.Map) DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) Parameterized(org.junit.runners.Parameterized) ReplicationConfig(com.github.ambry.config.ReplicationConfig) Container(com.github.ambry.account.Container) DiskManagerConfig(com.github.ambry.config.DiskManagerConfig) Predicate(java.util.function.Predicate) ValidatingTransformer(com.github.ambry.messageformat.ValidatingTransformer) Collection(java.util.Collection) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) Set(java.util.Set) Utils(com.github.ambry.utils.Utils) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) Collectors(java.util.stream.Collectors) ConnectedChannel(com.github.ambry.network.ConnectedChannel) CountDownLatch(java.util.concurrent.CountDownLatch) StoreKey(com.github.ambry.store.StoreKey) List(java.util.List) ReplicaMetadataResponse(com.github.ambry.protocol.ReplicaMetadataResponse) PartitionStateChangeListener(com.github.ambry.clustermap.PartitionStateChangeListener) MockTime(com.github.ambry.utils.MockTime) Account(com.github.ambry.account.Account) Optional(java.util.Optional) TransitionErrorCode(com.github.ambry.clustermap.StateTransitionException.TransitionErrorCode) MockId(com.github.ambry.store.MockId) InMemAccountService(com.github.ambry.account.InMemAccountService) AmbryReplicaSyncUpManager(com.github.ambry.clustermap.AmbryReplicaSyncUpManager) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) ResponseHandler(com.github.ambry.commons.ResponseHandler) ClusterMapChangeListener(com.github.ambry.clustermap.ClusterMapChangeListener) RunWith(org.junit.runner.RunWith) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Transformer(com.github.ambry.store.Transformer) MockHelixParticipant(com.github.ambry.clustermap.MockHelixParticipant) CommonTestUtils(com.github.ambry.commons.CommonTestUtils) ReplicaMetadataResponseInfo(com.github.ambry.protocol.ReplicaMetadataResponseInfo) MockStoreKeyConverterFactory(com.github.ambry.store.MockStoreKeyConverterFactory) Time(com.github.ambry.utils.Time) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) MockMessageWriteSet(com.github.ambry.store.MockMessageWriteSet) ReplicaState(com.github.ambry.clustermap.ReplicaState) StateModelListenerType(com.github.ambry.clustermap.StateModelListenerType) StoreConfig(com.github.ambry.config.StoreConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) Pair(com.github.ambry.utils.Pair) Iterator(java.util.Iterator) ReplicaType(com.github.ambry.clustermap.ReplicaType) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ClusterMap(com.github.ambry.clustermap.ClusterMap) Test(org.junit.Test) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) File(java.io.File) 
TimeUnit(java.util.concurrent.TimeUnit) Store(com.github.ambry.store.Store) Mockito(org.mockito.Mockito) MessageInfo(com.github.ambry.store.MessageInfo) StateTransitionException(com.github.ambry.clustermap.StateTransitionException) ReplicaId(com.github.ambry.clustermap.ReplicaId) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Port(com.github.ambry.network.Port) Comparator(java.util.Comparator) Assert(org.junit.Assert) Collections(java.util.Collections) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) ClusterMapChangeListener(com.github.ambry.clustermap.ClusterMapChangeListener) StorageManager(com.github.ambry.store.StorageManager) ArrayList(java.util.ArrayList) InMemAccountService(com.github.ambry.account.InMemAccountService) MockTime(com.github.ambry.utils.MockTime) MockStoreKeyConverterFactory(com.github.ambry.store.MockStoreKeyConverterFactory) MetricRegistry(com.codahale.metrics.MetricRegistry) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) CountDownLatch(java.util.concurrent.CountDownLatch) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) ReplicaId(com.github.ambry.clustermap.ReplicaId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) StoreConfig(com.github.ambry.config.StoreConfig) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) Test(org.junit.Test)
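
The Javadoc above spells out which of the changed replicas the replication manager should actually act on. Below is a hedged sketch of that filtering as a standalone helper (not ambry's ReplicationManager implementation): skip replicas hosted on the current node, skip replicas whose partition has no local copy, and keep only remote peers of locally hosted partitions.

import com.github.ambry.clustermap.DataNodeId;
import com.github.ambry.clustermap.PartitionId;
import com.github.ambry.clustermap.ReplicaId;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

class ReplicaFilterSketch {
    /**
     * @param currentNode the node whose replication state is being updated
     * @param localReplicas replicas hosted on currentNode
     * @param changedReplicas replicas reported as added or removed by the cluster map callback
     * @return the subset of changedReplicas that this node should act on
     */
    static List<ReplicaId> remotePeersOfLocalPartitions(DataNodeId currentNode, List<ReplicaId> localReplicas,
        List<ReplicaId> changedReplicas) {
        Set<PartitionId> localPartitions =
            localReplicas.stream().map(ReplicaId::getPartitionId).collect(Collectors.toSet());
        return changedReplicas.stream()
            // case (1): replica on the current node itself, skip
            .filter(r -> !r.getDataNodeId().equals(currentNode))
            // case (3): replica of a partition with no local copy, skip; case (2) survives
            .filter(r -> localPartitions.contains(r.getPartitionId()))
            .collect(Collectors.toList());
    }
}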

Aggregations

Usage counts for classes that appear alongside MockPartitionId in the indexed examples:

MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 66
Test (org.junit.Test): 51
PartitionId (com.github.ambry.clustermap.PartitionId): 33
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 31
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 26
ArrayList (java.util.ArrayList): 26
ReplicaId (com.github.ambry.clustermap.ReplicaId): 25
BlobId (com.github.ambry.commons.BlobId): 23
Port (com.github.ambry.network.Port): 20
MockReplicaId (com.github.ambry.clustermap.MockReplicaId): 17
MetricRegistry (com.codahale.metrics.MetricRegistry): 11
CloudBlobMetadata (com.github.ambry.cloud.CloudBlobMetadata): 10
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 9
StorageManager (com.github.ambry.store.StorageManager): 9
DataNodeId (com.github.ambry.clustermap.DataNodeId): 8
BlobStoreTest (com.github.ambry.store.BlobStoreTest): 8
Store (com.github.ambry.store.Store): 7
ByteArrayInputStream (java.io.ByteArrayInputStream): 7
Properties (java.util.Properties): 7
NettyByteBufDataInputStream (com.github.ambry.utils.NettyByteBufDataInputStream): 6