
Example 51 with MockDataNodeId

Use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.

From the class VcrRecoveryTest, method setup.

/**
 * Create a cluster with a vcr node and a recovery (ambry data) node.
 * @throws Exception if cluster or server setup fails
 */
@Before
public void setup() throws Exception {
    String vcrMountPath = ClusterMapSnapshotConstants.CLOUD_REPLICA_MOUNT + "/1";
    recoveryProperties = new Properties();
    recoveryProperties.setProperty("replication.metadata.request.version", "2");
    // create vcr node
    List<Port> vcrPortList = new ArrayList<>(2);
    Port vcrClusterMapPort = new Port(12310, PortType.PLAINTEXT);
    Port vcrSslPort = new Port(12410, PortType.SSL);
    vcrPortList.add(vcrClusterMapPort);
    vcrPortList.add(vcrSslPort);
    MockDataNodeId vcrNode = new MockDataNodeId("localhost", vcrPortList, Collections.singletonList(vcrMountPath), dcName);
    // create recovery node
    recoveryNodePort = new Port(12311, PortType.PLAINTEXT);
    ArrayList<Port> recoveryPortList = new ArrayList<>(2);
    recoveryPortList.add(recoveryNodePort);
    recoveryNode = MockClusterMap.createDataNode(recoveryPortList, dcName, 1);
    // create cluster for recovery
    recoveryCluster = MockCluster.createOneNodeRecoveryCluster(vcrNode, recoveryNode, dcName);
    partitionId = recoveryCluster.getClusterMap().getWritablePartitionIds(null).get(0);
    // Start ZK Server and Helix Controller.
    if (!zkInfo.isZkServerStarted()) {
        zkInfo.startZkServer();
    }
    helixControllerManager = VcrTestUtil.populateZkInfoAndStartController(zkConnectString, vcrClusterName, recoveryCluster.getClusterMap());
    Properties vcrProperties = VcrTestUtil.createVcrProperties(vcrNode.getDatacenterName(), vcrClusterName, zkConnectString, 12310, 12410, 12510, null);
    vcrProperties.putAll(recoveryProperties);
    NotificationSystem notificationSystem = new MockNotificationSystem(recoveryCluster.getClusterMap());
    // Create blobs and data for upload to vcr.
    int blobCount = 10;
    blobIds = ServerTestUtil.createBlobIds(blobCount, recoveryCluster.getClusterMap(), accountId, containerId, partitionId);
    // Create cloud destination and start vcr server.
    latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIds, recoveryCluster.getClusterMap());
    CloudDestinationFactory cloudDestinationFactory = new LatchBasedInMemoryCloudDestinationFactory(latchBasedInMemoryCloudDestination);
    vcrServer = VcrTestUtil.createVcrServer(new VerifiableProperties(vcrProperties), recoveryCluster.getClusterAgentsFactory(), notificationSystem, cloudDestinationFactory);
    vcrServer.startup();
    // start ambry server with data node
    recoveryCluster.initializeServers(notificationSystem, vcrNode, recoveryProperties);
    recoveryCluster.startServers();
}
Also used : VerifiableProperties (com.github.ambry.config.VerifiableProperties), Port (com.github.ambry.network.Port), ArrayList (java.util.ArrayList), NotificationSystem (com.github.ambry.notification.NotificationSystem), CloudDestinationFactory (com.github.ambry.cloud.CloudDestinationFactory), LatchBasedInMemoryCloudDestinationFactory (com.github.ambry.cloud.LatchBasedInMemoryCloudDestinationFactory), Properties (java.util.Properties), LatchBasedInMemoryCloudDestination (com.github.ambry.cloud.LatchBasedInMemoryCloudDestination), MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId), Before (org.junit.Before)
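
The core pattern above, isolated: a MockDataNodeId is built from a hostname, a list of Ports, a list of mount paths, and a datacenter name. A minimal sketch follows, assuming the same imports as the example; the port numbers, mount path, and datacenter name are illustrative placeholders, not values from any real cluster.

// Minimal sketch of MockDataNodeId construction (values are placeholders).
List<Port> ports = new ArrayList<>();
ports.add(new Port(12310, PortType.PLAINTEXT));
ports.add(new Port(12410, PortType.SSL));
MockDataNodeId node = new MockDataNodeId("localhost", ports, Collections.singletonList("/mnt/1"), "DC1");
// The node then feeds other test utilities, e.g. its datacenter name:
String dcName = node.getDatacenterName();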

Example 52 with MockDataNodeId

Use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.

From the class StatsManagerTest, method testReplicaFromOfflineToBootstrap.

/**
 * Test state transition in stats manager from OFFLINE to BOOTSTRAP
 */
@Test
public void testReplicaFromOfflineToBootstrap() {
    MockStatsManager mockStatsManager = new MockStatsManager(storageManager, replicas, new MetricRegistry(), statsManagerConfig, clusterParticipant);
    // 1. verify stats manager's listener is registered
    assertTrue("Stats manager listener is found in cluster participant", clusterParticipant.getPartitionStateChangeListeners().containsKey(StateModelListenerType.StatsManagerListener));
    // 2. test partition not found
    try {
        clusterParticipant.onPartitionBecomeBootstrapFromOffline("InvalidPartition");
        fail("should fail because partition is not found");
    } catch (StateTransitionException e) {
        assertEquals("Transition error doesn't match", ReplicaNotFound, e.getErrorCode());
    }
    // 3. create a new partition and test replica addition failure
    PartitionId newPartition = new MockPartitionId(3, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    ((MockStorageManager) storageManager).getReplicaReturnVal = newPartition.getReplicaIds().get(0);
    mockStatsManager.returnValOfAddReplica = false;
    try {
        clusterParticipant.onPartitionBecomeBootstrapFromOffline(newPartition.toPathString());
        fail("should fail because adding replica to stats manager failed");
    } catch (StateTransitionException e) {
        assertEquals("Transition error code doesn't match", ReplicaOperationFailure, e.getErrorCode());
    }
    // 4. test replica addition success during Offline-To-Bootstrap transition
    assertFalse("Before adding new replica, in-mem data structure should not contain new partition", mockStatsManager.partitionToReplicaMap.containsKey(newPartition));
    mockStatsManager.returnValOfAddReplica = null;
    clusterParticipant.onPartitionBecomeBootstrapFromOffline(newPartition.toPathString());
    assertTrue("After adding new replica, in-mem data structure should contain new partition", mockStatsManager.partitionToReplicaMap.containsKey(newPartition));
    // 5. state transition on existing replica should be no-op
    clusterParticipant.onPartitionBecomeBootstrapFromOffline(replicas.get(0).getPartitionId().toPathString());
}
Also used : MockPartitionId (com.github.ambry.clustermap.MockPartitionId), MetricRegistry (com.codahale.metrics.MetricRegistry), MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId), PartitionId (com.github.ambry.clustermap.PartitionId), StateTransitionException (com.github.ambry.clustermap.StateTransitionException), Test (org.junit.Test)
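
The try/fail/catch pattern above recurs once per expected error code. A hedged helper sketch that condenses it; assertBootstrapFails is hypothetical (not part of the test), and it assumes the error-code type is the TransitionErrorCode enum nested in StateTransitionException.

// Hypothetical helper: asserts that an OFFLINE -> BOOTSTRAP transition fails with the expected code.
private void assertBootstrapFails(String partitionName, StateTransitionException.TransitionErrorCode expectedCode) {
    try {
        clusterParticipant.onPartitionBecomeBootstrapFromOffline(partitionName);
        fail("Transition should have failed for partition " + partitionName);
    } catch (StateTransitionException e) {
        assertEquals("Transition error code doesn't match", expectedCode, e.getErrorCode());
    }
}

With it, steps 2 and 3 above reduce to assertBootstrapFails("InvalidPartition", ReplicaNotFound) and assertBootstrapFails(newPartition.toPathString(), ReplicaOperationFailure).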

Example 53 with MockDataNodeId

Use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.

From the class StatsManagerTest, method testStatsManagerWithProblematicStores.

/**
 * Test to verify the behavior when a {@link Store} is null and when a {@link StoreException} is thrown.
 * @throws Exception on unexpected test error
 */
@Test
public void testStatsManagerWithProblematicStores() throws Exception {
    DataNodeId dataNodeId = new MockDataNodeId(Collections.singletonList(new Port(6667, PortType.PLAINTEXT)), Collections.singletonList("/tmp"), "DC1");
    Map<PartitionId, Store> problematicStoreMap = new HashMap<>();
    PartitionId partitionId1 = new MockPartitionId(1, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    PartitionId partitionId2 = new MockPartitionId(2, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    problematicStoreMap.put(partitionId1, null);
    Store exceptionStore = new MockStore(new MockStoreStats(new HashMap<>(), true));
    problematicStoreMap.put(partitionId2, exceptionStore);
    StatsManager testStatsManager = new StatsManager(new MockStorageManager(problematicStoreMap, dataNodeId), Arrays.asList(partitionId1.getReplicaIds().get(0), partitionId2.getReplicaIds().get(0)), new MetricRegistry(), statsManagerConfig, new MockTime(), null, null, inMemoryAccountService);
    List<PartitionId> unreachablePartitions = new ArrayList<>();
    Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> hostAccountStorageStatsMap = new HashMap<>();
    for (PartitionId partitionId : problematicStoreMap.keySet()) {
        testStatsManager.collectAndAggregateAccountStorageStats(hostAccountStorageStatsMap, partitionId, unreachablePartitions);
    }
    assertEquals("Aggregated map should not contain any value", 0L, hostAccountStorageStatsMap.size());
    assertEquals("Unreachable store count mismatch with expected value", 2, unreachablePartitions.size());
    StatsManager.AccountStatsPublisher publisher = testStatsManager.new AccountStatsPublisher(accountStatsStore);
    publisher.run();
    HostAccountStorageStatsWrapper statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    List<String> unreachableStores = statsWrapper.getHeader().getUnreachableStores();
    assertTrue("The unreachable store list should contain Partition1 and Partition2", unreachableStores.containsAll(Arrays.asList(partitionId1.toPathString(), partitionId2.toPathString())));
    // test for the scenario where some stores are healthy and some are bad
    Map<PartitionId, Store> mixedStoreMap = new HashMap<>(storeMap);
    unreachablePartitions.clear();
    PartitionId partitionId3 = new MockPartitionId(3, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    PartitionId partitionId4 = new MockPartitionId(4, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    mixedStoreMap.put(partitionId3, null);
    mixedStoreMap.put(partitionId4, exceptionStore);
    testStatsManager = new StatsManager(new MockStorageManager(mixedStoreMap, dataNodeId), Arrays.asList(partitionId3.getReplicaIds().get(0), partitionId4.getReplicaIds().get(0)), new MetricRegistry(), statsManagerConfig, new MockTime(), null, null, inMemoryAccountService);
    hostAccountStorageStatsMap.clear();
    for (PartitionId partitionId : mixedStoreMap.keySet()) {
        testStatsManager.collectAndAggregateAccountStorageStats(hostAccountStorageStatsMap, partitionId, unreachablePartitions);
    }
    assertEquals("Unreachable store count mismatch with expected value", 2, unreachablePartitions.size());
    // verify the collected stats for healthy stores match the expected values
    unreachablePartitions.clear();
    // partition 0, 1, 2 are healthy stores, partition 3, 4 are bad ones.
    for (PartitionId partitionId : mixedStoreMap.keySet()) {
        Map<Short, Map<Short, ContainerStorageStats>> containerStatsMapForPartition = hostAccountStorageStatsMap.get(partitionId.getId());
        if (partitionId.getId() < 3) {
            assertEquals("Actual map does not match with expected snapshot with partition id " + partitionId.toPathString(), hostAccountStorageStats.getStorageStats().get(partitionId.getId()), containerStatsMapForPartition);
        }
    }
}
Also used : HashMap (java.util.HashMap), Port (com.github.ambry.network.Port), ArrayList (java.util.ArrayList), InmemoryAccountStatsStore (com.github.ambry.accountstats.InmemoryAccountStatsStore), Store (com.github.ambry.store.Store), ContainerStorageStats (com.github.ambry.server.storagestats.ContainerStorageStats), MockTime (com.github.ambry.utils.MockTime), MockPartitionId (com.github.ambry.clustermap.MockPartitionId), MetricRegistry (com.codahale.metrics.MetricRegistry), PartitionId (com.github.ambry.clustermap.PartitionId), MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId), DataNodeId (com.github.ambry.clustermap.DataNodeId), Map (java.util.Map), MockClusterMap (com.github.ambry.clustermap.MockClusterMap), Test (org.junit.Test)
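
The setup shared by the StatsManagerTest examples is one MockDataNodeId backing several MockPartitionIds. A minimal sketch of that pattern, reusing the port, mount path, and datacenter values from the example above; the partition id here is arbitrary.

// One mock node can back any number of mock partitions.
DataNodeId dataNodeId = new MockDataNodeId(Collections.singletonList(new Port(6667, PortType.PLAINTEXT)), Collections.singletonList("/tmp"), "DC1");
// MockPartitionId takes a numeric id, a partition class, the node list, and a mount path index.
PartitionId partitionId = new MockPartitionId(7, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);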

Example 54 with MockDataNodeId

Use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.

From the class StatsManagerTest, method testAddAndRemoveReplica.

/**
 * Test to verify that {@link StatsManager} behaves correctly when dynamically adding/removing a {@link ReplicaId}.
 * @throws Exception on unexpected test error
 */
@Test
public void testAddAndRemoveReplica() throws Exception {
    // setup testing environment
    Map<PartitionId, Store> testStoreMap = new HashMap<>();
    List<ReplicaId> testReplicas = new ArrayList<>();
    DataNodeId dataNodeId = new MockDataNodeId(Collections.singletonList(new Port(6667, PortType.PLAINTEXT)), Collections.singletonList("/tmp"), "DC1");
    for (int i = 0; i < 3; i++) {
        PartitionId partitionId = new MockPartitionId(i, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
        testStoreMap.put(partitionId, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(i), false)));
        testReplicas.add(partitionId.getReplicaIds().get(0));
    }
    StorageManager mockStorageManager = new MockStorageManager(testStoreMap, dataNodeId);
    StatsManager testStatsManager = new StatsManager(mockStorageManager, testReplicas, new MetricRegistry(), statsManagerConfig, new MockTime(), null, null, inMemoryAccountService);
    // verify that adding an existing store to StatsManager should fail
    assertFalse("Adding a store which already exists should fail", testStatsManager.addReplica(testReplicas.get(0)));
    PartitionId partitionId3 = new MockPartitionId(3, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    testStoreMap.put(partitionId3, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(0), false)));
    // verify that partitionId3 is not in stats report before adding to statsManager
    StatsManager.AccountStatsPublisher publisher = testStatsManager.new AccountStatsPublisher(accountStatsStore);
    publisher.run();
    HostAccountStorageStatsWrapper statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    assertFalse("Partition3 should not present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId3.getId()));
    // verify that after adding into statsManager, PartitionId3 is in stats report
    testStatsManager.addReplica(partitionId3.getReplicaIds().get(0));
    publisher.run();
    statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    assertTrue("Partition3 should present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId3.getId()));
    // verify that after removing PartitionId0 (corresponding to the first replica in replicas list), PartitionId0 is not in the stats report
    PartitionId partitionId0 = testReplicas.get(0).getPartitionId();
    assertTrue("Partition0 should present in stats report before removal", statsWrapper.getStats().getStorageStats().containsKey(partitionId0.getId()));
    testStoreMap.remove(testReplicas.get(0).getPartitionId());
    testStatsManager.removeReplica(testReplicas.get(0));
    publisher.run();
    statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    assertFalse("Partition0 should not present in stats report after removal", statsWrapper.getStats().getStorageStats().containsKey(partitionId0.getId()));
    // verify that removing the PartitionId0 should fail because it no longer exists in StatsManager
    assertFalse(testStatsManager.removeReplica(testReplicas.get(0)));
    // concurrent remove test
    CountDownLatch getStatsCountdown1 = new CountDownLatch(1);
    CountDownLatch waitRemoveCountdown = new CountDownLatch(1);
    ((MockStorageManager) mockStorageManager).waitOperationCountdown = waitRemoveCountdown;
    ((MockStorageManager) mockStorageManager).firstCall = true;
    ((MockStorageManager) mockStorageManager).unreachablePartitions.clear();
    for (Store store : testStoreMap.values()) {
        ((MockStore) store).getStatsCountdown = getStatsCountdown1;
        ((MockStore) store).isCollected = false;
    }
    List<PartitionId> partitionRemoved = new ArrayList<>();
    Utils.newThread(() -> {
        // wait until at least one store has been collected (this ensures stats aggregation uses an old snapshot of the map)
        try {
            getStatsCountdown1.await();
        } catch (InterruptedException e) {
            throw new IllegalStateException("CountDown await was interrupted", e);
        }
        // find one store which hasn't been collected
        ReplicaId replicaToRemove = null;
        for (Map.Entry<PartitionId, Store> partitionToStore : testStoreMap.entrySet()) {
            MockStore store = (MockStore) partitionToStore.getValue();
            if (!store.isCollected) {
                replicaToRemove = partitionToStore.getKey().getReplicaIds().get(0);
                break;
            }
        }
        if (replicaToRemove != null) {
            testStatsManager.removeReplica(replicaToRemove);
            testStoreMap.remove(replicaToRemove.getPartitionId());
            partitionRemoved.add(replicaToRemove.getPartitionId());
            // count down to allow stats aggregation to proceed
            waitRemoveCountdown.countDown();
        }
    }, false).start();
    publisher.run();
    statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    // verify that the removed store is indeed unreachable during stats aggregation
    assertTrue("The removed partition should be unreachable during aggregation", ((MockStorageManager) mockStorageManager).unreachablePartitions.contains(partitionRemoved.get(0)));
    // verify unreachable store list doesn't contain the store which is removed.
    List<String> unreachableStores = statsWrapper.getHeader().getUnreachableStores();
    assertFalse("The removed partition should not present in unreachable list", unreachableStores.contains(partitionRemoved.get(0).toPathString()));
    // concurrent add test
    CountDownLatch getStatsCountdown2 = new CountDownLatch(1);
    CountDownLatch waitAddCountdown = new CountDownLatch(1);
    ((MockStorageManager) mockStorageManager).waitOperationCountdown = waitAddCountdown;
    ((MockStorageManager) mockStorageManager).firstCall = true;
    ((MockStorageManager) mockStorageManager).unreachablePartitions.clear();
    for (Store store : testStoreMap.values()) {
        ((MockStore) store).getStatsCountdown = getStatsCountdown2;
        ((MockStore) store).isCollected = false;
    }
    PartitionId partitionId4 = new MockPartitionId(4, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    Utils.newThread(() -> {
        // wait until at least one store has been collected (this ensures stats aggregation uses an old snapshot of the map)
        try {
            getStatsCountdown2.await();
        } catch (InterruptedException e) {
            throw new IllegalStateException("CountDown await was interrupted", e);
        }
        testStatsManager.addReplica(partitionId4.getReplicaIds().get(0));
        testStoreMap.put(partitionId4, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(0), false)));
        // count down to allow stats aggregation to proceed
        waitAddCountdown.countDown();
    }, false).start();
    publisher.run();
    statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    // verify that the newly added PartitionId4 is not in the report for this round of aggregation
    assertFalse("Partition4 should not be present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId4.getId()));
    // verify that the newly added PartitionId4 will be collected in the next round of aggregation
    publisher.run();
    statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    assertTrue("Partition4 should present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId4.getId()));
}
Also used : HashMap (java.util.HashMap), Port (com.github.ambry.network.Port), ArrayList (java.util.ArrayList), StorageManager (com.github.ambry.store.StorageManager), InmemoryAccountStatsStore (com.github.ambry.accountstats.InmemoryAccountStatsStore), Store (com.github.ambry.store.Store), MockTime (com.github.ambry.utils.MockTime), MockPartitionId (com.github.ambry.clustermap.MockPartitionId), MetricRegistry (com.codahale.metrics.MetricRegistry), PartitionId (com.github.ambry.clustermap.PartitionId), CountDownLatch (java.util.concurrent.CountDownLatch), ReplicaId (com.github.ambry.clustermap.ReplicaId), MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId), DataNodeId (com.github.ambry.clustermap.DataNodeId), Test (org.junit.Test)
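
Both concurrency checks above use the same two-latch handshake; stripped to its core, it looks like the sketch below. The latch names are illustrative placeholders: in the test, MockStore counts down the first latch when a store is collected, and MockStorageManager blocks on the second before finishing aggregation.

// Two-latch handshake sketch (names are placeholders).
CountDownLatch collectedOnce = new CountDownLatch(1);
CountDownLatch proceed = new CountDownLatch(1);
Utils.newThread(() -> {
    try {
        // wait until aggregation has collected at least one store
        collectedOnce.await();
    } catch (InterruptedException e) {
        throw new IllegalStateException("CountDown await was interrupted", e);
    }
    // mutate the replica set here (add or remove a replica) ...
    // ... then unblock the aggregation thread
    proceed.countDown();
}, false).start();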

Example 55 with MockDataNodeId

Use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.

From the class StorageManagerTest, method startBlobStoreTest.

/**
 * Test starting a BlobStore with a given {@link PartitionId}.
 */
@Test
public void startBlobStoreTest() throws Exception {
    MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
    List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
    List<MockDataNodeId> dataNodes = new ArrayList<>();
    dataNodes.add(dataNode);
    MockPartitionId invalidPartition = new MockPartitionId(Long.MAX_VALUE, MockClusterMap.DEFAULT_PARTITION_CLASS, dataNodes, 0);
    List<? extends ReplicaId> invalidPartitionReplicas = invalidPartition.getReplicaIds();
    StorageManager storageManager = createStorageManager(dataNode, metricRegistry, null);
    PartitionId id = null;
    storageManager.start();
    assertEquals("There should be 1 unexpected partition reported", 1, getNumUnrecognizedPartitionsReported());
    // shutdown all the replicas first
    for (ReplicaId replica : replicas) {
        id = replica.getPartitionId();
        assertTrue("Shutdown should succeed on given store", storageManager.shutdownBlobStore(id));
    }
    ReplicaId replica = replicas.get(0);
    id = replica.getPartitionId();
    // test start a store successfully
    assertTrue("Start should succeed on given store", storageManager.startBlobStore(id));
    // test start the store which is already started
    assertTrue("Start should succeed on the store which is already started", storageManager.startBlobStore(id));
    // test invalid partition
    replica = invalidPartitionReplicas.get(0);
    id = replica.getPartitionId();
    assertFalse("Start should fail on given invalid replica", storageManager.startBlobStore(id));
    // test start the store whose DiskManager is not running
    replica = replicas.get(replicas.size() - 1);
    id = replica.getPartitionId();
    storageManager.getDiskManager(id).shutdown();
    assertFalse("Start should fail on given store whose DiskManager is not running", storageManager.startBlobStore(id));
    shutdownAndAssertStoresInaccessible(storageManager, replicas);
}
Also used : MockPartitionId (com.github.ambry.clustermap.MockPartitionId), MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId), ArrayList (java.util.ArrayList), PartitionId (com.github.ambry.clustermap.PartitionId), ReplicaId (com.github.ambry.clustermap.ReplicaId), BlobStoreTest (com.github.ambry.store.BlobStoreTest), Test (org.junit.Test)
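
The invalid-partition probe above reduces to two lines: a partition id the cluster map never handed out (Long.MAX_VALUE) has no store or DiskManager behind it, so starting it must fail. A hedged distillation, reusing dataNodes and storageManager from the test:

// Long.MAX_VALUE guarantees an id outside the mock cluster map, so start must fail.
MockPartitionId invalidPartition = new MockPartitionId(Long.MAX_VALUE, MockClusterMap.DEFAULT_PARTITION_CLASS, dataNodes, 0);
assertFalse("Start should fail on an unknown partition", storageManager.startBlobStore(invalidPartition.getReplicaIds().get(0).getPartitionId()));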

Aggregations

MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 63
Test (org.junit.Test): 49
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 44
ReplicaId (com.github.ambry.clustermap.ReplicaId): 35
Port (com.github.ambry.network.Port): 29
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 28
PartitionId (com.github.ambry.clustermap.PartitionId): 28
ArrayList (java.util.ArrayList): 25
BlobStoreTest (com.github.ambry.store.BlobStoreTest): 24
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 16
Properties (java.util.Properties): 16
MetricRegistry (com.codahale.metrics.MetricRegistry): 15
File (java.io.File): 14
HashSet (java.util.HashSet): 13
DataNodeId (com.github.ambry.clustermap.DataNodeId): 12
MockReplicaId (com.github.ambry.clustermap.MockReplicaId): 11
StateTransitionException (com.github.ambry.clustermap.StateTransitionException): 11
HashMap (java.util.HashMap): 10
CountDownLatch (java.util.concurrent.CountDownLatch): 10
Counter (com.codahale.metrics.Counter): 9