
Example 56 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.

From class UndeleteOperationTrackerTest, method initialize.

/**
 * Initializes 4 DCs; each DC has 1 data node, and each data node hosts 3 replicas (12 in total).
 */
private void initialize() {
    int replicaCount = 12;
    List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
    List<String> mountPaths = Collections.singletonList("mockMountPath");
    datanodes = new ArrayList<>(Arrays.asList(
        new MockDataNodeId(portList, mountPaths, "dc-0"),
        new MockDataNodeId(portList, mountPaths, "dc-1"),
        new MockDataNodeId(portList, mountPaths, "dc-2"),
        new MockDataNodeId(portList, mountPaths, "dc-3")));
    mockPartition = new MockPartitionId();
    populateReplicaList(replicaCount, ReplicaState.STANDBY);
    localDcName = datanodes.get(0).getDatacenterName();
    mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), localDcName);
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) Port(com.github.ambry.network.Port) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) MockClusterMap(com.github.ambry.clustermap.MockClusterMap)
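The initialize() snippet calls a populateReplicaList helper that is not shown on this page. Below is a minimal sketch of what such a helper might look like, assuming MockReplicaId offers a (port, partition, dataNode, mountPathIndex) constructor and MockPartitionId exposes public replicaIds and replicaAndState collections; these are assumptions for illustration, not the actual ambry test helper.

// Hypothetical helper (not shown in the original snippet): spreads replicaCount
// replicas evenly across the four mock data nodes and records the desired state.
private void populateReplicaList(int replicaCount, ReplicaState state) {
    for (int i = 0; i < replicaCount; i++) {
        MockDataNodeId dataNode = datanodes.get(i % datanodes.size());
        // Assumed constructor: MockReplicaId(port, partition, dataNode, mountPathIndex)
        ReplicaId replicaId = new MockReplicaId(PORT, mockPartition, dataNode, 0);
        mockPartition.replicaIds.add(replicaId);
        mockPartition.replicaAndState.put(replicaId, state);
    }
}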

Example 57 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.

From class StatsManagerTest, method testReplicaFromOfflineToBootstrap.

/**
 * Tests the stats manager's state transition from OFFLINE to BOOTSTRAP.
 */
@Test
public void testReplicaFromOfflineToBootstrap() {
    MockStatsManager mockStatsManager = new MockStatsManager(storageManager, replicas, new MetricRegistry(), statsManagerConfig, clusterParticipant);
    // 1. verify stats manager's listener is registered
    assertTrue("Stats manager listener is found in cluster participant", clusterParticipant.getPartitionStateChangeListeners().containsKey(StateModelListenerType.StatsManagerListener));
    // 2. test partition not found
    try {
        clusterParticipant.onPartitionBecomeBootstrapFromOffline("InvalidPartition");
        fail("should fail because partition is not found");
    } catch (StateTransitionException e) {
        assertEquals("Transition error doesn't match", ReplicaNotFound, e.getErrorCode());
    }
    // 3. create a new partition and test replica addition failure
    PartitionId newPartition = new MockPartitionId(3, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    ((MockStorageManager) storageManager).getReplicaReturnVal = newPartition.getReplicaIds().get(0);
    mockStatsManager.returnValOfAddReplica = false;
    try {
        clusterParticipant.onPartitionBecomeBootstrapFromOffline(newPartition.toPathString());
        fail("should fail because adding replica to stats manager failed");
    } catch (StateTransitionException e) {
        assertEquals("Transition error code doesn't match", ReplicaOperationFailure, e.getErrorCode());
    }
    // 4. test replica addition success during Offline-To-Bootstrap transition
    assertFalse("Before adding new replica, in-mem data structure should not contain new partition", mockStatsManager.partitionToReplicaMap.containsKey(newPartition));
    mockStatsManager.returnValOfAddReplica = null;
    clusterParticipant.onPartitionBecomeBootstrapFromOffline(newPartition.toPathString());
    assertTrue("After adding new replica, in-mem data structure should contain new partition", mockStatsManager.partitionToReplicaMap.containsKey(newPartition));
    // 5. state transition on existing replica should be no-op
    clusterParticipant.onPartitionBecomeBootstrapFromOffline(replicas.get(0).getPartitionId().toPathString());
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) MetricRegistry(com.codahale.metrics.MetricRegistry) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) StateTransitionException(com.github.ambry.clustermap.StateTransitionException) Test(org.junit.Test)
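For context, here is a hedged sketch of the Offline-to-Bootstrap listener behavior this test exercises. The error codes (ReplicaNotFound, ReplicaOperationFailure) follow the assertions above, but the body is illustrative and the lookup/registration calls are assumptions rather than the actual StatsManager implementation.

// Illustrative only: resolve the replica for the partition name, then register it
// with the stats manager, mapping each failure to the error code the test expects.
public void onPartitionBecomeBootstrapFromOffline(String partitionName) {
    ReplicaId replica = storageManager.getReplica(partitionName);
    if (replica == null) {
        throw new StateTransitionException("Replica not found for partition " + partitionName,
            StateTransitionException.TransitionErrorCode.ReplicaNotFound);
    }
    if (!addReplica(replica)) {
        throw new StateTransitionException("Failed to add replica for partition " + partitionName,
            StateTransitionException.TransitionErrorCode.ReplicaOperationFailure);
    }
}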

Example 58 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.

From class StatsManagerTest, method testStatsManagerWithProblematicStores.

/**
 * Tests the behavior when dealing with a {@link Store} that is null and when a {@link StoreException} is thrown.
 * @throws Exception
 */
@Test
public void testStatsManagerWithProblematicStores() throws Exception {
    DataNodeId dataNodeId = new MockDataNodeId(Collections.singletonList(new Port(6667, PortType.PLAINTEXT)), Collections.singletonList("/tmp"), "DC1");
    Map<PartitionId, Store> problematicStoreMap = new HashMap<>();
    PartitionId partitionId1 = new MockPartitionId(1, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    PartitionId partitionId2 = new MockPartitionId(2, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    problematicStoreMap.put(partitionId1, null);
    Store exceptionStore = new MockStore(new MockStoreStats(new HashMap<>(), true));
    problematicStoreMap.put(partitionId2, exceptionStore);
    StatsManager testStatsManager = new StatsManager(new MockStorageManager(problematicStoreMap, dataNodeId), Arrays.asList(partitionId1.getReplicaIds().get(0), partitionId2.getReplicaIds().get(0)), new MetricRegistry(), statsManagerConfig, new MockTime(), null, null, inMemoryAccountService);
    List<PartitionId> unreachablePartitions = new ArrayList<>();
    Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> hostAccountStorageStatsMap = new HashMap<>();
    for (PartitionId partitionId : problematicStoreMap.keySet()) {
        testStatsManager.collectAndAggregateAccountStorageStats(hostAccountStorageStatsMap, partitionId, unreachablePartitions);
    }
    assertEquals("Aggregated map should not contain any value", 0L, hostAccountStorageStatsMap.size());
    assertEquals("Unreachable store count mismatch with expected value", 2, unreachablePartitions.size());
    StatsManager.AccountStatsPublisher publisher = testStatsManager.new AccountStatsPublisher(accountStatsStore);
    publisher.run();
    HostAccountStorageStatsWrapper statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    List<String> unreachableStores = statsWrapper.getHeader().getUnreachableStores();
    assertTrue("The unreachable store list should contain Partition1 and Partition2", unreachableStores.containsAll(Arrays.asList(partitionId1.toPathString(), partitionId2.toPathString())));
    // test for the scenario where some stores are healthy and some are bad
    Map<PartitionId, Store> mixedStoreMap = new HashMap<>(storeMap);
    unreachablePartitions.clear();
    PartitionId partitionId3 = new MockPartitionId(3, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    PartitionId partitionId4 = new MockPartitionId(4, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    mixedStoreMap.put(partitionId3, null);
    mixedStoreMap.put(partitionId4, exceptionStore);
    testStatsManager = new StatsManager(new MockStorageManager(mixedStoreMap, dataNodeId), Arrays.asList(partitionId3.getReplicaIds().get(0), partitionId4.getReplicaIds().get(0)), new MetricRegistry(), statsManagerConfig, new MockTime(), null, null, inMemoryAccountService);
    hostAccountStorageStatsMap.clear();
    for (PartitionId partitionId : mixedStoreMap.keySet()) {
        testStatsManager.collectAndAggregateAccountStorageStats(hostAccountStorageStatsMap, partitionId, unreachablePartitions);
    }
    assertEquals("Unreachable store count mismatch with expected value", 2, unreachablePartitions.size());
    // test fetchSnapshot method in StatsManager
    unreachablePartitions.clear();
    // partition 0, 1, 2 are healthy stores, partition 3, 4 are bad ones.
    for (PartitionId partitionId : mixedStoreMap.keySet()) {
        Map<Short, Map<Short, ContainerStorageStats>> containerStatsMapForPartition = hostAccountStorageStatsMap.get(partitionId.getId());
        if (partitionId.getId() < 3) {
            assertEquals("Actual map does not match with expected snapshot with partition id " + partitionId.toPathString(), hostAccountStorageStats.getStorageStats().get(partitionId.getId()), containerStatsMapForPartition);
        }
    }
}
Also used : HashMap(java.util.HashMap) Port(com.github.ambry.network.Port) ArrayList(java.util.ArrayList) InmemoryAccountStatsStore(com.github.ambry.accountstats.InmemoryAccountStatsStore) Store(com.github.ambry.store.Store) ContainerStorageStats(com.github.ambry.server.storagestats.ContainerStorageStats) MockTime(com.github.ambry.utils.MockTime) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) MetricRegistry(com.codahale.metrics.MetricRegistry) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) Map(java.util.Map) HashMap(java.util.HashMap) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) Test(org.junit.Test)
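A hedged sketch of the per-partition collection behavior this test relies on: a partition whose store is missing, or whose stats call throws a StoreException, is added to the unreachable list instead of the aggregated map. The store lookup and stats accessor below are assumptions for illustration; the real collectAndAggregateAccountStorageStats may differ in detail.

// Illustrative only: how a "problematic" partition ends up unreachable during collection.
void collectAndAggregateAccountStorageStats(Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> aggregated,
        PartitionId partitionId, List<PartitionId> unreachablePartitions) {
    Store store = storageManager.getStore(partitionId, false);
    if (store == null) {
        // Case of partitionId1 above: no store registered for the partition.
        unreachablePartitions.add(partitionId);
        return;
    }
    try {
        // Case of partitionId2 above: MockStoreStats is configured to throw.
        Map<Short, Map<Short, ContainerStorageStats>> stats = fetchContainerStats(store); // hypothetical accessor
        aggregated.put(partitionId.getId(), stats);
    } catch (StoreException e) {
        unreachablePartitions.add(partitionId);
    }
}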

Example 59 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.

From class StatsManagerTest, method testAddAndRemoveReplica.

/**
 * Tests that the {@link StatsManager} behaves correctly when dynamically adding/removing a {@link ReplicaId}.
 * @throws Exception
 */
@Test
public void testAddAndRemoveReplica() throws Exception {
    // setup testing environment
    Map<PartitionId, Store> testStoreMap = new HashMap<>();
    List<ReplicaId> testReplicas = new ArrayList<>();
    DataNodeId dataNodeId = new MockDataNodeId(Collections.singletonList(new Port(6667, PortType.PLAINTEXT)), Collections.singletonList("/tmp"), "DC1");
    for (int i = 0; i < 3; i++) {
        PartitionId partitionId = new MockPartitionId(i, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
        testStoreMap.put(partitionId, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(i), false)));
        testReplicas.add(partitionId.getReplicaIds().get(0));
    }
    StorageManager mockStorageManager = new MockStorageManager(testStoreMap, dataNodeId);
    StatsManager testStatsManager = new StatsManager(mockStorageManager, testReplicas, new MetricRegistry(), statsManagerConfig, new MockTime(), null, null, inMemoryAccountService);
    // verify that adding an existing store to StatsManager should fail
    assertFalse("Adding a store which already exists should fail", testStatsManager.addReplica(testReplicas.get(0)));
    PartitionId partitionId3 = new MockPartitionId(3, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    testStoreMap.put(partitionId3, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(0), false)));
    // verify that partitionId3 is not in stats report before adding to statsManager
    StatsManager.AccountStatsPublisher publisher = testStatsManager.new AccountStatsPublisher(accountStatsStore);
    publisher.run();
    HostAccountStorageStatsWrapper statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    assertFalse("Partition3 should not present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId3.getId()));
    // verify that after adding into statsManager, PartitionId3 is in stats report
    testStatsManager.addReplica(partitionId3.getReplicaIds().get(0));
    publisher.run();
    statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    assertTrue("Partition3 should present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId3.getId()));
    // verify that after removing PartitionId0 (corresponding to the first replica in replicas list), PartitionId0 is not in the stats report
    PartitionId partitionId0 = testReplicas.get(0).getPartitionId();
    assertTrue("Partition0 should present in stats report before removal", statsWrapper.getStats().getStorageStats().containsKey(partitionId0.getId()));
    testStoreMap.remove(testReplicas.get(0).getPartitionId());
    testStatsManager.removeReplica(testReplicas.get(0));
    publisher.run();
    statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    assertFalse("Partition0 should not present in stats report after removal", statsWrapper.getStats().getStorageStats().containsKey(partitionId0.getId()));
    // verify that removing the PartitionId0 should fail because it no longer exists in StatsManager
    assertFalse(testStatsManager.removeReplica(testReplicas.get(0)));
    // concurrent remove test
    CountDownLatch getStatsCountdown1 = new CountDownLatch(1);
    CountDownLatch waitRemoveCountdown = new CountDownLatch(1);
    ((MockStorageManager) mockStorageManager).waitOperationCountdown = waitRemoveCountdown;
    ((MockStorageManager) mockStorageManager).firstCall = true;
    ((MockStorageManager) mockStorageManager).unreachablePartitions.clear();
    for (Store store : testStoreMap.values()) {
        ((MockStore) store).getStatsCountdown = getStatsCountdown1;
        ((MockStore) store).isCollected = false;
    }
    List<PartitionId> partitionRemoved = new ArrayList<>();
    Utils.newThread(() -> {
        // wait until at least one store has been collected (this ensures stats aggregation using old snapshot of map)
        try {
            getStatsCountdown1.await();
        } catch (InterruptedException e) {
            throw new IllegalStateException("CountDown await was interrupted", e);
        }
        // find one store which hasn't been collected
        ReplicaId replicaToRemove = null;
        for (Map.Entry<PartitionId, Store> partitionToStore : testStoreMap.entrySet()) {
            MockStore store = (MockStore) partitionToStore.getValue();
            if (!store.isCollected) {
                replicaToRemove = partitionToStore.getKey().getReplicaIds().get(0);
                break;
            }
        }
        if (replicaToRemove != null) {
            testStatsManager.removeReplica(replicaToRemove);
            testStoreMap.remove(replicaToRemove.getPartitionId());
            partitionRemoved.add(replicaToRemove.getPartitionId());
            // count down to allow stats aggregation to proceed
            waitRemoveCountdown.countDown();
        }
    }, false).start();
    publisher.run();
    statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    // verify that the removed store is indeed unreachable during stats aggregation
    assertTrue("The removed partition should be unreachable during aggregation", ((MockStorageManager) mockStorageManager).unreachablePartitions.contains(partitionRemoved.get(0)));
    // verify unreachable store list doesn't contain the store which is removed.
    List<String> unreachableStores = statsWrapper.getHeader().getUnreachableStores();
    assertFalse("The removed partition should not present in unreachable list", unreachableStores.contains(partitionRemoved.get(0).toPathString()));
    // concurrent add test
    CountDownLatch getStatsCountdown2 = new CountDownLatch(1);
    CountDownLatch waitAddCountdown = new CountDownLatch(1);
    ((MockStorageManager) mockStorageManager).waitOperationCountdown = waitAddCountdown;
    ((MockStorageManager) mockStorageManager).firstCall = true;
    ((MockStorageManager) mockStorageManager).unreachablePartitions.clear();
    for (Store store : testStoreMap.values()) {
        ((MockStore) store).getStatsCountdown = getStatsCountdown2;
        ((MockStore) store).isCollected = false;
    }
    PartitionId partitionId4 = new MockPartitionId(4, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
    Utils.newThread(() -> {
        // wait until at least one store has been collected (this ensures stats aggregation using old snapshot of map)
        try {
            getStatsCountdown2.await();
        } catch (InterruptedException e) {
            throw new IllegalStateException("CountDown await was interrupted", e);
        }
        testStatsManager.addReplica(partitionId4.getReplicaIds().get(0));
        testStoreMap.put(partitionId4, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(0), false)));
        // count down to allow stats aggregation to proceed
        waitAddCountdown.countDown();
    }, false).start();
    publisher.run();
    statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    // verify that new added PartitionId4 is not in report for this round of aggregation
    assertFalse("Partition4 should not present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId4.getId()));
    // verify that new added PartitionId4 will be collected for next round of aggregation
    publisher.run();
    statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
    assertTrue("Partition4 should present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId4.getId()));
}
Also used : HashMap(java.util.HashMap) Port(com.github.ambry.network.Port) ArrayList(java.util.ArrayList) StorageManager(com.github.ambry.store.StorageManager) InmemoryAccountStatsStore(com.github.ambry.accountstats.InmemoryAccountStatsStore) Store(com.github.ambry.store.Store) MockTime(com.github.ambry.utils.MockTime) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) MetricRegistry(com.codahale.metrics.MetricRegistry) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) CountDownLatch(java.util.concurrent.CountDownLatch) ReplicaId(com.github.ambry.clustermap.ReplicaId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) Test(org.junit.Test)
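The concurrency hooks used above (getStatsCountdown, isCollected, waitOperationCountdown) live on test doubles that are not shown on this page. A minimal sketch of the MockStore side, assuming it implements Store.getStoreStats(); this is illustrative, not the actual test helper.

// Illustrative only: each stats call marks the store as collected and releases the
// latch that the concurrent add/remove thread is awaiting.
@Override
public StoreStats getStoreStats() {
    isCollected = true;
    getStatsCountdown.countDown();
    return mockStoreStats;
}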

Example 60 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.

From class AmbryServerRequestsTest, method startBlobStoreFailureTest.

/**
 * Tests the startBlobStore responses received on a {@link BlobStoreControlAdminRequest} for different failure cases.
 * @throws Exception
 */
@Test
public void startBlobStoreFailureTest() throws Exception {
    // Recreate storage manager and ambryRequest to pass in HelixParticipant
    storageManager.shutdown();
    storageManager = new MockStorageManager(validKeysInStore, clusterMap, dataNodeId, findTokenHelper, helixParticipant);
    ambryRequests = new AmbryServerRequests(storageManager, requestResponseChannel, clusterMap, dataNodeId,
        clusterMap.getMetricRegistry(), serverMetrics, findTokenHelper, null, replicationManager, null, serverConfig,
        storeKeyConverterFactory, statsManager, helixParticipant);
    // find a partition that has a replica on current node
    ReplicaId localReplica = clusterMap.getReplicaIds(dataNodeId).get(0);
    MockPartitionId id = (MockPartitionId) localReplica.getPartitionId();
    short numReplicasCaughtUpPerPartition = 3;
    // test start BlobStore failure
    storageManager.returnValueOfStartingBlobStore = false;
    sendAndVerifyStoreControlRequest(id, BlobStoreControlAction.StartStore, numReplicasCaughtUpPerPartition, ServerErrorCode.Unknown_Error);
    storageManager.returnValueOfStartingBlobStore = true;
    // test start BlobStore with runtime exception
    storageManager.exceptionToThrowOnStartingBlobStore = new IllegalStateException();
    sendAndVerifyStoreControlRequest(id, BlobStoreControlAction.StartStore, numReplicasCaughtUpPerPartition, ServerErrorCode.Unknown_Error);
    storageManager.exceptionToThrowOnStartingBlobStore = null;
    // test enable replication failure
    replicationManager.controlReplicationReturnVal = false;
    sendAndVerifyStoreControlRequest(id, BlobStoreControlAction.StartStore, numReplicasCaughtUpPerPartition, ServerErrorCode.Unknown_Error);
    replicationManager.controlReplicationReturnVal = true;
    // test enable compaction failure
    storageManager.returnValueOfControllingCompaction = false;
    sendAndVerifyStoreControlRequest(id, BlobStoreControlAction.StartStore, numReplicasCaughtUpPerPartition, ServerErrorCode.Unknown_Error);
    storageManager.returnValueOfControllingCompaction = true;
    // test enable compaction with runtime exception
    storageManager.exceptionToThrowOnControllingCompaction = new IllegalStateException();
    sendAndVerifyStoreControlRequest(id, BlobStoreControlAction.StartStore, numReplicasCaughtUpPerPartition, ServerErrorCode.Unknown_Error);
    storageManager.exceptionToThrowOnControllingCompaction = null;
    // test HelixParticipant resets partition error
    helixParticipant.resetPartitionVal = false;
    id.replicaAndState.put(localReplica, ReplicaState.ERROR);
    sendAndVerifyStoreControlRequest(id, BlobStoreControlAction.StartStore, numReplicasCaughtUpPerPartition, ServerErrorCode.Unknown_Error);
    helixParticipant.resetPartitionVal = true;
    id.replicaAndState.put(localReplica, ReplicaState.STANDBY);
    // test stop list update failure
    helixParticipant.setStoppedStateReturnVal = false;
    sendAndVerifyStoreControlRequest(id, BlobStoreControlAction.StartStore, numReplicasCaughtUpPerPartition, ServerErrorCode.Unknown_Error);
    helixParticipant.setStoppedStateReturnVal = null;
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) ReplicaId(com.github.ambry.clustermap.ReplicaId) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) Test(org.junit.Test) MessageInfoTest(com.github.ambry.store.MessageInfoTest) MessageFormatInputStreamTest(com.github.ambry.messageformat.MessageFormatInputStreamTest)
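sendAndVerifyStoreControlRequest is a helper defined elsewhere in AmbryServerRequestsTest. Below is a hedged sketch of what such a helper might do, built around the BlobStoreControlAdminRequest named in the javadoc; the constructor shapes, the correlationId/clientId fields, and sendRequestGetResponse are assumptions for illustration only.

// Illustrative only: wrap the store-control action in an admin request, send it through
// the request channel, and verify the error code in the response.
private void sendAndVerifyStoreControlRequest(PartitionId partitionId, BlobStoreControlAction action,
        short numReplicasCaughtUpPerPartition, ServerErrorCode expectedErrorCode) throws Exception {
    AdminRequest adminRequest =
        new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionId, correlationId++, clientId);
    BlobStoreControlAdminRequest controlRequest =
        new BlobStoreControlAdminRequest(numReplicasCaughtUpPerPartition, action, adminRequest);
    AdminResponse response = sendRequestGetResponse(controlRequest, expectedErrorCode);
    assertEquals("Error code mismatch", expectedErrorCode, response.getError());
}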

Aggregations

MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 66 uses
Test (org.junit.Test): 51 uses
PartitionId (com.github.ambry.clustermap.PartitionId): 33 uses
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 31 uses
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 26 uses
ArrayList (java.util.ArrayList): 26 uses
ReplicaId (com.github.ambry.clustermap.ReplicaId): 25 uses
BlobId (com.github.ambry.commons.BlobId): 23 uses
Port (com.github.ambry.network.Port): 20 uses
MockReplicaId (com.github.ambry.clustermap.MockReplicaId): 17 uses
MetricRegistry (com.codahale.metrics.MetricRegistry): 11 uses
CloudBlobMetadata (com.github.ambry.cloud.CloudBlobMetadata): 10 uses
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 9 uses
StorageManager (com.github.ambry.store.StorageManager): 9 uses
DataNodeId (com.github.ambry.clustermap.DataNodeId): 8 uses
BlobStoreTest (com.github.ambry.store.BlobStoreTest): 8 uses
Store (com.github.ambry.store.Store): 7 uses
ByteArrayInputStream (java.io.ByteArrayInputStream): 7 uses
Properties (java.util.Properties): 7 uses
NettyByteBufDataInputStream (com.github.ambry.utils.NettyByteBufDataInputStream): 6 uses