Use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.
The class StorageManagerTest method scheduleAndDisableCompactionTest.
/**
 * Tests scheduling and disabling compaction in {@link StorageManager}.
 * @throws Exception
 */
@Test
public void scheduleAndDisableCompactionTest() throws Exception {
  MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
  List<MockDataNodeId> dataNodes = new ArrayList<>();
  dataNodes.add(dataNode);
  MockPartitionId invalidPartition = new MockPartitionId(Long.MAX_VALUE, dataNodes, 0);
  List<? extends ReplicaId> invalidPartitionReplicas = invalidPartition.getReplicaIds();
  StorageManager storageManager = createStorageManager(replicas, metricRegistry);
  storageManager.start();
  // add invalid replica id
  replicas.add(invalidPartitionReplicas.get(0));
  for (int i = 0; i < replicas.size(); i++) {
    ReplicaId replica = replicas.get(i);
    PartitionId id = replica.getPartitionId();
    if (i == replicas.size() - 1) {
      assertFalse("Schedule compaction should fail", storageManager.scheduleNextForCompaction(id));
      assertFalse("Disable compaction should fail", storageManager.disableCompactionForBlobStore(id));
    } else {
      assertTrue("Schedule compaction should succeed", storageManager.scheduleNextForCompaction(id));
    }
  }
  ReplicaId replica = replicas.get(0);
  PartitionId id = replica.getPartitionId();
  assertTrue("Disable compaction should succeed", storageManager.disableCompactionForBlobStore(id));
  assertFalse("Schedule compaction should fail", storageManager.scheduleNextForCompaction(id));
  replica = replicas.get(1);
  id = replica.getPartitionId();
  assertTrue("Schedule compaction should succeed", storageManager.scheduleNextForCompaction(id));
  replicas.remove(replicas.size() - 1);
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
}
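Every snippet on this page ends by calling a shutdownAndAssertStoresInaccessible helper that is not included here. A minimal sketch of what such a helper presumably does, assuming only the StorageManager methods already used above (shutdown() and getStore(PartitionId, boolean)); the actual helper in StorageManagerTest may perform additional checks:

// Hypothetical sketch of the shutdown helper used throughout these tests; the real
// implementation in StorageManagerTest may assert more than store accessibility.
private static void shutdownAndAssertStoresInaccessible(StorageManager storageManager, List<ReplicaId> replicas)
    throws Exception {
  storageManager.shutdown();
  for (ReplicaId replica : replicas) {
    assertNull("Store should be inaccessible after shutdown",
        storageManager.getStore(replica.getPartitionId(), false));
  }
}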
Use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.
The class StorageManagerTest method setBlobStoreStoppedStateWithMultiDelegatesTest.
/**
 * Tests setting the blob store stopped state in two clusters (when the server participates in two Helix clusters).
 * @throws Exception
 */
@Test
public void setBlobStoreStoppedStateWithMultiDelegatesTest() throws Exception {
  MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
  MockClusterParticipant mockClusterParticipant1 = new MockClusterParticipant();
  MockClusterParticipant mockClusterParticipant2 = new MockClusterParticipant(null, false);
  List<ClusterParticipant> participants = Arrays.asList(mockClusterParticipant1, mockClusterParticipant2);
  StorageManager storageManager = createStorageManager(dataNode, metricRegistry, participants);
  storageManager.start();
  PartitionId id = replicas.get(0).getPartitionId();
  // test that if any delegate fails to update the stop state, the whole operation fails
  List<PartitionId> failToUpdateList = storageManager.setBlobStoreStoppedState(Collections.singletonList(id), true);
  assertEquals("Set store stopped state should fail because one of the delegates returns false", id,
      failToUpdateList.get(0));
  // test the success case: both delegates succeed in updating the stop state of the replica
  mockClusterParticipant2.setStopStateReturnVal = null;
  failToUpdateList = storageManager.setBlobStoreStoppedState(Collections.singletonList(id), true);
  assertTrue("Set store stopped state should succeed", failToUpdateList.isEmpty());
  // verify both delegates have the correct stopped replica list
  List<String> expectedStoppedReplicas = Collections.singletonList(id.toPathString());
  assertEquals("Stopped replica list from participant 1 is not expected", expectedStoppedReplicas,
      mockClusterParticipant1.getStoppedReplicas());
  assertEquals("Stopped replica list from participant 2 is not expected", expectedStoppedReplicas,
      mockClusterParticipant2.getStoppedReplicas());
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
}
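The createStorageManager(dataNode, metricRegistry, participants) helper used here is not shown either. Judging from the full StorageManager constructor call that appears at the end of removeBlobStoreTest below, it is presumably a thin wrapper along the following lines; the storeConfig and diskManagerConfig fields and the exact argument list are assumptions based on that call:

// Hypothetical wrapper around the StorageManager constructor shown in removeBlobStoreTest;
// the real helper in StorageManagerTest may build its configs differently.
private StorageManager createStorageManager(DataNodeId dataNode, MetricRegistry registry,
    List<ClusterParticipant> clusterParticipants) throws Exception {
  return new StorageManager(storeConfig, diskManagerConfig, Utils.newScheduler(1, false), registry,
      new MockIdFactory(), clusterMap, dataNode, new DummyMessageStoreHardDelete(), clusterParticipants,
      SystemTime.getInstance(), new DummyMessageStoreRecovery(), new InMemAccountService(false, false));
}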
Use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.
The class StorageManagerTest method storeStartFailureTest.
/**
 * Tests that {@link StorageManager} can start even when certain stores cannot be started, and checks that those
 * stores are not accessible. A store start failure is induced by making the replica path non-readable.
 * @throws Exception
 */
@Test
public void storeStartFailureTest() throws Exception {
  MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
  Set<Integer> badReplicaIndexes = new HashSet<>(Arrays.asList(2, 7));
  for (Integer badReplicaIndex : badReplicaIndexes) {
    new File(replicas.get(badReplicaIndex).getReplicaPath()).setReadable(false);
  }
  StorageManager storageManager = createStorageManager(dataNode, metricRegistry, null);
  storageManager.start();
  assertEquals("There should be no unexpected partitions reported", 0, getNumUnrecognizedPartitionsReported());
  Map<String, Counter> counters = metricRegistry.getCounters();
  assertEquals(0, getCounterValue(counters, DiskSpaceAllocator.class.getName(), "DiskSpaceAllocatorInitFailureCount"));
  assertEquals(badReplicaIndexes.size(),
      getCounterValue(counters, DiskManager.class.getName(), "TotalStoreStartFailures"));
  assertEquals(0, getCounterValue(counters, DiskManager.class.getName(), "DiskMountPathFailures"));
  for (int i = 0; i < replicas.size(); i++) {
    ReplicaId replica = replicas.get(i);
    PartitionId id = replica.getPartitionId();
    if (badReplicaIndexes.contains(i)) {
      assertNull("This store should not be accessible.", storageManager.getStore(id, false));
      assertFalse("Compaction should not be scheduled", storageManager.scheduleNextForCompaction(id));
    } else {
      Store store = storageManager.getStore(id, false);
      assertTrue("Store should be started", store.isStarted());
      assertTrue("Compaction should be scheduled", storageManager.scheduleNextForCompaction(id));
    }
  }
  assertEquals("Compaction thread count is incorrect", dataNode.getMountPaths().size(),
      TestUtils.numThreadsByThisName(CompactionManager.THREAD_NAME_PREFIX));
  verifyCompactionThreadCount(storageManager, dataNode.getMountPaths().size());
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
  assertEquals("Compaction thread count is incorrect", 0, storageManager.getCompactionThreadCount());
}
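storeStartFailureTest reads failure counts out of the MetricRegistry through a getCounterValue helper. Assuming the counters are registered under MetricRegistry.name(ownerClass, metricName), which produces keys of the form "fully.qualified.ClassName.MetricName", the helper is presumably no more than the following; treat it as a sketch rather than the project's exact code:

// Hypothetical helper: looks up a codahale Counter registered as "<className>.<metricName>"
// and returns its current count; the real StorageManagerTest helper may differ.
private long getCounterValue(Map<String, Counter> counters, String className, String metricName) {
  return counters.get(className + "." + metricName).getCount();
}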
Use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.
The class StorageManagerTest method setBlobStoreStoppedStateFailureTest.
/**
 * Tests setting the stopped state of blob stores, given a list of {@link PartitionId}s, in failure cases.
 */
@Test
public void setBlobStoreStoppedStateFailureTest() throws Exception {
  MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
  List<MockDataNodeId> dataNodes = new ArrayList<>();
  dataNodes.add(dataNode);
  MockPartitionId invalidPartition =
      new MockPartitionId(Long.MAX_VALUE, MockClusterMap.DEFAULT_PARTITION_CLASS, dataNodes, 0);
  List<? extends ReplicaId> invalidPartitionReplicas = invalidPartition.getReplicaIds();
  StorageManager storageManager = createStorageManager(dataNode, metricRegistry, null);
  storageManager.start();
  assertEquals("There should be 1 unexpected partition reported", 1, getNumUnrecognizedPartitionsReported());
  // test setting the state of a store whose replicaStatusDelegate is null
  ReplicaId replica = replicas.get(0);
  PartitionId id = replica.getPartitionId();
  storageManager.getDiskManager(id).shutdown();
  List<PartitionId> failToUpdateList = storageManager.setBlobStoreStoppedState(Arrays.asList(id), true);
  assertEquals("Set store stopped state should fail on a store whose replicaStatusDelegate is null", id,
      failToUpdateList.get(0));
  // test the invalid partition case (where diskManager == null)
  replica = invalidPartitionReplicas.get(0);
  id = replica.getPartitionId();
  failToUpdateList = storageManager.setBlobStoreStoppedState(Arrays.asList(id), true);
  assertEquals("Set store stopped state should fail on an invalid replica", id, failToUpdateList.get(0));
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
}
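As the assertions above show, setBlobStoreStoppedState does not throw on failure; it returns the partitions whose stopped state could not be updated. The snippet below is an illustrative usage example, not code from StorageManagerTest, showing how a caller might consume that return value (the method name and the error output are hypothetical):

// Illustrative only: consuming the list of partitions whose stopped state could not be updated.
private static void markStoresStopped(StorageManager storageManager, List<PartitionId> partitionIds) {
  List<PartitionId> failedPartitions = storageManager.setBlobStoreStoppedState(partitionIds, true);
  for (PartitionId failed : failedPartitions) {
    System.err.println("Could not mark store as stopped for partition " + failed.toPathString());
  }
}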
Use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.
The class StorageManagerTest method removeBlobStoreTest.
/**
 * Tests removing a blob store with a given {@link PartitionId}.
 * @throws Exception
 */
@Test
public void removeBlobStoreTest() throws Exception {
  MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
  List<MockDataNodeId> dataNodes = new ArrayList<>();
  dataNodes.add(dataNode);
  MockPartitionId invalidPartition =
      new MockPartitionId(Long.MAX_VALUE, MockClusterMap.DEFAULT_PARTITION_CLASS, dataNodes, 0);
  StorageManager storageManager = createStorageManager(dataNode, metricRegistry, null);
  storageManager.start();
  // Replica[1] will be used to test removing a started store. Replica[2] will be used to test a store with
  // compaction enabled.
  for (int i = 3; i < replicas.size(); i++) {
    ReplicaId replica = replicas.get(i);
    PartitionId id = replica.getPartitionId();
    assertTrue("Disable compaction should succeed", storageManager.controlCompactionForBlobStore(id, false));
    assertTrue("Shutdown should succeed on given store", storageManager.shutdownBlobStore(id));
    assertTrue("Removing store should succeed", storageManager.removeBlobStore(id));
    assertNull("The store should not exist", storageManager.getStore(id, false));
  }
  // test removing a store that still has compaction enabled, even though it has been shut down
  PartitionId id = replicas.get(2).getPartitionId();
  assertTrue("Shutdown should succeed on given store", storageManager.shutdownBlobStore(id));
  assertFalse("Removing store should fail because compaction is enabled on this store",
      storageManager.removeBlobStore(id));
  // test removing a store that is still started
  id = replicas.get(1).getPartitionId();
  assertFalse("Removing store should fail because store is still started", storageManager.removeBlobStore(id));
  // test removing a store whose disk manager is not running
  id = replicas.get(0).getPartitionId();
  storageManager.getDiskManager(id).shutdown();
  assertFalse("Removing store should fail because disk manager is not running", storageManager.removeBlobStore(id));
  // test a store that doesn't exist
  assertFalse("Removing a not-found store should return false", storageManager.removeBlobStore(invalidPartition));
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
  // test removing stores when the compaction executor is not instantiated;
  // by default, storeCompactionTriggers = "" which makes the compaction executor null during initialization
  VerifiableProperties vProps = new VerifiableProperties(new Properties());
  storageManager =
      new StorageManager(new StoreConfig(vProps), diskManagerConfig, Utils.newScheduler(1, false), metricRegistry,
          new MockIdFactory(), clusterMap, dataNode, new DummyMessageStoreHardDelete(), null,
          SystemTime.getInstance(), new DummyMessageStoreRecovery(), new InMemAccountService(false, false));
  storageManager.start();
  for (ReplicaId replica : replicas) {
    id = replica.getPartitionId();
    assertTrue("Disable compaction should succeed", storageManager.controlCompactionForBlobStore(id, false));
    assertTrue("Shutdown should succeed on given store", storageManager.shutdownBlobStore(id));
    assertTrue("Removing store should succeed", storageManager.removeBlobStore(id));
    assertNull("The store should not exist", storageManager.getStore(id, false));
  }
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
}
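All of the snippets above depend on test fields (clusterMap, metricRegistry, diskManagerConfig, and so on) that are initialized elsewhere in StorageManagerTest. A plausible, simplified setup is sketched below; the use of a @Before method and the property key are assumptions rather than the project's exact initialization:

// Hypothetical test setup; StorageManagerTest's real initialization may differ.
private MockClusterMap clusterMap;
private MetricRegistry metricRegistry;
private DiskManagerConfig diskManagerConfig;

@Before
public void setup() throws Exception {
  clusterMap = new MockClusterMap();
  metricRegistry = new MetricRegistry();
  Properties properties = new Properties();
  // assumed property key for enabling segment pooling in DiskManagerConfig
  properties.setProperty("disk.manager.enable.segment.pooling", "true");
  diskManagerConfig = new DiskManagerConfig(new VerifiableProperties(properties));
}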