Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
Class StorageManagerTest, method scheduleAndDisableCompactionTest.
/**
 * Tests scheduling and disabling compaction in StorageManager.
 * @throws Exception
 */
@Test
public void scheduleAndDisableCompactionTest() throws Exception {
  MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
  List<MockDataNodeId> dataNodes = new ArrayList<>();
  dataNodes.add(dataNode);
  MockPartitionId invalidPartition = new MockPartitionId(Long.MAX_VALUE, dataNodes, 0);
  List<? extends ReplicaId> invalidPartitionReplicas = invalidPartition.getReplicaIds();
  StorageManager storageManager = createStorageManager(replicas, metricRegistry);
  storageManager.start();
  // add invalid replica id
  replicas.add(invalidPartitionReplicas.get(0));
  for (int i = 0; i < replicas.size(); i++) {
    ReplicaId replica = replicas.get(i);
    PartitionId id = replica.getPartitionId();
    if (i == replicas.size() - 1) {
      assertFalse("Schedule compaction should fail", storageManager.scheduleNextForCompaction(id));
      assertFalse("Disable compaction should fail", storageManager.disableCompactionForBlobStore(id));
    } else {
      assertTrue("Schedule compaction should succeed", storageManager.scheduleNextForCompaction(id));
    }
  }
  ReplicaId replica = replicas.get(0);
  PartitionId id = replica.getPartitionId();
  assertTrue("Disable compaction should succeed", storageManager.disableCompactionForBlobStore(id));
  assertFalse("Schedule compaction should fail", storageManager.scheduleNextForCompaction(id));
  replica = replicas.get(1);
  id = replica.getPartitionId();
  assertTrue("Schedule compaction should succeed", storageManager.scheduleNextForCompaction(id));
  replicas.remove(replicas.size() - 1);
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
}
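The createStorageManager helper is elided on this page (two overloads appear: one taking the replica list, one taking the data node plus a third, null-valued argument). A minimal sketch of what the data-node overload might wrap, mirroring the direct StorageManager construction shown later in removeBlobStoreTest; the parameter types and wiring here are assumptions, not the project's actual helper:

// Hypothetical helper sketch; argument order mirrors the StorageManager constructor call in removeBlobStoreTest.
private StorageManager createStorageManager(DataNodeId dataNode, MetricRegistry registry,
    List<ClusterParticipant> participants) throws StoreException {
  return new StorageManager(new StoreConfig(new VerifiableProperties(new Properties())), diskManagerConfig,
      Utils.newScheduler(1, false), registry, new MockIdFactory(), clusterMap, dataNode,
      new DummyMessageStoreHardDelete(), participants, SystemTime.getInstance(), new DummyMessageStoreRecovery(),
      new InMemAccountService(false, false));
}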
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
Class StorageManagerTest, method setBlobStoreStoppedStateFailureTest.
/**
 * Tests setting the stopped state of blob stores for a given list of {@link PartitionId}s in failure cases.
 */
@Test
public void setBlobStoreStoppedStateFailureTest() throws Exception {
  MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
  List<MockDataNodeId> dataNodes = new ArrayList<>();
  dataNodes.add(dataNode);
  MockPartitionId invalidPartition =
      new MockPartitionId(Long.MAX_VALUE, MockClusterMap.DEFAULT_PARTITION_CLASS, dataNodes, 0);
  List<? extends ReplicaId> invalidPartitionReplicas = invalidPartition.getReplicaIds();
  StorageManager storageManager = createStorageManager(dataNode, metricRegistry, null);
  storageManager.start();
  assertEquals("There should be 1 unexpected partition reported", 1, getNumUnrecognizedPartitionsReported());
  // test setting the state of a store whose replicaStatusDelegate is null
  ReplicaId replica = replicas.get(0);
  PartitionId id = replica.getPartitionId();
  storageManager.getDiskManager(id).shutdown();
  List<PartitionId> failToUpdateList = storageManager.setBlobStoreStoppedState(Arrays.asList(id), true);
  assertEquals("Set store stopped state should fail on given store whose replicaStatusDelegate is null", id,
      failToUpdateList.get(0));
  // test the invalid partition case (where diskManager == null)
  replica = invalidPartitionReplicas.get(0);
  id = replica.getPartitionId();
  failToUpdateList = storageManager.setBlobStoreStoppedState(Arrays.asList(id), true);
  assertEquals("Set store stopped state should fail on given invalid replica", id, failToUpdateList.get(0));
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
}
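The complement of the failure cases above: setBlobStoreStoppedState returns the sublist of partitions it could not update, so an empty result means every store's stopped state was set. A minimal success-path sketch (assuming a running disk manager and a non-null replicaStatusDelegate; the assertion message is illustrative):

List<PartitionId> failed = storageManager.setBlobStoreStoppedState(Arrays.asList(id), true);
assertTrue("No partition should fail to update in the healthy case", failed.isEmpty());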
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
Class StorageManagerTest, method removeBlobStoreTest.
/**
 * Tests removing a blob store with a given {@link PartitionId}.
 * @throws Exception
 */
@Test
public void removeBlobStoreTest() throws Exception {
  MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
  List<MockDataNodeId> dataNodes = new ArrayList<>();
  dataNodes.add(dataNode);
  MockPartitionId invalidPartition =
      new MockPartitionId(Long.MAX_VALUE, MockClusterMap.DEFAULT_PARTITION_CLASS, dataNodes, 0);
  StorageManager storageManager = createStorageManager(dataNode, metricRegistry, null);
  storageManager.start();
  // Replica[1] will be used to test removing a started store. Replica[2] will be used to test a store with
  // compaction enabled.
  for (int i = 3; i < replicas.size(); i++) {
    ReplicaId replica = replicas.get(i);
    PartitionId id = replica.getPartitionId();
    assertTrue("Disable compaction should succeed", storageManager.controlCompactionForBlobStore(id, false));
    assertTrue("Shutdown should succeed on given store", storageManager.shutdownBlobStore(id));
    assertTrue("Removing store should succeed", storageManager.removeBlobStore(id));
    assertNull("The store should not exist", storageManager.getStore(id, false));
  }
  // test removing a store that still has compaction enabled, even though it is shut down
  PartitionId id = replicas.get(2).getPartitionId();
  assertTrue("Shutdown should succeed on given store", storageManager.shutdownBlobStore(id));
  assertFalse("Removing store should fail because compaction is enabled on this store",
      storageManager.removeBlobStore(id));
  // test removing a store that is still started
  id = replicas.get(1).getPartitionId();
  assertFalse("Removing store should fail because store is still started", storageManager.removeBlobStore(id));
  // test removing a store whose disk manager is not running
  id = replicas.get(0).getPartitionId();
  storageManager.getDiskManager(id).shutdown();
  assertFalse("Removing store should fail because disk manager is not running", storageManager.removeBlobStore(id));
  // test a store that doesn't exist
  assertFalse("Removing not-found store should return false", storageManager.removeBlobStore(invalidPartition));
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
  // test removing stores when the compaction executor is not instantiated;
  // by default storeCompactionTriggers = "", which leaves the compaction executor null during initialization
  VerifiableProperties vProps = new VerifiableProperties(new Properties());
  storageManager =
      new StorageManager(new StoreConfig(vProps), diskManagerConfig, Utils.newScheduler(1, false), metricRegistry,
          new MockIdFactory(), clusterMap, dataNode, new DummyMessageStoreHardDelete(), null,
          SystemTime.getInstance(), new DummyMessageStoreRecovery(), new InMemAccountService(false, false));
  storageManager.start();
  for (ReplicaId replica : replicas) {
    id = replica.getPartitionId();
    assertTrue("Disable compaction should succeed", storageManager.controlCompactionForBlobStore(id, false));
    assertTrue("Shutdown should succeed on given store", storageManager.shutdownBlobStore(id));
    assertTrue("Removing store should succeed", storageManager.removeBlobStore(id));
    assertNull("The store should not exist", storageManager.getStore(id, false));
  }
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
}
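The empty-compaction-triggers default relied on above can also be made explicit when building the config. A hedged sketch, assuming the storeCompactionTriggers field is backed by a store.compaction.triggers property key:

Properties props = new Properties();
// An empty trigger list leaves the compaction executor null during initialization (per the comment above);
// the property key is an assumption based on Ambry's usual config naming, not confirmed by this page.
props.setProperty("store.compaction.triggers", "");
StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(props));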
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
Class StorageManagerTest, method addBlobStoreTest.
/**
 * Tests adding a new BlobStore with a given {@link ReplicaId}.
 */
@Test
public void addBlobStoreTest() throws Exception {
  generateConfigs(true, false);
  MockDataNodeId localNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> localReplicas = clusterMap.getReplicaIds(localNode);
  int newMountPathIndex = 3;
  // add a new mount path to the local node
  File f = File.createTempFile("ambry", ".tmp");
  File mountFile =
      new File(f.getParent(), "mountpathfile" + MockClusterMap.PLAIN_TEXT_PORT_START_NUMBER + newMountPathIndex);
  MockClusterMap.deleteFileOrDirectory(mountFile);
  assertTrue("Couldn't create mount path directory", mountFile.mkdir());
  localNode.addMountPaths(Collections.singletonList(mountFile.getAbsolutePath()));
  PartitionId newPartition1 =
      new MockPartitionId(10L, MockClusterMap.DEFAULT_PARTITION_CLASS, clusterMap.getDataNodes(), newMountPathIndex);
  StorageManager storageManager = createStorageManager(localNode, metricRegistry, null);
  storageManager.start();
  // test adding a store that already exists, which should fail
  assertFalse("Add store which is already existing should fail", storageManager.addBlobStore(localReplicas.get(0)));
  // test adding a store onto a new disk, which should succeed
  assertTrue("Add new store should succeed", storageManager.addBlobStore(newPartition1.getReplicaIds().get(0)));
  assertNotNull("The store shouldn't be null because new store is successfully added",
      storageManager.getStore(newPartition1, false));
  // test adding a store whose DiskManager is not running, which should fail
  PartitionId newPartition2 =
      new MockPartitionId(11L, MockClusterMap.DEFAULT_PARTITION_CLASS, clusterMap.getDataNodes(), 0);
  storageManager.getDiskManager(localReplicas.get(0).getPartitionId()).shutdown();
  assertFalse("Add store onto the DiskManager which is not running should fail",
      storageManager.addBlobStore(newPartition2.getReplicaIds().get(0)));
  storageManager.getDiskManager(localReplicas.get(0).getPartitionId()).start();
  // test that replica addition correctly handles an existing dir (it should delete it and create a new one);
  // to verify the directory has been recreated, we purposely put a test file in the previous dir
  PartitionId newPartition3 =
      new MockPartitionId(12L, MockClusterMap.DEFAULT_PARTITION_CLASS, clusterMap.getDataNodes(), 0);
  ReplicaId replicaToAdd = newPartition3.getReplicaIds().get(0);
  File previousDir = new File(replicaToAdd.getReplicaPath());
  File testFile = new File(previousDir, "testFile");
  MockClusterMap.deleteFileOrDirectory(previousDir);
  assertTrue("Cannot create dir for " + replicaToAdd.getReplicaPath(), previousDir.mkdir());
  assertTrue("Cannot create test file within previous dir", testFile.createNewFile());
  assertTrue("Adding new store should succeed", storageManager.addBlobStore(replicaToAdd));
  assertFalse("Test file should not exist", testFile.exists());
  assertNotNull("Store associated with the newly added replica should not be null",
      storageManager.getStore(newPartition3, false));
  shutdownAndAssertStoresInaccessible(storageManager, localReplicas);
  // test adding a store that fails to add its segment requirements to the DiskSpaceAllocator (simulated by inducing
  // an addRequiredSegments failure to make the store inaccessible)
  List<String> mountPaths = localNode.getMountPaths();
  String diskToFail = mountPaths.get(0);
  File reservePoolDir = new File(diskToFail, diskManagerConfig.diskManagerReserveFileDirName);
  File storeReserveDir = new File(reservePoolDir, DiskSpaceAllocator.STORE_DIR_PREFIX + newPartition2.toPathString());
  StorageManager storageManager2 = createStorageManager(localNode, new MetricRegistry(), null);
  storageManager2.start();
  Utils.deleteFileOrDirectory(storeReserveDir);
  assertTrue("File creation should succeed", storeReserveDir.createNewFile());
  assertFalse("Add store should fail if store couldn't start due to initializePool failure",
      storageManager2.addBlobStore(newPartition2.getReplicaIds().get(0)));
  assertNull("New store shouldn't be in in-memory data structure", storageManager2.getStore(newPartition2, false));
  shutdownAndAssertStoresInaccessible(storageManager2, localReplicas);
}
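Distilled from addBlobStoreTest, the minimal pattern for hot-adding a store to a running StorageManager (the partition id and mount-path index are arbitrary test values):

PartitionId newPartition = new MockPartitionId(13L, MockClusterMap.DEFAULT_PARTITION_CLASS,
    clusterMap.getDataNodes(), newMountPathIndex);
ReplicaId replicaToAdd = newPartition.getReplicaIds().get(0);
if (storageManager.addBlobStore(replicaToAdd)) {
  // On success the store is started and registered, so a plain getStore lookup returns it.
  Store store = storageManager.getStore(newPartition, false);
}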
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
Class AzureContainerCompactorIntegrationTest, method testCompactAssignedDeprecatedContainers.
@Test
public void testCompactAssignedDeprecatedContainers() throws CloudStorageException, DocumentClientException {
  // Create a deprecated container.
  Set<Container> containers = generateContainers(1);
  cloudDestination.deprecateContainers(containers);
  verifyCosmosData(containers);
  verifyCheckpoint(containers);
  Container testContainer = containers.iterator().next();
  // Create blobs in the deprecated container and test partition.
  int numBlobs = 100;
  PartitionId partitionId = new MockPartitionId(testPartitionId, MockClusterMap.DEFAULT_PARTITION_CLASS);
  long creationTime = System.currentTimeMillis();
  Map<BlobId, byte[]> blobIdtoDataMap =
      createUnencryptedPermanentBlobs(numBlobs, dataCenterId, testContainer.getParentAccountId(),
          testContainer.getId(), partitionId, blobSize, cloudRequestAgent, cloudDestination, creationTime);
  // Assert that the blobs exist.
  Map<String, CloudBlobMetadata> metadataMap =
      getBlobMetadataWithRetry(new ArrayList<>(blobIdtoDataMap.keySet()), partitionId.toPathString(),
          cloudRequestAgent, cloudDestination);
  assertEquals("Unexpected size of returned metadata map", numBlobs, metadataMap.size());
  // Compact the deprecated container.
  cloudDestination.getContainerCompactor().compactAssignedDeprecatedContainers(Collections.singletonList(partitionId));
  // Assert that the deprecated container's blobs no longer exist.
  assertTrue("Expected empty set after container compaction",
      getBlobMetadataWithRetry(new ArrayList<>(blobIdtoDataMap.keySet()), partitionId.toPathString(),
          cloudRequestAgent, cloudDestination).isEmpty());
  cleanup();
}
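Note the two-argument MockPartitionId constructor here: with no replica list, the mock partition only has to supply an id whose toPathString() value keys the cloud metadata operations (testPartitionId is a field of the test class). A minimal sketch:

PartitionId partitionId = new MockPartitionId(42L, MockClusterMap.DEFAULT_PARTITION_CLASS);
String partitionPath = partitionId.toPathString(); // presumably "42"; used as the partition key in metadata lookups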