Use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
The class StorageManagerTest, method updateInstanceConfigFailureTest.
/**
* Test failure cases when updating InstanceConfig in Helix for both Offline-To-Bootstrap and Inactive-To-Offline.
*/
@Test
public void updateInstanceConfigFailureTest() throws Exception {
  generateConfigs(true, true);
  MockDataNodeId localNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> localReplicas = clusterMap.getReplicaIds(localNode);
  MockClusterParticipant mockHelixParticipant = new MockClusterParticipant();
  StorageManager storageManager =
      createStorageManager(localNode, metricRegistry, Collections.singletonList(mockHelixParticipant));
  storageManager.start();
  // create a new partition and get its replica on the local node
  PartitionId newPartition = clusterMap.createNewPartition(Collections.singletonList(localNode));
  // override the return value of updateDataNodeInfoInCluster() to mock an InstanceConfig update failure
  mockHelixParticipant.updateNodeInfoReturnVal = false;
  try {
    mockHelixParticipant.onPartitionBecomeBootstrapFromOffline(newPartition.toPathString());
    fail("should fail because updating InstanceConfig didn't succeed during Offline-To-Bootstrap");
  } catch (StateTransitionException e) {
    assertEquals("Error code doesn't match", StateTransitionException.TransitionErrorCode.HelixUpdateFailure,
        e.getErrorCode());
  }
  try {
    mockHelixParticipant.onPartitionBecomeOfflineFromInactive(localReplicas.get(0).getPartitionId().toPathString());
    fail("should fail because updating InstanceConfig didn't succeed during Inactive-To-Offline");
  } catch (StateTransitionException e) {
    assertEquals("Error code doesn't match", StateTransitionException.TransitionErrorCode.HelixUpdateFailure,
        e.getErrorCode());
  }
  mockHelixParticipant.updateNodeInfoReturnVal = null;
  // mock an InstanceConfig-not-found error (MockHelixAdmin is empty by default, so no InstanceConfig is present)
  newPartition = clusterMap.createNewPartition(Collections.singletonList(localNode));
  try {
    mockHelixParticipant.onPartitionBecomeBootstrapFromOffline(newPartition.toPathString());
    fail("should fail because InstanceConfig is not found during Offline-To-Bootstrap");
  } catch (StateTransitionException e) {
    assertEquals("Error code doesn't match", StateTransitionException.TransitionErrorCode.HelixUpdateFailure,
        e.getErrorCode());
  }
  try {
    mockHelixParticipant.onPartitionBecomeOfflineFromInactive(localReplicas.get(1).getPartitionId().toPathString());
    fail("should fail because InstanceConfig is not found during Inactive-To-Offline");
  } catch (StateTransitionException e) {
    assertEquals("Error code doesn't match", StateTransitionException.TransitionErrorCode.HelixUpdateFailure,
        e.getErrorCode());
  }
  shutdownAndAssertStoresInaccessible(storageManager, localReplicas);
}
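The repeated try/fail/catch blocks above could also be expressed with JUnit's assertThrows, assuming the project is on JUnit 4.13 or later where org.junit.Assert.assertThrows is available. A minimal sketch of the equivalent check for the Offline-To-Bootstrap case:

  // sketch only, assumes JUnit 4.13+; assertThrows returns the caught exception for further assertions
  StateTransitionException e = assertThrows(StateTransitionException.class,
      () -> mockHelixParticipant.onPartitionBecomeBootstrapFromOffline(newPartition.toPathString()));
  assertEquals("Error code doesn't match", StateTransitionException.TransitionErrorCode.HelixUpdateFailure,
      e.getErrorCode());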
Use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
The class StorageManagerTest, method addBlobStoreTest.
/**
* Test add new BlobStore with given {@link ReplicaId}.
*/
@Test
public void addBlobStoreTest() throws Exception {
  generateConfigs(true, false);
  MockDataNodeId localNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> localReplicas = clusterMap.getReplicaIds(localNode);
  int newMountPathIndex = 3;
  // add a new mount path to the local node
  File f = File.createTempFile("ambry", ".tmp");
  File mountFile =
      new File(f.getParent(), "mountpathfile" + MockClusterMap.PLAIN_TEXT_PORT_START_NUMBER + newMountPathIndex);
  MockClusterMap.deleteFileOrDirectory(mountFile);
  assertTrue("Couldn't create mount path directory", mountFile.mkdir());
  localNode.addMountPaths(Collections.singletonList(mountFile.getAbsolutePath()));
  PartitionId newPartition1 =
      new MockPartitionId(10L, MockClusterMap.DEFAULT_PARTITION_CLASS, clusterMap.getDataNodes(), newMountPathIndex);
  StorageManager storageManager = createStorageManager(localNode, metricRegistry, null);
  storageManager.start();
  // test adding a store that already exists, which should fail
  assertFalse("Add store which is already existing should fail", storageManager.addBlobStore(localReplicas.get(0)));
  // test adding a store onto a new disk, which should succeed
  assertTrue("Add new store should succeed", storageManager.addBlobStore(newPartition1.getReplicaIds().get(0)));
  assertNotNull("The store shouldn't be null because new store is successfully added",
      storageManager.getStore(newPartition1, false));
  // test adding a store whose DiskManager is not running, which should fail
  PartitionId newPartition2 =
      new MockPartitionId(11L, MockClusterMap.DEFAULT_PARTITION_CLASS, clusterMap.getDataNodes(), 0);
  storageManager.getDiskManager(localReplicas.get(0).getPartitionId()).shutdown();
  assertFalse("Add store onto the DiskManager which is not running should fail",
      storageManager.addBlobStore(newPartition2.getReplicaIds().get(0)));
  storageManager.getDiskManager(localReplicas.get(0).getPartitionId()).start();
  // test that replica addition correctly handles an existing dir (it should delete it and create a new one);
  // to verify the directory has been recreated, we purposely put a test file in the previous dir
  PartitionId newPartition3 =
      new MockPartitionId(12L, MockClusterMap.DEFAULT_PARTITION_CLASS, clusterMap.getDataNodes(), 0);
  ReplicaId replicaToAdd = newPartition3.getReplicaIds().get(0);
  File previousDir = new File(replicaToAdd.getReplicaPath());
  File testFile = new File(previousDir, "testFile");
  MockClusterMap.deleteFileOrDirectory(previousDir);
  assertTrue("Cannot create dir for " + replicaToAdd.getReplicaPath(), previousDir.mkdir());
  assertTrue("Cannot create test file within previous dir", testFile.createNewFile());
  assertTrue("Adding new store should succeed", storageManager.addBlobStore(replicaToAdd));
  assertFalse("Test file should not exist", testFile.exists());
  assertNotNull("Store associated new added replica should not be null", storageManager.getStore(newPartition3, false));
  shutdownAndAssertStoresInaccessible(storageManager, localReplicas);
  // test adding a store that fails to add segment requirements to DiskSpaceAllocator (simulated by inducing an
  // addRequiredSegments failure to make the store inaccessible)
  List<String> mountPaths = localNode.getMountPaths();
  String diskToFail = mountPaths.get(0);
  File reservePoolDir = new File(diskToFail, diskManagerConfig.diskManagerReserveFileDirName);
  File storeReserveDir = new File(reservePoolDir, DiskSpaceAllocator.STORE_DIR_PREFIX + newPartition2.toPathString());
  StorageManager storageManager2 = createStorageManager(localNode, new MetricRegistry(), null);
  storageManager2.start();
  Utils.deleteFileOrDirectory(storeReserveDir);
  assertTrue("File creation should succeed", storeReserveDir.createNewFile());
  assertFalse("Add store should fail if store couldn't start due to initializePool failure",
      storageManager2.addBlobStore(newPartition2.getReplicaIds().get(0)));
  assertNull("New store shouldn't be in in-memory data structure", storageManager2.getStore(newPartition2, false));
  shutdownAndAssertStoresInaccessible(storageManager2, localReplicas);
}
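The last part of the test relies on a simple failure-injection trick: a regular file is created at the path where the store's reserve directory is expected, so the DiskSpaceAllocator cannot create its pool directory and the store fails to start. A minimal, self-contained sketch of the same idea, using a hypothetical temp path rather than Ambry's reserve-pool layout:

  import java.io.File;
  import java.io.IOException;

  public class DirBlockedByFileDemo {
    public static void main(String[] args) throws IOException {
      // hypothetical location; in the test above it is the store reserve dir under the reserve pool
      File blocked = new File(System.getProperty("java.io.tmpdir"), "blocked-dir-demo");
      blocked.delete();
      // occupy the path with a plain file ...
      System.out.println("file created: " + blocked.createNewFile());
      // ... so creating a directory at the same path now fails
      System.out.println("mkdirs succeeds: " + blocked.mkdirs()); // prints false
    }
  }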
Use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
The class CloudStorageManagerTest, method addStartAndRemoveBlobStoreTest.
/**
* Test {@code CloudStorageManager#addBlobStore}, {@code CloudStorageManager#startBlobStore}, {@code CloudStorageManager#removeBlobStore}
* @throws IOException
*/
@Test
public void addStartAndRemoveBlobStoreTest() throws IOException {
  CloudStorageManager cloudStorageManager = createNewCloudStorageManager();
  ReplicaId mockReplicaId = clusterMap.getReplicaIds(clusterMap.getDataNodeIds().get(0)).get(0);
  PartitionId partitionId = mockReplicaId.getPartitionId();
  // start store for a PartitionId not added to the store
  Assert.assertFalse(cloudStorageManager.startBlobStore(partitionId));
  // remove store for a PartitionId not added to the store
  Assert.assertFalse(cloudStorageManager.removeBlobStore(partitionId));
  // add a replica to the store
  Assert.assertTrue(cloudStorageManager.addBlobStore(mockReplicaId));
  // add an already added replica to the store
  Assert.assertTrue(cloudStorageManager.addBlobStore(mockReplicaId));
  // try start for the added partition
  Assert.assertTrue(cloudStorageManager.startBlobStore(partitionId));
  // try remove for an added partition
  Assert.assertTrue(cloudStorageManager.removeBlobStore(partitionId));
}
Use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
The class BlobStoreTest, method storeStartupTests.
/**
* Tests {@link BlobStore#start()} for corner cases and error cases.
* Corner cases
* 1. Creating a directory on first startup
* Error cases
* 1. Start an already started store.
* 2. Unable to create store directory on first startup.
* 3. Starting two stores at the same path.
* 4. Directory not readable.
* 5. Path is not a directory.
* @throws IOException
* @throws StoreException
*/
@Test
public void storeStartupTests() throws IOException, StoreException {
  // attempt to start when store is already started fails
  verifyStartupFailure(store, StoreErrorCodes.Store_Already_Started);
  String nonExistentDir = new File(tempDir, TestUtils.getRandomString(10)).getAbsolutePath();
  // fail if attempt to create directory fails
  String badPath = new File(nonExistentDir, TestUtils.getRandomString(10)).getAbsolutePath();
  BlobStore blobStore = createBlobStore(getMockReplicaId(badPath));
  verifyStartupFailure(blobStore, StoreErrorCodes.Initialization_Error);
  ReplicaId replicaIdWithNonExistentDir = getMockReplicaId(nonExistentDir);
  // create directory if it does not exist
  blobStore = createBlobStore(replicaIdWithNonExistentDir);
  verifyStartupSuccess(blobStore);
  File createdDir = new File(nonExistentDir);
  assertTrue("Directory should now exist", createdDir.exists() && createdDir.isDirectory());
  // should not be able to start two stores at the same path
  blobStore = createBlobStore(replicaIdWithNonExistentDir);
  blobStore.start();
  BlobStore secondStore = createBlobStore(replicaIdWithNonExistentDir);
  verifyStartupFailure(secondStore, StoreErrorCodes.Initialization_Error);
  blobStore.shutdown();
  // fail if directory is not readable
  assertTrue("Could not set readable state to false", createdDir.setReadable(false));
  verifyStartupFailure(blobStore, StoreErrorCodes.Initialization_Error);
  assertTrue("Could not set readable state to true", createdDir.setReadable(true));
  assertTrue("Directory could not be deleted", StoreTestUtils.cleanDirectory(createdDir, true));
  // fail if provided path is not a directory
  File file = new File(tempDir, TestUtils.getRandomString(10));
  assertTrue("Test file could not be created", file.createNewFile());
  file.deleteOnExit();
  blobStore = createBlobStore(getMockReplicaId(file.getAbsolutePath()));
  verifyStartupFailure(blobStore, StoreErrorCodes.Initialization_Error);
}
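verifyStartupFailure and verifyStartupSuccess are helpers defined elsewhere in BlobStoreTest and are not shown on this page. A hypothetical sketch of what the failure check typically looks like (the real helper may differ in details):

  // hypothetical helper, for illustration only
  private void verifyStartupFailure(BlobStore blobStore, StoreErrorCodes expectedErrorCode) {
    try {
      blobStore.start();
      fail("Store start should have failed");
    } catch (StoreException e) {
      assertEquals("Unexpected error code", expectedErrorCode, e.getErrorCode());
    }
  }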
Use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
The class MockSelector, method testCloseWithDanglingRequest.
/**
 * Test that when the router is closed, dangling requests are correctly released.
*/
@Test
public void testCloseWithDanglingRequest() throws Exception {
  Properties props = new Properties();
  props.setProperty(NetworkConfig.NETWORK_CLIENT_ENABLE_CONNECTION_REPLENISHMENT, "true");
  VerifiableProperties vprops = new VerifiableProperties(props);
  NetworkConfig networkConfig = new NetworkConfig(vprops);
  MockSelector mockSelector = new MockSelector(networkConfig);
  NetworkMetrics localNetworkMetrics = new NetworkMetrics(new MetricRegistry());
  SocketNetworkClient localNetworkClient =
      new SocketNetworkClient(mockSelector, networkConfig, localNetworkMetrics, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL,
          CHECKOUT_TIMEOUT_MS, time);
  DataNodeId dataNodeId = localPlainTextDataNodes.get(0);
  ReplicaId replicaId = sslDisabledClusterMap.getReplicaIds(dataNodeId).get(0);
  List<RequestInfo> requestInfoList = new ArrayList<>();
  List<ResponseInfo> responseInfoList;
  requestInfoList.add(
      new RequestInfo(dataNodeId.getHostname(), dataNodeId.getPortToConnectTo(), new MockSend(1), replicaId, null));
  requestInfoList.add(
      new RequestInfo(dataNodeId.getHostname(), dataNodeId.getPortToConnectTo(), new MockSend(2), replicaId, null));
  // This call would create two connections and not send any requests out
  localNetworkClient.sendAndPoll(requestInfoList, Collections.EMPTY_SET, POLL_TIMEOUT_MS);
  requestInfoList.clear();
  requestInfoList.add(
      new RequestInfo(dataNodeId.getHostname(), dataNodeId.getPortToConnectTo(), new MockSend(3), replicaId, null));
  requestInfoList.add(
      new RequestInfo(dataNodeId.getHostname(), dataNodeId.getPortToConnectTo(), new MockSend(4), replicaId, null));
  mockSelector.setState(MockSelectorState.IdlePoll);
  // This call would send the first two requests out while keeping the last two in the pendingRequests queue
  localNetworkClient.sendAndPoll(requestInfoList, Collections.EMPTY_SET, POLL_TIMEOUT_MS);
  localNetworkClient.close();
}
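In ordinary (non-shutdown) usage, the caller would capture and process the responses returned by sendAndPoll; the unused responseInfoList declaration above hints at that pattern. A sketch, reusing the call from the test and a hypothetical handleResponse method:

  responseInfoList = localNetworkClient.sendAndPoll(requestInfoList, Collections.EMPTY_SET, POLL_TIMEOUT_MS);
  for (ResponseInfo responseInfo : responseInfoList) {
    handleResponse(responseInfo); // hypothetical handler; the test instead closes the client with requests still pending
  }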