Use of com.github.ambry.network.Port in project ambry by linkedin.
The class UndeleteOperationTrackerTest, method successWithSufficientEligibleHostsTest.
/**
* Tests the case when not all hosts are eligible, but enough of them are.
*/
@Test
public void successWithSufficientEligibleHostsTest() {
assumeTrue(replicasStateEnabled);
List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
List<String> mountPaths = Collections.singletonList("mockMountPath");
datanodes = new ArrayList<>(Arrays.asList(new MockDataNodeId(portList, mountPaths, "dc-0"), new MockDataNodeId(portList, mountPaths, "dc-1"), new MockDataNodeId(portList, mountPaths, "dc-2"), new MockDataNodeId(portList, mountPaths, "dc-3")));
mockPartition = new MockPartitionId();
populateReplicaList(8, ReplicaState.STANDBY);
populateReplicaList(4, ReplicaState.INACTIVE);
localDcName = datanodes.get(0).getDatacenterName();
mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), localDcName);
UndeleteOperationTracker tracker = getOperationTracker(2);
// Now mark every response as a success
for (int i = 0; i < 4; i++) {
sendRequests(tracker, 2);
assertFalse("Operation should not have failed", tracker.hasFailed());
assertFalse("Operation should not have succeeded", tracker.hasSucceeded());
assertFalse("Operation should not be done", tracker.isDone());
for (int j = 0; j < 2; j++) {
tracker.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.SUCCESS);
}
}
assertFalse("Operation should not have failed", tracker.hasFailed());
assertTrue("Operation should have succeeded", tracker.hasSucceeded());
assertTrue("Operation should be done", tracker.isDone());
}
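The populateReplicaList helper used above is not shown on this page. A minimal sketch of what it could look like, assuming (as in Ambry's mock cluster-map utilities) that MockPartitionId exposes its replicaIds list and replicaAndState map as public fields and that MockReplicaId has a (port, partition, data node, mount-path index) constructor; both details are assumptions, not confirmed by this page:
// Hypothetical helper: register `replicaCount` replicas with the mock partition,
// spread round-robin across the mock data nodes, all in the given state.
private void populateReplicaList(int replicaCount, ReplicaState replicaState) {
    for (int i = 0; i < replicaCount; i++) {
        MockReplicaId replicaId = new MockReplicaId(PORT, mockPartition, datanodes.get(i % datanodes.size()), 0);
        mockPartition.replicaIds.add(replicaId);
        mockPartition.replicaAndState.put(replicaId, replicaState);
    }
}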
Use of com.github.ambry.network.Port in project ambry by linkedin.
The class UndeleteOperationTrackerTest, method initialize.
/**
* Initialize 4 DCs; each DC has 1 data node, and each data node has 3 replicas.
*/
private void initialize() {
int replicaCount = 12;
List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
List<String> mountPaths = Collections.singletonList("mockMountPath");
datanodes = new ArrayList<>(Arrays.asList(new MockDataNodeId(portList, mountPaths, "dc-0"), new MockDataNodeId(portList, mountPaths, "dc-1"), new MockDataNodeId(portList, mountPaths, "dc-2"), new MockDataNodeId(portList, mountPaths, "dc-3")));
mockPartition = new MockPartitionId();
populateReplicaList(replicaCount, ReplicaState.STANDBY);
localDcName = datanodes.get(0).getDatacenterName();
mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), localDcName);
}
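Both snippets above rely on the same Port construction: a single plaintext Port wrapped in a singleton list and shared by every MockDataNodeId. A minimal standalone sketch of that pattern, assuming the same imports as the tests on this page (the port number and mount path below are placeholder values):
// One plaintext port and one mount path, reused for every mock data node.
List<Port> portList = Collections.singletonList(new Port(6667, PortType.PLAINTEXT));
List<String> mountPaths = Collections.singletonList("mockMountPath");
MockDataNodeId localNode = new MockDataNodeId(portList, mountPaths, "dc-0");
MockDataNodeId remoteNode = new MockDataNodeId(portList, mountPaths, "dc-1");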
Use of com.github.ambry.network.Port in project ambry by linkedin.
The class StatsManagerTest, method testAddAndRemoveReplica.
/**
* Test to verify that the {@link StatsManager} behaves correctly when dynamically adding/removing a {@link ReplicaId}.
* @throws Exception
*/
@Test
public void testAddAndRemoveReplica() throws Exception {
// setup testing environment
Map<PartitionId, Store> testStoreMap = new HashMap<>();
List<ReplicaId> testReplicas = new ArrayList<>();
DataNodeId dataNodeId = new MockDataNodeId(Collections.singletonList(new Port(6667, PortType.PLAINTEXT)), Collections.singletonList("/tmp"), "DC1");
for (int i = 0; i < 3; i++) {
PartitionId partitionId = new MockPartitionId(i, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
testStoreMap.put(partitionId, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(i), false)));
testReplicas.add(partitionId.getReplicaIds().get(0));
}
StorageManager mockStorageManager = new MockStorageManager(testStoreMap, dataNodeId);
StatsManager testStatsManager = new StatsManager(mockStorageManager, testReplicas, new MetricRegistry(), statsManagerConfig, new MockTime(), null, null, inMemoryAccountService);
// verify that adding an existing store to StatsManager should fail
assertFalse("Adding a store which already exists should fail", testStatsManager.addReplica(testReplicas.get(0)));
PartitionId partitionId3 = new MockPartitionId(3, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
testStoreMap.put(partitionId3, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(0), false)));
// verify that partitionId3 is not in stats report before adding to statsManager
StatsManager.AccountStatsPublisher publisher = testStatsManager.new AccountStatsPublisher(accountStatsStore);
publisher.run();
HostAccountStorageStatsWrapper statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
assertFalse("Partition3 should not present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId3.getId()));
// verify that after adding into statsManager, PartitionId3 is in stats report
testStatsManager.addReplica(partitionId3.getReplicaIds().get(0));
publisher.run();
statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
assertTrue("Partition3 should present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId3.getId()));
// verify that after removing PartitionId0 (corresponding to the first replica in replicas list), PartitionId0 is not in the stats report
PartitionId partitionId0 = testReplicas.get(0).getPartitionId();
assertTrue("Partition0 should present in stats report before removal", statsWrapper.getStats().getStorageStats().containsKey(partitionId0.getId()));
testStoreMap.remove(testReplicas.get(0).getPartitionId());
testStatsManager.removeReplica(testReplicas.get(0));
publisher.run();
statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
assertFalse("Partition0 should not present in stats report after removal", statsWrapper.getStats().getStorageStats().containsKey(partitionId0.getId()));
// verify that removing the PartitionId0 should fail because it no longer exists in StatsManager
assertFalse(testStatsManager.removeReplica(testReplicas.get(0)));
// concurrent remove test
CountDownLatch getStatsCountdown1 = new CountDownLatch(1);
CountDownLatch waitRemoveCountdown = new CountDownLatch(1);
((MockStorageManager) mockStorageManager).waitOperationCountdown = waitRemoveCountdown;
((MockStorageManager) mockStorageManager).firstCall = true;
((MockStorageManager) mockStorageManager).unreachablePartitions.clear();
for (Store store : testStoreMap.values()) {
((MockStore) store).getStatsCountdown = getStatsCountdown1;
((MockStore) store).isCollected = false;
}
List<PartitionId> partitionRemoved = new ArrayList<>();
Utils.newThread(() -> {
// wait until at least one store has been collected (this ensures stats aggregation is using an old snapshot of the map)
try {
getStatsCountdown1.await();
} catch (InterruptedException e) {
throw new IllegalStateException("CountDown await was interrupted", e);
}
// find one store which hasn't been collected
ReplicaId replicaToRemove = null;
for (Map.Entry<PartitionId, Store> partitionToStore : testStoreMap.entrySet()) {
MockStore store = (MockStore) partitionToStore.getValue();
if (!store.isCollected) {
replicaToRemove = partitionToStore.getKey().getReplicaIds().get(0);
break;
}
}
if (replicaToRemove != null) {
testStatsManager.removeReplica(replicaToRemove);
testStoreMap.remove(replicaToRemove.getPartitionId());
partitionRemoved.add(replicaToRemove.getPartitionId());
// count down to allow stats aggregation to proceed
waitRemoveCountdown.countDown();
}
}, false).start();
publisher.run();
statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
// verify that the removed store is indeed unreachable during stats aggregation
assertTrue("The removed partition should be unreachable during aggregation", ((MockStorageManager) mockStorageManager).unreachablePartitions.contains(partitionRemoved.get(0)));
// verify the unreachable store list doesn't contain the store that was removed.
List<String> unreachableStores = statsWrapper.getHeader().getUnreachableStores();
assertFalse("The removed partition should not present in unreachable list", unreachableStores.contains(partitionRemoved.get(0).toPathString()));
// concurrent add test
CountDownLatch getStatsCountdown2 = new CountDownLatch(1);
CountDownLatch waitAddCountdown = new CountDownLatch(1);
((MockStorageManager) mockStorageManager).waitOperationCountdown = waitAddCountdown;
((MockStorageManager) mockStorageManager).firstCall = true;
((MockStorageManager) mockStorageManager).unreachablePartitions.clear();
for (Store store : testStoreMap.values()) {
((MockStore) store).getStatsCountdown = getStatsCountdown2;
((MockStore) store).isCollected = false;
}
PartitionId partitionId4 = new MockPartitionId(4, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
Utils.newThread(() -> {
// wait until at least one store has been collected (this ensures stats aggregation is using an old snapshot of the map)
try {
getStatsCountdown2.await();
} catch (InterruptedException e) {
throw new IllegalStateException("CountDown await was interrupted", e);
}
testStatsManager.addReplica(partitionId4.getReplicaIds().get(0));
testStoreMap.put(partitionId4, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(0), false)));
// count down to allow stats aggregation to proceed
waitAddCountdown.countDown();
}, false).start();
publisher.run();
statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
// verify that the newly added PartitionId4 is not in the report for this round of aggregation
assertFalse("Partition4 should not be present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId4.getId()));
// verify that the newly added PartitionId4 will be collected in the next round of aggregation
publisher.run();
statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
assertTrue("Partition4 should present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId4.getId()));
}
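Both concurrency cases above use the same two-latch handshake: the publisher thread collects at least one store, the mutating thread then changes the store map, and only afterwards is the publisher allowed to finish its pass. A self-contained sketch of that handshake in plain Java (no Ambry types assumed):
import java.util.concurrent.CountDownLatch;

public class LatchHandshakeSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch firstStoreCollected = new CountDownLatch(1);
        CountDownLatch mutationDone = new CountDownLatch(1);
        Thread mutator = new Thread(() -> {
            try {
                // Wait until the aggregator has started working off its snapshot of the map.
                firstStoreCollected.await();
            } catch (InterruptedException e) {
                throw new IllegalStateException("CountDown await was interrupted", e);
            }
            // ... mutate the map here (add or remove a replica) ...
            // Let the aggregator finish the rest of its pass.
            mutationDone.countDown();
        });
        mutator.start();
        // Aggregator side: collect the first store, signal the mutator, then wait for it.
        firstStoreCollected.countDown();
        mutationDone.await();
        mutator.join();
    }
}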
Use of com.github.ambry.network.Port in project ambry by linkedin.
The class VcrRecoveryTest, method setup.
/**
* Create a cluster with a vcr node and a recovery (ambry data) node.
* @throws Exception on any error during setup
*/
@Before
public void setup() throws Exception {
String vcrMountPath = ClusterMapSnapshotConstants.CLOUD_REPLICA_MOUNT + "/1";
recoveryProperties = new Properties();
recoveryProperties.setProperty("replication.metadata.request.version", "2");
// create vcr node
List<Port> vcrPortList = new ArrayList<>(2);
Port vcrClusterMapPort = new Port(12310, PortType.PLAINTEXT);
Port vcrSslPort = new Port(12410, PortType.SSL);
vcrPortList.add(vcrClusterMapPort);
vcrPortList.add(vcrSslPort);
MockDataNodeId vcrNode = new MockDataNodeId("localhost", vcrPortList, Collections.singletonList(vcrMountPath), dcName);
// create recovery node
recoveryNodePort = new Port(12311, PortType.PLAINTEXT);
ArrayList<Port> recoveryPortList = new ArrayList<>(2);
recoveryPortList.add(recoveryNodePort);
recoveryNode = MockClusterMap.createDataNode(recoveryPortList, dcName, 1);
// create cluster for recovery
recoveryCluster = MockCluster.createOneNodeRecoveryCluster(vcrNode, recoveryNode, dcName);
partitionId = recoveryCluster.getClusterMap().getWritablePartitionIds(null).get(0);
// Start ZK Server and Helix Controller.
if (!zkInfo.isZkServerStarted()) {
zkInfo.startZkServer();
}
helixControllerManager = VcrTestUtil.populateZkInfoAndStartController(zkConnectString, vcrClusterName, recoveryCluster.getClusterMap());
Properties vcrProperties = VcrTestUtil.createVcrProperties(vcrNode.getDatacenterName(), vcrClusterName, zkConnectString, 12310, 12410, 12510, null);
vcrProperties.putAll(recoveryProperties);
NotificationSystem notificationSystem = new MockNotificationSystem(recoveryCluster.getClusterMap());
// Create blobs and data for upload to vcr.
int blobCount = 10;
blobIds = ServerTestUtil.createBlobIds(blobCount, recoveryCluster.getClusterMap(), accountId, containerId, partitionId);
// Create cloud destination and start vcr server.
latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIds, recoveryCluster.getClusterMap());
CloudDestinationFactory cloudDestinationFactory = new LatchBasedInMemoryCloudDestinationFactory(latchBasedInMemoryCloudDestination);
vcrServer = VcrTestUtil.createVcrServer(new VerifiableProperties(vcrProperties), recoveryCluster.getClusterAgentsFactory(), notificationSystem, cloudDestinationFactory);
vcrServer.startup();
// start ambry server with data node
recoveryCluster.initializeServers(notificationSystem, vcrNode, recoveryProperties);
recoveryCluster.startServers();
}
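The VCR node above carries both a plaintext port (12310) and an SSL port (12410). If a later step needs to pick one of them by type rather than hard-coding the number, a minimal sketch, assuming Port exposes its type via getPortType():
// Prefer the SSL port if the node has one; otherwise fall back to the plaintext port.
Port portToUse = vcrPortList.stream()
    .filter(p -> p.getPortType() == PortType.SSL)
    .findFirst()
    .orElse(vcrClusterMapPort);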
Use of com.github.ambry.network.Port in project ambry by linkedin.
The class VcrBackupTest, method sendBlobToDataNode.
/**
* Send blobs to the given data node.
* @param dataNode the target node.
* @param blobCount number of blobs to send.
* @return list of blobs successfully sent.
*/
private List<BlobId> sendBlobToDataNode(DataNodeId dataNode, int blobCount) throws Exception {
int userMetaDataSize = 10;
// Send blobs to DataNode
byte[] userMetadata = new byte[userMetaDataSize];
byte[] data = new byte[blobSize];
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties properties = new BlobProperties(blobSize, "serviceid1", null, null, false, -1, accountId, containerId, false, null, null, null);
TestUtils.RANDOM.nextBytes(userMetadata);
TestUtils.RANDOM.nextBytes(data);
Port port = new Port(dataNode.getPort(), PortType.PLAINTEXT);
ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(port, "localhost", null, null);
channel.connect();
CountDownLatch latch = new CountDownLatch(1);
DirectSender runnable = new DirectSender(mockCluster, channel, blobCount, data, userMetadata, properties, null, latch);
Thread threadToRun = new Thread(runnable);
threadToRun.start();
assertTrue("Did not put all blobs in 2 minutes", latch.await(2, TimeUnit.MINUTES));
List<BlobId> blobIds = runnable.getBlobIds();
for (BlobId blobId : blobIds) {
notificationSystem.awaitBlobCreations(blobId.getID());
}
return blobIds;
}
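A hypothetical call site for the helper above, assuming the surrounding test holds a mockCluster whose ClusterMap can list its data nodes (getDataNodeIds() is part of Ambry's ClusterMap interface; the choice of the first node here is arbitrary):
// Push 10 blobs to one data node and keep the ids for later verification against the VCR backup.
DataNodeId dataNode = mockCluster.getClusterMap().getDataNodeIds().get(0);
List<BlobId> backedUpBlobIds = sendBlobToDataNode(dataNode, 10);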