Use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
The class ReplicationTest, method replicaFromLeaderToStandbyTest.
/**
* Test state transition in replication manager from LEADER to STANDBY
* Test setup: when creating partitions, ensure that there is exactly one replica in LEADER state in each data center.
* Test condition: when a partition on the current node moves from LEADER to STANDBY, verify that the in-memory map
* storing partition to peer leader replicas is updated correctly.
* @throws Exception
*/
@Test
public void replicaFromLeaderToStandbyTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
MockHelixParticipant.metricRegistry = new MetricRegistry();
MockHelixParticipant mockHelixParticipant = new MockHelixParticipant(clusterMapConfig);
ReplicationConfig initialReplicationConfig = replicationConfig;
properties.setProperty("replication.model.across.datacenters", "LEADER_BASED");
replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
Pair<StorageManager, ReplicationManager> managers = createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, mockHelixParticipant);
StorageManager storageManager = managers.getFirst();
MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
PartitionId existingPartition = replicationManager.partitionToPartitionInfo.keySet().iterator().next();
mockHelixParticipant.onPartitionBecomeLeaderFromStandby(existingPartition.toPathString());
Map<String, Set<ReplicaId>> peerLeaderReplicasByPartition = replicationManager.leaderBasedReplicationAdmin.getLeaderPartitionToPeerLeaderReplicas();
assertTrue("Partition is not present in the map of partition to peer leader replicas after it moved from standby to leader", peerLeaderReplicasByPartition.containsKey(existingPartition.toPathString()));
mockHelixParticipant.onPartitionBecomeStandbyFromLeader(existingPartition.toPathString());
assertFalse("Partition is still present in the map of partition to peer leader replicas after it moved from leader to standby", peerLeaderReplicasByPartition.containsKey(existingPartition.toPathString()));
storageManager.shutdown();
replicationConfig = initialReplicationConfig;
}
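For reference, createStorageManagerAndReplicationManager is a helper defined in ReplicationTestHelper and not shown in this listing. A minimal sketch of what it plausibly does, assembled from the StorageManager and MockReplicationManager constructor calls visible in onReplicaAddedOrRemovedCallbackTest below; the exact signature and wiring are assumptions, not the actual Ambry source:
private Pair<StorageManager, ReplicationManager> createStorageManagerAndReplicationManager(MockClusterMap clusterMap, ClusterMapConfig clusterMapConfig, MockHelixParticipant mockHelixParticipant) throws Exception {
  StoreConfig storeConfig = new StoreConfig(verifiableProperties);
  DataNodeId dataNodeId = clusterMap.getDataNodes().get(0);
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  StorageManager storageManager = new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true), new MetricRegistry(), null, clusterMap, dataNodeId, null, null, new MockTime(), null, new InMemAccountService(false, false));
  storageManager.start();
  // the last argument is assumed to be the cluster participant that drives the leader/standby transitions
  MockReplicationManager replicationManager = new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap, dataNodeId, storeKeyConverterFactory, mockHelixParticipant);
  return new Pair<>(storageManager, replicationManager);
}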
Use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
The class ReplicationTest, method onReplicaAddedOrRemovedCallbackTest.
/**
* Test cluster map change callback in {@link ReplicationManager} when any remote replicas are added or removed.
* Test setup: attempt to add 3 replicas and remove 3 replicas respectively. The three replicas are picked as follows:
* (1) 1st replica on the current node (should be skipped)
* (2) 2nd replica on a remote node that shares a partition with the current node (should be added or removed)
* (3) 3rd replica on a remote node that doesn't share a partition with the current node (should be skipped)
* @throws Exception
*/
@Test
public void onReplicaAddedOrRemovedCallbackTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
StoreConfig storeConfig = new StoreConfig(verifiableProperties);
// pick a node with no special partition as current node
Set<DataNodeId> specialPartitionNodes = clusterMap.getSpecialPartition().getReplicaIds().stream().map(ReplicaId::getDataNodeId).collect(Collectors.toSet());
DataNodeId currentNode = clusterMap.getDataNodes().stream().filter(d -> !specialPartitionNodes.contains(d)).findFirst().get();
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
StorageManager storageManager = new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true), new MetricRegistry(), null, clusterMap, currentNode, null, null, new MockTime(), null, new InMemAccountService(false, false));
storageManager.start();
MockReplicationManager replicationManager = new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap, currentNode, storeKeyConverterFactory, null);
ClusterMapChangeListener clusterMapChangeListener = clusterMap.getClusterMapChangeListener();
// find the special partition (not on current node) and get an irrelevant replica from it
PartitionId absentPartition = clusterMap.getSpecialPartition();
ReplicaId irrelevantReplica = absentPartition.getReplicaIds().get(0);
// find an existing replica on current node and one of its peer replicas on remote node
ReplicaId existingReplica = clusterMap.getReplicaIds(currentNode).get(0);
ReplicaId peerReplicaToRemove = existingReplica.getPartitionId().getReplicaIds().stream().filter(r -> r != existingReplica).findFirst().get();
// create a new node and place a peer of existing replica on it.
MockDataNodeId remoteNode = createDataNode(getListOfPorts(PLAIN_TEXT_PORT_START_NUMBER + 10, SSL_PORT_START_NUMBER + 10, HTTP2_PORT_START_NUMBER + 10), clusterMap.getDatacenterName((byte) 0), 3);
ReplicaId addedReplica = new MockReplicaId(remoteNode.getPort(), (MockPartitionId) existingReplica.getPartitionId(), remoteNode, 0);
// populate added replica and removed replica lists
List<ReplicaId> replicasToAdd = new ArrayList<>(Arrays.asList(existingReplica, addedReplica, irrelevantReplica));
List<ReplicaId> replicasToRemove = new ArrayList<>(Arrays.asList(existingReplica, peerReplicaToRemove, irrelevantReplica));
PartitionInfo partitionInfo = replicationManager.getPartitionToPartitionInfoMap().get(existingReplica.getPartitionId());
assertNotNull("PartitionInfo is not found", partitionInfo);
RemoteReplicaInfo peerReplicaInfo = partitionInfo.getRemoteReplicaInfos().stream().filter(info -> info.getReplicaId() == peerReplicaToRemove).findFirst().get();
// get the replica-thread for this peer replica
ReplicaThread peerReplicaThread = peerReplicaInfo.getReplicaThread();
// Test Case 1: replication manager encountered an exception during startup (remote replica addition/removal will be skipped)
replicationManager.startWithException();
clusterMapChangeListener.onReplicaAddedOrRemoved(replicasToAdd, replicasToRemove);
// verify that PartitionInfo stays unchanged
verifyRemoteReplicaInfo(partitionInfo, addedReplica, false);
verifyRemoteReplicaInfo(partitionInfo, peerReplicaToRemove, true);
// Test Case 2: startup latch is interrupted
CountDownLatch initialLatch = replicationManager.startupLatch;
CountDownLatch mockLatch = Mockito.mock(CountDownLatch.class);
doThrow(new InterruptedException()).when(mockLatch).await();
replicationManager.startupLatch = mockLatch;
try {
  clusterMapChangeListener.onReplicaAddedOrRemoved(replicasToAdd, replicasToRemove);
  fail("should fail because startup latch is interrupted");
} catch (IllegalStateException e) {
  // expected
}
replicationManager.startupLatch = initialLatch;
// Test Case 3: replication manager is successfully started
replicationManager.start();
clusterMapChangeListener.onReplicaAddedOrRemoved(replicasToAdd, replicasToRemove);
// verify that PartitionInfo has latest remote replica infos
verifyRemoteReplicaInfo(partitionInfo, addedReplica, true);
verifyRemoteReplicaInfo(partitionInfo, peerReplicaToRemove, false);
verifyRemoteReplicaInfo(partitionInfo, irrelevantReplica, false);
// verify the newly added replica is assigned to a replica thread
ReplicaThread replicaThread = replicationManager.getDataNodeIdToReplicaThreadMap().get(addedReplica.getDataNodeId());
assertNotNull("There is no ReplicaThread assocated with new replica", replicaThread);
Optional<RemoteReplicaInfo> findResult = replicaThread.getRemoteReplicaInfos().get(remoteNode).stream().filter(info -> info.getReplicaId() == addedReplica).findAny();
assertTrue("New added remote replica info should exist in corresponding thread", findResult.isPresent());
// verify the removed replica info's thread is null
assertNull("Thread in removed replica info should be null", peerReplicaInfo.getReplicaThread());
findResult = peerReplicaThread.getRemoteReplicaInfos().get(peerReplicaToRemove.getDataNodeId()).stream().filter(info -> info.getReplicaId() == peerReplicaToRemove).findAny();
assertFalse("Previous replica thread should not contain RemoteReplicaInfo that is already removed", findResult.isPresent());
storageManager.shutdown();
}
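verifyRemoteReplicaInfo is another helper not shown in this listing. Given how it is called above, a plausible sketch (assumed, not the actual Ambry source) simply checks whether the given replica appears in the PartitionInfo's remote replica list:
private void verifyRemoteReplicaInfo(PartitionInfo partitionInfo, ReplicaId replica, boolean shouldExist) {
  // look the replica up among the PartitionInfo's remote replica infos
  Optional<RemoteReplicaInfo> findResult = partitionInfo.getRemoteReplicaInfos().stream().filter(info -> info.getReplicaId() == replica).findAny();
  assertEquals("Presence of remote replica info doesn't match expectation", shouldExist, findResult.isPresent());
}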
Use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
The class ReplicationTestHelper, method lifeVersionLocalGreaterThanRemote_Delete.
/**
* Helper function to test the case where the local lifeVersion is greater than the remote lifeVersion.
* @param localTtlUpdated whether the blob on the local host has been ttl updated.
* @param remoteTtlUpdated whether the blob on the remote host has been ttl updated.
* @throws Exception
*/
protected void lifeVersionLocalGreaterThanRemote_Delete(boolean localTtlUpdated, boolean remoteTtlUpdated) throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
// add 1 messages to remote host with lifeVersion being 0 and add it local host with lifeVersion being 1.
StoreKey toDeleteId = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), 1).get(0);
if (remoteTtlUpdated) {
addTtlUpdateMessagesToReplicasOfPartition(partitionId, toDeleteId, Arrays.asList(remoteHost), UPDATED_EXPIRY_TIME_MS);
}
addDeleteMessagesToReplicasOfPartition(partitionId, toDeleteId, Collections.singletonList(remoteHost));
BlobId blobId = (BlobId) toDeleteId;
short accountId = blobId.getAccountId();
short containerId = blobId.getContainerId();
short lifeVersion = 1;
// first put message has encryption turned on
boolean toEncrypt = true;
// create a put message with lifeVersion bigger than 0
PutMsgInfoAndBuffer msgInfoAndBuffer = createPutMessage(toDeleteId, accountId, containerId, toEncrypt, lifeVersion);
localHost.addMessage(partitionId, new MessageInfo(toDeleteId, msgInfoAndBuffer.byteBuffer.remaining(), false, false, false, Utils.Infinite_Time, null, accountId, containerId, msgInfoAndBuffer.messageInfo.getOperationTimeMs(), lifeVersion), msgInfoAndBuffer.byteBuffer);
if (localTtlUpdated) {
addTtlUpdateMessagesToReplicasOfPartition(partitionId, toDeleteId, Collections.singletonList(localHost), EXPIRY_TIME_MS, lifeVersion);
}
// ensure that the first key is not deleted in the local host
assertNull(toDeleteId + " should not be deleted in the local host", getMessageInfo(toDeleteId, localHost.infosByPartition.get(partitionId), true, false, false));
}
int batchSize = 4;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, null, null, null);
List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
ReplicaThread replicaThread = replicasAndThread.getSecond();
// Do the replica metadata exchange.
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
for (int i = 0; i < response.size(); i++) {
// we don't have any missing key here.
assertEquals(0, response.get(i).missingStoreMessages.size());
remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
PartitionId partitionId = partitionIds.get(i);
StoreKey key = localHost.infosByPartition.get(partitionId).get(0).getStoreKey();
assertNull(key + " should not be deleted in the local host", getMessageInfo(key, localHost.infosByPartition.get(partitionId), true, false, false));
if (!localTtlUpdated) {
assertNull(key + " should not be ttlUpdated in the local host", getMessageInfo(key, localHost.infosByPartition.get(partitionId), false, false, true));
}
}
}
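Since this is a protected helper parameterized on the two ttl-update flags, concrete @Test methods presumably exercise all four combinations, along these lines (the test method name is illustrative, not taken from the Ambry source):
@Test
public void lifeVersionLocalGreaterThanRemoteDeleteTest() throws Exception {
  lifeVersionLocalGreaterThanRemote_Delete(false, false);
  lifeVersionLocalGreaterThanRemote_Delete(false, true);
  lifeVersionLocalGreaterThanRemote_Delete(true, false);
  lifeVersionLocalGreaterThanRemote_Delete(true, true);
}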
Use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
The class AsyncRequestResponseHandlerFactoryTest, method getFactoryTestWithBadInputTest.
/**
* Tests instantiation of {@link AsyncRequestResponseHandlerFactory} with bad input.
*/
@Test
public void getFactoryTestWithBadInputTest() throws IOException {
VerifiableProperties verifiableProperties = new VerifiableProperties(new Properties());
Router router = new InMemoryRouter(verifiableProperties, new MockClusterMap());
RestRequestService restRequestService = new MockRestRequestService(verifiableProperties, router);
// handlerCount = 0
try {
  new AsyncRequestResponseHandlerFactory(0, new MetricRegistry(), restRequestService);
  fail("Instantiation should have failed because response handler count is 0");
} catch (IllegalArgumentException e) {
  // expected. Nothing to do.
}
// handlerCount < 0
try {
  new AsyncRequestResponseHandlerFactory(-1, new MetricRegistry(), restRequestService);
  fail("Instantiation should have failed because response handler count is less than 0");
} catch (IllegalArgumentException e) {
  // expected. Nothing to do.
}
// MetricRegistry null.
try {
  new AsyncRequestResponseHandlerFactory(1, null, restRequestService);
  fail("Instantiation should have failed because one of the arguments was null");
} catch (IllegalArgumentException e) {
  // expected. Nothing to do.
}
// handlerCount = 0
try {
  new AsyncRequestResponseHandlerFactory(0, new MetricRegistry(), restRequestService);
  fail("Instantiation should have failed because request handler count is 0");
} catch (IllegalArgumentException e) {
  // expected. Nothing to do.
}
// handlerCount < 0
try {
  new AsyncRequestResponseHandlerFactory(-1, new MetricRegistry(), restRequestService);
  fail("Instantiation should have failed because request handler count is less than 0");
} catch (IllegalArgumentException e) {
  // expected. Nothing to do.
}
// MetricRegistry null.
try {
  new AsyncRequestResponseHandlerFactory(1, null, restRequestService);
  fail("Instantiation should have failed because one of the arguments was null");
} catch (IllegalArgumentException e) {
  // expected. Nothing to do.
}
// RestRequestService null.
try {
  new AsyncRequestResponseHandlerFactory(1, new MetricRegistry(), null);
  fail("Instantiation should have failed because one of the arguments was null");
} catch (IllegalArgumentException e) {
  // expected. Nothing to do.
}
}
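The test pins down the factory's argument validation: a non-positive handler count or any null argument must raise IllegalArgumentException. A sketch of the kind of constructor check that would produce this behavior (assumed; the actual factory source is not shown in this listing):
public AsyncRequestResponseHandlerFactory(int handlerCount, MetricRegistry metricRegistry, RestRequestService restRequestService) {
  // reject non-positive handler counts and null collaborators up front
  if (handlerCount <= 0 || metricRegistry == null || restRequestService == null) {
    throw new IllegalArgumentException("Handler count is non-positive or one of the arguments is null");
  }
  // ... construct the shared AsyncRequestResponseHandler here ...
}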
Use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
The class RestServerTest, method shutdownWithoutStartTest.
/**
* Tests for {@link RestServer#shutdown()} when {@link RestServer#start()} had not been called previously. This test
* is for cases where {@link RestServer#start()} has failed and {@link RestServer#shutdown()} needs to be run.
* @throws Exception
*/
@Test
public void shutdownWithoutStartTest() throws Exception {
Properties properties = new Properties();
VerifiableProperties verifiableProperties = getVProps(properties);
ClusterMap clusterMap = new MockClusterMap();
NotificationSystem notificationSystem = new LoggingNotificationSystem();
RestServer server = new RestServer(verifiableProperties, clusterMap, notificationSystem, SSL_FACTORY);
server.shutdown();
server.awaitShutdown();
}