Use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
The class MockHost, method getRemoteReplicaInfos.
/**
 * Gets the list of {@link RemoteReplicaInfo} from this host to the given {@code remoteHost}.
 * @param remoteHost the host whose replica info is required.
 * @param listener the {@link ReplicationTest.StoreEventListener} to use.
 * @return the list of {@link RemoteReplicaInfo} from this host to the given {@code remoteHost}.
 */
List<RemoteReplicaInfo> getRemoteReplicaInfos(MockHost remoteHost, ReplicationTest.StoreEventListener listener) {
  List<? extends ReplicaId> replicaIds = clusterMap.getReplicaIds(dataNodeId);
  List<RemoteReplicaInfo> remoteReplicaInfos = new ArrayList<>();
  for (ReplicaId replicaId : replicaIds) {
    for (ReplicaId peerReplicaId : replicaId.getPeerReplicaIds()) {
      if (peerReplicaId.getDataNodeId().equals(remoteHost.dataNodeId)) {
        PartitionId partitionId = replicaId.getPartitionId();
        InMemoryStore store = storesByPartition.computeIfAbsent(partitionId,
            partitionId1 -> new InMemoryStore(partitionId,
                infosByPartition.computeIfAbsent(partitionId1,
                    (Function<PartitionId, List<MessageInfo>>) partitionId2 -> new ArrayList<>()),
                buffersByPartition.computeIfAbsent(partitionId1,
                    (Function<PartitionId, List<ByteBuffer>>) partitionId22 -> new ArrayList<>()),
                listener));
        store.start();
        RemoteReplicaInfo remoteReplicaInfo =
            new RemoteReplicaInfo(peerReplicaId, replicaId, store, new MockFindToken(0, 0), Long.MAX_VALUE,
                SystemTime.getInstance(), new Port(peerReplicaId.getDataNodeId().getPort(), PortType.PLAINTEXT));
        remoteReplicaInfos.add(remoteReplicaInfo);
      }
    }
  }
  return remoteReplicaInfos;
}
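In the replication tests further down, this helper is typically invoked with a null listener and the returned infos are registered on a ReplicaThread. A minimal usage sketch (localHost, remoteHost, and replicaThread are illustrative names mirroring those tests):

List<RemoteReplicaInfo> remoteReplicaInfoList = localHost.getRemoteReplicaInfos(remoteHost, null);
for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfoList) {
  // Register each local/remote replica pair with the replication thread under test.
  replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
}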
Use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
The class ReplicationMetrics, method generateRemoteReplicaMetricPrefix.
private String generateRemoteReplicaMetricPrefix(RemoteReplicaInfo remoteReplicaInfo) {
  ReplicaId replicaId = remoteReplicaInfo.getReplicaId();
  DataNodeId dataNodeId = replicaId.getDataNodeId();
  return dataNodeId.getHostname() + "-" + dataNodeId.getPort() + "-" + replicaId.getPartitionId().toString();
}
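The prefix is simply hostname, port, and partition joined with dashes. A rough illustration follows; the hostname, port, and the exact PartitionId.toString() output are assumptions for the example, not values taken from the source.

// Illustrative only: for a data node "ambry-host1" on port 15088 and a partition whose
// toString() were "Partition[5]", the method above would build:
String examplePrefix = "ambry-host1" + "-" + 15088 + "-" + "Partition[5]";
// examplePrefix -> "ambry-host1-15088-Partition[5]"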
Use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
The class CloudToStoreReplicationManagerTest, method testGetCloudDataNode.
/**
 * Test {@code CloudToStoreReplicationManager#getCloudDataNode}.
 */
@Test
public void testGetCloudDataNode() throws NoSuchFieldException, ReplicationException {
  CloudToStoreReplicationManager mockCloudToStoreReplicationManager = mock(CloudToStoreReplicationManager.class);
  List<DataNodeId> dataNodeIds = new ArrayList<>();
  Port port = new Port(1000, PortType.PLAINTEXT);
  AtomicReference<List<DataNodeId>> vcrNodes = new AtomicReference<>();
  vcrNodes.set(dataNodeIds);
  FieldSetter.setField(mockCloudToStoreReplicationManager,
      CloudToStoreReplicationManager.class.getDeclaredField("vcrNodes"), vcrNodes);
  when(mockCloudToStoreReplicationManager.getCloudDataNode()).thenCallRealMethod();
  // Test getCloudDataNode() with empty vcrNodes: it should throw.
  try {
    mockCloudToStoreReplicationManager.getCloudDataNode();
    fail("Calling getCloudDataNode when there are no vcrNodes should throw exception.");
  } catch (ReplicationException rex) {
    // expected
  }
  // Add vcr nodes.
  for (int i = 0; i < 100; i++) {
    dataNodeIds.add(new MockDataNodeId("hostname" + i, Collections.singletonList(port), null, null));
  }
  // Make sure that calling getCloudDataNode() doesn't return the same node every time.
  try {
    Set<String> dataNodeIdSet = new HashSet<>();
    for (int i = 0; i < 5; i++) {
      dataNodeIdSet.add(mockCloudToStoreReplicationManager.getCloudDataNode().getHostname());
    }
    assertTrue("getCloudDataNode shouldn't return same node every time", dataNodeIdSet.size() > 1);
  } catch (ReplicationException rex) {
    fail("getCloudDataNode shouldn't fail if vcrNodes is not empty");
  }
}
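The test only asserts that an empty vcrNodes list fails and that repeated calls can return different hosts. The sketch below shows that kind of random selection over a node list; it is an illustration of the behavior being asserted, not the actual CloudToStoreReplicationManager implementation, and it assumes ReplicationException offers a String constructor and that java.util.concurrent.ThreadLocalRandom is imported.

// Sketch of random node selection consistent with the assertions above; illustrative only.
DataNodeId pickRandomVcrNode(List<DataNodeId> vcrNodes) throws ReplicationException {
  if (vcrNodes.isEmpty()) {
    throw new ReplicationException("No VCR nodes available");
  }
  return vcrNodes.get(ThreadLocalRandom.current().nextInt(vcrNodes.size()));
}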
Use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
The class ReplicationTest, method blockDeprecatedContainerReplicationTest.
/**
 * Tests that blobs in deprecated (inactive or delete-in-progress) containers are blocked during replication.
 */
@Test
public void blockDeprecatedContainerReplicationTest() throws Exception {
  Properties properties = new Properties();
  properties.setProperty("replication.container.deletion.enabled", "true");
  replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
  MockClusterMap clusterMap = new MockClusterMap();
  Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
  MockHost localHost = localAndRemoteHosts.getFirst();
  MockHost remoteHost = localAndRemoteHosts.getSecond();
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  storeKeyConverterFactory.setReturnInputIfAbsent(true);
  MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter =
      storeKeyConverterFactory.getStoreKeyConverter();
  Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
  storeKeyConverter.setConversionMap(conversionMap);
  List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
  for (int i = 0; i < partitionIds.size(); i++) {
    PartitionId partitionId = partitionIds.get(i);
    BlobId b0 = generateRandomBlobId(partitionId);
    conversionMap.put(b0, b0);
    BlobId b1 = generateRandomBlobId(partitionId);
    conversionMap.put(b1, b1);
    // Register both keys with the converter.
    storeKeyConverter.setConversionMap(conversionMap);
    storeKeyConverter.convert(conversionMap.keySet());
    // addPutMessagesToReplicasOfPartition(Arrays.asList(b0), Arrays.asList(localHost, remoteHost));
    // Add the two messages to the remote host only.
    addPutMessagesToReplicasOfPartition(Arrays.asList(b0, b1), Collections.singletonList(remoteHost));
  }
  StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
  Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
  int batchSize = 4;
  ReplicationMetrics replicationMetrics =
      new ReplicationMetrics(new MetricRegistry(), clusterMap.getReplicaIds(localHost.dataNodeId));
  replicationMetrics.populateSingleColoMetrics(remoteHost.dataNodeId.getDatacenterName());
  List<RemoteReplicaInfo> remoteReplicaInfoList = localHost.getRemoteReplicaInfos(remoteHost, null);
  Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate =
      Collections.singletonMap(remoteHost.dataNodeId, remoteReplicaInfoList);
  storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
  Map<DataNodeId, MockHost> hosts = new HashMap<>();
  hosts.put(remoteHost.dataNodeId, remoteHost);
  MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, batchSize);
  Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
  ReplicaThread replicaThread =
      new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap,
          new AtomicInteger(0), localHost.dataNodeId, connectionPool, replicationConfig, replicationMetrics, null,
          storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false,
          localHost.dataNodeId.getDatacenterName(), new ResponseHandler(clusterMap), time, null, skipPredicate);
  for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfoList) {
    replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
  }
  List<RemoteReplicaInfo> remoteReplicaInfos = replicasToReplicate.get(remoteHost.dataNodeId);
  DataNodeId remoteNode = remoteReplicaInfos.get(0).getReplicaId().getDataNodeId();
  ReplicaMetadataResponse response = replicaThread.getReplicaMetadataResponse(remoteReplicaInfos,
      new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteNode);
  // Case 1: DELETE_IN_PROGRESS containers whose delete-trigger time is past the retention window.
  for (int i = 0; i < 2; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getDeleteTriggerTime()).thenReturn(
          System.currentTimeMillis() - TimeUnit.DAYS.toMillis(
              replicationConfig.replicationContainerDeletionRetentionDays + 1));
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.DELETE_IN_PROGRESS);
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("All DELETE_IN_PROGRESS blobs qualified with retention time should be skipped during replication",
        0, remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
  // Case 2: DELETE_IN_PROGRESS containers still within the retention window.
  for (int i = 2; i < 4; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.DELETE_IN_PROGRESS);
      Mockito.when(container.getDeleteTriggerTime()).thenReturn(System.currentTimeMillis());
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("DELETE_IN_PROGRESS blobs still within the retention time should not be skipped during replication",
        2, remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
  // Case 3: INACTIVE containers.
  for (int i = 4; i < 6; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.INACTIVE);
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("All INACTIVE blobs should be skipped during replication", 0, remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
  // Case 4: ACTIVE containers.
  for (int i = 6; i < 8; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.ACTIVE);
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("All non-deprecated blobs should not be skipped during replication", 2,
        remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
}
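The four cases exercise a single decision: a blob is skipped when its container is INACTIVE, or DELETE_IN_PROGRESS with a delete-trigger time that has fallen outside the retention window. The condensed sketch below captures that check using only the accessors mocked above; it illustrates the behavior under test and is not the actual ReplicationSkipPredicate source.

// Sketch of the skip decision exercised by the four cases; illustrative only.
boolean shouldSkip(Container container, ReplicationConfig config) {
  if (container.getStatus() == Container.ContainerStatus.INACTIVE) {
    return true; // case 3: blobs in inactive containers are skipped
  }
  if (container.getStatus() == Container.ContainerStatus.DELETE_IN_PROGRESS) {
    long retentionMs = TimeUnit.DAYS.toMillis(config.replicationContainerDeletionRetentionDays);
    // case 1 vs. case 2: skip only once the retention window has elapsed
    return container.getDeleteTriggerTime() + retentionMs <= System.currentTimeMillis();
  }
  return false; // case 4: blobs in active containers replicate normally
}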
Use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
The class ReplicationTest, method remoteReplicaInfoAddRemoveTest.
/**
 * Tests adding and removing {@link RemoteReplicaInfo} on a {@link ReplicaThread}.
 * @throws Exception
 */
@Test
public void remoteReplicaInfoAddRemoveTest() throws Exception {
  MockClusterMap clusterMap = new MockClusterMap();
  Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
  MockHost localHost = localAndRemoteHosts.getFirst();
  MockHost remoteHost = localAndRemoteHosts.getSecond();
  StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
  MockStoreKeyConverterFactory mockStoreKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  mockStoreKeyConverterFactory.setReturnInputIfAbsent(true);
  mockStoreKeyConverterFactory.setConversionMap(new HashMap<>());
  StoreKeyConverter storeKeyConverter = mockStoreKeyConverterFactory.getStoreKeyConverter();
  Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
  ReplicationMetrics replicationMetrics =
      new ReplicationMetrics(new MetricRegistry(), clusterMap.getReplicaIds(localHost.dataNodeId));
  replicationMetrics.populateSingleColoMetrics(remoteHost.dataNodeId.getDatacenterName());
  List<RemoteReplicaInfo> remoteReplicaInfoList = localHost.getRemoteReplicaInfos(remoteHost, null);
  Map<DataNodeId, MockHost> hosts = new HashMap<>();
  hosts.put(remoteHost.dataNodeId, remoteHost);
  MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, 4);
  ReplicaThread replicaThread =
      new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap,
          new AtomicInteger(0), localHost.dataNodeId, connectionPool, replicationConfig, replicationMetrics, null,
          mockStoreKeyConverterFactory.getStoreKeyConverter(), transformer, clusterMap.getMetricRegistry(), false,
          localHost.dataNodeId.getDatacenterName(), new ResponseHandler(clusterMap), time, null, null, null);
  // Test adding remoteReplicaInfos.
  for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfoList) {
    replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
  }
  List<RemoteReplicaInfo> actualRemoteReplicaInfoList =
      replicaThread.getRemoteReplicaInfos().get(remoteHost.dataNodeId);
  Comparator<RemoteReplicaInfo> remoteReplicaInfoComparator =
      Comparator.comparing(info -> info.getReplicaId().getPartitionId().toPathString());
  Collections.sort(remoteReplicaInfoList, remoteReplicaInfoComparator);
  Collections.sort(actualRemoteReplicaInfoList, remoteReplicaInfoComparator);
  assertEquals("getRemoteReplicaInfos not correct", remoteReplicaInfoList, actualRemoteReplicaInfoList);
  // Test removing a remoteReplicaInfo.
  replicaThread.removeRemoteReplicaInfo(remoteReplicaInfoList.get(remoteReplicaInfoList.size() - 1));
  actualRemoteReplicaInfoList = replicaThread.getRemoteReplicaInfos().get(remoteHost.dataNodeId);
  Collections.sort(actualRemoteReplicaInfoList, remoteReplicaInfoComparator);
  remoteReplicaInfoList.remove(remoteReplicaInfoList.size() - 1);
  assertEquals("getRemoteReplicaInfos not correct", remoteReplicaInfoList, actualRemoteReplicaInfoList);
}
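The sort-before-assert pattern above exists only to compare the two lists independent of iteration order. A purely illustrative alternative compares the partition paths as sets (this assumes java.util.stream.Collectors is imported and one replica per partition, and is not part of the original test):

Set<String> expectedPaths = remoteReplicaInfoList.stream()
    .map(info -> info.getReplicaId().getPartitionId().toPathString())
    .collect(Collectors.toSet());
Set<String> actualPaths = actualRemoteReplicaInfoList.stream()
    .map(info -> info.getReplicaId().getPartitionId().toPathString())
    .collect(Collectors.toSet());
assertEquals("getRemoteReplicaInfos not correct", expectedPaths, actualPaths);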