use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.
the class OperationTrackerTest method deleteTtlUpdateWithReplicaStateTest.
/**
 * Test delete/ttlUpdate operations when replicasStateEnabled is enabled/disabled.
 * Local DC: 2 STANDBY and 1 INACTIVE; remote DC: 2 STANDBY and 1 INACTIVE.
 * 1. Issue 3 requests in parallel.
 * 2. Make 2 requests fail.
 * 3. Issue 1 request (the replicaState-enabled tracker only has 4 eligible replicas).
 * 4. Make 1 succeed and 1 fail (the replicaState-enabled tracker should fail).
 * 5. Make the remaining requests succeed; this only applies to the tracker with replicaState disabled, whose operation should succeed.
 */
@Test
public void deleteTtlUpdateWithReplicaStateTest() {
  assumeTrue(operationTrackerType.equals(SIMPLE_OP_TRACKER));
  List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
  List<String> mountPaths = Collections.singletonList("mockMountPath");
  // set up one node per data center for testing
  datanodes = new ArrayList<>(Arrays.asList(new MockDataNodeId(portList, mountPaths, "dc-0"), new MockDataNodeId(portList, mountPaths, "dc-1")));
  mockPartition = new MockPartitionId();
  localDcName = datanodes.get(0).getDatacenterName();
  mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), localDcName);
  // put two STANDBY replicas in each data center (note that the "populateReplicaList" method alternately distributes
  // the replicas, so here we add 4 for the two DCs in total)
  populateReplicaList(4, ReplicaState.STANDBY);
  // put one INACTIVE replica in each data center
  populateReplicaList(2, ReplicaState.INACTIVE);
  // test both the delete and the TTL update cases
  for (RouterOperation operation : EnumSet.of(RouterOperation.DeleteOperation, RouterOperation.TtlUpdateOperation)) {
    repetitionTracker.clear();
    OperationTracker ot = getOperationTracker(true, 1, 2, operation, true);
    // issue delete/ttlUpdate requests to 2 local replicas and 1 remote replica
    sendRequests(ot, 3, false);
    // make 2 requests fail and send requests again
    for (int i = 0; i < 2; ++i) {
      ot.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.FAILURE);
    }
    // for the replicaState-enabled operation tracker, only 1 eligible replica is left, so numRequestsExpected = 1
    sendRequests(ot, replicasStateEnabled ? 1 : 2, false);
    // make 1 request fail and 1 request succeed; the replicaState-enabled operation tracker should then fail
    ot.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.FAILURE);
    ot.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.SUCCESS);
    if (replicasStateEnabled) {
      assertFalse("Operation should fail", ot.hasSucceeded());
    } else {
      // if replicasStateEnabled = false, the operation tracker can still succeed after 1 more request succeeds
      ot.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.SUCCESS);
      assertTrue("Operation should succeed", ot.hasSucceeded());
    }
    assertTrue("Operation should be done", ot.isDone());
  }
}
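For reference, the eligible-replica arithmetic that this test relies on can be illustrated with a small helper. This is a hypothetical sketch, not part of Ambry's operation tracker: it assumes a state-aware tracker treats LEADER and STANDBY replicas as eligible for delete/ttlUpdate and skips INACTIVE ones, which matches the setup above (4 STANDBY + 2 INACTIVE replicas leave 4 eligible).

// Hypothetical helper (not Ambry code): counts the replicas a state-aware tracker would consider
// eligible for delete/ttlUpdate, mirroring the arithmetic used by the test above.
static int countEligibleForDeleteOrTtlUpdate(Collection<ReplicaState> replicaStates) {
  // Assumption: LEADER and STANDBY replicas are eligible; INACTIVE replicas are skipped.
  EnumSet<ReplicaState> eligible = EnumSet.of(ReplicaState.LEADER, ReplicaState.STANDBY);
  int count = 0;
  for (ReplicaState state : replicaStates) {
    if (eligible.contains(state)) {
      count++;
    }
  }
  return count;
}

// For the setup above (4 STANDBY + 2 INACTIVE), the count is 4, which is why the state-aware
// tracker has only one eligible replica left after the first three requests.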
use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.
the class OperationTrackerTest method localPutWithReplicaStateTest.
/**
 * Test the put operation in the local DC when replicasStateEnabled is enabled/disabled.
 * Test steps:
 * Case 1: only 2 STANDBY replicas in the local DC;
 *         make 1 succeed and the other fail (both the current and the replicaState-enabled tracker should fail).
 * Case 2: 2 STANDBY and 1 INACTIVE replicas in the local DC;
 *         make 1 fail and 2 succeed (the replicaState-enabled operation tracker should fail).
 * Case 3: 1 LEADER, 4 STANDBY and 1 INACTIVE replicas in the local DC;
 *         make 3 succeed and 2 fail (the replicaState-enabled operation tracker should fail).
 */
@Test
public void localPutWithReplicaStateTest() {
  assumeTrue(operationTrackerType.equals(SIMPLE_OP_TRACKER));
  List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
  List<String> mountPaths = Collections.singletonList("mockMountPath");
  datanodes = Collections.singletonList(new MockDataNodeId(portList, mountPaths, "dc-0"));
  mockPartition = new MockPartitionId();
  // test that if there are only 2 eligible replicas, the success target should use routerConfig.routerPutSuccessTarget
  populateReplicaList(2, ReplicaState.STANDBY);
  localDcName = datanodes.get(0).getDatacenterName();
  mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), localDcName);
  OperationTracker ot = getOperationTracker(true, 1, 2, RouterOperation.PutOperation, true);
  assertFalse("Operation should not have been done.", ot.isDone());
  sendRequests(ot, 2, false);
  // make one request succeed and the other fail
  ot.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.SUCCESS);
  ot.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.FAILURE);
  assertFalse("Operation should fail", ot.hasSucceeded());
  assertTrue("Operation should be done", ot.isDone());
  // add one more replica in INACTIVE state; now we have 2 STANDBY and 1 INACTIVE replicas
  populateReplicaList(1, ReplicaState.INACTIVE);
  repetitionTracker.clear();
  ot = getOperationTracker(true, 1, 3, RouterOperation.PutOperation, true);
  // issue PUT request
  sendRequests(ot, replicasStateEnabled ? 2 : 3, false);
  // make the first request fail and the rest succeed
  ot.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.FAILURE);
  while (!inflightReplicas.isEmpty()) {
    ot.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.SUCCESS);
  }
  if (replicasStateEnabled) {
    assertFalse("Operation should fail because only 2 replicas are eligible and 1 has failed", ot.hasSucceeded());
  } else {
    assertTrue("Operation should succeed when there are 2 successes", ot.hasSucceeded());
  }
  // add three more replicas: one in LEADER state, the other two in STANDBY state
  populateReplicaList(1, ReplicaState.LEADER);
  populateReplicaList(2, ReplicaState.STANDBY);
  // now we have 6 replicas: 1 LEADER, 4 STANDBY and 1 INACTIVE. Number of eligible replicas = 1 + 4 = 5
  repetitionTracker.clear();
  ot = getOperationTracker(true, 1, 5, RouterOperation.PutOperation, true);
  // issue PUT request; parallelism should be 5 when replicaState is enabled
  sendRequests(ot, 5, false);
  // the remaining test applies to the replicaState-enabled operation tracker only
  if (replicasStateEnabled) {
    // make the first 3 requests succeed
    for (int i = 0; i < 3; i++) {
      ot.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.SUCCESS);
      // the success target should be 4 when replicaState is enabled for the operation tracker, so the operation
      // is not done yet even though it has succeeded on 3 replicas.
      assertFalse("Operation should not be done", ot.isDone());
    }
    // make the last 2 requests fail; the operation should then be done and the result should be failure
    for (int i = 0; i < 2; i++) {
      ot.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.FAILURE);
    }
    assertFalse("Operation should fail", ot.hasSucceeded());
    assertTrue("Operation should be done", ot.isDone());
  }
}
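The success-target behavior exercised above can be summarized with a small formula. This is an inference from the test's own comments, not Ambry's implementation: with only 2 eligible replicas the configured put success target applies, while with 5 eligible replicas (1 LEADER + 4 STANDBY) the comments state a success target of 4, i.e. one less than the number of eligible replicas. The configured target of 2 used below is an assumption (its typical default), not something this snippet specifies.

// Hypothetical illustration (inferred from the comments above, not Ambry code): the success target
// a state-aware PUT tracker appears to use for a given number of eligible replicas.
static int putSuccessTarget(int eligibleReplicas, int configuredSuccessTarget) {
  // Assumption: the target scales as (eligible - 1) but never drops below the configured value.
  return Math.max(configuredSuccessTarget, eligibleReplicas - 1);
}

// putSuccessTarget(2, 2) == 2  -> cases 1 and 2: a single failure is enough to fail the operation
// putSuccessTarget(5, 2) == 4  -> case 3: three successes are not enough; two failures fail it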
use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.
the class OperationTrackerTest method initializeWithCloudDcs.
/**
 * Initialize 4 DCs: 2 disk datacenters and 2 cloud datacenters. Each disk datacenter has 3 replicas, and each cloud
 * datacenter has 1 replica.
 * @param makeCloudDcLocal {@code true} to make the local datacenter one of the cloud datacenters.
 */
private void initializeWithCloudDcs(boolean makeCloudDcLocal) {
  List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
  List<String> mountPaths = Collections.singletonList("mockMountPath");
  mockPartition = new MockPartitionId();
  List<MockDataNodeId> diskNodes = Arrays.asList(new MockDataNodeId(portList, mountPaths, "dc-0"), new MockDataNodeId(portList, mountPaths, "dc-1"));
  populateReplicaList(3 * diskNodes.size(), ReplicaState.STANDBY, diskNodes);
  List<MockDataNodeId> cloudNodes = Arrays.asList(new MockDataNodeId(portList, Collections.emptyList(), "cloud-dc-0"), new MockDataNodeId(portList, Collections.emptyList(), "cloud-dc-1"));
  // only one cloud replica per cloud dc.
  populateReplicaList(cloudNodes.size(), ReplicaState.STANDBY, cloudNodes);
  datanodes = new ArrayList<>();
  datanodes.addAll(diskNodes);
  datanodes.addAll(cloudNodes);
  localDcName = (makeCloudDcLocal ? cloudNodes : diskNodes).get(0).getDatacenterName();
  mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), localDcName);
}
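A hypothetical usage of this helper (illustration only; the assertions simply restate what the code above sets up): makeCloudDcLocal picks either the first cloud node's or the first disk node's datacenter as the local DC.

// Illustration only: how makeCloudDcLocal chooses the local datacenter name.
initializeWithCloudDcs(true);
assertEquals("cloud-dc-0", localDcName);   // local DC is the first cloud datacenter
initializeWithCloudDcs(false);
assertEquals("dc-0", localDcName);         // local DC is the first disk datacenter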
use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.
the class OperationTrackerTest method initialize.
/**
 * Initialize 4 DCs; each DC has 1 data node, which has 3 replicas.
 */
private void initialize() {
  int replicaCount = 12;
  List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
  List<String> mountPaths = Collections.singletonList("mockMountPath");
  datanodes = new ArrayList<>(Arrays.asList(new MockDataNodeId(portList, mountPaths, "dc-0"), new MockDataNodeId(portList, mountPaths, "dc-1"), new MockDataNodeId(portList, mountPaths, "dc-2"), new MockDataNodeId(portList, mountPaths, "dc-3")));
  mockPartition = new MockPartitionId();
  populateReplicaList(replicaCount, ReplicaState.STANDBY);
  localDcName = datanodes.get(0).getDatacenterName();
  mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), localDcName);
}
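For orientation, a hypothetical check of the layout this helper produces (illustration only; it assumes populateReplicaList registers the replicas on mockPartition, as the other snippets suggest):

// Illustration only: 12 replicas over 4 single-node DCs means 3 replicas per DC, with "dc-0" local.
initialize();
assertEquals(4, datanodes.size());
assertEquals("dc-0", localDcName);
assertEquals(12, mockPartition.getReplicaIds().size());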
use of com.github.ambry.clustermap.MockDataNodeId in project ambry by linkedin.
the class ServerTestUtil method checkReplicaTokens.
/**
 * Repeatedly check the replication token file until a certain offset value on all nodes on a certain
 * partition is found. Fail if {@code numTries} is exceeded or a token offset larger than the target
 * is found.
 * @param clusterMap the cluster map that contains the data node to inspect
 * @param dataNodeId the data node to inspect
 * @param targetOffset the token offset to look for in the {@code targetPartition}
 * @param targetPartition the name of the partition to look for the {@code targetOffset}
 * @throws Exception
 */
private static void checkReplicaTokens(MockClusterMap clusterMap, DataNodeId dataNodeId, long targetOffset, String targetPartition) throws Exception {
  List<String> mountPaths = ((MockDataNodeId) dataNodeId).getMountPaths();
  // we should have an entry for each partition - remote replica pair
  Set<String> completeSetToCheck = new HashSet<>();
  List<ReplicaId> replicaIds = clusterMap.getReplicaIds(dataNodeId);
  int numRemoteNodes = 0;
  for (ReplicaId replicaId : replicaIds) {
    List<? extends ReplicaId> peerReplicas = replicaId.getPeerReplicaIds();
    if (replicaId.getPartitionId().isEqual(targetPartition)) {
      numRemoteNodes = peerReplicas.size();
    }
    for (ReplicaId peerReplica : peerReplicas) {
      completeSetToCheck.add(replicaId.getPartitionId().toString() + peerReplica.getDataNodeId().getHostname() + peerReplica.getDataNodeId().getPort());
    }
  }
  StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
  FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
  int numTries = 4;
  boolean foundTarget = false;
  while (!foundTarget && numTries > 0) {
    Thread.sleep(5000);
    numTries--;
    Set<String> setToCheck = new HashSet<String>(completeSetToCheck);
    int numFound = 0;
    for (String mountPath : mountPaths) {
      File replicaTokenFile = new File(mountPath, "replicaTokens");
      if (replicaTokenFile.exists()) {
        CrcInputStream crcStream = new CrcInputStream(new FileInputStream(replicaTokenFile));
        DataInputStream dataInputStream = new DataInputStream(crcStream);
        try {
          short version = dataInputStream.readShort();
          assertEquals(1, version);
          while (dataInputStream.available() > 8) {
            // read partition id
            PartitionId partitionId = clusterMap.getPartitionIdFromStream(dataInputStream);
            // read remote node host name
            String hostname = Utils.readIntString(dataInputStream);
            // read remote replica path
            Utils.readIntString(dataInputStream);
            // read remote port
            int port = dataInputStream.readInt();
            assertTrue(setToCheck.contains(partitionId.toString() + hostname + port));
            setToCheck.remove(partitionId.toString() + hostname + port);
            // read total bytes read from local store
            dataInputStream.readLong();
            // read replica type
            ReplicaType replicaType = ReplicaType.values()[dataInputStream.readShort()];
            // read replica token
            StoreFindToken token = (StoreFindToken) factory.getFindToken(dataInputStream);
            System.out.println("partitionId " + partitionId + " hostname " + hostname + " port " + port + " token " + token);
            Offset endTokenOffset = token.getOffset();
            long parsedToken = endTokenOffset == null ? -1 : endTokenOffset.getOffset();
            System.out.println("The parsed token is " + parsedToken);
            if (partitionId.isEqual(targetPartition)) {
              assertFalse("Parsed offset: " + parsedToken + " must not be larger than target value: " + targetOffset, parsedToken > targetOffset);
              if (parsedToken == targetOffset) {
                numFound++;
              }
            } else {
              assertEquals("Tokens should remain at -1 offsets on unmodified partitions", -1, parsedToken);
            }
          }
          long crc = crcStream.getValue();
          assertEquals(crc, dataInputStream.readLong());
        } catch (IOException e) {
          fail();
        } finally {
          dataInputStream.close();
        }
      }
    }
    if (numFound == numRemoteNodes) {
      foundTarget = true;
    }
  }
  if (!foundTarget) {
    fail("Could not find target token offset: " + targetOffset);
  }
}
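The parsing loop above implies a record layout for the replicaTokens file. The following writer is a sketch of that layout for illustration, not Ambry's persistence code: the partition-id encoding and the token bytes are placeholders, it assumes ASCII strings so that Utils.readIntString can read them back, and it uses java.util.zip.CRC32 with CheckedOutputStream in place of Ambry's CRC stream classes.

// Sketch only: writes a single record in the shape the reader above expects.
static void writeReplicaTokenRecord(File file, byte[] partitionIdBytes, String hostname, String replicaPath,
    int port, long bytesReadFromLocalStore, short replicaTypeOrdinal, byte[] tokenBytes) throws IOException {
  CRC32 crc = new CRC32();
  try (DataOutputStream out = new DataOutputStream(new CheckedOutputStream(new FileOutputStream(file), crc))) {
    out.writeShort(1);                       // version; the reader asserts it is 1
    out.write(partitionIdBytes);             // partition id, in the cluster map's own serialized form
    out.writeInt(hostname.length());         // remote host name, length-prefixed (see Utils.readIntString)
    out.writeBytes(hostname);
    out.writeInt(replicaPath.length());      // remote replica path, length-prefixed
    out.writeBytes(replicaPath);
    out.writeInt(port);                      // remote port
    out.writeLong(bytesReadFromLocalStore);  // total bytes read from the local store
    out.writeShort(replicaTypeOrdinal);      // ReplicaType ordinal
    out.write(tokenBytes);                   // serialized replica token (StoreFindToken)
    out.writeLong(crc.getValue());           // trailing CRC over everything written before it
  }
}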