use of com.github.ambry.clustermap.MockReplicaId in project ambry by linkedin.
the class AdaptiveOperationTrackerTest method partitionLevelAdaptiveTrackerTest.
/**
* Tests that the adaptive tracker uses a separate partition-level histogram to determine if inflight requests are past due.
* @throws Exception
*/
@Test
public void partitionLevelAdaptiveTrackerTest() throws Exception {
MockPartitionId mockPartition1 = new MockPartitionId(0L, MockClusterMap.DEFAULT_PARTITION_CLASS);
MockPartitionId mockPartition2 = new MockPartitionId(1L, MockClusterMap.DEFAULT_PARTITION_CLASS);
for (int i = 0; i < REPLICA_COUNT; i++) {
mockPartition1.replicaIds.add(new MockReplicaId(PORT, mockPartition1, datanodes.get(i % datanodes.size()), 1));
mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, datanodes.get(i % datanodes.size()), 2));
}
MockClusterMap clusterMap = new MockClusterMap(false, datanodes, 3, Arrays.asList(mockPartition1, mockPartition2), localDcName);
trackerScope = OperationTrackerScope.Partition;
RouterConfig routerConfig = createRouterConfig(true, 2, 1, 6, null, true);
NonBlockingRouterMetrics originalMetrics = routerMetrics;
routerMetrics = new NonBlockingRouterMetrics(clusterMap, routerConfig);
Counter pastDueCount = routerMetrics.getBlobPastDueCount;
Map<Resource, CachedHistogram> localColoMap = routerMetrics.getBlobLocalDcResourceToLatency;
Map<Resource, CachedHistogram> crossColoMap = routerMetrics.getBlobCrossDcResourceToLatency;
// mock different distribution of Histogram for two partitions
Histogram localHistogram1 = localColoMap.get(mockPartition1);
Histogram localHistogram2 = localColoMap.get(mockPartition2);
Histogram remoteHistogram1 = crossColoMap.get(mockPartition1);
primeTracker(localHistogram1, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(0L, 50L));
primeTracker(localHistogram2, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(100L, 120L));
primeTracker(remoteHistogram1, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(150L, 180L));
OperationTracker tracker1 = getOperationTracker(routerConfig, mockPartition1);
OperationTracker tracker2 = getOperationTracker(routerConfig, mockPartition2);
double localColoCutoff1 = localHistogram1.getSnapshot().getValue(QUANTILE);
double localColoCutoff2 = localHistogram2.getSnapshot().getValue(QUANTILE);
double crossColoCutoff1 = remoteHistogram1.getSnapshot().getValue(QUANTILE);
sendRequests(tracker2, 1);
sendRequests(tracker1, 1);
// partition1: 2-1-0-0, partition2: 2-1-0-0
time.sleep((long) localColoCutoff1 + 1);
// partition1 should send its 2nd request; partition2 won't because its 1st request isn't past due.
sendRequests(tracker1, 1);
sendRequests(tracker2, 0);
// partition1: 1-2-0-0, partition2: 2-1-0-0
time.sleep((long) (localColoCutoff2 - localColoCutoff1) + 2);
// note that localColoCutoff2 > 2 * localColoCutoff1, so the 2nd request of partition1 and the 1st request of partition2 are both past due
sendRequests(tracker1, 1);
sendRequests(tracker2, 1);
// partition1: 0-3-0-0, partition2: 1-2-0-0
time.sleep((long) localColoCutoff1 + 1);
// the 3rd local request of partition1 is past due, so partition1 starts sending its 1st cross-colo request
sendRequests(tracker1, 1);
sendRequests(tracker2, 0);
// partition1: 0-3-0-0(local), 2-1-0-0(remote); partition2: 1-2-0-0(local)
time.sleep((long) crossColoCutoff1 + 1);
// 1st cross-colo request of partition1 is past due and 2nd local request of partition2 is past due.
sendRequests(tracker1, 1);
sendRequests(tracker2, 1);
// generate response for each request to make them successful
for (int i = 0; i < 2; ++i) {
assertFalse("Operation should not be done", tracker1.isDone() || tracker2.isDone());
tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.SUCCESS);
tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.SUCCESS);
}
assertTrue("Operation should have succeeded", tracker1.hasSucceeded() && tracker2.hasSucceeded());
assertEquals("Past due counter is not expected", 4 + 2, pastDueCount.getCount());
// complete the remaining inflight requests and test different final states for the requests
LinkedList<ReplicaId> inflightRequests1 = partitionAndInflightReplicas.get(mockPartition1);
LinkedList<ReplicaId> inflightRequests2 = partitionAndInflightReplicas.get(mockPartition2);
while (!inflightRequests1.isEmpty()) {
tracker1.onResponse(inflightRequests1.poll(), TrackedRequestFinalState.FAILURE);
}
while (!inflightRequests2.isEmpty()) {
tracker2.onResponse(inflightRequests2.poll(), TrackedRequestFinalState.TIMED_OUT);
}
// The number of data points in the local colo histogram should be 5 (3 from partition1, 2 from partition2). Note that
// the 3rd request of partition2 timed out, and timed-out requests shouldn't be added to the histogram.
assertEquals("Mismatch in number of data points in local colo histogram", 5, routerMetrics.getBlobLocalDcLatencyMs.getCount());
// The number of data points in cross colo histogram should be 2 (both of them come from partition1)
assertEquals("Mismatch in number of data points in cross colo histogram", 2, routerMetrics.getBlobCrossDcLatencyMs.getCount());
// additional test: mock a new partition that is dynamically added; the adaptive operation tracker should be able to
// create a histogram on demand.
MockPartitionId mockPartition3 = (MockPartitionId) clusterMap.createNewPartition(datanodes);
OperationTracker tracker3 = getOperationTracker(routerConfig, mockPartition3);
// send 1st request
sendRequests(tracker3, 1);
// attempt to send the 2nd request to make the tracker check the histogram and create a new one associated with this partition
// the oldest request isn't past due (because there are not enough data points in the histogram), so the 2nd is not sent
sendRequests(tracker3, 0);
tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.SUCCESS);
// now it should be able to send 2nd request
sendRequests(tracker3, 1);
tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.SUCCESS);
assertTrue("Operation should have succeeded", tracker3.hasSucceeded());
// restore the tracker scope and routerMetrics
trackerScope = OperationTrackerScope.Datacenter;
routerMetrics = originalMetrics;
}
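The pattern this test exercises — comparing the elapsed time of the oldest inflight request against a quantile of a resource-scoped latency histogram — is the heart of the adaptive tracker. Below is a minimal sketch of that check, assuming a Codahale Histogram; the helper name and parameters are hypothetical, not Ambry's actual AdaptiveOperationTracker API.

import com.codahale.metrics.Histogram;

// Hypothetical sketch: decide whether a request is past due by comparing its
// elapsed time against the QUANTILE cutoff of the resource-scoped histogram.
static boolean isPastDue(Histogram scopedHistogram, long requestStartMs, long nowMs, double quantile, long minDataPoints) {
  // With too few samples the quantile is unreliable, so never declare past due;
  // this is why tracker3 above could not send its 2nd request at first.
  if (scopedHistogram.getCount() < minDataPoints) {
    return false;
  }
  double cutoffMs = scopedHistogram.getSnapshot().getValue(quantile);
  return (nowMs - requestStartMs) > cutoffMs;
}

This also explains the time.sleep calls in the test: each sleep pushes the oldest inflight request just past the primed cutoff for exactly one of the two partitions.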
use of com.github.ambry.clustermap.MockReplicaId in project ambry by linkedin.
the class AdaptiveOperationTrackerTest method diskLevelAdaptiveTrackerTest.
/**
* Tests that the adaptive tracker uses a separate disk-level histogram to determine if inflight requests are past due.
* The mocked partition layout for this test is shown below. This test also covers the case where new nodes and a new
* partition are dynamically added.
* | | Partition 1 | Partition 2 | Partition 3 (added at runtime)
* -------------------------------------------------------------------------------------
* LocalHost1 | Disk0 | Replica_1 | |
* | Disk1 | | Replica_1 |
* -------------------------------------------------------------------------------------
* RemoteHost1 | Disk0 | Replica_2 | Replica_2 |
* | Disk1 | Replica_3 | Replica_3 |
* -------------------------------------------------------------------------------------
* NewNode1 | Disk0 | | | Replica_1
* | Disk1 | | |
* -------------------------------------------------------------------------------------
* NewNode2    | Disk0 |             |             |
* | Disk1 | | | Replica_2
* @throws Exception
*/
@Test
public void diskLevelAdaptiveTrackerTest() throws Exception {
MockPartitionId mockPartition1 = new MockPartitionId(1L, MockClusterMap.DEFAULT_PARTITION_CLASS);
MockPartitionId mockPartition2 = new MockPartitionId(2L, MockClusterMap.DEFAULT_PARTITION_CLASS);
// create a new list of mock datanodes instead of using the default class member
List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
List<String> mountPaths = Arrays.asList("mockMountPath0", "mockMountPath1");
MockDataNodeId localHost1 = new MockDataNodeId("LocalHost1", portList, mountPaths, "dc-0");
MockDataNodeId remoteHost1 = new MockDataNodeId("RemoteHost1", portList, mountPaths, "dc-1");
List<MockDataNodeId> datanodes = new ArrayList<>(Arrays.asList(localHost1, remoteHost1));
// distribute replicas to nodes (note that the local DC name is still "dc-0" in this setup)
ReplicaId partition1Replica1 = new MockReplicaId(PORT, mockPartition1, localHost1, 0);
ReplicaId partition1Replica2 = new MockReplicaId(PORT, mockPartition1, remoteHost1, 0);
ReplicaId partition1Replica3 = new MockReplicaId(PORT, mockPartition1, remoteHost1, 1);
ReplicaId partition2Replica1 = new MockReplicaId(PORT, mockPartition2, localHost1, 1);
mockPartition1.replicaIds.add(partition1Replica1);
mockPartition1.replicaIds.add(partition1Replica2);
mockPartition1.replicaIds.add(partition1Replica3);
mockPartition2.replicaIds.add(partition2Replica1);
mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, remoteHost1, 0));
mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, remoteHost1, 1));
MockClusterMap clusterMap = new MockClusterMap(false, datanodes, 2, Arrays.asList(mockPartition1, mockPartition2), localDcName);
trackerScope = OperationTrackerScope.Disk;
RouterConfig routerConfig = createRouterConfig(true, 1, 1, 6, null, true);
NonBlockingRouterMetrics originalMetrics = routerMetrics;
routerMetrics = new NonBlockingRouterMetrics(clusterMap, routerConfig);
Counter pastDueCount = routerMetrics.getBlobPastDueCount;
Map<Resource, CachedHistogram> localColoMap = routerMetrics.getBlobLocalDcResourceToLatency;
Map<Resource, CachedHistogram> crossColoMap = routerMetrics.getBlobCrossDcResourceToLatency;
// mock different latency distributions for different disks
Histogram localHostDisk0Histogram = localColoMap.get(partition1Replica1.getDiskId());
Histogram localHostDisk1Histogram = localColoMap.get(partition2Replica1.getDiskId());
Histogram remoteHostDisk0Histogram = crossColoMap.get(partition1Replica2.getDiskId());
Histogram remoteHostDisk1Histogram = crossColoMap.get(partition1Replica3.getDiskId());
primeTracker(localHostDisk0Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(0L, 50L));
primeTracker(localHostDisk1Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(100L, 120L));
primeTracker(remoteHostDisk0Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(150L, 180L));
primeTracker(remoteHostDisk1Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(150L, 180L));
double localHostDisk0Cutoff = localHostDisk0Histogram.getSnapshot().getValue(QUANTILE);
double localHostDisk1Cutoff = localHostDisk1Histogram.getSnapshot().getValue(QUANTILE);
double remoteHostDisk0Cutoff = remoteHostDisk0Histogram.getSnapshot().getValue(QUANTILE);
OperationTracker tracker1 = getOperationTracker(routerConfig, mockPartition1);
OperationTracker tracker2 = getOperationTracker(routerConfig, mockPartition2);
// issue first request for both partitions in local DC
sendRequests(tracker2, 1);
sendRequests(tracker1, 1);
// partition1: 0-1-0-0, partition2: 0-1-0-0
time.sleep((long) localHostDisk0Cutoff + 1);
// partition1 should send its 2nd request to RemoteHost1; partition2 won't because its 1st request isn't past due.
sendRequests(tracker1, 1);
sendRequests(tracker2, 0);
// partition1: 0-1-0-0(local), 1-1-0-0(remote); partition2: 0-1-0-0(local), 2-0-0-0(remote)
time.sleep((long) (localHostDisk1Cutoff - localHostDisk0Cutoff) + 2);
// partition2's request to Replica_1 on LocalHost1 Disk1 is past due, so the next request should be sent to the remote host
sendRequests(tracker1, 0);
sendRequests(tracker2, 1);
// partition1: 0-1-0-0(local), 1-1-0-0(remote); partition2: 0-1-0-0(local), 1-1-0-0(remote)
time.sleep((long) remoteHostDisk0Cutoff + 1);
// both inflight remote requests are past due (note that the two remote disks were primed with the same latency distribution)
sendRequests(tracker1, 1);
sendRequests(tracker2, 1);
assertFalse("Operation should not be done", tracker1.isDone() || tracker2.isDone());
// make local requests successful
tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.SUCCESS);
tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.SUCCESS);
// make remote requests fail
tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.FAILURE);
tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.TIMED_OUT);
tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.TIMED_OUT);
tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.FAILURE);
assertTrue("Operation should have succeeded", tracker1.hasSucceeded() && tracker2.hasSucceeded());
// past due count should be 4 because, for each partition, one local and one remote request didn't get a response
// within the threshold. In total, it should be 2 * (1 + 1) = 4
assertEquals("Past due counter is not expected", 4, pastDueCount.getCount());
// the number of data points in the local colo histogram should be 2 because both local requests eventually succeeded
assertEquals("Mismatch in number of data points in local colo histogram", 2, routerMetrics.getBlobLocalDcLatencyMs.getCount());
// the number of data points in the cross colo histogram should be 2 because the two timed-out requests are not counted
assertEquals("Mismatch in number of data points in cross colo histogram", 2, routerMetrics.getBlobCrossDcLatencyMs.getCount());
// additional test: dynamically add 1 new partition and 2 new nodes. Each new node hosts a replica of the new partition
MockDataNodeId newNode1 = clusterMap.createNewDataNodes(1, "dc-0").get(0);
MockDataNodeId newNode2 = clusterMap.createNewDataNodes(1, "dc-1").get(0);
MockPartitionId mockPartition3 = new MockPartitionId(3L, MockClusterMap.DEFAULT_PARTITION_CLASS);
mockPartition3.replicaIds.add(new MockReplicaId(PORT, mockPartition3, newNode1, 0));
mockPartition3.replicaIds.add(new MockReplicaId(PORT, mockPartition3, newNode2, 1));
OperationTracker tracker3 = getOperationTracker(routerConfig, mockPartition3);
// send 1st request
sendRequests(tracker3, 1);
// attempt to send the 2nd one. This will trigger router metrics to create a histogram associated with the new disk.
// However, no 2nd request goes out because the newly created histogram doesn't have enough data points.
sendRequests(tracker3, 0);
// make the 1st request fail
tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.FAILURE);
// 2nd request is sent
sendRequests(tracker3, 1);
// make the 2nd request succeed
tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.SUCCESS);
assertTrue("Operation should have succeeded", tracker3.hasSucceeded());
// restore the tracker scope and routerMetrics
trackerScope = OperationTrackerScope.Datacenter;
routerMetrics = originalMetrics;
}
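What makes this the disk-level variant is only the key used to look up the histogram. Here is a hedged sketch of that scope-based resolution — Resource, OperationTrackerScope, and the getter names appear in the test above, but the helper itself is illustrative, not Ambry's actual code:

// Illustrative helper: pick the Resource that keys the latency histogram for a
// replica, based on the configured tracker scope. Ambry's router metrics perform
// an equivalent lookup when populating getBlobLocalDcResourceToLatency.
static Resource getHistogramKey(ReplicaId replica, OperationTrackerScope scope) {
  switch (scope) {
    case Partition:
      return replica.getPartitionId();
    case DataNode:
      return replica.getDataNodeId();
    case Disk:
      return replica.getDiskId();
    default:
      // Datacenter scope uses a single histogram per DC rather than a keyed map.
      throw new IllegalArgumentException("No per-resource histogram for scope: " + scope);
  }
}

Under this scheme, partition1's and partition2's replicas on RemoteHost1 Disk0 share one histogram, which is why the test can prime remoteHostDisk0Histogram once and have it govern both partitions' remote requests.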
use of com.github.ambry.clustermap.MockReplicaId in project ambry by linkedin.
the class AdaptiveOperationTrackerTest method nodeLevelAdaptiveTrackerTest.
/**
* Tests that the adaptive tracker uses a separate node-level histogram to determine if inflight requests are past due.
* @throws Exception
*/
@Test
public void nodeLevelAdaptiveTrackerTest() throws Exception {
// Mock a simple partition layout for this test: Partition1 has two replicas, one on LocalHost1 and the other on RemoteHost1;
// Similarly, Partition2 has two replicas, one on LocalHost2 and the other on RemoteHost1.
MockPartitionId mockPartition1 = new MockPartitionId(1L, MockClusterMap.DEFAULT_PARTITION_CLASS);
MockPartitionId mockPartition2 = new MockPartitionId(2L, MockClusterMap.DEFAULT_PARTITION_CLASS);
// create a new list of mock datanodes instead of using the default class member
List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
List<String> mountPaths = Arrays.asList("mockMountPath0", "mockMountPath1", "mockMountPath2");
MockDataNodeId localHost1 = new MockDataNodeId("LocalHost1", portList, mountPaths, "dc-0");
MockDataNodeId localHost2 = new MockDataNodeId("LocalHost2", portList, mountPaths, "dc-0");
MockDataNodeId remoteHost1 = new MockDataNodeId("RemoteHost1", portList, mountPaths, "dc-1");
List<MockDataNodeId> datanodes = new ArrayList<>(Arrays.asList(localHost1, localHost2, remoteHost1));
// distribute replicas to nodes (note that the local DC name is still "dc-0" in this setup)
mockPartition1.replicaIds.add(new MockReplicaId(PORT, mockPartition1, localHost1, 1));
mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, localHost2, 2));
mockPartition1.replicaIds.add(new MockReplicaId(PORT, mockPartition1, remoteHost1, 1));
mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, remoteHost1, 2));
MockClusterMap clusterMap = new MockClusterMap(false, datanodes, 3, Arrays.asList(mockPartition1, mockPartition2), localDcName);
trackerScope = OperationTrackerScope.DataNode;
RouterConfig routerConfig = createRouterConfig(true, 1, 1, 6, null, true);
NonBlockingRouterMetrics originalMetrics = routerMetrics;
routerMetrics = new NonBlockingRouterMetrics(clusterMap, routerConfig);
Counter pastDueCount = routerMetrics.getBlobPastDueCount;
Map<Resource, CachedHistogram> localColoMap = routerMetrics.getBlobLocalDcResourceToLatency;
Map<Resource, CachedHistogram> crossColoMap = routerMetrics.getBlobCrossDcResourceToLatency;
// mock different latency distributions for the two local hosts
Histogram localHistogram1 = localColoMap.get(localHost1);
Histogram localHistogram2 = localColoMap.get(localHost2);
primeTracker(localHistogram1, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(0L, 50L));
primeTracker(localHistogram2, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(100L, 120L));
double localHostCutoff1 = localHistogram1.getSnapshot().getValue(QUANTILE);
double localHostCutoff2 = localHistogram2.getSnapshot().getValue(QUANTILE);
OperationTracker tracker1 = getOperationTracker(routerConfig, mockPartition1);
OperationTracker tracker2 = getOperationTracker(routerConfig, mockPartition2);
// issue first request for both partitions in local DC
sendRequests(tracker2, 1);
sendRequests(tracker1, 1);
// partition1: 0-1-0-0, partition2: 0-1-0-0
time.sleep((long) localHostCutoff1 + 1);
// partition1 should send its 2nd request to RemoteHost1; partition2 won't because its 1st request isn't past due.
sendRequests(tracker1, 1);
sendRequests(tracker2, 0);
// partition1: 0-1-0-0(local), 0-1-0-0(remote); partition2: 0-1-0-0(local), 1-0-0-0(remote)
time.sleep((long) (localHostCutoff2 - localHostCutoff1) + 2);
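// partition2's local request (to LocalHost2) is now past due, so it sends its cross-colo request; partition1 has no replicas left to try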
sendRequests(tracker1, 0);
sendRequests(tracker2, 1);
// partition1: 0-1-0-0(local), 0-1-0-0(remote); partition2: 0-1-0-0(local), 0-1-0-0(remote)
assertFalse("Operation should not be done", tracker1.isDone() || tracker2.isDone());
// make local requests fail
tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.TIMED_OUT);
tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.FAILURE);
// make remote requests successful
tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.SUCCESS);
tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.SUCCESS);
assertTrue("Operation should have succeeded", tracker1.hasSucceeded() && tracker2.hasSucceeded());
// past due count should be 2 because the requests to the two local nodes didn't get a response within the threshold
assertEquals("Past due counter is not expected", 2, pastDueCount.getCount());
// the number of data points in the local colo histogram should be 1 because LocalHost2 eventually responded with FAILURE,
// which updates the histogram. Note that the request to LocalHost1 ended up TIMED_OUT, which should not be counted.
assertEquals("Mismatch in number of data points in local colo histogram", 1, routerMetrics.getBlobLocalDcLatencyMs.getCount());
// the number of data points in the cross colo histogram should be 2 because both requests to RemoteHost1 succeeded, so the
// histogram is updated twice.
assertEquals("Mismatch in number of data points in cross colo histogram", 2, routerMetrics.getBlobCrossDcLatencyMs.getCount());
// additional test: dynamically add 1 new partition and 2 new nodes. Each new node hosts a replica of the new partition
MockDataNodeId newNode1 = clusterMap.createNewDataNodes(1, "dc-0").get(0);
MockDataNodeId newNode2 = clusterMap.createNewDataNodes(1, "dc-1").get(0);
MockPartitionId mockPartition3 = new MockPartitionId(3L, MockClusterMap.DEFAULT_PARTITION_CLASS);
mockPartition3.replicaIds.add(new MockReplicaId(PORT, mockPartition3, newNode1, 1));
mockPartition3.replicaIds.add(new MockReplicaId(PORT, mockPartition3, newNode2, 2));
OperationTracker tracker3 = getOperationTracker(routerConfig, mockPartition3);
// send 1st request
sendRequests(tracker3, 1);
// attempt to send the 2nd one. This will trigger router metrics to create a histogram associated with the new node.
// However, no 2nd request goes out because the newly created histogram doesn't have enough data points.
sendRequests(tracker3, 0);
// make the 1st request fail
tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.FAILURE);
// 2nd request is sent
sendRequests(tracker3, 1);
// make the 2nd request succeed
tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.SUCCESS);
assertTrue("Operation should have succeeded", tracker3.hasSucceeded());
// restore the tracker scope and routerMetrics
trackerScope = OperationTrackerScope.Datacenter;
routerMetrics = originalMetrics;
}
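The primeTracker helper used throughout these tests seeds a histogram with a known latency range so the QUANTILE cutoff is predictable. Its implementation isn't shown here; a plausible sketch, assuming it records uniformly distributed values within the given Pair's bounds:

import java.util.Random;
import com.codahale.metrics.Histogram;

// Assumed shape of the primeTracker helper: record `count` latency samples drawn
// uniformly from [range.getFirst(), range.getSecond()). This is a guess at the
// real helper in AdaptiveOperationTrackerTest, not a copy of it.
static void primeTracker(Histogram histogram, long count, Pair<Long, Long> range) {
  Random random = new Random();
  for (long i = 0; i < count; i++) {
    long latency = range.getFirst() + (long) (random.nextDouble() * (range.getSecond() - range.getFirst()));
    histogram.update(latency);
  }
}

Priming localHistogram1 with (0, 50) and localHistogram2 with (100, 120) guarantees localHostCutoff2 > localHostCutoff1, which the sleep arithmetic in the test relies on.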
use of com.github.ambry.clustermap.MockReplicaId in project ambry by linkedin.
the class OperationTrackerTest method testReplicaDown.
/**
* Test replica down scenario
* @param totalReplicaCount total replicas for the partition.
* @param downReplicaCount the number of replicas to be marked down.
*/
private void testReplicaDown(int totalReplicaCount, int downReplicaCount) {
List<Boolean> downStatus = new ArrayList<>(totalReplicaCount);
for (int i = 0; i < downReplicaCount; i++) {
downStatus.add(true);
}
for (int i = downReplicaCount; i < totalReplicaCount; i++) {
downStatus.add(false);
}
Collections.shuffle(downStatus);
List<ReplicaId> mockReplicaIds = mockPartition.getReplicaIds();
for (int i = 0; i < totalReplicaCount; i++) {
((MockReplicaId) mockReplicaIds.get(i)).markReplicaDownStatus(downStatus.get(i));
}
localDcName = datanodes.get(0).getDatacenterName();
OperationTracker ot = getOperationTracker(true, 2, 3, RouterOperation.GetBlobOperation, true);
// The iterator should return all replicas, with the up replicas first and the down replicas last.
Iterator<ReplicaId> itr = ot.getReplicaIterator();
int count = 0;
while (itr.hasNext()) {
ReplicaId nextReplica = itr.next();
if (count < totalReplicaCount - downReplicaCount) {
assertFalse(nextReplica.isDown());
} else {
assertTrue(nextReplica.isDown());
}
count++;
}
assertEquals("Total replica count did not match expected", totalReplicaCount, count);
}
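The ordering asserted above — all up replicas before all down ones — can be produced by a stable two-pass partition of the replica list. A minimal sketch of that idea (not the actual OperationTracker implementation):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

// Illustrative: order replicas with live ones first, preserving relative order
// within each group, which matches what the iterator assertions check.
static List<ReplicaId> upReplicasFirst(List<ReplicaId> replicas) {
  return Stream.concat(
      replicas.stream().filter(r -> !r.isDown()),
      replicas.stream().filter(ReplicaId::isDown))
      .collect(Collectors.toList());
}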
use of com.github.ambry.clustermap.MockReplicaId in project ambry by linkedin.
the class ServerTestUtil method endToEndTest.
static void endToEndTest(Port targetPort, String routerDatacenter, MockCluster cluster, SSLConfig clientSSLConfig, SSLSocketFactory clientSSLSocketFactory, Properties routerProps, boolean testEncryption) {
try {
MockClusterMap clusterMap = cluster.getClusterMap();
BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
byte[] userMetadata = new byte[1000];
byte[] data = new byte[31870];
byte[] encryptionKey = new byte[100];
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties properties = new BlobProperties(31870, "serviceid1", accountId, containerId, testEncryption, cluster.time.milliseconds());
TestUtils.RANDOM.nextBytes(userMetadata);
TestUtils.RANDOM.nextBytes(data);
if (testEncryption) {
TestUtils.RANDOM.nextBytes(encryptionKey);
}
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
BlobId blobId2 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
BlobId blobId3 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
BlobId blobId4 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
BlobId blobId5 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
// put blob 1
PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
ConnectedChannel channel = getBlockingChannelBasedOnPortType(targetPort, "localhost", clientSSLSocketFactory, clientSSLConfig);
channel.connect();
DataInputStream putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
PutResponse response = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response.getError());
// put blob 2 with an expiry time and apply TTL update later
BlobProperties propertiesForTtlUpdate = new BlobProperties(31870, "serviceid1", "ownerid", "image/png", false, TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, testEncryption, null, null, null);
long ttlUpdateBlobExpiryTimeMs = getExpiryTimeMs(propertiesForTtlUpdate);
PutRequest putRequest2 = new PutRequest(1, "client1", blobId2, propertiesForTtlUpdate, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
putResponseStream = channel.sendAndReceive(putRequest2).getInputStream();
PutResponse response2 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response2.getError());
// put blob 3
PutRequest putRequest3 = new PutRequest(1, "client1", blobId3, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
putResponseStream = channel.sendAndReceive(putRequest3).getInputStream();
PutResponse response3 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response3.getError());
// put blob 4 that is expired
BlobProperties propertiesExpired = new BlobProperties(31870, "serviceid1", "ownerid", "jpeg", false, 0, cluster.time.milliseconds(), accountId, containerId, testEncryption, null, null, null);
PutRequest putRequest4 = new PutRequest(1, "client1", blobId4, propertiesExpired, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
putResponseStream = channel.sendAndReceive(putRequest4).getInputStream();
PutResponse response4 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response4.getError());
cluster.time.sleep(10000);
// get blob properties
ArrayList<BlobId> ids = new ArrayList<>();
MockPartitionId partition = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(blobId1);
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partition, ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel.sendAndReceive(getRequest1).getInputStream();
GetResponse resp1 = GetResponse.readFrom(stream, clusterMap);
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
assertEquals(31870, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
// get blob properties with expired flag set
ids = new ArrayList<BlobId>();
partition = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(blobId1);
partitionRequestInfoList = new ArrayList<>();
partitionRequestInfo = new PartitionRequestInfo(partition, ids);
partitionRequestInfoList.add(partitionRequestInfo);
getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_Expired_Blobs);
stream = channel.sendAndReceive(getRequest1).getInputStream();
resp1 = GetResponse.readFrom(stream, clusterMap);
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
assertEquals(31870, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
// get blob properties for expired blob
// 1. With no flag
ArrayList<BlobId> idsExpired = new ArrayList<>();
MockPartitionId partitionExpired = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
idsExpired.add(blobId4);
ArrayList<PartitionRequestInfo> partitionRequestInfoListExpired = new ArrayList<>();
PartitionRequestInfo partitionRequestInfoExpired = new PartitionRequestInfo(partitionExpired, idsExpired);
partitionRequestInfoListExpired.add(partitionRequestInfoExpired);
GetRequest getRequestExpired = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoListExpired, GetOption.None);
DataInputStream streamExpired = channel.sendAndReceive(getRequestExpired).getInputStream();
GetResponse respExpired = GetResponse.readFrom(streamExpired, clusterMap);
assertEquals(ServerErrorCode.Blob_Expired, respExpired.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(streamExpired);
// 2. With Include_Expired flag
idsExpired = new ArrayList<>();
partitionExpired = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
idsExpired.add(blobId4);
partitionRequestInfoListExpired = new ArrayList<>();
partitionRequestInfoExpired = new PartitionRequestInfo(partitionExpired, idsExpired);
partitionRequestInfoListExpired.add(partitionRequestInfoExpired);
getRequestExpired = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoListExpired, GetOption.Include_Expired_Blobs);
streamExpired = channel.sendAndReceive(getRequestExpired).getInputStream();
respExpired = GetResponse.readFrom(streamExpired, clusterMap);
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(respExpired.getInputStream());
assertEquals(31870, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("ownerid", propertyOutput.getOwnerId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
releaseNettyBufUnderneathStream(streamExpired);
} catch (MessageFormatException e) {
fail();
}
// get user metadata
GetRequest getRequest2 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest2).getInputStream();
GetResponse resp2 = GetResponse.readFrom(stream, clusterMap);
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp2.getInputStream());
assertArrayEquals(userMetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
// get blob info
GetRequest getRequest3 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobInfo, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest3).getInputStream();
GetResponse resp3 = GetResponse.readFrom(stream, clusterMap);
InputStream responseStream = resp3.getInputStream();
// verify blob properties.
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(responseStream);
assertEquals(31870, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
// verify user metadata
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(responseStream);
assertArrayEquals(userMetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
releaseNettyBufUnderneathStream(stream);
// get blob all
GetRequest getRequest4 = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest4).getInputStream();
GetResponse resp4 = GetResponse.readFrom(stream, clusterMap);
responseStream = resp4.getInputStream();
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
byte[] actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
// verify content
assertArrayEquals("Content mismatch.", data, actualBlobData);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
releaseNettyBufUnderneathStream(stream);
// encryptionKey in this test doesn't have any relation to the content. Both are random bytes for test purposes.
if (!testEncryption) {
// Use router to get the blob
Properties routerProperties = getRouterProps(routerDatacenter);
routerProperties.putAll(routerProps);
VerifiableProperties routerVerifiableProps = new VerifiableProperties(routerProperties);
AccountService accountService = new InMemAccountService(false, true);
Router router = new NonBlockingRouterFactory(routerVerifiableProps, clusterMap, new MockNotificationSystem(clusterMap), getSSLFactoryIfRequired(routerVerifiableProps), accountService).getRouter();
checkBlobId(router, blobId1, data);
router.close();
}
checkTtlUpdateStatus(channel, clusterMap, blobIdFactory, blobId2, data, false, ttlUpdateBlobExpiryTimeMs);
updateBlobTtl(channel, blobId2, cluster.time.milliseconds());
checkTtlUpdateStatus(channel, clusterMap, blobIdFactory, blobId2, data, true, Utils.Infinite_Time);
// fetch blob that does not exist
// get blob properties
ids = new ArrayList<>();
partition = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partition, false, BlobId.BlobDataType.DATACHUNK));
partitionRequestInfoList.clear();
partitionRequestInfo = new PartitionRequestInfo(partition, ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest5 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest5).getInputStream();
GetResponse resp5 = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.Blob_Not_Found, resp5.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(stream);
// stop the store via AdminRequest
System.out.println("Begin to stop a BlobStore");
AdminRequest adminRequest = new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionIds.get(0), 1, "clientid2");
BlobStoreControlAdminRequest controlRequest = new BlobStoreControlAdminRequest((short) 0, BlobStoreControlAction.StopStore, adminRequest);
stream = channel.sendAndReceive(controlRequest).getInputStream();
AdminResponse adminResponse = AdminResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Stop store admin request should succeed", ServerErrorCode.No_Error, adminResponse.getError());
// put a blob on a stopped store, which should fail
putRequest = new PutRequest(1, "client1", blobId5, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
response = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals("Put blob on stopped store should fail", ServerErrorCode.Replica_Unavailable, response.getError());
// get a blob properties on a stopped store, which should fail
ids = new ArrayList<>();
partition = (MockPartitionId) blobId1.getPartition();
ids.add(blobId1);
partitionRequestInfoList = new ArrayList<>();
partitionRequestInfo = new PartitionRequestInfo(partition, ids);
partitionRequestInfoList.add(partitionRequestInfo);
getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest1).getInputStream();
resp1 = GetResponse.readFrom(stream, clusterMap);
assertEquals("Get blob properties on stopped store should fail", ServerErrorCode.Replica_Unavailable, resp1.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(stream);
// delete a blob on a stopped store, which should fail
DeleteRequest deleteRequest = new DeleteRequest(1, "deleteClient", blobId1, System.currentTimeMillis());
stream = channel.sendAndReceive(deleteRequest).getInputStream();
DeleteResponse deleteResponse = DeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Delete blob on stopped store should fail", ServerErrorCode.Replica_Unavailable, deleteResponse.getError());
// start the store via AdminRequest
System.out.println("Begin to restart the BlobStore");
adminRequest = new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionIds.get(0), 1, "clientid2");
controlRequest = new BlobStoreControlAdminRequest((short) 0, BlobStoreControlAction.StartStore, adminRequest);
stream = channel.sendAndReceive(controlRequest).getInputStream();
adminResponse = AdminResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Start store admin request should succeed", ServerErrorCode.No_Error, adminResponse.getError());
List<? extends ReplicaId> replicaIds = partitionIds.get(0).getReplicaIds();
for (ReplicaId replicaId : replicaIds) {
// forcibly mark replicas and disks as up.
MockReplicaId mockReplicaId = (MockReplicaId) replicaId;
mockReplicaId.markReplicaDownStatus(false);
((MockDiskId) mockReplicaId.getDiskId()).setDiskState(HardwareState.AVAILABLE, false);
}
// put a blob on a restarted store, which should succeed
PutRequest putRequest5 = new PutRequest(1, "client1", blobId5, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
putResponseStream = channel.sendAndReceive(putRequest5).getInputStream();
PutResponse response5 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals("Put blob on restarted store should succeed", ServerErrorCode.No_Error, response5.getError());
// get a blob on a restarted store, which should succeed
ids = new ArrayList<>();
PartitionId partitionId = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(blobId1);
partitionRequestInfoList = new ArrayList<>();
partitionRequestInfo = new PartitionRequestInfo(partitionId, ids);
partitionRequestInfoList.add(partitionRequestInfo);
getRequest1 = new GetRequest(1, "clientid1", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest1).getInputStream();
resp1 = GetResponse.readFrom(stream, clusterMap);
responseStream = resp1.getInputStream();
blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals("Content mismatch.", data, actualBlobData);
releaseNettyBufUnderneathStream(stream);
// undelete of a not-yet-deleted blob should fail
UndeleteRequest undeleteRequest = new UndeleteRequest(1, "undeleteClient", blobId1, System.currentTimeMillis());
stream = channel.sendAndReceive(undeleteRequest).getInputStream();
UndeleteResponse undeleteResponse = UndeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Undelete blob should succeed", ServerErrorCode.Blob_Not_Deleted, undeleteResponse.getError());
// delete a blob on a restarted store, which should succeed
deleteRequest = new DeleteRequest(1, "deleteClient", blobId1, System.currentTimeMillis());
stream = channel.sendAndReceive(deleteRequest).getInputStream();
deleteResponse = DeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Delete blob on restarted store should succeed", ServerErrorCode.No_Error, deleteResponse.getError());
// undelete a deleted blob, which should succeed
undeleteRequest = new UndeleteRequest(2, "undeleteClient", blobId1, System.currentTimeMillis());
stream = channel.sendAndReceive(undeleteRequest).getInputStream();
undeleteResponse = UndeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Undelete blob should succeed", ServerErrorCode.No_Error, undeleteResponse.getError());
assertEquals("Undelete life version mismatch", undeleteResponse.getLifeVersion(), (short) 1);
// undelete an already undeleted blob, which should fail
undeleteRequest = new UndeleteRequest(3, "undeleteClient", blobId1, System.currentTimeMillis());
stream = channel.sendAndReceive(undeleteRequest).getInputStream();
undeleteResponse = UndeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Undelete blob should fail", ServerErrorCode.Blob_Already_Undeleted, undeleteResponse.getError());
assertEquals("LifeVersion Mismatch", (short) 1, undeleteResponse.getLifeVersion());
// get an undeleted blob, which should succeed
getRequest1 = new GetRequest(1, "clientid1", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest1).getInputStream();
resp1 = GetResponse.readFrom(stream, clusterMap);
responseStream = resp1.getInputStream();
blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals("Content mismatch", data, actualBlobData);
releaseNettyBufUnderneathStream(stream);
// Bounce servers to make them read the persisted token file.
cluster.stopServers();
cluster.reinitServers();
channel.disconnect();
channel.connect();
// get an undeleted blob after restart, which should succeed
getRequest1 = new GetRequest(1, "clientid1", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest1).getInputStream();
resp1 = GetResponse.readFrom(stream, clusterMap);
responseStream = resp1.getInputStream();
blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals("Content mismatch", data, actualBlobData);
releaseNettyBufUnderneathStream(stream);
channel.disconnect();
} catch (Exception e) {
e.printStackTrace();
assertNull(e);
} finally {
List<? extends ReplicaId> replicaIds = cluster.getClusterMap().getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0).getReplicaIds();
for (ReplicaId replicaId : replicaIds) {
MockReplicaId mockReplicaId = (MockReplicaId) replicaId;
((MockDiskId) mockReplicaId.getDiskId()).setDiskState(HardwareState.AVAILABLE, true);
}
}
}
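The test repeats one pattern dozens of times: send a request over the channel, parse the typed response, and release the Netty buffer behind the stream. That pattern could be factored into a helper like the sketch below; the functional interface and generic signature are hypothetical, while sendAndReceive, getInputStream, and releaseNettyBufUnderneathStream are the calls already used above:

import java.io.DataInputStream;

// Hypothetical helper for the recurring send/parse/release pattern in this test.
// The request parameter type is assumed to be the common base of PutRequest,
// GetRequest, etc.
@FunctionalInterface
interface ResponseReader<T> {
  T read(DataInputStream stream) throws Exception;
}

static <T> T sendAndParse(ConnectedChannel channel, RequestOrResponse request, ResponseReader<T> reader) throws Exception {
  DataInputStream stream = channel.sendAndReceive(request).getInputStream();
  T response = reader.read(stream);
  // Assumes releaseNettyBufUnderneathStream is visible, as elsewhere in ServerTestUtil.
  releaseNettyBufUnderneathStream(stream);
  return response;
}

For example, the put could become PutResponse response = sendAndParse(channel, putRequest, PutResponse::readFrom); GetResponse would still need a lambda because its readFrom also takes the cluster map.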