use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
the class AmbryRequestsTest method catchupStatusSuccessTest.
/**
 * Tests the responses received on a {@link CatchupStatusAdminRequest} for different cases.
 * @throws InterruptedException
 * @throws IOException
 */
@Test
public void catchupStatusSuccessTest() throws InterruptedException, IOException {
  List<? extends PartitionId> partitionIds = clusterMap.getAllPartitionIds();
  assertTrue("This test needs more than one partition to work", partitionIds.size() > 1);
  PartitionId id = partitionIds.get(0);
  ReplicaId thisPartRemoteRep = getRemoteReplicaId(id);
  ReplicaId otherPartRemoteRep = getRemoteReplicaId(partitionIds.get(1));
  List<? extends ReplicaId> replicaIds = id.getReplicaIds();
  assertTrue("This test needs more than one replica for the first partition to work", replicaIds.size() > 1);
  long acceptableLagInBytes = 100;
  // cases with a given partition id
  // all replicas of given partition < acceptableLag
  generateLagOverrides(0, acceptableLagInBytes - 1);
  doCatchupStatusTest(id, acceptableLagInBytes, Short.MAX_VALUE, ServerErrorCode.No_Error, true);
  // all replicas of given partition = acceptableLag
  generateLagOverrides(acceptableLagInBytes, acceptableLagInBytes);
  doCatchupStatusTest(id, acceptableLagInBytes, Short.MAX_VALUE, ServerErrorCode.No_Error, true);
  // 1 replica of some other partition > acceptableLag
  String key = MockReplicationManager.getPartitionLagKey(otherPartRemoteRep.getPartitionId(),
      otherPartRemoteRep.getDataNodeId().getHostname(), otherPartRemoteRep.getReplicaPath());
  replicationManager.lagOverrides.put(key, acceptableLagInBytes + 1);
  doCatchupStatusTest(id, acceptableLagInBytes, Short.MAX_VALUE, ServerErrorCode.No_Error, true);
  // 1 replica of this partition > acceptableLag
  key = MockReplicationManager.getPartitionLagKey(id, thisPartRemoteRep.getDataNodeId().getHostname(),
      thisPartRemoteRep.getReplicaPath());
  replicationManager.lagOverrides.put(key, acceptableLagInBytes + 1);
  doCatchupStatusTest(id, acceptableLagInBytes, Short.MAX_VALUE, ServerErrorCode.No_Error, false);
  // same result if num expected replicas == total count - 1.
  doCatchupStatusTest(id, acceptableLagInBytes, (short) (replicaIds.size() - 1), ServerErrorCode.No_Error, false);
  // caught up if num expected replicas == total count - 2
  doCatchupStatusTest(id, acceptableLagInBytes, (short) (replicaIds.size() - 2), ServerErrorCode.No_Error, true);
  // caught up if num expected replicas == total count - 3
  doCatchupStatusTest(id, acceptableLagInBytes, (short) (replicaIds.size() - 3), ServerErrorCode.No_Error, true);
  // all replicas of this partition > acceptableLag
  generateLagOverrides(acceptableLagInBytes + 1, acceptableLagInBytes + 1);
  doCatchupStatusTest(id, acceptableLagInBytes, Short.MAX_VALUE, ServerErrorCode.No_Error, false);
  // cases with no partition id provided
  // all replicas of all partitions < acceptableLag
  generateLagOverrides(0, acceptableLagInBytes - 1);
  doCatchupStatusTest(null, acceptableLagInBytes, Short.MAX_VALUE, ServerErrorCode.No_Error, true);
  // all replicas of all partitions = acceptableLag
  generateLagOverrides(acceptableLagInBytes, acceptableLagInBytes);
  doCatchupStatusTest(null, acceptableLagInBytes, Short.MAX_VALUE, ServerErrorCode.No_Error, true);
  // 1 replica of one partition > acceptableLag
  key = MockReplicationManager.getPartitionLagKey(id, thisPartRemoteRep.getDataNodeId().getHostname(),
      thisPartRemoteRep.getReplicaPath());
  replicationManager.lagOverrides.put(key, acceptableLagInBytes + 1);
  doCatchupStatusTest(null, acceptableLagInBytes, Short.MAX_VALUE, ServerErrorCode.No_Error, false);
  // same result if num expected replicas == total count - 1.
  doCatchupStatusTest(null, acceptableLagInBytes, (short) (replicaIds.size() - 1), ServerErrorCode.No_Error, false);
  // caught up if num expected replicas == total count - 2
  doCatchupStatusTest(null, acceptableLagInBytes, (short) (replicaIds.size() - 2), ServerErrorCode.No_Error, true);
  // caught up if num expected replicas == total count - 3
  doCatchupStatusTest(null, acceptableLagInBytes, (short) (replicaIds.size() - 3), ServerErrorCode.No_Error, true);
  // all replicas of all partitions > acceptableLag
  generateLagOverrides(acceptableLagInBytes + 1, acceptableLagInBytes + 1);
  doCatchupStatusTest(null, acceptableLagInBytes, Short.MAX_VALUE, ServerErrorCode.No_Error, false);
}
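The doCatchupStatusTest helper invoked above is not shown on this page. A minimal sketch of what it plausibly does, assuming the AdminRequest and CatchupStatusAdminRequest constructors from com.github.ambry.protocol; sendAdminRequestGetResponse is a hypothetical helper that routes the request through AmbryRequests and deserializes the payload into a CatchupStatusAdminResponse.

// Hedged sketch: the real helper lives in AmbryRequestsTest; sendAdminRequestGetResponse is hypothetical.
private void doCatchupStatusTest(PartitionId id, long acceptableLagInBytes, short numReplicasCaughtUpPerPartition,
    ServerErrorCode expectedServerErrorCode, boolean expectedIsCaughtUp) throws InterruptedException, IOException {
  AdminRequest adminRequest = new AdminRequest(AdminRequestOrResponseType.CatchupStatus, id, 1, "client-id");
  CatchupStatusAdminRequest catchupStatusRequest =
      new CatchupStatusAdminRequest(acceptableLagInBytes, numReplicasCaughtUpPerPartition, adminRequest);
  // route the request through the server-side request handler and read back the admin response
  CatchupStatusAdminResponse response = sendAdminRequestGetResponse(catchupStatusRequest);
  assertEquals("Unexpected server error code", expectedServerErrorCode, response.getError());
  assertEquals("Unexpected catchup status", expectedIsCaughtUp, response.isCaughtUp());
}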
use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
the class AmbryRequestsTest method generateLagOverrides.
/**
 * Generates lag overrides in {@code replicationManager}, with each lag a random number between {@code base} and
 * {@code upperBound}, both inclusive.
 * @param base the minimum value of the lag (inclusive)
 * @param upperBound the maximum value of the lag (inclusive)
 */
private void generateLagOverrides(long base, long upperBound) {
  replicationManager.lagOverrides = new HashMap<>();
  for (PartitionId partitionId : clusterMap.getAllPartitionIds()) {
    for (ReplicaId replicaId : partitionId.getReplicaIds()) {
      String key = MockReplicationManager.getPartitionLagKey(partitionId, replicaId.getDataNodeId().getHostname(),
          replicaId.getReplicaPath());
      Long value = base + Utils.getRandomLong(TestUtils.RANDOM, upperBound - base + 1);
      replicationManager.lagOverrides.put(key, value);
    }
  }
}
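MockReplicationManager.getPartitionLagKey is not shown here; the overrides map only needs a key that uniquely identifies a remote replica by partition, host, and replica path. A plausible sketch, where the concatenation format is an assumption:

// Hedged sketch: the exact key format used by MockReplicationManager may differ.
static String getPartitionLagKey(PartitionId partitionId, String hostname, String replicaPath) {
  return partitionId.toString() + ":" + hostname + ":" + replicaPath;
}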
use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
the class StorageManagerTest method scheduleAndDisableCompactionTest.
/**
 * Tests scheduling and disabling compaction in {@link StorageManager}.
 * @throws Exception
 */
@Test
public void scheduleAndDisableCompactionTest() throws Exception {
  MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
  List<MockDataNodeId> dataNodes = new ArrayList<>();
  dataNodes.add(dataNode);
  MockPartitionId invalidPartition = new MockPartitionId(Long.MAX_VALUE, dataNodes, 0);
  List<? extends ReplicaId> invalidPartitionReplicas = invalidPartition.getReplicaIds();
  StorageManager storageManager = createStorageManager(replicas, metricRegistry);
  storageManager.start();
  // add invalid replica id
  replicas.add(invalidPartitionReplicas.get(0));
  for (int i = 0; i < replicas.size(); i++) {
    ReplicaId replica = replicas.get(i);
    PartitionId id = replica.getPartitionId();
    if (i == replicas.size() - 1) {
      assertFalse("Schedule compaction should fail", storageManager.scheduleNextForCompaction(id));
      assertFalse("Disable compaction should fail", storageManager.disableCompactionForBlobStore(id));
    } else {
      assertTrue("Schedule compaction should succeed", storageManager.scheduleNextForCompaction(id));
    }
  }
  ReplicaId replica = replicas.get(0);
  PartitionId id = replica.getPartitionId();
  assertTrue("Disable compaction should succeed", storageManager.disableCompactionForBlobStore(id));
  assertFalse("Schedule compaction should fail", storageManager.scheduleNextForCompaction(id));
  replica = replicas.get(1);
  id = replica.getPartitionId();
  assertTrue("Schedule compaction should succeed", storageManager.scheduleNextForCompaction(id));
  replicas.remove(replicas.size() - 1);
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
}
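The createStorageManager helper is not shown. A sketch under the assumption that StorageManager's constructor takes the store config, a scheduler, the metric registry, the replica list, and test store plumbing; the exact signature has varied across ambry versions, and storeConfig is assumed to be a field of the test class.

// Hedged sketch: constructor arguments are assumptions and have changed across ambry versions.
private StorageManager createStorageManager(List<ReplicaId> replicas, MetricRegistry registry) throws StoreException {
  return new StorageManager(storeConfig, Utils.newScheduler(1, false), registry, replicas, new MockIdFactory(),
      new DummyMessageStoreRecovery(), new DummyMessageStoreHardDelete(), SystemTime.getInstance());
}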
use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
the class ServerAdminTool method sendRequestGetResponse.
/**
 * Sends {@code request} to {@code dataNodeId} and returns the response as a {@link ResponseInfo}.
 * @param dataNodeId the {@link DataNodeId} to contact.
 * @param partitionId the {@link PartitionId} associated with the request.
 * @param request the request to send.
 * @return the response as a {@link ResponseInfo} once it is successfully received.
 * @throws TimeoutException if the operation does not complete within {@code OPERATION_TIMEOUT_MS}.
 */
private ResponseInfo sendRequestGetResponse(DataNodeId dataNodeId, PartitionId partitionId, SendWithCorrelationId request) throws TimeoutException {
  ReplicaId replicaId = getReplicaFromNode(dataNodeId, partitionId);
  String hostname = dataNodeId.getHostname();
  Port port = dataNodeId.getPortToConnectTo();
  String identifier = hostname + ":" + port.getPort();
  RequestInfo requestInfo = new RequestInfo(hostname, port, request, replicaId, null);
  List<RequestInfo> requestInfos = Collections.singletonList(requestInfo);
  ResponseInfo responseInfo = null;
  long startTimeMs = time.milliseconds();
  do {
    if (time.milliseconds() - startTimeMs > OPERATION_TIMEOUT_MS) {
      throw new TimeoutException(identifier + ": Operation did not complete within " + OPERATION_TIMEOUT_MS + " ms");
    }
    List<ResponseInfo> responseInfos = networkClient.sendAndPoll(requestInfos, Collections.emptySet(), POLL_TIMEOUT_MS);
    if (responseInfos.size() > 1) {
      // May need to relax this check because the response list may contain more than one response
      throw new IllegalStateException("Received more than one response even though a single request was sent");
    } else if (!responseInfos.isEmpty()) {
      responseInfo = responseInfos.get(0);
    }
    requestInfos = Collections.emptyList();
  } while (responseInfo == null);
  if (responseInfo.getError() != null) {
    throw new IllegalStateException(identifier + ": Encountered error while trying to send request - " + responseInfo.getError());
  }
  return responseInfo;
}
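A typical caller deserializes the payload of the returned ResponseInfo. A hedged example, assuming ResponseInfo.getResponse() exposes the raw ByteBuffer in this version of the tool; the triggerCompaction wrapper below is illustrative, not necessarily the tool's exact method.

// Hedged sketch of a caller; the method name and response accessor are assumptions.
public boolean triggerCompaction(DataNodeId dataNodeId, PartitionId partitionId) throws IOException, TimeoutException {
  AdminRequest adminRequest =
      new AdminRequest(AdminRequestOrResponseType.TriggerCompaction, partitionId, 1, "server-admin-tool");
  ResponseInfo responseInfo = sendRequestGetResponse(dataNodeId, partitionId, adminRequest);
  // deserialize the admin response from the raw bytes returned by the server
  AdminResponse adminResponse =
      AdminResponse.readFrom(new DataInputStream(new ByteBufferInputStream(responseInfo.getResponse())));
  return adminResponse.getError() == ServerErrorCode.No_Error;
}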
use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
the class BlobValidator method validateBlobOnAllReplicas.
/**
 * Validates {@code blobId} on all of its replicas.
 * @param blobId the {@link BlobId} to operate on.
 * @param getOption the {@link GetOption} to use with the {@link com.github.ambry.protocol.GetRequest}.
 * @param clusterMap the {@link ClusterMap} instance to use.
 * @param storeKeyFactory the {@link StoreKeyFactory} to use.
 * @return a list of mismatch details; an empty list if there are no mismatches.
 * @throws InterruptedException
 */
private List<String> validateBlobOnAllReplicas(BlobId blobId, GetOption getOption, ClusterMap clusterMap, StoreKeyFactory storeKeyFactory) throws InterruptedException {
  Map<DataNodeId, ServerResponse> dataNodeIdBlobContentMap = new HashMap<>();
  for (ReplicaId replicaId : blobId.getPartition().getReplicaIds()) {
    ServerResponse response = getRecordFromNode(replicaId.getDataNodeId(), blobId, getOption, clusterMap, storeKeyFactory);
    dataNodeIdBlobContentMap.put(replicaId.getDataNodeId(), response);
  }
  return getMismatchDetails(blobId.getID(), dataNodeIdBlobContentMap);
}
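getMismatchDetails is not shown on this page. A minimal sketch of the comparison it presumably performs, assuming ServerResponse implements a meaningful equals; the helper below is hypothetical and the real implementation may report richer detail.

// Hedged sketch: compare each node's response against the first one seen and
// report the nodes that disagree.
private List<String> getMismatchDetails(String blobId, Map<DataNodeId, ServerResponse> responses) {
  List<String> mismatchDetails = new ArrayList<>();
  ServerResponse reference = null;
  DataNodeId referenceNode = null;
  for (Map.Entry<DataNodeId, ServerResponse> entry : responses.entrySet()) {
    if (reference == null) {
      reference = entry.getValue();
      referenceNode = entry.getKey();
    } else if (!reference.equals(entry.getValue())) {
      mismatchDetails.add(blobId + ": response from " + entry.getKey() + " does not match " + referenceNode);
    }
  }
  return mismatchDetails;
}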