Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
The class GetReplicasHandler, method getReplicas.
/**
* Extracts the blob ID provided by the client and figures out the partition that the blob ID would belong to
* based on the cluster map. Using the partition information, returns the list of replicas as a part of a JSONObject.
* @param blobId the blob ID whose replicas are required.
* @return A {@link JSONObject} that wraps the replica list.
* @throws RestServiceException if there were missing or invalid arguments, or if there was a {@link JSONException}
* or any other exception while building the response
*/
private JSONObject getReplicas(String blobId) throws RestServiceException {
  try {
    PartitionId partitionId = new BlobId(blobId, clusterMap).getPartition();
    if (partitionId == null) {
      metrics.invalidBlobIdError.inc();
      logger.warn("Partition for blob id {} is null. The blob id might be invalid", blobId);
      throw new RestServiceException("Partition for blob id " + blobId + " is null. The id might be invalid",
          RestServiceErrorCode.NotFound);
    }
    return packageResult(partitionId.getReplicaIds());
  } catch (IllegalArgumentException e) {
    metrics.invalidBlobIdError.inc();
    throw new RestServiceException("Invalid blob id received for getReplicasForBlob request - " + blobId, e,
        RestServiceErrorCode.NotFound);
  } catch (IOException | JSONException e) {
    metrics.responseConstructionError.inc();
    throw new RestServiceException("Could not create response for GET of replicas for " + blobId, e,
        RestServiceErrorCode.InternalServerError);
  }
}
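The packageResult helper is referenced but not shown in this snippet. A minimal sketch of what such a helper could look like, assuming the org.json types used above and a "replicas" key; both are assumptions for illustration, not necessarily the actual GetReplicasHandler code:

// Hypothetical sketch: wrap the string form of each replica of the partition
// in a JSON array under a "replicas" key. Assumes org.json.JSONObject/JSONArray
// and the ambry ReplicaId type.
private JSONObject packageResult(List<? extends ReplicaId> replicaIds) throws JSONException {
  JSONObject result = new JSONObject();
  JSONArray replicas = new JSONArray();
  for (ReplicaId replicaId : replicaIds) {
    replicas.put(replicaId.toString());  // one entry per replica of the partition
  }
  result.put("replicas", replicas);
  return result;
}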
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
The class FrontendUtilsTest, method testGetBlobIdFromString.
/**
* Tests {@link FrontendUtils#getBlobIdFromString(String, ClusterMap)}
* @throws IOException
* @throws RestServiceException
*/
@Test
public void testGetBlobIdFromString() throws IOException, RestServiceException {
  // good path
  byte[] bytes = new byte[2];
  ClusterMap referenceClusterMap = new MockClusterMap();
  TestUtils.RANDOM.nextBytes(bytes);
  BlobId.BlobIdType referenceType =
      TestUtils.RANDOM.nextBoolean() ? BlobId.BlobIdType.NATIVE : BlobId.BlobIdType.CRAFTED;
  TestUtils.RANDOM.nextBytes(bytes);
  byte referenceDatacenterId = bytes[0];
  short referenceAccountId = getRandomShort(TestUtils.RANDOM);
  short referenceContainerId = getRandomShort(TestUtils.RANDOM);
  PartitionId referencePartitionId =
      referenceClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
  boolean referenceIsEncrypted = TestUtils.RANDOM.nextBoolean();
  List<Short> versions = Arrays.stream(BlobId.getAllValidVersions())
      .filter(version -> version >= BlobId.BLOB_ID_V3)
      .collect(Collectors.toList());
  for (short version : versions) {
    BlobId blobId = new BlobId(version, referenceType, referenceDatacenterId, referenceAccountId,
        referenceContainerId, referencePartitionId, referenceIsEncrypted, BlobId.BlobDataType.DATACHUNK);
    BlobId regeneratedBlobId = FrontendUtils.getBlobIdFromString(blobId.getID(), referenceClusterMap);
    assertEquals("BlobId mismatch", blobId, regeneratedBlobId);
    assertBlobIdFieldValues(regeneratedBlobId, referenceType, referenceDatacenterId, referenceAccountId,
        referenceContainerId, referencePartitionId, version >= BlobId.BLOB_ID_V4 && referenceIsEncrypted);
    // bad path
    try {
      FrontendUtils.getBlobIdFromString(blobId.getID().substring(1), referenceClusterMap);
      fail("Should have thrown exception for bad blobId");
    } catch (RestServiceException e) {
      assertEquals("RestServiceErrorCode mismatch", RestServiceErrorCode.BadRequest, e.getErrorCode());
    }
  }
}
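For context, the helper under test is thin by design. A plausible sketch of FrontendUtils.getBlobIdFromString, assuming it delegates to the BlobId(String, ClusterMap) constructor seen in GetReplicasHandler above and maps any parse failure to RestServiceErrorCode.BadRequest; the exact implementation may differ:

// Hypothetical sketch of the helper under test: parse the serialized ID against
// the cluster map and surface any parse failure as a client error (BadRequest),
// which is exactly what the bad-path assertion above expects.
static BlobId getBlobIdFromString(String blobIdStr, ClusterMap clusterMap) throws RestServiceException {
  try {
    return new BlobId(blobIdStr, clusterMap);
  } catch (Exception e) {
    throw new RestServiceException("Invalid blob id=" + blobIdStr, e, RestServiceErrorCode.BadRequest);
  }
}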
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
The class AmbryRequests, method handleReplicaMetadataRequest.
@Override
public void handleReplicaMetadataRequest(NetworkRequest request) throws IOException, InterruptedException {
  if (replicationEngine == null) {
    throw new UnsupportedOperationException("Replication not supported on this node.");
  }
  ReplicaMetadataRequest replicaMetadataRequest =
      ReplicaMetadataRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap, findTokenHelper);
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.replicaMetadataRequestQueueTimeInMs.update(requestQueueTime);
  metrics.replicaMetadataRequestRate.mark();
  List<ReplicaMetadataRequestInfo> replicaMetadataRequestInfoList =
      replicaMetadataRequest.getReplicaMetadataRequestInfoList();
  int partitionCnt = replicaMetadataRequestInfoList.size();
  long startTimeInMs = SystemTime.getInstance().milliseconds();
  ReplicaMetadataResponse response = null;
  try {
    List<ReplicaMetadataResponseInfo> replicaMetadataResponseList = new ArrayList<>(partitionCnt);
    for (ReplicaMetadataRequestInfo replicaMetadataRequestInfo : replicaMetadataRequestInfoList) {
      long partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
      PartitionId partitionId = replicaMetadataRequestInfo.getPartitionId();
      ReplicaType replicaType = replicaMetadataRequestInfo.getReplicaType();
      ServerErrorCode error = validateRequest(partitionId, RequestOrResponseType.ReplicaMetadataRequest, false);
      logger.trace("{} Time used to validate metadata request: {}", partitionId,
          (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
      if (error != ServerErrorCode.No_Error) {
        logger.error("Validating replica metadata request failed with error {} for partition {}", error, partitionId);
        ReplicaMetadataResponseInfo replicaMetadataResponseInfo = new ReplicaMetadataResponseInfo(partitionId,
            replicaType, error,
            ReplicaMetadataResponse.getCompatibleResponseVersion(replicaMetadataRequest.getVersionId()));
        replicaMetadataResponseList.add(replicaMetadataResponseInfo);
      } else {
        try {
          FindToken findToken = replicaMetadataRequestInfo.getToken();
          String hostName = replicaMetadataRequestInfo.getHostName();
          String replicaPath = replicaMetadataRequestInfo.getReplicaPath();
          Store store = storeManager.getStore(partitionId);
          partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
          FindInfo findInfo = store.findEntriesSince(findToken,
              replicaMetadataRequest.getMaxTotalSizeOfEntriesInBytes(), hostName, replicaPath);
          logger.trace("{} Time used to find entry since: {}", partitionId,
              (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
          partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
          long totalBytesRead = findInfo.getFindToken().getBytesRead();
          replicationEngine.updateTotalBytesReadByRemoteReplica(partitionId, hostName, replicaPath, totalBytesRead);
          logger.trace("{} Time used to update total bytes read: {}", partitionId,
              (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
          partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
          logger.trace("{} Time used to get remote replica lag in bytes: {}", partitionId,
              (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
          ReplicaMetadataResponseInfo replicaMetadataResponseInfo = new ReplicaMetadataResponseInfo(partitionId,
              replicaType, findInfo.getFindToken(), findInfo.getMessageEntries(),
              getRemoteReplicaLag(store, totalBytesRead),
              ReplicaMetadataResponse.getCompatibleResponseVersion(replicaMetadataRequest.getVersionId()));
          if (replicaMetadataResponseInfo.getTotalSizeOfAllMessages()
              > 5 * replicaMetadataRequest.getMaxTotalSizeOfEntriesInBytes()) {
            logger.debug("{} generated a metadata response {} where the cumulative size of messages is {}",
                replicaMetadataRequest, replicaMetadataResponseInfo,
                replicaMetadataResponseInfo.getTotalSizeOfAllMessages());
            metrics.replicationResponseMessageSizeTooHigh.inc();
          }
          replicaMetadataResponseList.add(replicaMetadataResponseInfo);
          metrics.replicaMetadataTotalSizeOfMessages.update(replicaMetadataResponseInfo.getTotalSizeOfAllMessages());
        } catch (StoreException e) {
          logger.error("Store exception on a replica metadata request with error code {} for partition {}",
              e.getErrorCode(), partitionId, e);
          if (e.getErrorCode() == StoreErrorCodes.IOError) {
            metrics.storeIOError.inc();
          } else {
            metrics.unExpectedStoreFindEntriesError.inc();
          }
          ReplicaMetadataResponseInfo replicaMetadataResponseInfo = new ReplicaMetadataResponseInfo(partitionId,
              replicaType, ErrorMapping.getStoreErrorMapping(e.getErrorCode()),
              ReplicaMetadataResponse.getCompatibleResponseVersion(replicaMetadataRequest.getVersionId()));
          replicaMetadataResponseList.add(replicaMetadataResponseInfo);
        }
      }
    }
    response = new ReplicaMetadataResponse(replicaMetadataRequest.getCorrelationId(),
        replicaMetadataRequest.getClientId(), ServerErrorCode.No_Error, replicaMetadataResponseList,
        ReplicaMetadataResponse.getCompatibleResponseVersion(replicaMetadataRequest.getVersionId()));
  } catch (Exception e) {
    logger.error("Unknown exception for request {}", replicaMetadataRequest, e);
    response = new ReplicaMetadataResponse(replicaMetadataRequest.getCorrelationId(),
        replicaMetadataRequest.getClientId(), ServerErrorCode.Unknown_Error,
        ReplicaMetadataResponse.getCompatibleResponseVersion(replicaMetadataRequest.getVersionId()));
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTimeInMs;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", replicaMetadataRequest, response, processingTime);
    logger.trace("{} {} processingTime {}", replicaMetadataRequest, response, processingTime);
    metrics.replicaMetadataRequestProcessingTimeInMs.update(processingTime);
    // client id now has dc name at the end, for example: ClientId=replication-metadata-abc.example.com[dc1]
    String[] clientStrs = replicaMetadataRequest.getClientId().split("\\[");
    if (clientStrs.length > 1) {
      String clientDc = clientStrs[1].substring(0, clientStrs[1].length() - 1);
      if (!currentNode.getDatacenterName().equals(clientDc)) {
        metrics.updateCrossColoMetadataExchangeBytesRate(clientDc, response != null ? response.sizeInBytes() : 0L);
      }
    }
  }
  requestResponseChannel.sendResponse(response, request,
      new ServerNetworkResponseMetrics(metrics.replicaMetadataResponseQueueTimeInMs,
          metrics.replicaMetadataSendTimeInMs, metrics.replicaMetadataTotalTimeInMs, null, null, totalTimeSpent));
}
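The getRemoteReplicaLag call above reports how far the remote replica trails the local store. A minimal sketch, assuming the lag is simply the local store's size minus the bytes the remote has already read; this is a plausible reading of the call site, not confirmed from the source:

// Hypothetical sketch: remote replica lag = local store size - bytes already read
// by the remote replica (as tracked via the find token).
private long getRemoteReplicaLag(Store store, long totalBytesRead) {
  return store.getSizeInBytes() - totalBytesRead;
}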
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
The class PartitionResponseInfo, method readFrom.
public static PartitionResponseInfo readFrom(DataInputStream stream, ClusterMap map, short getResponseVersion)
    throws IOException {
  PartitionId partitionId = map.getPartitionIdFromStream(stream);
  MessageInfoAndMetadataListSerde messageInfoAndMetadataList =
      MessageInfoAndMetadataListSerde.deserializeMessageInfoAndMetadataList(stream, map,
          getMessageInfoAndMetadataListSerDeVersion(getResponseVersion));
  ServerErrorCode error = ServerErrorCode.values()[stream.readShort()];
  if (error != ServerErrorCode.No_Error) {
    return new PartitionResponseInfo(partitionId, new ArrayList<>(), new ArrayList<>(), error, getResponseVersion);
  } else {
    return new PartitionResponseInfo(partitionId, messageInfoAndMetadataList.getMessageInfoList(),
        messageInfoAndMetadataList.getMessageMetadataList(), ServerErrorCode.No_Error, getResponseVersion);
  }
}
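As a hedged usage sketch, this is how a client-side reader might consume the deserializer above. The stream, clusterMap, and getResponseVersion variables are assumed to be in scope, and the accessor names (getErrorCode, getMessageInfoList, getPartition) are assumptions that may differ from the actual class:

// Illustrative only: read one PartitionResponseInfo from a GetResponse stream.
PartitionResponseInfo info = PartitionResponseInfo.readFrom(stream, clusterMap, getResponseVersion);
if (info.getErrorCode() == ServerErrorCode.No_Error) {
  for (MessageInfo messageInfo : info.getMessageInfoList()) {
    // each entry describes one blob found in the partition
    System.out.println("Partition " + info.getPartition() + " has blob " + messageInfo.getStoreKey());
  }
}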
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
The class RequestResponseTest, method catchupStatusAdminRequestTest.
/**
* Tests the ser/de of {@link CatchupStatusAdminRequest} and checks for equality of fields with reference data.
* @throws IOException
*/
@Test
public void catchupStatusAdminRequestTest() throws IOException {
  MockClusterMap clusterMap = new MockClusterMap();
  PartitionId id = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
  int correlationId = 1234;
  String clientId = "client";
  // request
  long acceptableLag = Utils.getRandomLong(TestUtils.RANDOM, 10000);
  short numCaughtUpPerPartition = Utils.getRandomShort(TestUtils.RANDOM);
  AdminRequest adminRequest = new AdminRequest(AdminRequestOrResponseType.CatchupStatus, id, correlationId, clientId);
  CatchupStatusAdminRequest catchupStatusRequest =
      new CatchupStatusAdminRequest(acceptableLag, numCaughtUpPerPartition, adminRequest);
  DataInputStream requestStream = serAndPrepForRead(catchupStatusRequest, -1, true);
  AdminRequest deserializedAdminRequest = deserAdminRequestAndVerify(requestStream, clusterMap, correlationId,
      clientId, AdminRequestOrResponseType.CatchupStatus, id);
  CatchupStatusAdminRequest deserializedCatchupStatusRequest =
      CatchupStatusAdminRequest.readFrom(requestStream, deserializedAdminRequest);
  Assert.assertEquals("Acceptable lag not as set", acceptableLag,
      deserializedCatchupStatusRequest.getAcceptableLagInBytes());
  Assert.assertEquals("Num caught up per partition not as set", numCaughtUpPerPartition,
      deserializedCatchupStatusRequest.getNumReplicasCaughtUpPerPartition());
  catchupStatusRequest.release();
  // response
  boolean isCaughtUp = TestUtils.RANDOM.nextBoolean();
  ServerErrorCode[] values = ServerErrorCode.values();
  int indexToPick = TestUtils.RANDOM.nextInt(values.length);
  ServerErrorCode responseErrorCode = values[indexToPick];
  AdminResponse adminResponse = new AdminResponse(correlationId, clientId, responseErrorCode);
  CatchupStatusAdminResponse catchupStatusResponse = new CatchupStatusAdminResponse(isCaughtUp, adminResponse);
  DataInputStream responseStream = serAndPrepForRead(catchupStatusResponse, -1, false);
  CatchupStatusAdminResponse deserializedCatchupStatusResponse = CatchupStatusAdminResponse.readFrom(responseStream);
  Assert.assertEquals(deserializedCatchupStatusResponse.getCorrelationId(), correlationId);
  Assert.assertEquals(deserializedCatchupStatusResponse.getClientId(), clientId);
  Assert.assertEquals(deserializedCatchupStatusResponse.getError(), responseErrorCode);
  Assert.assertEquals(deserializedCatchupStatusResponse.isCaughtUp(), isCaughtUp);
  catchupStatusResponse.release();
}
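serAndPrepForRead and deserAdminRequestAndVerify are test helpers in RequestResponseTest, not shown here. A simplified, hypothetical sketch of what serAndPrepForRead might do, assuming the ambry Send contract (writeTo/isSendComplete); the real helper also does per-type header bookkeeping that is elided in this sketch:

// Hypothetical simplification: drain the request/response payload into memory and
// hand it back as a DataInputStream for the readFrom methods to consume.
// Assumes java.io.* and java.nio.channels.* imports. channelSize and isRequest are
// used by the real helper to size the buffer and skip type headers; ignored here.
private DataInputStream serAndPrepForRead(RequestOrResponse requestOrResponse, int channelSize, boolean isRequest)
    throws IOException {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  WritableByteChannel channel = Channels.newChannel(out);
  while (!requestOrResponse.isSendComplete()) {
    requestOrResponse.writeTo(channel);  // Send interface: write until fully sent
  }
  DataInputStream stream = new DataInputStream(new ByteArrayInputStream(out.toByteArray()));
  stream.readLong();  // consume the total-size header so readFrom sees the stream where it expects
  return stream;
}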