Use of com.github.ambry.network.ServerNetworkResponseMetrics in project ambry by linkedin.
In class AmbryRequests, method handleDeleteRequest.
public void handleDeleteRequest(Request request) throws IOException, InterruptedException {
  DeleteRequest deleteRequest = DeleteRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.deleteBlobRequestQueueTimeInMs.update(requestQueueTime);
  metrics.deleteBlobRequestRate.mark();
  long startTime = SystemTime.getInstance().milliseconds();
  DeleteResponse response = null;
  try {
    ServerErrorCode error = validateRequest(deleteRequest.getBlobId().getPartition(), RequestOrResponseType.DeleteRequest);
    if (error != ServerErrorCode.No_Error) {
      logger.error("Validating delete request failed with error {} for request {}", error, deleteRequest);
      response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), error);
    } else {
      MessageFormatInputStream stream = new DeleteMessageFormatInputStream(deleteRequest.getBlobId(), deleteRequest.getAccountId(), deleteRequest.getContainerId(), deleteRequest.getDeletionTimeInMs());
      MessageInfo info = new MessageInfo(deleteRequest.getBlobId(), stream.getSize(), deleteRequest.getAccountId(), deleteRequest.getContainerId(), deleteRequest.getDeletionTimeInMs());
      ArrayList<MessageInfo> infoList = new ArrayList<MessageInfo>();
      infoList.add(info);
      MessageFormatWriteSet writeset = new MessageFormatWriteSet(stream, infoList, false);
      Store storeToDelete = storageManager.getStore(deleteRequest.getBlobId().getPartition());
      storeToDelete.delete(writeset);
      response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.No_Error);
      if (notification != null) {
        notification.onBlobReplicaDeleted(currentNode.getHostname(), currentNode.getPort(), deleteRequest.getBlobId().getID(), BlobReplicaSourceType.PRIMARY);
      }
    }
  } catch (StoreException e) {
    boolean logInErrorLevel = false;
    if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
      metrics.idNotFoundError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
      metrics.ttlExpiredError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
      metrics.idDeletedError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
      metrics.deleteAuthorizationFailure.inc();
    } else {
      logInErrorLevel = true;
      metrics.unExpectedStoreDeleteError.inc();
    }
    if (logInErrorLevel) {
      logger.error("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
    } else {
      logger.trace("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
    }
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
  } catch (Exception e) {
    logger.error("Unknown exception for delete request " + deleteRequest, e);
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.Unknown_Error);
    metrics.unExpectedStoreDeleteError.inc();
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", deleteRequest, response, processingTime);
    metrics.deleteBlobProcessingTimeInMs.update(processingTime);
  }
  requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(metrics.deleteBlobResponseQueueTimeInMs, metrics.deleteBlobSendTimeInMs, metrics.deleteBlobTotalTimeInMs, null, null, totalTimeSpent));
}
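The ServerNetworkResponseMetrics constructor on the last line is the only point where this handler touches the class: the first three histograms cover the delete response queue, send, and total time, the two nulls are the size-bucketed blob slots that only matter for GET responses, and the final long carries the time already spent in queueing plus processing. Below is a minimal, self-contained sketch of just that timing-accumulation pattern; the class name, histogram names, and the recordTiming-style structure are illustrative only and are not part of ambry (only the Dropwizard MetricRegistry/Histogram APIs are real).

import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;

// Hypothetical illustration of the timing bookkeeping the handler performs.
public class DeleteTimingSketch {
  private final MetricRegistry registry = new MetricRegistry();
  private final Histogram queueTime = registry.histogram("deleteRequestQueueTimeInMs");
  private final Histogram processingTime = registry.histogram("deleteProcessingTimeInMs");

  public long handle(long requestStartTimeInMs) {
    long requestQueueTime = System.currentTimeMillis() - requestStartTimeInMs;
    long totalTimeSpent = requestQueueTime;       // time spent waiting in the request queue
    queueTime.update(requestQueueTime);
    long startTime = System.currentTimeMillis();
    try {
      // ... perform the store delete ...
    } finally {
      long elapsed = System.currentTimeMillis() - startTime;
      totalTimeSpent += elapsed;                  // queue time + processing time
      processingTime.update(elapsed);
    }
    // totalTimeSpent is what the real handler passes as the last constructor argument of
    // ServerNetworkResponseMetrics, so the network layer can add response send time on top.
    return totalTimeSpent;
  }
}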
Use of com.github.ambry.network.ServerNetworkResponseMetrics in project ambry by linkedin.
In class AmbryRequests, method sendGetResponse.
private void sendGetResponse(RequestResponseChannel requestResponseChannel, GetResponse response, NetworkRequest request, Histogram responseQueueTime, Histogram responseSendTime, Histogram requestTotalTime, long totalTimeSpent, long blobSize, MessageFormatFlags flags, ServerMetrics metrics) throws InterruptedException {
  if (blobSize <= ServerMetrics.smallBlob) {
    if (flags == MessageFormatFlags.Blob || flags == MessageFormatFlags.All) {
      if (response.getError() == ServerErrorCode.No_Error) {
        metrics.markGetBlobRequestRateBySize(blobSize);
        requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(responseQueueTime, responseSendTime, requestTotalTime, metrics.getSmallBlobSendTimeInMs, metrics.getSmallBlobTotalTimeInMs, totalTimeSpent));
      } else {
        requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(responseQueueTime, responseSendTime, requestTotalTime, null, null, totalTimeSpent));
      }
    } else {
      requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(responseQueueTime, responseSendTime, requestTotalTime, null, null, totalTimeSpent));
    }
  } else if (blobSize <= ServerMetrics.mediumBlob) {
    if (flags == MessageFormatFlags.Blob || flags == MessageFormatFlags.All) {
      if (response.getError() == ServerErrorCode.No_Error) {
        metrics.markGetBlobRequestRateBySize(blobSize);
        requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(responseQueueTime, responseSendTime, requestTotalTime, metrics.getMediumBlobSendTimeInMs, metrics.getMediumBlobTotalTimeInMs, totalTimeSpent));
      } else {
        requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(responseQueueTime, responseSendTime, requestTotalTime, null, null, totalTimeSpent));
      }
    } else {
      requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(responseQueueTime, responseSendTime, requestTotalTime, null, null, totalTimeSpent));
    }
  } else {
    if (flags == MessageFormatFlags.Blob || flags == MessageFormatFlags.All) {
      if (response.getError() == ServerErrorCode.No_Error) {
        metrics.markGetBlobRequestRateBySize(blobSize);
        requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(responseQueueTime, responseSendTime, requestTotalTime, metrics.getLargeBlobSendTimeInMs, metrics.getLargeBlobTotalTimeInMs, totalTimeSpent));
      } else {
        requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(responseQueueTime, responseSendTime, requestTotalTime, null, null, totalTimeSpent));
      }
    } else {
      requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(responseQueueTime, responseSendTime, requestTotalTime, null, null, totalTimeSpent));
    }
  }
}
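The three size buckets above differ only in which pair of blob-specific histograms is passed; every other branch passes null for those two slots. A sketch of the same selection collapsed into a single sendResponse call is shown below. It assumes the same imports, fields, and ServerMetrics members as the class above, and the method name sendGetResponseCompact is hypothetical; the intent is identical behavior, not a drop-in replacement from the ambry codebase.

// Hypothetical condensation of the branching above; behavior is intended to be identical.
private void sendGetResponseCompact(RequestResponseChannel requestResponseChannel, GetResponse response,
    NetworkRequest request, Histogram responseQueueTime, Histogram responseSendTime, Histogram requestTotalTime,
    long totalTimeSpent, long blobSize, MessageFormatFlags flags, ServerMetrics metrics) throws InterruptedException {
  Histogram blobSendTime = null;
  Histogram blobTotalTime = null;
  boolean isBlobFetch = flags == MessageFormatFlags.Blob || flags == MessageFormatFlags.All;
  if (isBlobFetch && response.getError() == ServerErrorCode.No_Error) {
    metrics.markGetBlobRequestRateBySize(blobSize);
    if (blobSize <= ServerMetrics.smallBlob) {
      blobSendTime = metrics.getSmallBlobSendTimeInMs;
      blobTotalTime = metrics.getSmallBlobTotalTimeInMs;
    } else if (blobSize <= ServerMetrics.mediumBlob) {
      blobSendTime = metrics.getMediumBlobSendTimeInMs;
      blobTotalTime = metrics.getMediumBlobTotalTimeInMs;
    } else {
      blobSendTime = metrics.getLargeBlobSendTimeInMs;
      blobTotalTime = metrics.getLargeBlobTotalTimeInMs;
    }
  }
  requestResponseChannel.sendResponse(response, request,
      new ServerNetworkResponseMetrics(responseQueueTime, responseSendTime, requestTotalTime, blobSendTime,
          blobTotalTime, totalTimeSpent));
}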
Use of com.github.ambry.network.ServerNetworkResponseMetrics in project ambry by linkedin.
In class AmbryRequests, method handleTtlUpdateRequest.
@Override
public void handleTtlUpdateRequest(NetworkRequest request) throws IOException, InterruptedException {
  TtlUpdateRequest updateRequest;
  if (request instanceof LocalChannelRequest) {
    // This is a case where handleTtlUpdateRequest is called when frontends are talking to Azure. In this case, this method
    // is called by request handler threads running within the frontend router itself. So, the request can be directly
    // referenced as java objects without any need for deserialization.
    updateRequest = (TtlUpdateRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
  } else {
    updateRequest = TtlUpdateRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
  }
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.updateBlobTtlRequestQueueTimeInMs.update(requestQueueTime);
  metrics.updateBlobTtlRequestRate.mark();
  long startTime = SystemTime.getInstance().milliseconds();
  TtlUpdateResponse response = null;
  try {
    ServerErrorCode error = validateRequest(updateRequest.getBlobId().getPartition(), RequestOrResponseType.TtlUpdateRequest, false);
    if (error != ServerErrorCode.No_Error) {
      logger.error("Validating TtlUpdateRequest failed with error {} for request {}", error, updateRequest);
      response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), error);
    } else {
      BlobId convertedStoreKey = (BlobId) getConvertedStoreKeys(Collections.singletonList(updateRequest.getBlobId())).get(0);
      MessageInfo info = new MessageInfo.Builder(convertedStoreKey, -1, convertedStoreKey.getAccountId(), convertedStoreKey.getContainerId(), updateRequest.getOperationTimeInMs()).isTtlUpdated(true).expirationTimeInMs(updateRequest.getExpiresAtMs()).lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND).build();
      Store store = storeManager.getStore(updateRequest.getBlobId().getPartition());
      store.updateTtl(Collections.singletonList(info));
      response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ServerErrorCode.No_Error);
      if (notification != null) {
        notification.onBlobReplicaUpdated(currentNode.getHostname(), currentNode.getPort(), convertedStoreKey.getID(), BlobReplicaSourceType.PRIMARY, UpdateType.TTL_UPDATE, info);
      }
    }
  } catch (StoreException e) {
    boolean logInErrorLevel = false;
    if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
      metrics.idNotFoundError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
      metrics.ttlExpiredError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
      metrics.idDeletedError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
      metrics.ttlUpdateAuthorizationFailure.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.Already_Updated) {
      metrics.ttlAlreadyUpdatedError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.Update_Not_Allowed) {
      metrics.ttlUpdateRejectedError.inc();
    } else {
      logInErrorLevel = true;
      metrics.unExpectedStoreTtlUpdateError.inc();
    }
    if (logInErrorLevel) {
      logger.error("Store exception on a TTL update with error code {} for request {}", e.getErrorCode(), updateRequest, e);
    } else {
      logger.trace("Store exception on a TTL update with error code {} for request {}", e.getErrorCode(), updateRequest, e);
    }
    response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
  } catch (Exception e) {
    logger.error("Unknown exception for TTL update request {}", updateRequest, e);
    response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ServerErrorCode.Unknown_Error);
    metrics.unExpectedStoreTtlUpdateError.inc();
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", updateRequest, response, processingTime);
    metrics.updateBlobTtlProcessingTimeInMs.update(processingTime);
  }
  requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(metrics.updateBlobTtlResponseQueueTimeInMs, metrics.updateBlobTtlSendTimeInMs, metrics.updateBlobTtlTotalTimeInMs, null, null, totalTimeSpent));
}
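The StoreException branch maps each StoreErrorCodes value to its own counter; only unrecognized codes are logged at error level and counted as unExpectedStoreTtlUpdateError. The same mapping could be written as a switch, as in the sketch below. It assumes the same metrics field as the class above and that StoreErrorCodes is an enum usable in a switch (it is in current ambry, but that is an assumption here); the helper name recordTtlUpdateStoreError is hypothetical.

// Hypothetical reshaping of the error-code accounting above; same counters, same log-level decision.
private boolean recordTtlUpdateStoreError(StoreException e) {
  boolean logInErrorLevel = false;
  switch (e.getErrorCode()) {
    case ID_Not_Found:
      metrics.idNotFoundError.inc();
      break;
    case TTL_Expired:
      metrics.ttlExpiredError.inc();
      break;
    case ID_Deleted:
      metrics.idDeletedError.inc();
      break;
    case Authorization_Failure:
      metrics.ttlUpdateAuthorizationFailure.inc();
      break;
    case Already_Updated:
      metrics.ttlAlreadyUpdatedError.inc();
      break;
    case Update_Not_Allowed:
      metrics.ttlUpdateRejectedError.inc();
      break;
    default:
      logInErrorLevel = true;
      metrics.unExpectedStoreTtlUpdateError.inc();
  }
  return logInErrorLevel;
}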
Use of com.github.ambry.network.ServerNetworkResponseMetrics in project ambry by linkedin.
In class AmbryRequests, method handleDeleteRequest (NetworkRequest overload).
@Override
public void handleDeleteRequest(NetworkRequest request) throws IOException, InterruptedException {
  DeleteRequest deleteRequest;
  if (request instanceof LocalChannelRequest) {
    // This is a case where handleDeleteRequest is called when frontends are talking to Azure. In this case, this method
    // is called by request handler threads running within the frontend router itself. So, the request can be directly
    // referenced as java objects without any need for deserialization.
    deleteRequest = (DeleteRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
  } else {
    deleteRequest = DeleteRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
  }
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.deleteBlobRequestQueueTimeInMs.update(requestQueueTime);
  metrics.deleteBlobRequestRate.mark();
  long startTime = SystemTime.getInstance().milliseconds();
  DeleteResponse response = null;
  try {
    StoreKey convertedStoreKey = getConvertedStoreKeys(Collections.singletonList(deleteRequest.getBlobId())).get(0);
    ServerErrorCode error = validateRequest(deleteRequest.getBlobId().getPartition(), RequestOrResponseType.DeleteRequest, false);
    if (error != ServerErrorCode.No_Error) {
      logger.error("Validating delete request failed with error {} for request {}", error, deleteRequest);
      response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), error);
    } else {
      BlobId convertedBlobId = (BlobId) convertedStoreKey;
      MessageInfo info = new MessageInfo.Builder(convertedBlobId, -1, convertedBlobId.getAccountId(), convertedBlobId.getContainerId(), deleteRequest.getDeletionTimeInMs()).isDeleted(true).lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND).build();
      Store storeToDelete = storeManager.getStore(deleteRequest.getBlobId().getPartition());
      storeToDelete.delete(Collections.singletonList(info));
      response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.No_Error);
      if (notification != null) {
        notification.onBlobReplicaDeleted(currentNode.getHostname(), currentNode.getPort(), convertedStoreKey.getID(), BlobReplicaSourceType.PRIMARY);
      }
    }
  } catch (StoreException e) {
    boolean logInErrorLevel = false;
    if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
      metrics.idNotFoundError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
      metrics.ttlExpiredError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
      metrics.idDeletedError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
      metrics.deleteAuthorizationFailure.inc();
    } else {
      logInErrorLevel = true;
      metrics.unExpectedStoreDeleteError.inc();
    }
    if (logInErrorLevel) {
      logger.error("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
    } else {
      logger.trace("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
    }
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
  } catch (Exception e) {
    logger.error("Unknown exception for delete request {}", deleteRequest, e);
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.Unknown_Error);
    metrics.unExpectedStoreDeleteError.inc();
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", deleteRequest, response, processingTime);
    metrics.deleteBlobProcessingTimeInMs.update(processingTime);
  }
  requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(metrics.deleteBlobResponseQueueTimeInMs, metrics.deleteBlobSendTimeInMs, metrics.deleteBlobTotalTimeInMs, null, null, totalTimeSpent));
}
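Both delete handlers end identically: a ServerNetworkResponseMetrics built from the delete-specific queue/send/total histograms, nulls for the size-bucketed slots, and the accumulated totalTimeSpent. The real class lives in the ambry network layer and is not reproduced on this page; the following is only a hypothetical stand-in showing why the handler passes in the time already spent, namely so the network layer can fold response queue and send time on top and update the request-total histogram once.

import com.codahale.metrics.Histogram;

// Hypothetical stand-in, NOT the real com.github.ambry.network.ServerNetworkResponseMetrics.
class ResponseTimingHolder {
  private final Histogram responseQueueTime;
  private final Histogram responseSendTime;
  private final Histogram requestTotalTime;
  private long timeSpentSoFar; // queue + processing time handed over by the request handler

  ResponseTimingHolder(Histogram responseQueueTime, Histogram responseSendTime, Histogram requestTotalTime,
      long timeSpentSoFar) {
    this.responseQueueTime = responseQueueTime;
    this.responseSendTime = responseSendTime;
    this.requestTotalTime = requestTotalTime;
    this.timeSpentSoFar = timeSpentSoFar;
  }

  // Called by the network layer once the response has left the response queue.
  void recordQueueTime(long queueTimeInMs) {
    responseQueueTime.update(queueTimeInMs);
    timeSpentSoFar += queueTimeInMs;
  }

  // Called once the response bytes have been written to the socket.
  void recordSendTime(long sendTimeInMs) {
    responseSendTime.update(sendTimeInMs);
    timeSpentSoFar += sendTimeInMs;
    requestTotalTime.update(timeSpentSoFar);
  }
}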
Use of com.github.ambry.network.ServerNetworkResponseMetrics in project ambry by linkedin.
In class AmbryServerRequests, method handleAdminRequest.
/**
 * Handles an administration request. These requests can query for or change the internal state of the server.
 * @param request the request that needs to be handled.
 * @throws InterruptedException if response sending is interrupted.
 * @throws IOException if there are I/O errors carrying out the required operation.
 */
@Override
public void handleAdminRequest(NetworkRequest request) throws InterruptedException, IOException {
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  long startTime = SystemTime.getInstance().milliseconds();
  DataInputStream requestStream = new DataInputStream(request.getInputStream());
  AdminRequest adminRequest = AdminRequest.readFrom(requestStream, clusterMap);
  Histogram processingTimeHistogram = null;
  Histogram responseQueueTimeHistogram = null;
  Histogram responseSendTimeHistogram = null;
  Histogram requestTotalTimeHistogram = null;
  AdminResponse response = null;
  try {
    switch(adminRequest.getType()) {
      case TriggerCompaction:
        metrics.triggerCompactionRequestQueueTimeInMs.update(requestQueueTime);
        metrics.triggerCompactionRequestRate.mark();
        processingTimeHistogram = metrics.triggerCompactionResponseQueueTimeInMs;
        responseQueueTimeHistogram = metrics.triggerCompactionResponseQueueTimeInMs;
        responseSendTimeHistogram = metrics.triggerCompactionResponseSendTimeInMs;
        requestTotalTimeHistogram = metrics.triggerCompactionRequestTotalTimeInMs;
        response = handleTriggerCompactionRequest(adminRequest);
        break;
      case RequestControl:
        metrics.requestControlRequestQueueTimeInMs.update(requestQueueTime);
        metrics.requestControlRequestRate.mark();
        processingTimeHistogram = metrics.requestControlResponseQueueTimeInMs;
        responseQueueTimeHistogram = metrics.requestControlResponseQueueTimeInMs;
        responseSendTimeHistogram = metrics.requestControlResponseSendTimeInMs;
        requestTotalTimeHistogram = metrics.requestControlRequestTotalTimeInMs;
        response = handleRequestControlRequest(requestStream, adminRequest);
        break;
      case ReplicationControl:
        metrics.replicationControlRequestQueueTimeInMs.update(requestQueueTime);
        metrics.replicationControlRequestRate.mark();
        processingTimeHistogram = metrics.replicationControlResponseQueueTimeInMs;
        responseQueueTimeHistogram = metrics.replicationControlResponseQueueTimeInMs;
        responseSendTimeHistogram = metrics.replicationControlResponseSendTimeInMs;
        requestTotalTimeHistogram = metrics.replicationControlRequestTotalTimeInMs;
        response = handleReplicationControlRequest(requestStream, adminRequest);
        break;
      case CatchupStatus:
        metrics.catchupStatusRequestQueueTimeInMs.update(requestQueueTime);
        metrics.catchupStatusRequestRate.mark();
        processingTimeHistogram = metrics.catchupStatusResponseQueueTimeInMs;
        responseQueueTimeHistogram = metrics.catchupStatusResponseQueueTimeInMs;
        responseSendTimeHistogram = metrics.catchupStatusResponseSendTimeInMs;
        requestTotalTimeHistogram = metrics.catchupStatusRequestTotalTimeInMs;
        response = handleCatchupStatusRequest(requestStream, adminRequest);
        break;
      case BlobStoreControl:
        metrics.blobStoreControlRequestQueueTimeInMs.update(requestQueueTime);
        metrics.blobStoreControlRequestRate.mark();
        processingTimeHistogram = metrics.blobStoreControlRequestQueueTimeInMs;
        responseQueueTimeHistogram = metrics.blobStoreControlRequestQueueTimeInMs;
        responseSendTimeHistogram = metrics.blobStoreControlResponseSendTimeInMs;
        requestTotalTimeHistogram = metrics.blobStoreControlRequestTotalTimeInMs;
        response = handleBlobStoreControlRequest(requestStream, adminRequest);
        break;
    }
  } catch (Exception e) {
    logger.error("Unknown exception for admin request {}", adminRequest, e);
    metrics.unExpectedAdminOperationError.inc();
    response = new AdminResponse(adminRequest.getCorrelationId(), adminRequest.getClientId(), ServerErrorCode.Unknown_Error);
    switch(adminRequest.getType()) {
      case CatchupStatus:
        response = new CatchupStatusAdminResponse(false, response);
        break;
    }
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", adminRequest, response, processingTime);
    processingTimeHistogram.update(processingTime);
  }
  requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(responseQueueTimeHistogram, responseSendTimeHistogram, requestTotalTimeHistogram, null, null, totalTimeSpent));
}
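Here the histogram set passed to ServerNetworkResponseMetrics is chosen per admin request type inside the switch, with four parallel assignments in every case. One way to tighten that bookkeeping is to bundle the histograms in a small holder; the AdminTimers class below is purely a hypothetical sketch (its name and field names are not part of ambry), shown only to illustrate how the existing metric fields would be grouped and then unpacked into the final sendResponse call.

import com.codahale.metrics.Histogram;

// Hypothetical grouping of the per-admin-type histograms assigned in each switch case above.
final class AdminTimers {
  final Histogram processingTime;
  final Histogram responseQueueTime;
  final Histogram responseSendTime;
  final Histogram requestTotalTime;

  AdminTimers(Histogram processingTime, Histogram responseQueueTime, Histogram responseSendTime,
      Histogram requestTotalTime) {
    this.processingTime = processingTime;
    this.responseQueueTime = responseQueueTime;
    this.responseSendTime = responseSendTime;
    this.requestTotalTime = requestTotalTime;
  }
}

// Usage sketch for the TriggerCompaction case, assuming the same metrics fields as above:
//   AdminTimers timers = new AdminTimers(metrics.triggerCompactionResponseQueueTimeInMs,
//       metrics.triggerCompactionResponseQueueTimeInMs, metrics.triggerCompactionResponseSendTimeInMs,
//       metrics.triggerCompactionRequestTotalTimeInMs);
//   ...
//   requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(
//       timers.responseQueueTime, timers.responseSendTime, timers.requestTotalTime, null, null, totalTimeSpent));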