Use of com.github.ambry.commons.ServerErrorCode in project ambry by LinkedIn.
The class AmbryRequests, method handleReplicationControlRequest.
/**
* Handles {@link com.github.ambry.protocol.AdminRequestOrResponseType#ReplicationControl}.
* @param requestStream the serialized bytes of the request.
* @param adminRequest the {@link AdminRequest} received.
* @return the {@link AdminResponse} to the request.
* @throws IOException if there is any I/O error reading from the {@code requestStream}.
*/
private AdminResponse handleReplicationControlRequest(DataInputStream requestStream, AdminRequest adminRequest)
    throws IOException {
  Collection<PartitionId> partitionIds;
  ServerErrorCode error = ServerErrorCode.No_Error;
  ReplicationControlAdminRequest replControlRequest =
      ReplicationControlAdminRequest.readFrom(requestStream, adminRequest);
  if (replControlRequest.getPartitionId() != null) {
    error = validateRequest(replControlRequest.getPartitionId(), RequestOrResponseType.AdminRequest);
    partitionIds = Collections.singletonList(replControlRequest.getPartitionId());
  } else {
    partitionIds = partitionsInCurrentNode;
  }
  if (!error.equals(ServerErrorCode.Partition_Unknown)) {
    if (replicationManager.controlReplicationForPartitions(partitionIds, replControlRequest.getOrigins(),
        replControlRequest.shouldEnable())) {
      error = ServerErrorCode.No_Error;
    } else {
      logger.error("Could not set enable status for replication of {} from {} to {}. Check partition validity and"
          + " origins list", partitionIds, replControlRequest.getOrigins(), replControlRequest.shouldEnable());
      error = ServerErrorCode.Bad_Request;
    }
  }
  return new AdminResponse(adminRequest.getCorrelationId(), adminRequest.getClientId(), error);
}
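The handler collapses the boolean returned by controlReplicationForPartitions into a ServerErrorCode, so the admin client only ever sees No_Error, Bad_Request, or the validation error from validateRequest. A minimal caller-side sketch of interpreting that code, assuming an AdminResponse named adminResponse and a getError() accessor on the response type:

// Hedged sketch: branch on the ServerErrorCode carried by the AdminResponse.
// The getError() accessor is assumed, not verified here.
switch (adminResponse.getError()) {
  case No_Error:
    // replication to the named origins was enabled/disabled on every targeted partition
    break;
  case Bad_Request:
    // the partition or origins list was rejected; replication state is unchanged
    break;
  case Partition_Unknown:
    // the partition named in the request is not hosted on this node
    break;
  default:
    // any other validation error surfaced by validateRequest
    break;
}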
Use of com.github.ambry.commons.ServerErrorCode in project ambry by LinkedIn.
The class AmbryRequests, method handleGetRequest.
public void handleGetRequest(Request request) throws IOException, InterruptedException {
  GetRequest getRequest = GetRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
  Histogram responseQueueTime = null;
  Histogram responseSendTime = null;
  Histogram responseTotalTime = null;
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  if (getRequest.getMessageFormatFlag() == MessageFormatFlags.Blob) {
    metrics.getBlobRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobRequestRate.mark();
    responseQueueTime = metrics.getBlobResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobSendTimeInMs;
    responseTotalTime = metrics.getBlobTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobProperties) {
    metrics.getBlobPropertiesRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobPropertiesRequestRate.mark();
    responseQueueTime = metrics.getBlobPropertiesResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobPropertiesSendTimeInMs;
    responseTotalTime = metrics.getBlobPropertiesTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobUserMetadata) {
    metrics.getBlobUserMetadataRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobUserMetadataRequestRate.mark();
    responseQueueTime = metrics.getBlobUserMetadataResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobUserMetadataSendTimeInMs;
    responseTotalTime = metrics.getBlobUserMetadataTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobInfo) {
    metrics.getBlobInfoRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobInfoRequestRate.mark();
    responseQueueTime = metrics.getBlobInfoResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobInfoSendTimeInMs;
    responseTotalTime = metrics.getBlobInfoTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.All) {
    metrics.getBlobAllRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobAllRequestRate.mark();
    responseQueueTime = metrics.getBlobAllResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobAllSendTimeInMs;
    responseTotalTime = metrics.getBlobAllTotalTimeInMs;
  }
  long startTime = SystemTime.getInstance().milliseconds();
  GetResponse response = null;
  try {
    List<Send> messagesToSendList = new ArrayList<Send>(getRequest.getPartitionInfoList().size());
    List<PartitionResponseInfo> partitionResponseInfoList =
        new ArrayList<PartitionResponseInfo>(getRequest.getPartitionInfoList().size());
    for (PartitionRequestInfo partitionRequestInfo : getRequest.getPartitionInfoList()) {
      ServerErrorCode error =
          validateRequest(partitionRequestInfo.getPartition(), RequestOrResponseType.GetRequest);
      if (error != ServerErrorCode.No_Error) {
        logger.error("Validating get request failed for partition {} with error {}",
            partitionRequestInfo.getPartition(), error);
        PartitionResponseInfo partitionResponseInfo =
            new PartitionResponseInfo(partitionRequestInfo.getPartition(), error);
        partitionResponseInfoList.add(partitionResponseInfo);
      } else {
        try {
          Store storeToGet = storageManager.getStore(partitionRequestInfo.getPartition());
          EnumSet<StoreGetOptions> storeGetOptions = EnumSet.noneOf(StoreGetOptions.class);
          // Currently only one option is supported.
          if (getRequest.getGetOption() == GetOption.Include_Expired_Blobs) {
            storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Expired);
          }
          if (getRequest.getGetOption() == GetOption.Include_Deleted_Blobs) {
            storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Deleted);
          }
          if (getRequest.getGetOption() == GetOption.Include_All) {
            storeGetOptions =
                EnumSet.of(StoreGetOptions.Store_Include_Deleted, StoreGetOptions.Store_Include_Expired);
          }
          StoreInfo info = storeToGet.get(partitionRequestInfo.getBlobIds(), storeGetOptions);
          MessageFormatSend blobsToSend = new MessageFormatSend(info.getMessageReadSet(),
              getRequest.getMessageFormatFlag(), messageFormatMetrics, storeKeyFactory);
          PartitionResponseInfo partitionResponseInfo =
              new PartitionResponseInfo(partitionRequestInfo.getPartition(), info.getMessageReadSetInfo(),
                  blobsToSend.getMessageMetadataList());
          messagesToSendList.add(blobsToSend);
          partitionResponseInfoList.add(partitionResponseInfo);
        } catch (StoreException e) {
          boolean logInErrorLevel = false;
          if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
            metrics.idNotFoundError.inc();
          } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
            metrics.ttlExpiredError.inc();
          } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
            metrics.idDeletedError.inc();
          } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
            metrics.getAuthorizationFailure.inc();
          } else {
            metrics.unExpectedStoreGetError.inc();
            logInErrorLevel = true;
          }
          if (logInErrorLevel) {
            logger.error("Store exception on a get with error code {} for partition {}", e.getErrorCode(),
                partitionRequestInfo.getPartition(), e);
          } else {
            logger.trace("Store exception on a get with error code {} for partition {}", e.getErrorCode(),
                partitionRequestInfo.getPartition(), e);
          }
          PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(
              partitionRequestInfo.getPartition(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
          partitionResponseInfoList.add(partitionResponseInfo);
        } catch (MessageFormatException e) {
          logger.error("Message format exception on a get with error code " + e.getErrorCode()
              + " for partitionRequestInfo " + partitionRequestInfo, e);
          if (e.getErrorCode() == MessageFormatErrorCodes.Data_Corrupt) {
            metrics.dataCorruptError.inc();
          } else if (e.getErrorCode() == MessageFormatErrorCodes.Unknown_Format_Version) {
            metrics.unknownFormatError.inc();
          }
          PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(
              partitionRequestInfo.getPartition(), ErrorMapping.getMessageFormatErrorMapping(e.getErrorCode()));
          partitionResponseInfoList.add(partitionResponseInfo);
        }
      }
    }
    CompositeSend compositeSend = new CompositeSend(messagesToSendList);
    response = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), partitionResponseInfoList,
        compositeSend, ServerErrorCode.No_Error);
  } catch (Exception e) {
    logger.error("Unknown exception for request " + getRequest, e);
    response = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), ServerErrorCode.Unknown_Error);
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", getRequest, response, processingTime);
    if (getRequest.getMessageFormatFlag() == MessageFormatFlags.Blob) {
      metrics.getBlobProcessingTimeInMs.update(processingTime);
      metrics.updateGetBlobProcessingTimeBySize(response.sizeInBytes(), processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobProperties) {
      metrics.getBlobPropertiesProcessingTimeInMs.update(processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobUserMetadata) {
      metrics.getBlobUserMetadataProcessingTimeInMs.update(processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobInfo) {
      metrics.getBlobInfoProcessingTimeInMs.update(processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.All) {
      metrics.getBlobAllProcessingTimeInMs.update(processingTime);
      metrics.updateGetBlobProcessingTimeBySize(response.sizeInBytes(), processingTime);
    }
  }
  sendGetResponse(requestResponseChannel, response, request, responseQueueTime, responseSendTime, responseTotalTime,
      totalTimeSpent, response.sizeInBytes(), getRequest.getMessageFormatFlag(), metrics);
}
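The handler derives the store options from the request's GetOption and reports per-partition failures through PartitionResponseInfo instead of failing the whole response. A hedged sketch of the matching client side, building a request for the properties of a single blob; the constructor shapes are assumptions inferred from the accessors the handler calls (correlation id, client id, message format flag, partition list, get option), and blobId and correlationId stand in for values the caller already has:

// Hedged sketch: construct a GetRequest for one blob's properties, tolerating expired blobs.
// The PartitionRequestInfo and GetRequest constructor signatures are assumed, not verified here.
PartitionRequestInfo partitionRequestInfo =
    new PartitionRequestInfo(blobId.getPartition(), Collections.singletonList(blobId));
GetRequest getRequest = new GetRequest(correlationId, "sample-client", MessageFormatFlags.BlobProperties,
    Collections.singletonList(partitionRequestInfo), GetOption.Include_Expired_Blobs);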
Use of com.github.ambry.commons.ServerErrorCode in project ambry by LinkedIn.
The class ReplicaMetadataResponseInfo, method readFrom.
public static ReplicaMetadataResponseInfo readFrom(DataInputStream stream, FindTokenFactory factory,
    ClusterMap clusterMap, short replicaMetadataResponseVersion) throws IOException {
  PartitionId partitionId = clusterMap.getPartitionIdFromStream(stream);
  ServerErrorCode error = ServerErrorCode.values()[stream.readShort()];
  if (error != ServerErrorCode.No_Error) {
    return new ReplicaMetadataResponseInfo(partitionId, error);
  } else {
    FindToken token = factory.getFindToken(stream);
    Pair<List<MessageInfo>, List<MessageMetadata>> messageInfoAndMetadataList =
        MessageInfoAndMetadataListSerde.deserializeMessageInfoAndMetadataList(stream, clusterMap,
            getMessageInfoAndMetadataListSerDeVersion(replicaMetadataResponseVersion));
    long remoteReplicaLag = stream.readLong();
    return new ReplicaMetadataResponseInfo(partitionId, token, messageInfoAndMetadataList.getFirst(),
        remoteReplicaLag, replicaMetadataResponseVersion);
  }
}
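On the wire the ServerErrorCode travels as its enum ordinal in a short, and the find token, message list, and remote replica lag are only present when the code is No_Error. A minimal round-trip sketch of that encoding (the writer side is assumed to mirror this read path):

// Hedged sketch: the short read by readFrom is the enum ordinal of the ServerErrorCode.
short onWire = (short) ServerErrorCode.Partition_Unknown.ordinal();
ServerErrorCode decoded = ServerErrorCode.values()[onWire];
// decoded == ServerErrorCode.Partition_Unknown; this is also why reordering the enum
// constants would break wire compatibility between old and new servers.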
Use of com.github.ambry.commons.ServerErrorCode in project ambry by LinkedIn.
The class AmbryRequests, method handleDeleteRequest.
public void handleDeleteRequest(Request request) throws IOException, InterruptedException {
  DeleteRequest deleteRequest = DeleteRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.deleteBlobRequestQueueTimeInMs.update(requestQueueTime);
  metrics.deleteBlobRequestRate.mark();
  long startTime = SystemTime.getInstance().milliseconds();
  DeleteResponse response = null;
  try {
    ServerErrorCode error =
        validateRequest(deleteRequest.getBlobId().getPartition(), RequestOrResponseType.DeleteRequest);
    if (error != ServerErrorCode.No_Error) {
      logger.error("Validating delete request failed with error {} for request {}", error, deleteRequest);
      response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), error);
    } else {
      MessageFormatInputStream stream = new DeleteMessageFormatInputStream(deleteRequest.getBlobId(),
          deleteRequest.getAccountId(), deleteRequest.getContainerId(), deleteRequest.getDeletionTimeInMs());
      MessageInfo info = new MessageInfo(deleteRequest.getBlobId(), stream.getSize(), deleteRequest.getAccountId(),
          deleteRequest.getContainerId(), deleteRequest.getDeletionTimeInMs());
      ArrayList<MessageInfo> infoList = new ArrayList<MessageInfo>();
      infoList.add(info);
      MessageFormatWriteSet writeset = new MessageFormatWriteSet(stream, infoList, false);
      Store storeToDelete = storageManager.getStore(deleteRequest.getBlobId().getPartition());
      storeToDelete.delete(writeset);
      response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(),
          ServerErrorCode.No_Error);
      if (notification != null) {
        notification.onBlobReplicaDeleted(currentNode.getHostname(), currentNode.getPort(),
            deleteRequest.getBlobId().getID(), BlobReplicaSourceType.PRIMARY);
      }
    }
  } catch (StoreException e) {
    boolean logInErrorLevel = false;
    if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
      metrics.idNotFoundError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
      metrics.ttlExpiredError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
      metrics.idDeletedError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
      metrics.deleteAuthorizationFailure.inc();
    } else {
      logInErrorLevel = true;
      metrics.unExpectedStoreDeleteError.inc();
    }
    if (logInErrorLevel) {
      logger.error("Store exception on a delete with error code {} for request {}", e.getErrorCode(),
          deleteRequest, e);
    } else {
      logger.trace("Store exception on a delete with error code {} for request {}", e.getErrorCode(),
          deleteRequest, e);
    }
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(),
        ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
  } catch (Exception e) {
    logger.error("Unknown exception for delete request " + deleteRequest, e);
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(),
        ServerErrorCode.Unknown_Error);
    metrics.unExpectedStoreDeleteError.inc();
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", deleteRequest, response, processingTime);
    metrics.deleteBlobProcessingTimeInMs.update(processingTime);
  }
  requestResponseChannel.sendResponse(response, request,
      new ServerNetworkResponseMetrics(metrics.deleteBlobResponseQueueTimeInMs, metrics.deleteBlobSendTimeInMs,
          metrics.deleteBlobTotalTimeInMs, null, null, totalTimeSpent));
}
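StoreException error codes are translated into ServerErrorCode values through ErrorMapping.getStoreErrorMapping before they reach the client, so a blob that is already deleted surfaces as a distinct code rather than a generic failure. A hedged caller-side sketch of classifying that code; the getError() accessor, the Blob_Deleted mapping for ID_Deleted, and the idempotency policy shown here are assumptions, not Ambry's actual router logic:

// Hedged sketch: treat "already deleted" as success so delete retries stay idempotent.
static boolean deleteSucceeded(DeleteResponse deleteResponse) {
  ServerErrorCode code = deleteResponse.getError();
  return code == ServerErrorCode.No_Error || code == ServerErrorCode.Blob_Deleted;
}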
Use of com.github.ambry.commons.ServerErrorCode in project ambry by LinkedIn.
The class AmbryRequests, method handlePutRequest.
public void handlePutRequest(Request request) throws IOException, InterruptedException {
  PutRequest.ReceivedPutRequest receivedRequest =
      PutRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.putBlobRequestQueueTimeInMs.update(requestQueueTime);
  metrics.putBlobRequestRate.mark();
  long startTime = SystemTime.getInstance().milliseconds();
  PutResponse response = null;
  try {
    ServerErrorCode error =
        validateRequest(receivedRequest.getBlobId().getPartition(), RequestOrResponseType.PutRequest);
    if (error != ServerErrorCode.No_Error) {
      logger.error("Validating put request failed with error {} for request {}", error, receivedRequest);
      response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), error);
    } else {
      MessageFormatInputStream stream = new PutMessageFormatInputStream(receivedRequest.getBlobId(),
          receivedRequest.getBlobEncryptionKey(), receivedRequest.getBlobProperties(),
          receivedRequest.getUsermetadata(), receivedRequest.getBlobStream(), receivedRequest.getBlobSize(),
          receivedRequest.getBlobType());
      MessageInfo info = new MessageInfo(receivedRequest.getBlobId(), stream.getSize(), false,
          Utils.addSecondsToEpochTime(receivedRequest.getBlobProperties().getCreationTimeInMs(),
              receivedRequest.getBlobProperties().getTimeToLiveInSeconds()), receivedRequest.getCrc(),
          receivedRequest.getBlobProperties().getAccountId(), receivedRequest.getBlobProperties().getContainerId(),
          receivedRequest.getBlobProperties().getCreationTimeInMs());
      ArrayList<MessageInfo> infoList = new ArrayList<MessageInfo>();
      infoList.add(info);
      MessageFormatWriteSet writeset = new MessageFormatWriteSet(stream, infoList, false);
      Store storeToPut = storageManager.getStore(receivedRequest.getBlobId().getPartition());
      storeToPut.put(writeset);
      response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(),
          ServerErrorCode.No_Error);
      metrics.blobSizeInBytes.update(receivedRequest.getBlobSize());
      metrics.blobUserMetadataSizeInBytes.update(receivedRequest.getUsermetadata().limit());
      if (notification != null) {
        notification.onBlobReplicaCreated(currentNode.getHostname(), currentNode.getPort(),
            receivedRequest.getBlobId().getID(), BlobReplicaSourceType.PRIMARY);
      }
    }
  } catch (StoreException e) {
    logger.error("Store exception on a put with error code " + e.getErrorCode() + " for request "
        + receivedRequest, e);
    if (e.getErrorCode() == StoreErrorCodes.Already_Exist) {
      metrics.idAlreadyExistError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.IOError) {
      metrics.storeIOError.inc();
    } else {
      metrics.unExpectedStorePutError.inc();
    }
    response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(),
        ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
  } catch (Exception e) {
    logger.error("Unknown exception on a put for request " + receivedRequest, e);
    response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(),
        ServerErrorCode.Unknown_Error);
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", receivedRequest, response, processingTime);
    metrics.putBlobProcessingTimeInMs.update(processingTime);
    metrics.updatePutBlobProcessingTimeBySize(receivedRequest.getBlobSize(), processingTime);
  }
  sendPutResponse(requestResponseChannel, response, request, metrics.putBlobResponseQueueTimeInMs,
      metrics.putBlobSendTimeInMs, metrics.putBlobTotalTimeInMs, totalTimeSpent, receivedRequest.getBlobSize(),
      metrics);
}
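The MessageInfo built above records an expiration time computed from the blob's creation time and TTL via Utils.addSecondsToEpochTime. A hedged sketch of that arithmetic; treating Utils.Infinite_Time as the sentinel for "no TTL" is an assumption about how the utility handles a blob that never expires:

// Hedged sketch of the expiration computation used when indexing the put (sentinel handling assumed).
long creationTimeInMs = blobProperties.getCreationTimeInMs();
long ttlInSeconds = blobProperties.getTimeToLiveInSeconds();
long expiresAtMs = (ttlInSeconds == Utils.Infinite_Time)
    ? Utils.Infinite_Time
    : creationTimeInMs + TimeUnit.SECONDS.toMillis(ttlInSeconds);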