use of com.github.ambry.messageformat.MessageFormatWriteSet in project ambry by linkedin.
the class BlobStore method updateTtl.
@Override
public void updateTtl(List<MessageInfo> infosToUpdate) throws StoreException {
  checkStarted();
  checkDuplicates(infosToUpdate);
  final Timer.Context context = metrics.ttlUpdateResponse.time();
  try {
    List<IndexValue> indexValuesToUpdate = new ArrayList<>();
    List<Short> lifeVersions = new ArrayList<>();
    Offset indexEndOffsetBeforeCheck = index.getCurrentEndOffset();
    for (MessageInfo info : infosToUpdate) {
      if (info.getExpirationTimeInMs() != Utils.Infinite_Time) {
        throw new StoreException("BlobStore only supports removing the expiration time", StoreErrorCodes.Update_Not_Allowed);
      }
      IndexValue value = index.findKey(info.getStoreKey(), new FileSpan(index.getStartOffset(), indexEndOffsetBeforeCheck));
      if (value == null) {
        throw new StoreException("Cannot update TTL of " + info.getStoreKey() + " since it's not in the index", StoreErrorCodes.ID_Not_Found);
      } else if (!info.getStoreKey().isAccountContainerMatch(value.getAccountId(), value.getContainerId())) {
        if (config.storeValidateAuthorization) {
          throw new StoreException("UPDATE authorization failure. Key: " + info.getStoreKey() + " AccountId in store: " + value.getAccountId() + " ContainerId in store: " + value.getContainerId(), StoreErrorCodes.Authorization_Failure);
        } else {
          logger.warn("UPDATE authorization failure. Key: {} AccountId in store: {} ContainerId in store: {}", info.getStoreKey(), value.getAccountId(), value.getContainerId());
          metrics.ttlUpdateAuthorizationFailureCount.inc();
        }
      } else if (value.isDelete()) {
        throw new StoreException("Cannot update TTL of " + info.getStoreKey() + " since it is already deleted in the index.", StoreErrorCodes.ID_Deleted);
      } else if (value.isTtlUpdate()) {
        throw new StoreException("TTL of " + info.getStoreKey() + " is already updated in the index.", StoreErrorCodes.Already_Updated);
      } else if (!IndexValue.hasLifeVersion(info.getLifeVersion()) && value.getExpiresAtMs() != Utils.Infinite_Time && value.getExpiresAtMs() < info.getOperationTimeMs() + ttlUpdateBufferTimeMs) {
        // When the request is from replication, we don't care about the operation time.
        throw new StoreException("TTL of " + info.getStoreKey() + " cannot be updated because it is too close to expiry. Op time (ms): " + info.getOperationTimeMs() + ". ExpiresAtMs: " + value.getExpiresAtMs(), StoreErrorCodes.Update_Not_Allowed);
      }
      indexValuesToUpdate.add(value);
      lifeVersions.add(value.getLifeVersion());
    }
    synchronized (storeWriteLock) {
      Offset currentIndexEndOffset = index.getCurrentEndOffset();
      if (!currentIndexEndOffset.equals(indexEndOffsetBeforeCheck)) {
        FileSpan fileSpan = new FileSpan(indexEndOffsetBeforeCheck, currentIndexEndOffset);
        for (MessageInfo info : infosToUpdate) {
          IndexValue value = index.findKey(info.getStoreKey(), fileSpan, EnumSet.allOf(PersistentIndex.IndexEntryType.class));
          if (value != null) {
            if (value.isDelete()) {
              throw new StoreException("Cannot update TTL of " + info.getStoreKey() + " since it is already deleted in the index.", StoreErrorCodes.ID_Deleted);
            } else if (value.isTtlUpdate()) {
              throw new StoreException("TTL of " + info.getStoreKey() + " is already updated in the index.", StoreErrorCodes.Already_Updated);
            }
          }
        }
      }
      List<InputStream> inputStreams = new ArrayList<>(infosToUpdate.size());
      List<MessageInfo> updatedInfos = new ArrayList<>(infosToUpdate.size());
      int i = 0;
      for (MessageInfo info : infosToUpdate) {
        MessageFormatInputStream stream = new TtlUpdateMessageFormatInputStream(info.getStoreKey(), info.getAccountId(), info.getContainerId(), info.getExpirationTimeInMs(), info.getOperationTimeMs(), lifeVersions.get(i));
        // only the stream size needs to change in the updated MessageInfo.
        updatedInfos.add(new MessageInfo(info.getStoreKey(), stream.getSize(), info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), info.getLifeVersion()));
        inputStreams.add(stream);
        i++;
      }
      Offset endOffsetOfLastMessage = log.getEndOffset();
      MessageFormatWriteSet writeSet = new MessageFormatWriteSet(new SequenceInputStream(Collections.enumeration(inputStreams)), updatedInfos, false);
      writeSet.writeTo(log);
      logger.trace("Store : {} ttl update mark written to log", dataDir);
      int correspondingPutIndex = 0;
      for (MessageInfo info : updatedInfos) {
        FileSpan fileSpan = log.getFileSpanForMessage(endOffsetOfLastMessage, info.getSize());
        // A TTL update should always use the same lifeVersion as the previous value of the same key, which is why
        // LIFE_VERSION_FROM_FRONTEND is used here regardless of the lifeVersion in the message info.
        IndexValue ttlUpdateValue = index.markAsPermanent(info.getStoreKey(), fileSpan, null, info.getOperationTimeMs(), MessageInfo.LIFE_VERSION_FROM_FRONTEND);
        endOffsetOfLastMessage = fileSpan.getEndOffset();
        blobStoreStats.handleNewTtlUpdateEntry(info.getStoreKey(), ttlUpdateValue, indexValuesToUpdate.get(correspondingPutIndex++));
      }
      logger.trace("Store : {} ttl update has been marked in the index ", dataDir);
    }
    onSuccess();
  } catch (StoreException e) {
    if (e.getErrorCode() == StoreErrorCodes.IOError) {
      onError();
    }
    throw e;
  } catch (Exception e) {
    throw new StoreException("Unknown error while trying to update ttl of blobs from store " + dataDir, e, StoreErrorCodes.Unknown_Error);
  } finally {
    context.stop();
  }
}
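From the caller's side, the contract above amounts to handing in a MessageInfo whose expiration is already Utils.Infinite_Time. Below is a minimal, hypothetical sketch of such a call; the store, blob id, account/container ids and operation time are placeholders, and the MessageInfo overload mirrors the one StoreCopier uses further down this page.
// Hypothetical sketch of invoking updateTtl; "store", "blobId", "accountId", "containerId"
// and "operationTimeMs" are placeholders, not names from the Ambry codebase.
MessageInfo updateMsgInfo =
    new MessageInfo(blobId, 0, false, true, Utils.Infinite_Time, accountId, containerId, operationTimeMs);
// The expiration must be Utils.Infinite_Time - the store only supports removing the expiration time.
store.updateTtl(Collections.singletonList(updateMsgInfo));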
use of com.github.ambry.messageformat.MessageFormatWriteSet in project ambry by linkedin.
the class StoreCopier method copy.
/**
 * Copies data starting from {@code startToken} until all the data is copied.
 * @param startToken the {@link FindToken} to start copying from. It is expected that the start token does not cause
 *                   the copier to attempt to copy blobs that have already been copied. If that happens, the boolean
 *                   in the return value will be {@code true}.
 * @return a {@link Pair} of the {@link FindToken} until which data has been copied and a {@link Boolean} indicating
 *         whether the source had problems that were skipped over - like duplicates ({@code true} indicates that there were).
 * @throws Exception if there is any exception during processing
 */
public Pair<FindToken, Boolean> copy(FindToken startToken) throws Exception {
  boolean sourceHasProblems = false;
  FindToken lastToken;
  FindToken token = startToken;
  do {
    lastToken = token;
    FindInfo findInfo = src.findEntriesSince(lastToken, fetchSizeInBytes, null, null);
    List<MessageInfo> messageInfos = findInfo.getMessageEntries();
    for (Transformer transformer : transformers) {
      transformer.warmup(messageInfos);
    }
    for (MessageInfo messageInfo : messageInfos) {
      logger.trace("Processing {} - isDeleted: {}, isExpired {}", messageInfo.getStoreKey(), messageInfo.isDeleted(), messageInfo.isExpired());
      if (!messageInfo.isExpired() && !messageInfo.isDeleted()) {
        if (tgt.findMissingKeys(Collections.singletonList(messageInfo.getStoreKey())).size() == 1) {
          StoreInfo storeInfo = src.get(Collections.singletonList(messageInfo.getStoreKey()), EnumSet.allOf(StoreGetOptions.class));
          MessageReadSet readSet = storeInfo.getMessageReadSet();
          if (readSet.sizeInBytes(0) > Integer.MAX_VALUE) {
            throw new IllegalStateException("Cannot copy blobs whose size > Integer.MAX_VALUE");
          }
          int size = (int) readSet.sizeInBytes(0);
          byte[] buf = new byte[size];
          readSet.writeTo(0, new ByteBufferChannel(ByteBuffer.wrap(buf)), 0, size);
          Message message = new Message(storeInfo.getMessageReadSetInfo().get(0), new ByteArrayInputStream(buf));
          for (Transformer transformer : transformers) {
            TransformationOutput tfmOutput = transformer.transform(message);
            if (tfmOutput.getException() != null) {
              throw tfmOutput.getException();
            } else {
              message = tfmOutput.getMsg();
            }
            if (message == null) {
              break;
            }
          }
          if (message == null) {
            logger.trace("Dropping {} because the transformers did not return a message", messageInfo.getStoreKey());
            continue;
          }
          MessageFormatWriteSet writeSet = new MessageFormatWriteSet(message.getStream(), Collections.singletonList(message.getMessageInfo()), false);
          tgt.put(writeSet);
          MessageInfo tgtMsgInfo = message.getMessageInfo();
          if (tgtMsgInfo.isTtlUpdated()) {
            MessageInfo updateMsgInfo = new MessageInfo(tgtMsgInfo.getStoreKey(), 0, false, true, tgtMsgInfo.getExpirationTimeInMs(), tgtMsgInfo.getAccountId(), tgtMsgInfo.getContainerId(), tgtMsgInfo.getOperationTimeMs());
            tgt.updateTtl(Collections.singletonList(updateMsgInfo));
          }
          logger.trace("Copied {} as {}", messageInfo.getStoreKey(), tgtMsgInfo.getStoreKey());
        } else if (!messageInfo.isTtlUpdated()) {
          logger.warn("Found a duplicate entry for {} while copying data", messageInfo.getStoreKey());
          sourceHasProblems = true;
        }
      }
    }
    token = findInfo.getFindToken();
    double percentBytesRead = src.isEmpty() ? 100.0 : token.getBytesRead() * 100.0 / src.getSizeInBytes();
    logger.info("[{}] [{}] {}% copied", Thread.currentThread().getName(), storeId, df.format(percentBytesRead));
  } while (!token.equals(lastToken));
  return new Pair<>(token, sourceHasProblems);
}
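As a rough usage sketch (names are illustrative and the copier and token setup are assumed to exist elsewhere), a caller would hold on to the returned token and treat the boolean as a signal that the source had skippable problems.
// Hypothetical usage; "copier" is a constructed StoreCopier and "startToken" the token to resume from.
Pair<FindToken, Boolean> result = copier.copy(startToken);
// copy() loops internally until it has caught up, so the returned token can be persisted directly.
FindToken tokenToPersist = result.getFirst();
if (result.getSecond()) {
  // true indicates the source had problems that were skipped over, e.g. duplicate entries.
  logger.warn("Source store had entries that were skipped during the copy");
}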
use of com.github.ambry.messageformat.MessageFormatWriteSet in project ambry by linkedin.
the class ReplicaThread method writeMessagesToLocalStoreAndAdvanceTokens.
/**
 * Writes the messages (if any) to the local stores from the remote stores for the missing keys, and advances tokens.
 * @param exchangeMetadataResponseList The list of metadata responses from the remote node
 * @param getResponse The {@link GetResponse} that contains the missing messages. This may be null if there are no
 *                    missing messages to write as per the exchange metadata response. In that case this method will
 *                    simply advance the tokens for every store.
 * @param replicasToReplicatePerNode The list of remote replicas for the remote node
 * @param remoteNode The remote node from which replication needs to happen
 */
private void writeMessagesToLocalStoreAndAdvanceTokens(List<ExchangeMetadataResponse> exchangeMetadataResponseList, GetResponse getResponse, List<RemoteReplicaInfo> replicasToReplicatePerNode, DataNodeId remoteNode) throws IOException {
  int partitionResponseInfoIndex = 0;
  long totalBytesFixed = 0;
  long totalBlobsFixed = 0;
  long startTime = SystemTime.getInstance().milliseconds();
  for (int i = 0; i < exchangeMetadataResponseList.size(); i++) {
    ExchangeMetadataResponse exchangeMetadataResponse = exchangeMetadataResponseList.get(i);
    RemoteReplicaInfo remoteReplicaInfo = replicasToReplicatePerNode.get(i);
    if (exchangeMetadataResponse.serverErrorCode == ServerErrorCode.No_Error) {
      if (exchangeMetadataResponse.missingStoreKeys.size() > 0) {
        PartitionResponseInfo partitionResponseInfo = getResponse.getPartitionResponseInfoList().get(partitionResponseInfoIndex);
        responseHandler.onEvent(remoteReplicaInfo.getReplicaId(), partitionResponseInfo.getErrorCode());
        partitionResponseInfoIndex++;
        if (partitionResponseInfo.getPartition().compareTo(remoteReplicaInfo.getReplicaId().getPartitionId()) != 0) {
          throw new IllegalStateException("The partition id from partitionResponseInfo " + partitionResponseInfo.getPartition() + " and from remoteReplicaInfo " + remoteReplicaInfo.getReplicaId().getPartitionId() + " are not the same");
        }
        if (partitionResponseInfo.getErrorCode() == ServerErrorCode.No_Error) {
          try {
            List<MessageInfo> messageInfoList = partitionResponseInfo.getMessageInfoList();
            logger.trace("Remote node: {} Thread name: {} Remote replica: {} Messages to fix: {} " + "Partition: {} Local mount path: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), exchangeMetadataResponse.missingStoreKeys, remoteReplicaInfo.getReplicaId().getPartitionId(), remoteReplicaInfo.getLocalReplicaId().getMountPath());
            MessageFormatWriteSet writeset = null;
            if (validateMessageStream) {
              MessageSievingInputStream validMessageDetectionInputStream = new MessageSievingInputStream(getResponse.getInputStream(), messageInfoList, storeKeyFactory, metricRegistry);
              if (validMessageDetectionInputStream.hasInvalidMessages()) {
                replicationMetrics.incrementInvalidMessageError(partitionResponseInfo.getPartition());
                logger.error("Out of " + (messageInfoList.size()) + " messages, " + (messageInfoList.size() - validMessageDetectionInputStream.getValidMessageInfoList().size()) + " invalid messages were found in message stream from " + remoteReplicaInfo.getReplicaId());
              }
              messageInfoList = validMessageDetectionInputStream.getValidMessageInfoList();
              if (messageInfoList.size() == 0) {
                logger.error("MessageInfoList is of size 0 as all messages are invalidated ");
              } else {
                writeset = new MessageFormatWriteSet(validMessageDetectionInputStream, messageInfoList, false);
                remoteReplicaInfo.getLocalStore().put(writeset);
              }
            } else {
              writeset = new MessageFormatWriteSet(getResponse.getInputStream(), messageInfoList, true);
              remoteReplicaInfo.getLocalStore().put(writeset);
            }
            for (MessageInfo messageInfo : messageInfoList) {
              totalBytesFixed += messageInfo.getSize();
              logger.trace("Remote node: {} Thread name: {} Remote replica: {} Message replicated: {} Partition: {} " + "Local mount path: {} Message size: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey(), remoteReplicaInfo.getReplicaId().getPartitionId(), remoteReplicaInfo.getLocalReplicaId().getMountPath(), messageInfo.getSize());
              if (notification != null) {
                notification.onBlobReplicaCreated(dataNodeId.getHostname(), dataNodeId.getPort(), messageInfo.getStoreKey().getID(), BlobReplicaSourceType.REPAIRED);
              }
            }
            totalBlobsFixed += messageInfoList.size();
            remoteReplicaInfo.setToken(exchangeMetadataResponse.remoteToken);
            remoteReplicaInfo.setLocalLagFromRemoteInBytes(exchangeMetadataResponse.localLagFromRemoteInBytes);
            logger.trace("Remote node: {} Thread name: {} Remote replica: {} Token after speaking to remote node: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), exchangeMetadataResponse.remoteToken);
          } catch (StoreException e) {
            if (e.getErrorCode() != StoreErrorCodes.Already_Exist) {
              replicationMetrics.updateLocalStoreError(remoteReplicaInfo.getReplicaId());
              logger.error("Remote node: " + remoteNode + " Thread name: " + threadName + " Remote replica: " + remoteReplicaInfo.getReplicaId(), e);
            }
          }
        } else if (partitionResponseInfo.getErrorCode() == ServerErrorCode.Blob_Deleted) {
          replicationMetrics.blobDeletedOnGetCount.inc();
          logger.trace("One of the blobs to GET is deleted: Remote node: {} Thread name: {} Remote replica: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId());
        } else {
          replicationMetrics.updateGetRequestError(remoteReplicaInfo.getReplicaId());
          logger.error("Remote node: {} Thread name: {} Remote replica: {} Server error: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), partitionResponseInfo.getErrorCode());
        }
      } else {
        // There are no missing keys. We just advance the token.
        remoteReplicaInfo.setToken(exchangeMetadataResponse.remoteToken);
        remoteReplicaInfo.setLocalLagFromRemoteInBytes(exchangeMetadataResponse.localLagFromRemoteInBytes);
        logger.trace("Remote node: {} Thread name: {} Remote replica: {} Token after speaking to remote node: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), exchangeMetadataResponse.remoteToken);
      }
    }
  }
  long batchStoreWriteTime = SystemTime.getInstance().milliseconds() - startTime;
  replicationMetrics.updateBatchStoreWriteTime(batchStoreWriteTime, totalBytesFixed, totalBlobsFixed, replicatingFromRemoteColo, replicatingOverSsl, datacenterName);
}
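The two MessageFormatWriteSet constructions in this method differ only in the stream they wrap and the final boolean argument. Below is a condensed, hypothetical restatement of that branch; rawStream, infos, keyFactory, registry and localStore stand in for getResponse.getInputStream(), the message info list, the store key factory, the metric registry and remoteReplicaInfo.getLocalStore().
// Hypothetical condensation of the branch above - all names are placeholders.
MessageFormatWriteSet writeSet;
if (validateMessageStream) {
  // sieve out invalid messages before writing to the local store
  MessageSievingInputStream sievedStream = new MessageSievingInputStream(rawStream, infos, keyFactory, registry);
  writeSet = new MessageFormatWriteSet(sievedStream, sievedStream.getValidMessageInfoList(), false);
} else {
  writeSet = new MessageFormatWriteSet(rawStream, infos, true);
}
localStore.put(writeSet);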
use of com.github.ambry.messageformat.MessageFormatWriteSet in project ambry by linkedin.
the class ReplicaThread method processReplicaMetadataResponse.
/**
 * Takes the missing keys and the message list from the remote store and identifies messages that are deleted
 * on the remote store and updates them locally. Also, if the message that is missing is deleted in the remote
 * store, we remove the message from the list of missing keys.
 * @param missingStoreKeys The list of keys missing from the local store
 * @param replicaMetadataResponseInfo The replica metadata response from the remote store
 * @param remoteReplicaInfo The remote replica that is being replicated from
 * @param remoteNode The remote node from which replication needs to happen
 * @throws IOException
 * @throws StoreException
 * @throws MessageFormatException
 */
private void processReplicaMetadataResponse(Set<StoreKey> missingStoreKeys, ReplicaMetadataResponseInfo replicaMetadataResponseInfo, RemoteReplicaInfo remoteReplicaInfo, DataNodeId remoteNode) throws IOException, StoreException, MessageFormatException {
  long startTime = SystemTime.getInstance().milliseconds();
  List<MessageInfo> messageInfoList = replicaMetadataResponseInfo.getMessageInfoList();
  for (MessageInfo messageInfo : messageInfoList) {
    BlobId blobId = (BlobId) messageInfo.getStoreKey();
    if (remoteReplicaInfo.getLocalReplicaId().getPartitionId().compareTo(blobId.getPartition()) != 0) {
      throw new IllegalStateException("Blob id is not in the expected partition Actual partition " + blobId.getPartition() + " Expected partition " + remoteReplicaInfo.getLocalReplicaId().getPartitionId());
    }
    if (!missingStoreKeys.contains(messageInfo.getStoreKey())) {
      // the key is present in the local store; write a delete record locally if it is deleted in the remote
      // store but not deleted yet locally.
      if (messageInfo.isDeleted() && !remoteReplicaInfo.getLocalStore().isKeyDeleted(messageInfo.getStoreKey())) {
        MessageFormatInputStream deleteStream = new DeleteMessageFormatInputStream(messageInfo.getStoreKey(), messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs());
        MessageInfo info = new MessageInfo(messageInfo.getStoreKey(), deleteStream.getSize(), true, messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs());
        ArrayList<MessageInfo> infoList = new ArrayList<MessageInfo>();
        infoList.add(info);
        MessageFormatWriteSet writeset = new MessageFormatWriteSet(deleteStream, infoList, false);
        try {
          remoteReplicaInfo.getLocalStore().delete(writeset);
          logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key deleted. mark for deletion id: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
        } catch (StoreException e) {
          // the key may already be deleted locally; this can happen when delete
          // messages are received from different replicas around the same time.
          if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
            logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key already deleted: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
          } else {
            throw e;
          }
        }
        // notify that the blob replica was deleted, as long as the Delete is guaranteed to have taken effect locally.
        if (notification != null) {
          notification.onBlobReplicaDeleted(dataNodeId.getHostname(), dataNodeId.getPort(), messageInfo.getStoreKey().getID(), BlobReplicaSourceType.REPAIRED);
        }
      }
    } else {
      if (messageInfo.isDeleted()) {
        // if the key is not present locally and if the remote replica has the message in deleted state,
        // it is not considered missing locally.
        missingStoreKeys.remove(messageInfo.getStoreKey());
        logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key in deleted state remotely: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
        // notify that the blob replica was deleted, as long as the Delete is guaranteed to have taken effect locally.
        if (notification != null) {
          notification.onBlobReplicaDeleted(dataNodeId.getHostname(), dataNodeId.getPort(), messageInfo.getStoreKey().getID(), BlobReplicaSourceType.REPAIRED);
        }
      } else if (messageInfo.isExpired()) {
        // if the key is not present locally and if the remote replica has the key as expired,
        // it is not considered missing locally.
        missingStoreKeys.remove(messageInfo.getStoreKey());
        logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key in expired state remotely {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
      }
    }
  }
  if (replicatingFromRemoteColo) {
    replicationMetrics.interColoProcessMetadataResponseTime.get(datacenterName).update(SystemTime.getInstance().milliseconds() - startTime);
  } else {
    replicationMetrics.intraColoProcessMetadataResponseTime.update(SystemTime.getInstance().milliseconds() - startTime);
  }
}
use of com.github.ambry.messageformat.MessageFormatWriteSet in project ambry by linkedin.
the class AmbryRequests method handleDeleteRequest.
public void handleDeleteRequest(Request request) throws IOException, InterruptedException {
  DeleteRequest deleteRequest = DeleteRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.deleteBlobRequestQueueTimeInMs.update(requestQueueTime);
  metrics.deleteBlobRequestRate.mark();
  long startTime = SystemTime.getInstance().milliseconds();
  DeleteResponse response = null;
  try {
    ServerErrorCode error = validateRequest(deleteRequest.getBlobId().getPartition(), RequestOrResponseType.DeleteRequest);
    if (error != ServerErrorCode.No_Error) {
      logger.error("Validating delete request failed with error {} for request {}", error, deleteRequest);
      response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), error);
    } else {
      MessageFormatInputStream stream = new DeleteMessageFormatInputStream(deleteRequest.getBlobId(), deleteRequest.getAccountId(), deleteRequest.getContainerId(), deleteRequest.getDeletionTimeInMs());
      MessageInfo info = new MessageInfo(deleteRequest.getBlobId(), stream.getSize(), deleteRequest.getAccountId(), deleteRequest.getContainerId(), deleteRequest.getDeletionTimeInMs());
      ArrayList<MessageInfo> infoList = new ArrayList<MessageInfo>();
      infoList.add(info);
      MessageFormatWriteSet writeset = new MessageFormatWriteSet(stream, infoList, false);
      Store storeToDelete = storageManager.getStore(deleteRequest.getBlobId().getPartition());
      storeToDelete.delete(writeset);
      response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.No_Error);
      if (notification != null) {
        notification.onBlobReplicaDeleted(currentNode.getHostname(), currentNode.getPort(), deleteRequest.getBlobId().getID(), BlobReplicaSourceType.PRIMARY);
      }
    }
  } catch (StoreException e) {
    boolean logInErrorLevel = false;
    if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
      metrics.idNotFoundError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
      metrics.ttlExpiredError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
      metrics.idDeletedError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
      metrics.deleteAuthorizationFailure.inc();
    } else {
      logInErrorLevel = true;
      metrics.unExpectedStoreDeleteError.inc();
    }
    if (logInErrorLevel) {
      logger.error("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
    } else {
      logger.trace("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
    }
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
  } catch (Exception e) {
    logger.error("Unknown exception for delete request " + deleteRequest, e);
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.Unknown_Error);
    metrics.unExpectedStoreDeleteError.inc();
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", deleteRequest, response, processingTime);
    metrics.deleteBlobProcessingTimeInMs.update(processingTime);
  }
  requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(metrics.deleteBlobResponseQueueTimeInMs, metrics.deleteBlobSendTimeInMs, metrics.deleteBlobTotalTimeInMs, null, null, totalTimeSpent));
}
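All of the usages on this page follow the same shape: build a MessageFormatInputStream for the record, describe it with a MessageInfo sized from the stream, wrap both in a MessageFormatWriteSet, and hand the set to a store operation. The sketch below is a minimal, hypothetical distillation of that pattern, using only constructors that appear above; the ids and deletion time are placeholders.
// Hypothetical distillation of the write-set pattern used throughout this page.
MessageFormatInputStream stream =
    new DeleteMessageFormatInputStream(blobId, accountId, containerId, deletionTimeMs);
MessageInfo info = new MessageInfo(blobId, stream.getSize(), accountId, containerId, deletionTimeMs);
MessageFormatWriteSet writeSet = new MessageFormatWriteSet(stream, Collections.singletonList(info), false);
store.delete(writeSet);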