Use of com.github.ambry.store.StoreException in project ambry by LinkedIn.
The class AmbryRequests, method handleReplicaMetadataRequest.
public void handleReplicaMetadataRequest(Request request) throws IOException, InterruptedException {
  ReplicaMetadataRequest replicaMetadataRequest =
      ReplicaMetadataRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap, findTokenFactory);
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.replicaMetadataRequestQueueTimeInMs.update(requestQueueTime);
  metrics.replicaMetadataRequestRate.mark();
  List<ReplicaMetadataRequestInfo> replicaMetadataRequestInfoList =
      replicaMetadataRequest.getReplicaMetadataRequestInfoList();
  int partitionCnt = replicaMetadataRequestInfoList.size();
  long startTimeInMs = SystemTime.getInstance().milliseconds();
  ReplicaMetadataResponse response = null;
  try {
    List<ReplicaMetadataResponseInfo> replicaMetadataResponseList =
        new ArrayList<ReplicaMetadataResponseInfo>(partitionCnt);
    // Each partition is handled independently; a failure on one partition is
    // reported in that partition's response info without failing the request.
    for (ReplicaMetadataRequestInfo replicaMetadataRequestInfo : replicaMetadataRequestInfoList) {
      long partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
      PartitionId partitionId = replicaMetadataRequestInfo.getPartitionId();
      ServerErrorCode error = validateRequest(partitionId, RequestOrResponseType.ReplicaMetadataRequest);
      logger.trace("{} Time used to validate metadata request: {}", partitionId,
          (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
      if (error != ServerErrorCode.No_Error) {
        logger.error("Validating replica metadata request failed with error {} for partition {}", error, partitionId);
        ReplicaMetadataResponseInfo replicaMetadataResponseInfo = new ReplicaMetadataResponseInfo(partitionId, error);
        replicaMetadataResponseList.add(replicaMetadataResponseInfo);
      } else {
        try {
          FindToken findToken = replicaMetadataRequestInfo.getToken();
          String hostName = replicaMetadataRequestInfo.getHostName();
          String replicaPath = replicaMetadataRequestInfo.getReplicaPath();
          Store store = storageManager.getStore(partitionId);
          partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
          FindInfo findInfo =
              store.findEntriesSince(findToken, replicaMetadataRequest.getMaxTotalSizeOfEntriesInBytes());
          logger.trace("{} Time used to find entry since: {}", partitionId,
              (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
          partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
          replicationManager.updateTotalBytesReadByRemoteReplica(partitionId, hostName, replicaPath,
              findInfo.getFindToken().getBytesRead());
          logger.trace("{} Time used to update total bytes read: {}", partitionId,
              (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
          partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
          long remoteReplicaLagInBytes =
              replicationManager.getRemoteReplicaLagFromLocalInBytes(partitionId, hostName, replicaPath);
          logger.trace("{} Time used to get remote replica lag in bytes: {}", partitionId,
              (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
          ReplicaMetadataResponseInfo replicaMetadataResponseInfo =
              new ReplicaMetadataResponseInfo(partitionId, findInfo.getFindToken(), findInfo.getMessageEntries(),
                  remoteReplicaLagInBytes);
          replicaMetadataResponseList.add(replicaMetadataResponseInfo);
        } catch (StoreException e) {
          logger.error("Store exception on a replica metadata request with error code " + e.getErrorCode()
              + " for partition " + partitionId, e);
          if (e.getErrorCode() == StoreErrorCodes.IOError) {
            metrics.storeIOError.inc();
          } else {
            metrics.unExpectedStoreFindEntriesError.inc();
          }
          // Map the store-level error to a wire-level error for this partition only.
          ReplicaMetadataResponseInfo replicaMetadataResponseInfo =
              new ReplicaMetadataResponseInfo(partitionId, ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
          replicaMetadataResponseList.add(replicaMetadataResponseInfo);
        }
      }
    }
    response = new ReplicaMetadataResponse(replicaMetadataRequest.getCorrelationId(),
        replicaMetadataRequest.getClientId(), ServerErrorCode.No_Error, replicaMetadataResponseList);
  } catch (Exception e) {
    logger.error("Unknown exception for request " + replicaMetadataRequest, e);
    response = new ReplicaMetadataResponse(replicaMetadataRequest.getCorrelationId(),
        replicaMetadataRequest.getClientId(), ServerErrorCode.Unknown_Error);
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTimeInMs;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", replicaMetadataRequest, response, processingTime);
    logger.trace("{} {} processingTime {}", replicaMetadataRequest, response, processingTime);
    metrics.replicaMetadataRequestProcessingTimeInMs.update(processingTime);
  }
  requestResponseChannel.sendResponse(response, request,
      new ServerNetworkResponseMetrics(metrics.replicaMetadataResponseQueueTimeInMs,
          metrics.replicaMetadataSendTimeInMs, metrics.replicaMetadataTotalTimeInMs, null, null, totalTimeSpent));
}
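Note the error-handling pattern: a StoreException raised while reading one partition is converted with ErrorMapping.getStoreErrorMapping into that partition's ReplicaMetadataResponseInfo, while the overall response still reports No_Error. Below is a minimal, self-contained sketch of this per-partition error isolation; the enum values, the toWireError function, and the PartitionResult record are hypothetical stand-ins, not Ambry types.

import java.util.ArrayList;
import java.util.List;

public class PerPartitionErrorSketch {
  // Hypothetical stand-ins for Ambry's StoreErrorCodes and ServerErrorCode.
  enum StoreError { IOError, Unknown }
  enum WireError { No_Error, IO_Error, Unknown_Error }

  static class StoreException extends Exception {
    final StoreError code;
    StoreException(StoreError code) { this.code = code; }
  }

  // Plays the role of ErrorMapping.getStoreErrorMapping: translate a
  // store-level error code into the code sent back on the wire.
  static WireError toWireError(StoreError code) {
    return code == StoreError.IOError ? WireError.IO_Error : WireError.Unknown_Error;
  }

  record PartitionResult(String partition, WireError error) { }

  public static void main(String[] args) {
    List<PartitionResult> results = new ArrayList<>();
    for (String partition : List.of("p0", "p1")) {
      try {
        if (partition.equals("p1")) {
          throw new StoreException(StoreError.IOError); // simulate one failing store
        }
        results.add(new PartitionResult(partition, WireError.No_Error));
      } catch (StoreException e) {
        // One partition's failure becomes an error entry for that partition
        // only; the loop and the overall response keep going.
        results.add(new PartitionResult(partition, toWireError(e.code)));
      }
    }
    results.forEach(System.out::println);
  }
}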
Use of com.github.ambry.store.StoreException in project ambry by LinkedIn.
The class AmbryRequests, method handleGetRequest.
public void handleGetRequest(Request request) throws IOException, InterruptedException {
  GetRequest getRequest = GetRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
  Histogram responseQueueTime = null;
  Histogram responseSendTime = null;
  Histogram responseTotalTime = null;
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  // Select the per-flag request metrics up front; the response-side histograms
  // are handed to sendGetResponse at the end.
  if (getRequest.getMessageFormatFlag() == MessageFormatFlags.Blob) {
    metrics.getBlobRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobRequestRate.mark();
    responseQueueTime = metrics.getBlobResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobSendTimeInMs;
    responseTotalTime = metrics.getBlobTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobProperties) {
    metrics.getBlobPropertiesRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobPropertiesRequestRate.mark();
    responseQueueTime = metrics.getBlobPropertiesResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobPropertiesSendTimeInMs;
    responseTotalTime = metrics.getBlobPropertiesTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobUserMetadata) {
    metrics.getBlobUserMetadataRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobUserMetadataRequestRate.mark();
    responseQueueTime = metrics.getBlobUserMetadataResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobUserMetadataSendTimeInMs;
    responseTotalTime = metrics.getBlobUserMetadataTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobInfo) {
    metrics.getBlobInfoRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobInfoRequestRate.mark();
    responseQueueTime = metrics.getBlobInfoResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobInfoSendTimeInMs;
    responseTotalTime = metrics.getBlobInfoTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.All) {
    metrics.getBlobAllRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobAllRequestRate.mark();
    responseQueueTime = metrics.getBlobAllResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobAllSendTimeInMs;
    responseTotalTime = metrics.getBlobAllTotalTimeInMs;
  }
  long startTime = SystemTime.getInstance().milliseconds();
  GetResponse response = null;
  try {
    List<Send> messagesToSendList = new ArrayList<Send>(getRequest.getPartitionInfoList().size());
    List<PartitionResponseInfo> partitionResponseInfoList =
        new ArrayList<PartitionResponseInfo>(getRequest.getPartitionInfoList().size());
    for (PartitionRequestInfo partitionRequestInfo : getRequest.getPartitionInfoList()) {
      ServerErrorCode error = validateRequest(partitionRequestInfo.getPartition(), RequestOrResponseType.GetRequest);
      if (error != ServerErrorCode.No_Error) {
        logger.error("Validating get request failed for partition {} with error {}",
            partitionRequestInfo.getPartition(), error);
        PartitionResponseInfo partitionResponseInfo =
            new PartitionResponseInfo(partitionRequestInfo.getPartition(), error);
        partitionResponseInfoList.add(partitionResponseInfo);
      } else {
        try {
          Store storeToGet = storageManager.getStore(partitionRequestInfo.getPartition());
          EnumSet<StoreGetOptions> storeGetOptions = EnumSet.noneOf(StoreGetOptions.class);
          // Currently only one option is supported.
          if (getRequest.getGetOption() == GetOption.Include_Expired_Blobs) {
            storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Expired);
          }
          if (getRequest.getGetOption() == GetOption.Include_Deleted_Blobs) {
            storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Deleted);
          }
          if (getRequest.getGetOption() == GetOption.Include_All) {
            storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Deleted, StoreGetOptions.Store_Include_Expired);
          }
          StoreInfo info = storeToGet.get(partitionRequestInfo.getBlobIds(), storeGetOptions);
          MessageFormatSend blobsToSend =
              new MessageFormatSend(info.getMessageReadSet(), getRequest.getMessageFormatFlag(),
                  messageFormatMetrics, storeKeyFactory);
          PartitionResponseInfo partitionResponseInfo =
              new PartitionResponseInfo(partitionRequestInfo.getPartition(), info.getMessageReadSetInfo(),
                  blobsToSend.getMessageMetadataList());
          messagesToSendList.add(blobsToSend);
          partitionResponseInfoList.add(partitionResponseInfo);
        } catch (StoreException e) {
          boolean logInErrorLevel = false;
          if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
            metrics.idNotFoundError.inc();
          } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
            metrics.ttlExpiredError.inc();
          } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
            metrics.idDeletedError.inc();
          } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
            metrics.getAuthorizationFailure.inc();
          } else {
            metrics.unExpectedStoreGetError.inc();
            logInErrorLevel = true;
          }
          // Expected misses (not found, expired, deleted, unauthorized) are logged
          // at trace; only unexpected store errors are logged at error level.
          if (logInErrorLevel) {
            logger.error("Store exception on a get with error code {} for partition {}", e.getErrorCode(),
                partitionRequestInfo.getPartition(), e);
          } else {
            logger.trace("Store exception on a get with error code {} for partition {}", e.getErrorCode(),
                partitionRequestInfo.getPartition(), e);
          }
          PartitionResponseInfo partitionResponseInfo =
              new PartitionResponseInfo(partitionRequestInfo.getPartition(),
                  ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
          partitionResponseInfoList.add(partitionResponseInfo);
        } catch (MessageFormatException e) {
          logger.error("Message format exception on a get with error code " + e.getErrorCode()
              + " for partitionRequestInfo " + partitionRequestInfo, e);
          if (e.getErrorCode() == MessageFormatErrorCodes.Data_Corrupt) {
            metrics.dataCorruptError.inc();
          } else if (e.getErrorCode() == MessageFormatErrorCodes.Unknown_Format_Version) {
            metrics.unknownFormatError.inc();
          }
          PartitionResponseInfo partitionResponseInfo =
              new PartitionResponseInfo(partitionRequestInfo.getPartition(),
                  ErrorMapping.getMessageFormatErrorMapping(e.getErrorCode()));
          partitionResponseInfoList.add(partitionResponseInfo);
        }
      }
    }
    CompositeSend compositeSend = new CompositeSend(messagesToSendList);
    response = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), partitionResponseInfoList,
        compositeSend, ServerErrorCode.No_Error);
  } catch (Exception e) {
    logger.error("Unknown exception for request " + getRequest, e);
    response = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), ServerErrorCode.Unknown_Error);
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", getRequest, response, processingTime);
    if (getRequest.getMessageFormatFlag() == MessageFormatFlags.Blob) {
      metrics.getBlobProcessingTimeInMs.update(processingTime);
      metrics.updateGetBlobProcessingTimeBySize(response.sizeInBytes(), processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobProperties) {
      metrics.getBlobPropertiesProcessingTimeInMs.update(processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobUserMetadata) {
      metrics.getBlobUserMetadataProcessingTimeInMs.update(processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobInfo) {
      metrics.getBlobInfoProcessingTimeInMs.update(processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.All) {
      metrics.getBlobAllProcessingTimeInMs.update(processingTime);
      metrics.updateGetBlobProcessingTimeBySize(response.sizeInBytes(), processingTime);
    }
  }
  sendGetResponse(requestResponseChannel, response, request, responseQueueTime, responseSendTime, responseTotalTime,
      totalTimeSpent, response.sizeInBytes(), getRequest.getMessageFormatFlag(), metrics);
}
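One small detail above is how the request-level GetOption is translated into the store-level StoreGetOptions set before calling storeToGet.get. The same mapping can be read as a single switch; the helper below is a hypothetical restatement for clarity, with local stand-in enums rather than the real Ambry types.

import java.util.EnumSet;

public class GetOptionMappingSketch {
  // Stand-ins for Ambry's GetOption and StoreGetOptions enums.
  enum GetOption { None, Include_Expired_Blobs, Include_Deleted_Blobs, Include_All }
  enum StoreGetOptions { Store_Include_Expired, Store_Include_Deleted }

  // Hypothetical helper collapsing the if-chain in handleGetRequest.
  static EnumSet<StoreGetOptions> toStoreGetOptions(GetOption option) {
    switch (option) {
      case Include_Expired_Blobs:
        return EnumSet.of(StoreGetOptions.Store_Include_Expired);
      case Include_Deleted_Blobs:
        return EnumSet.of(StoreGetOptions.Store_Include_Deleted);
      case Include_All:
        return EnumSet.of(StoreGetOptions.Store_Include_Deleted, StoreGetOptions.Store_Include_Expired);
      default:
        return EnumSet.noneOf(StoreGetOptions.class);
    }
  }

  public static void main(String[] args) {
    // Prints [Store_Include_Expired, Store_Include_Deleted] (EnumSet uses declaration order).
    System.out.println(toStoreGetOptions(GetOption.Include_All));
  }
}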
Use of com.github.ambry.store.StoreException in project ambry by LinkedIn.
The class StatsManager, method collectAndAggregate.
/**
 * Fetch and aggregate stats from a given {@link Store}.
 * @param aggregatedSnapshot the {@link StatsSnapshot} to hold the aggregated result
 * @param partitionId specifies the {@link Store} to fetch from
 * @param unreachableStores a {@link List} to which the ids of partitions whose stats could not be fetched are added
 */
void collectAndAggregate(StatsSnapshot aggregatedSnapshot, PartitionId partitionId, List<String> unreachableStores) {
  Store store = storageManager.getStore(partitionId);
  if (store == null) {
    unreachableStores.add(partitionId.toString());
  } else {
    try {
      long fetchAndAggregatePerStoreStartTimeMs = time.milliseconds();
      StatsSnapshot statsSnapshot = store.getStoreStats().getStatsSnapshot(time.milliseconds());
      StatsSnapshot.aggregate(aggregatedSnapshot, statsSnapshot);
      metrics.fetchAndAggregateTimePerStoreMs.update(time.milliseconds() - fetchAndAggregatePerStoreStartTimeMs);
    } catch (StoreException e) {
      unreachableStores.add(partitionId.toString());
    }
  }
}
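The contract here is that neither a missing store nor a StoreException aborts the stats sweep: the partition is recorded in unreachableStores and aggregation continues. A self-contained sketch of that sweep, using plain functions and a RuntimeException in place of Store and StoreException (all names hypothetical):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class StatsSweepSketch {
  public static void main(String[] args) {
    // Hypothetical per-partition fetchers: an absent entry models a missing
    // store, a thrown RuntimeException models a StoreException.
    Map<String, Function<String, Long>> stores = new HashMap<>();
    stores.put("p0", p -> 10L);
    stores.put("p1", p -> { throw new RuntimeException("store error"); });

    long aggregate = 0;
    List<String> unreachable = new ArrayList<>();
    for (String partition : List.of("p0", "p1", "p2")) {
      Function<String, Long> store = stores.get(partition);
      if (store == null) {
        unreachable.add(partition); // p2: no store hosted here
        continue;
      }
      try {
        aggregate += store.apply(partition);
      } catch (RuntimeException e) {
        unreachable.add(partition); // p1: fetch failed, keep sweeping
      }
    }
    System.out.println("aggregate=" + aggregate + " unreachable=" + unreachable);
  }
}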
Use of com.github.ambry.store.StoreException in project ambry by LinkedIn.
The class StatsManager, method collectAndAggregateAccountStorageStats.
/**
 * Fetch and aggregate account stats from a given {@link Store}.
 * @param hostStorageStatsMap map from partition id to account id to per-container storage stats
 * @param partitionId specifies the {@link Store} to fetch from
 * @param unreachablePartitions a {@link List} to which partitions whose stats could not be fetched are added
 */
void collectAndAggregateAccountStorageStats(
    Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> hostStorageStatsMap, PartitionId partitionId,
    List<PartitionId> unreachablePartitions) {
  Store store = storageManager.getStore(partitionId, false);
  if (store == null) {
    unreachablePartitions.add(partitionId);
  } else {
    try {
      long fetchAndAggregatePerStoreStartTimeMs = time.milliseconds();
      StoreStats storeStats = store.getStoreStats();
      Map<Short, Map<Short, ContainerStorageStats>> containerStatsMap =
          storeStats.getContainerStorageStats(time.milliseconds(), publishExcludeAccountIds);
      hostStorageStatsMap.put(partitionId.getId(), containerStatsMap);
      metrics.fetchAndAggregateTimePerStoreMs.update(time.milliseconds() - fetchAndAggregatePerStoreStartTimeMs);
      // update delete tombstone stats
      updateDeleteTombstoneStats(storeStats);
    } catch (StoreException e) {
      unreachablePartitions.add(partitionId);
    }
  }
}
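hostStorageStatsMap is nested three levels deep: partition id, then account id, then container id. A sketch of writing and reading one entry, with a hypothetical ContainerStats record standing in for ContainerStorageStats:

import java.util.HashMap;
import java.util.Map;

public class HostStorageStatsSketch {
  // Hypothetical stand-in for ContainerStorageStats.
  record ContainerStats(long logicalBytes) { }

  public static void main(String[] args) {
    Map<Long, Map<Short, Map<Short, ContainerStats>>> hostStorageStatsMap = new HashMap<>();
    // Record stats for partition 7, account 101, container 1.
    hostStorageStatsMap
        .computeIfAbsent(7L, p -> new HashMap<>())
        .computeIfAbsent((short) 101, a -> new HashMap<>())
        .put((short) 1, new ContainerStats(4096));
    ContainerStats stats = hostStorageStatsMap.get(7L).get((short) 101).get((short) 1);
    System.out.println("partition 7 / account 101 / container 1 -> " + stats);
  }
}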
Use of com.github.ambry.store.StoreException in project ambry by LinkedIn.
The class StatsManager, method collectAndAggregatePartitionClassStorageStats.
/**
 * Fetch and aggregate partition class stats from a given {@link Store}.
 * @param hostPartitionClassStorageStatsMap map from partition class name to per-partition storage stats
 * @param partitionId specifies the {@link Store} to fetch from
 * @param unreachablePartitions a {@link List} to which partitions whose stats could not be fetched are added
 */
void collectAndAggregatePartitionClassStorageStats(
    Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> hostPartitionClassStorageStatsMap,
    PartitionId partitionId, List<PartitionId> unreachablePartitions) {
  Store store = storageManager.getStore(partitionId, false);
  if (store == null) {
    unreachablePartitions.add(partitionId);
  } else {
    try {
      long fetchAndAggregatePerStoreStartTimeMs = time.milliseconds();
      StoreStats storeStats = store.getStoreStats();
      Map<Short, Map<Short, ContainerStorageStats>> containerStatsMap =
          storeStats.getContainerStorageStats(time.milliseconds(), publishExcludeAccountIds);
      String partitionClassName = partitionId.getPartitionClass();
      hostPartitionClassStorageStatsMap.computeIfAbsent(partitionClassName, k -> new HashMap<>())
          .put(partitionId.getId(), containerStatsMap);
      metrics.fetchAndAggregateTimePerStoreMs.update(time.milliseconds() - fetchAndAggregatePerStoreStartTimeMs);
    } catch (StoreException e) {
      unreachablePartitions.add(partitionId);
    }
  }
}
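Compared with the account-stats variant above, the only addition is the extra level of grouping by partition class, done with computeIfAbsent so the inner map is created on first use. That idiom in isolation, with the stats values simplified to plain longs:

import java.util.HashMap;
import java.util.Map;

public class PartitionClassGroupingSketch {
  public static void main(String[] args) {
    Map<String, Map<Long, Long>> statsByClass = new HashMap<>();
    // Group per-partition values under their partition class, creating the
    // inner map on first use, exactly as in the method above.
    statsByClass.computeIfAbsent("default", k -> new HashMap<>()).put(7L, 4096L);
    statsByClass.computeIfAbsent("default", k -> new HashMap<>()).put(8L, 1024L);
    statsByClass.computeIfAbsent("max-replicas", k -> new HashMap<>()).put(9L, 2048L);
    System.out.println(statsByClass);
  }
}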