
Example 1 with ServerErrorCode

Use of com.github.ambry.server.ServerErrorCode in project ambry by linkedin.

From the class AmbryRequests, method handleUndeleteRequest:

@Override
public void handleUndeleteRequest(NetworkRequest request) throws IOException, InterruptedException {
    UndeleteRequest undeleteRequest;
    if (request instanceof LocalChannelRequest) {
        // This is a case where handleUndeleteRequest is called when frontends are talking to Azure. In this case, this method
        // is called by request handler threads running within the frontend router itself. So, the request can be directly
        // referenced as java objects without any need for deserialization.
        undeleteRequest = (UndeleteRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
    } else {
        undeleteRequest = UndeleteRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
    }
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    metrics.undeleteBlobRequestQueueTimeInMs.update(requestQueueTime);
    metrics.undeleteBlobRequestRate.mark();
    long startTime = SystemTime.getInstance().milliseconds();
    UndeleteResponse response = null;
    Store storeToUndelete;
    StoreKey convertedStoreKey;
    try {
        convertedStoreKey = getConvertedStoreKeys(Collections.singletonList(undeleteRequest.getBlobId())).get(0);
        ServerErrorCode error = validateRequest(undeleteRequest.getBlobId().getPartition(), RequestOrResponseType.UndeleteRequest, false);
        if (error != ServerErrorCode.No_Error) {
            logger.error("Validating undelete request failed with error {} for request {}", error, undeleteRequest);
            response = new UndeleteResponse(undeleteRequest.getCorrelationId(), undeleteRequest.getClientId(), error);
        } else {
            BlobId convertedBlobId = (BlobId) convertedStoreKey;
            MessageInfo info = new MessageInfo.Builder(convertedBlobId, -1, convertedBlobId.getAccountId(), convertedBlobId.getContainerId(), undeleteRequest.getOperationTimeMs()).isUndeleted(true).lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND).build();
            storeToUndelete = storeManager.getStore(undeleteRequest.getBlobId().getPartition());
            short lifeVersion = storeToUndelete.undelete(info);
            response = new UndeleteResponse(undeleteRequest.getCorrelationId(), undeleteRequest.getClientId(), lifeVersion);
            if (notification != null) {
                notification.onBlobReplicaUndeleted(currentNode.getHostname(), currentNode.getPort(), convertedStoreKey.getID(), BlobReplicaSourceType.PRIMARY);
            }
        }
    } catch (StoreException e) {
        boolean logInErrorLevel = false;
        if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
            metrics.idNotFoundError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
            metrics.ttlExpiredError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted_Permanently) {
            metrics.idDeletedError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.Life_Version_Conflict) {
            metrics.lifeVersionConflictError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.ID_Not_Deleted) {
            metrics.idNotDeletedError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.ID_Undeleted) {
            metrics.idUndeletedError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
            metrics.undeleteAuthorizationFailure.inc();
        } else {
            logInErrorLevel = true;
            metrics.unExpectedStoreUndeleteError.inc();
        }
        if (logInErrorLevel) {
            logger.error("Store exception on a undelete with error code {} for request {}", e.getErrorCode(), undeleteRequest, e);
        } else {
            logger.trace("Store exception on a undelete with error code {} for request {}", e.getErrorCode(), undeleteRequest, e);
        }
        if (e.getErrorCode() == StoreErrorCodes.ID_Undeleted) {
            if (e instanceof IdUndeletedStoreException) {
                response = new UndeleteResponse(undeleteRequest.getCorrelationId(), undeleteRequest.getClientId(), ((IdUndeletedStoreException) e).getLifeVersion(), ServerErrorCode.Blob_Already_Undeleted);
            } else {
                response = new UndeleteResponse(undeleteRequest.getCorrelationId(), undeleteRequest.getClientId(), MessageInfo.LIFE_VERSION_FROM_FRONTEND, ServerErrorCode.Blob_Already_Undeleted);
            }
        } else {
            response = new UndeleteResponse(undeleteRequest.getCorrelationId(), undeleteRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
        }
    } catch (Exception e) {
        logger.error("Unknown exception for undelete request {}", undeleteRequest, e);
        response = new UndeleteResponse(undeleteRequest.getCorrelationId(), undeleteRequest.getClientId(), ServerErrorCode.Unknown_Error);
        metrics.unExpectedStoreUndeleteError.inc();
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", undeleteRequest, response, processingTime);
        metrics.undeleteBlobProcessingTimeInMs.update(processingTime);
    }
    requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(metrics.undeleteBlobResponseQueueTimeInMs, metrics.undeleteBlobSendTimeInMs, metrics.undeleteBlobTotalTimeInMs, null, null, totalTimeSpent));
}
Also used: ServerNetworkResponseMetrics(com.github.ambry.network.ServerNetworkResponseMetrics) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) Store(com.github.ambry.store.Store) DataInputStream(java.io.DataInputStream) StoreKey(com.github.ambry.store.StoreKey) ServerErrorCode(com.github.ambry.server.ServerErrorCode) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) MessageInfo(com.github.ambry.store.MessageInfo) LocalChannelRequest(com.github.ambry.network.LocalRequestResponseChannel.LocalChannelRequest) BlobId(com.github.ambry.commons.BlobId)
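
A minimal caller-side sketch (not taken from the project) of how a client receiving this UndeleteResponse might act on the ServerErrorCode it carries. The getError() and getLifeVersion() accessors and the com.github.ambry.protocol package location are assumptions; per the handler above, Blob_Already_Undeleted responses also carry a life version.

import com.github.ambry.protocol.UndeleteResponse;
import com.github.ambry.server.ServerErrorCode;

public class UndeleteResponseCheck {
    // Returns the life version on success; Blob_Already_Undeleted also carries one
    // (see the catch block in the handler above), so it is treated as a benign outcome.
    public static short lifeVersionOrThrow(UndeleteResponse response) {
        ServerErrorCode error = response.getError();
        if (error == ServerErrorCode.No_Error || error == ServerErrorCode.Blob_Already_Undeleted) {
            return response.getLifeVersion();
        }
        throw new IllegalStateException("Undelete failed with server error " + error);
    }
}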

Example 2 with ServerErrorCode

Use of com.github.ambry.server.ServerErrorCode in project ambry by linkedin.

From the class AmbryRequests, method handleGetRequest:

@Override
public void handleGetRequest(NetworkRequest request) throws IOException, InterruptedException {
    GetRequest getRequest;
    if (request instanceof LocalChannelRequest) {
        // This is a case where handleGetRequest is called when frontends are reading from Azure. In this case, this method
        // is called by request handler threads running within the frontend router itself. So, the request can be directly
        // referenced as java objects without any need for deserialization.
        getRequest = (GetRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
    } else {
        getRequest = GetRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
    }
    Histogram responseQueueTime = null;
    Histogram responseSendTime = null;
    Histogram responseTotalTime = null;
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    boolean isReplicaRequest = getRequest.getClientId().startsWith(GetRequest.Replication_Client_Id_Prefix);
    if (getRequest.getMessageFormatFlag() == MessageFormatFlags.Blob) {
        metrics.getBlobRequestQueueTimeInMs.update(requestQueueTime);
        metrics.getBlobRequestRate.mark();
        responseQueueTime = metrics.getBlobResponseQueueTimeInMs;
        responseSendTime = metrics.getBlobSendTimeInMs;
        responseTotalTime = metrics.getBlobTotalTimeInMs;
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobProperties) {
        metrics.getBlobPropertiesRequestQueueTimeInMs.update(requestQueueTime);
        metrics.getBlobPropertiesRequestRate.mark();
        responseQueueTime = metrics.getBlobPropertiesResponseQueueTimeInMs;
        responseSendTime = metrics.getBlobPropertiesSendTimeInMs;
        responseTotalTime = metrics.getBlobPropertiesTotalTimeInMs;
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobUserMetadata) {
        metrics.getBlobUserMetadataRequestQueueTimeInMs.update(requestQueueTime);
        metrics.getBlobUserMetadataRequestRate.mark();
        responseQueueTime = metrics.getBlobUserMetadataResponseQueueTimeInMs;
        responseSendTime = metrics.getBlobUserMetadataSendTimeInMs;
        responseTotalTime = metrics.getBlobUserMetadataTotalTimeInMs;
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobInfo) {
        metrics.getBlobInfoRequestQueueTimeInMs.update(requestQueueTime);
        metrics.getBlobInfoRequestRate.mark();
        responseQueueTime = metrics.getBlobInfoResponseQueueTimeInMs;
        responseSendTime = metrics.getBlobInfoSendTimeInMs;
        responseTotalTime = metrics.getBlobInfoTotalTimeInMs;
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.All) {
        if (isReplicaRequest) {
            metrics.getBlobAllByReplicaRequestQueueTimeInMs.update(requestQueueTime);
            metrics.getBlobAllByReplicaRequestRate.mark();
            responseQueueTime = metrics.getBlobAllByReplicaResponseQueueTimeInMs;
            responseSendTime = metrics.getBlobAllByReplicaSendTimeInMs;
            responseTotalTime = metrics.getBlobAllByReplicaTotalTimeInMs;
        } else {
            metrics.getBlobAllRequestQueueTimeInMs.update(requestQueueTime);
            metrics.getBlobAllRequestRate.mark();
            responseQueueTime = metrics.getBlobAllResponseQueueTimeInMs;
            responseSendTime = metrics.getBlobAllSendTimeInMs;
            responseTotalTime = metrics.getBlobAllTotalTimeInMs;
        }
    }
    long startTime = SystemTime.getInstance().milliseconds();
    GetResponse response = null;
    try {
        List<Send> messagesToSendList = new ArrayList<>(getRequest.getPartitionInfoList().size());
        List<PartitionResponseInfo> partitionResponseInfoList = new ArrayList<>(getRequest.getPartitionInfoList().size());
        for (PartitionRequestInfo partitionRequestInfo : getRequest.getPartitionInfoList()) {
            ServerErrorCode error = validateRequest(partitionRequestInfo.getPartition(), RequestOrResponseType.GetRequest, false);
            if (error != ServerErrorCode.No_Error) {
                logger.error("Validating get request failed for partition {} with error {}", partitionRequestInfo.getPartition(), error);
                PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(partitionRequestInfo.getPartition(), error);
                partitionResponseInfoList.add(partitionResponseInfo);
            } else {
                try {
                    Store storeToGet = storeManager.getStore(partitionRequestInfo.getPartition());
                    EnumSet<StoreGetOptions> storeGetOptions = EnumSet.noneOf(StoreGetOptions.class);
                    // Currently only one option is supported.
                    if (getRequest.getGetOption() == GetOption.Include_Expired_Blobs) {
                        storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Expired);
                    }
                    if (getRequest.getGetOption() == GetOption.Include_Deleted_Blobs) {
                        storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Deleted);
                    }
                    if (getRequest.getGetOption() == GetOption.Include_All) {
                        storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Deleted, StoreGetOptions.Store_Include_Expired);
                    }
                    List<StoreKey> convertedStoreKeys = getConvertedStoreKeys(partitionRequestInfo.getBlobIds());
                    List<StoreKey> dedupedStoreKeys = convertedStoreKeys.size() > 1 ? convertedStoreKeys.stream().distinct().collect(Collectors.toList()) : convertedStoreKeys;
                    StoreInfo info = storeToGet.get(dedupedStoreKeys, storeGetOptions);
                    MessageFormatSend blobsToSend = new MessageFormatSend(info.getMessageReadSet(), getRequest.getMessageFormatFlag(), messageFormatMetrics, storeKeyFactory);
                    PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(partitionRequestInfo.getPartition(), info.getMessageReadSetInfo(), blobsToSend.getMessageMetadataList());
                    messagesToSendList.add(blobsToSend);
                    partitionResponseInfoList.add(partitionResponseInfo);
                } catch (StoreException e) {
                    boolean logInErrorLevel = false;
                    if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
                        metrics.idNotFoundError.inc();
                    } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
                        metrics.ttlExpiredError.inc();
                    } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
                        metrics.idDeletedError.inc();
                    } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
                        metrics.getAuthorizationFailure.inc();
                    } else {
                        metrics.unExpectedStoreGetError.inc();
                        logInErrorLevel = true;
                    }
                    if (logInErrorLevel) {
                        logger.error("Store exception on a get with error code {} for partition {}", e.getErrorCode(), partitionRequestInfo.getPartition(), e);
                    } else {
                        logger.trace("Store exception on a get with error code {} for partition {}", e.getErrorCode(), partitionRequestInfo.getPartition(), e);
                    }
                    PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(partitionRequestInfo.getPartition(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
                    partitionResponseInfoList.add(partitionResponseInfo);
                } catch (MessageFormatException e) {
                    logger.error("Message format exception on a get with error code {} for partitionRequestInfo {}", e.getErrorCode(), partitionRequestInfo, e);
                    if (e.getErrorCode() == MessageFormatErrorCodes.Data_Corrupt) {
                        metrics.dataCorruptError.inc();
                    } else if (e.getErrorCode() == MessageFormatErrorCodes.Unknown_Format_Version) {
                        metrics.unknownFormatError.inc();
                    }
                    PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(partitionRequestInfo.getPartition(), ErrorMapping.getMessageFormatErrorMapping(e.getErrorCode()));
                    partitionResponseInfoList.add(partitionResponseInfo);
                }
            }
        }
        CompositeSend compositeSend = new CompositeSend(messagesToSendList);
        response = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), partitionResponseInfoList, compositeSend, ServerErrorCode.No_Error);
    } catch (Exception e) {
        logger.error("Unknown exception for request {}", getRequest, e);
        response = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), ServerErrorCode.Unknown_Error);
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", getRequest, response, processingTime);
        long responseSize = response != null ? response.sizeInBytes() : 0;
        if (getRequest.getMessageFormatFlag() == MessageFormatFlags.Blob) {
            metrics.getBlobProcessingTimeInMs.update(processingTime);
            metrics.updateGetBlobProcessingTimeBySize(responseSize, processingTime);
        } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobProperties) {
            metrics.getBlobPropertiesProcessingTimeInMs.update(processingTime);
        } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobUserMetadata) {
            metrics.getBlobUserMetadataProcessingTimeInMs.update(processingTime);
        } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobInfo) {
            metrics.getBlobInfoProcessingTimeInMs.update(processingTime);
        } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.All) {
            if (isReplicaRequest) {
                metrics.getBlobAllByReplicaProcessingTimeInMs.update(processingTime);
                // client id now has dc name at the end, for example: ClientId=replication-fetch-abc.example.com[dc1]
                String[] clientStrs = getRequest.getClientId().split("\\[");
                if (clientStrs.length > 1) {
                    String clientDc = clientStrs[1].substring(0, clientStrs[1].length() - 1);
                    if (!currentNode.getDatacenterName().equals(clientDc)) {
                        metrics.updateCrossColoFetchBytesRate(clientDc, responseSize);
                    }
                }
            } else {
                metrics.getBlobAllProcessingTimeInMs.update(processingTime);
                metrics.updateGetBlobProcessingTimeBySize(responseSize, processingTime);
            }
        }
    }
    sendGetResponse(requestResponseChannel, response, request, responseQueueTime, responseSendTime, responseTotalTime, totalTimeSpent, response.sizeInBytes(), getRequest.getMessageFormatFlag(), metrics);
}
Also used: Histogram(com.codahale.metrics.Histogram) ArrayList(java.util.ArrayList) Store(com.github.ambry.store.Store) StoreInfo(com.github.ambry.store.StoreInfo) MessageFormatSend(com.github.ambry.messageformat.MessageFormatSend) Send(com.github.ambry.network.Send) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) StoreGetOptions(com.github.ambry.store.StoreGetOptions) DataInputStream(java.io.DataInputStream) StoreKey(com.github.ambry.store.StoreKey) ServerErrorCode(com.github.ambry.server.ServerErrorCode) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) LocalChannelRequest(com.github.ambry.network.LocalRequestResponseChannel.LocalChannelRequest)
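
The three consecutive if-blocks that translate the request's GetOption into StoreGetOptions could be factored into a small helper. The sketch below is a hypothetical refactoring, not code from the project; the com.github.ambry.protocol.GetOption location and the default branch (matching the EnumSet.noneOf initialization above) are assumptions.

import java.util.EnumSet;

import com.github.ambry.protocol.GetOption;
import com.github.ambry.store.StoreGetOptions;

public class GetOptionMapper {
    // Mirrors the mapping in handleGetRequest: expired and/or deleted blobs become
    // visible to the store lookup only when the matching GetOption is supplied.
    public static EnumSet<StoreGetOptions> toStoreGetOptions(GetOption option) {
        switch (option) {
            case Include_Expired_Blobs:
                return EnumSet.of(StoreGetOptions.Store_Include_Expired);
            case Include_Deleted_Blobs:
                return EnumSet.of(StoreGetOptions.Store_Include_Deleted);
            case Include_All:
                return EnumSet.of(StoreGetOptions.Store_Include_Deleted, StoreGetOptions.Store_Include_Expired);
            default:
                return EnumSet.noneOf(StoreGetOptions.class);
        }
    }
}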

Example 3 with ServerErrorCode

Use of com.github.ambry.server.ServerErrorCode in project ambry by linkedin.

From the class AmbryRequests, method handleReplicaMetadataRequest:

@Override
public void handleReplicaMetadataRequest(NetworkRequest request) throws IOException, InterruptedException {
    if (replicationEngine == null) {
        throw new UnsupportedOperationException("Replication not supported on this node.");
    }
    ReplicaMetadataRequest replicaMetadataRequest = ReplicaMetadataRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap, findTokenHelper);
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    metrics.replicaMetadataRequestQueueTimeInMs.update(requestQueueTime);
    metrics.replicaMetadataRequestRate.mark();
    List<ReplicaMetadataRequestInfo> replicaMetadataRequestInfoList = replicaMetadataRequest.getReplicaMetadataRequestInfoList();
    int partitionCnt = replicaMetadataRequestInfoList.size();
    long startTimeInMs = SystemTime.getInstance().milliseconds();
    ReplicaMetadataResponse response = null;
    try {
        List<ReplicaMetadataResponseInfo> replicaMetadataResponseList = new ArrayList<>(partitionCnt);
        for (ReplicaMetadataRequestInfo replicaMetadataRequestInfo : replicaMetadataRequestInfoList) {
            long partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
            PartitionId partitionId = replicaMetadataRequestInfo.getPartitionId();
            ReplicaType replicaType = replicaMetadataRequestInfo.getReplicaType();
            ServerErrorCode error = validateRequest(partitionId, RequestOrResponseType.ReplicaMetadataRequest, false);
            logger.trace("{} Time used to validate metadata request: {}", partitionId, (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
            if (error != ServerErrorCode.No_Error) {
                logger.error("Validating replica metadata request failed with error {} for partition {}", error, partitionId);
                ReplicaMetadataResponseInfo replicaMetadataResponseInfo = new ReplicaMetadataResponseInfo(partitionId, replicaType, error, ReplicaMetadataResponse.getCompatibleResponseVersion(replicaMetadataRequest.getVersionId()));
                replicaMetadataResponseList.add(replicaMetadataResponseInfo);
            } else {
                try {
                    FindToken findToken = replicaMetadataRequestInfo.getToken();
                    String hostName = replicaMetadataRequestInfo.getHostName();
                    String replicaPath = replicaMetadataRequestInfo.getReplicaPath();
                    Store store = storeManager.getStore(partitionId);
                    partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
                    FindInfo findInfo = store.findEntriesSince(findToken, replicaMetadataRequest.getMaxTotalSizeOfEntriesInBytes(), hostName, replicaPath);
                    logger.trace("{} Time used to find entry since: {}", partitionId, (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
                    partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
                    long totalBytesRead = findInfo.getFindToken().getBytesRead();
                    replicationEngine.updateTotalBytesReadByRemoteReplica(partitionId, hostName, replicaPath, totalBytesRead);
                    logger.trace("{} Time used to update total bytes read: {}", partitionId, (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
                    partitionStartTimeInMs = SystemTime.getInstance().milliseconds();
                    logger.trace("{} Time used to get remote replica lag in bytes: {}", partitionId, (SystemTime.getInstance().milliseconds() - partitionStartTimeInMs));
                    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = new ReplicaMetadataResponseInfo(partitionId, replicaType, findInfo.getFindToken(), findInfo.getMessageEntries(), getRemoteReplicaLag(store, totalBytesRead), ReplicaMetadataResponse.getCompatibleResponseVersion(replicaMetadataRequest.getVersionId()));
                    if (replicaMetadataResponseInfo.getTotalSizeOfAllMessages() > 5 * replicaMetadataRequest.getMaxTotalSizeOfEntriesInBytes()) {
                        logger.debug("{} generated a metadata response {} where the cumulative size of messages is {}", replicaMetadataRequest, replicaMetadataResponseInfo, replicaMetadataResponseInfo.getTotalSizeOfAllMessages());
                        metrics.replicationResponseMessageSizeTooHigh.inc();
                    }
                    replicaMetadataResponseList.add(replicaMetadataResponseInfo);
                    metrics.replicaMetadataTotalSizeOfMessages.update(replicaMetadataResponseInfo.getTotalSizeOfAllMessages());
                } catch (StoreException e) {
                    logger.error("Store exception on a replica metadata request with error code {} for partition {}", e.getErrorCode(), partitionId, e);
                    if (e.getErrorCode() == StoreErrorCodes.IOError) {
                        metrics.storeIOError.inc();
                    } else {
                        metrics.unExpectedStoreFindEntriesError.inc();
                    }
                    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = new ReplicaMetadataResponseInfo(partitionId, replicaType, ErrorMapping.getStoreErrorMapping(e.getErrorCode()), ReplicaMetadataResponse.getCompatibleResponseVersion(replicaMetadataRequest.getVersionId()));
                    replicaMetadataResponseList.add(replicaMetadataResponseInfo);
                }
            }
        }
        response = new ReplicaMetadataResponse(replicaMetadataRequest.getCorrelationId(), replicaMetadataRequest.getClientId(), ServerErrorCode.No_Error, replicaMetadataResponseList, ReplicaMetadataResponse.getCompatibleResponseVersion(replicaMetadataRequest.getVersionId()));
    } catch (Exception e) {
        logger.error("Unknown exception for request {}", replicaMetadataRequest, e);
        response = new ReplicaMetadataResponse(replicaMetadataRequest.getCorrelationId(), replicaMetadataRequest.getClientId(), ServerErrorCode.Unknown_Error, ReplicaMetadataResponse.getCompatibleResponseVersion(replicaMetadataRequest.getVersionId()));
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTimeInMs;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", replicaMetadataRequest, response, processingTime);
        logger.trace("{} {} processingTime {}", replicaMetadataRequest, response, processingTime);
        metrics.replicaMetadataRequestProcessingTimeInMs.update(processingTime);
        // client id now has dc name at the end, for example: ClientId=replication-metadata-abc.example.com[dc1]
        String[] clientStrs = replicaMetadataRequest.getClientId().split("\\[");
        if (clientStrs.length > 1) {
            String clientDc = clientStrs[1].substring(0, clientStrs[1].length() - 1);
            if (!currentNode.getDatacenterName().equals(clientDc)) {
                metrics.updateCrossColoMetadataExchangeBytesRate(clientDc, response != null ? response.sizeInBytes() : 0L);
            }
        }
    }
    requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(metrics.replicaMetadataResponseQueueTimeInMs, metrics.replicaMetadataSendTimeInMs, metrics.replicaMetadataTotalTimeInMs, null, null, totalTimeSpent));
}
Also used: ServerNetworkResponseMetrics(com.github.ambry.network.ServerNetworkResponseMetrics) ArrayList(java.util.ArrayList) Store(com.github.ambry.store.Store) DataInputStream(java.io.DataInputStream) PartitionId(com.github.ambry.clustermap.PartitionId) ServerErrorCode(com.github.ambry.server.ServerErrorCode) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) ReplicaType(com.github.ambry.clustermap.ReplicaType) FindToken(com.github.ambry.replication.FindToken) FindInfo(com.github.ambry.store.FindInfo)
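
Both handleGetRequest and handleReplicaMetadataRequest end by parsing a datacenter name out of a client id of the form replication-metadata-abc.example.com[dc1] before updating cross-colo byte-rate metrics. The standalone helper below is only an illustrative extraction of that parsing logic, not code from the project.

public class ClientIdDcParser {
    // Extracts the datacenter suffix from a client id such as
    // "replication-metadata-abc.example.com[dc1]"; returns null when no suffix is present.
    public static String datacenterFromClientId(String clientId) {
        String[] clientStrs = clientId.split("\\[");
        if (clientStrs.length > 1) {
            // Drop the trailing ']' to keep just the dc name, e.g. "dc1".
            return clientStrs[1].substring(0, clientStrs[1].length() - 1);
        }
        return null;
    }
}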

Example 4 with ServerErrorCode

Use of com.github.ambry.server.ServerErrorCode in project ambry by linkedin.

From the class PartitionResponseInfo, method readFrom:

public static PartitionResponseInfo readFrom(DataInputStream stream, ClusterMap map, short getResponseVersion) throws IOException {
    PartitionId partitionId = map.getPartitionIdFromStream(stream);
    MessageInfoAndMetadataListSerde messageInfoAndMetadataList = MessageInfoAndMetadataListSerde.deserializeMessageInfoAndMetadataList(stream, map, getMessageInfoAndMetadataListSerDeVersion(getResponseVersion));
    ServerErrorCode error = ServerErrorCode.values()[stream.readShort()];
    if (error != ServerErrorCode.No_Error) {
        return new PartitionResponseInfo(partitionId, new ArrayList<>(), new ArrayList<>(), error, getResponseVersion);
    } else {
        return new PartitionResponseInfo(partitionId, messageInfoAndMetadataList.getMessageInfoList(), messageInfoAndMetadataList.getMessageMetadataList(), ServerErrorCode.No_Error, getResponseVersion);
    }
}
Also used: PartitionId(com.github.ambry.clustermap.PartitionId) ServerErrorCode(com.github.ambry.server.ServerErrorCode)
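
readFrom recovers the error as ServerErrorCode.values()[stream.readShort()], so on the wire the code is the enum ordinal stored in a two-byte field. The write side is not shown on this page; the sketch below is an inference from that read path rather than the project's actual serialization code.

import java.io.DataOutputStream;
import java.io.IOException;

import com.github.ambry.server.ServerErrorCode;

public class ServerErrorCodeSerde {
    // Symmetric counterpart to ServerErrorCode.values()[stream.readShort()]:
    // write the enum's ordinal as a short.
    public static void writeErrorCode(DataOutputStream stream, ServerErrorCode error) throws IOException {
        stream.writeShort(error.ordinal());
    }
}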

Example 5 with ServerErrorCode

Use of com.github.ambry.server.ServerErrorCode in project ambry by linkedin.

From the class PutResponse, method readFrom:

public static PutResponse readFrom(DataInputStream stream) throws IOException {
    RequestOrResponseType type = RequestOrResponseType.values()[stream.readShort()];
    if (type != RequestOrResponseType.PutResponse) {
        throw new IllegalArgumentException("The type of request response is not compatible: " + type);
    }
    Short versionId = stream.readShort();
    int correlationId = stream.readInt();
    String clientId = Utils.readIntString(stream);
    ServerErrorCode error = ServerErrorCode.values()[stream.readShort()];
    // ignore version for now
    return new PutResponse(correlationId, clientId, error);
}
Also used: ServerErrorCode(com.github.ambry.server.ServerErrorCode)
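
On the receiving side, a caller would typically compare the deserialized code against ServerErrorCode.No_Error before trusting the rest of the response. The check below is a minimal sketch; it assumes PutResponse lives in com.github.ambry.protocol and exposes getError() and getCorrelationId() accessors.

import com.github.ambry.protocol.PutResponse;
import com.github.ambry.server.ServerErrorCode;

public class PutResponseCheck {
    // Treats anything other than No_Error as a failed PUT.
    public static void ensurePutSucceeded(PutResponse response) {
        ServerErrorCode error = response.getError();
        if (error != ServerErrorCode.No_Error) {
            throw new IllegalStateException(
                "Put failed with " + error + " (correlationId=" + response.getCorrelationId() + ")");
        }
    }
}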

Aggregations

ServerErrorCode (com.github.ambry.server.ServerErrorCode): 56 usages
ArrayList (java.util.ArrayList): 21 usages
Test (org.junit.Test): 20 usages
DataInputStream (java.io.DataInputStream): 16 usages
BlobProperties (com.github.ambry.messageformat.BlobProperties): 12 usages
IOException (java.io.IOException): 11 usages
HashMap (java.util.HashMap): 11 usages
BlobId (com.github.ambry.commons.BlobId): 10 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 10 usages
MessageFormatException (com.github.ambry.messageformat.MessageFormatException): 9 usages
NettyByteBufDataInputStream (com.github.ambry.utils.NettyByteBufDataInputStream): 9 usages
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 8 usages
LoggingNotificationSystem (com.github.ambry.commons.LoggingNotificationSystem): 8 usages
RouterConfig (com.github.ambry.config.RouterConfig): 8 usages
Map (java.util.Map): 8 usages
Properties (java.util.Properties): 8 usages
DataNodeId (com.github.ambry.clustermap.DataNodeId): 7 usages
PartitionId (com.github.ambry.clustermap.PartitionId): 7 usages
MessageInfo (com.github.ambry.store.MessageInfo): 7 usages
IdUndeletedStoreException (com.github.ambry.store.IdUndeletedStoreException): 6 usages