Search in sources:

Example 11 with MessageFormatInputStream

Use of com.github.ambry.messageformat.MessageFormatInputStream in project ambry by LinkedIn.

The class BlobStore, method updateTtl:

@Override
public void updateTtl(List<MessageInfo> infosToUpdate) throws StoreException {
    checkStarted();
    checkDuplicates(infosToUpdate);
    final Timer.Context context = metrics.ttlUpdateResponse.time();
    try {
        List<IndexValue> indexValuesToUpdate = new ArrayList<>();
        List<Short> lifeVersions = new ArrayList<>();
        Offset indexEndOffsetBeforeCheck = index.getCurrentEndOffset();
        for (MessageInfo info : infosToUpdate) {
            if (info.getExpirationTimeInMs() != Utils.Infinite_Time) {
                throw new StoreException("BlobStore only supports removing the expiration time", StoreErrorCodes.Update_Not_Allowed);
            }
            IndexValue value = index.findKey(info.getStoreKey(), new FileSpan(index.getStartOffset(), indexEndOffsetBeforeCheck));
            if (value == null) {
                throw new StoreException("Cannot update TTL of " + info.getStoreKey() + " since it's not in the index", StoreErrorCodes.ID_Not_Found);
            } else if (!info.getStoreKey().isAccountContainerMatch(value.getAccountId(), value.getContainerId())) {
                if (config.storeValidateAuthorization) {
                    throw new StoreException("UPDATE authorization failure. Key: " + info.getStoreKey() + " AccountId in store: " + value.getAccountId() + " ContainerId in store: " + value.getContainerId(), StoreErrorCodes.Authorization_Failure);
                } else {
                    logger.warn("UPDATE authorization failure. Key: {} AccountId in store: {} ContainerId in store: {}", info.getStoreKey(), value.getAccountId(), value.getContainerId());
                    metrics.ttlUpdateAuthorizationFailureCount.inc();
                }
            } else if (value.isDelete()) {
                throw new StoreException("Cannot update TTL of " + info.getStoreKey() + " since it is already deleted in the index.", StoreErrorCodes.ID_Deleted);
            } else if (value.isTtlUpdate()) {
                throw new StoreException("TTL of " + info.getStoreKey() + " is already updated in the index.", StoreErrorCodes.Already_Updated);
            } else if (!IndexValue.hasLifeVersion(info.getLifeVersion()) && value.getExpiresAtMs() != Utils.Infinite_Time && value.getExpiresAtMs() < info.getOperationTimeMs() + ttlUpdateBufferTimeMs) {
                // This check only applies to frontend-originated requests; when the request comes from replication
                // (a valid lifeVersion is present), the operation time does not matter.
                throw new StoreException("TTL of " + info.getStoreKey() + " cannot be updated because it is too close to expiry. Op time (ms): " + info.getOperationTimeMs() + ". ExpiresAtMs: " + value.getExpiresAtMs(), StoreErrorCodes.Update_Not_Allowed);
            }
            indexValuesToUpdate.add(value);
            lifeVersions.add(value.getLifeVersion());
        }
        synchronized (storeWriteLock) {
            Offset currentIndexEndOffset = index.getCurrentEndOffset();
            if (!currentIndexEndOffset.equals(indexEndOffsetBeforeCheck)) {
                FileSpan fileSpan = new FileSpan(indexEndOffsetBeforeCheck, currentIndexEndOffset);
                for (MessageInfo info : infosToUpdate) {
                    IndexValue value = index.findKey(info.getStoreKey(), fileSpan, EnumSet.allOf(PersistentIndex.IndexEntryType.class));
                    if (value != null) {
                        if (value.isDelete()) {
                            throw new StoreException("Cannot update TTL of " + info.getStoreKey() + " since it is already deleted in the index.", StoreErrorCodes.ID_Deleted);
                        } else if (value.isTtlUpdate()) {
                            throw new StoreException("TTL of " + info.getStoreKey() + " is already updated in the index.", StoreErrorCodes.Already_Updated);
                        }
                    }
                }
            }
            List<InputStream> inputStreams = new ArrayList<>(infosToUpdate.size());
            List<MessageInfo> updatedInfos = new ArrayList<>(infosToUpdate.size());
            int i = 0;
            for (MessageInfo info : infosToUpdate) {
                MessageFormatInputStream stream = new TtlUpdateMessageFormatInputStream(info.getStoreKey(), info.getAccountId(), info.getContainerId(), info.getExpirationTimeInMs(), info.getOperationTimeMs(), lifeVersions.get(i));
                // we only need to change the stream size.
                updatedInfos.add(new MessageInfo(info.getStoreKey(), stream.getSize(), info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), info.getLifeVersion()));
                inputStreams.add(stream);
                i++;
            }
            Offset endOffsetOfLastMessage = log.getEndOffset();
            MessageFormatWriteSet writeSet = new MessageFormatWriteSet(new SequenceInputStream(Collections.enumeration(inputStreams)), updatedInfos, false);
            writeSet.writeTo(log);
            logger.trace("Store : {} ttl update mark written to log", dataDir);
            int correspondingPutIndex = 0;
            for (MessageInfo info : updatedInfos) {
                FileSpan fileSpan = log.getFileSpanForMessage(endOffsetOfLastMessage, info.getSize());
                // A TTL update should always use the same lifeVersion as the previous value of the same key; that's why
                // we use LIFE_VERSION_FROM_FRONTEND here regardless of the lifeVersion in the message info.
                IndexValue ttlUpdateValue = index.markAsPermanent(info.getStoreKey(), fileSpan, null, info.getOperationTimeMs(), MessageInfo.LIFE_VERSION_FROM_FRONTEND);
                endOffsetOfLastMessage = fileSpan.getEndOffset();
                blobStoreStats.handleNewTtlUpdateEntry(info.getStoreKey(), ttlUpdateValue, indexValuesToUpdate.get(correspondingPutIndex++));
            }
            logger.trace("Store : {} ttl update has been marked in the index ", dataDir);
        }
        onSuccess();
    } catch (StoreException e) {
        if (e.getErrorCode() == StoreErrorCodes.IOError) {
            onError();
        }
        throw e;
    } catch (Exception e) {
        throw new StoreException("Unknown error while trying to update ttl of blobs from store " + dataDir, e, StoreErrorCodes.Unknown_Error);
    } finally {
        context.stop();
    }
}
Also used : DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) SequenceInputStream(java.io.SequenceInputStream) UndeleteMessageFormatInputStream(com.github.ambry.messageformat.UndeleteMessageFormatInputStream) MessageFormatInputStream(com.github.ambry.messageformat.MessageFormatInputStream) TtlUpdateMessageFormatInputStream(com.github.ambry.messageformat.TtlUpdateMessageFormatInputStream) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) IOException(java.io.IOException) Timer(com.codahale.metrics.Timer) MessageFormatWriteSet(com.github.ambry.messageformat.MessageFormatWriteSet)
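The example above rejects any MessageInfo whose expiration time is not Utils.Infinite_Time, so a caller has to build the TTL update metadata accordingly. The following is a minimal, hypothetical sketch (not taken from the Ambry source) of such a caller; the method name requestTtlUpdate and the parameters store, key, accountId, containerId and operationTimeMs are placeholders, and the constructor and builder calls mirror those shown in the examples on this page.

// Hypothetical caller-side sketch: build the MessageInfo list that updateTtl expects.
void requestTtlUpdate(BlobStore store, StoreKey key, short accountId, short containerId, long operationTimeMs)
        throws StoreException {
    MessageInfo info = new MessageInfo.Builder(key, 0, accountId, containerId, operationTimeMs)
        // Infinite_Time is mandatory here; updateTtl throws Update_Not_Allowed otherwise.
        .expirationTimeInMs(Utils.Infinite_Time)
        // Frontend-originated updates carry LIFE_VERSION_FROM_FRONTEND; replication supplies a real lifeVersion.
        .lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND)
        .build();
    // The size passed to the Builder (0 here) is a placeholder; updateTtl recomputes the record size
    // from the TtlUpdateMessageFormatInputStream it creates.
    store.updateTtl(Collections.singletonList(info));
}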

Example 12 with MessageFormatInputStream

Use of com.github.ambry.messageformat.MessageFormatInputStream in project ambry by LinkedIn.

The class ReplicaThread, method processReplicaMetadataResponse:

/**
 * Takes the missing keys and the message list from the remote store, identifies messages that are deleted
 * on the remote store, and applies those deletes locally. Also, if a missing message is already deleted in the
 * remote store, it is removed from the set of missing keys.
 * @param missingStoreKeys The set of keys missing from the local store
 * @param replicaMetadataResponseInfo The replica metadata response from the remote store
 * @param remoteReplicaInfo The remote replica that is being replicated from
 * @param remoteNode The remote node from which replication needs to happen
 * @throws IOException
 * @throws StoreException
 * @throws MessageFormatException
 */
private void processReplicaMetadataResponse(Set<StoreKey> missingStoreKeys, ReplicaMetadataResponseInfo replicaMetadataResponseInfo, RemoteReplicaInfo remoteReplicaInfo, DataNodeId remoteNode) throws IOException, StoreException, MessageFormatException {
    long startTime = SystemTime.getInstance().milliseconds();
    List<MessageInfo> messageInfoList = replicaMetadataResponseInfo.getMessageInfoList();
    for (MessageInfo messageInfo : messageInfoList) {
        BlobId blobId = (BlobId) messageInfo.getStoreKey();
        if (remoteReplicaInfo.getLocalReplicaId().getPartitionId().compareTo(blobId.getPartition()) != 0) {
            throw new IllegalStateException("Blob id is not in the expected partition Actual partition " + blobId.getPartition() + " Expected partition " + remoteReplicaInfo.getLocalReplicaId().getPartitionId());
        }
        if (!missingStoreKeys.contains(messageInfo.getStoreKey())) {
            // The key is present in the local store; apply the delete locally if the remote copy is deleted and it is not
            // deleted yet locally.
            if (messageInfo.isDeleted() && !remoteReplicaInfo.getLocalStore().isKeyDeleted(messageInfo.getStoreKey())) {
                MessageFormatInputStream deleteStream = new DeleteMessageFormatInputStream(messageInfo.getStoreKey(), messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs());
                MessageInfo info = new MessageInfo(messageInfo.getStoreKey(), deleteStream.getSize(), true, messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs());
                ArrayList<MessageInfo> infoList = new ArrayList<MessageInfo>();
                infoList.add(info);
                MessageFormatWriteSet writeset = new MessageFormatWriteSet(deleteStream, infoList, false);
                try {
                    remoteReplicaInfo.getLocalStore().delete(writeset);
                    logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key deleted. mark for deletion id: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
                } catch (StoreException e) {
                    // The key may already be deleted locally, e.g. when delete messages for it are received from
                    // different replicas around the same time.
                    if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
                        logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key already deleted: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
                    } else {
                        throw e;
                    }
                }
                // Fire the replica-deleted notification, since the Delete is guaranteed to have taken effect locally.
                if (notification != null) {
                    notification.onBlobReplicaDeleted(dataNodeId.getHostname(), dataNodeId.getPort(), messageInfo.getStoreKey().getID(), BlobReplicaSourceType.REPAIRED);
                }
            }
        } else {
            if (messageInfo.isDeleted()) {
                // if the key is not present locally and if the remote replica has the message in deleted state,
                // it is not considered missing locally.
                missingStoreKeys.remove(messageInfo.getStoreKey());
                logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key in deleted state remotely: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
                // Fire the replica-deleted notification; the key is absent locally, so the Delete has effectively
                // taken effect locally.
                if (notification != null) {
                    notification.onBlobReplicaDeleted(dataNodeId.getHostname(), dataNodeId.getPort(), messageInfo.getStoreKey().getID(), BlobReplicaSourceType.REPAIRED);
                }
            } else if (messageInfo.isExpired()) {
                // if the key is not present locally and if the remote replica has the key as expired,
                // it is not considered missing locally.
                missingStoreKeys.remove(messageInfo.getStoreKey());
                logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key in expired state remotely {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
            }
        }
    }
    if (replicatingFromRemoteColo) {
        replicationMetrics.interColoProcessMetadataResponseTime.get(datacenterName).update(SystemTime.getInstance().milliseconds() - startTime);
    } else {
        replicationMetrics.intraColoProcessMetadataResponseTime.update(SystemTime.getInstance().milliseconds() - startTime);
    }
}
Also used : ArrayList(java.util.ArrayList) DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) MessageFormatInputStream(com.github.ambry.messageformat.MessageFormatInputStream) BlobId(com.github.ambry.commons.BlobId) MessageInfo(com.github.ambry.store.MessageInfo) MessageFormatWriteSet(com.github.ambry.messageformat.MessageFormatWriteSet) StoreException(com.github.ambry.store.StoreException)
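Stripped of the replication bookkeeping, the delete path in this example reduces to a small pattern: build a DeleteMessageFormatInputStream, describe it with a MessageInfo, wrap both in a MessageFormatWriteSet and hand it to the store. The sketch below isolates that pattern; store, key, accountId, containerId and deleteTimeMs are hypothetical placeholders, and the calls mirror the ones used above.

// Hypothetical sketch of the delete-record pattern used in this example.
MessageFormatInputStream deleteStream =
    new DeleteMessageFormatInputStream(key, accountId, containerId, deleteTimeMs);
MessageInfo deleteInfo =
    new MessageInfo(key, deleteStream.getSize(), true, accountId, containerId, deleteTimeMs);
MessageFormatWriteSet writeSet =
    new MessageFormatWriteSet(deleteStream, Collections.singletonList(deleteInfo), false);
// delete() throws a StoreException with ID_Deleted if the key is already deleted; the replication
// code above treats that case as benign.
store.delete(writeSet);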

Example 13 with MessageFormatInputStream

Use of com.github.ambry.messageformat.MessageFormatInputStream in project ambry by LinkedIn.

The class AmbryRequests, method handleDeleteRequest:

public void handleDeleteRequest(Request request) throws IOException, InterruptedException {
    DeleteRequest deleteRequest = DeleteRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    metrics.deleteBlobRequestQueueTimeInMs.update(requestQueueTime);
    metrics.deleteBlobRequestRate.mark();
    long startTime = SystemTime.getInstance().milliseconds();
    DeleteResponse response = null;
    try {
        ServerErrorCode error = validateRequest(deleteRequest.getBlobId().getPartition(), RequestOrResponseType.DeleteRequest);
        if (error != ServerErrorCode.No_Error) {
            logger.error("Validating delete request failed with error {} for request {}", error, deleteRequest);
            response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), error);
        } else {
            MessageFormatInputStream stream = new DeleteMessageFormatInputStream(deleteRequest.getBlobId(), deleteRequest.getAccountId(), deleteRequest.getContainerId(), deleteRequest.getDeletionTimeInMs());
            MessageInfo info = new MessageInfo(deleteRequest.getBlobId(), stream.getSize(), deleteRequest.getAccountId(), deleteRequest.getContainerId(), deleteRequest.getDeletionTimeInMs());
            ArrayList<MessageInfo> infoList = new ArrayList<MessageInfo>();
            infoList.add(info);
            MessageFormatWriteSet writeset = new MessageFormatWriteSet(stream, infoList, false);
            Store storeToDelete = storageManager.getStore(deleteRequest.getBlobId().getPartition());
            storeToDelete.delete(writeset);
            response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.No_Error);
            if (notification != null) {
                notification.onBlobReplicaDeleted(currentNode.getHostname(), currentNode.getPort(), deleteRequest.getBlobId().getID(), BlobReplicaSourceType.PRIMARY);
            }
        }
    } catch (StoreException e) {
        boolean logInErrorLevel = false;
        if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
            metrics.idNotFoundError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
            metrics.ttlExpiredError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
            metrics.idDeletedError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
            metrics.deleteAuthorizationFailure.inc();
        } else {
            logInErrorLevel = true;
            metrics.unExpectedStoreDeleteError.inc();
        }
        if (logInErrorLevel) {
            logger.error("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
        } else {
            logger.trace("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
        }
        response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
    } catch (Exception e) {
        logger.error("Unknown exception for delete request " + deleteRequest, e);
        response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.Unknown_Error);
        metrics.unExpectedStoreDeleteError.inc();
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", deleteRequest, response, processingTime);
        metrics.deleteBlobProcessingTimeInMs.update(processingTime);
    }
    requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(metrics.deleteBlobResponseQueueTimeInMs, metrics.deleteBlobSendTimeInMs, metrics.deleteBlobTotalTimeInMs, null, null, totalTimeSpent));
}
Also used : ServerNetworkResponseMetrics(com.github.ambry.network.ServerNetworkResponseMetrics) ArrayList(java.util.ArrayList) Store(com.github.ambry.store.Store) DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) MessageFormatInputStream(com.github.ambry.messageformat.MessageFormatInputStream) PutMessageFormatInputStream(com.github.ambry.messageformat.PutMessageFormatInputStream) DataInputStream(java.io.DataInputStream) ServerErrorCode(com.github.ambry.commons.ServerErrorCode) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) MessageInfo(com.github.ambry.store.MessageInfo) DeleteResponse(com.github.ambry.protocol.DeleteResponse) DeleteRequest(com.github.ambry.protocol.DeleteRequest) MessageFormatWriteSet(com.github.ambry.messageformat.MessageFormatWriteSet)
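Besides writing the delete record, this example shows how store-level failures are surfaced to the client: specific StoreErrorCodes bump dedicated metrics, and every StoreException is translated into a DeleteResponse via ErrorMapping. The sketch below isolates that translation; deleteRequest, storeToDelete and writeset are hypothetical placeholders carried over from the example.

// Hypothetical sketch of the StoreException-to-DeleteResponse mapping used above.
DeleteResponse response;
try {
    storeToDelete.delete(writeset);
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(),
        ServerErrorCode.No_Error);
} catch (StoreException e) {
    // ErrorMapping translates store error codes (ID_Not_Found, TTL_Expired, ID_Deleted, ...) into the
    // ServerErrorCode returned to the client.
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(),
        ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
}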

Example 14 with MessageFormatInputStream

Use of com.github.ambry.messageformat.MessageFormatInputStream in project ambry by LinkedIn.

The class AmbryRequests, method handlePutRequest:

public void handlePutRequest(Request request) throws IOException, InterruptedException {
    PutRequest.ReceivedPutRequest receivedRequest = PutRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    metrics.putBlobRequestQueueTimeInMs.update(requestQueueTime);
    metrics.putBlobRequestRate.mark();
    long startTime = SystemTime.getInstance().milliseconds();
    PutResponse response = null;
    try {
        ServerErrorCode error = validateRequest(receivedRequest.getBlobId().getPartition(), RequestOrResponseType.PutRequest);
        if (error != ServerErrorCode.No_Error) {
            logger.error("Validating put request failed with error {} for request {}", error, receivedRequest);
            response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), error);
        } else {
            MessageFormatInputStream stream = new PutMessageFormatInputStream(receivedRequest.getBlobId(), receivedRequest.getBlobEncryptionKey(), receivedRequest.getBlobProperties(), receivedRequest.getUsermetadata(), receivedRequest.getBlobStream(), receivedRequest.getBlobSize(), receivedRequest.getBlobType());
            MessageInfo info = new MessageInfo(receivedRequest.getBlobId(), stream.getSize(), false, Utils.addSecondsToEpochTime(receivedRequest.getBlobProperties().getCreationTimeInMs(), receivedRequest.getBlobProperties().getTimeToLiveInSeconds()), receivedRequest.getCrc(), receivedRequest.getBlobProperties().getAccountId(), receivedRequest.getBlobProperties().getContainerId(), receivedRequest.getBlobProperties().getCreationTimeInMs());
            ArrayList<MessageInfo> infoList = new ArrayList<MessageInfo>();
            infoList.add(info);
            MessageFormatWriteSet writeset = new MessageFormatWriteSet(stream, infoList, false);
            Store storeToPut = storageManager.getStore(receivedRequest.getBlobId().getPartition());
            storeToPut.put(writeset);
            response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ServerErrorCode.No_Error);
            metrics.blobSizeInBytes.update(receivedRequest.getBlobSize());
            metrics.blobUserMetadataSizeInBytes.update(receivedRequest.getUsermetadata().limit());
            if (notification != null) {
                notification.onBlobReplicaCreated(currentNode.getHostname(), currentNode.getPort(), receivedRequest.getBlobId().getID(), BlobReplicaSourceType.PRIMARY);
            }
        }
    } catch (StoreException e) {
        logger.error("Store exception on a put with error code " + e.getErrorCode() + " for request " + receivedRequest, e);
        if (e.getErrorCode() == StoreErrorCodes.Already_Exist) {
            metrics.idAlreadyExistError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.IOError) {
            metrics.storeIOError.inc();
        } else {
            metrics.unExpectedStorePutError.inc();
        }
        response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
    } catch (Exception e) {
        logger.error("Unknown exception on a put for request " + receivedRequest, e);
        response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ServerErrorCode.Unknown_Error);
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", receivedRequest, response, processingTime);
        metrics.putBlobProcessingTimeInMs.update(processingTime);
        metrics.updatePutBlobProcessingTimeBySize(receivedRequest.getBlobSize(), processingTime);
    }
    sendPutResponse(requestResponseChannel, response, request, metrics.putBlobResponseQueueTimeInMs, metrics.putBlobSendTimeInMs, metrics.putBlobTotalTimeInMs, totalTimeSpent, receivedRequest.getBlobSize(), metrics);
}
Also used : ArrayList(java.util.ArrayList) PutRequest(com.github.ambry.protocol.PutRequest) PutMessageFormatInputStream(com.github.ambry.messageformat.PutMessageFormatInputStream) Store(com.github.ambry.store.Store) DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) MessageFormatInputStream(com.github.ambry.messageformat.MessageFormatInputStream) DataInputStream(java.io.DataInputStream) PutResponse(com.github.ambry.protocol.PutResponse) ServerErrorCode(com.github.ambry.commons.ServerErrorCode) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) MessageInfo(com.github.ambry.store.MessageInfo) MessageFormatWriteSet(com.github.ambry.messageformat.MessageFormatWriteSet)
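The put path follows the same shape as the delete path: serialize the blob into a PutMessageFormatInputStream, describe the resulting record with a MessageInfo, wrap both in a MessageFormatWriteSet and call put on the target store. The sketch below isolates that pattern; storeToPut, blobId, encryptionKey, properties, userMetadata, blobStream, blobSize, blobType and crc (which may be null) are hypothetical placeholders mirroring the fields of the received request.

// Hypothetical sketch of the put-record pattern used in this example.
MessageFormatInputStream stream = new PutMessageFormatInputStream(blobId, encryptionKey, properties,
    userMetadata, blobStream, blobSize, blobType);
MessageInfo info = new MessageInfo(blobId, stream.getSize(), false,
    // Expiration is derived from the creation time plus the TTL carried in the blob properties.
    Utils.addSecondsToEpochTime(properties.getCreationTimeInMs(), properties.getTimeToLiveInSeconds()),
    crc, properties.getAccountId(), properties.getContainerId(), properties.getCreationTimeInMs());
MessageFormatWriteSet writeSet =
    new MessageFormatWriteSet(stream, Collections.singletonList(info), false);
// put() throws a StoreException with Already_Exist if the blob id is already present in the store.
storeToPut.put(writeSet);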

Example 15 with MessageFormatInputStream

Use of com.github.ambry.messageformat.MessageFormatInputStream in project ambry by LinkedIn.

The class AmbryRequests, method handlePutRequest (the NetworkRequest overload):

@Override
public void handlePutRequest(NetworkRequest request) throws IOException, InterruptedException {
    PutRequest receivedRequest;
    if (request instanceof LocalChannelRequest) {
        // This is a case where handlePutRequest is called when frontends are writing to Azure. In this case, this method
        // is called by request handler threads running within the frontend router itself. So, the request can be directly
        // referenced as java objects without any need for deserialization.
        PutRequest sentRequest = (PutRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
        // However, we will create a new PutRequest object to represent the received Put request since the blob content
        // 'buffer' in PutRequest is accessed as 'stream' while writing to Store. Also, crc value for this request
        // would be null since it is only calculated (on the fly) when sending the request to network. It might be okay to
        // use null crc here since the scenario for which we are using crc (i.e. possibility of collisions due to fast
        // replication) as described in this PR https://github.com/linkedin/ambry/pull/549 might not be applicable when
        // frontends are talking to Azure.
        receivedRequest = new PutRequest(sentRequest.getCorrelationId(), sentRequest.getClientId(), sentRequest.getBlobId(), sentRequest.getBlobProperties(), sentRequest.getUsermetadata(), sentRequest.getBlobSize(), sentRequest.getBlobType(), sentRequest.getBlobEncryptionKey(), new ByteBufInputStream(sentRequest.getBlob()), null);
    } else {
        InputStream is = request.getInputStream();
        DataInputStream dis = is instanceof DataInputStream ? (DataInputStream) is : new DataInputStream(is);
        receivedRequest = PutRequest.readFrom(dis, clusterMap);
    }
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    metrics.putBlobRequestQueueTimeInMs.update(requestQueueTime);
    metrics.putBlobRequestRate.mark();
    long startTime = SystemTime.getInstance().milliseconds();
    PutResponse response = null;
    try {
        ServerErrorCode error = validateRequest(receivedRequest.getBlobId().getPartition(), RequestOrResponseType.PutRequest, false);
        if (error != ServerErrorCode.No_Error) {
            logger.error("Validating put request failed with error {} for request {}", error, receivedRequest);
            response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), error);
        } else {
            MessageFormatInputStream stream = new PutMessageFormatInputStream(receivedRequest.getBlobId(), receivedRequest.getBlobEncryptionKey(), receivedRequest.getBlobProperties(), receivedRequest.getUsermetadata(), receivedRequest.getBlobStream(), receivedRequest.getBlobSize(), receivedRequest.getBlobType());
            BlobProperties properties = receivedRequest.getBlobProperties();
            long expirationTime = Utils.addSecondsToEpochTime(receivedRequest.getBlobProperties().getCreationTimeInMs(), properties.getTimeToLiveInSeconds());
            MessageInfo info = new MessageInfo.Builder(receivedRequest.getBlobId(), stream.getSize(), properties.getAccountId(), properties.getContainerId(), properties.getCreationTimeInMs()).expirationTimeInMs(expirationTime).crc(receivedRequest.getCrc()).lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND).build();
            ArrayList<MessageInfo> infoList = new ArrayList<>();
            infoList.add(info);
            MessageFormatWriteSet writeset = new MessageFormatWriteSet(stream, infoList, false);
            Store storeToPut = storeManager.getStore(receivedRequest.getBlobId().getPartition());
            storeToPut.put(writeset);
            response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ServerErrorCode.No_Error);
            metrics.blobSizeInBytes.update(receivedRequest.getBlobSize());
            metrics.blobUserMetadataSizeInBytes.update(receivedRequest.getUsermetadata().limit());
            if (notification != null) {
                notification.onBlobReplicaCreated(currentNode.getHostname(), currentNode.getPort(), receivedRequest.getBlobId().getID(), BlobReplicaSourceType.PRIMARY);
            }
        }
    } catch (StoreException e) {
        logger.error("Store exception on a put with error code {} for request {}", e.getErrorCode(), receivedRequest, e);
        if (e.getErrorCode() == StoreErrorCodes.Already_Exist) {
            metrics.idAlreadyExistError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.IOError) {
            metrics.storeIOError.inc();
        } else {
            metrics.unExpectedStorePutError.inc();
        }
        response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
    } catch (Exception e) {
        logger.error("Unknown exception on a put for request {}", receivedRequest, e);
        response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ServerErrorCode.Unknown_Error);
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", receivedRequest, response, processingTime);
        metrics.putBlobProcessingTimeInMs.update(processingTime);
        metrics.updatePutBlobProcessingTimeBySize(receivedRequest.getBlobSize(), processingTime);
    }
    sendPutResponse(requestResponseChannel, response, request, metrics.putBlobResponseQueueTimeInMs, metrics.putBlobSendTimeInMs, metrics.putBlobTotalTimeInMs, totalTimeSpent, receivedRequest.getBlobSize(), metrics);
}
Also used : MessageFormatInputStream(com.github.ambry.messageformat.MessageFormatInputStream) DataInputStream(java.io.DataInputStream) ByteBufInputStream(io.netty.buffer.ByteBufInputStream) PutMessageFormatInputStream(com.github.ambry.messageformat.PutMessageFormatInputStream) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) Store(com.github.ambry.store.Store) ServerErrorCode(com.github.ambry.server.ServerErrorCode) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) MessageInfo(com.github.ambry.store.MessageInfo) BlobProperties(com.github.ambry.messageformat.BlobProperties) LocalChannelRequest(com.github.ambry.network.LocalRequestResponseChannel.LocalChannelRequest) MessageFormatWriteSet(com.github.ambry.messageformat.MessageFormatWriteSet)
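Compared with Example 14, this version builds the MessageInfo through MessageInfo.Builder instead of a long constructor call, which makes the optional fields (expiration time, crc, lifeVersion) explicit. The sketch below isolates that construction; blobId, recordSize, crc and properties are hypothetical placeholders for the received request's blob id, serialized record size, crc and BlobProperties.

// Hypothetical sketch isolating the MessageInfo.Builder usage shown above.
long expirationTimeMs = Utils.addSecondsToEpochTime(properties.getCreationTimeInMs(),
    properties.getTimeToLiveInSeconds());
MessageInfo info = new MessageInfo.Builder(blobId, recordSize, properties.getAccountId(),
        properties.getContainerId(), properties.getCreationTimeInMs())
    .expirationTimeInMs(expirationTimeMs)
    // crc may be null on the local-channel path, as explained in the comment above.
    .crc(crc)
    // LIFE_VERSION_FROM_FRONTEND marks the record as originating from a frontend request rather than replication.
    .lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND)
    .build();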

Aggregations

MessageFormatInputStream (com.github.ambry.messageformat.MessageFormatInputStream): 16
DeleteMessageFormatInputStream (com.github.ambry.messageformat.DeleteMessageFormatInputStream): 15
MessageFormatWriteSet (com.github.ambry.messageformat.MessageFormatWriteSet): 10
TtlUpdateMessageFormatInputStream (com.github.ambry.messageformat.TtlUpdateMessageFormatInputStream): 10
UndeleteMessageFormatInputStream (com.github.ambry.messageformat.UndeleteMessageFormatInputStream): 10
PutMessageFormatInputStream (com.github.ambry.messageformat.PutMessageFormatInputStream): 9
MessageInfo (com.github.ambry.store.MessageInfo): 9
IOException (java.io.IOException): 9
ArrayList (java.util.ArrayList): 9
StoreException (com.github.ambry.store.StoreException): 7
InputStream (java.io.InputStream): 5
SequenceInputStream (java.io.SequenceInputStream): 4
Timer (com.codahale.metrics.Timer): 3
BlobProperties (com.github.ambry.messageformat.BlobProperties): 3
MessageFormatException (com.github.ambry.messageformat.MessageFormatException): 3
Store (com.github.ambry.store.Store): 3
DataInputStream (java.io.DataInputStream): 3
ServerErrorCode (com.github.ambry.commons.ServerErrorCode): 2
ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream): 2
BlobId (com.github.ambry.commons.BlobId): 1