
Example 86 with MessageInfo

Use of com.github.ambry.store.MessageInfo in project ambry by LinkedIn.

From class RequestResponseTest, method doReplicaMetadataRequestTest.

private void doReplicaMetadataRequestTest(short responseVersionToUse, short requestVersionToUse, short messageInfoToUse, ReplicaType replicaType) throws IOException {
    MessageInfoAndMetadataListSerde.AUTO_VERSION = messageInfoToUse;
    MockClusterMap clusterMap = new MockClusterMap();
    List<ReplicaMetadataRequestInfo> replicaMetadataRequestInfoList = new ArrayList<ReplicaMetadataRequestInfo>();
    ReplicaMetadataRequestInfo replicaMetadataRequestInfo = new ReplicaMetadataRequestInfo(new MockPartitionId(), new MockFindToken(0, 1000), "localhost", "path", replicaType, requestVersionToUse);
    replicaMetadataRequestInfoList.add(replicaMetadataRequestInfo);
    ReplicaMetadataRequest request = new ReplicaMetadataRequest(1, "id", replicaMetadataRequestInfoList, 1000, requestVersionToUse);
    DataInputStream requestStream = serAndPrepForRead(request, -1, true);
    ReplicaMetadataRequest replicaMetadataRequestFromBytes = ReplicaMetadataRequest.readFrom(requestStream, new MockClusterMap(), new MockFindTokenHelper());
    Assert.assertEquals(replicaMetadataRequestFromBytes.getMaxTotalSizeOfEntriesInBytes(), 1000);
    Assert.assertEquals(replicaMetadataRequestFromBytes.getReplicaMetadataRequestInfoList().size(), 1);
    request.release();
    try {
        new ReplicaMetadataRequest(1, "id", null, 12, requestVersionToUse);
        Assert.fail("Serializing should have failed");
    } catch (IllegalArgumentException e) {
    // expected. Nothing to do
    }
    try {
        new ReplicaMetadataRequestInfo(new MockPartitionId(), null, "localhost", "path", replicaType, requestVersionToUse);
        Assert.fail("Construction should have failed");
    } catch (IllegalArgumentException e) {
    // expected. Nothing to do
    }
    long operationTimeMs = SystemTime.getInstance().milliseconds() + TestUtils.RANDOM.nextInt();
    int numResponseInfos = 5;
    int numMessagesInEachResponseInfo = 200;
    List<ReplicaMetadataResponseInfo> replicaMetadataResponseInfoList = new ArrayList<>();
    for (int j = 0; j < numResponseInfos; j++) {
        List<MessageInfo> messageInfoList = new ArrayList<MessageInfo>();
        int totalSizeOfAllMessages = 0;
        for (int i = 0; i < numMessagesInEachResponseInfo; i++) {
            int msgSize = TestUtils.RANDOM.nextInt(1000) + 1;
            short accountId = Utils.getRandomShort(TestUtils.RANDOM);
            short containerId = Utils.getRandomShort(TestUtils.RANDOM);
            BlobId id = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), false, BlobId.BlobDataType.DATACHUNK);
            MessageInfo messageInfo = new MessageInfo(id, msgSize, false, false, true, Utils.Infinite_Time, null, accountId, containerId, operationTimeMs, (short) 1);
            messageInfoList.add(messageInfo);
            totalSizeOfAllMessages += msgSize;
        }
        ReplicaMetadataResponseInfo responseInfo = new ReplicaMetadataResponseInfo(clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), replicaType, new MockFindToken(0, 1000), messageInfoList, 1000, responseVersionToUse);
        Assert.assertEquals("Total size of messages not as expected", totalSizeOfAllMessages, responseInfo.getTotalSizeOfAllMessages());
        replicaMetadataResponseInfoList.add(responseInfo);
    }
    ReplicaMetadataResponse response = new ReplicaMetadataResponse(1234, "clientId", ServerErrorCode.No_Error, replicaMetadataResponseInfoList, responseVersionToUse);
    requestStream = serAndPrepForRead(response, -1, false);
    ReplicaMetadataResponse deserializedReplicaMetadataResponse = ReplicaMetadataResponse.readFrom(requestStream, new MockFindTokenHelper(), clusterMap);
    Assert.assertEquals(deserializedReplicaMetadataResponse.getCorrelationId(), 1234);
    Assert.assertEquals(deserializedReplicaMetadataResponse.getError(), ServerErrorCode.No_Error);
    Assert.assertEquals("ReplicaMetadataResponse list size mismatch ", numResponseInfos, deserializedReplicaMetadataResponse.getReplicaMetadataResponseInfoList().size());
    for (int j = 0; j < replicaMetadataResponseInfoList.size(); j++) {
        ReplicaMetadataResponseInfo originalMetadataResponse = replicaMetadataResponseInfoList.get(j);
        ReplicaMetadataResponseInfo replicaMetadataResponseInfo = deserializedReplicaMetadataResponse.getReplicaMetadataResponseInfoList().get(j);
        Assert.assertEquals("MsgInfo list size in ReplicaMetadataResponse mismatch ", numMessagesInEachResponseInfo, replicaMetadataResponseInfo.getMessageInfoList().size());
        Assert.assertEquals("Total size of messages not as expected", originalMetadataResponse.getTotalSizeOfAllMessages(), replicaMetadataResponseInfo.getTotalSizeOfAllMessages());
        List<MessageInfo> deserializedMsgInfoList = replicaMetadataResponseInfo.getMessageInfoList();
        for (int i = 0; i < originalMetadataResponse.getMessageInfoList().size(); i++) {
            MessageInfo originalMsgInfo = originalMetadataResponse.getMessageInfoList().get(i);
            MessageInfo msgInfo = deserializedMsgInfoList.get(i);
            Assert.assertEquals("MsgInfo size mismatch ", originalMsgInfo.getSize(), msgInfo.getSize());
            Assert.assertEquals("MsgInfo key mismatch ", originalMsgInfo.getStoreKey(), msgInfo.getStoreKey());
            Assert.assertEquals("MsgInfo expiration value mismatch ", Utils.Infinite_Time, msgInfo.getExpirationTimeInMs());
            if (response.getVersionId() >= ReplicaMetadataResponse.REPLICA_METADATA_RESPONSE_VERSION_V_3) {
                Assert.assertEquals("AccountId mismatch ", originalMsgInfo.getAccountId(), msgInfo.getAccountId());
                Assert.assertEquals("ContainerId mismatch ", originalMsgInfo.getContainerId(), msgInfo.getContainerId());
                Assert.assertEquals("OperationTime mismatch ", operationTimeMs, msgInfo.getOperationTimeMs());
            } else {
                Assert.assertEquals("AccountId mismatch ", UNKNOWN_ACCOUNT_ID, msgInfo.getAccountId());
                Assert.assertEquals("ContainerId mismatch ", UNKNOWN_CONTAINER_ID, msgInfo.getContainerId());
                Assert.assertEquals("OperationTime mismatch ", Utils.Infinite_Time, msgInfo.getOperationTimeMs());
            }
            if (messageInfoToUse >= MessageInfoAndMetadataListSerde.VERSION_6) {
                Assert.assertTrue(msgInfo.isUndeleted());
                Assert.assertEquals("LifeVersion mismatch", (short) 1, msgInfo.getLifeVersion());
            } else {
                Assert.assertFalse(msgInfo.isUndeleted());
                Assert.assertEquals("LifeVersion mismatch", (short) 0, msgInfo.getLifeVersion());
            }
        }
    }
    response.release();
    // to ensure that the toString() representation does not go overboard, a rough bound check is executed here.
    // a rough estimate is that each response info should contribute about 500 chars to the toString() representation
    int maxLength = 100 + numResponseInfos * 500;
    Assert.assertTrue("toString() representation longer than " + maxLength + " characters", response.toString().length() < maxLength);
    // test toString() of a ReplicaMetadataResponseInfo without any messages
    ReplicaMetadataResponseInfo responseInfo = new ReplicaMetadataResponseInfo(clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), replicaType, new MockFindToken(0, 1000), Collections.emptyList(), 1000, responseVersionToUse);
    Assert.assertTrue("Length of toString() should be > 0", responseInfo.toString().length() > 0);
    // test toString() of a ReplicaMetadataResponse without any ReplicaMetadataResponseInfo
    response = new ReplicaMetadataResponse(1234, "clientId", ServerErrorCode.No_Error, Collections.emptyList(), responseVersionToUse);
    Assert.assertTrue("Length of toString() should be > 0", response.toString().length() > 0);
    response.release();
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) ArrayList(java.util.ArrayList) DataInputStream(java.io.DataInputStream) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) MessageInfo(com.github.ambry.store.MessageInfo) BlobId(com.github.ambry.commons.BlobId) MockClusterMap(com.github.ambry.clustermap.MockClusterMap)
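
The helper above is parameterized on the request version, the response version, and the MessageInfo serde version. A minimal sketch of how it might be driven is shown below; the hard-coded request version and the ReplicaType values are assumptions based on the checks inside doReplicaMetadataRequestTest, not the version matrix the real RequestResponseTest uses.

// Hypothetical driver for the helper above; the request version literal and the ReplicaType
// constants are assumptions, and the real test enumerates its own version combinations.
@Test
public void replicaMetadataRequestResponseVersionsSketch() throws IOException {
    // placeholder: the real test iterates over the ReplicaMetadataRequest version constants
    short requestVersion = 2;
    short responseVersion = ReplicaMetadataResponse.REPLICA_METADATA_RESPONSE_VERSION_V_3;
    for (ReplicaType replicaType : new ReplicaType[] { ReplicaType.DISK_BACKED, ReplicaType.CLOUD_BACKED }) {
        // VERSION_6 exercises the undeleted/lifeVersion assertions in the helper; an older serde
        // version would exercise the else-branch of those assertions.
        doReplicaMetadataRequestTest(responseVersion, requestVersion, MessageInfoAndMetadataListSerde.VERSION_6, replicaType);
    }
}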

Example 87 with MessageInfo

Use of com.github.ambry.store.MessageInfo in project ambry by LinkedIn.

From class AmbryRequests, method handleTtlUpdateRequest.

@Override
public void handleTtlUpdateRequest(NetworkRequest request) throws IOException, InterruptedException {
    TtlUpdateRequest updateRequest;
    if (request instanceof LocalChannelRequest) {
        // This is the case where handleTtlUpdateRequest is called when frontends are talking directly to Azure. In
        // this case, the method is called by request handler threads running within the frontend router itself, so
        // the request can be referenced directly as a Java object without any deserialization.
        updateRequest = (TtlUpdateRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
    } else {
        updateRequest = TtlUpdateRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
    }
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    metrics.updateBlobTtlRequestQueueTimeInMs.update(requestQueueTime);
    metrics.updateBlobTtlRequestRate.mark();
    long startTime = SystemTime.getInstance().milliseconds();
    TtlUpdateResponse response = null;
    try {
        ServerErrorCode error = validateRequest(updateRequest.getBlobId().getPartition(), RequestOrResponseType.TtlUpdateRequest, false);
        if (error != ServerErrorCode.No_Error) {
            logger.error("Validating TtlUpdateRequest failed with error {} for request {}", error, updateRequest);
            response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), error);
        } else {
            BlobId convertedStoreKey = (BlobId) getConvertedStoreKeys(Collections.singletonList(updateRequest.getBlobId())).get(0);
            MessageInfo info = new MessageInfo.Builder(convertedStoreKey, -1, convertedStoreKey.getAccountId(), convertedStoreKey.getContainerId(), updateRequest.getOperationTimeInMs()).isTtlUpdated(true).expirationTimeInMs(updateRequest.getExpiresAtMs()).lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND).build();
            Store store = storeManager.getStore(updateRequest.getBlobId().getPartition());
            store.updateTtl(Collections.singletonList(info));
            response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ServerErrorCode.No_Error);
            if (notification != null) {
                notification.onBlobReplicaUpdated(currentNode.getHostname(), currentNode.getPort(), convertedStoreKey.getID(), BlobReplicaSourceType.PRIMARY, UpdateType.TTL_UPDATE, info);
            }
        }
    } catch (StoreException e) {
        boolean logInErrorLevel = false;
        if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
            metrics.idNotFoundError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
            metrics.ttlExpiredError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
            metrics.idDeletedError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
            metrics.ttlUpdateAuthorizationFailure.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.Already_Updated) {
            metrics.ttlAlreadyUpdatedError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.Update_Not_Allowed) {
            metrics.ttlUpdateRejectedError.inc();
        } else {
            logInErrorLevel = true;
            metrics.unExpectedStoreTtlUpdateError.inc();
        }
        if (logInErrorLevel) {
            logger.error("Store exception on a TTL update with error code {} for request {}", e.getErrorCode(), updateRequest, e);
        } else {
            logger.trace("Store exception on a TTL update with error code {} for request {}", e.getErrorCode(), updateRequest, e);
        }
        response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
    } catch (Exception e) {
        logger.error("Unknown exception for TTL update request {}", updateRequest, e);
        response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ServerErrorCode.Unknown_Error);
        metrics.unExpectedStoreTtlUpdateError.inc();
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", updateRequest, response, processingTime);
        metrics.updateBlobTtlProcessingTimeInMs.update(processingTime);
    }
    requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(metrics.updateBlobTtlResponseQueueTimeInMs, metrics.updateBlobTtlSendTimeInMs, metrics.updateBlobTtlTotalTimeInMs, null, null, totalTimeSpent));
}
Also used : ServerNetworkResponseMetrics(com.github.ambry.network.ServerNetworkResponseMetrics) Store(com.github.ambry.store.Store) DataInputStream(java.io.DataInputStream) ServerErrorCode(com.github.ambry.server.ServerErrorCode) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) MessageInfo(com.github.ambry.store.MessageInfo) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) StoreException(com.github.ambry.store.StoreException) LocalChannelRequest(com.github.ambry.network.LocalRequestResponseChannel.LocalChannelRequest) BlobId(com.github.ambry.commons.BlobId)
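
The heart of the handler above is the MessageInfo it hands to Store.updateTtl. The sketch below isolates that construction; the comments reflect how the fields are used in this handler (size of -1, store-resolved life version) and are an interpretation of this usage rather than the MessageInfo documentation.

// Hypothetical helper isolating the TTL-update record built inside handleTtlUpdateRequest above.
private static MessageInfo buildTtlUpdateInfo(BlobId convertedStoreKey, TtlUpdateRequest updateRequest) {
    return new MessageInfo.Builder(convertedStoreKey, -1 /* size is not needed for a TTL update */,
        convertedStoreKey.getAccountId(), convertedStoreKey.getContainerId(), updateRequest.getOperationTimeInMs())
        // mark the record as a TTL update and carry the new expiry from the request
        .isTtlUpdated(true)
        .expirationTimeInMs(updateRequest.getExpiresAtMs())
        // the life version is left for the store to resolve, since this update originates from the frontend
        .lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND)
        .build();
}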

Example 88 with MessageInfo

Use of com.github.ambry.store.MessageInfo in project ambry by LinkedIn.

From class AmbryRequests, method handleDeleteRequest.

@Override
public void handleDeleteRequest(NetworkRequest request) throws IOException, InterruptedException {
    DeleteRequest deleteRequest;
    if (request instanceof LocalChannelRequest) {
        // This is the case where handleDeleteRequest is called when frontends are talking directly to Azure. In
        // this case, the method is called by request handler threads running within the frontend router itself, so
        // the request can be referenced directly as a Java object without any deserialization.
        deleteRequest = (DeleteRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
    } else {
        deleteRequest = DeleteRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
    }
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    metrics.deleteBlobRequestQueueTimeInMs.update(requestQueueTime);
    metrics.deleteBlobRequestRate.mark();
    long startTime = SystemTime.getInstance().milliseconds();
    DeleteResponse response = null;
    try {
        StoreKey convertedStoreKey = getConvertedStoreKeys(Collections.singletonList(deleteRequest.getBlobId())).get(0);
        ServerErrorCode error = validateRequest(deleteRequest.getBlobId().getPartition(), RequestOrResponseType.DeleteRequest, false);
        if (error != ServerErrorCode.No_Error) {
            logger.error("Validating delete request failed with error {} for request {}", error, deleteRequest);
            response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), error);
        } else {
            BlobId convertedBlobId = (BlobId) convertedStoreKey;
            MessageInfo info = new MessageInfo.Builder(convertedBlobId, -1, convertedBlobId.getAccountId(), convertedBlobId.getContainerId(), deleteRequest.getDeletionTimeInMs()).isDeleted(true).lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND).build();
            Store storeToDelete = storeManager.getStore(deleteRequest.getBlobId().getPartition());
            storeToDelete.delete(Collections.singletonList(info));
            response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.No_Error);
            if (notification != null) {
                notification.onBlobReplicaDeleted(currentNode.getHostname(), currentNode.getPort(), convertedStoreKey.getID(), BlobReplicaSourceType.PRIMARY);
            }
        }
    } catch (StoreException e) {
        boolean logInErrorLevel = false;
        if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
            metrics.idNotFoundError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
            metrics.ttlExpiredError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
            metrics.idDeletedError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
            metrics.deleteAuthorizationFailure.inc();
        } else {
            logInErrorLevel = true;
            metrics.unExpectedStoreDeleteError.inc();
        }
        if (logInErrorLevel) {
            logger.error("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
        } else {
            logger.trace("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
        }
        response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
    } catch (Exception e) {
        logger.error("Unknown exception for delete request {}", deleteRequest, e);
        response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.Unknown_Error);
        metrics.unExpectedStoreDeleteError.inc();
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", deleteRequest, response, processingTime);
        metrics.deleteBlobProcessingTimeInMs.update(processingTime);
    }
    requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(metrics.deleteBlobResponseQueueTimeInMs, metrics.deleteBlobSendTimeInMs, metrics.deleteBlobTotalTimeInMs, null, null, totalTimeSpent));
}
Also used : ServerNetworkResponseMetrics(com.github.ambry.network.ServerNetworkResponseMetrics) Store(com.github.ambry.store.Store) DataInputStream(java.io.DataInputStream) StoreKey(com.github.ambry.store.StoreKey) ServerErrorCode(com.github.ambry.server.ServerErrorCode) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) MessageInfo(com.github.ambry.store.MessageInfo) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) StoreException(com.github.ambry.store.StoreException) LocalChannelRequest(com.github.ambry.network.LocalRequestResponseChannel.LocalChannelRequest) BlobId(com.github.ambry.commons.BlobId)
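
handleTtlUpdateRequest and handleDeleteRequest above share the same StoreException handling shape: increment an operation-specific counter for expected error codes, log unexpected ones at error level, and map the store error to a ServerErrorCode via ErrorMapping. The sketch below shows one way that shared shape could be factored out; the helper and its map-of-counters signature are hypothetical and do not exist in AmbryRequests.

// Hypothetical consolidation of the per-operation StoreException handling shown in the two handlers above.
private ServerErrorCode handleStoreException(StoreException e, Object request, String operation,
    Map<StoreErrorCodes, Counter> expectedErrorCounters, Counter unexpectedErrorCounter) {
    Counter expected = expectedErrorCounters.get(e.getErrorCode());
    if (expected != null) {
        // an anticipated store outcome: count it and log quietly
        expected.inc();
        logger.trace("Store exception on a {} with error code {} for request {}", operation, e.getErrorCode(), request, e);
    } else {
        // an unexpected store failure: count it and log at error level
        unexpectedErrorCounter.inc();
        logger.error("Store exception on a {} with error code {} for request {}", operation, e.getErrorCode(), request, e);
    }
    return ErrorMapping.getStoreErrorMapping(e.getErrorCode());
}

The delete handler, for example, would pass a map from ID_Not_Found, TTL_Expired, ID_Deleted and Authorization_Failure to their respective counters, plus metrics.unExpectedStoreDeleteError as the fallback.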

Example 89 with MessageInfo

Use of com.github.ambry.store.MessageInfo in project ambry by LinkedIn.

From class ReplicaThread, method processReplicaMetadataResponse.

/**
 * Takes the set of missing store messages and the message list from the remote store, applies remote updates (such
 * as deletes) to messages that already exist locally, and removes from the missing set any message that is deleted
 * or expired on the remote store, since such messages do not need to be replicated.
 * @param missingRemoteStoreMessages The list of messages missing from the local store
 * @param replicaMetadataResponseInfo The replica metadata response from the remote store
 * @param remoteReplicaInfo The remote replica that is being replicated from
 * @param remoteNode The remote node from which replication needs to happen
 * @param remoteKeyToLocalKeyMap map mapping remote keys to local key equivalents
 * @throws StoreException
 */
void processReplicaMetadataResponse(Set<MessageInfo> missingRemoteStoreMessages, ReplicaMetadataResponseInfo replicaMetadataResponseInfo, RemoteReplicaInfo remoteReplicaInfo, DataNodeId remoteNode, Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap) throws StoreException {
    long startTime = time.milliseconds();
    List<MessageInfo> messageInfoList = replicaMetadataResponseInfo.getMessageInfoList();
    for (MessageInfo messageInfo : messageInfoList) {
        BlobId blobId = (BlobId) messageInfo.getStoreKey();
        if (remoteReplicaInfo.getLocalReplicaId().getPartitionId().compareTo(blobId.getPartition()) != 0) {
            throw new IllegalStateException("Blob id is not in the expected partition Actual partition " + blobId.getPartition() + " Expected partition " + remoteReplicaInfo.getLocalReplicaId().getPartitionId());
        }
        BlobId localKey = (BlobId) remoteKeyToLocalKeyMap.get(messageInfo.getStoreKey());
        if (localKey == null) {
            missingRemoteStoreMessages.remove(messageInfo);
            logger.trace("Remote node: {} Thread name: {} Remote replica: {} Remote key deprecated locally: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
        } else if (missingRemoteStoreMessages.contains(messageInfo)) {
            // the key is missing locally; if it is deleted or expired remotely, it does not need to be replicated.
            if (messageInfo.isDeleted()) {
                // if the key is not present locally and if the remote replica has the message in deleted state,
                // it is not considered missing locally.
                missingRemoteStoreMessages.remove(messageInfo);
                logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key in deleted state remotely: {} Local key: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey(), localKey);
                // a replica-deleted notification can be fired here as long as the Delete is guaranteed to have taken effect locally.
                if (notification != null) {
                    notification.onBlobReplicaDeleted(dataNodeId.getHostname(), dataNodeId.getPort(), localKey.getID(), BlobReplicaSourceType.REPAIRED);
                }
            } else if (messageInfo.isExpired()) {
                // if the key is not present locally and if the remote replica has the key as expired,
                // it is not considered missing locally.
                missingRemoteStoreMessages.remove(messageInfo);
                logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key in expired state remotely {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), localKey);
            }
        } else {
            // if the blob is from a deprecated container, then nothing needs to be done.
            if (replicationConfig.replicationContainerDeletionEnabled && skipPredicate != null && skipPredicate.test(messageInfo)) {
                continue;
            }
            applyUpdatesToBlobInLocalStore(messageInfo, remoteReplicaInfo, localKey);
        }
    }
    if (replicatingFromRemoteColo) {
        replicationMetrics.interColoProcessMetadataResponseTime.get(datacenterName).update(time.milliseconds() - startTime);
    } else {
        replicationMetrics.intraColoProcessMetadataResponseTime.update(time.milliseconds() - startTime);
    }
}
Also used : BlobId(com.github.ambry.commons.BlobId) MessageInfo(com.github.ambry.store.MessageInfo)
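
The branching in processReplicaMetadataResponse reduces to a per-message decision: drop the key from the missing set, leave it there to be fetched, or apply the remote update locally. The sketch below restates that decision as a pure function to make the method above easier to read; the enum and helper are illustrative only and do not exist in ReplicaThread.

// Hypothetical restatement of the per-message decision made in processReplicaMetadataResponse above.
enum MetadataAction { REMOVE_FROM_MISSING, LEAVE_IN_MISSING, APPLY_UPDATE_LOCALLY, SKIP }

static MetadataAction decide(MessageInfo remoteInfo, boolean localKeyExists, boolean missingLocally,
    boolean fromDeprecatedContainer) {
    if (!localKeyExists) {
        // the remote key is deprecated locally and will never be replicated
        return MetadataAction.REMOVE_FROM_MISSING;
    }
    if (missingLocally) {
        // a blob already deleted or expired remotely does not need to be fetched;
        // otherwise it stays in the missing set and will be replicated
        return (remoteInfo.isDeleted() || remoteInfo.isExpired()) ? MetadataAction.REMOVE_FROM_MISSING
            : MetadataAction.LEAVE_IN_MISSING;
    }
    // the blob already exists locally: apply remote updates unless the container-deletion predicate skips it
    return fromDeprecatedContainer ? MetadataAction.SKIP : MetadataAction.APPLY_UPDATE_LOCALLY;
}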

Example 90 with MessageInfo

Use of com.github.ambry.store.MessageInfo in project ambry by LinkedIn.

From class ReplicaThread, method applyTtlUpdate.

/**
 * Applies a TTL update to the blob described by {@code messageInfo}.
 * @param messageInfo the {@link MessageInfo} that will be transformed into a TTL update
 * @param remoteReplicaInfo The remote replica that is being replicated from
 * @throws StoreException
 */
private void applyTtlUpdate(MessageInfo messageInfo, RemoteReplicaInfo remoteReplicaInfo) throws StoreException {
    DataNodeId remoteNode = remoteReplicaInfo.getReplicaId().getDataNodeId();
    try {
        // NOTE: It is possible that the key in question may have expired and this TTL update is being applied after it
        // is deemed expired. The store will accept the op (BlobStore looks at whether the op was valid to do at the time
        // of the op, not current time) but if compaction is running at the same time and has decided to clean up the
        // record before this ttl update was applied (and this didn't find the key missing because compaction has not yet
        // committed), then we have a bad situation where only a TTL update exists in the store. This problem has to be
        // addressed. This can only happen if replication is far behind (for e.g due to a server being down for a long
        // time). Won't happen if a server is being recreated.
        messageInfo = new MessageInfo.Builder(messageInfo).isTtlUpdated(true).build();
        remoteReplicaInfo.getLocalStore().updateTtl(Collections.singletonList(messageInfo));
        logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key ttl updated id: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
    } catch (StoreException e) {
        // The blob may already be deleted or updated, which is alright
        if (e.getErrorCode() == StoreErrorCodes.ID_Deleted || e.getErrorCode() == StoreErrorCodes.Already_Updated) {
            logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key already updated: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
        } else {
            throw e;
        }
    }
    // a replica-updated notification can be fired here as long as the update is guaranteed to have taken effect locally.
    if (notification != null) {
        notification.onBlobReplicaUpdated(dataNodeId.getHostname(), dataNodeId.getPort(), messageInfo.getStoreKey().getID(), BlobReplicaSourceType.REPAIRED, UpdateType.TTL_UPDATE, messageInfo);
    }
}
Also used : DataNodeId(com.github.ambry.clustermap.DataNodeId) MessageInfo(com.github.ambry.store.MessageInfo) StoreException(com.github.ambry.store.StoreException)
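
The long comment in applyTtlUpdate notes that the store judges the operation against the time at which it was issued, not the current wall-clock time. The toy check below illustrates that idea under the assumption that Utils.Infinite_Time denotes "no expiry"; it is not the actual BlobStore validation code.

// Illustration only: a TTL update is acceptable if the blob had not expired as of the operation's
// own time, even if it has since expired by wall-clock time. Not the real BlobStore check.
static boolean ttlUpdateValidAtOpTime(long expirationTimeMsAtOp, long operationTimeMs) {
    return expirationTimeMsAtOp == Utils.Infinite_Time || operationTimeMs < expirationTimeMsAtOp;
}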

Aggregations

MessageInfo (com.github.ambry.store.MessageInfo): 109
ArrayList (java.util.ArrayList): 49
StoreKey (com.github.ambry.store.StoreKey): 42
ByteBuffer (java.nio.ByteBuffer): 38
BlobId (com.github.ambry.commons.BlobId): 36
StoreException (com.github.ambry.store.StoreException): 30
DataInputStream (java.io.DataInputStream): 23
Test (org.junit.Test): 22
HashMap (java.util.HashMap): 21
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 19
PartitionId (com.github.ambry.clustermap.PartitionId): 19
IOException (java.io.IOException): 19
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 18
ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream): 18
InputStream (java.io.InputStream): 17
List (java.util.List): 16
ClusterMap (com.github.ambry.clustermap.ClusterMap): 15
Map (java.util.Map): 15
MockMessageWriteSet (com.github.ambry.store.MockMessageWriteSet): 13
HashSet (java.util.HashSet): 13