
Example 21 with Store

Use of com.github.ambry.store.Store in project ambry by LinkedIn.

From class AmbryRequests, method handlePutRequest.

@Override
public void handlePutRequest(NetworkRequest request) throws IOException, InterruptedException {
    PutRequest receivedRequest;
    if (request instanceof LocalChannelRequest) {
        // This is the case where handlePutRequest is called when frontends are writing to Azure: the method is called
        // by request handler threads running within the frontend router itself, so the request can be referenced
        // directly as a Java object without any need for deserialization.
        PutRequest sentRequest = (PutRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
        // However, we create a new PutRequest object to represent the received put request, since the blob content
        // 'buffer' in PutRequest is accessed as a 'stream' while writing to the Store. Also, the crc value for this
        // request will be null, since it is only calculated (on the fly) when sending the request over the network.
        // A null crc is probably acceptable here: the scenario crc guards against (the possibility of collisions due
        // to fast replication, described in https://github.com/linkedin/ambry/pull/549) may not apply when frontends
        // are talking to Azure.
        receivedRequest =
            new PutRequest(sentRequest.getCorrelationId(), sentRequest.getClientId(), sentRequest.getBlobId(),
                sentRequest.getBlobProperties(), sentRequest.getUsermetadata(), sentRequest.getBlobSize(),
                sentRequest.getBlobType(), sentRequest.getBlobEncryptionKey(),
                new ByteBufInputStream(sentRequest.getBlob()), null);
    } else {
        InputStream is = request.getInputStream();
        DataInputStream dis = is instanceof DataInputStream ? (DataInputStream) is : new DataInputStream(is);
        receivedRequest = PutRequest.readFrom(dis, clusterMap);
    }
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    metrics.putBlobRequestQueueTimeInMs.update(requestQueueTime);
    metrics.putBlobRequestRate.mark();
    long startTime = SystemTime.getInstance().milliseconds();
    PutResponse response = null;
    try {
        ServerErrorCode error = validateRequest(receivedRequest.getBlobId().getPartition(), RequestOrResponseType.PutRequest, false);
        if (error != ServerErrorCode.No_Error) {
            logger.error("Validating put request failed with error {} for request {}", error, receivedRequest);
            response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), error);
        } else {
            MessageFormatInputStream stream =
                new PutMessageFormatInputStream(receivedRequest.getBlobId(), receivedRequest.getBlobEncryptionKey(),
                    receivedRequest.getBlobProperties(), receivedRequest.getUsermetadata(),
                    receivedRequest.getBlobStream(), receivedRequest.getBlobSize(), receivedRequest.getBlobType());
            BlobProperties properties = receivedRequest.getBlobProperties();
            long expirationTime =
                Utils.addSecondsToEpochTime(properties.getCreationTimeInMs(), properties.getTimeToLiveInSeconds());
            MessageInfo info =
                new MessageInfo.Builder(receivedRequest.getBlobId(), stream.getSize(), properties.getAccountId(),
                    properties.getContainerId(), properties.getCreationTimeInMs()).expirationTimeInMs(expirationTime)
                    .crc(receivedRequest.getCrc())
                    .lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND)
                    .build();
            ArrayList<MessageInfo> infoList = new ArrayList<>();
            infoList.add(info);
            MessageFormatWriteSet writeset = new MessageFormatWriteSet(stream, infoList, false);
            Store storeToPut = storeManager.getStore(receivedRequest.getBlobId().getPartition());
            storeToPut.put(writeset);
            response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ServerErrorCode.No_Error);
            metrics.blobSizeInBytes.update(receivedRequest.getBlobSize());
            metrics.blobUserMetadataSizeInBytes.update(receivedRequest.getUsermetadata().limit());
            if (notification != null) {
                notification.onBlobReplicaCreated(currentNode.getHostname(), currentNode.getPort(), receivedRequest.getBlobId().getID(), BlobReplicaSourceType.PRIMARY);
            }
        }
    } catch (StoreException e) {
        logger.error("Store exception on a put with error code {} for request {}", e.getErrorCode(), receivedRequest, e);
        if (e.getErrorCode() == StoreErrorCodes.Already_Exist) {
            metrics.idAlreadyExistError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.IOError) {
            metrics.storeIOError.inc();
        } else {
            metrics.unExpectedStorePutError.inc();
        }
        response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
    } catch (Exception e) {
        logger.error("Unknown exception on a put for request {}", receivedRequest, e);
        response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ServerErrorCode.Unknown_Error);
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", receivedRequest, response, processingTime);
        metrics.putBlobProcessingTimeInMs.update(processingTime);
        metrics.updatePutBlobProcessingTimeBySize(receivedRequest.getBlobSize(), processingTime);
    }
    sendPutResponse(requestResponseChannel, response, request, metrics.putBlobResponseQueueTimeInMs,
        metrics.putBlobSendTimeInMs, metrics.putBlobTotalTimeInMs, totalTimeSpent, receivedRequest.getBlobSize(),
        metrics);
}
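
The happy path above reduces to a small, reusable pattern: serialize the blob into Ambry's on-disk message format, describe it with a MessageInfo, bundle both into a write set, and hand that to the partition's Store. Below is a minimal sketch of that pattern in isolation; it is an illustration distilled from the handler, not the handler's exact code, and the helper name putBlob is hypothetical.

import com.github.ambry.commons.BlobId;
import com.github.ambry.messageformat.BlobProperties;
import com.github.ambry.messageformat.BlobType;
import com.github.ambry.messageformat.MessageFormatInputStream;
import com.github.ambry.messageformat.MessageFormatWriteSet;
import com.github.ambry.messageformat.PutMessageFormatInputStream;
import com.github.ambry.server.StoreManager;
import com.github.ambry.store.MessageInfo;
import com.github.ambry.store.Store;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;

// Hypothetical helper: write one blob into the Store of its partition.
static void putBlob(StoreManager storeManager, BlobId blobId, BlobProperties properties, ByteBuffer userMetadata,
    InputStream blobStream, long blobSize, BlobType blobType) throws Exception {
    // Serialize the blob fields into the on-disk message format (no encryption key in this sketch).
    MessageFormatInputStream stream =
        new PutMessageFormatInputStream(blobId, null, properties, userMetadata, blobStream, blobSize, blobType);
    // Describe the message for the store index: id, serialized size, ownership, creation time.
    MessageInfo info = new MessageInfo.Builder(blobId, stream.getSize(), properties.getAccountId(),
        properties.getContainerId(), properties.getCreationTimeInMs()).build();
    // A write set can carry a batch of messages; the put handler writes exactly one.
    ArrayList<MessageInfo> infos = new ArrayList<>();
    infos.add(info);
    Store store = storeManager.getStore(blobId.getPartition());
    store.put(new MessageFormatWriteSet(stream, infos, false));
}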
Also used : MessageFormatInputStream(com.github.ambry.messageformat.MessageFormatInputStream) DataInputStream(java.io.DataInputStream) ByteBufInputStream(io.netty.buffer.ByteBufInputStream) PutMessageFormatInputStream(com.github.ambry.messageformat.PutMessageFormatInputStream) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) Store(com.github.ambry.store.Store) ServerErrorCode(com.github.ambry.server.ServerErrorCode) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) MessageInfo(com.github.ambry.store.MessageInfo) BlobProperties(com.github.ambry.messageformat.BlobProperties) LocalChannelRequest(com.github.ambry.network.LocalRequestResponseChannel.LocalChannelRequest) MessageFormatWriteSet(com.github.ambry.messageformat.MessageFormatWriteSet)

Example 22 with Store

Use of com.github.ambry.store.Store in project ambry by LinkedIn.

From class AmbryRequests, method handleTtlUpdateRequest.

@Override
public void handleTtlUpdateRequest(NetworkRequest request) throws IOException, InterruptedException {
    TtlUpdateRequest updateRequest;
    if (request instanceof LocalChannelRequest) {
        // This is the case where handleTtlUpdateRequest is called when frontends are talking to Azure: the method is
        // called by request handler threads running within the frontend router itself, so the request can be
        // referenced directly as a Java object without any need for deserialization.
        updateRequest = (TtlUpdateRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
    } else {
        updateRequest = TtlUpdateRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
    }
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    metrics.updateBlobTtlRequestQueueTimeInMs.update(requestQueueTime);
    metrics.updateBlobTtlRequestRate.mark();
    long startTime = SystemTime.getInstance().milliseconds();
    TtlUpdateResponse response = null;
    try {
        ServerErrorCode error = validateRequest(updateRequest.getBlobId().getPartition(), RequestOrResponseType.TtlUpdateRequest, false);
        if (error != ServerErrorCode.No_Error) {
            logger.error("Validating TtlUpdateRequest failed with error {} for request {}", error, updateRequest);
            response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), error);
        } else {
            BlobId convertedStoreKey =
                (BlobId) getConvertedStoreKeys(Collections.singletonList(updateRequest.getBlobId())).get(0);
            MessageInfo info = new MessageInfo.Builder(convertedStoreKey, -1, convertedStoreKey.getAccountId(),
                convertedStoreKey.getContainerId(), updateRequest.getOperationTimeInMs()).isTtlUpdated(true)
                .expirationTimeInMs(updateRequest.getExpiresAtMs())
                .lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND)
                .build();
            Store store = storeManager.getStore(updateRequest.getBlobId().getPartition());
            store.updateTtl(Collections.singletonList(info));
            response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ServerErrorCode.No_Error);
            if (notification != null) {
                notification.onBlobReplicaUpdated(currentNode.getHostname(), currentNode.getPort(), convertedStoreKey.getID(), BlobReplicaSourceType.PRIMARY, UpdateType.TTL_UPDATE, info);
            }
        }
    } catch (StoreException e) {
        boolean logInErrorLevel = false;
        if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
            metrics.idNotFoundError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
            metrics.ttlExpiredError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
            metrics.idDeletedError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
            metrics.ttlUpdateAuthorizationFailure.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.Already_Updated) {
            metrics.ttlAlreadyUpdatedError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.Update_Not_Allowed) {
            metrics.ttlUpdateRejectedError.inc();
        } else {
            logInErrorLevel = true;
            metrics.unExpectedStoreTtlUpdateError.inc();
        }
        if (logInErrorLevel) {
            logger.error("Store exception on a TTL update with error code {} for request {}", e.getErrorCode(), updateRequest, e);
        } else {
            logger.trace("Store exception on a TTL update with error code {} for request {}", e.getErrorCode(), updateRequest, e);
        }
        response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
    } catch (Exception e) {
        logger.error("Unknown exception for TTL update request {}", updateRequest, e);
        response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ServerErrorCode.Unknown_Error);
        metrics.unExpectedStoreTtlUpdateError.inc();
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", updateRequest, response, processingTime);
        metrics.updateBlobTtlProcessingTimeInMs.update(processingTime);
    }
    requestResponseChannel.sendResponse(response, request,
        new ServerNetworkResponseMetrics(metrics.updateBlobTtlResponseQueueTimeInMs, metrics.updateBlobTtlSendTimeInMs,
            metrics.updateBlobTtlTotalTimeInMs, null, null, totalTimeSpent));
}
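
Stripped of validation, metrics, and error mapping, the store interaction above is a single updateTtl call with a MessageInfo that flags the update and carries the new expiry. A minimal sketch follows; the helper name makePermanent is hypothetical, and it assumes Utils.Infinite_Time as the "never expires" sentinel used elsewhere in Ambry.

import com.github.ambry.commons.BlobId;
import com.github.ambry.server.StoreManager;
import com.github.ambry.store.MessageInfo;
import com.github.ambry.store.Store;
import com.github.ambry.utils.SystemTime;
import com.github.ambry.utils.Utils;
import java.util.Collections;

// Hypothetical helper: remove the TTL of an existing blob so it never expires.
static void makePermanent(StoreManager storeManager, BlobId blobId) throws Exception {
    // A TTL update is recorded as a payload-less message (size -1) with the ttl-updated
    // flag set and the new expiration time.
    MessageInfo info = new MessageInfo.Builder(blobId, -1, blobId.getAccountId(), blobId.getContainerId(),
        SystemTime.getInstance().milliseconds()).isTtlUpdated(true)
        .expirationTimeInMs(Utils.Infinite_Time)
        .lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND)
        .build();
    Store store = storeManager.getStore(blobId.getPartition());
    store.updateTtl(Collections.singletonList(info));
}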
Also used : ServerNetworkResponseMetrics(com.github.ambry.network.ServerNetworkResponseMetrics) Store(com.github.ambry.store.Store) DataInputStream(java.io.DataInputStream) ServerErrorCode(com.github.ambry.server.ServerErrorCode) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) MessageInfo(com.github.ambry.store.MessageInfo) LocalChannelRequest(com.github.ambry.network.LocalRequestResponseChannel.LocalChannelRequest) BlobId(com.github.ambry.commons.BlobId)

Example 23 with Store

Use of com.github.ambry.store.Store in project ambry by LinkedIn.

From class AmbryRequests, method handleDeleteRequest.

@Override
public void handleDeleteRequest(NetworkRequest request) throws IOException, InterruptedException {
    DeleteRequest deleteRequest;
    if (request instanceof LocalChannelRequest) {
        // This is the case where handleDeleteRequest is called when frontends are talking to Azure: the method is
        // called by request handler threads running within the frontend router itself, so the request can be
        // referenced directly as a Java object without any need for deserialization.
        deleteRequest = (DeleteRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
    } else {
        deleteRequest = DeleteRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
    }
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    metrics.deleteBlobRequestQueueTimeInMs.update(requestQueueTime);
    metrics.deleteBlobRequestRate.mark();
    long startTime = SystemTime.getInstance().milliseconds();
    DeleteResponse response = null;
    try {
        StoreKey convertedStoreKey = getConvertedStoreKeys(Collections.singletonList(deleteRequest.getBlobId())).get(0);
        ServerErrorCode error = validateRequest(deleteRequest.getBlobId().getPartition(), RequestOrResponseType.DeleteRequest, false);
        if (error != ServerErrorCode.No_Error) {
            logger.error("Validating delete request failed with error {} for request {}", error, deleteRequest);
            response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), error);
        } else {
            BlobId convertedBlobId = (BlobId) convertedStoreKey;
            MessageInfo info = new MessageInfo.Builder(convertedBlobId, -1, convertedBlobId.getAccountId(),
                convertedBlobId.getContainerId(), deleteRequest.getDeletionTimeInMs()).isDeleted(true)
                .lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND)
                .build();
            Store storeToDelete = storeManager.getStore(deleteRequest.getBlobId().getPartition());
            storeToDelete.delete(Collections.singletonList(info));
            response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.No_Error);
            if (notification != null) {
                notification.onBlobReplicaDeleted(currentNode.getHostname(), currentNode.getPort(), convertedStoreKey.getID(), BlobReplicaSourceType.PRIMARY);
            }
        }
    } catch (StoreException e) {
        boolean logInErrorLevel = false;
        if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
            metrics.idNotFoundError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
            metrics.ttlExpiredError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
            metrics.idDeletedError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
            metrics.deleteAuthorizationFailure.inc();
        } else {
            logInErrorLevel = true;
            metrics.unExpectedStoreDeleteError.inc();
        }
        if (logInErrorLevel) {
            logger.error("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
        } else {
            logger.trace("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
        }
        response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
    } catch (Exception e) {
        logger.error("Unknown exception for delete request {}", deleteRequest, e);
        response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), ServerErrorCode.Unknown_Error);
        metrics.unExpectedStoreDeleteError.inc();
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", deleteRequest, response, processingTime);
        metrics.deleteBlobProcessingTimeInMs.update(processingTime);
    }
    requestResponseChannel.sendResponse(response, request,
        new ServerNetworkResponseMetrics(metrics.deleteBlobResponseQueueTimeInMs, metrics.deleteBlobSendTimeInMs,
            metrics.deleteBlobTotalTimeInMs, null, null, totalTimeSpent));
}
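
The delete path has the same shape as the TTL update: a payload-less MessageInfo, this time with the deleted flag set, appended through Store.delete. A minimal sketch mirroring the handler above (the helper name deleteBlob is hypothetical):

import com.github.ambry.commons.BlobId;
import com.github.ambry.server.StoreManager;
import com.github.ambry.store.MessageInfo;
import com.github.ambry.store.Store;
import com.github.ambry.utils.SystemTime;
import java.util.Collections;

// Hypothetical helper: record a delete for a blob in the Store of its partition.
static void deleteBlob(StoreManager storeManager, BlobId blobId) throws Exception {
    // A delete is also just a message: no payload (size -1), deleted flag set,
    // stamped with an operation time so replicas can order it against other updates.
    MessageInfo info = new MessageInfo.Builder(blobId, -1, blobId.getAccountId(), blobId.getContainerId(),
        SystemTime.getInstance().milliseconds()).isDeleted(true)
        .lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND)
        .build();
    Store store = storeManager.getStore(blobId.getPartition());
    store.delete(Collections.singletonList(info));
}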
Also used : ServerNetworkResponseMetrics(com.github.ambry.network.ServerNetworkResponseMetrics) Store(com.github.ambry.store.Store) DataInputStream(java.io.DataInputStream) StoreKey(com.github.ambry.store.StoreKey) ServerErrorCode(com.github.ambry.server.ServerErrorCode) IdUndeletedStoreException(com.github.ambry.store.IdUndeletedStoreException) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) MessageInfo(com.github.ambry.store.MessageInfo) LocalChannelRequest(com.github.ambry.network.LocalRequestResponseChannel.LocalChannelRequest) BlobId(com.github.ambry.commons.BlobId)

Example 24 with Store

Use of com.github.ambry.store.Store in project ambry by LinkedIn.

From class CloudToStoreReplicationManager, method addCloudReplica.

/**
 * Add a replica of the given partition and its {@link RemoteReplicaInfo}s to the backup list.
 * @param partitionName name of the partition of the replica to add.
 * @throws ReplicationException if replica initialization fails.
 */
private void addCloudReplica(String partitionName) throws ReplicationException {
    // Adding a cloud replica occurs when a replica transitions from standby to leader. Hence, if this is a newly
    // added replica, it should already be present in the storage manager.
    ReplicaId localReplica = storeManager.getReplica(partitionName);
    if (localReplica == null) {
        logger.warn("Got partition leader notification for partition {} that is not present on the node", partitionName);
        return;
    }
    PartitionId partitionId = localReplica.getPartitionId();
    Store store = storeManager.getStore(partitionId);
    if (store == null) {
        logger.warn("Unable to add cloud replica for partition {} as store for the partition is not present or started.", partitionName);
        return;
    }
    DataNodeId cloudDataNode = getCloudDataNode();
    CloudReplica peerCloudReplica = new CloudReplica(partitionId, cloudDataNode);
    FindTokenFactory findTokenFactory = tokenHelper.getFindTokenFactoryFromReplicaType(peerCloudReplica.getReplicaType());
    RemoteReplicaInfo remoteReplicaInfo =
        new RemoteReplicaInfo(peerCloudReplica, localReplica, store, findTokenFactory.getNewFindToken(),
            storeConfig.storeDataFlushIntervalSeconds * SystemTime.MsPerSec * Replication_Delay_Multiplier,
            SystemTime.getInstance(), peerCloudReplica.getDataNodeId().getPortToConnectTo());
    replicationMetrics.addMetricsForRemoteReplicaInfo(remoteReplicaInfo, trackPerDatacenterLagInMetric);
    // Note that for each replica on an Ambry server node, there is only one cloud replica that it will replicate from.
    List<RemoteReplicaInfo> remoteReplicaInfos = Collections.singletonList(remoteReplicaInfo);
    PartitionInfo partitionInfo = new PartitionInfo(remoteReplicaInfos, partitionId, store, localReplica);
    partitionToPartitionInfo.put(partitionId, partitionInfo);
    mountPathToPartitionInfos.computeIfAbsent(localReplica.getMountPath(), key -> ConcurrentHashMap.newKeySet()).add(partitionInfo);
    logger.info("Cloud Partition {} added to {}. CloudNode {} port {}", partitionName, dataNodeId, cloudDataNode, cloudDataNode.getPortToConnectTo());
    // Reload the replication token if one exists.
    reloadReplicationTokenIfExists(localReplica, remoteReplicaInfos);
    // Add remoteReplicaInfos to {@link ReplicaThread}.
    addRemoteReplicaInfoToReplicaThread(remoteReplicaInfos, true);
    if (replicationConfig.replicationTrackPerPartitionLagFromRemote) {
        replicationMetrics.addLagMetricForPartition(partitionId, true);
    }
    replicationMetrics.addCatchUpPointMetricForPartition(partitionId);
}
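
The guard clauses at the top of addCloudReplica are the part worth imitating: resolve the local replica, then its Store, and return quietly if either is missing, since leadership notifications can race with store startup. Condensed from the method body above (same names, error handling reduced to early returns):

// Resolve local state first; a missing replica or store means this node
// cannot (yet) replicate the partition, so return instead of throwing.
ReplicaId localReplica = storeManager.getReplica(partitionName);
if (localReplica == null) {
    return; // partition is not hosted on this node
}
PartitionId partitionId = localReplica.getPartitionId();
Store store = storeManager.getStore(partitionId);
if (store == null) {
    return; // store not present or not started yet
}
// Only then build the synthetic cloud peer that this partition will replicate from.
CloudReplica peerCloudReplica = new CloudReplica(partitionId, getCloudDataNode());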
Also used : CloudReplica(com.github.ambry.clustermap.CloudReplica) NotificationContext(org.apache.helix.NotificationContext) StoreManager(com.github.ambry.server.StoreManager) ClusterMapUtils(com.github.ambry.clustermap.ClusterMapUtils) DataNodeId(com.github.ambry.clustermap.DataNodeId) LiveInstance(org.apache.helix.model.LiveInstance) Random(java.util.Random) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) CloudConfig(com.github.ambry.config.CloudConfig) PortType(com.github.ambry.network.PortType) Map(java.util.Map) ClusterParticipant(com.github.ambry.clustermap.ClusterParticipant) SystemTime(com.github.ambry.utils.SystemTime) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) InstanceConfigChangeListener(org.apache.helix.api.listeners.InstanceConfigChangeListener) LinkedList(java.util.LinkedList) CloudDataNode(com.github.ambry.clustermap.CloudDataNode) ReplicationConfig(com.github.ambry.config.ReplicationConfig) NotificationSystem(com.github.ambry.notification.NotificationSystem) StateModelListenerType(com.github.ambry.clustermap.StateModelListenerType) StoreKeyConverterFactory(com.github.ambry.store.StoreKeyConverterFactory) StoreConfig(com.github.ambry.config.StoreConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) ConnectionPool(com.github.ambry.network.ConnectionPool) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) VcrClusterSpectator(com.github.ambry.clustermap.VcrClusterSpectator) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) Set(java.util.Set) ClusterMap(com.github.ambry.clustermap.ClusterMap) Utils(com.github.ambry.utils.Utils) LiveInstanceChangeListener(org.apache.helix.api.listeners.LiveInstanceChangeListener) Collectors(java.util.stream.Collectors) InstanceConfig(org.apache.helix.model.InstanceConfig) TimeUnit(java.util.concurrent.TimeUnit) Store(com.github.ambry.store.Store) List(java.util.List) PartitionStateChangeListener(com.github.ambry.clustermap.PartitionStateChangeListener) ReplicaId(com.github.ambry.clustermap.ReplicaId) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Port(com.github.ambry.network.Port) Collections(java.util.Collections) PartitionId(com.github.ambry.clustermap.PartitionId)

Example 25 with Store

Use of com.github.ambry.store.Store in project ambry by LinkedIn.

From class ReplicationTest, method expiryAfterMetadataExchangeTest.

/**
 * Test the case where a blob expires after a replication metadata exchange completes and identifies the blob as
 * a candidate. The subsequent GetRequest should succeed since replication makes an Include_All call, and
 * fixMissingStoreKeys() should complete without exceptions. The blob should not be put locally.
 */
@Test
public void expiryAfterMetadataExchangeTest() throws Exception {
    int batchSize = 400;
    ReplicationTestSetup testSetup = new ReplicationTestSetup(batchSize);
    List<PartitionId> partitionIds = testSetup.partitionIds;
    MockHost remoteHost = testSetup.remoteHost;
    MockHost localHost = testSetup.localHost;
    short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
    Map<PartitionId, Set<StoreKey>> idsToExpectByPartition = new HashMap<>();
    for (int i = 0; i < partitionIds.size(); i++) {
        PartitionId partitionId = partitionIds.get(i);
        // add 5 messages to remote host only.
        Set<StoreKey> expectedIds = new HashSet<>(addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 5));
        short accountId = Utils.getRandomShort(TestUtils.RANDOM);
        short containerId = Utils.getRandomShort(TestUtils.RANDOM);
        boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
        // add an expired message to the remote host only
        StoreKey id = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK);
        PutMsgInfoAndBuffer msgInfoAndBuffer = createPutMessage(id, accountId, containerId, toEncrypt);
        remoteHost.addMessage(partitionId, new MessageInfo(id, msgInfoAndBuffer.byteBuffer.remaining(), 1, accountId, containerId, msgInfoAndBuffer.messageInfo.getOperationTimeMs()), msgInfoAndBuffer.byteBuffer);
        // add 3 messages to the remote host only
        expectedIds.addAll(addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 3));
        // delete the very first blob in the remote host only (and delete it from expected list)
        Iterator<StoreKey> iter = expectedIds.iterator();
        addDeleteMessagesToReplicasOfPartition(partitionId, iter.next(), Collections.singletonList(remoteHost));
        iter.remove();
        // PUT and DELETE a blob in the remote host only
        id = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 1).get(0);
        addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost));
        idsToExpectByPartition.put(partitionId, expectedIds);
    }
    // Do the replica metadata exchange.
    List<ReplicaThread.ExchangeMetadataResponse> responses =
        testSetup.replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize),
            testSetup.replicasToReplicate.get(remoteHost.dataNodeId));
    Assert.assertEquals("Actual keys in Exchange Metadata Response different from expected",
        idsToExpectByPartition.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
        responses.stream().map(k -> k.getMissingStoreKeys()).flatMap(Collection::stream).collect(Collectors.toSet()));
    // Now expire a message in the remote before doing the Get requests (for every partition) and remove those keys
    // from the expected key set. Even though they are requested, they should not go into the local store; this cycle
    // of replication must nevertheless succeed.
    PartitionId partitionId = idsToExpectByPartition.keySet().iterator().next();
    Iterator<StoreKey> keySet = idsToExpectByPartition.get(partitionId).iterator();
    StoreKey keyToExpire = keySet.next();
    keySet.remove();
    MessageInfo msgInfoToExpire = null;
    for (MessageInfo info : remoteHost.infosByPartition.get(partitionId)) {
        if (info.getStoreKey().equals(keyToExpire)) {
            msgInfoToExpire = info;
            break;
        }
    }
    int i = remoteHost.infosByPartition.get(partitionId).indexOf(msgInfoToExpire);
    remoteHost.infosByPartition.get(partitionId)
        .set(i, new MessageInfo(msgInfoToExpire.getStoreKey(), msgInfoToExpire.getSize(), msgInfoToExpire.isDeleted(),
            msgInfoToExpire.isTtlUpdated(), msgInfoToExpire.isUndeleted(), 1, null, msgInfoToExpire.getAccountId(),
            msgInfoToExpire.getContainerId(), msgInfoToExpire.getOperationTimeMs(), msgInfoToExpire.getLifeVersion()));
    testSetup.replicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost, batchSize),
        testSetup.replicasToReplicate.get(remoteHost.dataNodeId), responses, false);
    Assert.assertEquals(idsToExpectByPartition.keySet(), localHost.infosByPartition.keySet());
    Assert.assertEquals("Actual keys in Exchange Metadata Response different from expected", idsToExpectByPartition.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()), localHost.infosByPartition.values().stream().flatMap(Collection::stream).map(MessageInfo::getStoreKey).collect(Collectors.toSet()));
}
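
One detail of the test worth highlighting: MessageInfo is immutable, so to expire a blob the test replaces the remote host's record with a rebuilt copy whose expirationTimeInMs is 1 (milliseconds since the epoch, i.e. long in the past). Isolated, the copy step looks like this, where info stands for the original record:

// Rebuild the record with every field preserved except the expiration time,
// which is set to 1 ms past the epoch so the blob counts as already expired.
MessageInfo expired =
    new MessageInfo(info.getStoreKey(), info.getSize(), info.isDeleted(), info.isTtlUpdated(), info.isUndeleted(),
        1 /* expirationTimeInMs */, null /* crc */, info.getAccountId(), info.getContainerId(),
        info.getOperationTimeMs(), info.getLifeVersion());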
Also used : CoreMatchers(org.hamcrest.CoreMatchers) Arrays(java.util.Arrays) StorageManager(com.github.ambry.store.StorageManager) StoreKeyConverter(com.github.ambry.store.StoreKeyConverter) DataNodeId(com.github.ambry.clustermap.DataNodeId) Random(java.util.Random) ByteBuffer(java.nio.ByteBuffer) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) PortType(com.github.ambry.network.PortType) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TestUtils(com.github.ambry.utils.TestUtils) Map(java.util.Map) DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) Parameterized(org.junit.runners.Parameterized) ReplicationConfig(com.github.ambry.config.ReplicationConfig) Container(com.github.ambry.account.Container) DiskManagerConfig(com.github.ambry.config.DiskManagerConfig) Predicate(java.util.function.Predicate) ValidatingTransformer(com.github.ambry.messageformat.ValidatingTransformer) Collection(java.util.Collection) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) Set(java.util.Set) Utils(com.github.ambry.utils.Utils) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) Collectors(java.util.stream.Collectors) ConnectedChannel(com.github.ambry.network.ConnectedChannel) CountDownLatch(java.util.concurrent.CountDownLatch) StoreKey(com.github.ambry.store.StoreKey) List(java.util.List) ReplicaMetadataResponse(com.github.ambry.protocol.ReplicaMetadataResponse) PartitionStateChangeListener(com.github.ambry.clustermap.PartitionStateChangeListener) MockTime(com.github.ambry.utils.MockTime) Account(com.github.ambry.account.Account) Optional(java.util.Optional) TransitionErrorCode(com.github.ambry.clustermap.StateTransitionException.TransitionErrorCode) MockId(com.github.ambry.store.MockId) InMemAccountService(com.github.ambry.account.InMemAccountService) AmbryReplicaSyncUpManager(com.github.ambry.clustermap.AmbryReplicaSyncUpManager) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) ResponseHandler(com.github.ambry.commons.ResponseHandler) ClusterMapChangeListener(com.github.ambry.clustermap.ClusterMapChangeListener) RunWith(org.junit.runner.RunWith) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Transformer(com.github.ambry.store.Transformer) MockHelixParticipant(com.github.ambry.clustermap.MockHelixParticipant) CommonTestUtils(com.github.ambry.commons.CommonTestUtils) ReplicaMetadataResponseInfo(com.github.ambry.protocol.ReplicaMetadataResponseInfo) MockStoreKeyConverterFactory(com.github.ambry.store.MockStoreKeyConverterFactory) Time(com.github.ambry.utils.Time) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) MockMessageWriteSet(com.github.ambry.store.MockMessageWriteSet) ReplicaState(com.github.ambry.clustermap.ReplicaState) StateModelListenerType(com.github.ambry.clustermap.StateModelListenerType) StoreConfig(com.github.ambry.config.StoreConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) Pair(com.github.ambry.utils.Pair) Iterator(java.util.Iterator) ReplicaType(com.github.ambry.clustermap.ReplicaType) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ClusterMap(com.github.ambry.clustermap.ClusterMap) Test(org.junit.Test) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) File(java.io.File) TimeUnit(java.util.concurrent.TimeUnit) Store(com.github.ambry.store.Store) Mockito(org.mockito.Mockito) MessageInfo(com.github.ambry.store.MessageInfo) StateTransitionException(com.github.ambry.clustermap.StateTransitionException) ReplicaId(com.github.ambry.clustermap.ReplicaId) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Port(com.github.ambry.network.Port) Comparator(java.util.Comparator) Assert(org.junit.Assert) Collections(java.util.Collections) MockClusterMap(com.github.ambry.clustermap.MockClusterMap)

Aggregations

Store (com.github.ambry.store.Store) 35
ArrayList (java.util.ArrayList) 22
PartitionId (com.github.ambry.clustermap.PartitionId) 19
ReplicaId (com.github.ambry.clustermap.ReplicaId) 17
StoreException (com.github.ambry.store.StoreException) 16
IOException (java.io.IOException) 13
MockPartitionId (com.github.ambry.clustermap.MockPartitionId) 12
StorageManager (com.github.ambry.store.StorageManager) 12
Test (org.junit.Test) 12
MessageFormatException (com.github.ambry.messageformat.MessageFormatException) 11
MessageInfo (com.github.ambry.store.MessageInfo) 11
MockReplicaId (com.github.ambry.clustermap.MockReplicaId) 10
DataInputStream (java.io.DataInputStream) 10
HashMap (java.util.HashMap) 10
MetricRegistry (com.codahale.metrics.MetricRegistry) 9
StoreKey (com.github.ambry.store.StoreKey) 9
List (java.util.List) 8
Map (java.util.Map) 8
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig) 7
ServerNetworkResponseMetrics (com.github.ambry.network.ServerNetworkResponseMetrics) 7