
Example 1 with DeleteMessageFormatInputStream

use of com.github.ambry.messageformat.DeleteMessageFormatInputStream in project ambry by linkedin.

the class ReplicationTest method getDeleteMessage.

/**
 * Returns a delete message for the given {@code id}.
 * @param id the id for which a delete message must be constructed.
 * @param accountId the account id to include in the delete record.
 * @param containerId the container id to include in the delete record.
 * @param deletionTimeMs the deletion time (in ms) to include in the delete record.
 * @return {@link ByteBuffer} representing the entire message.
 * @throws MessageFormatException
 * @throws IOException
 */
private ByteBuffer getDeleteMessage(StoreKey id, short accountId, short containerId, long deletionTimeMs) throws MessageFormatException, IOException {
    MessageFormatInputStream stream = new DeleteMessageFormatInputStream(id, accountId, containerId, deletionTimeMs);
    byte[] message = Utils.readBytesFromStream(stream, (int) stream.getSize());
    return ByteBuffer.wrap(message);
}
Also used : DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) MessageFormatInputStream(com.github.ambry.messageformat.MessageFormatInputStream) PutMessageFormatInputStream(com.github.ambry.messageformat.PutMessageFormatInputStream)
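
A hedged usage sketch for the helper above (not taken from the quoted test): the MockId key, the account and container values, and the assertion are assumptions chosen to mirror idioms that appear elsewhere in these examples.

// Hypothetical caller of getDeleteMessage; the key and id values are illustrative only.
private void verifyDeleteMessageIsNonEmpty() throws MessageFormatException, IOException {
    StoreKey id = new MockId("delete-me", (short) 15, (short) 21);
    ByteBuffer message = getDeleteMessage(id, (short) 15, (short) 21, System.currentTimeMillis());
    // The whole delete record (header plus the delete sub-record) should have been materialized.
    assertTrue("Delete message should not be empty", message.remaining() > 0);
}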

Example 2 with DeleteMessageFormatInputStream

use of com.github.ambry.messageformat.DeleteMessageFormatInputStream in project ambry by linkedin.

the class InMemoryStore method delete.

@Override
public void delete(List<MessageInfo> infos) throws StoreException {
    List<MessageInfo> infosToDelete = new ArrayList<>(infos.size());
    List<InputStream> inputStreams = new ArrayList<>();
    try {
        for (MessageInfo info : infos) {
            short lifeVersion = info.getLifeVersion();
            MessageInfo latestInfo = getMergedMessageInfo(info.getStoreKey(), messageInfos);
            if (latestInfo == null) {
                throw new StoreException("Cannot delete id " + info.getStoreKey() + " since it is not present in the index.", StoreErrorCodes.ID_Not_Found);
            }
            if (lifeVersion == MessageInfo.LIFE_VERSION_FROM_FRONTEND) {
                if (latestInfo.isDeleted()) {
                    throw new StoreException("Cannot delete id " + info.getStoreKey() + " since it is already deleted in the index.", StoreErrorCodes.ID_Deleted);
                }
                lifeVersion = latestInfo.getLifeVersion();
            } else {
                if ((latestInfo.isDeleted() && latestInfo.getLifeVersion() >= info.getLifeVersion()) || (latestInfo.getLifeVersion() > info.getLifeVersion())) {
                    throw new StoreException("Cannot delete id " + info.getStoreKey() + " since it is already deleted in the index.", StoreErrorCodes.Life_Version_Conflict);
                }
                lifeVersion = info.getLifeVersion();
            }
            MessageFormatInputStream stream = new DeleteMessageFormatInputStream(info.getStoreKey(), info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), lifeVersion);
            infosToDelete.add(new MessageInfo(info.getStoreKey(), stream.getSize(), true, info.isTtlUpdated(), false, info.getExpirationTimeInMs(), null, info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), lifeVersion));
            inputStreams.add(stream);
        }
        MessageFormatWriteSet writeSet = new MessageFormatWriteSet(new SequenceInputStream(Collections.enumeration(inputStreams)), infosToDelete, false);
        writeSet.writeTo(log);
        messageInfos.addAll(infosToDelete);
    } catch (Exception e) {
        throw (e instanceof StoreException ? (StoreException) e : new StoreException(e, StoreErrorCodes.Unknown_Error));
    }
}
Also used : SequenceInputStream(java.io.SequenceInputStream) DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) UndeleteMessageFormatInputStream(com.github.ambry.messageformat.UndeleteMessageFormatInputStream) MessageFormatInputStream(com.github.ambry.messageformat.MessageFormatInputStream) TtlUpdateMessageFormatInputStream(com.github.ambry.messageformat.TtlUpdateMessageFormatInputStream) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) StoreException(com.github.ambry.store.StoreException) IOException(java.io.IOException) MessageInfo(com.github.ambry.store.MessageInfo) MessageFormatWriteSet(com.github.ambry.messageformat.MessageFormatWriteSet)
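
The replication branch of the loop above reduces to a single eligibility rule. A hedged restatement (a paraphrase of the condition on latestInfo, not an Ambry API) is:

// Hypothetical paraphrase of the replication-path check above: may a replicated delete with
// lifeVersion 'incoming' be applied over an index entry whose latest state is (isDeleted, existing)?
private static boolean replicatedDeleteAllowed(boolean isDeleted, short existing, short incoming) {
    // Reject if the entry is already deleted at the same or a newer lifeVersion, or if the index is simply ahead.
    return !((isDeleted && existing >= incoming) || existing > incoming);
}

The frontend path (LIFE_VERSION_FROM_FRONTEND) skips this rule and instead inherits the lifeVersion currently recorded in the index.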

Example 3 with DeleteMessageFormatInputStream

use of com.github.ambry.messageformat.DeleteMessageFormatInputStream in project ambry by linkedin.

the class ReplicationTest method replicaFromInactiveToOfflineTest.

/**
 * Test INACTIVE -> OFFLINE transition on existing replica (both success and failure cases)
 */
@Test
public void replicaFromInactiveToOfflineTest() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    MockHelixParticipant.metricRegistry = new MetricRegistry();
    MockHelixParticipant mockHelixParticipant = new MockHelixParticipant(clusterMapConfig);
    Pair<StorageManager, ReplicationManager> managers = createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, mockHelixParticipant);
    StorageManager storageManager = managers.getFirst();
    MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
    // 1. test replica not found case
    try {
        mockHelixParticipant.onPartitionBecomeOfflineFromInactive("-1");
        fail("should fail because of invalid partition");
    } catch (StateTransitionException e) {
        assertEquals("Error code doesn't match", ReplicaNotFound, e.getErrorCode());
    }
    // 2. test store not started case
    PartitionId existingPartition = replicationManager.partitionToPartitionInfo.keySet().iterator().next();
    storageManager.shutdownBlobStore(existingPartition);
    try {
        mockHelixParticipant.onPartitionBecomeOfflineFromInactive(existingPartition.toPathString());
        fail("should fail because store is not started");
    } catch (StateTransitionException e) {
        assertEquals("Error code doesn't match", StoreNotStarted, e.getErrorCode());
    }
    storageManager.startBlobStore(existingPartition);
    // before testing the success case, write a blob (size = 100) into the local store and add a delete record for the new blob
    Store localStore = storageManager.getStore(existingPartition);
    MockId id = new MockId(TestUtils.getRandomString(10), Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM));
    long crc = (new Random()).nextLong();
    long blobSize = 100;
    MessageInfo info = new MessageInfo(id, blobSize, false, false, Utils.Infinite_Time, crc, id.getAccountId(), id.getContainerId(), Utils.Infinite_Time);
    List<MessageInfo> infos = new ArrayList<>();
    List<ByteBuffer> buffers = new ArrayList<>();
    ByteBuffer buffer = ByteBuffer.wrap(TestUtils.getRandomBytes((int) blobSize));
    infos.add(info);
    buffers.add(buffer);
    localStore.put(new MockMessageWriteSet(infos, buffers));
    // delete the blob
    int deleteRecordSize = (int) (new DeleteMessageFormatInputStream(id, (short) 0, (short) 0, 0).getSize());
    MessageInfo deleteInfo = new MessageInfo(id, deleteRecordSize, id.getAccountId(), id.getContainerId(), time.milliseconds());
    localStore.delete(Collections.singletonList(deleteInfo));
    int sizeOfPutAndHeader = 100 + 18;
    int sizeOfWhole = sizeOfPutAndHeader + deleteRecordSize;
    // note that end offset of last PUT = 100 + 18 = 118, end offset of the store is sizeOfWhole
    // 3. test success case (create a new thread and trigger INACTIVE -> OFFLINE transition)
    ReplicaId localReplica = storageManager.getReplica(existingPartition.toPathString());
    // put a decommission-in-progress file into local store dir
    File decommissionFile = new File(localReplica.getReplicaPath(), "decommission_in_progress");
    assertTrue("Couldn't create decommission file in local store", decommissionFile.createNewFile());
    decommissionFile.deleteOnExit();
    assertNotSame("Before disconnection, the local store state shouldn't be OFFLINE", ReplicaState.OFFLINE, localStore.getCurrentState());
    mockHelixParticipant.registerPartitionStateChangeListener(StateModelListenerType.ReplicationManagerListener, replicationManager.replicationListener);
    CountDownLatch participantLatch = new CountDownLatch(1);
    replicationManager.listenerExecutionLatch = new CountDownLatch(1);
    Utils.newThread(() -> {
        mockHelixParticipant.onPartitionBecomeOfflineFromInactive(existingPartition.toPathString());
        participantLatch.countDown();
    }, false).start();
    assertTrue("Partition state change listener in ReplicationManager didn't get called within 1 sec", replicationManager.listenerExecutionLatch.await(1, TimeUnit.SECONDS));
    // the state of local store should be updated to OFFLINE
    assertEquals("Local store state is not expected", ReplicaState.OFFLINE, localStore.getCurrentState());
    // update replication lag between local and peer replicas
    List<RemoteReplicaInfo> remoteReplicaInfos = replicationManager.partitionToPartitionInfo.get(existingPartition).getRemoteReplicaInfos();
    ReplicaId peerReplica1 = remoteReplicaInfos.get(0).getReplicaId();
    ReplicaId peerReplica2 = remoteReplicaInfos.get(1).getReplicaId();
    // peer1 catches up with last PUT, peer2 catches up with end offset of local store. In this case, SyncUp is not complete
    replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition, peerReplica1.getDataNodeId().getHostname(), peerReplica1.getReplicaPath(), sizeOfPutAndHeader);
    replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition, peerReplica2.getDataNodeId().getHostname(), peerReplica2.getReplicaPath(), sizeOfWhole);
    assertFalse("Only one peer replica has fully caught up with end offset so sync-up should not complete", mockHelixParticipant.getReplicaSyncUpManager().isSyncUpComplete(localReplica));
    // make peer1 catch up with end offset
    replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition, peerReplica1.getDataNodeId().getHostname(), peerReplica1.getReplicaPath(), sizeOfWhole);
    // Now, sync-up should complete and transition should be able to proceed.
    assertTrue("Inactive-To-Offline transition didn't complete within 1 sec", participantLatch.await(1, TimeUnit.SECONDS));
    assertFalse("Local store should be stopped after transition", localStore.isStarted());
    storageManager.shutdown();
}
Also used : StorageManager(com.github.ambry.store.StorageManager) ArrayList(java.util.ArrayList) Store(com.github.ambry.store.Store) MockId(com.github.ambry.store.MockId) MockHelixParticipant(com.github.ambry.clustermap.MockHelixParticipant) Random(java.util.Random) MetricRegistry(com.codahale.metrics.MetricRegistry) DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) CountDownLatch(java.util.concurrent.CountDownLatch) ByteBuffer(java.nio.ByteBuffer) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) ReplicaId(com.github.ambry.clustermap.ReplicaId) MessageInfo(com.github.ambry.store.MessageInfo) MockMessageWriteSet(com.github.ambry.store.MockMessageWriteSet) File(java.io.File) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) StateTransitionException(com.github.ambry.clustermap.StateTransitionException) Test(org.junit.Test)
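
As a hedged aside on the deleteRecordSize computation in the test above (the helper below is illustrative): assuming the delete record serializes account id, container id and deletion time at fixed widths, only the key contributes a variable-length portion, so placeholder values suffice for sizing.

// Hypothetical utility mirroring the size computation above; the zero placeholders do not change
// the resulting size, because only the key length varies.
private static int deleteRecordSize(StoreKey key) throws MessageFormatException, IOException {
    return (int) new DeleteMessageFormatInputStream(key, (short) 0, (short) 0, 0).getSize();
}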

Example 4 with DeleteMessageFormatInputStream

use of com.github.ambry.messageformat.DeleteMessageFormatInputStream in project ambry by linkedin.

the class ReplicationTestHelper method getDeleteMessage.

/**
 * Returns a delete message for the given {@code id}.
 * @param id the id for which a delete message must be constructed.
 * @param accountId the account id to include in the delete record.
 * @param containerId the container id to include in the delete record.
 * @param deletionTimeMs the deletion time (in ms) to include in the delete record.
 * @param lifeVersion the lifeVersion to stamp on the delete record.
 * @return {@link ByteBuffer} representing the entire message.
 * @throws MessageFormatException
 * @throws IOException
 */
public static ByteBuffer getDeleteMessage(StoreKey id, short accountId, short containerId, long deletionTimeMs, short lifeVersion) throws MessageFormatException, IOException {
    MessageFormatInputStream stream = new DeleteMessageFormatInputStream(id, accountId, containerId, deletionTimeMs, lifeVersion);
    byte[] message = Utils.readBytesFromStream(stream, (int) stream.getSize());
    return ByteBuffer.wrap(message);
}
Also used : DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) MessageFormatInputStream(com.github.ambry.messageformat.MessageFormatInputStream) PutMessageFormatInputStream(com.github.ambry.messageformat.PutMessageFormatInputStream) UndeleteMessageFormatInputStream(com.github.ambry.messageformat.UndeleteMessageFormatInputStream) TtlUpdateMessageFormatInputStream(com.github.ambry.messageformat.TtlUpdateMessageFormatInputStream)
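
A hedged usage sketch for the lifeVersion-aware overload (the ids, time, and lifeVersion value are assumptions): replication tests typically need a delete record stamped with an explicit lifeVersion rather than the frontend sentinel, for example to replay a delete that follows an undelete.

// Hypothetical caller of the helper above; the concrete values are illustrative only.
public static ByteBuffer getDeleteAfterOneUndelete(StoreKey id) throws MessageFormatException, IOException {
    short accountId = (short) 7;
    short containerId = (short) 9;
    // A lifeVersion of 1 corresponds to a blob that has been undeleted once before this delete record.
    return getDeleteMessage(id, accountId, containerId, System.currentTimeMillis(), (short) 1);
}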

Example 5 with DeleteMessageFormatInputStream

use of com.github.ambry.messageformat.DeleteMessageFormatInputStream in project ambry by linkedin.

the class BlobStore method delete.

@Override
public void delete(List<MessageInfo> infosToDelete) throws StoreException {
    checkStarted();
    checkDuplicates(infosToDelete);
    final Timer.Context context = metrics.deleteResponse.time();
    try {
        List<IndexValue> indexValuesPriorToDelete = new ArrayList<>();
        List<IndexValue> originalPuts = new ArrayList<>();
        List<Short> lifeVersions = new ArrayList<>();
        Offset indexEndOffsetBeforeCheck = index.getCurrentEndOffset();
        for (MessageInfo info : infosToDelete) {
            IndexValue value = index.findKey(info.getStoreKey(), new FileSpan(index.getStartOffset(), indexEndOffsetBeforeCheck));
            if (value == null) {
                throw new StoreException("Cannot delete id " + info.getStoreKey() + " because it is not present in the index", StoreErrorCodes.ID_Not_Found);
            }
            if (!info.getStoreKey().isAccountContainerMatch(value.getAccountId(), value.getContainerId())) {
                if (config.storeValidateAuthorization) {
                    throw new StoreException("DELETE authorization failure. Key: " + info.getStoreKey() + "Actually accountId: " + value.getAccountId() + "Actually containerId: " + value.getContainerId(), StoreErrorCodes.Authorization_Failure);
                } else {
                    logger.warn("DELETE authorization failure. Key: {} Actually accountId: {} Actually containerId: {}", info.getStoreKey(), value.getAccountId(), value.getContainerId());
                    metrics.deleteAuthorizationFailureCount.inc();
                }
            }
            short revisedLifeVersion = info.getLifeVersion();
            if (info.getLifeVersion() == MessageInfo.LIFE_VERSION_FROM_FRONTEND) {
                // This is a delete request from frontend
                if (value.isDelete()) {
                    throw new StoreException("Cannot delete id " + info.getStoreKey() + " since it is already deleted in the index.", StoreErrorCodes.ID_Deleted);
                }
                revisedLifeVersion = value.getLifeVersion();
            } else {
                // This is a delete request from replication
                if (value.isDelete() && value.getLifeVersion() == info.getLifeVersion()) {
                    throw new StoreException("Cannot delete id " + info.getStoreKey() + " since it is already deleted in the index with lifeVersion " + value.getLifeVersion() + ".", StoreErrorCodes.ID_Deleted);
                }
                if (value.getLifeVersion() > info.getLifeVersion()) {
                    throw new StoreException("Cannot delete id " + info.getStoreKey() + " since it has a higher lifeVersion than the message info: " + value.getLifeVersion() + ">" + info.getLifeVersion(), StoreErrorCodes.Life_Version_Conflict);
                }
            }
            indexValuesPriorToDelete.add(value);
            lifeVersions.add(revisedLifeVersion);
            if (!value.isDelete() && !value.isUndelete()) {
                originalPuts.add(value);
            } else {
                originalPuts.add(index.findKey(info.getStoreKey(), new FileSpan(index.getStartOffset(), value.getOffset()), EnumSet.of(PersistentIndex.IndexEntryType.PUT)));
            }
        }
        synchronized (storeWriteLock) {
            Offset currentIndexEndOffset = index.getCurrentEndOffset();
            if (!currentIndexEndOffset.equals(indexEndOffsetBeforeCheck)) {
                FileSpan fileSpan = new FileSpan(indexEndOffsetBeforeCheck, currentIndexEndOffset);
                int i = 0;
                for (MessageInfo info : infosToDelete) {
                    IndexValue value = index.findKey(info.getStoreKey(), fileSpan, EnumSet.allOf(PersistentIndex.IndexEntryType.class));
                    if (value != null) {
                        // In these cases value can be a DELETE, TTL_UPDATE or UNDELETE, and each has to be handled accordingly.
                        if (value.getLifeVersion() == lifeVersions.get(i)) {
                            if (value.isDelete()) {
                                throw new StoreException("Cannot delete id " + info.getStoreKey() + " since it is already deleted in the index.", StoreErrorCodes.ID_Deleted);
                            }
                        // value being ttl update is fine, we can just append DELETE to it.
                        } else {
                            // In this extreme case, log the conflicting value and throw.
                            logger.warn("Concurrent operation for id " + info.getStoreKey() + " in store " + dataDir + ". Newly added value " + value);
                            throw new StoreException("Cannot delete id " + info.getStoreKey() + " since there were concurrent operations during the delete", StoreErrorCodes.Life_Version_Conflict);
                        }
                        indexValuesPriorToDelete.set(i, value);
                    }
                    i++;
                }
            }
            List<InputStream> inputStreams = new ArrayList<>(infosToDelete.size());
            List<MessageInfo> updatedInfos = new ArrayList<>(infosToDelete.size());
            int i = 0;
            for (MessageInfo info : infosToDelete) {
                MessageFormatInputStream stream = new DeleteMessageFormatInputStream(info.getStoreKey(), info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), lifeVersions.get(i));
                // Don't change the lifeVersion here; other logic in markAsDeleted relies on this lifeVersion.
                updatedInfos.add(new MessageInfo(info.getStoreKey(), stream.getSize(), info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), info.getLifeVersion()));
                inputStreams.add(stream);
                i++;
            }
            Offset endOffsetOfLastMessage = log.getEndOffset();
            MessageFormatWriteSet writeSet = new MessageFormatWriteSet(new SequenceInputStream(Collections.enumeration(inputStreams)), updatedInfos, false);
            writeSet.writeTo(log);
            logger.trace("Store : {} delete mark written to log", dataDir);
            int correspondingPutIndex = 0;
            for (MessageInfo info : updatedInfos) {
                FileSpan fileSpan = log.getFileSpanForMessage(endOffsetOfLastMessage, info.getSize());
                IndexValue deleteIndexValue = index.markAsDeleted(info.getStoreKey(), fileSpan, null, info.getOperationTimeMs(), info.getLifeVersion());
                endOffsetOfLastMessage = fileSpan.getEndOffset();
                blobStoreStats.handleNewDeleteEntry(info.getStoreKey(), deleteIndexValue, originalPuts.get(correspondingPutIndex), indexValuesPriorToDelete.get(correspondingPutIndex));
                correspondingPutIndex++;
            }
            logger.trace("Store : {} delete has been marked in the index ", dataDir);
        }
        onSuccess();
    } catch (StoreException e) {
        if (e.getErrorCode() == StoreErrorCodes.IOError) {
            onError();
        }
        throw e;
    } catch (Exception e) {
        throw new StoreException("Unknown error while trying to delete blobs from store " + dataDir, e, StoreErrorCodes.Unknown_Error);
    } finally {
        context.stop();
    }
}
Also used : DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) SequenceInputStream(java.io.SequenceInputStream) UndeleteMessageFormatInputStream(com.github.ambry.messageformat.UndeleteMessageFormatInputStream) MessageFormatInputStream(com.github.ambry.messageformat.MessageFormatInputStream) TtlUpdateMessageFormatInputStream(com.github.ambry.messageformat.TtlUpdateMessageFormatInputStream) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) IOException(java.io.IOException) Timer(com.codahale.metrics.Timer) MessageFormatWriteSet(com.github.ambry.messageformat.MessageFormatWriteSet)
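
The lifeVersion handling in the first loop can be read as a small pure function. The following is a hedged paraphrase of that logic (the method is illustrative and not part of BlobStore); the error codes are the ones used above.

// Hypothetical paraphrase of the revisedLifeVersion resolution in the first loop above.
// 'valueIsDelete' and 'valueLifeVersion' describe the latest index entry found for the key.
private static short resolveDeleteLifeVersion(short infoLifeVersion, boolean valueIsDelete, short valueLifeVersion) throws StoreException {
    if (infoLifeVersion == MessageInfo.LIFE_VERSION_FROM_FRONTEND) {
        // Frontend delete: reject if already deleted, otherwise inherit the lifeVersion recorded in the index.
        if (valueIsDelete) {
            throw new StoreException("Already deleted in the index", StoreErrorCodes.ID_Deleted);
        }
        return valueLifeVersion;
    }
    // Replication delete: a duplicate delete at the same lifeVersion is rejected, and an index entry
    // that is already at a higher lifeVersion is a conflict.
    if (valueIsDelete && valueLifeVersion == infoLifeVersion) {
        throw new StoreException("Already deleted at this lifeVersion", StoreErrorCodes.ID_Deleted);
    }
    if (valueLifeVersion > infoLifeVersion) {
        throw new StoreException("Index lifeVersion is higher than the incoming delete", StoreErrorCodes.Life_Version_Conflict);
    }
    return infoLifeVersion;
}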

Aggregations

DeleteMessageFormatInputStream (com.github.ambry.messageformat.DeleteMessageFormatInputStream) 7
MessageFormatInputStream (com.github.ambry.messageformat.MessageFormatInputStream) 6
ArrayList (java.util.ArrayList) 5
MessageFormatWriteSet (com.github.ambry.messageformat.MessageFormatWriteSet) 4
MessageInfo (com.github.ambry.store.MessageInfo) 4
PutMessageFormatInputStream (com.github.ambry.messageformat.PutMessageFormatInputStream) 3
TtlUpdateMessageFormatInputStream (com.github.ambry.messageformat.TtlUpdateMessageFormatInputStream) 3
UndeleteMessageFormatInputStream (com.github.ambry.messageformat.UndeleteMessageFormatInputStream) 3
StoreException (com.github.ambry.store.StoreException) 3
IOException (java.io.IOException) 3
Store (com.github.ambry.store.Store) 2
InputStream (java.io.InputStream) 2
SequenceInputStream (java.io.SequenceInputStream) 2
MetricRegistry (com.codahale.metrics.MetricRegistry) 1
Timer (com.codahale.metrics.Timer) 1
MockClusterMap (com.github.ambry.clustermap.MockClusterMap) 1
MockHelixParticipant (com.github.ambry.clustermap.MockHelixParticipant) 1
MockPartitionId (com.github.ambry.clustermap.MockPartitionId) 1
MockReplicaId (com.github.ambry.clustermap.MockReplicaId) 1
PartitionId (com.github.ambry.clustermap.PartitionId) 1