Example 6 with MockMessageWriteSet

Usage of com.github.ambry.store.MockMessageWriteSet in the ambry project by LinkedIn.

From the class CloudBlobStoreIntegrationTest, method testDeleteFromFrontend.

/**
 * Test {@link CloudBlobStore#delete} method from frontend.
 */
public void testDeleteFromFrontend() throws StoreException {
    // First upload a blob as the frontend would, with life version -1 (i.e. unset).
    MockMessageWriteSet messageWriteSet = new MockMessageWriteSet();
    addBlobToMessageSet(messageWriteSet, Utils.Infinite_Time, accountId, containerId, partitionId, operationTime, (short) -1);
    cloudBlobStore.put(messageWriteSet);
    // verify that the blob was uploaded with expected metadata.
    StoreInfo storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
    assertEquals("Unexpected live version", 0, storeInfo.getMessageReadSetInfo().get(0).getLifeVersion());
    assertEquals("Unexpected delete status", messageWriteSet.getMessageSetInfo().get(0).isDeleted(), storeInfo.getMessageReadSetInfo().get(0).isDeleted());
    // Delete the blob; deleting an already-deleted blob is expected to fail with ID_Deleted.
    MessageInfo messageInfo = messageWriteSet.getMessageSetInfo().get(0);
    MessageInfo deleteMessageInfo = new MessageInfo(messageInfo.getStoreKey(), messageInfo.getSize(), messageInfo.isDeleted(), messageInfo.isTtlUpdated(), messageInfo.isUndeleted(), messageInfo.getExpirationTimeInMs(), messageInfo.getCrc(), messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs(), (short) -1);
    try {
        cloudBlobStore.delete(Collections.singletonList(deleteMessageInfo));
    } catch (StoreException ex) {
        assertEquals("Unexpected error code", ex.getErrorCode(), StoreErrorCodes.ID_Deleted);
    }
    storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
    assertEquals("Unexpected live version", 0, storeInfo.getMessageReadSetInfo().get(0).getLifeVersion());
    assertTrue("Unexpected delete status", storeInfo.getMessageReadSetInfo().get(0).isDeleted());
    // Restart cloud blob store to clear its cache, then delete again with a higher life version.
    cloudBlobStore.shutdown();
    cloudBlobStore.start();
    deleteMessageInfo = new MessageInfo(messageInfo.getStoreKey(), messageInfo.getSize(), messageInfo.isDeleted(), messageInfo.isTtlUpdated(), messageInfo.isUndeleted(), messageInfo.getExpirationTimeInMs(), messageInfo.getCrc(), messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs(), (short) 3);
    try {
        cloudBlobStore.delete(Collections.singletonList(deleteMessageInfo));
    } catch (StoreException ex) {
        assertEquals("Unexpected error code", ex.getErrorCode(), StoreErrorCodes.ID_Deleted);
    }
    storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
    assertEquals("Unexpected live version", 3, storeInfo.getMessageReadSetInfo().get(0).getLifeVersion());
    assertTrue("Unexpected delete status", storeInfo.getMessageReadSetInfo().get(0).isDeleted());
}
Also used : MockMessageWriteSet(com.github.ambry.store.MockMessageWriteSet) StoreGetOptions(com.github.ambry.store.StoreGetOptions) StoreInfo(com.github.ambry.store.StoreInfo) MessageInfo(com.github.ambry.store.MessageInfo) StoreException(com.github.ambry.store.StoreException)
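
The two delete attempts above rebuild a MessageInfo by hand just to change the life version. A minimal sketch of that copy pattern as a hypothetical helper (it only reuses the 11-argument MessageInfo constructor already shown in this test; the helper itself is not part of the test class) could look like this:

private static MessageInfo withLifeVersion(MessageInfo info, short lifeVersion) {
    // Copy every field via the getters used above, overriding only the life version.
    return new MessageInfo(info.getStoreKey(), info.getSize(), info.isDeleted(), info.isTtlUpdated(), info.isUndeleted(), info.getExpirationTimeInMs(), info.getCrc(), info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), lifeVersion);
}

With such a helper, the two deleteMessageInfo constructions above reduce to withLifeVersion(messageInfo, (short) -1) and withLifeVersion(messageInfo, (short) 3).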

Example 7 with MockMessageWriteSet

Usage of com.github.ambry.store.MockMessageWriteSet in the ambry project by LinkedIn.

From the class CloudBlobStoreIntegrationTest, method testUndelete.

/**
 * Test {@link CloudBlobStore#undelete} method.
 */
@Test
public void testUndelete() throws StoreException {
    MockMessageWriteSet messageWriteSet = new MockMessageWriteSet();
    addBlobToMessageSet(messageWriteSet, Utils.Infinite_Time, accountId, containerId, partitionId, operationTime, initLifeVersion(isVcr));
    cloudBlobStore.put(messageWriteSet);
    // Attempt to undelete a blob that is not deleted. Should fail silently for vcr and throw exception for frontend.
    MessageInfo messageInfo = messageWriteSet.getMessageSetInfo().get(0);
    MessageInfo undeleteMessageInfo = new MessageInfo(messageInfo.getStoreKey(), messageInfo.getSize(), messageInfo.isDeleted(), messageInfo.isTtlUpdated(), messageInfo.isUndeleted(), messageInfo.getExpirationTimeInMs(), messageInfo.getCrc(), messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs(), (isVcr ? (short) 1 : -1));
    try {
        cloudBlobStore.undelete(undeleteMessageInfo);
        if (!isVcr) {
            fail("Undelete from frontend of a not deleted blob should throw exception.");
        }
    } catch (StoreException ex) {
        if (isVcr) {
            fail("Undelete for the vcr should fail silently");
        }
        assertEquals("Unexpected error message", StoreErrorCodes.ID_Not_Deleted, ex.getErrorCode());
    }
    // delete the blob.
    MessageInfo deleteMessageInfo = new MessageInfo(messageInfo.getStoreKey(), messageInfo.getSize(), messageInfo.isDeleted(), messageInfo.isTtlUpdated(), messageInfo.isUndeleted(), messageInfo.getExpirationTimeInMs(), messageInfo.getCrc(), messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs(), (short) (isVcr ? 1 : -1));
    cloudBlobStore.delete(Collections.singletonList(deleteMessageInfo));
    // Attempt to undelete should pass
    short lifeVersion = cloudBlobStore.undelete(undeleteMessageInfo);
    assertEquals("Unexpected life version after undelete", lifeVersion, 1);
}
Also used : MockMessageWriteSet(com.github.ambry.store.MockMessageWriteSet) MessageInfo(com.github.ambry.store.MessageInfo) StoreException(com.github.ambry.store.StoreException) Test(org.junit.Test)
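
The blob here is staged through the test's addBlobToMessageSet helper. As Examples 8 and 9 below show, a MockMessageWriteSet can also be assembled directly from parallel MessageInfo and ByteBuffer lists. A minimal sketch of that manual construction, wrapped in a hypothetical helper and using only the constructors that appear in those examples, is:

private static void putRandomBlob(Store store, long blobSize) throws StoreException {
    // Build one MessageInfo plus a matching payload buffer and hand both to the store.
    MockId id = new MockId(TestUtils.getRandomString(10), Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM));
    MessageInfo info = new MessageInfo(id, blobSize, false, false, Utils.Infinite_Time, (new Random()).nextLong(), id.getAccountId(), id.getContainerId(), Utils.Infinite_Time);
    List<MessageInfo> infos = new ArrayList<>();
    List<ByteBuffer> buffers = new ArrayList<>();
    infos.add(info);
    buffers.add(ByteBuffer.wrap(TestUtils.getRandomBytes((int) blobSize)));
    store.put(new MockMessageWriteSet(infos, buffers));
}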

Example 8 with MockMessageWriteSet

Usage of com.github.ambry.store.MockMessageWriteSet in the ambry project by LinkedIn.

From the class ReplicationTest, method replicaFromStandbyToInactiveTest.

/**
 * Test STANDBY -> INACTIVE transition on existing replica (both success and failure cases)
 */
@Test
public void replicaFromStandbyToInactiveTest() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    MockHelixParticipant.metricRegistry = new MetricRegistry();
    MockHelixParticipant mockHelixParticipant = new MockHelixParticipant(clusterMapConfig);
    Pair<StorageManager, ReplicationManager> managers = createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, mockHelixParticipant);
    StorageManager storageManager = managers.getFirst();
    MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
    // get an existing partition to test both success and failure cases
    PartitionId existingPartition = replicationManager.partitionToPartitionInfo.keySet().iterator().next();
    storageManager.shutdownBlobStore(existingPartition);
    try {
        mockHelixParticipant.onPartitionBecomeInactiveFromStandby(existingPartition.toPathString());
        fail("should fail because store is not started");
    } catch (StateTransitionException e) {
        assertEquals("Error code doesn't match", StoreNotStarted, e.getErrorCode());
    }
    // restart the store and trigger Standby-To-Inactive transition again
    storageManager.startBlobStore(existingPartition);
    // write a blob with size = 100 into local store (end offset of last PUT = 100 + 18 = 118)
    Store localStore = storageManager.getStore(existingPartition);
    MockId id = new MockId(TestUtils.getRandomString(10), Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM));
    long crc = (new Random()).nextLong();
    long blobSize = 100;
    MessageInfo info = new MessageInfo(id, blobSize, false, false, Utils.Infinite_Time, crc, id.getAccountId(), id.getContainerId(), Utils.Infinite_Time);
    List<MessageInfo> infos = new ArrayList<>();
    List<ByteBuffer> buffers = new ArrayList<>();
    ByteBuffer buffer = ByteBuffer.wrap(TestUtils.getRandomBytes((int) blobSize));
    infos.add(info);
    buffers.add(buffer);
    localStore.put(new MockMessageWriteSet(infos, buffers));
    ReplicaId localReplica = storageManager.getReplica(existingPartition.toPathString());
    // override partition state change listener in ReplicationManager to help thread manipulation
    mockHelixParticipant.registerPartitionStateChangeListener(StateModelListenerType.ReplicationManagerListener, replicationManager.replicationListener);
    CountDownLatch participantLatch = new CountDownLatch(1);
    replicationManager.listenerExecutionLatch = new CountDownLatch(1);
    // create a new thread and trigger STANDBY -> INACTIVE transition
    Utils.newThread(() -> {
        mockHelixParticipant.onPartitionBecomeInactiveFromStandby(existingPartition.toPathString());
        participantLatch.countDown();
    }, false).start();
    assertTrue("Partition state change listener didn't get called within 1 sec", replicationManager.listenerExecutionLatch.await(1, TimeUnit.SECONDS));
    assertEquals("Local store state should be INACTIVE", ReplicaState.INACTIVE, storageManager.getStore(existingPartition).getCurrentState());
    List<RemoteReplicaInfo> remoteReplicaInfos = replicationManager.partitionToPartitionInfo.get(existingPartition).getRemoteReplicaInfos();
    ReplicaId peerReplica1 = remoteReplicaInfos.get(0).getReplicaId();
    assertFalse("Sync up should not complete because not enough replicas have caught up", mockHelixParticipant.getReplicaSyncUpManager().updateReplicaLagAndCheckSyncStatus(localReplica, peerReplica1, 10L, ReplicaState.INACTIVE));
    // pick another remote replica to update the replication lag
    ReplicaId peerReplica2 = remoteReplicaInfos.get(1).getReplicaId();
    replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition, peerReplica1.getDataNodeId().getHostname(), peerReplica1.getReplicaPath(), 118);
    assertFalse("Sync up shouldn't complete because only one replica has caught up with local replica", mockHelixParticipant.getReplicaSyncUpManager().isSyncUpComplete(localReplica));
    // make second peer replica catch up with last PUT in local store
    replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition, peerReplica2.getDataNodeId().getHostname(), peerReplica2.getReplicaPath(), 118);
    assertTrue("Standby-To-Inactive transition didn't complete within 1 sec", participantLatch.await(1, TimeUnit.SECONDS));
    // we purposely update lag against local replica to verify local replica is no longer in ReplicaSyncUpManager because
    // deactivation is complete and local replica should be removed from "replicaToLagInfos" map.
    assertFalse("Sync up should complete (2 replicas have caught up), hence updated should be false", mockHelixParticipant.getReplicaSyncUpManager().updateReplicaLagAndCheckSyncStatus(localReplica, peerReplica2, 0L, ReplicaState.INACTIVE));
    storageManager.shutdown();
}
Also used : MetricRegistry(com.codahale.metrics.MetricRegistry) StorageManager(com.github.ambry.store.StorageManager) ArrayList(java.util.ArrayList) Store(com.github.ambry.store.Store) MockId(com.github.ambry.store.MockId) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) CountDownLatch(java.util.concurrent.CountDownLatch) ByteBuffer(java.nio.ByteBuffer) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) ReplicaId(com.github.ambry.clustermap.ReplicaId) MessageInfo(com.github.ambry.store.MessageInfo) MockMessageWriteSet(com.github.ambry.store.MockMessageWriteSet) MockHelixParticipant(com.github.ambry.clustermap.MockHelixParticipant) Random(java.util.Random) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) StateTransitionException(com.github.ambry.clustermap.StateTransitionException) Test(org.junit.Test)
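
The catch-up above is driven one peer at a time and uses the offset 118, i.e. the 100-byte blob plus its 18-byte header as the PUT comment notes. A hypothetical helper that reports the same offset for every peer of a partition, built only from the updateTotalBytesReadByRemoteReplica call already used in the test, might look like:

private static void catchUpAllPeers(MockReplicationManager replicationManager, PartitionId partition, List<RemoteReplicaInfo> remoteReplicaInfos, long bytesRead) {
    // Report the same bytes-read offset for every peer replica of the partition.
    for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfos) {
        ReplicaId peer = remoteReplicaInfo.getReplicaId();
        replicationManager.updateTotalBytesReadByRemoteReplica(partition, peer.getDataNodeId().getHostname(), peer.getReplicaPath(), bytesRead);
    }
}

The test deliberately avoids doing this in one step so the intermediate assertFalse can check that sync-up is not declared complete until enough peers have caught up.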

Example 9 with MockMessageWriteSet

Usage of com.github.ambry.store.MockMessageWriteSet in the ambry project by LinkedIn.

From the class ReplicationTest, method replicaFromInactiveToOfflineTest.

/**
 * Test INACTIVE -> OFFLINE transition on existing replica (both success and failure cases)
 */
@Test
public void replicaFromInactiveToOfflineTest() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    MockHelixParticipant.metricRegistry = new MetricRegistry();
    MockHelixParticipant mockHelixParticipant = new MockHelixParticipant(clusterMapConfig);
    Pair<StorageManager, ReplicationManager> managers = createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, mockHelixParticipant);
    StorageManager storageManager = managers.getFirst();
    MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
    // 1. test replica not found case
    try {
        mockHelixParticipant.onPartitionBecomeOfflineFromInactive("-1");
        fail("should fail because of invalid partition");
    } catch (StateTransitionException e) {
        assertEquals("Error code doesn't match", ReplicaNotFound, e.getErrorCode());
    }
    // 2. test store not started case
    PartitionId existingPartition = replicationManager.partitionToPartitionInfo.keySet().iterator().next();
    storageManager.shutdownBlobStore(existingPartition);
    try {
        mockHelixParticipant.onPartitionBecomeOfflineFromInactive(existingPartition.toPathString());
        fail("should fail because store is not started");
    } catch (StateTransitionException e) {
        assertEquals("Error code doesn't match", StoreNotStarted, e.getErrorCode());
    }
    storageManager.startBlobStore(existingPartition);
    // before testing success case, let's write a blob (size = 100) into local store and add a delete record for new blob
    Store localStore = storageManager.getStore(existingPartition);
    MockId id = new MockId(TestUtils.getRandomString(10), Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM));
    long crc = (new Random()).nextLong();
    long blobSize = 100;
    MessageInfo info = new MessageInfo(id, blobSize, false, false, Utils.Infinite_Time, crc, id.getAccountId(), id.getContainerId(), Utils.Infinite_Time);
    List<MessageInfo> infos = new ArrayList<>();
    List<ByteBuffer> buffers = new ArrayList<>();
    ByteBuffer buffer = ByteBuffer.wrap(TestUtils.getRandomBytes((int) blobSize));
    infos.add(info);
    buffers.add(buffer);
    localStore.put(new MockMessageWriteSet(infos, buffers));
    // delete the blob
    int deleteRecordSize = (int) (new DeleteMessageFormatInputStream(id, (short) 0, (short) 0, 0).getSize());
    MessageInfo deleteInfo = new MessageInfo(id, deleteRecordSize, id.getAccountId(), id.getContainerId(), time.milliseconds());
    localStore.delete(Collections.singletonList(deleteInfo));
    int sizeOfPutAndHeader = 100 + 18;
    int sizeOfWhole = sizeOfPutAndHeader + deleteRecordSize;
    // note that end offset of last PUT = 100 + 18 = 118, end offset of the store is sizeOfWhole
    // 3. test success case (create a new thread and trigger INACTIVE -> OFFLINE transition)
    ReplicaId localReplica = storageManager.getReplica(existingPartition.toPathString());
    // put a decommission-in-progress file into local store dir
    File decommissionFile = new File(localReplica.getReplicaPath(), "decommission_in_progress");
    assertTrue("Couldn't create decommission file in local store", decommissionFile.createNewFile());
    decommissionFile.deleteOnExit();
    assertNotSame("Before disconnection, the local store state shouldn't be OFFLINE", ReplicaState.OFFLINE, localStore.getCurrentState());
    mockHelixParticipant.registerPartitionStateChangeListener(StateModelListenerType.ReplicationManagerListener, replicationManager.replicationListener);
    CountDownLatch participantLatch = new CountDownLatch(1);
    replicationManager.listenerExecutionLatch = new CountDownLatch(1);
    Utils.newThread(() -> {
        mockHelixParticipant.onPartitionBecomeOfflineFromInactive(existingPartition.toPathString());
        participantLatch.countDown();
    }, false).start();
    assertTrue("Partition state change listener in ReplicationManager didn't get called within 1 sec", replicationManager.listenerExecutionLatch.await(1, TimeUnit.SECONDS));
    // the state of local store should be updated to OFFLINE
    assertEquals("Local store state is not expected", ReplicaState.OFFLINE, localStore.getCurrentState());
    // update replication lag between local and peer replicas
    List<RemoteReplicaInfo> remoteReplicaInfos = replicationManager.partitionToPartitionInfo.get(existingPartition).getRemoteReplicaInfos();
    ReplicaId peerReplica1 = remoteReplicaInfos.get(0).getReplicaId();
    ReplicaId peerReplica2 = remoteReplicaInfos.get(1).getReplicaId();
    // peer1 catches up with last PUT, peer2 catches up with end offset of local store. In this case, SyncUp is not complete
    replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition, peerReplica1.getDataNodeId().getHostname(), peerReplica1.getReplicaPath(), sizeOfPutAndHeader);
    replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition, peerReplica2.getDataNodeId().getHostname(), peerReplica2.getReplicaPath(), sizeOfWhole);
    assertFalse("Only one peer replica has fully caught up with end offset so sync-up should not complete", mockHelixParticipant.getReplicaSyncUpManager().isSyncUpComplete(localReplica));
    // make peer1 catch up with end offset
    replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition, peerReplica1.getDataNodeId().getHostname(), peerReplica1.getReplicaPath(), sizeOfWhole);
    // Now, sync-up should complete and transition should be able to proceed.
    assertTrue("Inactive-To-Offline transition didn't complete within 1 sec", participantLatch.await(1, TimeUnit.SECONDS));
    assertFalse("Local store should be stopped after transition", localStore.isStarted());
    storageManager.shutdown();
}
Also used : StorageManager(com.github.ambry.store.StorageManager) ArrayList(java.util.ArrayList) Store(com.github.ambry.store.Store) MockId(com.github.ambry.store.MockId) MockHelixParticipant(com.github.ambry.clustermap.MockHelixParticipant) Random(java.util.Random) MetricRegistry(com.codahale.metrics.MetricRegistry) DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) CountDownLatch(java.util.concurrent.CountDownLatch) ByteBuffer(java.nio.ByteBuffer) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) ReplicaId(com.github.ambry.clustermap.ReplicaId) MessageInfo(com.github.ambry.store.MessageInfo) MockMessageWriteSet(com.github.ambry.store.MockMessageWriteSet) File(java.io.File) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) StateTransitionException(com.github.ambry.clustermap.StateTransitionException) Test(org.junit.Test)
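
Both this test and the previous one run the Helix transition on a background thread so the main thread can drive the peer catch-up. That Utils.newThread plus CountDownLatch combination could be extracted into a small hypothetical helper, sketched below:

private static CountDownLatch runTransitionAsync(Runnable transition) {
    // Run the transition on a background thread; the returned latch counts down when it finishes.
    CountDownLatch latch = new CountDownLatch(1);
    Utils.newThread(() -> {
        transition.run();
        latch.countDown();
    }, false).start();
    return latch;
}

The test body would then read participantLatch = runTransitionAsync(() -> mockHelixParticipant.onPartitionBecomeOfflineFromInactive(existingPartition.toPathString())) and await that latch once the peers have caught up.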

Example 10 with MockMessageWriteSet

Usage of com.github.ambry.store.MockMessageWriteSet in the ambry project by LinkedIn.

From the class CloudBlobStoreTest, method testCacheEvictionOrder.

/**
 * Test CloudBlobStore cache eviction.
 */
@Test
public void testCacheEvictionOrder() throws Exception {
    assumeTrue(isVcr);
    // setup store with small cache size
    int cacheSize = 10;
    setupCloudStore(false, false, cacheSize, true);
    // put blobs to fill up cache
    List<StoreKey> blobIdList = new ArrayList<>();
    for (int j = 0; j < cacheSize; j++) {
        blobIdList.add(getUniqueId(refAccountId, refContainerId, false, partitionId));
        store.addToCache(blobIdList.get(j).getID(), (short) 0, CloudBlobStore.BlobState.CREATED);
    }
    // All keys were just added, so findMissingKeys should be served from the cache.
    store.findMissingKeys(blobIdList);
    verify(dest, never()).getBlobMetadata(anyList());
    int expectedLookups = blobIdList.size();
    int expectedHits = expectedLookups;
    verifyCacheHits(expectedLookups, expectedHits);
    // Perform access on first 5 blobs
    int delta = 5;
    MockMessageWriteSet messageWriteSet = new MockMessageWriteSet();
    for (int j = 0; j < delta; j++) {
        CloudTestUtil.addBlobToMessageSet(messageWriteSet, (BlobId) blobIdList.get(j), SMALL_BLOB_SIZE, Utils.Infinite_Time, operationTime, isVcr);
    }
    store.updateTtl(messageWriteSet.getMessageSetInfo());
    expectedLookups += delta;
    // Note: should be cache misses since blobs are still in CREATED state.
    verifyCacheHits(expectedLookups, expectedHits);
    // put 5 more blobs
    for (int j = cacheSize; j < cacheSize + delta; j++) {
        blobIdList.add(getUniqueId(refAccountId, refContainerId, false, partitionId));
        store.addToCache(blobIdList.get(j).getID(), (short) 0, CloudBlobStore.BlobState.CREATED);
    }
    // Look up the same first five blobs, which should still be cached.
    store.findMissingKeys(blobIdList.subList(0, delta));
    expectedLookups += delta;
    expectedHits += delta;
    verifyCacheHits(expectedLookups, expectedHits);
    verify(dest, never()).getBlobMetadata(anyList());
    // Call findMissingKeys on the next five blobs, which should have been evicted and so trigger getBlobMetadata.
    store.findMissingKeys(blobIdList.subList(delta, cacheSize));
    expectedLookups += delta;
    verifyCacheHits(expectedLookups, expectedHits);
    verify(dest).getBlobMetadata(anyList());
}
Also used : MockMessageWriteSet(com.github.ambry.store.MockMessageWriteSet) ArrayList(java.util.ArrayList) StoreKey(com.github.ambry.store.StoreKey) ReplicationTest(com.github.ambry.replication.ReplicationTest) Test(org.junit.Test)
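
The expected hit and miss counts above assume a recency-based cache: the five blobs touched by updateTtl survive the five new puts, while the untouched ones are evicted and fall through to getBlobMetadata. As a plain-Java illustration of that eviction order only (this is not CloudBlobStore's actual cache implementation), an access-ordered LinkedHashMap behaves the same way:

import java.util.LinkedHashMap;
import java.util.Map;

// Illustration only: a fixed-capacity, access-ordered LRU map. Reads refresh an entry's recency;
// the least recently used entry is dropped once the capacity is exceeded.
public class LruSketch<K, V> extends LinkedHashMap<K, V> {
    private final int capacity;

    public LruSketch(int capacity) {
        super(16, 0.75f, true); // accessOrder = true
        this.capacity = capacity;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > capacity;
    }
}

Filling such a map with ten keys, reading the first five, and then inserting five more leaves the first five resident and evicts the untouched five, which mirrors the lookups this test expects to miss the cache and reach the cloud destination.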

Aggregations

MockMessageWriteSet (com.github.ambry.store.MockMessageWriteSet): 16 uses
Test (org.junit.Test): 12 uses
MessageInfo (com.github.ambry.store.MessageInfo): 11 uses
StoreException (com.github.ambry.store.StoreException): 10 uses
ReplicationTest (com.github.ambry.replication.ReplicationTest): 6 uses
BlobId (com.github.ambry.commons.BlobId): 5 uses
StoreGetOptions (com.github.ambry.store.StoreGetOptions): 5 uses
MetricRegistry (com.codahale.metrics.MetricRegistry): 4 uses
StoreInfo (com.github.ambry.store.StoreInfo): 4 uses
StoreKey (com.github.ambry.store.StoreKey): 4 uses
ByteBuffer (java.nio.ByteBuffer): 4 uses
ArrayList (java.util.ArrayList): 4 uses
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 2 uses
MockHelixParticipant (com.github.ambry.clustermap.MockHelixParticipant): 2 uses
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 2 uses
MockReplicaId (com.github.ambry.clustermap.MockReplicaId): 2 uses
PartitionId (com.github.ambry.clustermap.PartitionId): 2 uses
ReplicaId (com.github.ambry.clustermap.ReplicaId): 2 uses
StateTransitionException (com.github.ambry.clustermap.StateTransitionException): 2 uses
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 2 uses