
Example 36 with StoreConfig

Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.

The class StatsManagerTest, method testReplicaFromOfflineToDropped.

/**
 * Test Offline-To-Dropped transition (both failure and success cases)
 * @throws Exception
 */
@Test
public void testReplicaFromOfflineToDropped() throws Exception {
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    ReplicationConfig replicationConfig = new ReplicationConfig(verifiableProperties);
    StoreConfig storeConfig = new StoreConfig(verifiableProperties);
    MockClusterMap clusterMap = new MockClusterMap();
    DataNodeId currentNode = clusterMap.getDataNodeIds().get(0);
    List<ReplicaId> localReplicas = clusterMap.getReplicaIds(currentNode);
    StorageManager storageManager = new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true), new MetricRegistry(), null, clusterMap, currentNode, null, Collections.singletonList(clusterParticipant), new MockTime(), null, new InMemAccountService(false, false));
    storageManager.start();
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    MockReplicationManager mockReplicationManager = new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap, currentNode, storeKeyConverterFactory, clusterParticipant);
    MockStatsManager mockStatsManager = new MockStatsManager(storageManager, localReplicas, new MetricRegistry(), statsManagerConfig, clusterParticipant);
    // 1. attempt to remove replica while store is still running (remove store failure case)
    ReplicaId replicaToDrop = localReplicas.get(0);
    try {
        clusterParticipant.onPartitionBecomeDroppedFromOffline(replicaToDrop.getPartitionId().toPathString());
        fail("should fail because store is still running");
    } catch (StateTransitionException e) {
        assertEquals("Error code doesn't match", ReplicaOperationFailure, e.getErrorCode());
    }
    // 2. shut down the store but introduce a file deletion failure (put an invalid dir in the store dir)
    storageManager.shutdownBlobStore(replicaToDrop.getPartitionId());
    File invalidDir = new File(replicaToDrop.getReplicaPath(), "invalidDir");
    invalidDir.deleteOnExit();
    assertTrue("Couldn't create dir within store dir", invalidDir.mkdir());
    assertTrue("Could not make unreadable", invalidDir.setReadable(false));
    try {
        clusterParticipant.onPartitionBecomeDroppedFromOffline(replicaToDrop.getPartitionId().toPathString());
        fail("should fail because store deletion fails");
    } catch (StateTransitionException e) {
        assertEquals("Error code doesn't match", ReplicaOperationFailure, e.getErrorCode());
    }
    // reset permission to allow deletion to succeed.
    assertTrue("Could not make readable", invalidDir.setReadable(true));
    assertTrue("Could not delete invalid dir", invalidDir.delete());
    // 3. success case (remove another replica because previous replica has been removed from in-mem data structures)
    ReplicaId replica = localReplicas.get(1);
    storageManager.shutdownBlobStore(replica.getPartitionId());
    MockHelixParticipant mockHelixParticipant = Mockito.spy(clusterParticipant);
    doNothing().when(mockHelixParticipant).setPartitionDisabledState(anyString(), anyBoolean());
    mockHelixParticipant.onPartitionBecomeDroppedFromOffline(replica.getPartitionId().toPathString());
    // verify that the replica is no longer present in StorageManager
    assertNull("Store of removed replica should not exist", storageManager.getStore(replica.getPartitionId(), true));
    // purposely remove the same replica in ReplicationManager again to verify it no longer exists
    assertFalse("Should return false because replica no longer exists", mockReplicationManager.removeReplica(replica));
    // purposely remove the same replica in StatsManager again to verify it no longer exists
    assertFalse("Should return false because replica no longer exists", mockStatsManager.removeReplica(replica));
    verify(mockHelixParticipant).setPartitionDisabledState(replica.getPartitionId().toPathString(), false);
    storageManager.shutdown();
    mockStatsManager.shutdown();
}
Also used : DiskManagerConfig(com.github.ambry.config.DiskManagerConfig) MockStoreKeyConverterFactory(com.github.ambry.store.MockStoreKeyConverterFactory) ReplicationConfig(com.github.ambry.config.ReplicationConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) StorageManager(com.github.ambry.store.StorageManager) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ReplicaId(com.github.ambry.clustermap.ReplicaId) MockReplicationManager(com.github.ambry.replication.MockReplicationManager) InMemAccountService(com.github.ambry.account.InMemAccountService) MockHelixParticipant(com.github.ambry.clustermap.MockHelixParticipant) StoreConfig(com.github.ambry.config.StoreConfig) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) File(java.io.File) MockTime(com.github.ambry.utils.MockTime) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) StateTransitionException(com.github.ambry.clustermap.StateTransitionException) Test(org.junit.Test)
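
A minimal sketch of the configuration wiring used above: one VerifiableProperties instance, built from a plain Properties object, backs every config type handed to StorageManager and MockReplicationManager. This is not taken verbatim from the test; the clustermap.* keys and values below are illustrative placeholders (the test's own verifiableProperties fixture supplies the real ones), assuming ClusterMapConfig's identity keys are the only properties that must be set explicitly. Imports are the config classes from the "Also used" list plus java.util.Properties.

Properties props = new Properties();
// Placeholder values; ClusterMapConfig expects these clustermap.* identity keys to be present.
props.setProperty("clustermap.cluster.name", "test-cluster");
props.setProperty("clustermap.datacenter.name", "DC1");
props.setProperty("clustermap.host.name", "localhost");
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
// Each config class reads only the keys it cares about from the same VerifiableProperties.
StoreConfig storeConfig = new StoreConfig(verifiableProperties);
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
ReplicationConfig replicationConfig = new ReplicationConfig(verifiableProperties);
DiskManagerConfig diskManagerConfig = new DiskManagerConfig(verifiableProperties);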

Example 37 with StoreConfig

Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.

The class BlobStoreCompactorTest, method statsBasedCompactionStrategyWithInvalidLogSegment.

@Test
public void statsBasedCompactionStrategyWithInvalidLogSegment() throws Exception {
    assumeTrue(!withUndelete);
    refreshState(false, true, false);
    // The current log segments are set up like this:
    // three log segments: 0_0, 1_0, 2_0
    // Now set up the log segments so that we have:
    // 3_0 doesn't have any valid index values (all expired puts)
    // 4_0 has only one valid index value (the rest are expired puts)
    // 5_0 has data so 4_0 won't be in the journal
    // This setup makes sure that:
    // 3_0 has value 0 in the result from BlobStoreStats
    // 1_0, 2_0 and 4_0 would be the best candidates to compact if we ignore 3_0
    long requiredCount = state.index.getLogSegmentCount();
    long requiredBytes = requiredCount * state.log.getSegmentCapacity();
    long numPuts = (requiredBytes - state.index.getLogUsedCapacity()) / PUT_RECORD_SIZE;
    state.addPutEntries((int) numPuts, PUT_RECORD_SIZE, Utils.Infinite_Time);
    requiredBytes = (requiredCount + 1) * state.log.getSegmentCapacity();
    numPuts = (requiredBytes - state.index.getLogUsedCapacity()) / PUT_RECORD_SIZE;
    state.addPutEntries((int) numPuts, PUT_RECORD_SIZE, 0);
    requiredBytes = (requiredCount + 2) * state.log.getSegmentCapacity();
    numPuts = (requiredBytes - state.index.getLogUsedCapacity()) / PUT_RECORD_SIZE - 1;
    state.addPutEntries((int) numPuts, PUT_RECORD_SIZE, 0L);
    state.addPutEntries(1, PUT_RECORD_SIZE, Utils.Infinite_Time);
    requiredBytes = (requiredCount + 3) * state.log.getSegmentCapacity();
    numPuts = (requiredBytes - state.index.getLogUsedCapacity()) / PUT_RECORD_SIZE;
    state.addPutEntries((int) numPuts, PUT_RECORD_SIZE, Utils.Infinite_Time);
    state.time.setCurrentMilliseconds(System.currentTimeMillis());
    Properties properties = new Properties();
    properties.setProperty("store.min.used.capacity.to.trigger.compaction.in.percentage", "1");
    StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(properties));
    StatsBasedCompactionPolicy policy = new StatsBasedCompactionPolicy(storeConfig, state.time);
    ScheduledExecutorService scheduler = Utils.newScheduler(1, true);
    BlobStoreStats stats = new BlobStoreStats("", state.index, 0, Time.MsPerSec, 0, 100, Time.SecsPerMin, false, purgeDeleteTombstone, state.time, scheduler, scheduler, DISK_IO_SCHEDULER, new StoreMetrics(new MetricRegistry()), 1, false);
    BlobStoreStats spyStats = Mockito.spy(stats);
    Mockito.doReturn(PUT_RECORD_SIZE).when(spyStats).getMaxBlobSize();
    CompactionDetails details = policy.getCompactionDetails(state.log.getCapacityInBytes(), state.index.getLogUsedCapacity(), state.log.getSegmentCapacity(), LogSegment.HEADER_SIZE, state.index.getLogSegmentsNotInJournal(), spyStats, "/tmp");
    List<LogSegmentName> logSegmentNames = details.getLogSegmentsUnderCompaction();
    assertEquals(1, logSegmentNames.size());
    assertEquals("3" + BlobStore.SEPARATOR + "0", logSegmentNames.get(0).toString());
}
Also used : ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) VerifiableProperties(com.github.ambry.config.VerifiableProperties) MetricRegistry(com.codahale.metrics.MetricRegistry) StoreConfig(com.github.ambry.config.StoreConfig) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) Test(org.junit.Test)
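
The single StoreConfig knob this test turns, isolated as a sketch: lowering store.min.used.capacity.to.trigger.compaction.in.percentage to 1 so that the policy's used-capacity precondition is effectively always satisfied. The MockTime below is assumed as a stand-in for the test's state.time.

Properties properties = new Properties();
// Lower the minimum used-capacity percentage that must be reached before compaction is considered.
properties.setProperty("store.min.used.capacity.to.trigger.compaction.in.percentage", "1");
StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(properties));
StatsBasedCompactionPolicy policy = new StatsBasedCompactionPolicy(storeConfig, new MockTime());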

Example 38 with StoreConfig

Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.

The class BlobStoreTest, method storeIoErrorCountTest.

/**
 * Tests that {@link BlobStore#onError()} and {@link BlobStore#onSuccess()} can correctly capture disk-related I/O errors
 * and properly shut down the store.
 * @throws StoreException
 */
@Test
public void storeIoErrorCountTest() throws StoreException, IOException {
    // setup testing environment
    store.shutdown();
    properties.put("store.io.error.count.to.trigger.shutdown", "2");
    MockId id1 = getUniqueId();
    MockId id2 = getUniqueId();
    MockId id3 = getUniqueId();
    MessageInfo corruptedInfo = new MessageInfo(getUniqueId(), PUT_RECORD_SIZE, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), Utils.Infinite_Time);
    MessageInfo info1 = new MessageInfo(id1, PUT_RECORD_SIZE, 3 * 24 * 60 * 60 * 1000, id1.getAccountId(), id1.getContainerId(), Utils.Infinite_Time);
    MessageInfo info2 = new MessageInfo(id2, PUT_RECORD_SIZE, id2.getAccountId(), id2.getContainerId(), Utils.Infinite_Time);
    MessageInfo info3 = new MessageInfo(id3, PUT_RECORD_SIZE, id3.getAccountId(), id3.getContainerId(), Utils.Infinite_Time);
    MessageWriteSet corruptedWriteSet = new MockMessageWriteSet(Collections.singletonList(corruptedInfo), Collections.singletonList(ByteBuffer.allocate(PUT_RECORD_SIZE)), new StoreException(StoreException.IO_ERROR_STR, StoreErrorCodes.IOError));
    MessageWriteSet validWriteSet1 = new MockMessageWriteSet(Collections.singletonList(info1), Collections.singletonList(ByteBuffer.allocate(PUT_RECORD_SIZE)), null);
    MessageWriteSet validWriteSet2 = new MockMessageWriteSet(Collections.singletonList(info2), Collections.singletonList(ByteBuffer.allocate(PUT_RECORD_SIZE)), null);
    MessageWriteSet validWriteSet3 = new MockMessageWriteSet(Collections.singletonList(info3), Collections.singletonList(ByteBuffer.allocate(PUT_RECORD_SIZE)), null);
    ReplicaStatusDelegate mockDelegate = mock(ReplicaStatusDelegate.class);
    // Test1: simulate StoreErrorCodes.IOError triggered by corrupted write set.
    // verify that store can capture disk I/O errors in Put/Delete/TtlUpdate methods and take proper actions.
    BlobStore testStore1 = createBlobStore(getMockReplicaId(tempDirStr), new StoreConfig(new VerifiableProperties(properties)), Collections.singletonList(mockDelegate));
    testStore1.start();
    assertTrue("Store should start successfully", testStore1.isStarted());
    // verify store can keep track of real I/O errors for Put operation and shutdown properly.
    try {
        testStore1.put(corruptedWriteSet);
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    assertTrue("Store should be up", testStore1.isStarted());
    // verify error count would be reset after successful Put operation
    testStore1.put(validWriteSet1);
    assertEquals("Error count should be reset", 0, testStore1.getErrorCount().get());
    // trigger a normal shutdown to persist data (otherwise the following delete/ttl update operations will encounter ID_Not_Found errors)
    testStore1.shutdown();
    // restart for subsequent tests
    testStore1.start();
    // verify that two consecutive failed Puts make the store shut down (storeIoErrorCountToTriggerShutdown = 2)
    for (int i = 0; i < 2; ++i) {
        try {
            testStore1.put(corruptedWriteSet);
        } catch (StoreException e) {
            assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
        }
    }
    assertFalse("Store should shutdown because error count exceeded threshold", testStore1.isStarted());
    testStore1.start();
    // verify store can keep track of real I/O errors for Delete and TtlUpdate operations and shutdown properly.
    assertEquals("Error count should be reset", 0, testStore1.getErrorCount().get());
    testStore1.shutdown();
    // Test2: simulate StoreErrorCodes.IOError occurring in the getStoreKey step even though the WriteSet is valid.
    // Verify that the store can capture disk I/O errors in the Get method and take proper actions. Put/Delete/TtlUpdate are also tested.
    properties.put("store.index.max.number.of.inmem.elements", "1");
    properties.put("store.io.error.count.to.trigger.shutdown", "3");
    MetricRegistry registry = new MetricRegistry();
    StoreMetrics metrics = new StoreMetrics(registry);
    StoreKeyFactory mockStoreKeyFactory = Mockito.spy(STORE_KEY_FACTORY);
    BlobStore testStore2 = new BlobStore(getMockReplicaId(tempDirStr), new StoreConfig(new VerifiableProperties(properties)), scheduler, storeStatsScheduler, diskIOScheduler, diskSpaceAllocator, metrics, metrics, mockStoreKeyFactory, recovery, hardDelete, Collections.singletonList(mockDelegate), time, new InMemAccountService(false, false), null);
    testStore2.start();
    assertTrue("Store should start up", testStore2.isStarted());
    testStore2.put(validWriteSet2);
    testStore2.put(validWriteSet3);
    // shut down and restart to make the segments memory mapped (this is used to simulate the IOException generated by mockStoreKeyFactory)
    testStore2.shutdown();
    testStore2.start();
    doThrow(new IOException(StoreException.IO_ERROR_STR)).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    // verify that store exceptions (caused by IOException and InternalError) could be captured by Get operation
    try {
        testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    doThrow(new InternalError(StoreException.INTERNAL_ERROR_STR)).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    try {
        testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    assertEquals("Mismatch in error count", 2, testStore2.getErrorCount().get());
    // test that when InternalError's error message is null, the error code should be Unknown_Error and store error count
    // stays unchanged.
    doThrow(new InternalError()).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    try {
        testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.Unknown_Error, e.getErrorCode());
    }
    assertEquals("Mismatch in error count", 2, testStore2.getErrorCount().get());
    // verify that StoreErrorCodes.Unknown_Error could be captured by Get and that the error count stays unchanged.
    doThrow(new IOException("Unknown exception")).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    try {
        testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.Unknown_Error, e.getErrorCode());
    }
    doThrow(new InternalError("Unknown exception")).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    try {
        testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.Unknown_Error, e.getErrorCode());
    }
    assertEquals("Mismatch in error count", 2, testStore2.getErrorCount().get());
    // verify error count would be reset after successful Get operation
    Mockito.reset(mockStoreKeyFactory);
    StoreInfo storeInfo = testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
    assertNotNull(storeInfo);
    assertEquals("Error count should be reset", 0, testStore2.getErrorCount().get());
    doThrow(new IOException(StoreException.IO_ERROR_STR)).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    // call put method to trigger StoreException
    try {
        testStore2.put(validWriteSet1);
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    // call TtlUpdate method to trigger StoreException
    MessageInfo ttlUpdateInfo = new MessageInfo(id2, TTL_UPDATE_RECORD_SIZE, false, true, Utils.Infinite_Time, id2.getAccountId(), id2.getContainerId(), time.milliseconds());
    try {
        testStore2.updateTtl(Collections.singletonList(ttlUpdateInfo));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    // call delete method to trigger StoreException
    MessageInfo deleteInfo = new MessageInfo(id2, DELETE_RECORD_SIZE, id2.getAccountId(), id2.getContainerId(), time.milliseconds());
    try {
        testStore2.delete(Collections.singletonList(deleteInfo));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    // verify error count keeps track of StoreException and shut down store properly
    assertEquals("Mismatch in triggered shutdown counter", 1, metrics.storeIoErrorTriggeredShutdownCount.getCount());
    assertFalse("Store should shutdown because error count exceeded threshold", testStore2.isStarted());
    reloadStore();
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) MetricRegistry(com.codahale.metrics.MetricRegistry) IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) ReplicaStatusDelegate(com.github.ambry.clustermap.ReplicaStatusDelegate) InMemAccountService(com.github.ambry.account.InMemAccountService) StoreConfig(com.github.ambry.config.StoreConfig) Test(org.junit.Test)
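
A sketch isolating the two StoreConfig properties this test tunes. Test1 uses an error threshold of 2; Test2 raises it to 3 and also caps the in-memory index elements at 1, which is what forces reads through memory-mapped segments where the mocked StoreKeyFactory can inject IOExceptions. The comments restate the rationale given in the test's own comments (e.g. storeIoErrorCountToTriggerShutdown = 2).

Properties properties = new Properties();
// Shut the store down once this many I/O errors accumulate without an intervening successful operation.
properties.setProperty("store.io.error.count.to.trigger.shutdown", "2");
// Keep in-memory index segments tiny so they are sealed and memory mapped quickly, letting the
// mocked StoreKeyFactory inject IOExceptions on the Get path.
properties.setProperty("store.index.max.number.of.inmem.elements", "1");
StoreConfig config = new StoreConfig(new VerifiableProperties(properties));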

Example 39 with StoreConfig

Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.

The class BlobStoreTest, method testClusterManagerReplicaStatusDelegateUse.

/**
 * Tests blob store use of {@link ReplicaStatusDelegate}
 * @throws StoreException
 */
@Test
public void testClusterManagerReplicaStatusDelegateUse() throws StoreException, IOException, InterruptedException {
    // Recreate the test state so that compaction for segmented logs never encounters TTL updates
    if (isLogSegmented) {
        cleanup();
        scheduler = Utils.newScheduler(1, false);
        storeStatsScheduler = Utils.newScheduler(1, false);
        setupTestState(false, false);
    }
    properties.setProperty("store.set.local.partition.state.enabled", Boolean.toString(true));
    // Setup threshold test properties, replicaId, mock write status delegate
    StoreConfig defaultConfig = changeThreshold(65, 5, true);
    StoreTestUtils.MockReplicaId replicaId = getMockReplicaId(tempDirStr);
    ReplicaStatusDelegate replicaStatusDelegate = mock(ReplicaStatusDelegate.class);
    when(replicaStatusDelegate.unseal(any())).thenReturn(true);
    when(replicaStatusDelegate.seal(any())).thenReturn(true);
    // Restart store
    reloadStore(defaultConfig, replicaId, Collections.singletonList(replicaStatusDelegate));
    // Check that after start, replicaStatusDelegate is called to enable replica if it was previously disabled
    verify(replicaStatusDelegate, times(1)).enableReplica(replicaId);
    // Verify that putting in data that doesn't go over the threshold doesn't trigger the delegate
    put(1, 50, Utils.Infinite_Time);
    verify(replicaStatusDelegate, times(0)).seal(replicaId);
    // Verify that after putting in enough data, the store goes to read only
    // setupTestState has already created 3 log segments; here we create another 4 segments, which should
    // be enough to fill up to 65% of the log capacity.
    List<MockId> addedIds = put(4, (long) (SEGMENT_CAPACITY * 0.8), Utils.Infinite_Time);
    verify(replicaStatusDelegate, times(1)).seal(replicaId);
    // Assumes ClusterParticipant sets replicaId status to true
    replicaId.setSealedState(true);
    // Change config threshold but with delegate disabled, verify that nothing happens (store doesn't get unsealed)
    reloadStore(changeThreshold(99, 1, false), replicaId, Collections.singletonList(replicaStatusDelegate));
    verify(replicaStatusDelegate, times(0)).unseal(replicaId);
    // Change config threshold to higher, see that it gets changed to unsealed on reset
    reloadStore(changeThreshold(99, 1, true), replicaId, Collections.singletonList(replicaStatusDelegate));
    verify(replicaStatusDelegate, times(1)).unseal(replicaId);
    replicaId.setSealedState(false);
    // Reset thresholds, verify that it changed back
    reloadStore(defaultConfig, replicaId, Collections.singletonList(replicaStatusDelegate));
    verify(replicaStatusDelegate, times(2)).seal(replicaId);
    replicaId.setSealedState(true);
    // Remaining tests only relevant for segmented logs
    if (isLogSegmented) {
        // Delete added data
        for (MockId addedId : addedIds) {
            delete(addedId);
        }
        // Need to restart the blob store, otherwise compaction will ignore segments in the journal (which is currently all of the segments).
        // By restarting, only the last segment will be in the journal
        reloadStore(defaultConfig, replicaId, Collections.singletonList(replicaStatusDelegate));
        verify(replicaStatusDelegate, times(4)).enableReplica(replicaId);
        // Advance time by 8 days, call compaction to compact segments with deleted data, then verify
        // that the store is now read-write
        time.sleep(TimeUnit.DAYS.toMillis(8));
        store.compact(store.getCompactionDetails(new CompactAllPolicy(defaultConfig, time)), new byte[PUT_RECORD_SIZE * 2 + 1]);
        verify(replicaStatusDelegate, times(2)).unseal(replicaId);
        // Test that if the replicaId's sealed state is erroneously true, the store updates the status upon startup
        replicaId.setSealedState(true);
        reloadStore(defaultConfig, replicaId, Collections.singletonList(replicaStatusDelegate));
        verify(replicaStatusDelegate, times(3)).unseal(replicaId);
    }
    store.shutdown();
    properties.setProperty("store.set.local.partition.state.enabled", Boolean.toString(false));
}
Also used : ReplicaStatusDelegate(com.github.ambry.clustermap.ReplicaStatusDelegate) StoreConfig(com.github.ambry.config.StoreConfig) Test(org.junit.Test)
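
The changeThreshold(...) helper called above is a BlobStoreTest utility that is not shown on this page. Below is a hypothetical stand-in to illustrate the idea; the store.* property names are assumptions about StoreConfig's sealing-related settings and are not confirmed by the snippet above. Only the (readOnlyPercent, delta, delegateEnabled) shape is taken from calls such as changeThreshold(65, 5, true).

// Hypothetical stand-in for the test's changeThreshold helper; the store.* key names below are assumed.
private StoreConfig changeThresholdSketch(Properties baseProps, int readOnlyPercent, int delta, boolean delegateEnabled) {
    Properties props = new Properties();
    props.putAll(baseProps);
    // Assumed keys: the used-capacity percentage at which a store seals, the delta below that
    // at which it unseals, and the switch allowing the ReplicaStatusDelegate to act on seal state.
    props.setProperty("store.read.only.enable.size.threshold.percentage", Integer.toString(readOnlyPercent));
    props.setProperty("store.read.write.enable.size.threshold.percentage.delta", Integer.toString(delta));
    props.setProperty("store.replica.status.delegate.enable", Boolean.toString(delegateEnabled));
    return new StoreConfig(new VerifiableProperties(props));
}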

Example 40 with StoreConfig

Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.

The class BlobStoreTest, method testBlobStoreStatsHandleNewEntries.

/**
 * Tests {@link BlobStoreStats}'s handle-new-entry methods and makes sure the correct {@link IndexValue}s are passed
 * to them.
 * @throws Exception
 */
@Test
public void testBlobStoreStatsHandleNewEntries() throws Exception {
    store.shutdown();
    ReplicaId replicaId = getMockReplicaId(tempDirStr);
    StoreConfig config = new StoreConfig(new VerifiableProperties(properties));
    MetricRegistry registry = new MetricRegistry();
    StoreMetrics metrics = new StoreMetrics(registry);
    MockBlobStoreStats mockBlobStoreStats = new MockBlobStoreStats(time);
    store = new MockBlobStore(replicaId, config, null, metrics, mockBlobStoreStats);
    store.start();
    MockId id = put(1, PUT_RECORD_SIZE, Utils.Infinite_Time).get(0);
    assertNotNull(mockBlobStoreStats.currentValue);
    assertNull(mockBlobStoreStats.originalPutValue);
    assertNull(mockBlobStoreStats.previousValue);
    assertTrue(mockBlobStoreStats.currentValue.isPut());
    updateTtl(id);
    assertNotNull(mockBlobStoreStats.currentValue);
    assertNotNull(mockBlobStoreStats.originalPutValue);
    assertNull(mockBlobStoreStats.previousValue);
    assertTrue(mockBlobStoreStats.currentValue.isTtlUpdate());
    assertTrue(mockBlobStoreStats.originalPutValue.isPut());
    delete(id);
    assertNotNull(mockBlobStoreStats.currentValue);
    assertNotNull(mockBlobStoreStats.originalPutValue);
    assertNotNull(mockBlobStoreStats.previousValue);
    assertTrue(mockBlobStoreStats.currentValue.isDelete());
    assertTrue(mockBlobStoreStats.currentValue.isTtlUpdate());
    assertTrue(mockBlobStoreStats.previousValue.isTtlUpdate());
    assertTrue(mockBlobStoreStats.originalPutValue.isTtlUpdate());
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) MetricRegistry(com.codahale.metrics.MetricRegistry) StoreConfig(com.github.ambry.config.StoreConfig) ReplicaId(com.github.ambry.clustermap.ReplicaId) Test(org.junit.Test)

Aggregations

Usage counts of classes that appear together with StoreConfig in the indexed examples:

StoreConfig (com.github.ambry.config.StoreConfig): 60
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 50
MetricRegistry (com.codahale.metrics.MetricRegistry): 34
Test (org.junit.Test): 29
File (java.io.File): 18
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 17
ArrayList (java.util.ArrayList): 15
Properties (java.util.Properties): 15
ClusterMap (com.github.ambry.clustermap.ClusterMap): 10
BlobIdFactory (com.github.ambry.commons.BlobIdFactory): 9
InMemAccountService (com.github.ambry.account.InMemAccountService): 8
DataNodeId (com.github.ambry.clustermap.DataNodeId): 8
CountDownLatch (java.util.concurrent.CountDownLatch): 8
MockTime (com.github.ambry.utils.MockTime): 7
HashSet (java.util.HashSet): 7
ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory): 6
ReplicaId (com.github.ambry.clustermap.ReplicaId): 6
ReplicaStatusDelegate (com.github.ambry.clustermap.ReplicaStatusDelegate): 6
DiskManagerConfig (com.github.ambry.config.DiskManagerConfig): 6
ReplicationConfig (com.github.ambry.config.ReplicationConfig): 6