Usage of com.github.ambry.config.StoreConfig in the ambry project (by LinkedIn).
From class BlobStoreTest, method changeThreshold.
/**
 * Builds a {@link StoreConfig} whose dynamic replica sealing thresholds have been updated.
 * @param readOnlyThreshold new storeReadOnlyEnableSizeThresholdPercentageName value
 * @param readWriteDeltaThreshold new storeReadWriteEnableSizeThresholdPercentageDeltaName value
 * @param delegateEnabled whether the replica status delegate should be enabled
 * @return StoreConfig object with new threshold values
 */
private StoreConfig changeThreshold(int readOnlyThreshold, int readWriteDeltaThreshold, boolean delegateEnabled) {
properties.setProperty(StoreConfig.storeReplicaStatusDelegateEnableName, String.valueOf(delegateEnabled));
properties.setProperty(StoreConfig.storeReadOnlyEnableSizeThresholdPercentageName, String.valueOf(readOnlyThreshold));
properties.setProperty(StoreConfig.storeReadWriteEnableSizeThresholdPercentageDeltaName, String.valueOf(readWriteDeltaThreshold));
return new StoreConfig(new VerifiableProperties(properties));
}
Usage of com.github.ambry.config.StoreConfig in the ambry project (by LinkedIn).
From class BlobStoreTest, method inNoTtlUpdatePeriodTest.
// ttlUpdateErrorCasesTest() helpers
/**
 * Tests that a TTL update is rejected when the blob has not expired but its expiry time falls inside
 * the TTL update buffer window (i.e. the expiry time is "too close" to now), and that a TTL update
 * succeeds for a blob whose expiry is exactly at the cutoff.
 * @throws Exception
 */
private void inNoTtlUpdatePeriodTest() throws Exception {
long bufferSecs = new StoreConfig(new VerifiableProperties(properties)).storeTtlUpdateBufferTimeSeconds;
long cutoffMs = TimeUnit.SECONDS.toMillis(bufferSecs) + time.milliseconds();
// a blob expiring just inside the buffer window cannot be TTL updated
MockId blobId = put(1, PUT_RECORD_SIZE, cutoffMs - 1).get(0);
verifyTtlUpdateFailure(blobId, Utils.Infinite_Time, StoreErrorCodes.Update_Not_Allowed);
// a blob expiring exactly AT the cutoff time succeeds
blobId = put(1, PUT_RECORD_SIZE, cutoffMs).get(0);
updateTtl(blobId);
verifyTtlUpdate(blobId);
}
Usage of com.github.ambry.config.StoreConfig in the ambry project (by LinkedIn).
From class BlobStoreTest, method deleteStoreFilesTest.
/**
 * Test both success and failure cases when deleting store files.
 * Covers: deletion rejected on a started store, deletion failure due to an unreadable
 * sub-directory, and successful deletion (including swap segment return to the reserve pool).
 * @throws Exception
 */
@Test
public void deleteStoreFilesTest() throws Exception {
store.shutdown();
// create test store directory
File storeDir = StoreTestUtils.createTempDirectory("store-" + storeId);
File reserveDir = StoreTestUtils.createTempDirectory("reserve-pool");
reserveDir.deleteOnExit();
DiskSpaceAllocator diskAllocator = new DiskSpaceAllocator(true, reserveDir, 0, new StorageManagerMetrics(new MetricRegistry()));
StoreConfig config = new StoreConfig(new VerifiableProperties(properties));
MetricRegistry registry = new MetricRegistry();
StoreMetrics metrics = new StoreMetrics(registry);
BlobStore testStore = new BlobStore(getMockReplicaId(storeDir.getAbsolutePath()), config, scheduler, storeStatsScheduler, diskIOScheduler, diskAllocator, metrics, metrics, STORE_KEY_FACTORY, recovery, hardDelete, null, time, new InMemAccountService(false, false), null);
testStore.start();
DiskSpaceRequirements diskSpaceRequirements = testStore.getDiskSpaceRequirements();
// reuse the already-fetched requirements instead of calling getDiskSpaceRequirements() twice
diskAllocator.initializePool(diskSpaceRequirements == null ? Collections.emptyList() : Collections.singletonList(diskSpaceRequirements));
// ensure store directory and file exist
assertTrue("Store directory doesn't exist", storeDir.exists());
File storeSegmentDir = new File(reserveDir, DiskSpaceAllocator.STORE_DIR_PREFIX + storeId);
if (isLogSegmented) {
assertTrue("Store segment directory doesn't exist", storeSegmentDir.exists());
assertTrue("In-mem store file map should contain entry associated with test store", diskAllocator.getStoreReserveFileMap().containsKey(storeId));
}
// test that deletion on started store should fail
try {
testStore.deleteStoreFiles();
// BUGFIX: without this fail(), the test would silently pass if no exception were thrown
fail("should fail because store is still started");
} catch (IllegalStateException e) {
// expected
}
// create a unreadable dir in store dir to induce deletion failure
File invalidDir = new File(storeDir, "invalidDir");
assertTrue("Couldn't create dir within store dir", invalidDir.mkdir());
assertTrue("Could not make unreadable", invalidDir.setReadable(false));
testStore.shutdown();
try {
testStore.deleteStoreFiles();
fail("should fail because one invalid dir is unreadable");
} catch (Exception e) {
// expected
}
assertTrue("store directory should exist because deletion failed", storeDir.exists());
// reset permission to allow deletion to succeed.
assertTrue("Could not make readable", invalidDir.setReadable(true));
// put a swap segment into store dir
File tempFile = File.createTempFile("sample-swap", LogSegmentName.SUFFIX + BlobStoreCompactor.TEMP_LOG_SEGMENT_NAME_SUFFIX, storeDir);
// test success case (swap segment is returned and store dir is correctly deleted)
assertEquals("Swap reserve dir should be empty initially", 0, diskAllocator.getSwapReserveFileMap().getFileSizeSet().size());
testStore.deleteStoreFiles();
assertFalse("swap segment still exists", tempFile.exists());
assertEquals("Swap reserve dir should have one swap segment", 1, diskAllocator.getSwapReserveFileMap().getFileSizeSet().size());
assertFalse("store directory shouldn't exist", storeDir.exists());
assertFalse("store segment directory shouldn't exist", storeSegmentDir.exists());
assertFalse("test store entry should have been removed from in-mem store file map ", diskAllocator.getStoreReserveFileMap().containsKey(storeId));
reloadStore();
}
Usage of com.github.ambry.config.StoreConfig in the ambry project (by LinkedIn).
From class BlobStoreTest, method multiReplicaStatusDelegatesTest.
/**
 * Test store is able to correctly seal/unseal replica with multiple participants.
 * Two mocked {@link ReplicaStatusDelegate}s each track their own sealed-replica set; the test
 * drives the store across several threshold configurations (via {@code changeThreshold}) and
 * verifies both delegates stay consistent with each other and with the expected sealed state.
 * @throws Exception
 */
@Test
public void multiReplicaStatusDelegatesTest() throws Exception {
Set<ReplicaId> sealedReplicas1 = new HashSet<>();
ReplicaStatusDelegate mockDelegate1 = Mockito.mock(ReplicaStatusDelegate.class);
// delegate 1: record sealed replicas into sealedReplicas1 and report success
doAnswer(invocation -> {
sealedReplicas1.add(invocation.getArgument(0));
return true;
}).when(mockDelegate1).seal(any());
Set<ReplicaId> sealedReplicas2 = new HashSet<>();
ReplicaStatusDelegate mockDelegate2 = Mockito.mock(ReplicaStatusDelegate.class);
// delegate 2: record sealed replicas into sealedReplicas2 and report success
doAnswer(invocation -> {
sealedReplicas2.add(invocation.getArgument(0));
return true;
}).when(mockDelegate2).seal(any());
// unseal removes from each delegate's tracked set
doAnswer(invocation -> {
sealedReplicas1.remove((ReplicaId) invocation.getArgument(0));
return true;
}).when(mockDelegate1).unseal(any());
doAnswer(invocation -> {
sealedReplicas2.remove((ReplicaId) invocation.getArgument(0));
return true;
}).when(mockDelegate2).unseal(any());
// getSealedReplicas reports the tracked sets as partition path strings
doAnswer(invocation -> sealedReplicas1.stream().map(r -> r.getPartitionId().toPathString()).collect(Collectors.toList())).when(mockDelegate1).getSealedReplicas();
doAnswer(invocation -> sealedReplicas2.stream().map(r -> r.getPartitionId().toPathString()).collect(Collectors.toList())).when(mockDelegate2).getSealedReplicas();
StoreConfig defaultConfig = changeThreshold(65, 5, true);
StoreTestUtils.MockReplicaId replicaId = getMockReplicaId(tempDirStr);
reloadStore(defaultConfig, replicaId, Arrays.asList(mockDelegate1, mockDelegate2));
// make the replica sealed: fill the store past the 65% read-only threshold
put(4, (long) (SEGMENT_CAPACITY * 0.8), Utils.Infinite_Time);
assertEquals("Sealed replica lists are different", sealedReplicas1, sealedReplicas2);
assertEquals("Sealed replica is not correct", replicaId, sealedReplicas1.iterator().next());
// try to bump the readonly threshold so as to unseal the replica
replicaId.setSealedState(true);
reloadStore(changeThreshold(99, 1, true), replicaId, Arrays.asList(mockDelegate1, mockDelegate2));
assertTrue("Replica should be unsealed", sealedReplicas1.isEmpty() && sealedReplicas2.isEmpty());
assertEquals("After startup, store should be in STANDBY state", STANDBY, store.getCurrentState());
// verify store still updates sealed lists even though replica state is already sealed. ("replicaId.setSealedState(true)")
// lower the threshold to make replica sealed again
reloadStore(changeThreshold(50, 5, true), replicaId, Arrays.asList(mockDelegate1, mockDelegate2));
assertEquals("Sealed replica lists are different", sealedReplicas1, sealedReplicas2);
assertEquals("Sealed replica is not correct", replicaId, sealedReplicas1.iterator().next());
// verify reconciliation case: we make read-write delta a wide range and clear sealedReplicas2 to make them reconcile
sealedReplicas2.clear();
reloadStore(changeThreshold(99, 90, true), replicaId, Arrays.asList(mockDelegate1, mockDelegate2));
assertEquals("Sealed replica lists are different", sealedReplicas1, sealedReplicas2);
assertEquals("Sealed replica is not correct", replicaId, sealedReplicas2.iterator().next());
store.shutdown();
}
Usage of com.github.ambry.config.StoreConfig in the ambry project (by LinkedIn).
From class BlobStoreCompactorTest, method getCompactor.
/**
 * Gets an instance of {@link BlobStoreCompactor}.
 * @param log the {@link Log} instance to use.
 * @param ioScheduler the {@link DiskIOScheduler} instance to use.
 * @param remoteTokenTracker the {@link RemoteTokenTracker} instance to use.
 * @param enableAutoCloseLastLogSegment whether auto-closing of the last log segment is enabled
 *                                      (also switches on the hybrid compaction policy and container deletion).
 * @return an instance of {@link BlobStoreCompactor}.
 * @throws IOException
 * @throws StoreException
 */
private BlobStoreCompactor getCompactor(Log log, DiskIOScheduler ioScheduler, RemoteTokenTracker remoteTokenTracker, boolean enableAutoCloseLastLogSegment) throws IOException, StoreException {
closeOrExceptionInduced = false;
// configure direct IO and (optionally) the undelete-aware compaction filter
state.properties.put("store.compaction.enable.direct.io", String.valueOf(doDirectIO));
if (withUndelete) {
state.properties.put("store.compaction.filter", "IndexSegmentValidEntryWithUndelete");
}
state.properties.put("store.auto.close.last.log.segment.enabled", String.valueOf(enableAutoCloseLastLogSegment));
if (enableAutoCloseLastLogSegment) {
// auto-close is exercised through the hybrid compaction policy with container deletion enabled
state.properties.put("store.compaction.policy.factory", "com.github.ambry.store.HybridCompactionPolicyFactory");
state.properties.put("store.container.deletion.enabled", String.valueOf(enableAutoCloseLastLogSegment));
}
state.properties.put("store.compaction.purge.delete.tombstone", String.valueOf(purgeDeleteTombstone));
state.properties.put(StoreConfig.storeAlwaysEnableTargetIndexDuplicateCheckingName, String.valueOf(alwaysEnableTargetIndexDuplicateChecking));
StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(state.properties));
metricRegistry = new MetricRegistry();
StoreMetrics storeMetrics = new StoreMetrics(metricRegistry);
return new BlobStoreCompactor(tempDirStr, STORE_ID, STORE_KEY_FACTORY, storeConfig, storeMetrics, storeMetrics, ioScheduler, StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR, log, state.time, state.sessionId, state.incarnationId, accountService, remoteTokenTracker, null);
}
Aggregations