use of com.github.ambry.config.StoreConfig in project ambry by linkedin.
the class MockIndexSegment method populateBloomFilterWithUuidTest.
/**
* Test populating bloom filter with whole blob id and with UUID respectively.
* @throws Exception
*/
@Test
public void populateBloomFilterWithUuidTest() throws Exception {
assumeTrue(formatVersion > PersistentIndex.VERSION_1);
// with the default config, the bloom filter is populated with the whole blob id byte array
LogSegmentName logSegmentName1 = LogSegmentName.fromPositionAndGeneration(0, 0);
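// pick the index value size that matches the current persistent index version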
int indexValueSize = PersistentIndex.CURRENT_VERSION >= PersistentIndex.VERSION_3
    ? IndexValue.INDEX_VALUE_SIZE_IN_BYTES_V3_V4 : IndexValue.INDEX_VALUE_SIZE_IN_BYTES_V1_V2;
IndexSegment indexSegment1 = new IndexSegment(tempDir.getAbsolutePath(), new Offset(logSegmentName1, 0),
    STORE_KEY_FACTORY, KEY_SIZE + indexValueSize, indexValueSize, config, metrics, time);
Random random = new Random();
short accountId1 = getRandomShort(random);
short containerId1 = getRandomShort(random);
short accountId2, containerId2;
do {
  accountId2 = getRandomShort(random);
} while (accountId2 == accountId1);
do {
  containerId2 = getRandomShort(random);
} while (containerId2 == containerId1);
String idStr = TestUtils.getRandomString(CUSTOM_ID_SIZE);
// generate two ids with the same id string but different account/container.
MockId id1 = new MockId(idStr, accountId1, containerId1);
MockId id2 = new MockId(idStr, accountId2, containerId2);
IndexValue value1 = IndexValueTest.getIndexValue(1000, new Offset(logSegmentName1, 0), Utils.Infinite_Time, time.milliseconds(), accountId1, containerId1, (short) 0, formatVersion);
indexSegment1.addEntry(new IndexEntry(id1, value1), new Offset(logSegmentName1, 1000));
// deliberately add one more entry for later bloom rebuilding test
indexSegment1.addEntry(new IndexEntry(id2, value1), new Offset(logSegmentName1, 2000));
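// persist the segment and seal it so that subsequent find() calls go through the on-disk index and its bloom filter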
indexSegment1.writeIndexSegmentToFile(new Offset(logSegmentName1, 2000));
indexSegment1.seal();
// verify that id1 can be found in the index segment but id2 cannot, because the bloom filter (built from the whole blob id) considers id2 absent
Set<IndexValue> findResult = indexSegment1.find(id1);
assertNotNull("Should have found the added key", findResult);
assertEquals(accountId1, findResult.iterator().next().getAccountId());
assertNull("Should have failed to find non existent key", indexSegment1.find(id2));
// create second index segment whose bloom filter is populated with UUID only.
// We add id1 into indexSegment2 and attempt to get id2, which should succeed.
Properties properties = new Properties();
properties.setProperty("store.uuid.based.bloom.filter.enabled", Boolean.toString(true));
StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(properties));
LogSegmentName logSegmentName2 = LogSegmentName.fromPositionAndGeneration(1, 0);
IndexSegment indexSegment2 = new IndexSegment(tempDir.getAbsolutePath(), new Offset(logSegmentName2, 0),
    STORE_KEY_FACTORY, KEY_SIZE + indexValueSize, indexValueSize, storeConfig, metrics, time);
indexSegment2.addEntry(new IndexEntry(id1, value1), new Offset(logSegmentName2, 1000));
indexSegment2.writeIndexSegmentToFile(new Offset(logSegmentName2, 1000));
indexSegment2.seal();
findResult = indexSegment2.find(id1);
assertNotNull("Should have found the id1", findResult);
// test that we are able to get id2 from indexSegment2
findResult = indexSegment2.find(id2);
assertNotNull("Should have found the id2", findResult);
// verify that the found entry actually has account/container associated with id1
IndexValue indexValue = findResult.iterator().next();
assertTrue("Account or container is not expected", indexValue.getAccountId() == accountId1 && indexValue.getContainerId() == containerId1);
File bloomFile1 = new File(tempDir, generateIndexSegmentFilenamePrefix(new Offset(logSegmentName1, 0)) + BLOOM_FILE_NAME_SUFFIX);
assertTrue("The bloom file should exist", bloomFile1.exists());
long lengthBeforeRebuild = bloomFile1.length();
// rebuild bloom filter in indexSegment1
properties.setProperty("store.index.rebuild.bloom.filter.enabled", Boolean.toString(true));
storeConfig = new StoreConfig(new VerifiableProperties(properties));
IndexSegment indexSegmentFromFile = new IndexSegment(indexSegment1.getFile(), true, STORE_KEY_FACTORY, storeConfig, metrics, null, time);
// test that id2 is found in reconstructed index segment (with bloom filter rebuilt based on UUID only)
findResult = indexSegmentFromFile.find(id2);
assertNotNull("Should have found the id2", findResult);
indexValue = findResult.iterator().next();
assertTrue("Account or container is not expected", indexValue.getAccountId() == accountId1 && indexValue.getContainerId() == containerId1);
// verify that bloom file length didn't change
assertEquals("Bloom file length has changed", lengthBeforeRebuild, bloomFile1.length());
// now, change value of storeIndexMaxNumberOfInmemElements to 1
properties.setProperty("store.index.max.number.of.inmem.elements", Integer.toString(1));
storeConfig = new StoreConfig(new VerifiableProperties(properties));
// rebuild bloomFile1 again; this time the file size should change (shrink)
new IndexSegment(indexSegment1.getFile(), true, STORE_KEY_FACTORY, storeConfig, metrics, null, time);
assertTrue("Bloom file length has changed", lengthBeforeRebuild > bloomFile1.length());
// additional test: an exception should occur when the previous bloom file cannot be deleted
assertTrue("Could not make unwritable", tempDir.setWritable(false));
try {
  new IndexSegment(indexSegment1.getFile(), true, STORE_KEY_FACTORY, storeConfig, metrics, null, time);
  fail("Deletion on unwritable file should fail");
} catch (StoreException e) {
  assertEquals("Error code is not expected.", StoreErrorCodes.Index_Creation_Failure, e.getErrorCode());
} finally {
  assertTrue("Could not make writable", tempDir.setWritable(true));
}
}
use of com.github.ambry.config.StoreConfig in project ambry by linkedin.
the class BlobStoreCompactorTest method getCompactor.
/**
* Gets an instance of {@link BlobStoreCompactor}.
* @param log the {@link Log} instance to use.
* @param ioScheduler the {@link DiskIOScheduler} instance to use.
* @param remoteTokenTracker the {@link RemoteTokenTracker} instance to use.
* @param enableAutoCloseLastLogSegment whether auto-closing of the last log segment should be enabled.
* @return an instance of {@link BlobStoreCompactor}.
* @throws IOException
* @throws StoreException
*/
private BlobStoreCompactor getCompactor(Log log, DiskIOScheduler ioScheduler, RemoteTokenTracker remoteTokenTracker,
    boolean enableAutoCloseLastLogSegment) throws IOException, StoreException {
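// reset the test flag that is used to induce a close or exception mid-compaction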
closeOrExceptionInduced = false;
state.properties.put("store.compaction.enable.direct.io", Boolean.toString(doDirectIO));
if (withUndelete) {
  state.properties.put("store.compaction.filter", "IndexSegmentValidEntryWithUndelete");
}
state.properties.put("store.auto.close.last.log.segment.enabled", Boolean.toString(enableAutoCloseLastLogSegment));
if (enableAutoCloseLastLogSegment) {
  state.properties.put("store.compaction.policy.factory", "com.github.ambry.store.HybridCompactionPolicyFactory");
  state.properties.put("store.container.deletion.enabled", Boolean.toString(enableAutoCloseLastLogSegment));
}
state.properties.put("store.compaction.purge.delete.tombstone", Boolean.toString(purgeDeleteTombstone));
state.properties.put(StoreConfig.storeAlwaysEnableTargetIndexDuplicateCheckingName, Boolean.toString(alwaysEnableTargetIndexDuplicateChecking));
StoreConfig config = new StoreConfig(new VerifiableProperties(state.properties));
metricRegistry = new MetricRegistry();
StoreMetrics metrics = new StoreMetrics(metricRegistry);
return new BlobStoreCompactor(tempDirStr, STORE_ID, STORE_KEY_FACTORY, config, metrics, metrics, ioScheduler,
    StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR, log, state.time, state.sessionId, state.incarnationId,
    accountService, remoteTokenTracker, null);
}
use of com.github.ambry.config.StoreConfig in project ambry by linkedin.
the class BlobStoreCompactorTest method statsBasedCompactionStrategyWithInvalidLogSegment.
@Test
public void statsBasedCompactionStrategyWithInvalidLogSegment() throws Exception {
assumeTrue(!withUndelete);
refreshState(false, true, false);
// The current log is set up like this:
// three log segments: 0_0, 1_0, 2_0
// Now fill the log segments so that we have:
// 3_0 doesn't have any valid index values (all expired puts)
// 4_0 has only one valid index value (the rest are expired puts)
// 5_0 has data so 4_0 won't be in the journal
// This setup makes sure that:
// 3_0 has value 0 in the result from BlobStoreStats
// 1_0, 2_0 and 4_0 would be the best candidates to compact if we ignore 3_0
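// fill the remainder of the current last log segment (2_0) with permanent puts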
long requiredCount = state.index.getLogSegmentCount();
long requiredBytes = requiredCount * state.log.getSegmentCapacity();
long numPuts = (requiredBytes - state.index.getLogUsedCapacity()) / PUT_RECORD_SIZE;
state.addPutEntries((int) numPuts, PUT_RECORD_SIZE, Utils.Infinite_Time);
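// fill 3_0 entirely with puts that are already expired (expiry = 0)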
requiredBytes = (requiredCount + 1) * state.log.getSegmentCapacity();
numPuts = (requiredBytes - state.index.getLogUsedCapacity()) / PUT_RECORD_SIZE;
state.addPutEntries((int) numPuts, PUT_RECORD_SIZE, 0);
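// fill 4_0 with expired puts, leaving room for the single permanent put that becomes its only valid entry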
requiredBytes = (requiredCount + 2) * state.log.getSegmentCapacity();
numPuts = (requiredBytes - state.index.getLogUsedCapacity()) / PUT_RECORD_SIZE - 1;
state.addPutEntries((int) numPuts, PUT_RECORD_SIZE, 0L);
state.addPutEntries(1, PUT_RECORD_SIZE, Utils.Infinite_Time);
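// fill 5_0 with permanent puts so that 4_0 drops out of the journal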
requiredBytes = (requiredCount + 3) * state.log.getSegmentCapacity();
numPuts = (requiredBytes - state.index.getLogUsedCapacity()) / PUT_RECORD_SIZE;
state.addPutEntries((int) numPuts, PUT_RECORD_SIZE, Utils.Infinite_Time);
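// advance the mock time to the current wall-clock time so the zero-expiry puts are treated as expired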
state.time.setCurrentMilliseconds(System.currentTimeMillis());
Properties properties = new Properties();
properties.setProperty("store.min.used.capacity.to.trigger.compaction.in.percentage", "1");
StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(properties));
StatsBasedCompactionPolicy policy = new StatsBasedCompactionPolicy(storeConfig, state.time);
ScheduledExecutorService scheduler = Utils.newScheduler(1, true);
BlobStoreStats stats = new BlobStoreStats("", state.index, 0, Time.MsPerSec, 0, 100, Time.SecsPerMin, false,
    purgeDeleteTombstone, state.time, scheduler, scheduler, DISK_IO_SCHEDULER,
    new StoreMetrics(new MetricRegistry()), 1, false);
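// spy on the stats so that the reported max blob size can be pinned to PUT_RECORD_SIZE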
BlobStoreStats spyStats = Mockito.spy(stats);
Mockito.doReturn(PUT_RECORD_SIZE).when(spyStats).getMaxBlobSize();
CompactionDetails details = policy.getCompactionDetails(state.log.getCapacityInBytes(),
    state.index.getLogUsedCapacity(), state.log.getSegmentCapacity(), LogSegment.HEADER_SIZE,
    state.index.getLogSegmentsNotInJournal(), spyStats, "/tmp");
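// only 3_0, the segment with no valid data, should be selected for compaction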
List<LogSegmentName> logSegmentNames = details.getLogSegmentsUnderCompaction();
assertEquals(1, logSegmentNames.size());
assertEquals("3" + BlobStore.SEPARATOR + "0", logSegmentNames.get(0).toString());
}
use of com.github.ambry.config.StoreConfig in project ambry by linkedin.
the class CompactionManagerTest method testAddBlobStore.
/**
* Tests for adding new BlobStore with/without compaction enabled.
*/
@Test
public void testAddBlobStore() {
StoreMetrics storeMetrics = new StoreMetrics(new MetricRegistry());
BlobStore newAddedStore = new MockBlobStore(config, storeMetrics, time, null);
// without compaction enabled.
compactionManager.enable();
compactionManager.addBlobStore(newAddedStore);
// verify the new store is correctly added to the compaction manager
assertTrue("New store is not found in compaction manager", compactionManager.getAllStores().contains(newAddedStore));
compactionManager.disable();
compactionManager.awaitTermination();
// with compaction enabled.
properties.setProperty("store.compaction.triggers", ALL_COMPACTION_TRIGGERS);
config = new StoreConfig(new VerifiableProperties(properties));
StorageManagerMetrics metrics = new StorageManagerMetrics(new MetricRegistry());
compactionManager = new CompactionManager(MOUNT_PATH, config, Collections.singleton(blobStore), metrics, time);
compactionManager.enable();
compactionManager.addBlobStore(newAddedStore);
assertTrue("New store is not found in compaction manager", compactionManager.getAllStores().contains(newAddedStore));
assertFalse("BlobStore should not be scheduled for the new added store before it is started", compactionManager.scheduleNextForCompaction(newAddedStore));
compactionManager.disable();
compactionManager.awaitTermination();
}
use of com.github.ambry.config.StoreConfig in project ambry by linkedin.
the class CompactionManagerTest method testDisableWithoutEnable.
/**
* Tests {@link CompactionManager#disable()} without having called {@link CompactionManager#enable()} first.
*/
@Test
public void testDisableWithoutEnable() {
// without compaction enabled.
compactionManager.disable();
compactionManager.awaitTermination();
// with compaction enabled.
properties.setProperty("store.compaction.triggers", ALL_COMPACTION_TRIGGERS);
config = new StoreConfig(new VerifiableProperties(properties));
StorageManagerMetrics metrics = new StorageManagerMetrics(new MetricRegistry());
compactionManager = new CompactionManager(MOUNT_PATH, config, Collections.singleton(blobStore), metrics, time);
compactionManager.disable();
compactionManager.awaitTermination();
}
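Every usage above follows the same construction pattern: put the desired store.* properties into a java.util.Properties instance, wrap it in VerifiableProperties, and pass that to the StoreConfig constructor; properties that are not set fall back to their defaults. A minimal, self-contained sketch of that pattern (the wrapper class StoreConfigExample is illustrative, not part of ambry):
import com.github.ambry.config.StoreConfig;
import com.github.ambry.config.VerifiableProperties;
import java.util.Properties;
public class StoreConfigExample {
  public static void main(String[] args) {
    // override a couple of store properties; anything left unset keeps its default value
    Properties properties = new Properties();
    properties.setProperty("store.uuid.based.bloom.filter.enabled", "true");
    properties.setProperty("store.index.max.number.of.inmem.elements", "1");
    StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(properties));
    System.out.println(storeConfig.storeIndexMaxNumberOfInmemElements);
  }
}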