Use of com.codahale.metrics.MetricRegistry in the project ambry by LinkedIn.
The class CuratedLogIndexState, method initIndex.
/**
 * Builds a fresh {@link PersistentIndex} for this test state, creating a new session id,
 * {@link MetricRegistry} and {@link StoreMetrics} along the way.
 * @throws StoreException if the index cannot be created.
 */
void initIndex() throws StoreException {
sessionId = UUID.randomUUID();
metricRegistry = new MetricRegistry();
metrics = new StoreMetrics(metricRegistry);
StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(properties));
index = new PersistentIndex(tempDirStr, tempDirStr, scheduler, log, storeConfig, CuratedLogIndexState.STORE_KEY_FACTORY, recovery, hardDelete, DISK_IO_SCHEDULER, metrics, time, sessionId, incarnationId);
}
Use of com.codahale.metrics.MetricRegistry in the project ambry by LinkedIn.
The class HardDeleterTest, method setup.
/**
 * Sets up the hard-delete test fixtures: a clean temp directory, a {@link Log}, a
 * {@link MockIndex} wired to a {@link HardDeleteTestHelper}, and a config tuned so the
 * index persistor stays idle and fetch sizes are small enough to exercise journal edges.
 * @throws Exception if any fixture cannot be created.
 */
@Before
public void setup() throws Exception {
File rootDirectory = StoreTestUtils.createTempDirectory("ambry");
// Clear any leftover files. listFiles() can return null (not a directory, or an I/O
// error), so guard against an NPE instead of iterating it blindly.
File[] leftovers = rootDirectory.listFiles();
if (leftovers != null) {
for (File c : leftovers) {
c.delete();
}
}
scheduler = Utils.newScheduler(1, false);
MetricRegistry metricRegistry = new MetricRegistry();
StoreMetrics metrics = new StoreMetrics(metricRegistry);
log = new Log(rootDirectory.getAbsolutePath(), 10000, 10000, StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR, metrics);
Properties props = new Properties();
// the test will set the tokens, so disable the index persistor.
props.setProperty("store.data.flush.interval.seconds", "3600");
props.setProperty("store.deleted.message.retention.days", "1");
props.setProperty("store.index.max.number.of.inmem.elements", "2");
// the following determines the number of entries that will be fetched at most. We need this to test the
// case where the endToken does not reach the journal.
props.setProperty("store.cleanup.operations.bytes.per.sec", "40");
StoreConfig config = new StoreConfig(new VerifiableProperties(props));
StoreKeyFactory factory = Utils.getObj("com.github.ambry.store.MockIdFactory");
time = new MockTime(SystemTime.getInstance().milliseconds());
helper = new HardDeleteTestHelper(0, 200);
index = new MockIndex(rootDirectory.getAbsolutePath(), scheduler, log, config, factory, helper, time, UUID.randomUUID());
helper.setIndex(index, log);
// Setting this below will not enable the hard delete thread. This being a unit test, the methods
// are going to be called directly. We simply want to set the enabled flag to avoid those methods
// from bailing out prematurely.
index.setHardDeleteRunningStatus(true);
}
Use of com.codahale.metrics.MetricRegistry in the project ambry by LinkedIn.
The class IndexTest, method generateIndexSegmentV1.
/**
 * Builds an {@link IndexSegment} of version {@link PersistentIndex#VERSION_1} rooted at the
 * test's temp directory.
 * @param startOffset the start offset of the {@link IndexSegment}
 * @param entrySize The entry size that this segment supports
 * @param valueSize The value size that this segment supports
 * @return the {@link IndexSegment} created of version {@link PersistentIndex#VERSION_1}
 */
private IndexSegment generateIndexSegmentV1(Offset startOffset, int entrySize, int valueSize) {
StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(state.properties));
// A throwaway registry is enough here; nothing inspects the metrics afterwards.
StoreMetrics storeMetrics = new StoreMetrics(new MetricRegistry());
return new MockIndexSegmentV1(tempDir.getAbsolutePath(), startOffset, CuratedLogIndexState.STORE_KEY_FACTORY, entrySize, valueSize, storeConfig, storeMetrics, state.time);
}
Use of com.codahale.metrics.MetricRegistry in the project ambry by LinkedIn.
The class StorageManagerTest, method diskSpaceAllocatorTest.
/**
 * Verifies that stores on a disk become inaccessible when the {@link DiskSpaceAllocator}
 * fails to start on that disk.
 * @throws Exception
 */
@Test
public void diskSpaceAllocatorTest() throws Exception {
generateConfigs(true);
MockDataNodeId node = clusterMap.getDataNodes().get(0);
List<ReplicaId> replicaList = clusterMap.getReplicaIds(node);
List<String> mounts = node.getMountPaths();
// Each mount path holds one unallocated segment per replica (a replica can use 2 segments)
// plus the configured number of swap segments.
int expectedPoolSegments = (replicaList.size() / mounts.size()) + diskManagerConfig.diskManagerRequiredSwapSegmentsPerSize;
// Run two start/shutdown cycles so the restart scenario is covered as well.
for (int iteration = 0; iteration < 2; iteration++) {
metricRegistry = new MetricRegistry();
StorageManager manager = createStorageManager(replicaList, metricRegistry);
manager.start();
checkStoreAccessibility(replicaList, null, manager);
Map<String, Counter> counterMap = metricRegistry.getCounters();
assertEquals(0, getCounterValue(counterMap, DiskSpaceAllocator.class.getName(), "DiskSpaceAllocatorInitFailureCount"));
assertEquals(0, getCounterValue(counterMap, DiskManager.class.getName(), "TotalStoreStartFailures"));
assertEquals(0, getCounterValue(counterMap, DiskManager.class.getName(), "DiskMountPathFailures"));
for (String mount : node.getMountPaths()) {
DiskSpaceAllocatorTest.verifyPoolState(new File(mount, diskManagerConfig.diskManagerReserveFileDirName), new DiskSpaceAllocatorTest.ExpectedState().add(storeConfig.storeSegmentSizeInBytes, expectedPoolSegments));
}
shutdownAndAssertStoresInaccessible(manager, replicaList);
assertEquals(0, getCounterValue(counterMap, DiskManager.class.getName(), "TotalStoreShutdownFailures"));
}
// Force an initializePool failure:
// 1. delete a file size directory
// 2. instantiate the DiskManagers (does not fail; the directory simply is not inventoried)
// 3. create a regular file with the same name as the deleted file size directory
// 4. start the DiskManagers; the DiskSpaceAllocator should fail to initialize when it finds
//    a file where the directory should be created.
metricRegistry = new MetricRegistry();
String failingMount = mounts.get(RANDOM.nextInt(mounts.size()));
File reserveDir = new File(failingMount, diskManagerConfig.diskManagerReserveFileDirName);
File segSizeDir = new File(reserveDir, DiskSpaceAllocator.generateFileSizeDirName(storeConfig.storeSegmentSizeInBytes));
Utils.deleteFileOrDirectory(segSizeDir);
StorageManager manager = createStorageManager(replicaList, metricRegistry);
assertTrue("File creation should have succeeded", segSizeDir.createNewFile());
manager.start();
checkStoreAccessibility(replicaList, failingMount, manager);
Map<String, Counter> counterMap = metricRegistry.getCounters();
shutdownAndAssertStoresInaccessible(manager, replicaList);
assertEquals(0, getCounterValue(counterMap, DiskManager.class.getName(), "TotalStoreShutdownFailures"));
}
Use of com.codahale.metrics.MetricRegistry in the project ambry by LinkedIn.
The class BlobStoreCompactorTest, method getCompactor.
/**
 * Constructs a {@link BlobStoreCompactor} backed by the current test state.
 * @param log the {@link Log} instance to use.
 * @param ioScheduler the {@link DiskIOScheduler} instance to use.
 * @return an instance of {@link BlobStoreCompactor}.
 * @throws IOException
 * @throws StoreException
 */
private BlobStoreCompactor getCompactor(Log log, DiskIOScheduler ioScheduler) throws IOException, StoreException {
closeOrExceptionInduced = false;
metricRegistry = new MetricRegistry();
StoreMetrics storeMetrics = new StoreMetrics(metricRegistry);
StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(state.properties));
// The same StoreMetrics instance serves both the source store and the store under compaction.
return new BlobStoreCompactor(tempDirStr, STORE_ID, CuratedLogIndexState.STORE_KEY_FACTORY, storeConfig, storeMetrics, storeMetrics, ioScheduler, StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR, log, state.time, state.sessionId, state.incarnationId);
}
Aggregations