Example usage of com.github.ambry.config.StoreConfig in the ambry project (LinkedIn).
Source: class CuratedLogIndexState, method initIndex.
/**
 * (Re)creates the {@link PersistentIndex} for this test state, tearing down any previously
 * running scheduler first.
 * @param newScheduler the {@link ScheduledExecutorService} to use. If {@code null}, a fresh
 *                     single-threaded scheduler is created and used instead.
 * @throws StoreException if the index cannot be created.
 */
void initIndex(ScheduledExecutorService newScheduler) throws StoreException {
  StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(properties));
  metricRegistry = new MetricRegistry();
  metrics = new StoreMetrics(metricRegistry);
  sessionId = UUID.randomUUID();
  // Stop the old scheduler (if any) before swapping in the replacement.
  shutDownExecutorService(scheduler, 1, TimeUnit.SECONDS);
  if (newScheduler == null) {
    scheduler = Utils.newScheduler(1, false);
  } else {
    scheduler = newScheduler;
  }
  index =
      new PersistentIndex(tempDirStr, tempDirStr, scheduler, log, storeConfig, CuratedLogIndexState.STORE_KEY_FACTORY,
          recovery, hardDelete, DISK_IO_SCHEDULER, metrics, time, sessionId, incarnationId);
}
Example usage of com.github.ambry.config.StoreConfig in the ambry project (LinkedIn).
Source: class StoreDescriptorTest, method testStoreDescriptor.
/**
 * Tests {@link StoreDescriptor}: instantiation creates a descriptor file, the incarnationId survives
 * re-instantiation, a descriptor file in valid {@code VERSION_0} form is read back correctly, and
 * deserialization fails for an unrecognized version and for a CRC mismatch.
 * @throws IOException if temp files or directories cannot be created or written.
 */
@Test
public void testStoreDescriptor() throws IOException {
  StoreConfig config = new StoreConfig(new VerifiableProperties(new Properties()));
  // fresh temp directory so each run starts without a descriptor file
  File tempDir = StoreTestUtils.createTempDirectory("storeDir");
  File storeDescriptorFile = new File(tempDir.getAbsolutePath(), StoreDescriptor.STORE_DESCRIPTOR_FILENAME);
  // first instantiation: expected to create the descriptor file with a new incarnationId
  StoreDescriptor storeDescriptor = new StoreDescriptor(tempDir.getAbsolutePath(), config);
  // store descriptor file should have been created.
  // second instantiation must read the same file back and yield the same incarnationId
  StoreDescriptor newStoreDescriptor = new StoreDescriptor(tempDir.getAbsolutePath(), config);
  assertEquals("IncarnationId mismatch ", storeDescriptor.getIncarnationId(), newStoreDescriptor.getIncarnationId());
  assertTrue("Store descriptor file could not be deleted", storeDescriptorFile.delete());
  // Create StoreDescriptor file with new incarnationId
  UUID incarnationIdUUID = UUID.randomUUID();
  byte[] toBytes = getBytesForStoreDescriptor(StoreDescriptor.VERSION_0, incarnationIdUUID);
  storeDescriptorFile = new File(tempDir.getAbsolutePath(), StoreDescriptor.STORE_DESCRIPTOR_FILENAME);
  assertTrue("Store descriptor file could not be created", storeDescriptorFile.createNewFile());
  createStoreFile(storeDescriptorFile, toBytes);
  // the hand-written file should be parsed and yield the incarnationId we serialized
  storeDescriptor = new StoreDescriptor(tempDir.getAbsolutePath(), config);
  assertEquals("IncarnationId mismatch ", incarnationIdUUID, storeDescriptor.getIncarnationId());
  // check for wrong version
  assertTrue("Store descriptor file could not be deleted", storeDescriptorFile.delete());
  toBytes = getBytesForStoreDescriptor((short) 1, incarnationIdUUID);
  assertTrue("Store descriptor file could not be created", storeDescriptorFile.createNewFile());
  createStoreFile(storeDescriptorFile, toBytes);
  try {
    new StoreDescriptor(tempDir.getAbsolutePath(), config);
    fail("Wrong version should have thrown IllegalArgumentException ");
  } catch (IllegalArgumentException e) {
    // expected: version (short) 1 is not a recognized StoreDescriptor version
  }
  // check for wrong Crc
  assertTrue("Store descriptor file could not be deleted", storeDescriptorFile.delete());
  assertTrue("Store descriptor file could not be created", storeDescriptorFile.createNewFile());
  toBytes = getBytesForStoreDescriptor(StoreDescriptor.VERSION_0, incarnationIdUUID);
  CrcOutputStream crcOutputStream = new CrcOutputStream(new FileOutputStream(storeDescriptorFile));
  DataOutputStream dataOutputStream = new DataOutputStream(crcOutputStream);
  dataOutputStream.write(toBytes);
  // deliberately corrupt the trailing CRC (actual value + 1) to trigger a mismatch on read
  dataOutputStream.writeLong(crcOutputStream.getValue() + 1);
  dataOutputStream.close();
  try {
    new StoreDescriptor(tempDir.getAbsolutePath(), config);
    fail("Wrong CRC should have thrown IllegalStateException ");
  } catch (IllegalStateException e) {
    // expected: the stored CRC does not match the computed CRC of the contents
  }
}
Example usage of com.github.ambry.config.StoreConfig in the ambry project (LinkedIn).
Source: class HardDeleterTest, method setup.
/**
 * Sets up the log, index and helpers used by the hard-delete tests. The index persistor is
 * effectively disabled via config because the tests set tokens manually.
 * @throws Exception if any component cannot be constructed.
 */
@Before
public void setup() throws Exception {
  File rootDirectory = StoreTestUtils.createTempDirectory("ambry");
  // Clean out any leftover files from a previous run. File.listFiles() returns null (not an
  // empty array) if the path is not a readable directory, so guard against an NPE.
  File[] leftoverFiles = rootDirectory.listFiles();
  if (leftoverFiles != null) {
    for (File leftover : leftoverFiles) {
      leftover.delete();
    }
  }
  scheduler = Utils.newScheduler(1, false);
  MetricRegistry metricRegistry = new MetricRegistry();
  StoreMetrics metrics = new StoreMetrics(metricRegistry);
  Properties props = new Properties();
  // the test will set the tokens, so disable the index persistor.
  props.setProperty("store.data.flush.interval.seconds", "3600");
  props.setProperty("store.deleted.message.retention.hours", "1");
  props.setProperty("store.index.max.number.of.inmem.elements", "2");
  props.setProperty("store.segment.size.in.bytes", "10000");
  // the following determines the number of entries that will be fetched at most. We need this to test the
  // case where the endToken does not reach the journal.
  props.setProperty("store.hard.delete.operations.bytes.per.sec", "40");
  StoreConfig config = new StoreConfig(new VerifiableProperties(props));
  log = new Log(rootDirectory.getAbsolutePath(), 10000, StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR, config, metrics,
      null);
  StoreKeyFactory factory = Utils.getObj("com.github.ambry.store.MockIdFactory");
  time = new MockTime(SystemTime.getInstance().milliseconds());
  helper = new HardDeleteTestHelper(0, 200);
  diskIOScheduler = new MockDiskIOScheduler();
  index = new MockIndex(rootDirectory.getAbsolutePath(), scheduler, log, config, factory, helper, diskIOScheduler,
      time, UUID.randomUUID());
  helper.setIndex(index, log);
  // Setting this below will not enable the hard delete thread. This being a unit test, the methods
  // are going to be called directly. We simply want to set the enabled flag to avoid those methods
  // from bailing out prematurely.
  index.setHardDeleteRunningStatus(true);
}
Example usage of com.github.ambry.config.StoreConfig in the ambry project (LinkedIn).
Source: class ConsistencyCheckerTool, method main.
/**
 * Entry point for the consistency checker tool: builds the cluster map, store config and key
 * converter from the supplied properties, runs {@code checkConsistency} over the directories
 * found under {@code config.pathOfInput}, and exits with status 0 on success, 1 otherwise.
 * @param args command-line arguments, turned into {@link VerifiableProperties} via {@link ToolUtils}.
 * @throws Exception if any component fails to construct or the check itself fails.
 */
public static void main(String[] args) throws Exception {
  VerifiableProperties properties = ToolUtils.getVerifiableProperties(args);
  ConsistencyCheckerToolConfig config = new ConsistencyCheckerToolConfig(properties);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
  ServerConfig serverConfig = new ServerConfig(properties);
  // try-with-resources ensures the cluster map is closed even if the tool throws
  try (ClusterMap clusterMap = new StaticClusterAgentsFactory(clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath).getClusterMap()) {
    StoreToolsMetrics metrics = new StoreToolsMetrics(clusterMap.getMetricRegistry());
    // properties is already a VerifiableProperties here, so StoreConfig consumes it directly
    StoreConfig storeConfig = new StoreConfig(properties);
    // this tool supports only blob IDs. It can become generic if StoreKeyFactory provides a deserFromString method.
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    // convert the configured filter strings into BlobIds; presumably an empty set means "check
    // everything" — NOTE(review): confirm against ConsistencyCheckerTool's handling of filterKeySet
    Set<StoreKey> filterKeySet = new HashSet<>();
    for (String key : config.filterSet) {
      filterKeySet.add(new BlobId(key, clusterMap));
    }
    Time time = SystemTime.getInstance();
    // throttle index-entry processing to the configured rate (entries/sec, 1000ms check interval)
    Throttler throttler = new Throttler(config.indexEntriesToProcessPerSec, 1000, true, time);
    StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, clusterMap.getMetricRegistry());
    ConsistencyCheckerTool consistencyCheckerTool = new ConsistencyCheckerTool(clusterMap, blobIdFactory, storeConfig, filterKeySet, throttler, metrics, time, storeKeyConverterFactory.getStoreKeyConverter());
    // only directories directly under pathOfInput are checked
    boolean success = consistencyCheckerTool.checkConsistency(config.pathOfInput.listFiles(File::isDirectory)).getFirst();
    System.exit(success ? 0 : 1);
  }
}
Example usage of com.github.ambry.config.StoreConfig in the ambry project (LinkedIn).
Source: class DumpCompactionLogTool, method main.
/**
 * Entry point: loads the compaction log named by the tool config, deserializes it via a
 * {@link CompactionLog} built from the cluster map and store config, and prints it to stdout.
 * @param args command-line arguments, turned into {@link VerifiableProperties} via {@link ToolUtils}.
 * @throws Exception if the cluster map or compaction log cannot be constructed.
 */
public static void main(String[] args) throws Exception {
  VerifiableProperties props = ToolUtils.getVerifiableProperties(args);
  DumpCompactionLogConfig toolConfig = new DumpCompactionLogConfig(props);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(props);
  ClusterAgentsFactory agentsFactory = (ClusterAgentsFactory) Utils.getObj(
      clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, toolConfig.hardwareLayoutFilePath,
      toolConfig.partitionLayoutFilePath);
  // close the cluster map even if deserialization fails
  try (ClusterMap clusterMap = agentsFactory.getClusterMap()) {
    StoreConfig storeConfig = new StoreConfig(props);
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    File compactionLogFile = new File(toolConfig.compactionLogFilePath);
    CompactionLog compactionLog =
        new CompactionLog(compactionLogFile, blobIdFactory, SystemTime.getInstance(), storeConfig);
    System.out.println(compactionLog.toString());
  }
}
Aggregations