Use of com.github.ambry.config.StoreConfig in the LinkedIn ambry project.
Class BlobStoreTest, method resolveStoreInitialStateTest:
/**
 * Verifies that when a dynamic {@link ClusterParticipant} — one that supports state changes, such as
 * {@link HelixParticipant} — is adopted, the store reports OFFLINE immediately after startup. Helix
 * state transitions are expected to move the store out of OFFLINE afterwards.
 * @throws Exception on any unexpected failure during setup or startup
 */
@Test
public void resolveStoreInitialStateTest() throws Exception {
  store.shutdown();
  properties.setProperty(StoreConfig.storeReplicaStatusDelegateEnableName, "true");
  File storeRoot = StoreTestUtils.createTempDirectory("store-" + storeId);
  File reservePool = StoreTestUtils.createTempDirectory("reserve-pool");
  reservePool.deleteOnExit();
  DiskSpaceAllocator allocator =
      new DiskSpaceAllocator(true, reservePool, 0, new StorageManagerMetrics(new MetricRegistry()));
  StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(properties));
  // StoreMetrics gets its own registry, distinct from the allocator's.
  StoreMetrics storeMetrics = new StoreMetrics(new MetricRegistry());
  // A participant that supports state changes marks the delegate as "dynamic".
  ClusterParticipant participant = Mockito.mock(ClusterParticipant.class);
  when(participant.supportsStateChanges()).thenReturn(true);
  ReplicaStatusDelegate statusDelegate = new ReplicaStatusDelegate(participant);
  BlobStore storeUnderTest =
      new BlobStore(getMockReplicaId(storeRoot.getAbsolutePath()), storeConfig, scheduler, storeStatsScheduler,
          diskIOScheduler, allocator, storeMetrics, storeMetrics, STORE_KEY_FACTORY, recovery, hardDelete,
          Collections.singletonList(statusDelegate), time, new InMemAccountService(false, false), null);
  storeUnderTest.start();
  assertEquals("Store current state should be OFFLINE if dynamic participant is adopted", OFFLINE,
      storeUnderTest.getCurrentState());
  storeUnderTest.shutdown();
}
Use of com.github.ambry.config.StoreConfig in the LinkedIn ambry project.
Class BlobStoreTest, method reloadStore:
/**
 * Resets the relevant store configs to their defaults, then shuts down and restarts the store.
 * All subsequent tests therefore implicitly exercise persistence across a restart.
 * @throws StoreException if the store fails to restart
 */
private void reloadStore() throws StoreException {
  properties.setProperty("store.index.max.number.of.inmem.elements", Integer.toString(MAX_IN_MEM_ELEMENTS));
  properties.setProperty("store.io.error.count.to.trigger.shutdown", Integer.toString(Integer.MAX_VALUE));
  reloadStore(new StoreConfig(new VerifiableProperties(properties)), getMockReplicaId(tempDirStr), null);
}
Use of com.github.ambry.config.StoreConfig in the LinkedIn ambry project.
Class IndexWritePerformance, method main:
/**
 * Standalone performance tool: creates {@code numberOfIndexes} indexes under the working directory
 * and drives concurrent, throttled writes against them from {@code numberOfWriters} threads.
 * Per-index log directories are recorded in a "writeperflog" file; aggregate write counts and
 * timings are printed by a shutdown hook when the process receives SIGINT.
 */
public static void main(String[] args) {
  FileWriter writer = null;
  try {
    // ---- command-line option definitions ----
    OptionParser parser = new OptionParser();
    ArgumentAcceptingOptionSpec<Integer> numberOfIndexesOpt = parser.accepts("numberOfIndexes", "The number of indexes to create").withRequiredArg().describedAs("number_of_indexes").ofType(Integer.class);
    ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
    ArgumentAcceptingOptionSpec<Integer> numberOfWritersOpt = parser.accepts("numberOfWriters", "The number of writers that write to a random index concurrently").withRequiredArg().describedAs("The number of writers").ofType(Integer.class).defaultsTo(4);
    ArgumentAcceptingOptionSpec<Integer> writesPerSecondOpt = parser.accepts("writesPerSecond", "The rate at which writes need to be performed").withRequiredArg().describedAs("The number of writes per second").ofType(Integer.class).defaultsTo(1000);
    ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
    OptionSet options = parser.parse(args);
    // numberOfIndexes, hardwareLayout and partitionLayout are mandatory; exit if any is missing.
    ArrayList<OptionSpec> listOpt = new ArrayList<>();
    listOpt.add(numberOfIndexesOpt);
    listOpt.add(hardwareLayoutOpt);
    listOpt.add(partitionLayoutOpt);
    ToolUtils.ensureOrExit(listOpt, options, parser);
    int numberOfIndexes = options.valueOf(numberOfIndexesOpt);
    int numberOfWriters = options.valueOf(numberOfWritersOpt);
    int writesPerSecond = options.valueOf(writesPerSecondOpt);
    // NOTE(review): only the presence of the flag is checked; the parsed Boolean value (and its
    // defaultsTo(false)) is ignored, so "--enableVerboseLogging false" still enables it — confirm intended.
    boolean enableVerboseLogging = options.has(verboseLoggingOpt);
    if (enableVerboseLogging) {
      System.out.println("Enabled verbose logging");
    }
    // Shared counters, updated by writer threads and read by the shutdown hook.
    final AtomicLong totalTimeTakenInNs = new AtomicLong(0);
    final AtomicLong totalWrites = new AtomicLong(0);
    String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
    String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
    // Build the cluster map from the layout files; NOTE(review): the map is never closed before exit.
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
    ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
    StoreKeyFactory factory = new BlobIdFactory(map);
    // Log file recording the directory used by each index; closed in the finally block.
    File logFile = new File(System.getProperty("user.dir"), "writeperflog");
    writer = new FileWriter(logFile);
    MetricRegistry metricRegistry = new MetricRegistry();
    StoreMetrics metrics = new StoreMetrics(metricRegistry);
    DiskSpaceAllocator diskSpaceAllocator = new DiskSpaceAllocator(false, null, 0, new StorageManagerMetrics(metricRegistry));
    Properties props = new Properties();
    props.setProperty("store.index.memory.size.bytes", "2097152");
    props.setProperty("store.segment.size.in.bytes", "10");
    StoreConfig config = new StoreConfig(new VerifiableProperties(props));
    Log log = new Log(System.getProperty("user.dir"), 10, diskSpaceAllocator, config, metrics, null);
    // Scheduler shared by all indexes. NOTE(review): never shut down; with daemon=false its
    // threads may keep the JVM alive after the writers finish — confirm intended.
    ScheduledExecutorService s = Utils.newScheduler(numberOfWriters, "index", false);
    // One index per numbered subdirectory of the working directory; existing contents are wiped.
    ArrayList<BlobIndexMetrics> indexWithMetrics = new ArrayList<BlobIndexMetrics>(numberOfIndexes);
    for (int i = 0; i < numberOfIndexes; i++) {
      File indexFile = new File(System.getProperty("user.dir"), Integer.toString(i));
      if (indexFile.exists()) {
        for (File c : indexFile.listFiles()) {
          c.delete();
        }
      } else {
        indexFile.mkdir();
      }
      System.out.println("Creating index folder " + indexFile.getAbsolutePath());
      writer.write("logdir-" + indexFile.getAbsolutePath() + "\n");
      // NOTE(review): totalWrites is passed for two different constructor parameters here
      // (positions 5 and 7) — verify against the BlobIndexMetrics signature.
      indexWithMetrics.add(new BlobIndexMetrics(indexFile.getAbsolutePath(), s, log, enableVerboseLogging, totalWrites, totalTimeTakenInNs, totalWrites, config, writer, factory));
    }
    // Writers count down the latch when done; the shutdown hook waits on it before printing stats.
    final CountDownLatch latch = new CountDownLatch(numberOfWriters);
    final AtomicBoolean shutdown = new AtomicBoolean(false);
    // attach shutdown handler to catch control-c
    Runtime.getRuntime().addShutdownHook(new Thread() {
      public void run() {
        try {
          System.out.println("Shutdown invoked");
          shutdown.set(true);
          latch.await();
          System.out.println("Total writes : " + totalWrites.get() + " Total time taken : " + totalTimeTakenInNs.get() + " Nano Seconds Average time taken per write " + ((double) totalWrites.get() / totalTimeTakenInNs.get()) / SystemTime.NsPerSec + " Seconds");
        } catch (Exception e) {
          System.out.println("Error while shutting down " + e);
        }
      }
    });
    // One throttler shared by all writers enforces the aggregate writes-per-second rate.
    Throttler throttler = new Throttler(writesPerSecond, 100, true, SystemTime.getInstance());
    Thread[] threadIndexPerf = new Thread[numberOfWriters];
    for (int i = 0; i < numberOfWriters; i++) {
      threadIndexPerf[i] = new Thread(new IndexWritePerfRun(indexWithMetrics, throttler, shutdown, latch, map));
      threadIndexPerf[i].start();
    }
    for (int i = 0; i < numberOfWriters; i++) {
      threadIndexPerf[i].join();
    }
  } catch (StoreException e) {
    System.err.println("Index creation error on exit " + e.getMessage());
  } catch (Exception e) {
    System.err.println("Error on exit " + e);
  } finally {
    // Best-effort close of the perf log; errors here are reported but not rethrown.
    if (writer != null) {
      try {
        writer.close();
      } catch (Exception e) {
        System.out.println("Error when closing the writer");
      }
    }
  }
}
Use of com.github.ambry.config.StoreConfig in the LinkedIn ambry project.
Class IndexTest, method generateIndexSegmentV0:
/**
 * Builds an {@link IndexSegment} of version {@link PersistentIndex#VERSION_0} for tests.
 * @param startOffset the start offset of the {@link IndexSegment}
 * @param entrySize the entry size the segment supports
 * @param valueSize the value size the segment supports
 * @return a {@link MockIndexSegmentV0} rooted in the temp directory
 */
private IndexSegment generateIndexSegmentV0(Offset startOffset, int entrySize, int valueSize) {
  StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(state.properties));
  StoreMetrics storeMetrics = new StoreMetrics(new MetricRegistry());
  return new MockIndexSegmentV0(tempDir.getAbsolutePath(), startOffset, CuratedLogIndexState.STORE_KEY_FACTORY,
      entrySize, valueSize, storeConfig, storeMetrics, state.time);
}
Use of com.github.ambry.config.StoreConfig in the LinkedIn ambry project.
Class BlobStoreTest, method testClusterManagerWriteStatusDelegateUse:
/**
 * Exercises the blob store's use of {@link WriteStatusDelegate}: sealing when the fill threshold is
 * crossed, no-ops while the delegate is disabled, and unsealing when thresholds are raised or data
 * is compacted away.
 * @throws StoreException if a store operation fails
 */
@Test
public void testClusterManagerWriteStatusDelegateUse() throws StoreException, IOException, InterruptedException {
  // Threshold config (65% seal / 5% headroom), a mock replica and a mock delegate that accepts all calls.
  StoreConfig baseConfig = changeThreshold(65, 5, true);
  StoreTestUtils.MockReplicaId replica = getMockReplicaId(tempDirStr);
  WriteStatusDelegate mockDelegate = mock(WriteStatusDelegate.class);
  when(mockDelegate.unseal(any())).thenReturn(true);
  when(mockDelegate.seal(any())).thenReturn(true);
  // Restart with the threshold config in place.
  restartStore(baseConfig, replica, mockDelegate);
  // Replica defaults to non-sealed, so startup must not touch the delegate.
  verifyZeroInteractions(mockDelegate);
  // A put below the threshold must not trigger the delegate either.
  put(1, 50, Utils.Infinite_Time);
  verifyNoMoreInteractions(mockDelegate);
  // Enough data pushes usage over the threshold: the store must request a seal.
  List<MockId> putIds = put(4, 900, Utils.Infinite_Time);
  verify(mockDelegate, times(1)).seal(replica);
  // Mimic the ClusterParticipant acknowledging the seal.
  replica.setSealedState(true);
  // With the delegate disabled, a threshold change must cause no delegate traffic.
  restartStore(changeThreshold(99, 1, false), replica, mockDelegate);
  verifyNoMoreInteractions(mockDelegate);
  // Raising the threshold (delegate enabled) must unseal on restart.
  restartStore(changeThreshold(99, 1, true), replica, mockDelegate);
  verify(mockDelegate, times(1)).unseal(replica);
  replica.setSealedState(false);
  // Restoring the original thresholds must seal again.
  restartStore(baseConfig, replica, mockDelegate);
  verify(mockDelegate, times(2)).seal(replica);
  replica.setSealedState(true);
  // The compaction-driven checks only make sense for segmented logs.
  if (isLogSegmented) {
    // Delete everything that was put above.
    for (MockId id : putIds) {
      delete(id);
    }
    // Restart so that only the last segment remains in the journal; otherwise compaction
    // skips segments that are still journaled (currently all of them).
    restartStore(baseConfig, replica, mockDelegate);
    verifyNoMoreInteractions(mockDelegate);
    // Jump past retention (8 days), compact the deleted data, and expect an unseal.
    time.sleep(TimeUnit.DAYS.toMillis(8));
    store.compact(store.getCompactionDetails(new CompactAllPolicy(baseConfig, time)));
    verify(mockDelegate, times(2)).unseal(replica);
    // A replica erroneously marked sealed must be corrected on startup.
    replica.setSealedState(true);
    restartStore(baseConfig, replica, mockDelegate);
    verify(mockDelegate, times(3)).unseal(replica);
  }
  store.shutdown();
}
Aggregations