Use of com.github.ambry.clustermap.ClusterAgentsFactory in project ambry by linkedin.
The class RestServerMain, method main:
public static void main(String[] args) {
  final RestServer restServer;
  int exitCode = 0;
  ClusterMap clusterMap = null;
  try {
    InvocationOptions options = new InvocationOptions(args);
    Properties properties = Utils.loadProps(options.serverPropsFilePath);
    VerifiableProperties verifiableProperties = new VerifiableProperties(properties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    ClusterAgentsFactory clusterAgentsFactory =
        Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig,
            options.hardwareLayoutFilePath, options.partitionLayoutFilePath);
    clusterMap = clusterAgentsFactory.getClusterMap();
    SSLFactory sslFactory = getSSLFactoryIfRequired(verifiableProperties);
    logger.info("Bootstrapping RestServer");
    restServer = new RestServer(verifiableProperties, clusterMap, new LoggingNotificationSystem(), sslFactory);
    // attach shutdown handler to catch control-c
    Runtime.getRuntime().addShutdownHook(new Thread() {
      public void run() {
        logger.info("Received shutdown signal. Shutting down RestServer");
        restServer.shutdown();
      }
    });
    restServer.start();
    restServer.awaitShutdown();
  } catch (Exception e) {
    logger.error("Exception during bootstrap of RestServer", e);
    exitCode = 1;
  } finally {
    if (clusterMap != null) {
      clusterMap.close();
    }
  }
  logger.info("Exiting RestServerMain");
  System.exit(exitCode);
}
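For reference, the factory lookup above is plain reflection: Utils.getObj loads the class named by clusterMapConfig.clusterMapClusterAgentsFactory and invokes the constructor that accepts the config plus the two layout file paths. A minimal standalone sketch of that pattern follows; the property values and file paths are placeholders, not taken from the Ambry source, and which clustermap.* keys are mandatory depends on the Ambry version.

// Minimal sketch (placeholder values): resolve a ClusterAgentsFactory by class name
// and obtain a ClusterMap from it, closing the map via try-with-resources.
Properties props = new Properties();
props.setProperty("clustermap.cluster.name", "test-cluster");   // assumed value
props.setProperty("clustermap.datacenter.name", "dc1");          // assumed value
props.setProperty("clustermap.host.name", "localhost");          // assumed value
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
ClusterAgentsFactory clusterAgentsFactory =
    Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig,
        "/path/to/HardwareLayout.json", "/path/to/PartitionLayout.json");
try (ClusterMap clusterMap = clusterAgentsFactory.getClusterMap()) {
  // use the ClusterMap here
}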
Use of com.github.ambry.clustermap.ClusterAgentsFactory in project ambry by linkedin.
The class AmbryMain, method main:
public static void main(String[] args) {
  final AmbryServer ambryServer;
  int exitCode = 0;
  try {
    InvocationOptions options = new InvocationOptions(args);
    Properties properties = Utils.loadProps(options.serverPropsFilePath);
    VerifiableProperties verifiableProperties = new VerifiableProperties(properties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    ClusterAgentsFactory clusterAgentsFactory =
        Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig,
            options.hardwareLayoutFilePath, options.partitionLayoutFilePath);
    logger.info("Bootstrapping AmbryServer");
    ambryServer = new AmbryServer(verifiableProperties, clusterAgentsFactory, SystemTime.getInstance());
    // attach shutdown handler to catch control-c
    Runtime.getRuntime().addShutdownHook(new Thread() {
      public void run() {
        logger.info("Received shutdown signal. Shutting down AmbryServer");
        ambryServer.shutdown();
      }
    });
    ambryServer.startup();
    ambryServer.awaitShutdown();
  } catch (Exception e) {
    logger.error("Exception during bootstrap of AmbryServer", e);
    exitCode = 1;
  }
  logger.info("Exiting AmbryMain");
  System.exit(exitCode);
}
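The shutdown hook above is an anonymous Thread subclass; on Java 8 and later the same hook can be written with a lambda. This is purely a stylistic alternative, not how the Ambry source writes it.

// Equivalent shutdown hook using a lambda (Java 8+); behavior is unchanged.
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
  logger.info("Received shutdown signal. Shutting down AmbryServer");
  ambryServer.shutdown();
}));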
Use of com.github.ambry.clustermap.ClusterAgentsFactory in project ambry by linkedin.
The class IndexWritePerformance, method main:
public static void main(String[] args) {
  FileWriter writer = null;
  try {
    OptionParser parser = new OptionParser();
    ArgumentAcceptingOptionSpec<Integer> numberOfIndexesOpt =
        parser.accepts("numberOfIndexes", "The number of indexes to create")
            .withRequiredArg().describedAs("number_of_indexes").ofType(Integer.class);
    ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt =
        parser.accepts("hardwareLayout", "The path of the hardware layout file")
            .withRequiredArg().describedAs("hardware_layout").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> partitionLayoutOpt =
        parser.accepts("partitionLayout", "The path of the partition layout file")
            .withRequiredArg().describedAs("partition_layout").ofType(String.class);
    ArgumentAcceptingOptionSpec<Integer> numberOfWritersOpt =
        parser.accepts("numberOfWriters", "The number of writers that write to a random index concurrently")
            .withRequiredArg().describedAs("The number of writers").ofType(Integer.class).defaultsTo(4);
    ArgumentAcceptingOptionSpec<Integer> writesPerSecondOpt =
        parser.accepts("writesPerSecond", "The rate at which writes need to be performed")
            .withRequiredArg().describedAs("The number of writes per second").ofType(Integer.class).defaultsTo(1000);
    ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt =
        parser.accepts("enableVerboseLogging", "Enables verbose logging")
            .withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
    OptionSet options = parser.parse(args);
    ArrayList<OptionSpec> listOpt = new ArrayList<>();
    listOpt.add(numberOfIndexesOpt);
    listOpt.add(hardwareLayoutOpt);
    listOpt.add(partitionLayoutOpt);
    ToolUtils.ensureOrExit(listOpt, options, parser);
    int numberOfIndexes = options.valueOf(numberOfIndexesOpt);
    int numberOfWriters = options.valueOf(numberOfWritersOpt);
    int writesPerSecond = options.valueOf(writesPerSecondOpt);
    boolean enableVerboseLogging = options.has(verboseLoggingOpt);
    if (enableVerboseLogging) {
      System.out.println("Enabled verbose logging");
    }
    final AtomicLong totalTimeTakenInNs = new AtomicLong(0);
    final AtomicLong totalWrites = new AtomicLong(0);
    String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
    String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
    ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory,
        clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
    StoreKeyFactory factory = new BlobIdFactory(map);
    File logFile = new File(System.getProperty("user.dir"), "writeperflog");
    writer = new FileWriter(logFile);
    MetricRegistry metricRegistry = new MetricRegistry();
    StoreMetrics metrics = new StoreMetrics(metricRegistry);
    DiskSpaceAllocator diskSpaceAllocator =
        new DiskSpaceAllocator(false, null, 0, new StorageManagerMetrics(metricRegistry));
    Log log = new Log(System.getProperty("user.dir"), 10, 10, diskSpaceAllocator, metrics);
    ScheduledExecutorService s = Utils.newScheduler(numberOfWriters, "index", false);
    ArrayList<BlobIndexMetrics> indexWithMetrics = new ArrayList<BlobIndexMetrics>(numberOfIndexes);
    Properties props = new Properties();
    props.setProperty("store.index.memory.size.bytes", "2097152");
    StoreConfig config = new StoreConfig(new VerifiableProperties(props));
    for (Integer i = 0; i < numberOfIndexes; i++) {
      File indexFile = new File(System.getProperty("user.dir"), i.toString());
      if (indexFile.exists()) {
        for (File c : indexFile.listFiles()) {
          c.delete();
        }
      } else {
        indexFile.mkdir();
      }
      System.out.println("Creating index folder " + indexFile.getAbsolutePath());
      writer.write("logdir-" + indexFile.getAbsolutePath() + "\n");
      indexWithMetrics.add(new BlobIndexMetrics(indexFile.getAbsolutePath(), s, log, enableVerboseLogging,
          totalWrites, totalTimeTakenInNs, totalWrites, config, writer, factory));
    }
    final CountDownLatch latch = new CountDownLatch(numberOfWriters);
    final AtomicBoolean shutdown = new AtomicBoolean(false);
    // attach shutdown handler to catch control-c
    Runtime.getRuntime().addShutdownHook(new Thread() {
      public void run() {
        try {
          System.out.println("Shutdown invoked");
          shutdown.set(true);
          latch.await();
          // average latency per write: total time in nanoseconds divided by the write count, converted to seconds
          System.out.println("Total writes : " + totalWrites.get() + " Total time taken : "
              + totalTimeTakenInNs.get() + " Nano Seconds Average time taken per write "
              + ((double) totalTimeTakenInNs.get() / totalWrites.get()) / SystemTime.NsPerSec + " Seconds");
        } catch (Exception e) {
          System.out.println("Error while shutting down " + e);
        }
      }
    });
    Throttler throttler = new Throttler(writesPerSecond, 100, true, SystemTime.getInstance());
    Thread[] threadIndexPerf = new Thread[numberOfWriters];
    for (int i = 0; i < numberOfWriters; i++) {
      threadIndexPerf[i] = new Thread(new IndexWritePerfRun(indexWithMetrics, throttler, shutdown, latch, map));
      threadIndexPerf[i].start();
    }
    for (int i = 0; i < numberOfWriters; i++) {
      threadIndexPerf[i].join();
    }
  } catch (StoreException e) {
    System.err.println("Index creation error on exit " + e.getMessage());
  } catch (Exception e) {
    System.err.println("Error on exit " + e);
  } finally {
    if (writer != null) {
      try {
        writer.close();
      } catch (Exception e) {
        System.out.println("Error when closing the writer");
      }
    }
  }
}
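The summary line printed by the shutdown hook converts the accumulated nanoseconds into seconds per write. A hypothetical helper, not part of the tool, makes that conversion explicit:

// Hypothetical helper (not in IndexWritePerformance): average seconds per write
// from the two counters the shutdown hook reads.
static double averageSecondsPerWrite(long totalTimeTakenInNs, long totalWrites) {
  return totalWrites == 0 ? 0 : ((double) totalTimeTakenInNs / totalWrites) / SystemTime.NsPerSec;
}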
Use of com.github.ambry.clustermap.ClusterAgentsFactory in project ambry by linkedin.
The class StoreCopier, method main:
public static void main(String[] args) throws Exception {
  VerifiableProperties properties = ToolUtils.getVerifiableProperties(args);
  CopierConfig config = new CopierConfig(properties);
  StoreConfig storeConfig = new StoreConfig(properties);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
  ClusterAgentsFactory clusterAgentsFactory =
      Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig,
          config.hardwareLayoutFilePath, config.partitionLayoutFilePath);
  try (ClusterMap clusterMap = clusterAgentsFactory.getClusterMap()) {
    StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
    File srcDir = new File(config.srcStoreDirPath);
    File tgtDir = new File(config.tgtStoreDirPath);
    StoreMetrics metrics = new StoreMetrics(clusterMap.getMetricRegistry());
    DiskSpaceAllocator diskSpaceAllocator =
        new DiskSpaceAllocator(false, null, 0, new StorageManagerMetrics(clusterMap.getMetricRegistry()));
    try (StoreCopier storeCopier = new StoreCopier("src", srcDir, tgtDir, config.storeCapacity,
        config.fetchSizeInBytes, storeConfig, metrics, storeKeyFactory, new DiskIOScheduler(null),
        diskSpaceAllocator, Collections.emptyList(), SystemTime.getInstance())) {
      storeCopier.copy(new StoreFindTokenFactory(storeKeyFactory).getNewFindToken());
    }
  }
}
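The copy starts from a freshly created find token, which should position the copier at the very beginning of the source store; resuming a partial copy would presumably require persisting and reloading an earlier token instead. A sketch of the same call with the token pulled out into a variable (names are illustrative):

// Sketch only: a new token marks the start of the store, so the copier above
// walks the source from its first entry.
FindTokenFactory tokenFactory = new StoreFindTokenFactory(storeKeyFactory);
FindToken startToken = tokenFactory.getNewFindToken();
storeCopier.copy(startToken);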
Use of com.github.ambry.clustermap.ClusterAgentsFactory in project ambry by linkedin.
The class ConcurrencyTestTool, method main:
public static void main(String[] args) throws Exception {
  InvocationOptions options = new InvocationOptions(args);
  Properties properties = Utils.loadProps(options.routerPropsFilePath);
  ToolUtils.addClusterMapProperties(properties);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(properties));
  ClusterMap clusterMap =
      ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig,
          options.hardwareLayoutFilePath, options.partitionLayoutFilePath)).getClusterMap();
  PutGetHelperFactory putGetHelperFactory = null;
  if (options.putGetHelperFactoryStr.equals(ROUTER_PUT_GET_HELPER)) {
    putGetHelperFactory = Utils.getObj(options.putGetHelperFactoryStr, properties, clusterMap,
        options.maxBlobSizeInBytes, options.minBlobSizeInBytes);
  } else if (options.putGetHelperFactoryStr.equals(SERVER_PUT_GET_HELPER)) {
    putGetHelperFactory = Utils.getObj(options.putGetHelperFactoryStr, properties, options.hostName, options.port,
        clusterMap, options.maxBlobSizeInBytes, options.minBlobSizeInBytes);
  }
  PutGetHelper putGetHelper = putGetHelperFactory.getPutGetHelper();
  ConcurrencyTestTool concurrencyTestTool = new ConcurrencyTestTool();
  concurrencyTestTool.startTest(putGetHelper, options.maxParallelPutCount, options.parallelGetCount,
      options.totalPutBlobCount, options.maxGetCountPerBlob, options.burstCountForGet,
      options.maxFailuresPerPutBatchToStopPuts, options.maxFailuresPerGetBatchToStopGets, options.deleteAndValidate,
      options.sleepTimeBetweenBatchPutsInMs, options.sleepTimeBetweenBatchGetsInMs,
      options.measurementIntervalInSecs);
}
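Note that if putGetHelperFactoryStr matches neither ROUTER_PUT_GET_HELPER nor SERVER_PUT_GET_HELPER, putGetHelperFactory stays null and the getPutGetHelper() call throws a NullPointerException. A defensive variant, not present in the tool, would fail fast with a clearer message before that call:

// Hypothetical guard (not in ConcurrencyTestTool): reject unknown factory names explicitly.
if (putGetHelperFactory == null) {
  throw new IllegalArgumentException(
      "Unrecognized putGetHelperFactory: " + options.putGetHelperFactoryStr);
}
PutGetHelper putGetHelper = putGetHelperFactory.getPutGetHelper();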