Usage of org.apache.storm.utils.NimbusClient in the Apache Storm project:
class ThroughputVsLatency, method main.
/**
 * The main entry point for ThroughputVsLatency.
 *
 * <p>Parses the command line, starts an HTTP metrics server, submits the
 * word-count benchmark topology (spout -&gt; split -&gt; count), monitors it for
 * the requested number of minutes, and then exits the JVM with status 0 on
 * success or -1 on failure.
 *
 * @param args the command line args
 * @throws Exception on any error.
 */
public static void main(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(Option.builder("h").longOpt("help").desc("Print a help message").build());
    options.addOption(Option.builder("t").longOpt("test-time").argName("MINS").hasArg().desc("How long to run the tests for in mins (defaults to " + TEST_EXECUTE_TIME_DEFAULT + ")").build());
    options.addOption(Option.builder().longOpt("rate").argName("SENTENCES/SEC").hasArg().desc("How many sentences per second to run. (defaults to " + DEFAULT_RATE_PER_SECOND + ")").build());
    options.addOption(Option.builder().longOpt("name").argName("TOPO_NAME").hasArg().desc("Name of the topology to run (defaults to " + DEFAULT_TOPO_NAME + ")").build());
    options.addOption(Option.builder().longOpt("spouts").argName("NUM").hasArg().desc("Number of spouts to use (defaults to " + DEFAULT_NUM_SPOUTS + ")").build());
    options.addOption(Option.builder().longOpt("splitters").argName("NUM").hasArg().desc("Number of splitter bolts to use (defaults to " + DEFAULT_NUM_SPLITS + ")").build());
    options.addOption(Option.builder().longOpt("splitter-imbalance").argName("MS(:COUNT)?").hasArg().desc("The number of ms that the first COUNT splitters will wait before processing. This creates an imbalance " + "that helps test load aware groupings (defaults to 0:1)").build());
    options.addOption(Option.builder().longOpt("counters").argName("NUM").hasArg().desc("Number of counter bolts to use (defaults to " + DEFAULT_NUM_COUNTS + ")").build());
    LoadMetricsServer.addCommandLineOptions(options);
    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = null;
    Exception commandLineException = null;
    SlowExecutorPattern slowness = null;
    double numMins = TEST_EXECUTE_TIME_DEFAULT;
    double ratePerSecond = DEFAULT_RATE_PER_SECOND;
    String name = DEFAULT_TOPO_NAME;
    int numSpouts = DEFAULT_NUM_SPOUTS;
    int numSplits = DEFAULT_NUM_SPLITS;
    int numCounts = DEFAULT_NUM_COUNTS;
    try {
        cmd = parser.parse(options, args);
        if (cmd.hasOption("t")) {
            // parseDouble avoids the needless boxing of Double.valueOf and
            // matches how --rate is parsed below.
            numMins = Double.parseDouble(cmd.getOptionValue("t"));
        }
        if (cmd.hasOption("rate")) {
            ratePerSecond = Double.parseDouble(cmd.getOptionValue("rate"));
        }
        if (cmd.hasOption("name")) {
            name = cmd.getOptionValue("name");
        }
        if (cmd.hasOption("spouts")) {
            numSpouts = Integer.parseInt(cmd.getOptionValue("spouts"));
        }
        if (cmd.hasOption("splitters")) {
            numSplits = Integer.parseInt(cmd.getOptionValue("splitters"));
        }
        if (cmd.hasOption("counters")) {
            numCounts = Integer.parseInt(cmd.getOptionValue("counters"));
        }
        if (cmd.hasOption("splitter-imbalance")) {
            slowness = SlowExecutorPattern.fromString(cmd.getOptionValue("splitter-imbalance"));
        }
    } catch (ParseException | NumberFormatException e) {
        // Remember the failure; help text is printed below so --help and a
        // bad option share the same exit path.
        commandLineException = e;
    }
    if (commandLineException != null || cmd.hasOption('h')) {
        if (commandLineException != null) {
            System.err.println("ERROR " + commandLineException.getMessage());
        }
        new HelpFormatter().printHelp("ThroughputVsLatency [options]", options);
        return;
    }
    // Metadata reported alongside the measured metrics.
    Map<String, Object> metrics = new LinkedHashMap<>();
    metrics.put("target_rate", ratePerSecond);
    metrics.put("spout_parallel", numSpouts);
    metrics.put("split_parallel", numSplits);
    metrics.put("count_parallel", numCounts);
    Config conf = new Config();
    Map<String, Object> sysConf = Utils.readStormConfig();
    LoadMetricsServer metricServer = new LoadMetricsServer(sysConf, cmd, metrics);
    metricServer.serve();
    String url = metricServer.getUrl();
    NimbusClient client = NimbusClient.getConfiguredClient(sysConf);
    conf.registerMetricsConsumer(LoggingMetricsConsumer.class);
    // Forward worker metrics back to the local metrics server over HTTP.
    conf.registerMetricsConsumer(HttpForwardingMetricsConsumer.class, url, 1);
    Map<String, String> workerMetrics = new HashMap<>();
    if (!NimbusClient.isLocalOverride()) {
        // sigar uses JNI and does not work in local mode
        workerMetrics.put("CPU", "org.apache.storm.metrics.sigar.CPUMetric");
    }
    conf.put(Config.TOPOLOGY_WORKER_METRICS, workerMetrics);
    conf.put(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS, 10);
    conf.put(Config.TOPOLOGY_WORKER_GC_CHILDOPTS, "-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+UseConcMarkSweepGC " + "-XX:NewSize=128m -XX:CMSInitiatingOccupancyFraction=70 -XX:-CMSConcurrentMTEnabled");
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-Xmx2g");
    TopologyBuilder builder = new TopologyBuilder();
    // The target rate is split evenly across the spout instances.
    builder.setSpout("spout", new FastRandomSentenceSpout((long) ratePerSecond / numSpouts), numSpouts);
    builder.setBolt("split", new SplitSentence(slowness), numSplits).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), numCounts).fieldsGrouping("split", new Fields("word"));
    int exitStatus = -1;
    // ScopedTopologySet kills the topologies it tracks when closed.
    try (ScopedTopologySet topologyNames = new ScopedTopologySet(client.getClient())) {
        StormSubmitter.submitTopology(name, conf, builder.createTopology());
        topologyNames.add(name);
        metricServer.monitorFor(numMins, client.getClient(), topologyNames);
        exitStatus = 0;
    } catch (Exception e) {
        LOG.error("Error while running test", e);
    } finally {
        // Non-daemon metrics threads would otherwise keep the JVM alive.
        System.exit(exitStatus);
    }
}
Usage of org.apache.storm.utils.NimbusClient in the Apache Storm project:
class BlobStoreUtils, method downloadMissingBlob.
// Download missing blobs from potential nimbodes
/**
 * Download a blob that is missing locally from one of the candidate nimbus hosts.
 *
 * <p>Tries each nimbus in turn until the blob has been created in the local
 * {@code blobStore}; success is confirmed by finding the key in the local
 * key listing after creation.
 *
 * @param conf storm configuration used to build the nimbus client
 * @param blobStore the local blob store to create the blob in
 * @param key the blob key to download
 * @param nimbusInfos candidate nimbus hosts to try
 * @return true if the blob was downloaded and created locally, false otherwise
 * @throws TTransportException if the thrift connection cannot be established
 */
// Download missing blobs from potential nimbodes
public static boolean downloadMissingBlob(Map<String, Object> conf, BlobStore blobStore, String key, Set<NimbusInfo> nimbusInfos) throws TTransportException {
    ReadableBlobMeta rbm;
    ClientBlobStore remoteBlobStore;
    boolean isSuccess = false;
    LOG.debug("Download blob NimbusInfos {}", nimbusInfos);
    for (NimbusInfo nimbusInfo : nimbusInfos) {
        if (isSuccess) {
            break;
        }
        LOG.debug("Download blob key: {}, NimbusInfo {}", key, nimbusInfo);
        try (NimbusClient client = new NimbusClient(conf, nimbusInfo.getHost(), nimbusInfo.getPort(), null)) {
            rbm = client.getClient().getBlobMeta(key);
            remoteBlobStore = new NimbusBlobStore();
            remoteBlobStore.setClient(conf, client);
            try (InputStreamWithMeta in = remoteBlobStore.getBlob(key)) {
                blobStore.createBlob(key, in, rbm.get_settable(), getNimbusSubject());
            }
            // if key already exists while creating the blob else update it
            Iterator<String> keyIterator = blobStore.listKeys();
            while (keyIterator.hasNext()) {
                if (keyIterator.next().equals(key)) {
                    LOG.debug("Success creating key, {}", key);
                    isSuccess = true;
                    break;
                }
            }
        } catch (IOException | AuthorizationException exception) {
            throw new RuntimeException(exception);
        } catch (KeyAlreadyExistsException kae) {
            // Pass the exception as the trailing argument (no placeholder for
            // it) so SLF4J logs the full stack trace.
            LOG.info("KeyAlreadyExistsException Key: {}", key, kae);
        } catch (KeyNotFoundException knf) {
            // Catching and logging KeyNotFoundException because, if
            // there is a subsequent update and delete, the non-leader
            // nimbodes might throw an exception.
            LOG.info("KeyNotFoundException Key: {}", key, knf);
        } catch (Exception exp) {
            // Logging an exception while client is connecting
            LOG.error("Exception while downloading blob with key {}", key, exp);
        }
    }
    if (!isSuccess) {
        LOG.error("Could not download the blob with key: {}", key);
    }
    return isSuccess;
}
Usage of org.apache.storm.utils.NimbusClient in the Apache Storm project:
class BlobStoreUtils, method createStateInZookeeper.
/**
 * Record in ZooKeeper that this node now has the blob identified by {@code key}.
 *
 * @param conf storm configuration used to build the nimbus client
 * @param key the blob key to register state for
 * @param nimbusInfo the nimbus host to connect to
 * @throws TTransportException if the thrift connection cannot be established
 */
public static void createStateInZookeeper(Map<String, Object> conf, String key, NimbusInfo nimbusInfo) throws TTransportException {
    ClientBlobStore cb = new NimbusBlobStore();
    try {
        cb.setClient(conf, new NimbusClient(conf, nimbusInfo.getHost(), nimbusInfo.getPort(), null));
        cb.createStateInZookeeper(key);
    } finally {
        // shutdown() closes the NimbusClient handed to setClient; without it
        // the thrift connection leaks.
        cb.shutdown();
    }
}
Usage of org.apache.storm.utils.NimbusClient in the Apache Storm project:
class BlobStoreUtils, method downloadUpdatedBlob.
// Download updated blobs from potential nimbodes
/**
 * Refresh a locally-present blob with updated content from one of the
 * candidate nimbus hosts.
 *
 * <p>Tries each nimbus in turn; on the first successful copy the loop stops.
 * If the copy fails part-way, the partially-written local update is cancelled
 * in the finally block so the local blob is not left half-written.
 *
 * @param conf storm configuration used to build the nimbus client
 * @param blobStore the local blob store holding the blob to update
 * @param key the blob key to update
 * @param nimbusInfos candidate nimbus hosts to try
 * @return true if the blob was copied and committed locally, false otherwise
 * @throws TTransportException if the thrift connection cannot be established
 */
// Download updated blobs from potential nimbodes
public static boolean downloadUpdatedBlob(Map<String, Object> conf, BlobStore blobStore, String key, Set<NimbusInfo> nimbusInfos) throws TTransportException {
    ClientBlobStore remoteBlobStore;
    // 'out' is non-null only while an update is open and uncommitted; the
    // finally block uses that to decide whether to cancel.
    AtomicOutputStream out = null;
    boolean isSuccess = false;
    LOG.debug("Download blob NimbusInfos {}", nimbusInfos);
    for (NimbusInfo nimbusInfo : nimbusInfos) {
        if (isSuccess) {
            break;
        }
        try (NimbusClient client = new NimbusClient(conf, nimbusInfo.getHost(), nimbusInfo.getPort(), null)) {
            remoteBlobStore = new NimbusBlobStore();
            remoteBlobStore.setClient(conf, client);
            try (InputStreamWithMeta in = remoteBlobStore.getBlob(key)) {
                out = blobStore.updateBlob(key, getNimbusSubject());
                byte[] buffer = new byte[2048];
                int len = 0;
                while ((len = in.read(buffer)) > 0) {
                    out.write(buffer, 0, len);
                }
                // Close commits the update; null out so finally does not
                // cancel a completed write.
                out.close();
                out = null;
            }
            isSuccess = true;
        } catch (FileNotFoundException fnf) {
            LOG.warn("Blobstore file for key '{}' does not exist or got deleted before it could be downloaded.", key, fnf);
        } catch (IOException | AuthorizationException exception) {
            throw new RuntimeException(exception);
        } catch (KeyNotFoundException knf) {
            // Catching and logging KeyNotFoundException because, if
            // there is a subsequent update and delete, the non-leader
            // nimbodes might throw an exception.
            LOG.info("KeyNotFoundException", knf);
        } catch (Exception exp) {
            // Logging an exception while client is connecting
            LOG.error("Exception", exp);
        } finally {
            // Roll back a partially-written update so the blob stays intact.
            if (out != null) {
                try {
                    out.cancel();
                } catch (IOException e) {
                    // Ignore.
                }
            }
        }
    }
    if (!isSuccess) {
        LOG.error("Could not update the blob with key: {}", key);
    }
    return isSuccess;
}
Usage of org.apache.storm.utils.NimbusClient in the Apache Storm project:
class AuthTest, method verifyIncorrectJaasConf.
/**
 * Verify that connecting with a bad JAAS configuration fails with the
 * expected exception type somewhere in the cause chain.
 *
 * @param server the thrift server to connect to
 * @param conf base configuration to copy and corrupt
 * @param jaas path to the (incorrect) JAAS configuration file
 * @param expectedException the exception type expected in the cause chain
 */
public static void verifyIncorrectJaasConf(ThriftServer server, Map<String, Object> conf, String jaas, Class<? extends Exception> expectedException) {
    Map<String, Object> badConf = new HashMap<>(conf);
    badConf.put("java.security.auth.login.config", jaas);
    try (NimbusClient client = new NimbusClient(badConf, "localhost", server.getPort(), NIMBUS_TIMEOUT)) {
        client.getClient().activate("bad_auth_test_topology");
        fail("An exception should have been thrown trying to connect.");
    } catch (Exception e) {
        LOG.info("Got Exception...", e);
        // Use fail() rather than the 'assert' keyword: 'assert' is a no-op
        // unless the JVM runs with -ea, which would silently skip this check.
        if (!Utils.exceptionCauseIsInstanceOf(expectedException, e)) {
            fail("Expected " + expectedException.getName() + " in the cause chain but got: " + e);
        }
    }
}
Aggregations