Use of org.apache.storm.utils.NimbusClient in project storm by apache.
The class EstimateThroughput, method main().
/**
* Main entry point for estimate throughput command.
* @param args the command line arguments.
* @throws Exception on any error.
*/
public static void main(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(Option.builder("h").longOpt("help").desc("Print a help message").build());
    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = null;
    boolean printHelp = false;
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        System.err.println("ERROR " + e.getMessage());
        printHelp = true;
    }
    if (printHelp || cmd.hasOption('h')) {
        new HelpFormatter().printHelp("EstimateThroughput [options] [topologyName]*", options);
        return;
    }
    Config conf = new Config();
    int exitStatus = -1;
    List<TopologyLoadConf> regular = new ArrayList<>();
    List<TopologyLoadConf> trident = new ArrayList<>();
    try (NimbusClient nc = NimbusClient.getConfiguredClient(conf)) {
        Nimbus.Iface client = nc.getClient();
        List<String> topologyNames = cmd.getArgList();
        for (TopologySummary topologySummary : client.getTopologySummaries()) {
            if (topologyNames.isEmpty() || topologyNames.contains(topologySummary.get_name())) {
                TopologyLoadConf capturedConf = CaptureLoad.captureTopology(client, topologySummary);
                if (capturedConf.looksLikeTrident()) {
                    trident.add(capturedConf);
                } else {
                    regular.add(capturedConf);
                }
            }
        }
        System.out.println("TOPOLOGY\tTOTAL MESSAGES/sec\tESTIMATED INPUT MESSAGES/sec");
        for (TopologyLoadConf tl : regular) {
            System.out.println(tl.name + "\t" + tl.getAllEmittedAggregate() + "\t" + tl.getSpoutEmittedAggregate());
        }
        for (TopologyLoadConf tl : trident) {
            System.out.println(tl.name + "\t" + tl.getAllEmittedAggregate() + "\t" + tl.getTridentEstimatedEmittedAggregate());
        }
        exitStatus = 0;
    } catch (Exception e) {
        LOG.error("Error trying to capture topologies...", e);
    } finally {
        System.exit(exitStatus);
    }
}
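For reference, the NimbusClient access pattern the snippet above relies on can also be used on its own. A minimal sketch, assuming a reachable cluster and storm-client on the classpath; the ListTopologies wrapper class is hypothetical, while the Storm calls are the same ones used above:

import java.util.Map;
import org.apache.storm.generated.Nimbus;
import org.apache.storm.generated.TopologySummary;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;

public class ListTopologies {
    public static void main(String[] args) throws Exception {
        // Load the cluster configuration (storm.yaml plus defaults).
        Map<String, Object> conf = Utils.readStormConfig();
        // NimbusClient is AutoCloseable; try-with-resources closes the Thrift connection.
        try (NimbusClient nc = NimbusClient.getConfiguredClient(conf)) {
            Nimbus.Iface client = nc.getClient();
            for (TopologySummary summary : client.getTopologySummaries()) {
                System.out.println(summary.get_name() + "\t" + summary.get_status());
            }
        }
    }
}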
Use of org.apache.storm.utils.NimbusClient in project storm by apache.
The class StormSubmitter, method pushCredentials().
/**
* Push a new set of credentials to the running topology.
* @param name the name of the topology to push credentials to.
* @param stormConf the topology-specific configuration, if desired. See {@link Config}.
* @param credentials the credentials to push.
* @throws AuthorizationException if you are not authorized to push credentials.
* @throws NotAliveException if the topology is not alive
* @throws InvalidTopologyException if any other error happens
*/
public static void pushCredentials(String name, Map stormConf, Map<String, String> credentials) throws AuthorizationException, NotAliveException, InvalidTopologyException {
    stormConf = new HashMap(stormConf);
    stormConf.putAll(Utils.readCommandLineOpts());
    Map conf = Utils.readStormConfig();
    conf.putAll(stormConf);
    Map<String, String> fullCreds = populateCredentials(conf, credentials);
    if (fullCreds.isEmpty()) {
        LOG.warn("No credentials were found to push to " + name);
        return;
    }
    try {
        if (localNimbus != null) {
            LOG.info("Pushing Credentials to topology {} in local mode", name);
            localNimbus.uploadNewCredentials(name, new Credentials(fullCreds));
        } else {
            try (NimbusClient client = NimbusClient.getConfiguredClient(conf)) {
                LOG.info("Uploading new credentials to {}", name);
                client.getClient().uploadNewCredentials(name, new Credentials(fullCreds));
            }
        }
        LOG.info("Finished pushing creds to topology: {}", name);
    } catch (TException e) {
        throw new RuntimeException(e);
    }
}
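A hedged usage sketch of the method above: rotating a credential on a running topology. The topology name "word-count" and the credential key/value are placeholders; which keys are meaningful depends on the credential plugins (IAutoCredentials implementations) configured on the cluster:

import java.util.HashMap;
import java.util.Map;
import org.apache.storm.StormSubmitter;

public class CredentialRotation {
    public static void main(String[] args) throws Exception {
        Map<String, String> creds = new HashMap<>();
        // Placeholder credential; real keys depend on the configured credential plugins.
        creds.put("my.service.token", "s3cr3t-token");
        // Pass an empty conf map rather than null: pushCredentials copies it with new HashMap(stormConf).
        StormSubmitter.pushCredentials("word-count", new HashMap<String, Object>(), creds);
    }
}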
Use of org.apache.storm.utils.NimbusClient in project storm by apache.
The class StormSubmitter, method submitTopologyAs().
/**
* Submits a topology to run on the cluster as a particular user. A topology runs forever or until
* explicitly killed.
*
* @param name the name of the storm topology.
* @param stormConf the topology-specific configuration. See {@link Config}.
* @param topology the processing to execute.
* @param opts options to manipulate how the topology is started.
* @param progressListener to track the progress of the jar upload process.
* @param asUser The user as which this topology should be submitted.
* @throws AlreadyAliveException if a topology with this name is already running.
* @throws InvalidTopologyException if an invalid topology was submitted.
* @throws AuthorizationException if authorization fails.
* @throws IllegalArgumentException thrown if the configs will yield an unschedulable topology (validateConfs performs this check).
* @throws SubmitterHookException if any Exception occurs during initialization or invocation of registered {@link ISubmitterHook}
*/
public static void submitTopologyAs(String name, Map stormConf, StormTopology topology, SubmitOptions opts, ProgressListener progressListener, String asUser) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, IllegalArgumentException {
    if (!Utils.isValidConf(stormConf)) {
        throw new IllegalArgumentException("Storm conf is not valid. Must be json-serializable");
    }
    stormConf = new HashMap(stormConf);
    stormConf.putAll(Utils.readCommandLineOpts());
    Map conf = Utils.readStormConfig();
    conf.putAll(stormConf);
    stormConf.putAll(prepareZookeeperAuthentication(conf));
    validateConfs(conf, topology);
    Map<String, String> passedCreds = new HashMap<>();
    if (opts != null) {
        Credentials tmpCreds = opts.get_creds();
        if (tmpCreds != null) {
            passedCreds = tmpCreds.get_creds();
        }
    }
    Map<String, String> fullCreds = populateCredentials(conf, passedCreds);
    if (!fullCreds.isEmpty()) {
        if (opts == null) {
            opts = new SubmitOptions(TopologyInitialStatus.ACTIVE);
        }
        opts.set_creds(new Credentials(fullCreds));
    }
    try {
        if (localNimbus != null) {
            LOG.info("Submitting topology " + name + " in local mode");
            if (opts != null) {
                localNimbus.submitTopologyWithOpts(name, stormConf, topology, opts);
            } else {
                // this is for backwards compatibility
                localNimbus.submitTopology(name, stormConf, topology);
            }
            LOG.info("Finished submitting topology: " + name);
        } else {
            String serConf = JSONValue.toJSONString(stormConf);
            try (NimbusClient client = NimbusClient.getConfiguredClientAs(conf, asUser)) {
                if (topologyNameExists(name, client)) {
                    throw new RuntimeException("Topology with name `" + name + "` already exists on cluster");
                }
                // Dependency uploading only makes sense for distributed mode
                List<String> jarsBlobKeys = Collections.emptyList();
                List<String> artifactsBlobKeys;
                DependencyUploader uploader = new DependencyUploader();
                try {
                    uploader.init();
                    jarsBlobKeys = uploadDependencyJarsToBlobStore(uploader);
                    artifactsBlobKeys = uploadDependencyArtifactsToBlobStore(uploader);
                } catch (Throwable e) {
                    // remove uploaded jars blobs, not artifacts since they're shared across the cluster
                    uploader.deleteBlobs(jarsBlobKeys);
                    uploader.shutdown();
                    throw e;
                }
                try {
                    setDependencyBlobsToTopology(topology, jarsBlobKeys, artifactsBlobKeys);
                    submitTopologyInDistributeMode(name, topology, opts, progressListener, asUser, conf, serConf, client);
                } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e) {
                    // remove uploaded jars blobs, not artifacts since they're shared across the cluster
                    // Note that we don't handle TException by deleting jars blobs,
                    // because it's safer to leave some blobs than to have the topology not run
                    uploader.deleteBlobs(jarsBlobKeys);
                    throw e;
                } finally {
                    uploader.shutdown();
                }
            }
        }
    } catch (TException e) {
        throw new RuntimeException(e);
    }
    invokeSubmitterHook(name, asUser, conf, topology);
}
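To illustrate the call site, a minimal submission sketch. The spout and bolt classes (RandomSentenceSpout, SplitSentence) are illustrative stand-ins for storm-starter-style components; any IRichSpout/IRichBolt works. The topology is submitted on behalf of user "alice":

import java.util.HashMap;
import java.util.Map;
import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.topology.TopologyBuilder;

public class SubmitAsExample {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        // Illustrative components; substitute your own spout and bolt implementations.
        builder.setSpout("sentences", new RandomSentenceSpout(), 2);
        builder.setBolt("split", new SplitSentence(), 4).shuffleGrouping("sentences");
        StormTopology topology = builder.createTopology();
        Map<String, Object> conf = new HashMap<>();
        conf.put(Config.TOPOLOGY_WORKERS, 2);
        // opts and progressListener may be null; the method above handles both cases.
        StormSubmitter.submitTopologyAs("word-count", conf, topology, null, null, "alice");
    }
}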
Use of org.apache.storm.utils.NimbusClient in project storm by apache.
The class ShellSubmission, method main().
public static void main(String[] args) throws Exception {
    if (args.length <= 1) {
        LOG.error("Arguments should be of the form: <path_to_jar> [argument...]");
        System.exit(-1);
    }
    Map<String, Object> conf = ConfigUtils.readStormConfig();
    try (NimbusClient client = NimbusClient.getConfiguredClient(conf)) {
        NimbusSummary ns = client.getClient().getLeader();
        String host = ns.get_host();
        int port = ns.get_port();
        String jarPath = StormSubmitter.submitJar(conf, args[0]);
        String[] newArgs = (String[]) ArrayUtils.addAll(Arrays.copyOfRange(args, 1, args.length), new String[] { host, String.valueOf(port), jarPath });
        ServerUtils.execCommand(newArgs);
    }
}
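The leader lookup used above also works standalone. A minimal sketch, assuming a running cluster; the PrintLeader wrapper class is hypothetical, while the Storm calls mirror the snippet above:

import java.util.Map;
import org.apache.storm.generated.NimbusSummary;
import org.apache.storm.utils.ConfigUtils;
import org.apache.storm.utils.NimbusClient;

public class PrintLeader {
    public static void main(String[] args) throws Exception {
        Map<String, Object> conf = ConfigUtils.readStormConfig();
        try (NimbusClient client = NimbusClient.getConfiguredClient(conf)) {
            // getLeader() returns the NimbusSummary of the current leader nimbus.
            NimbusSummary leader = client.getClient().getLeader();
            System.out.println("Leader nimbus: " + leader.get_host() + ":" + leader.get_port());
        }
    }
}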
Use of org.apache.storm.utils.NimbusClient in project storm by apache.
The class GenLoad, method main().
/**
* Main entry point for GenLoad application.
* @param args the command line args.
* @throws Exception on any error.
*/
public static void main(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(Option.builder("h").longOpt("help").desc("Print a help message").build());
    options.addOption(Option.builder("t").longOpt("test-time").argName("MINS").hasArg().desc("How long to run the tests for in mins (defaults to " + TEST_EXECUTE_TIME_DEFAULT + ")").build());
    options.addOption(Option.builder().longOpt("parallel").argName("MULTIPLIER(:TOPO:COMP)?").hasArg().desc("How much to scale the topology up or down in parallelism. " + "The new parallelism will round up to the next whole number. " + "If a topology + component is supplied only that component will be scaled. " + "If topo or component is blank or a '*' all topologies or components matched will be scaled. " + "Only 1 scaling rule, the most specific, will be applied to a component. Providing a topology name is considered more " + "specific than not providing one. " + "(defaults to 1.0 no scaling)").build());
    options.addOption(Option.builder().longOpt("throughput").argName("MULTIPLIER(:TOPO:COMP)?").hasArg().desc("How much to scale the topology up or down in throughput. " + "If a topology + component is supplied only that component will be scaled. " + "If topo or component is blank or a '*' all topologies or components matched will be scaled. " + "Only 1 scaling rule, the most specific, will be applied to a component. Providing a topology name is considered more " + "specific than not providing one. " + "(defaults to 1.0 no scaling)").build());
    options.addOption(Option.builder().longOpt("local-or-shuffle").desc("replace shuffle grouping with local or shuffle grouping").build());
    options.addOption(Option.builder().longOpt("imbalance").argName("MS(:COUNT)?:TOPO:COMP").hasArg().desc("The number of ms that the first COUNT of TOPO:COMP will wait before processing. This creates an imbalance " + "that helps test load aware groupings. By default there is no imbalance. If no count is given it defaults to 1").build());
    options.addOption(Option.builder().longOpt("debug").desc("Print debug information about the adjusted topology before submitting it.").build());
    LoadMetricsServer.addCommandLineOptions(options);
    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = null;
    Exception commandLineException = null;
    double executeTime = TEST_EXECUTE_TIME_DEFAULT;
    double globalParallel = 1.0;
    Map<String, Double> topoSpecificParallel = new HashMap<>();
    double globalThroughput = 1.0;
    Map<String, Double> topoSpecificThroughput = new HashMap<>();
    Map<String, SlowExecutorPattern> topoSpecificImbalance = new HashMap<>();
    try {
        cmd = parser.parse(options, args);
        if (cmd.hasOption("t")) {
            executeTime = Double.valueOf(cmd.getOptionValue("t"));
        }
        if (cmd.hasOption("parallel")) {
            for (String stringParallel : cmd.getOptionValues("parallel")) {
                Matcher m = MULTI_PATTERN.matcher(stringParallel);
                if (!m.matches()) {
                    throw new ParseException("--parallel " + stringParallel + " is not in the format MULTIPLIER(:TOPO:COMP)?");
                }
                double parallel = Double.parseDouble(m.group("value"));
                String topo = m.group("topo");
                if (topo == null || topo.isEmpty()) {
                    topo = "*";
                }
                String comp = m.group("comp");
                if (comp == null || comp.isEmpty()) {
                    comp = "*";
                }
                if ("*".equals(topo) && "*".equals(comp)) {
                    globalParallel = parallel;
                } else {
                    topoSpecificParallel.put(topo + ":" + comp, parallel);
                }
            }
        }
        if (cmd.hasOption("throughput")) {
            for (String stringThroughput : cmd.getOptionValues("throughput")) {
                Matcher m = MULTI_PATTERN.matcher(stringThroughput);
                if (!m.matches()) {
                    throw new ParseException("--throughput " + stringThroughput + " is not in the format MULTIPLIER(:TOPO:COMP)?");
                }
                double throughput = Double.parseDouble(m.group("value"));
                String topo = m.group("topo");
                if (topo == null || topo.isEmpty()) {
                    topo = "*";
                }
                String comp = m.group("comp");
                if (comp == null || comp.isEmpty()) {
                    comp = "*";
                }
                if ("*".equals(topo) && "*".equals(comp)) {
                    globalThroughput = throughput;
                } else {
                    topoSpecificThroughput.put(topo + ":" + comp, throughput);
                }
            }
        }
        if (cmd.hasOption("imbalance")) {
            for (String stringImbalance : cmd.getOptionValues("imbalance")) {
                // We require there to be both a topology and a component in this case, so parse it out as such.
                String[] parts = stringImbalance.split(":");
                if (parts.length < 3 || parts.length > 4) {
                    throw new ParseException(stringImbalance + " does not appear to match the expected pattern");
                } else if (parts.length == 3) {
                    topoSpecificImbalance.put(parts[1] + ":" + parts[2], SlowExecutorPattern.fromString(parts[0]));
                } else {
                    // == 4
                    topoSpecificImbalance.put(parts[2] + ":" + parts[3], SlowExecutorPattern.fromString(parts[0] + ":" + parts[1]));
                }
            }
        }
    } catch (ParseException | NumberFormatException e) {
        commandLineException = e;
    }
    if (commandLineException != null || cmd.hasOption('h')) {
        if (commandLineException != null) {
            System.err.println("ERROR " + commandLineException.getMessage());
        }
        new HelpFormatter().printHelp("GenLoad [options] [captured_file]*", options);
        return;
    }
    Map<String, Object> metrics = new LinkedHashMap<>();
    metrics.put("parallel_adjust", globalParallel);
    metrics.put("throughput_adjust", globalThroughput);
    metrics.put("local_or_shuffle", cmd.hasOption("local-or-shuffle"));
    metrics.put("topo_parallel", topoSpecificParallel.entrySet().stream().map((entry) -> entry.getValue() + ":" + entry.getKey()).collect(Collectors.toList()));
    metrics.put("topo_throughput", topoSpecificThroughput.entrySet().stream().map((entry) -> entry.getValue() + ":" + entry.getKey()).collect(Collectors.toList()));
    metrics.put("slow_execs", topoSpecificImbalance.entrySet().stream().map((entry) -> entry.getValue() + ":" + entry.getKey()).collect(Collectors.toList()));
    Config conf = new Config();
    LoadMetricsServer metricServer = new LoadMetricsServer(conf, cmd, metrics);
    metricServer.serve();
    String url = metricServer.getUrl();
    int exitStatus = -1;
    try (NimbusClient client = NimbusClient.getConfiguredClient(conf);
         ScopedTopologySet topoNames = new ScopedTopologySet(client.getClient())) {
        for (String topoFile : cmd.getArgList()) {
            try {
                TopologyLoadConf tlc = readTopology(topoFile);
                tlc = tlc.scaleParallel(globalParallel, topoSpecificParallel);
                tlc = tlc.scaleThroughput(globalThroughput, topoSpecificThroughput);
                tlc = tlc.overrideSlowExecs(topoSpecificImbalance);
                if (cmd.hasOption("local-or-shuffle")) {
                    tlc = tlc.replaceShuffleWithLocalOrShuffle();
                }
                if (cmd.hasOption("debug")) {
                    LOG.info("DEBUGGING: {}", tlc.toYamlString());
                }
                topoNames.add(parseAndSubmit(tlc, url));
            } catch (Exception e) {
                System.err.println("Could not submit topology from " + topoFile);
                e.printStackTrace(System.err);
            }
        }
        metricServer.monitorFor(executeTime, client.getClient(), topoNames);
        exitStatus = 0;
    } catch (Exception e) {
        LOG.error("Error trying to run topologies...", e);
    } finally {
        System.exit(exitStatus);
    }
}
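The parsing above depends on a MULTI_PATTERN constant that the excerpt does not show. A plausible reconstruction with the three named groups the code reads (value, topo, comp); the exact regex in storm-loadgen may differ:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MultiPatternSketch {
    // Matches "MULTIPLIER" alone or "MULTIPLIER:TOPO:COMP", where TOPO and COMP may be blank.
    private static final Pattern MULTI_PATTERN =
        Pattern.compile("(?<value>[^:]+)(?::(?<topo>[^:]*):(?<comp>[^:]*))?");

    public static void main(String[] args) {
        // "0.5" scales everything; "0.5:my-topo:split" scales one component; "3::*" uses a blank topo.
        for (String arg : new String[] { "0.5", "0.5:my-topo:split", "3::*" }) {
            Matcher m = MULTI_PATTERN.matcher(arg);
            if (m.matches()) {
                // topo/comp are null when the optional part is absent, matching the null checks above.
                System.out.println(m.group("value") + " topo=" + m.group("topo") + " comp=" + m.group("comp"));
            }
        }
    }
}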