Use of org.apache.storm.utils.WrappedInvalidTopologyException in the Apache Storm project: class StormSubmitter, method submitTopologyAs.
/**
 * Submits a topology to run on the cluster as a particular user. A topology runs forever or until explicitly killed.
 *
 * @param name the name to submit the topology under; must pass {@code Utils.validateTopologyName} and must not already exist on the cluster
 * @param topoConf the topology-specific configuration; must be JSON-serializable. A defensive copy is made, then command-line
 *     overrides and zookeeper auth entries are merged in before submission.
 * @param topology the topology to submit; must declare at least one spout
 * @param opts submit options (initial status, credentials); may be null — a default ACTIVE one is created if credentials must be attached
 * @param progressListener forwarded to the distributed-mode submission for upload progress; presumably may be null — TODO confirm with callers
 * @param asUser The user as which this topology should be submitted.
 * @throws IllegalArgumentException thrown if configs will yield an unschedulable topology. validateConfs validates confs
 * @throws SubmitterHookException if any Exception occurs during initialization or invocation of registered {@link ISubmitterHook}
 */
public static void submitTopologyAs(String name, Map<String, Object> topoConf, StormTopology topology, SubmitOptions opts, ProgressListener progressListener, String asUser) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, IllegalArgumentException {
    // validate topology name first; nothing else should be done if it's invalid.
    Utils.validateTopologyName(name);
    if (!Utils.isValidConf(topoConf)) {
        throw new IllegalArgumentException("Storm conf is not valid. Must be json-serializable");
    }
    // A topology with no spouts has no data source; reject it up front.
    if (topology.get_spouts_size() == 0) {
        throw new WrappedInvalidTopologyException("Topology " + name + " does not have any spout");
    }
    // Defensive copy so the caller's map is not mutated; command-line options
    // take precedence over the caller-supplied topology conf.
    topoConf = new HashMap<>(topoConf);
    topoConf.putAll(Utils.readCommandLineOpts());
    // Full effective conf = cluster storm config overlaid with the topology conf.
    Map<String, Object> conf = Utils.readStormConfig();
    conf.putAll(topoConf);
    // Zookeeper auth entries are derived from the merged conf, but written back
    // into the topology conf that gets serialized and submitted below.
    topoConf.putAll(prepareZookeeperAuthentication(conf));
    validateConfs(conf);
    // Cycles are only warned about, not rejected — submission proceeds regardless.
    try {
        Utils.validateCycleFree(topology, name);
    } catch (InvalidTopologyException ex) {
        LOG.warn("", ex);
    }
    // Merge caller-supplied credentials (from opts) with auto-populated ones.
    Map<String, String> passedCreds = new HashMap<>();
    if (opts != null) {
        Credentials tmpCreds = opts.get_creds();
        if (tmpCreds != null) {
            passedCreds = tmpCreds.get_creds();
        }
    }
    Map<String, String> fullCreds = populateCredentials(conf, passedCreds);
    if (!fullCreds.isEmpty()) {
        // Credentials must travel in SubmitOptions, so create one if needed.
        if (opts == null) {
            opts = new SubmitOptions(TopologyInitialStatus.ACTIVE);
        }
        opts.set_creds(new Credentials(fullCreds));
    }
    try {
        // Only the topology conf (not the full merged conf) is serialized for Nimbus.
        String serConf = JSONValue.toJSONString(topoConf);
        try (NimbusClient client = NimbusClient.getConfiguredClientAs(conf, asUser)) {
            if (!isTopologyNameAllowed(name, client)) {
                throw new RuntimeException("Topology name " + name + " is either not allowed or it already exists on the cluster");
            }
            // Dependency uploading only makes sense for distributed mode
            List<String> jarsBlobKeys = Collections.emptyList();
            List<String> artifactsBlobKeys;
            DependencyUploader uploader = new DependencyUploader();
            try {
                uploader.init();
                jarsBlobKeys = uploadDependencyJarsToBlobStore(uploader);
                artifactsBlobKeys = uploadDependencyArtifactsToBlobStore(uploader);
            } catch (Throwable e) {
                // remove uploaded jars blobs, not artifacts since they're shared across the cluster
                uploader.deleteBlobs(jarsBlobKeys);
                uploader.shutdown();
                throw e;
            }
            try {
                setDependencyBlobsToTopology(topology, jarsBlobKeys, artifactsBlobKeys);
                submitTopologyInDistributeMode(name, topology, opts, progressListener, asUser, conf, serConf, client);
            } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e) {
                // remove uploaded jars blobs, not artifacts since they're shared across the cluster
                // Note that we don't handle TException to delete jars blobs
                // because it's safer to leave some blobs instead of topology not running
                uploader.deleteBlobs(jarsBlobKeys);
                throw e;
            } finally {
                uploader.shutdown();
            }
        }
    } catch (TException e) {
        throw new RuntimeException(e);
    }
    // Hook runs only after a successful submission; may throw SubmitterHookException.
    invokeSubmitterHook(name, asUser, conf, topology);
}
Use of org.apache.storm.utils.WrappedInvalidTopologyException in the Apache Storm project: class StrictTopologyValidator, method validate.
/**
 * Validates that the topology name, every component (spout and bolt) name, and
 * every declared stream name is free of the '.' character; any violation is
 * reported via {@link WrappedInvalidTopologyException}. Why '.' is reserved is
 * not visible here — presumably it collides with a path/metric separator; TODO confirm.
 *
 * @param topologyName name the topology is being submitted under
 * @param topologyConf topology configuration (unused by this validator)
 * @param topology the topology whose component and stream names are checked
 * @throws InvalidTopologyException if any name contains '.'
 */
@Override
public void validate(String topologyName, Map topologyConf, StormTopology topology) throws InvalidTopologyException {
    if (topologyName.contains(".")) {
        throw new WrappedInvalidTopologyException(String.format("Topology name '%s' contains illegal character '.'", topologyName));
    }
    // Iterate entries directly instead of keySet()+get() to avoid a second map lookup.
    for (Map.Entry<String, SpoutSpec> spout : topology.get_spouts().entrySet()) {
        if (spout.getKey().contains(".")) {
            throw new WrappedInvalidTopologyException(String.format("Spout name '%s' contains illegal character '.'", spout.getKey()));
        }
        validateStreamNames(spout.getValue().get_common().get_streams());
    }
    for (Map.Entry<String, Bolt> bolt : topology.get_bolts().entrySet()) {
        if (bolt.getKey().contains(".")) {
            throw new WrappedInvalidTopologyException(String.format("Bolt name '%s' contains illegal character '.'", bolt.getKey()));
        }
        validateStreamNames(bolt.getValue().get_common().get_streams());
    }
}

/**
 * Shared check for spout and bolt stream declarations (previously duplicated
 * inline for both component kinds): rejects any stream name containing '.'.
 *
 * @param streams declared streams keyed by stream name
 * @throws InvalidTopologyException if a stream name contains '.'
 */
private static void validateStreamNames(Map<String, ?> streams) throws InvalidTopologyException {
    for (String streamName : streams.keySet()) {
        if (streamName.contains(".")) {
            throw new WrappedInvalidTopologyException(String.format("Stream name '%s' contains illegal character '.'", streamName));
        }
    }
}
Use of org.apache.storm.utils.WrappedInvalidTopologyException in the Apache Storm project: class Nimbus, method validateTopologySize.
/**
 * Enforces per-topology resource caps at submission time: the worker cap
 * (only when the Resource Aware Scheduler is not in use) and the total
 * executor cap. A cap that is unset (null) is treated as unlimited.
 *
 * @param topoConf topology configuration (source of the requested worker count, default 1)
 * @param nimbusConf nimbus daemon configuration (source of both caps and the scheduler choice)
 * @param topology topology whose components are counted
 * @throws InvalidTopologyException if either configured cap is exceeded
 */
private static void validateTopologySize(Map<String, Object> topoConf, Map<String, Object> nimbusConf, StormTopology topology) throws InvalidTopologyException {
    // check allowedWorkers only if the scheduler is not the Resource Aware Scheduler
    if (!ServerUtils.isRas(nimbusConf)) {
        Integer maxWorkers = ObjectReader.getInt(nimbusConf.get(DaemonConfig.NIMBUS_SLOTS_PER_TOPOLOGY), null);
        int requestedWorkers = ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_WORKERS), 1);
        if (maxWorkers != null && requestedWorkers > maxWorkers) {
            throw new WrappedInvalidTopologyException("Failed to submit topology. Topology requests more than " + maxWorkers + " workers.");
        }
    }
    // Sum the initial executor counts over every component in the topology.
    Integer maxExecutors = ObjectReader.getInt(nimbusConf.get(DaemonConfig.NIMBUS_EXECUTORS_PER_TOPOLOGY), null);
    int requestedExecutors = 0;
    for (Object component : StormCommon.allComponents(topology).values()) {
        requestedExecutors += StormCommon.numStartExecutors(component);
    }
    if (maxExecutors != null && requestedExecutors > maxExecutors) {
        throw new WrappedInvalidTopologyException("Failed to submit topology. Topology requests more than " + maxExecutors + " executors.");
    }
}
Use of org.apache.storm.utils.WrappedInvalidTopologyException in the Apache Storm project: class Nimbus, method rebalance.
/**
 * Rebalances an active topology: validates any per-component executor-count
 * overrides, sanitizes the optional topology-conf overrides, records the
 * caller's principal, and fires the REBALANCE state transition.
 *
 * @param topoName name of the topology to rebalance; must currently be active
 * @param options rebalance options; mutated in place (principal cleared then set, conf overrides re-serialized)
 * @throws NotAliveException if the topology is not active
 * @throws InvalidTopologyException if an executor override names an unknown component or a non-positive count
 * @throws AuthorizationException if the caller is not authorized for the rebalance operation
 * @throws TException on other thrift-level failures
 */
@Override
public void rebalance(String topoName, RebalanceOptions options) throws NotAliveException, InvalidTopologyException, AuthorizationException, TException {
    rebalanceCalls.mark(); // metrics: count rebalance invocations — presumably a meter; confirm type
    assertTopoActive(topoName, true);
    try {
        // Effective conf = nimbus conf overlaid with the stored topology conf.
        Map<String, Object> topoConf = tryReadTopoConfFromName(topoName);
        topoConf = Utils.merge(conf, topoConf);
        final String operation = "rebalance";
        checkAuthorization(topoName, topoConf, operation);
        // Set principal in RebalanceOptions to nil because users are not suppose to set this
        options.set_principal(null);
        // check if executor counts are correctly specified
        StormTopology stormTopology = tryReadTopologyFromName(topoName);
        // All user-declared component ids; sorted set so the error message lists them deterministically.
        Set<String> comps = new TreeSet<>();
        comps.addAll(stormTopology.get_spouts().keySet());
        comps.addAll(stormTopology.get_bolts().keySet());
        Map<String, Integer> execOverrides = options.is_set_num_executors() ? options.get_num_executors() : Collections.emptyMap();
        for (Map.Entry<String, Integer> e : execOverrides.entrySet()) {
            String comp = e.getKey();
            // validate non-system component ids
            if (!Utils.isSystemId(comp) && !comps.contains(comp)) {
                throw new WrappedInvalidTopologyException(String.format("Invalid component %s for topology %s, valid values are %s", comp, topoName, String.join(",", comps)));
            }
            // validate executor count for component
            Integer value = e.getValue();
            if (value == null || value <= 0) {
                throw new WrappedInvalidTopologyException("Number of executors must be greater than 0");
            }
        }
        if (options.is_set_topology_conf_overrides()) {
            Map<String, Object> topoConfigOverrides = Utils.parseJson(options.get_topology_conf_overrides());
            // Clean up some things the user should not set. (Not a security issue, just might confuse the topology)
            topoConfigOverrides.remove(Config.TOPOLOGY_SUBMITTER_PRINCIPAL);
            topoConfigOverrides.remove(Config.TOPOLOGY_SUBMITTER_USER);
            topoConfigOverrides.remove(Config.STORM_ZOOKEEPER_SUPERACL);
            topoConfigOverrides.remove(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME);
            topoConfigOverrides.remove(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD);
            // NOTE(review): the classpath-beginning override is stripped when this
            // daemon flag is TRUE — confirm the intended polarity of this check.
            if ((boolean) conf.getOrDefault(DaemonConfig.STORM_TOPOLOGY_CLASSPATH_BEGINNING_ENABLED, false)) {
                topoConfigOverrides.remove(Config.TOPOLOGY_CLASSPATH_BEGINNING);
            }
            topoConfigOverrides.remove(Config.STORM_LOCAL_HOSTNAME);
            options.set_topology_conf_overrides(JSONValue.toJSONString(topoConfigOverrides));
        }
        // Record the authenticated caller's principal (replaces the null set above).
        Subject subject = getSubject();
        if (subject != null) {
            options.set_principal(subject.getPrincipals().iterator().next().getName());
        }
        transitionName(topoName, TopologyActions.REBALANCE, options, true);
        notifyTopologyActionListener(topoName, operation);
    } catch (Exception e) {
        LOG.warn("rebalance topology exception. (topology name='{}')", topoName, e);
        // Declared thrift exceptions are rethrown unchanged; anything else is wrapped.
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
Aggregations