Example 1 with AlreadyAliveException

Use of org.apache.storm.generated.AlreadyAliveException in project storm by apache.

From class Nimbus, method submitTopologyWithOpts:

@Override
public void submitTopologyWithOpts(String topoName, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException {
    try {
        submitTopologyWithOptsCalls.mark();
        assertIsLeader();
        assert (options != null);
        validateTopologyName(topoName);
        checkAuthorization(topoName, null, "submitTopology");
        assertTopoActive(topoName, false);
        @SuppressWarnings("unchecked") Map<String, Object> topoConf = (Map<String, Object>) JSONValue.parse(jsonConf);
        try {
            ConfigValidation.validateFields(topoConf);
        } catch (IllegalArgumentException ex) {
            throw new InvalidTopologyException(ex.getMessage());
        }
        validator.validate(topoName, topoConf, topology);
        Utils.validateTopologyBlobStoreMap(topoConf, Sets.newHashSet(blobStore.listKeys()));
        long uniqueNum = submittedCount.incrementAndGet();
        String topoId = topoName + "-" + uniqueNum + "-" + Time.currentTimeSecs();
        Map<String, String> creds = null;
        if (options.is_set_creds()) {
            creds = options.get_creds().get_creds();
        }
        topoConf.put(Config.STORM_ID, topoId);
        topoConf.put(Config.TOPOLOGY_NAME, topoName);
        topoConf = normalizeConf(conf, topoConf, topology);
        ReqContext req = ReqContext.context();
        Principal principal = req.principal();
        String submitterPrincipal = principal == null ? null : principal.toString();
        String submitterUser = principalToLocal.toLocal(principal);
        String systemUser = System.getProperty("user.name");
        @SuppressWarnings("unchecked") Set<String> topoAcl = new HashSet<>((List<String>) topoConf.getOrDefault(Config.TOPOLOGY_USERS, Collections.emptyList()));
        topoAcl.add(submitterPrincipal);
        topoAcl.add(submitterUser);
        topoConf.put(Config.TOPOLOGY_SUBMITTER_PRINCIPAL, OR(submitterPrincipal, ""));
        //Don't let the user set who we launch as
        topoConf.put(Config.TOPOLOGY_SUBMITTER_USER, OR(submitterUser, systemUser));
        topoConf.put(Config.TOPOLOGY_USERS, new ArrayList<>(topoAcl));
        topoConf.put(Config.STORM_ZOOKEEPER_SUPERACL, conf.get(Config.STORM_ZOOKEEPER_SUPERACL));
        if (!Utils.isZkAuthenticationConfiguredStormServer(conf)) {
            topoConf.remove(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME);
            topoConf.remove(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD);
        }
        if (!(Boolean) conf.getOrDefault(Config.STORM_TOPOLOGY_CLASSPATH_BEGINNING_ENABLED, false)) {
            topoConf.remove(Config.TOPOLOGY_CLASSPATH_BEGINNING);
        }
        Map<String, Object> totalConf = merge(conf, topoConf);
        topology = normalizeTopology(totalConf, topology);
        IStormClusterState state = stormClusterState;
        if (creds != null) {
            Map<String, Object> finalConf = Collections.unmodifiableMap(topoConf);
            for (INimbusCredentialPlugin autocred : nimbusAutocredPlugins) {
                autocred.populateCredentials(creds, finalConf);
            }
        }
        if (Utils.getBoolean(conf.get(Config.SUPERVISOR_RUN_WORKER_AS_USER), false) && (submitterUser == null || submitterUser.isEmpty())) {
            throw new AuthorizationException("Could not determine the user to run this topology as.");
        }
        //this validates the structure of the topology
        StormCommon.systemTopology(totalConf, topology);
        validateTopologySize(topoConf, conf, topology);
        if (Utils.isZkAuthenticationConfiguredStormServer(conf) && !Utils.isZkAuthenticationConfiguredTopology(topoConf)) {
            throw new IllegalArgumentException("The cluster is configured for zookeeper authentication, but no payload was provided.");
        }
        LOG.info("Received topology submission for {} with conf {}", topoName, Utils.redactValue(topoConf, Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD));
        // cleanup thread killing topology in b/w assignment and starting the topology
        synchronized (submitLock) {
            assertTopoActive(topoName, false);
            //cred-update-lock is not needed here because creds are being added for the first time.
            if (creds != null) {
                state.setCredentials(topoId, new Credentials(creds), topoConf);
            }
            LOG.info("uploadedJar {}", uploadedJarLocation);
            setupStormCode(conf, topoId, uploadedJarLocation, totalConf, topology);
            waitForDesiredCodeReplication(totalConf, topoId);
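            // (sic: "setupHeatbeats" is the method name as declared in IStormClusterState)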
            state.setupHeatbeats(topoId);
            if (Utils.getBoolean(totalConf.get(Config.TOPOLOGY_BACKPRESSURE_ENABLE), false)) {
                state.setupBackpressure(topoId);
            }
            notifyTopologyActionListener(topoName, "submitTopology");
            TopologyStatus status = null;
            switch(options.get_initial_status()) {
                case INACTIVE:
                    status = TopologyStatus.INACTIVE;
                    break;
                case ACTIVE:
                    status = TopologyStatus.ACTIVE;
                    break;
                default:
                    throw new IllegalArgumentException("Initial Status of " + options.get_initial_status() + " is not allowed.");
            }
            startTopology(topoName, topoId, status);
        }
    } catch (Exception e) {
        LOG.warn("Topology submission exception. (topology name='{}')", topoName, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
Also used: INimbusCredentialPlugin(org.apache.storm.security.INimbusCredentialPlugin) TException(org.apache.thrift.TException) AuthorizationException(org.apache.storm.generated.AuthorizationException) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) ReqContext(org.apache.storm.security.auth.ReqContext) NotAliveException(org.apache.storm.generated.NotAliveException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) AlreadyAliveException(org.apache.storm.generated.AlreadyAliveException) KeyAlreadyExistsException(org.apache.storm.generated.KeyAlreadyExistsException) KeyNotFoundException(org.apache.storm.generated.KeyNotFoundException) BindException(java.net.BindException) IStormClusterState(org.apache.storm.cluster.IStormClusterState) TopologyStatus(org.apache.storm.generated.TopologyStatus) Map(java.util.Map) TimeCacheMap(org.apache.storm.utils.TimeCacheMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) NimbusPrincipal(org.apache.storm.security.auth.NimbusPrincipal) Principal(java.security.Principal) Credentials(org.apache.storm.generated.Credentials) HashSet(java.util.HashSet)
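
For context, a minimal client-side sketch of how the SubmitOptions handled above reach Nimbus. This is an illustration under stated assumptions: the topology name is hypothetical and the builder is left unpopulated; StormSubmitter.submitTopologyWithOpts is the client entry point whose server side is the Nimbus method shown above.

import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.SubmitOptions;
import org.apache.storm.generated.TopologyInitialStatus;
import org.apache.storm.topology.TopologyBuilder;

public class InactiveSubmitSketch {

    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        // ... spouts and bolts would be registered here ...
        Config conf = new Config();
        conf.setNumWorkers(2);
        // Ask Nimbus to start the topology paused; the switch at the end of
        // submitTopologyWithOpts above maps this to TopologyStatus.INACTIVE.
        SubmitOptions options = new SubmitOptions(TopologyInitialStatus.INACTIVE);
        try {
            StormSubmitter.submitTopologyWithOpts("my-topology", conf, builder.createTopology(), options);
        } catch (AlreadyAliveException e) {
            // Raised when a topology with this name is already running
            // (assertTopoActive(topoName, false) above).
            System.err.println("Topology already alive: " + e.get_msg());
        }
    }
}

A topology submitted INACTIVE can later be activated, for example with the storm activate CLI command.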

Example 2 with AlreadyAliveException

Use of org.apache.storm.generated.AlreadyAliveException in project flink by apache.

From class FlinkClient, method submitTopologyWithOpts:

/**
	 * Parameter {@code uploadedJarLocation} is actually used to point to the local jar, because Flink does not support
	 * uploading a jar file beforehand. Jar files are always uploaded directly when a program is submitted.
	 */
public void submitTopologyWithOpts(final String name, final String uploadedJarLocation, final FlinkTopology topology) throws AlreadyAliveException, InvalidTopologyException {
    if (this.getTopologyJobId(name) != null) {
        throw new AlreadyAliveException();
    }
    final URI uploadedJarUri;
    final URL uploadedJarUrl;
    try {
        uploadedJarUri = new File(uploadedJarLocation).getAbsoluteFile().toURI();
        uploadedJarUrl = uploadedJarUri.toURL();
        JobWithJars.checkJarFile(uploadedJarUrl);
    } catch (final IOException e) {
        throw new RuntimeException("Problem with jar file " + uploadedJarLocation, e);
    }
    try {
        FlinkClient.addStormConfigToTopology(topology, conf);
    } catch (ClassNotFoundException e) {
        LOG.error("Could not register class for Kryo serialization.", e);
        throw new InvalidTopologyException("Could not register class for Kryo serialization.");
    }
    final StreamGraph streamGraph = topology.getExecutionEnvironment().getStreamGraph();
    streamGraph.setJobName(name);
    final JobGraph jobGraph = streamGraph.getJobGraph();
    jobGraph.addJar(new Path(uploadedJarUri));
    final Configuration configuration = jobGraph.getJobConfiguration();
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort);
    final ClusterClient client;
    try {
        client = new StandaloneClusterClient(configuration);
    } catch (final IOException e) {
        throw new RuntimeException("Could not establish a connection to the job manager", e);
    }
    try {
        ClassLoader classLoader = JobWithJars.buildUserCodeClassLoader(Collections.<URL>singletonList(uploadedJarUrl), Collections.<URL>emptyList(), this.getClass().getClassLoader());
        client.runDetached(jobGraph, classLoader);
    } catch (final ProgramInvocationException e) {
        throw new RuntimeException("Cannot execute job due to ProgramInvocationException", e);
    }
}
Also used: Path(org.apache.flink.core.fs.Path) Configuration(org.apache.flink.configuration.Configuration) GlobalConfiguration(org.apache.flink.configuration.GlobalConfiguration) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) StandaloneClusterClient(org.apache.flink.client.program.StandaloneClusterClient) AlreadyAliveException(org.apache.storm.generated.AlreadyAliveException) IOException(java.io.IOException) URI(java.net.URI) URL(java.net.URL) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) ClusterClient(org.apache.flink.client.program.ClusterClient) StreamGraph(org.apache.flink.streaming.api.graph.StreamGraph) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) File(java.io.File)
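
In practice this method is reached through FlinkSubmitter.submitTopology (Example 3 below), which resolves the local jar path and the configured client before delegating here.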

Example 3 with AlreadyAliveException

Use of org.apache.storm.generated.AlreadyAliveException in project flink by apache.

From class FlinkSubmitter, method submitTopology:

/**
	 * Submits a topology to run on the cluster. A topology runs forever or until explicitly killed. The given {@link
	 * FlinkProgressListener} is ignored because progress bars are not supported by Flink.
	 *
	 * @param name
	 * 		the name of the topology.
	 * @param stormConf
	 * 		the topology-specific configuration. See {@link Config}.
	 * @param topology
	 * 		the processing to execute.
	 * @throws AlreadyAliveException
	 * 		if a topology with this name is already running
	 * @throws InvalidTopologyException
	 * 		if an invalid topology was submitted
	 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public static void submitTopology(final String name, final Map stormConf, final FlinkTopology topology) throws AlreadyAliveException, InvalidTopologyException {
    if (!Utils.isValidConf(stormConf)) {
        throw new IllegalArgumentException("Storm conf is not valid. Must be json-serializable");
    }
    final Configuration flinkConfig = GlobalConfiguration.loadConfiguration();
    if (!stormConf.containsKey(Config.NIMBUS_HOST)) {
        stormConf.put(Config.NIMBUS_HOST, flinkConfig.getString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "localhost"));
    }
    if (!stormConf.containsKey(Config.NIMBUS_THRIFT_PORT)) {
        stormConf.put(Config.NIMBUS_THRIFT_PORT, new Integer(flinkConfig.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 6123)));
    }
    final String serConf = JSONValue.toJSONString(stormConf);
    final FlinkClient client = FlinkClient.getConfiguredClient(stormConf);
    try {
        if (client.getTopologyJobId(name) != null) {
            throw new RuntimeException("Topology with name `" + name + "` already exists on cluster");
        }
        String localJar = System.getProperty("storm.jar");
        if (localJar == null) {
            try {
                for (final URL url : ((ContextEnvironment) ExecutionEnvironment.getExecutionEnvironment()).getJars()) {
                    // TODO verify that there is only one jar
                    localJar = new File(url.toURI()).getAbsolutePath();
                }
            } catch (final URISyntaxException e) {
            // ignore
            } catch (final ClassCastException e) {
            // ignore
            }
        }
        logger.info("Submitting topology " + name + " in distributed mode with conf " + serConf);
        client.submitTopologyWithOpts(name, localJar, topology);
    } catch (final InvalidTopologyException e) {
        logger.warn("Topology submission exception: " + e.get_msg());
        throw e;
    } catch (final AlreadyAliveException e) {
        logger.warn("Topology already alive exception", e);
        throw e;
    }
    logger.info("Finished submitting topology: " + name);
}
Also used: ContextEnvironment(org.apache.flink.client.program.ContextEnvironment) Configuration(org.apache.flink.configuration.Configuration) GlobalConfiguration(org.apache.flink.configuration.GlobalConfiguration) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) URISyntaxException(java.net.URISyntaxException) AlreadyAliveException(org.apache.storm.generated.AlreadyAliveException) URL(java.net.URL) File(java.io.File)
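
Since submitTopology consults the storm.jar system property before falling back to the context environment's jars, a caller can pin the jar explicitly. The following is a hedged sketch: the jar path and topology name are made up for illustration, and FlinkTopology.createTopology is assumed to be the flink-storm wrapper that converts a Storm TopologyBuilder.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.storm.api.FlinkSubmitter;
import org.apache.flink.storm.api.FlinkTopology;
import org.apache.storm.topology.TopologyBuilder;

public class FlinkSubmitSketch {

    @SuppressWarnings({ "rawtypes", "unchecked" })
    public static void main(String[] args) throws Exception {
        // Hypothetical path; when this property is unset, submitTopology falls
        // back to the jars registered with the ContextEnvironment, as shown above.
        System.setProperty("storm.jar", "/path/to/topology.jar");

        // NIMBUS_HOST and NIMBUS_THRIFT_PORT are filled in from the Flink
        // configuration when absent, so an empty conf works on a default setup.
        Map stormConf = new HashMap();

        TopologyBuilder builder = new TopologyBuilder();
        // ... spouts and bolts would be registered here ...
        FlinkSubmitter.submitTopology("word-count", stormConf, FlinkTopology.createTopology(builder));
    }
}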

Example 4 with AlreadyAliveException

Use of org.apache.storm.generated.AlreadyAliveException in project storm by apache.

From class TridentKafkaClientWordCountNamedTopics, method run:

protected void run(String[] args) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, InterruptedException {
    if (args.length > 0 && Arrays.stream(args).anyMatch(option -> option.equals("-h"))) {
        System.out.printf("Usage: java %s [%s] [%s] [%s] [%s]\n", getClass().getName(), "broker_host:broker_port", "topic1", "topic2", "topology_name");
    } else {
        final String brokerUrl = args.length > 0 ? args[0] : KAFKA_LOCAL_BROKER;
        final String topic1 = args.length > 1 ? args[1] : TOPIC_1;
        final String topic2 = args.length > 2 ? args[2] : TOPIC_2;
        System.out.printf("Running with broker_url: [%s], topics: [%s, %s]\n", brokerUrl, topic1, topic2);
        Config tpConf = LocalSubmitter.defaultConfig(true);
        if (args.length == 4) {
            //Submit Remote
            // Producers
            StormSubmitter.submitTopology(topic1 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, topic1));
            StormSubmitter.submitTopology(topic2 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, topic2));
            // Consumer
            StormSubmitter.submitTopology("topics-consumer", tpConf, TridentKafkaConsumerTopology.newTopology(newKafkaTridentSpoutOpaque()));
            // Print results to console, which also causes the print filter in the consumer topology to print the results in the worker log
            Thread.sleep(2000);
            DrpcResultsPrinter.remoteClient().printResults(60, 1, TimeUnit.SECONDS);
        } else {
            //Submit Local
            final LocalSubmitter localSubmitter = LocalSubmitter.newInstance();
            final String topic1Tp = "topic1-producer";
            final String topic2Tp = "topic2-producer";
            final String consTpName = "topics-consumer";
            try {
                // Producers
                localSubmitter.submit(topic1Tp, tpConf, KafkaProducerTopology.newTopology(brokerUrl, topic1));
                localSubmitter.submit(topic2Tp, tpConf, KafkaProducerTopology.newTopology(brokerUrl, topic2));
                // Consumer
                try {
                    localSubmitter.submit(consTpName, tpConf, TridentKafkaConsumerTopology.newTopology(localSubmitter.getDrpc(), newKafkaTridentSpoutOpaque()));
                    // print
                    localSubmitter.printResults(15, 1, TimeUnit.SECONDS);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            } finally {
                // kill
                localSubmitter.kill(topic1Tp);
                localSubmitter.kill(topic2Tp);
                localSubmitter.kill(consTpName);
                // shutdown
                localSubmitter.shutdown();
            }
        }
    }
    // Kill all the non daemon threads
    System.exit(0);
}
Also used: StormSubmitter(org.apache.storm.StormSubmitter) Arrays(java.util.Arrays) KafkaTridentSpoutOpaque(org.apache.storm.kafka.spout.trident.KafkaTridentSpoutOpaque) KafkaSpoutRetryService(org.apache.storm.kafka.spout.KafkaSpoutRetryService) Fields(org.apache.storm.tuple.Fields) AlreadyAliveException(org.apache.storm.generated.AlreadyAliveException) TimeInterval(org.apache.storm.kafka.spout.KafkaSpoutRetryExponentialBackoff.TimeInterval) Serializable(java.io.Serializable) TimeUnit(java.util.concurrent.TimeUnit) AuthorizationException(org.apache.storm.generated.AuthorizationException) Values(org.apache.storm.tuple.Values) List(java.util.List) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Func(org.apache.storm.kafka.spout.Func) EARLIEST(org.apache.storm.kafka.spout.KafkaSpoutConfig.FirstPollOffsetStrategy.EARLIEST) Config(org.apache.storm.Config) KafkaSpoutRetryExponentialBackoff(org.apache.storm.kafka.spout.KafkaSpoutRetryExponentialBackoff) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) KafkaSpoutConfig(org.apache.storm.kafka.spout.KafkaSpoutConfig)

Example 5 with AlreadyAliveException

Use of org.apache.storm.generated.AlreadyAliveException in project storm by apache.

From class BlobStoreAPIWordCountTopology, method buildAndLaunchWordCountTopology:

public void buildAndLaunchWordCountTopology(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomSentenceSpout(), 5);
    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("filter", new FilterWords(), 6).shuffleGrouping("split");
    Config conf = new Config();
    conf.setDebug(true);
    try {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } catch (InvalidTopologyException | AuthorizationException | AlreadyAliveException exp) {
        throw new RuntimeException(exp);
    }
}
Also used: TopologyBuilder(org.apache.storm.topology.TopologyBuilder) AuthorizationException(org.apache.storm.generated.AuthorizationException) Config(org.apache.storm.Config) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) AlreadyAliveException(org.apache.storm.generated.AlreadyAliveException)
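
Example 5 wraps every submission failure in a RuntimeException. A caller that instead wants to treat AlreadyAliveException as recoverable can single it out. The following is a sketch, not part of the original example; the fallback-naming policy is an assumption chosen for illustration.

import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.generated.StormTopology;

public final class ResubmitSketch {

    /** Submits under baseName, appending a numeric suffix while the name is taken. */
    public static void submitWithFallbackName(String baseName, Config conf, StormTopology topology)
            throws InvalidTopologyException, AuthorizationException {
        for (int attempt = 0; attempt < 3; attempt++) {
            String name = attempt == 0 ? baseName : baseName + "-" + attempt;
            try {
                StormSubmitter.submitTopology(name, conf, topology);
                return;
            } catch (AlreadyAliveException e) {
                // Recoverable here: a topology with this name is running, so try
                // the next candidate name instead of failing the whole submission.
            }
        }
        throw new IllegalStateException("No free topology name derived from " + baseName);
    }
}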

Aggregations

AlreadyAliveException (org.apache.storm.generated.AlreadyAliveException): 5
InvalidTopologyException (org.apache.storm.generated.InvalidTopologyException): 5
AuthorizationException (org.apache.storm.generated.AuthorizationException): 3
File (java.io.File): 2
IOException (java.io.IOException): 2
URL (java.net.URL): 2
Configuration (org.apache.flink.configuration.Configuration): 2
GlobalConfiguration (org.apache.flink.configuration.GlobalConfiguration): 2
Config (org.apache.storm.Config): 2
ImmutableMap (com.google.common.collect.ImmutableMap): 1
InterruptedIOException (java.io.InterruptedIOException): 1
Serializable (java.io.Serializable): 1
BindException (java.net.BindException): 1
URI (java.net.URI): 1
URISyntaxException (java.net.URISyntaxException): 1
Principal (java.security.Principal): 1
Arrays (java.util.Arrays): 1
HashMap (java.util.HashMap): 1
HashSet (java.util.HashSet): 1
List (java.util.List): 1