Search in sources:

Example 1 with TopologyAssignException

Use of backtype.storm.generated.TopologyAssignException in project jstorm by alibaba.

The class ServiceHandler, method submitTopologyWithOpts.

/**
     * Submit one topology.
     *
     * @param topologyName        String: topology name
     * @param uploadedJarLocation String: path of the already uploaded jar
     * @param jsonConf            String: the whole topology configuration serialized to JSON
     * @param topology            StormTopology: topology object
     * @param options             SubmitOptions: initial status of the topology (active/inactive)
     * @return the generated topology id
     */
@SuppressWarnings("unchecked")
@Override
public String submitTopologyWithOpts(String topologyName, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options) throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException {
    LOG.info("Receive " + topologyName + ", uploadedJarLocation:" + uploadedJarLocation);
    long start = System.nanoTime();
    // check that the topology name is valid
    if (!Common.charValidate(topologyName)) {
        throw new InvalidTopologyException(topologyName + " is not a valid topology name");
    }
    Map<Object, Object> serializedConf = (Map<Object, Object>) JStormUtils.from_json(jsonConf);
    if (serializedConf == null) {
        LOG.warn("Failed to serialized Configuration");
        throw new InvalidTopologyException("Failed to serialize topology configuration");
    }
    Common.confValidate(serializedConf, data.getConf());
    boolean enableDeploy = ConfigExtension.getTopologyHotDeplogyEnable(serializedConf);
    try {
        checkTopologyActive(data, topologyName, enableDeploy);
    } catch (AlreadyAliveException e) {
        LOG.info(topologyName + " already exists ");
        throw e;
    } catch (NotAliveException e) {
        LOG.info(topologyName + " is not alive ");
        throw e;
    } catch (Throwable e) {
        LOG.info("Failed to check whether topology is alive or not", e);
        throw new TException(e);
    }
    if (enableDeploy) {
        LOG.info("deploy the topology");
        try {
            StormClusterState stormClusterState = data.getStormClusterState();
            String topologyId = Cluster.get_topology_id(stormClusterState, topologyName);
            if (topologyId == null) {
                throw new NotAliveException(topologyName);
            }
            LOG.info("start kill the old  topology {}", topologyId);
            Map oldConf = new HashMap();
            oldConf.putAll(conf);
            Map killedStormConf = StormConfig.read_nimbus_topology_conf(topologyId, data.getBlobStore());
            if (killedStormConf != null) {
                oldConf.putAll(killedStormConf);
            }
            NimbusUtils.transitionName(data, topologyName, true, StatusType.kill, 0);
            KillTopologyEvent.pushEvent(topologyId);
            notifyTopologyActionListener(topologyName, "killTopology");
            // wait until all workers are killed
            final long timeoutSeconds = ConfigExtension.getTaskCleanupTimeoutSec(oldConf);
            ConcurrentHashMap<String, Semaphore> topologyIdtoSem = data.getTopologyIdtoSem();
            if (!topologyIdtoSem.containsKey(topologyId)) {
                topologyIdtoSem.putIfAbsent(topologyId, new Semaphore(0));
            }
            Semaphore semaphore = topologyIdtoSem.get(topologyId);
            if (semaphore != null) {
                semaphore.tryAcquire(timeoutSeconds, TimeUnit.SECONDS);
                topologyIdtoSem.remove(topologyId);
            }
            LOG.info("success kill the old topology {}", topologyId);
        } catch (Exception e) {
            String errMsg = "Failed to kill topology " + topologyName;
            LOG.error(errMsg, e);
            throw new TException(errMsg);
        }
    }
    String topologyId = null;
    synchronized (data) {
        // avoid the same topology being submitted more than once at the same time
        Set<String> pendingTopologies = data.getPendingSubmitTopologies().buildMap().keySet();
        for (String cachedTopologyId : pendingTopologies) {
            if (cachedTopologyId.contains(topologyName + "-"))
                throw new AlreadyAliveException(topologyName + " has already been submitted");
        }
        int counter = data.getSubmittedCount().incrementAndGet();
        topologyId = Common.topologyNameToId(topologyName, counter);
        data.getPendingSubmitTopologies().put(topologyId, null);
    }
    try {
        serializedConf.put(Config.TOPOLOGY_ID, topologyId);
        serializedConf.put(Config.TOPOLOGY_NAME, topologyName);
        Map<Object, Object> stormConf;
        stormConf = NimbusUtils.normalizeConf(conf, serializedConf, topology);
        LOG.info("Normalized configuration:" + stormConf);
        Map<Object, Object> totalStormConf = new HashMap<Object, Object>(conf);
        totalStormConf.putAll(stormConf);
        StormTopology normalizedTopology = NimbusUtils.normalizeTopology(stormConf, topology, true);
        // this validates the structure of the topology
        Common.validate_basic(normalizedTopology, totalStormConf, topologyId);
        // don't need generate real topology, so skip Common.system_topology
        // Common.system_topology(totalStormConf, topology);
        StormClusterState stormClusterState = data.getStormClusterState();
        // create /local-dir/nimbus/topologyId/xxxx files
        setupStormCode(conf, topologyId, uploadedJarLocation, stormConf, normalizedTopology);
        // wait for blob replication before activate topology
        waitForDesiredCodeReplication(conf, topologyId);
        // generate TaskInfo for every bolt or spout in ZK
        // /ZK/tasks/topologyId/xxx
        setupZkTaskInfo(conf, topologyId, stormClusterState);
        //mkdir topology error directory
        String path = Cluster.taskerror_storm_root(topologyId);
        stormClusterState.mkdir(path);
        // make assignments for a topology
        LOG.info("Submit for " + topologyName + " with conf " + serializedConf);
        makeAssignment(topologyName, topologyId, options.get_initial_status());
        // push start event after startup
        double metricsSampleRate = ConfigExtension.getMetricSampleRate(stormConf);
        StartTopologyEvent.pushEvent(topologyId, metricsSampleRate);
        notifyTopologyActionListener(topologyName, "submitTopology");
    } catch (FailedAssignTopologyException e) {
        StringBuilder sb = new StringBuilder();
        sb.append("Fail to sumbit topology, Root cause:");
        if (e.getMessage() == null) {
            sb.append("submit timeout");
        } else {
            sb.append(e.getMessage());
        }
        sb.append("\n\n");
        sb.append("topologyId:" + topologyId);
        sb.append(", uploadedJarLocation:" + uploadedJarLocation + "\n");
        LOG.error(sb.toString(), e);
        throw new TopologyAssignException(sb.toString());
    } catch (InvalidParameterException e) {
        StringBuilder sb = new StringBuilder();
        sb.append("Fail to sumbit topology ");
        sb.append(e.getMessage());
        sb.append(", cause:" + e.getCause());
        sb.append("\n\n");
        sb.append("topologyId:" + topologyId);
        sb.append(", uploadedJarLocation:" + uploadedJarLocation + "\n");
        LOG.error(sb.toString(), e);
        throw new InvalidParameterException(sb.toString());
    } catch (InvalidTopologyException e) {
        LOG.error("Topology is invalid. " + e.get_msg());
        throw e;
    } catch (Throwable e) {
        StringBuilder sb = new StringBuilder();
        sb.append("Fail to sumbit topology ");
        sb.append(e.getMessage());
        sb.append(", cause:" + e.getCause());
        sb.append("\n\n");
        sb.append("topologyId:" + topologyId);
        sb.append(", uploadedJarLocation:" + uploadedJarLocation + "\n");
        LOG.error(sb.toString(), e);
        throw new TopologyAssignException(sb.toString());
    } finally {
        // the assignment attempt for this topology is finished (successfully or not),
        // so remove the topologyId from pendingSubmitTopologies
        data.getPendingSubmitTopologies().remove(topologyId);
        double spend = (System.nanoTime() - start) / TimeUtils.NS_PER_US;
        SimpleJStormMetric.updateNimbusHistogram("submitTopologyWithOpts", spend);
        LOG.info("submitTopologyWithOpts {} costs {}ms", topologyName, spend);
    }
    return topologyId;
}
Also used: TException(org.apache.thrift.TException) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) FailedAssignTopologyException(com.alibaba.jstorm.utils.FailedAssignTopologyException) InvalidTopologyException(backtype.storm.generated.InvalidTopologyException) StormTopology(backtype.storm.generated.StormTopology) AlreadyAliveException(backtype.storm.generated.AlreadyAliveException) Semaphore(java.util.concurrent.Semaphore) InvalidParameterException(java.security.InvalidParameterException) KeyNotFoundException(backtype.storm.generated.KeyNotFoundException) IOException(java.io.IOException) TopologyAssignException(backtype.storm.generated.TopologyAssignException) FileNotFoundException(java.io.FileNotFoundException) NotAliveException(backtype.storm.generated.NotAliveException) KeyAlreadyExistsException(backtype.storm.generated.KeyAlreadyExistsException) StormClusterState(com.alibaba.jstorm.cluster.StormClusterState) Map(java.util.Map) TreeMap(java.util.TreeMap) TimeCacheMap(com.alibaba.jstorm.utils.TimeCacheMap)
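For orientation, a minimal caller-side sketch is shown below: it drives this handler through the Nimbus Thrift client and surfaces a TopologyAssignException to the caller. It is an illustration only. The NimbusClient / Utils.readStormConfig() helpers, the close() call, the topology name, the jar path and the "topology.workers" key are assumptions based on the standard Storm client API rather than part of the example above; get_msg() is the Thrift-generated getter assumed for the exception, and JStormUtils.to_json mirrors the from_json call used by the handler.

import java.util.HashMap;
import java.util.Map;

import backtype.storm.generated.StormTopology;
import backtype.storm.generated.SubmitOptions;
import backtype.storm.generated.TopologyAssignException;
import backtype.storm.generated.TopologyInitialStatus;
import backtype.storm.utils.NimbusClient;
import backtype.storm.utils.Utils;

import com.alibaba.jstorm.utils.JStormUtils;

public class SubmitWithOptsSketch {

    // Submits a prebuilt topology whose jar has already been uploaded to nimbus and
    // returns the topologyId generated by ServiceHandler.submitTopologyWithOpts.
    // (Illustrative sketch; names and helper calls are assumptions, see the note above.)
    public static String submit(StormTopology topology, String uploadedJarLocation) throws Exception {
        Map clusterConf = Utils.readStormConfig();
        NimbusClient nimbus = NimbusClient.getConfiguredClient(clusterConf);
        try {
            // only per-topology overrides go into jsonConf; nimbus merges them with its
            // own configuration via NimbusUtils.normalizeConf
            Map<String, Object> topologyConf = new HashMap<String, Object>();
            topologyConf.put("topology.workers", 2);

            SubmitOptions options = new SubmitOptions(TopologyInitialStatus.ACTIVE);
            return nimbus.getClient().submitTopologyWithOpts(
                    "demo-topology", uploadedJarLocation,
                    JStormUtils.to_json(topologyConf), topology, options);
        } catch (TopologyAssignException e) {
            // raised when assignment fails or times out; the message already carries the
            // topologyId and jar location built by the handler above
            throw new RuntimeException("Assignment failed: " + e.get_msg(), e);
        } finally {
            nimbus.close();
        }
    }
}

Because the handler returns the generated topologyId (topologyName plus a submission counter) on success, the caller gets a handle that stays unique even across repeated submissions of the same topology name.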

Example 2 with TopologyAssignException

Use of backtype.storm.generated.TopologyAssignException in project jstorm by alibaba.

The class ServiceHandler, method restart.

@Override
public void restart(String name, String jsonConf) throws TException, NotAliveException, InvalidTopologyException, TopologyAssignException {
    LOG.info("Begin to restart " + name + ", new configuration:" + jsonConf);
    // 1. get topologyId
    StormClusterState stormClusterState = data.getStormClusterState();
    String topologyId;
    try {
        topologyId = Cluster.get_topology_id(stormClusterState, name);
    } catch (Exception e2) {
        topologyId = null;
    }
    if (topologyId == null) {
        LOG.info("No topology of " + name);
        throw new NotAliveException("No topology of " + name);
    }
    // Restart the topology: Deactivate -> Kill -> Submit
    // 2. Deactivate
    deactivate(name);
    JStormUtils.sleepMs(5000);
    LOG.info("Deactivate " + name);
    // 3. backup old jar/configuration/topology
    StormTopology topology;
    Map topologyConf;
    String topologyCodeLocation = null;
    try {
        topology = StormConfig.read_nimbus_topology_code(topologyId, data.getBlobStore());
        topologyConf = StormConfig.read_nimbus_topology_conf(topologyId, data.getBlobStore());
        if (jsonConf != null) {
            Map<Object, Object> newConf = (Map<Object, Object>) JStormUtils.from_json(jsonConf);
            topologyConf.putAll(newConf);
        }
        // copy storm files back to inbox from blob store
        String parent = StormConfig.masterInbox(conf);
        topologyCodeLocation = parent + PathUtils.SEPERATOR + topologyId;
        FileUtils.forceMkdir(new File(topologyCodeLocation));
        FileUtils.cleanDirectory(new File(topologyCodeLocation));
        copyBackToInbox(topologyId, topologyCodeLocation);
        LOG.info("Successfully read old jar/conf/topology " + name);
        notifyTopologyActionListener(name, "restart");
    } catch (Exception e) {
        LOG.error("Failed to read old jar/conf/topology", e);
        if (topologyCodeLocation != null) {
            try {
                PathUtils.rmr(topologyCodeLocation);
            } catch (IOException ignored) {
            }
        }
        throw new TException("Failed to read old jar/conf/topology ");
    }
    // 4. Kill
    // directly use the remove command to kill; this is more stable than issuing a kill command
    RemoveTransitionCallback killCb = new RemoveTransitionCallback(data, topologyId);
    killCb.execute(new Object[0]);
    LOG.info("Successfully kill the topology " + name);
    // send metric events
    KillTopologyEvent.pushEvent(topologyId);
    // 5. submit
    try {
        submitTopology(name, topologyCodeLocation, JStormUtils.to_json(topologyConf), topology);
    } catch (AlreadyAliveException e) {
        LOG.info("Failed to kill the topology" + name);
        throw new TException("Failed to kill the topology" + name);
    } finally {
        try {
            PathUtils.rmr(topologyCodeLocation);
        } catch (IOException ignored) {
        }
    }
}
Also used: TException(org.apache.thrift.TException) StormTopology(backtype.storm.generated.StormTopology) RemoveTransitionCallback(com.alibaba.jstorm.callback.impl.RemoveTransitionCallback) IOException(java.io.IOException) AlreadyAliveException(backtype.storm.generated.AlreadyAliveException) InvalidParameterException(java.security.InvalidParameterException) FailedAssignTopologyException(com.alibaba.jstorm.utils.FailedAssignTopologyException) KeyNotFoundException(backtype.storm.generated.KeyNotFoundException) TopologyAssignException(backtype.storm.generated.TopologyAssignException) FileNotFoundException(java.io.FileNotFoundException) NotAliveException(backtype.storm.generated.NotAliveException) InvalidTopologyException(backtype.storm.generated.InvalidTopologyException) KeyAlreadyExistsException(backtype.storm.generated.KeyAlreadyExistsException) StormClusterState(com.alibaba.jstorm.cluster.StormClusterState) Map(java.util.Map) TreeMap(java.util.TreeMap) TimeCacheMap(com.alibaba.jstorm.utils.TimeCacheMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) File(java.io.File)
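A similar hedged sketch for the restart path: the caller overrides only part of the configuration and lets the handler recover the jar, the remaining configuration and the topology object from the blob store (step 3 above). Only the restart(name, jsonConf) signature comes from the handler; the client helpers and the "topology.workers" key are the same assumptions as in the previous sketch.

import java.util.HashMap;
import java.util.Map;

import backtype.storm.generated.NotAliveException;
import backtype.storm.generated.TopologyAssignException;
import backtype.storm.utils.NimbusClient;
import backtype.storm.utils.Utils;

import com.alibaba.jstorm.utils.JStormUtils;

public class RestartSketch {

    // Restarts a running topology with a different worker count; all other configuration
    // is read back from the blob store by the handler (step 3 above).
    // (Illustrative sketch; helper calls and keys are assumptions, see the note above.)
    public static void restartWithWorkers(String topologyName, int workers) throws Exception {
        Map clusterConf = Utils.readStormConfig();
        NimbusClient nimbus = NimbusClient.getConfiguredClient(clusterConf);
        try {
            Map<String, Object> overrides = new HashMap<String, Object>();
            overrides.put("topology.workers", workers);
            nimbus.getClient().restart(topologyName, JStormUtils.to_json(overrides));
        } catch (NotAliveException e) {
            System.err.println("No running topology named " + topologyName);
        } catch (TopologyAssignException e) {
            System.err.println("Re-submission failed: " + e.get_msg());
        } finally {
            nimbus.close();
        }
    }
}

Passing null for jsonConf restarts the topology with its original configuration, since the handler only merges the JSON overrides when jsonConf is non-null.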

Aggregations

AlreadyAliveException (backtype.storm.generated.AlreadyAliveException)2 InvalidTopologyException (backtype.storm.generated.InvalidTopologyException)2 KeyAlreadyExistsException (backtype.storm.generated.KeyAlreadyExistsException)2 KeyNotFoundException (backtype.storm.generated.KeyNotFoundException)2 NotAliveException (backtype.storm.generated.NotAliveException)2 StormTopology (backtype.storm.generated.StormTopology)2 TopologyAssignException (backtype.storm.generated.TopologyAssignException)2 StormClusterState (com.alibaba.jstorm.cluster.StormClusterState)2 FailedAssignTopologyException (com.alibaba.jstorm.utils.FailedAssignTopologyException)2 TimeCacheMap (com.alibaba.jstorm.utils.TimeCacheMap)2 FileNotFoundException (java.io.FileNotFoundException)2 IOException (java.io.IOException)2 InvalidParameterException (java.security.InvalidParameterException)2 HashMap (java.util.HashMap)2 Map (java.util.Map)2 TreeMap (java.util.TreeMap)2 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)2 TException (org.apache.thrift.TException)2 RemoveTransitionCallback (com.alibaba.jstorm.callback.impl.RemoveTransitionCallback)1 File (java.io.File)1