Search in sources :

Example 1 with InvalidTopologyException

Use of backtype.storm.generated.InvalidTopologyException in project jstorm by alibaba.

From class WorkerData, method updateStormTopology.

private void updateStormTopology() {
    StormTopology rawTmp;
    StormTopology sysTmp;
    try {
        rawTmp = StormConfig.read_supervisor_topology_code(conf, topologyId);
        sysTmp = Common.system_topology(stormConf, rawTopology);
    } catch (IOException e) {
        LOG.error("Failed to read supervisor topology code for " + topologyId, e);
        return;
    } catch (InvalidTopologyException e) {
        LOG.error("Failed to update sysTopology for " + topologyId, e);
        return;
    }
    updateTopology(rawTopology, rawTmp);
    updateTopology(sysTopology, sysTmp);
}
Also used : StormTopology(backtype.storm.generated.StormTopology) InvalidTopologyException(backtype.storm.generated.InvalidTopologyException) IOException(java.io.IOException)
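
The pattern above only swaps the cached topologies after both the raw read and the system-topology rebuild succeed, so a failure leaves the worker running on its previous topology. Below is a minimal sketch of the same guard in isolation, assuming only a conf map and a previously read raw StormTopology; the class and method names are hypothetical and the import path for Common is an assumption, so treat it as illustrative rather than jstorm code.

import java.util.Map;

import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.generated.StormTopology;

import com.alibaba.jstorm.cluster.Common;

public class SystemTopologyHolder {
    private volatile StormTopology sysTopology;

    /**
     * Rebuild the system topology and replace the cached reference only when
     * the rebuild succeeds; otherwise keep the last known-good topology.
     */
    public boolean refresh(Map stormConf, StormTopology rawTopology) {
        try {
            // Common.system_topology adds the system streams/components and
            // throws InvalidTopologyException if the raw topology is malformed
            StormTopology rebuilt = Common.system_topology(stormConf, rawTopology);
            this.sysTopology = rebuilt;
            return true;
        } catch (InvalidTopologyException e) {
            // keep the old system topology when the new one is invalid
            return false;
        }
    }
}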

Example 2 with InvalidTopologyException

Use of backtype.storm.generated.InvalidTopologyException in project jstorm by alibaba.

From class ServiceHandler, method submitTopologyWithOpts.

/**
     * Submit one Topology
     *
     * @param topologyName        String: topology name
     * @param uploadedJarLocation String: already uploaded jar path
     * @param jsonConf            String: JSON-serialized topology configuration
     * @param topology            StormTopology: topology object
     * @param options             SubmitOptions: submit options (e.g. the initial status)
     */
@SuppressWarnings("unchecked")
@Override
public String submitTopologyWithOpts(String topologyName, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options) throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException {
    LOG.info("Receive " + topologyName + ", uploadedJarLocation:" + uploadedJarLocation);
    long start = System.nanoTime();
    // check that the topology name is valid
    if (!Common.charValidate(topologyName)) {
        throw new InvalidTopologyException(topologyName + " is not a valid topology name");
    }
    Map<Object, Object> serializedConf = (Map<Object, Object>) JStormUtils.from_json(jsonConf);
    if (serializedConf == null) {
        LOG.warn("Failed to serialized Configuration");
        throw new InvalidTopologyException("Failed to serialize topology configuration");
    }
    Common.confValidate(serializedConf, data.getConf());
    boolean enableDeploy = ConfigExtension.getTopologyHotDeplogyEnable(serializedConf);
    try {
        checkTopologyActive(data, topologyName, enableDeploy);
    } catch (AlreadyAliveException e) {
        LOG.info(topologyName + " already exists ");
        throw e;
    } catch (NotAliveException e) {
        LOG.info(topologyName + " is not alive ");
        throw e;
    } catch (Throwable e) {
        LOG.info("Failed to check whether topology is alive or not", e);
        throw new TException(e);
    }
    if (enableDeploy) {
        LOG.info("deploy the topology");
        try {
            StormClusterState stormClusterState = data.getStormClusterState();
            String topologyId = Cluster.get_topology_id(stormClusterState, topologyName);
            if (topologyId == null) {
                throw new NotAliveException(topologyName);
            }
            LOG.info("start kill the old  topology {}", topologyId);
            Map oldConf = new HashMap();
            oldConf.putAll(conf);
            Map killedStormConf = StormConfig.read_nimbus_topology_conf(topologyId, data.getBlobStore());
            if (killedStormConf != null) {
                oldConf.putAll(killedStormConf);
            }
            NimbusUtils.transitionName(data, topologyName, true, StatusType.kill, 0);
            KillTopologyEvent.pushEvent(topologyId);
            notifyTopologyActionListener(topologyName, "killTopology");
            // wait until all of the topology's workers are killed
            final long timeoutSeconds = ConfigExtension.getTaskCleanupTimeoutSec(oldConf);
            ConcurrentHashMap<String, Semaphore> topologyIdtoSem = data.getTopologyIdtoSem();
            if (!topologyIdtoSem.containsKey(topologyId)) {
                topologyIdtoSem.putIfAbsent(topologyId, new Semaphore(0));
            }
            Semaphore semaphore = topologyIdtoSem.get(topologyId);
            if (semaphore != null) {
                semaphore.tryAcquire(timeoutSeconds, TimeUnit.SECONDS);
                topologyIdtoSem.remove(topologyId);
            }
            LOG.info("success kill the old topology {}", topologyId);
        } catch (Exception e) {
            String errMsg = "Failed to kill topology " + topologyName;
            LOG.error(errMsg, e);
            throw new TException(errMsg);
        }
    }
    String topologyId = null;
    synchronized (data) {
        // prevent the same topology from being submitted more than once at the same time
        Set<String> pendingTopologies = data.getPendingSubmitTopologies().buildMap().keySet();
        for (String cachedTopologyId : pendingTopologies) {
            if (cachedTopologyId.contains(topologyName + "-"))
                throw new AlreadyAliveException(topologyName + " has already been submitted");
        }
        int counter = data.getSubmittedCount().incrementAndGet();
        topologyId = Common.topologyNameToId(topologyName, counter);
        data.getPendingSubmitTopologies().put(topologyId, null);
    }
    try {
        serializedConf.put(Config.TOPOLOGY_ID, topologyId);
        serializedConf.put(Config.TOPOLOGY_NAME, topologyName);
        Map<Object, Object> stormConf;
        stormConf = NimbusUtils.normalizeConf(conf, serializedConf, topology);
        LOG.info("Normalized configuration:" + stormConf);
        Map<Object, Object> totalStormConf = new HashMap<Object, Object>(conf);
        totalStormConf.putAll(stormConf);
        StormTopology normalizedTopology = NimbusUtils.normalizeTopology(stormConf, topology, true);
        // this validates the structure of the topology
        Common.validate_basic(normalizedTopology, totalStormConf, topologyId);
        // no need to generate the real (system) topology here, so skip Common.system_topology
        // Common.system_topology(totalStormConf, topology);
        StormClusterState stormClusterState = data.getStormClusterState();
        // create /local-dir/nimbus/topologyId/xxxx files
        setupStormCode(conf, topologyId, uploadedJarLocation, stormConf, normalizedTopology);
        // wait for blob replication before activate topology
        waitForDesiredCodeReplication(conf, topologyId);
        // generate TaskInfo for every bolt or spout in ZK
        // /ZK/tasks/topologyId/xxx
        setupZkTaskInfo(conf, topologyId, stormClusterState);
        //mkdir topology error directory
        String path = Cluster.taskerror_storm_root(topologyId);
        stormClusterState.mkdir(path);
        // make assignments for a topology
        LOG.info("Submit for " + topologyName + " with conf " + serializedConf);
        makeAssignment(topologyName, topologyId, options.get_initial_status());
        // push start event after startup
        double metricsSampleRate = ConfigExtension.getMetricSampleRate(stormConf);
        StartTopologyEvent.pushEvent(topologyId, metricsSampleRate);
        notifyTopologyActionListener(topologyName, "submitTopology");
    } catch (FailedAssignTopologyException e) {
        StringBuilder sb = new StringBuilder();
        sb.append("Fail to sumbit topology, Root cause:");
        if (e.getMessage() == null) {
            sb.append("submit timeout");
        } else {
            sb.append(e.getMessage());
        }
        sb.append("\n\n");
        sb.append("topologyId:" + topologyId);
        sb.append(", uploadedJarLocation:" + uploadedJarLocation + "\n");
        LOG.error(sb.toString(), e);
        throw new TopologyAssignException(sb.toString());
    } catch (InvalidParameterException e) {
        StringBuilder sb = new StringBuilder();
        sb.append("Fail to sumbit topology ");
        sb.append(e.getMessage());
        sb.append(", cause:" + e.getCause());
        sb.append("\n\n");
        sb.append("topologyId:" + topologyId);
        sb.append(", uploadedJarLocation:" + uploadedJarLocation + "\n");
        LOG.error(sb.toString(), e);
        throw new InvalidParameterException(sb.toString());
    } catch (InvalidTopologyException e) {
        LOG.error("Topology is invalid. " + e.get_msg());
        throw e;
    } catch (Throwable e) {
        StringBuilder sb = new StringBuilder();
        sb.append("Fail to sumbit topology ");
        sb.append(e.getMessage());
        sb.append(", cause:" + e.getCause());
        sb.append("\n\n");
        sb.append("topologyId:" + topologyId);
        sb.append(", uploadedJarLocation:" + uploadedJarLocation + "\n");
        LOG.error(sb.toString(), e);
        throw new TopologyAssignException(sb.toString());
    } finally {
        // whether or not the assignment succeeded, remove the topologyId from
        // pendingSubmitTopologies
        data.getPendingSubmitTopologies().remove(topologyId);
        double spend = (System.nanoTime() - start) / TimeUtils.NS_PER_US;
        SimpleJStormMetric.updateNimbusHistogram("submitTopologyWithOpts", spend);
        LOG.info("submitTopologyWithOpts {} costs {}ms", topologyName, spend);
    }
    return topologyId;
}
Also used : TException(org.apache.thrift.TException) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) FailedAssignTopologyException(com.alibaba.jstorm.utils.FailedAssignTopologyException) InvalidTopologyException(backtype.storm.generated.InvalidTopologyException) StormTopology(backtype.storm.generated.StormTopology) AlreadyAliveException(backtype.storm.generated.AlreadyAliveException) Semaphore(java.util.concurrent.Semaphore) InvalidParameterException(java.security.InvalidParameterException) FailedAssignTopologyException(com.alibaba.jstorm.utils.FailedAssignTopologyException) KeyNotFoundException(backtype.storm.generated.KeyNotFoundException) TException(org.apache.thrift.TException) IOException(java.io.IOException) AlreadyAliveException(backtype.storm.generated.AlreadyAliveException) TopologyAssignException(backtype.storm.generated.TopologyAssignException) FileNotFoundException(java.io.FileNotFoundException) NotAliveException(backtype.storm.generated.NotAliveException) InvalidTopologyException(backtype.storm.generated.InvalidTopologyException) KeyAlreadyExistsException(backtype.storm.generated.KeyAlreadyExistsException) InvalidParameterException(java.security.InvalidParameterException) StormClusterState(com.alibaba.jstorm.cluster.StormClusterState) NotAliveException(backtype.storm.generated.NotAliveException) TopologyAssignException(backtype.storm.generated.TopologyAssignException) Map(java.util.Map) TreeMap(java.util.TreeMap) TimeCacheMap(com.alibaba.jstorm.utils.TimeCacheMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap)
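
For reference, the exceptions declared by submitTopologyWithOpts are what a submitting client has to handle. The sketch below shows the client side under the assumption that the standard backtype.storm.StormSubmitter.submitTopology entry point is used; the config key, worker count, and error handling are placeholders, not jstorm code.

import java.util.HashMap;
import java.util.Map;

import backtype.storm.StormSubmitter;
import backtype.storm.generated.AlreadyAliveException;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.generated.StormTopology;

public class SubmitExample {
    public static void submit(String topologyName, StormTopology topology) throws Exception {
        Map<String, Object> conf = new HashMap<String, Object>();
        conf.put("topology.workers", 2); // placeholder setting

        try {
            // serializes conf to JSON and invokes submitTopologyWithOpts on nimbus,
            // i.e. the server-side method shown above
            StormSubmitter.submitTopology(topologyName, conf, topology);
        } catch (AlreadyAliveException e) {
            // a topology with this name is already running or pending submission
            System.err.println(topologyName + " is already alive: " + e.get_msg());
        } catch (InvalidTopologyException e) {
            // name, configuration, or structural validation failed on nimbus
            System.err.println(topologyName + " was rejected: " + e.get_msg());
        }
    }
}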

Example 3 with InvalidTopologyException

Use of backtype.storm.generated.InvalidTopologyException in project jstorm by alibaba.

From class ServiceHandler, method setupZkTaskInfo.

/**
     * generate TaskInfo for every bolt or spout in ZK /ZK/tasks/topologyId/xxx
     *
     * @param conf
     * @param topologyId
     * @param stormClusterState
     * @throws Exception
     */
public void setupZkTaskInfo(Map<Object, Object> conf, String topologyId, StormClusterState stormClusterState) throws Exception {
    Map<Integer, TaskInfo> taskToTaskInfo = mkTaskComponentAssignments(conf, topologyId);
    // mkdir /ZK/taskbeats/topologyId
    int masterId = NimbusUtils.getTopologyMasterId(taskToTaskInfo);
    TopologyTaskHbInfo topoTaskHbinfo = new TopologyTaskHbInfo(topologyId, masterId);
    data.getTasksHeartbeat().put(topologyId, topoTaskHbinfo);
    stormClusterState.topology_heartbeat(topologyId, topoTaskHbinfo);
    if (taskToTaskInfo == null || taskToTaskInfo.size() == 0) {
        throw new InvalidTopologyException("Failed to generate TaskIDs map");
    }
    // key is taskid, value is taskinfo
    stormClusterState.set_task(topologyId, taskToTaskInfo);
}
Also used : TaskInfo(com.alibaba.jstorm.task.TaskInfo) TopologyTaskHbInfo(backtype.storm.generated.TopologyTaskHbInfo) InvalidTopologyException(backtype.storm.generated.InvalidTopologyException)
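
The guard at the end turns an empty task map into an InvalidTopologyException instead of writing an empty task map to ZK via set_task. Below is a minimal sketch of that fail-fast check on its own; the class and helper names are hypothetical and not part of jstorm.

import java.util.Map;

import backtype.storm.generated.InvalidTopologyException;

import com.alibaba.jstorm.task.TaskInfo;

public final class TaskInfoChecks {
    private TaskInfoChecks() {
    }

    // Fail fast if the topology produced no tasks at all; mirrors the guard
    // used in setupZkTaskInfo before the task map is written to ZK.
    public static Map<Integer, TaskInfo> requireNonEmptyTasks(
            String topologyId, Map<Integer, TaskInfo> taskToTaskInfo) throws InvalidTopologyException {
        if (taskToTaskInfo == null || taskToTaskInfo.isEmpty()) {
            throw new InvalidTopologyException("Failed to generate TaskIDs map for " + topologyId);
        }
        return taskToTaskInfo;
    }
}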

Example 4 with InvalidTopologyException

Use of backtype.storm.generated.InvalidTopologyException in project jstorm by alibaba.

From class ServiceHandler, method updateTopology.

@Override
public void updateTopology(String name, String uploadedLocation, String updateConf) throws NotAliveException, InvalidTopologyException, TException {
    try {
        // first, update the jar and the conf
        checkTopologyActive(data, name, true);
        String topologyId = null;
        StormClusterState stormClusterState = data.getStormClusterState();
        topologyId = Cluster.get_topology_id(stormClusterState, name);
        if (topologyId == null) {
            throw new NotAliveException(name);
        }
        BlobStore blobStore = data.getBlobStore();
        StormClusterState clusterState = data.getStormClusterState();
        NimbusInfo nimbusInfo = data.getNimbusHostPortInfo();
        if (uploadedLocation != null) {
            setupJar(uploadedLocation, topologyId, blobStore, clusterState, nimbusInfo, true);
        }
        Map topoConf = StormConfig.read_nimbus_topology_conf(topologyId, data.getBlobStore());
        Map<Object, Object> config = (Map<Object, Object>) JStormUtils.from_json(updateConf);
        topoConf.putAll(config);
        String confKey = StormConfig.master_stormconf_key(topologyId);
        BlobStoreUtils.updateBlob(blobStore, confKey, Utils.serialize(topoConf));
        if (blobStore instanceof LocalFsBlobStore) {
            clusterState.setup_blobstore(confKey, nimbusInfo, BlobStoreUtils.getVersionForKey(confKey, nimbusInfo, conf));
        }
        NimbusUtils.transitionName(data, name, true, StatusType.update_topology, config);
        LOG.info("update topology " + name + " successfully");
        notifyTopologyActionListener(name, "updateTopology");
    } catch (NotAliveException e) {
        String errMsg = "Error, no this topology " + name;
        LOG.error(errMsg, e);
        throw new NotAliveException(errMsg);
    } catch (Exception e) {
        String errMsg = "Failed to update topology " + name;
        LOG.error(errMsg, e);
        throw new TException(errMsg);
    }
}
Also used : TException(org.apache.thrift.TException) StormClusterState(com.alibaba.jstorm.cluster.StormClusterState) LocalFsBlobStore(com.alibaba.jstorm.blobstore.LocalFsBlobStore) NotAliveException(backtype.storm.generated.NotAliveException) Map(java.util.Map) TreeMap(java.util.TreeMap) TimeCacheMap(com.alibaba.jstorm.utils.TimeCacheMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) BlobStore(com.alibaba.jstorm.blobstore.BlobStore) LocalFsBlobStore(com.alibaba.jstorm.blobstore.LocalFsBlobStore) InvalidParameterException(java.security.InvalidParameterException) FailedAssignTopologyException(com.alibaba.jstorm.utils.FailedAssignTopologyException) KeyNotFoundException(backtype.storm.generated.KeyNotFoundException) TException(org.apache.thrift.TException) IOException(java.io.IOException) AlreadyAliveException(backtype.storm.generated.AlreadyAliveException) TopologyAssignException(backtype.storm.generated.TopologyAssignException) FileNotFoundException(java.io.FileNotFoundException) NotAliveException(backtype.storm.generated.NotAliveException) InvalidTopologyException(backtype.storm.generated.InvalidTopologyException) KeyAlreadyExistsException(backtype.storm.generated.KeyAlreadyExistsException) NimbusInfo(backtype.storm.nimbus.NimbusInfo)
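
Since updateTopology is exposed on the nimbus thrift interface, an operator tool can drive a hot conf update through a plain nimbus client. The following is a hedged sketch assuming the standard backtype.storm.utils.NimbusClient helper and that only the conf (not the jar) is being replaced; the method name and the JSON content are placeholders.

import java.util.Map;

import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.generated.Nimbus;
import backtype.storm.generated.NotAliveException;
import backtype.storm.utils.NimbusClient;
import backtype.storm.utils.Utils;

public class UpdateConfExample {
    public static void updateConf(String topologyName, String updateConfJson) throws Exception {
        Map conf = Utils.readStormConfig(); // load storm.yaml / defaults
        NimbusClient client = NimbusClient.getConfiguredClient(conf);
        try {
            Nimbus.Client nimbus = client.getClient();
            // a null uploadedLocation keeps the existing jar and only merges the new conf
            nimbus.updateTopology(topologyName, null, updateConfJson);
        } catch (NotAliveException e) {
            System.err.println("Topology is not running: " + e.get_msg());
        } catch (InvalidTopologyException e) {
            System.err.println("Update rejected: " + e.get_msg());
        } finally {
            client.close();
        }
    }
}

Per the putAll in the example above, whatever JSON is passed as updateConf is simply merged into the stored topology conf before the update transition is triggered.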

Example 5 with InvalidTopologyException

Use of backtype.storm.generated.InvalidTopologyException in project jstorm by alibaba.

From class TransactionBolt, method prepare.

@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    this.conf = stormConf;
    this.topologyContext = context;
    this.topologyId = topologyContext.getTopologyId();
    this.taskId = topologyContext.getThisTaskId();
    this.componentId = topologyContext.getThisComponentId();
    this.upstreamTasks = TransactionCommon.getUpstreamTasks(componentId, topologyContext);
    this.downstreamTasks = TransactionCommon.getDownstreamTasks(componentId, topologyContext);
    this.topologyMasterId = context.getTopologyMasterId();
    LOG.info("TransactionBolt: upstreamTasks=" + upstreamTasks + ", downstreamTasks=" + downstreamTasks);
    this.outputCollector = new TransactionOutputCollector(this, collector);
    this.boltExecutor.prepare(conf, context, new OutputCollector(outputCollector));
    this.boltStatus = State.INIT;
    if (sysTopology == null) {
        try {
            sysTopology = Common.system_topology(stormConf, context.getRawTopology());
        } catch (InvalidTopologyException e) {
            LOG.error("Failed to build system topology", e);
            throw new RuntimeException(e);
        }
    }
    this.lastSuccessfulBatch = new ConcurrentHashMap<Integer, Long>();
    this.processingBatches = new HashMap<Integer, Map<Long, BatchTracker>>();
    Set<String> upstreamSpoutNames = TransactionCommon.getUpstreamSpouts(componentId, topologyContext);
    for (String spoutName : upstreamSpoutNames) {
        int groupId = TransactionCommon.groupIndex(topologyContext.getRawTopology(), spoutName);
        lastSuccessfulBatch.put(groupId, TransactionCommon.INIT_BATCH_ID);
        processingBatches.put(groupId, new HashMap<Long, BatchTracker>());
    }
    this.batchCache = new BatchCache(context, upstreamSpoutNames, sysTopology);
    this.kryoInput = new Input(1);
    this.streamIds = new SerializationFactory.IdDictionary(sysTopology);
    this.inputStreamIds = new HashSet<Integer>();
    Set<GlobalStreamId> inputs = topologyContext.getThisSources().keySet();
    for (GlobalStreamId stream : inputs) {
        inputStreamIds.add(streamIds.getStreamId(stream.get_componentId(), stream.get_streamId()));
    }
    for (String upstreamComponentId : TransactionCommon.getUpstreamComponents(componentId, topologyContext)) {
        inputStreamIds.add(streamIds.getStreamId(upstreamComponentId, TransactionCommon.BARRIER_STREAM_ID));
    }
    //LOG.info("Stream info prepare: streamIds={}, inputStreams={}, inputStreamIds={}", streamIds, inputs, inputStreamIds);
    startInitState();
}
Also used : OutputCollector(backtype.storm.task.OutputCollector) InvalidTopologyException(backtype.storm.generated.InvalidTopologyException) SerializationFactory(backtype.storm.serialization.SerializationFactory) BatchCache(com.alibaba.jstorm.transactional.BatchCache) Input(com.esotericsoftware.kryo.io.Input) GlobalStreamId(backtype.storm.generated.GlobalStreamId) HashMap(java.util.HashMap) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
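
In this prepare method the system topology is needed for the BatchCache and for building a SerializationFactory.IdDictionary used in stream id lookups, which is why an InvalidTopologyException is treated as fatal and rethrown as a RuntimeException. Below is a hedged sketch of that dependency in isolation; the class and helper names are hypothetical and the import path for Common is an assumption.

import java.util.Map;

import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.generated.StormTopology;
import backtype.storm.serialization.SerializationFactory;
import backtype.storm.task.TopologyContext;

import com.alibaba.jstorm.cluster.Common;

public final class StreamIdDictionaries {
    private StreamIdDictionaries() {
    }

    // Build the numeric stream id dictionary from the system topology,
    // mirroring the lazy initialization in TransactionBolt.prepare.
    public static SerializationFactory.IdDictionary buildStreamIdDictionary(
            Map stormConf, TopologyContext context) {
        try {
            StormTopology sysTopology = Common.system_topology(stormConf, context.getRawTopology());
            return new SerializationFactory.IdDictionary(sysTopology);
        } catch (InvalidTopologyException e) {
            // a component cannot prepare against an invalid topology; fail the worker
            throw new RuntimeException("Failed to build system topology", e);
        }
    }
}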

Aggregations

InvalidTopologyException (backtype.storm.generated.InvalidTopologyException): 7
AlreadyAliveException (backtype.storm.generated.AlreadyAliveException): 4
StormTopology (backtype.storm.generated.StormTopology): 4
IOException (java.io.IOException): 4
HashMap (java.util.HashMap): 4
Map (java.util.Map): 4
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
KeyAlreadyExistsException (backtype.storm.generated.KeyAlreadyExistsException): 3
KeyNotFoundException (backtype.storm.generated.KeyNotFoundException): 3
NotAliveException (backtype.storm.generated.NotAliveException): 3
TopologyAssignException (backtype.storm.generated.TopologyAssignException): 3
StormClusterState (com.alibaba.jstorm.cluster.StormClusterState): 3
FailedAssignTopologyException (com.alibaba.jstorm.utils.FailedAssignTopologyException): 3
TimeCacheMap (com.alibaba.jstorm.utils.TimeCacheMap): 3
FileNotFoundException (java.io.FileNotFoundException): 3
InvalidParameterException (java.security.InvalidParameterException): 3
TreeMap (java.util.TreeMap): 3
TException (org.apache.thrift.TException): 3
Config (backtype.storm.Config): 1
ILocalCluster (backtype.storm.ILocalCluster): 1