
Example 36 with IStormClusterState

Use of org.apache.storm.cluster.IStormClusterState in project storm by apache: class Nimbus, method renewCredentials().

private void renewCredentials() throws Exception {
    if (!isLeader()) {
        LOG.info("not a leader, skipping credential renewal.");
        return;
    }
    IStormClusterState state = stormClusterState;
    Collection<ICredentialsRenewer> renewers = credRenewers;
    Map<String, StormBase> assignedBases = state.topologyBases();
    if (assignedBases != null) {
        for (Entry<String, StormBase> entry : assignedBases.entrySet()) {
            String id = entry.getKey();
            String ownerPrincipal = entry.getValue().get_principal();
            Map<String, Object> topoConf = Collections.unmodifiableMap(Utils.merge(conf, tryReadTopoConf(id, topoCache)));
            synchronized (credUpdateLock) {
                Credentials origCreds = state.credentials(id, null);
                if (origCreds != null) {
                    Map<String, String> origCredsMap = origCreds.get_creds();
                    Map<String, String> newCredsMap = new HashMap<>(origCredsMap);
                    for (ICredentialsRenewer renewer : renewers) {
                        LOG.info("Renewing Creds For {} with {} owned by {}", id, renewer, ownerPrincipal);
                        renewer.renew(newCredsMap, topoConf, ownerPrincipal);
                    }
                    // Update worker tokens if needed
                    upsertWorkerTokensInCreds(newCredsMap, ownerPrincipal, id);
                    if (!newCredsMap.equals(origCredsMap)) {
                        state.setCredentials(id, new Credentials(newCredsMap), topoConf);
                    }
                }
            }
        }
    }
}
Also used : ICredentialsRenewer(org.apache.storm.security.auth.ICredentialsRenewer) HashMap(java.util.HashMap) StormBase(org.apache.storm.generated.StormBase) IStormClusterState(org.apache.storm.cluster.IStormClusterState) Credentials(org.apache.storm.generated.Credentials)
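
The write-back is intentionally guarded: the new credentials are assembled in a copy of the map and only pushed to cluster state when the copy differs from the original. A minimal plain-Java sketch of that pattern follows; the renewToken helper is hypothetical and merely stands in for a real ICredentialsRenewer.

import java.util.HashMap;
import java.util.Map;

public class RenewDiffSketch {

    // Hypothetical renewer that refreshes a single credential entry; real renewers
    // implement org.apache.storm.security.auth.ICredentialsRenewer.
    static void renewToken(Map<String, String> creds) {
        creds.put("service.token", "token-" + System.currentTimeMillis());
    }

    public static void main(String[] args) {
        Map<String, String> origCredsMap = new HashMap<>();
        origCredsMap.put("service.token", "token-0");

        // Work on a copy so the original map can be compared afterwards.
        Map<String, String> newCredsMap = new HashMap<>(origCredsMap);
        renewToken(newCredsMap);

        // Only push an update when something actually changed, mirroring the
        // equals() check before state.setCredentials(...) in renewCredentials().
        if (!newCredsMap.equals(origCredsMap)) {
            System.out.println("credentials changed, would write them back to cluster state");
        } else {
            System.out.println("no change, skip the cluster-state write");
        }
    }
}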

Example 37 with IStormClusterState

Use of org.apache.storm.cluster.IStormClusterState in project storm by apache: class Nimbus, method getTopologyHistory().

@SuppressWarnings("unchecked")
@Override
public TopologyHistoryInfo getTopologyHistory(String user) throws AuthorizationException, TException {
    try {
        List<String> adminUsers = (List<String>) conf.getOrDefault(Config.NIMBUS_ADMINS, Collections.emptyList());
        List<String> adminGroups = (List<String>) conf.getOrDefault(Config.NIMBUS_ADMINS_GROUPS, Collections.emptyList());
        IStormClusterState state = stormClusterState;
        List<String> assignedIds = state.assignments(null);
        Set<String> ret = new HashSet<>();
        boolean isAdmin = adminUsers.contains(user);
        for (String topoId : assignedIds) {
            Map<String, Object> topoConf = tryReadTopoConf(topoId, topoCache);
            topoConf = Utils.merge(conf, topoConf);
            List<String> groups = ServerConfigUtils.getTopoLogsGroups(topoConf);
            List<String> topoLogUsers = ServerConfigUtils.getTopoLogsUsers(topoConf);
            if (user == null || isAdmin || isUserPartOf(user, groups) || isUserPartOf(user, adminGroups) || topoLogUsers.contains(user)) {
                ret.add(topoId);
            }
        }
        ret.addAll(readTopologyHistory(user, adminUsers));
        return new TopologyHistoryInfo(new ArrayList<>(ret));
    } catch (Exception e) {
        LOG.warn("Get topology history. (user='{}')", user, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
Also used : TException(org.apache.storm.thrift.TException) WrappedAuthorizationException(org.apache.storm.utils.WrappedAuthorizationException) IOException(java.io.IOException) IllegalStateException(org.apache.storm.generated.IllegalStateException) AlreadyAliveException(org.apache.storm.generated.AlreadyAliveException) WrappedNotAliveException(org.apache.storm.utils.WrappedNotAliveException) WrappedInvalidTopologyException(org.apache.storm.utils.WrappedInvalidTopologyException) AuthorizationException(org.apache.storm.generated.AuthorizationException) NotAliveException(org.apache.storm.generated.NotAliveException) WrappedAlreadyAliveException(org.apache.storm.utils.WrappedAlreadyAliveException) InterruptedIOException(java.io.InterruptedIOException) KeyAlreadyExistsException(org.apache.storm.generated.KeyAlreadyExistsException) TException(org.apache.storm.thrift.TException) WrappedIllegalStateException(org.apache.storm.utils.WrappedIllegalStateException) KeyNotFoundException(org.apache.storm.generated.KeyNotFoundException) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) BindException(java.net.BindException) TopologyHistoryInfo(org.apache.storm.generated.TopologyHistoryInfo) ArrayList(java.util.ArrayList) List(java.util.List) IStormClusterState(org.apache.storm.cluster.IStormClusterState) HashSet(java.util.HashSet)
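
The per-topology visibility rule combines a null user, Nimbus admin membership, the topology's log users and groups, and the admin groups. A plain-Java sketch of that rule follows; the users and groups are made up, and the group check is a simplified stand-in for the group-mapping lookup that the real isUserPartOf performs.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class TopoHistoryFilterSketch {

    // Simplified stand-in for isUserPartOf: the caller supplies the user's groups
    // directly instead of resolving them through Nimbus's group-mapping provider.
    static boolean isMemberOfAny(List<String> userGroups, List<String> allowedGroups) {
        return allowedGroups.stream().anyMatch(userGroups::contains);
    }

    static boolean canSee(String user, boolean isAdmin, List<String> userGroups,
                          List<String> topoLogUsers, List<String> topoLogGroups,
                          List<String> adminGroups) {
        return user == null
                || isAdmin
                || isMemberOfAny(userGroups, topoLogGroups)
                || isMemberOfAny(userGroups, adminGroups)
                || topoLogUsers.contains(user);
    }

    public static void main(String[] args) {
        boolean visible = canSee("alice", false,
                Arrays.asList("analytics"),             // groups alice belongs to (made up)
                Collections.singletonList("bob"),       // topology.logs.users
                Collections.singletonList("analytics"), // topology.logs.groups
                Collections.emptyList());               // nimbus.admins.groups
        System.out.println("alice can see the topology: " + visible); // true via group match
    }
}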

Example 38 with IStormClusterState

Use of org.apache.storm.cluster.IStormClusterState in project storm by apache: class Nimbus, method startTopology().

private void startTopology(String topoName, String topoId, TopologyStatus initStatus, String owner, String principal, Map<String, Object> topoConf, StormTopology stormTopology) throws InvalidTopologyException {
    assert (TopologyStatus.ACTIVE == initStatus || TopologyStatus.INACTIVE == initStatus);
    Map<String, Integer> numExecutors = new HashMap<>();
    StormTopology topology = StormCommon.systemTopology(topoConf, stormTopology);
    for (Entry<String, Object> entry : StormCommon.allComponents(topology).entrySet()) {
        numExecutors.put(entry.getKey(), StormCommon.numStartExecutors(entry.getValue()));
    }
    LOG.info("Activating {}: {}", topoName, topoId);
    StormBase base = new StormBase();
    base.set_name(topoName);
    if (topoConf.containsKey(Config.TOPOLOGY_VERSION)) {
        base.set_topology_version(ObjectReader.getString(topoConf.get(Config.TOPOLOGY_VERSION)));
    }
    base.set_launch_time_secs(Time.currentTimeSecs());
    base.set_status(initStatus);
    base.set_num_workers(ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_WORKERS), 0));
    base.set_component_executors(numExecutors);
    base.set_owner(owner);
    base.set_principal(principal);
    base.set_component_debug(new HashMap<>());
    IStormClusterState state = stormClusterState;
    state.activateStorm(topoId, base, topoConf);
    idToExecutors.getAndUpdate(new Assoc<>(topoId, new HashSet<>(computeExecutors(base, topoConf, stormTopology))));
    notifyTopologyActionListener(topoName, "activate");
}
Also used : HashMap(java.util.HashMap) StormTopology(org.apache.storm.generated.StormTopology) StormBase(org.apache.storm.generated.StormBase) IStormClusterState(org.apache.storm.cluster.IStormClusterState) HashSet(java.util.HashSet)
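
startTopology is reached from a topology submission: Nimbus computes the per-component executor counts, writes a StormBase into cluster state, and activates the topology. A minimal submission sketch follows, assuming the test spout and bolt from Storm's testing package are available on the classpath.

import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.testing.TestWordCounter;
import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.TopologyBuilder;

public class SubmitSketch {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        // Two components; their parallelism hints become the per-component executor
        // counts that startTopology stores via base.set_component_executors(...).
        builder.setSpout("words", new TestWordSpout(), 2);
        builder.setBolt("counter", new TestWordCounter(), 3).shuffleGrouping("words");

        Config conf = new Config();
        conf.setNumWorkers(2); // read back through Config.TOPOLOGY_WORKERS in startTopology

        // Nimbus receives the submission, writes the StormBase into cluster state
        // and activates the topology (the code shown above).
        StormSubmitter.submitTopology("demo-topology", conf, builder.createTopology());
    }
}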

Example 39 with IStormClusterState

Use of org.apache.storm.cluster.IStormClusterState in project storm by apache: class Nimbus, method getOwnerResourceSummaries().

@Override
public List<OwnerResourceSummary> getOwnerResourceSummaries(String owner) throws AuthorizationException, TException {
    try {
        getOwnerResourceSummariesCalls.mark();
        checkAuthorization(null, null, "getOwnerResourceSummaries");
        IStormClusterState state = stormClusterState;
        Map<String, Assignment> topoIdToAssignments = state.assignmentsInfo();
        Map<String, StormBase> topoIdToBases = state.topologyBases();
        Map<String, Number> clusterSchedulerConfig = scheduler.config();
        // Build the owner -> list-of-StormBase mapping in ownerToBasesMap.
        // If the owner argument is null, include every owner that has a StormBase or a scheduler guarantee;
        // otherwise include only the requested owner.
        Map<String, List<StormBase>> ownerToBasesMap = new HashMap<>();
        if (owner == null) {
            // add all the owners to the map
            for (StormBase base : topoIdToBases.values()) {
                String baseOwner = base.get_owner();
                if (!ownerToBasesMap.containsKey(baseOwner)) {
                    List<StormBase> stormbases = new ArrayList<>();
                    stormbases.add(base);
                    ownerToBasesMap.put(baseOwner, stormbases);
                } else {
                    ownerToBasesMap.get(baseOwner).add(base);
                }
            }
            // in addition, add all the owners with guarantees
            List<String> ownersWithGuarantees = new ArrayList<>(clusterSchedulerConfig.keySet());
            for (String ownerWithGuarantees : ownersWithGuarantees) {
                if (!ownerToBasesMap.containsKey(ownerWithGuarantees)) {
                    ownerToBasesMap.put(ownerWithGuarantees, new ArrayList<>());
                }
            }
        } else {
            // only put this owner to the map
            List<StormBase> stormbases = new ArrayList<>();
            for (StormBase base : topoIdToBases.values()) {
                if (owner.equals(base.get_owner())) {
                    stormbases.add(base);
                }
            }
            ownerToBasesMap.put(owner, stormbases);
        }
        List<OwnerResourceSummary> ret = new ArrayList<>();
        // for each owner, get resources, configs, and aggregate
        for (Entry<String, List<StormBase>> ownerToBasesEntry : ownerToBasesMap.entrySet()) {
            String theOwner = ownerToBasesEntry.getKey();
            TopologyResources totalResourcesAggregate = new TopologyResources();
            int totalExecutors = 0;
            int totalWorkers = 0;
            int totalTasks = 0;
            for (StormBase base : ownerToBasesEntry.getValue()) {
                try {
                    String topoId = toTopoId(base.get_name());
                    TopologyResources resources = getResourcesForTopology(topoId, base);
                    totalResourcesAggregate = totalResourcesAggregate.add(resources);
                    Assignment ownerAssignment = topoIdToAssignments.get(topoId);
                    if (ownerAssignment != null && ownerAssignment.get_executor_node_port() != null) {
                        totalExecutors += ownerAssignment.get_executor_node_port().keySet().size();
                        totalWorkers += new HashSet<>(ownerAssignment.get_executor_node_port().values()).size();
                        for (List<Long> executorId : ownerAssignment.get_executor_node_port().keySet()) {
                            totalTasks += StormCommon.executorIdToTasks(executorId).size();
                        }
                    }
                } catch (NotAliveException e) {
                    LOG.warn("{} is not alive.", base.get_name());
                }
            }
            double requestedTotalMemory = totalResourcesAggregate.getRequestedMemOnHeap() + totalResourcesAggregate.getRequestedMemOffHeap();
            double assignedTotalMemory = totalResourcesAggregate.getAssignedMemOnHeap() + totalResourcesAggregate.getAssignedMemOffHeap();
            OwnerResourceSummary ownerResourceSummary = new OwnerResourceSummary(theOwner);
            ownerResourceSummary.set_total_topologies(ownerToBasesEntry.getValue().size());
            ownerResourceSummary.set_total_executors(totalExecutors);
            ownerResourceSummary.set_total_workers(totalWorkers);
            ownerResourceSummary.set_total_tasks(totalTasks);
            ownerResourceSummary.set_memory_usage(assignedTotalMemory);
            ownerResourceSummary.set_cpu_usage(totalResourcesAggregate.getAssignedCpu());
            ownerResourceSummary.set_requested_on_heap_memory(totalResourcesAggregate.getRequestedMemOnHeap());
            ownerResourceSummary.set_requested_off_heap_memory(totalResourcesAggregate.getRequestedMemOffHeap());
            ownerResourceSummary.set_requested_total_memory(requestedTotalMemory);
            ownerResourceSummary.set_requested_cpu(totalResourcesAggregate.getRequestedCpu());
            ownerResourceSummary.set_assigned_on_heap_memory(totalResourcesAggregate.getAssignedMemOnHeap());
            ownerResourceSummary.set_assigned_off_heap_memory(totalResourcesAggregate.getAssignedMemOffHeap());
            if (clusterSchedulerConfig.containsKey(theOwner)) {
                if (underlyingScheduler instanceof ResourceAwareScheduler) {
                    Map<String, Object> schedulerConfig = (Map) clusterSchedulerConfig.get(theOwner);
                    if (schedulerConfig != null) {
                        ownerResourceSummary.set_memory_guarantee((double) schedulerConfig.getOrDefault("memory", 0));
                        ownerResourceSummary.set_cpu_guarantee((double) schedulerConfig.getOrDefault("cpu", 0));
                        ownerResourceSummary.set_memory_guarantee_remaining(ownerResourceSummary.get_memory_guarantee() - ownerResourceSummary.get_memory_usage());
                        ownerResourceSummary.set_cpu_guarantee_remaining(ownerResourceSummary.get_cpu_guarantee() - ownerResourceSummary.get_cpu_usage());
                    }
                } else if (underlyingScheduler instanceof MultitenantScheduler) {
                    ownerResourceSummary.set_isolated_node_guarantee((int) clusterSchedulerConfig.getOrDefault(theOwner, 0));
                }
            }
            LOG.debug("{}", ownerResourceSummary.toString());
            ret.add(ownerResourceSummary);
        }
        return ret;
    } catch (Exception e) {
        LOG.warn("Get owner resource summaries exception. (owner = '{}')", owner);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
Also used : TException(org.apache.storm.thrift.TException) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) StormBase(org.apache.storm.generated.StormBase) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) Assignment(org.apache.storm.generated.Assignment) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WrappedNotAliveException(org.apache.storm.utils.WrappedNotAliveException) NotAliveException(org.apache.storm.generated.NotAliveException) KeySequenceNumber(org.apache.storm.blobstore.KeySequenceNumber) ArrayList(java.util.ArrayList) List(java.util.List) OwnerResourceSummary(org.apache.storm.generated.OwnerResourceSummary) IStormClusterState(org.apache.storm.cluster.IStormClusterState) HashSet(java.util.HashSet) WorkerMetricPoint(org.apache.storm.generated.WorkerMetricPoint) DataPoint(org.apache.storm.metric.api.DataPoint) WrappedAuthorizationException(org.apache.storm.utils.WrappedAuthorizationException) IOException(java.io.IOException) IllegalStateException(org.apache.storm.generated.IllegalStateException) AlreadyAliveException(org.apache.storm.generated.AlreadyAliveException) WrappedNotAliveException(org.apache.storm.utils.WrappedNotAliveException) WrappedInvalidTopologyException(org.apache.storm.utils.WrappedInvalidTopologyException) AuthorizationException(org.apache.storm.generated.AuthorizationException) NotAliveException(org.apache.storm.generated.NotAliveException) WrappedAlreadyAliveException(org.apache.storm.utils.WrappedAlreadyAliveException) InterruptedIOException(java.io.InterruptedIOException) KeyAlreadyExistsException(org.apache.storm.generated.KeyAlreadyExistsException) TException(org.apache.storm.thrift.TException) WrappedIllegalStateException(org.apache.storm.utils.WrappedIllegalStateException) KeyNotFoundException(org.apache.storm.generated.KeyNotFoundException) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) BindException(java.net.BindException) MultitenantScheduler(org.apache.storm.scheduler.multitenant.MultitenantScheduler) AtomicLong(java.util.concurrent.atomic.AtomicLong) Map(java.util.Map) NavigableMap(java.util.NavigableMap) RotatingMap(org.apache.storm.utils.RotatingMap) ImmutableMap(org.apache.storm.shade.com.google.common.collect.ImmutableMap) TimeCacheMap(org.apache.storm.utils.TimeCacheMap) HashMap(java.util.HashMap)
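
Callers normally reach getOwnerResourceSummaries through the Nimbus Thrift client. The sketch below assumes a reachable Nimbus and the generated Thrift getters (get_owner, get_total_topologies, and so on); passing null requests summaries for every owner, mirroring the owner == null branch above.

import java.util.Map;
import org.apache.storm.generated.OwnerResourceSummary;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;

public class OwnerSummarySketch {
    public static void main(String[] args) throws Exception {
        // Read the local storm.yaml / defaults to locate Nimbus.
        Map<String, Object> conf = Utils.readStormConfig();
        NimbusClient nimbus = NimbusClient.getConfiguredClient(conf);
        try {
            // null asks for every owner, i.e. the owner == null branch above.
            for (OwnerResourceSummary summary : nimbus.getClient().getOwnerResourceSummaries(null)) {
                System.out.printf("%s: %d topologies, %.1f MB memory, %.1f CPU%n",
                        summary.get_owner(),
                        summary.get_total_topologies(),
                        summary.get_memory_usage(),
                        summary.get_cpu_usage());
            }
        } finally {
            nimbus.close();
        }
    }
}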

Example 40 with IStormClusterState

Use of org.apache.storm.cluster.IStormClusterState in project storm by apache: class Nimbus, method getTopologySummaryImpl().

private TopologySummary getTopologySummaryImpl(String topoId, StormBase base) throws IOException, TException {
    IStormClusterState state = stormClusterState;
    Assignment assignment = state.assignmentInfo(topoId, null);
    int numTasks = 0;
    int numExecutors = 0;
    int numWorkers = 0;
    if (assignment != null && assignment.is_set_executor_node_port()) {
        for (List<Long> ids : assignment.get_executor_node_port().keySet()) {
            numTasks += StormCommon.executorIdToTasks(ids).size();
        }
        numExecutors = assignment.get_executor_node_port_size();
        numWorkers = new HashSet<>(assignment.get_executor_node_port().values()).size();
    }
    TopologySummary summary = new TopologySummary(topoId, base.get_name(), numTasks, numExecutors, numWorkers, Time.deltaSecs(base.get_launch_time_secs()), extractStatusStr(base));
    try {
        StormTopology topo = tryReadTopology(topoId, topoCache);
        if (topo != null && topo.is_set_storm_version()) {
            summary.set_storm_version(topo.get_storm_version());
        }
    } catch (NotAliveException e) {
        // Ignore; the storm version is simply not set on the summary.
    }
    if (base.is_set_owner()) {
        summary.set_owner(base.get_owner());
    }
    if (base.is_set_topology_version()) {
        summary.set_topology_version(base.get_topology_version());
    }
    String status = idToSchedStatus.get().get(topoId);
    if (status != null) {
        summary.set_sched_status(status);
    }
    TopologyResources resources = getResourcesForTopology(topoId, base);
    if (resources != null) {
        summary.set_requested_memonheap(resources.getRequestedMemOnHeap());
        summary.set_requested_memoffheap(resources.getRequestedMemOffHeap());
        summary.set_requested_cpu(resources.getRequestedCpu());
        summary.set_requested_generic_resources(resources.getRequestedGenericResources());
        summary.set_assigned_memonheap(resources.getAssignedMemOnHeap());
        summary.set_assigned_memoffheap(resources.getAssignedMemOffHeap());
        summary.set_assigned_cpu(resources.getAssignedCpu());
        summary.set_assigned_generic_resources(resources.getAssignedGenericResources());
    }
    try {
        summary.set_replication_count(getBlobReplicationCount(ConfigUtils.masterStormCodeKey(topoId)));
    } catch (Exception e) {
        // This could fail if a blob gets deleted by mistake.  Don't crash nimbus.
        LOG.error("Unable to find blob entry", e);
    }
    return summary;
}
Also used : Assignment(org.apache.storm.generated.Assignment) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WrappedNotAliveException(org.apache.storm.utils.WrappedNotAliveException) NotAliveException(org.apache.storm.generated.NotAliveException) StormTopology(org.apache.storm.generated.StormTopology) AtomicLong(java.util.concurrent.atomic.AtomicLong) TopologySummary(org.apache.storm.generated.TopologySummary) IStormClusterState(org.apache.storm.cluster.IStormClusterState) WorkerMetricPoint(org.apache.storm.generated.WorkerMetricPoint) DataPoint(org.apache.storm.metric.api.DataPoint) WrappedAuthorizationException(org.apache.storm.utils.WrappedAuthorizationException) IOException(java.io.IOException) IllegalStateException(org.apache.storm.generated.IllegalStateException) AlreadyAliveException(org.apache.storm.generated.AlreadyAliveException) WrappedNotAliveException(org.apache.storm.utils.WrappedNotAliveException) WrappedInvalidTopologyException(org.apache.storm.utils.WrappedInvalidTopologyException) AuthorizationException(org.apache.storm.generated.AuthorizationException) NotAliveException(org.apache.storm.generated.NotAliveException) WrappedAlreadyAliveException(org.apache.storm.utils.WrappedAlreadyAliveException) InterruptedIOException(java.io.InterruptedIOException) KeyAlreadyExistsException(org.apache.storm.generated.KeyAlreadyExistsException) TException(org.apache.storm.thrift.TException) WrappedIllegalStateException(org.apache.storm.utils.WrappedIllegalStateException) KeyNotFoundException(org.apache.storm.generated.KeyNotFoundException) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) BindException(java.net.BindException) HashSet(java.util.HashSet)
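
The counts are derived directly from the assignment's executor-to-slot map: one entry per executor, distinct slots for workers, and the inclusive task range encoded in each executor id. A plain-Java sketch of that arithmetic follows, using made-up data.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

public class SummaryCountsSketch {
    public static void main(String[] args) {
        // Executor ids are inclusive [startTask, endTask] ranges; values name a worker slot.
        Map<List<Long>, String> executorNodePort = new HashMap<>();
        executorNodePort.put(Arrays.asList(1L, 2L), "node1:6700");
        executorNodePort.put(Arrays.asList(3L, 4L), "node1:6700");
        executorNodePort.put(Arrays.asList(5L, 5L), "node2:6701");

        int numExecutors = executorNodePort.size();
        // Distinct slots = workers, matching new HashSet<>(...).size() above.
        int numWorkers = new HashSet<>(executorNodePort.values()).size();
        int numTasks = 0;
        for (List<Long> executorId : executorNodePort.keySet()) {
            // StormCommon.executorIdToTasks expands the inclusive range into task ids.
            numTasks += (int) (executorId.get(1) - executorId.get(0) + 1);
        }
        System.out.printf("executors=%d workers=%d tasks=%d%n", numExecutors, numWorkers, numTasks);
        // Prints: executors=3 workers=2 tasks=5
    }
}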

Aggregations

IStormClusterState (org.apache.storm.cluster.IStormClusterState): 49
KeyNotFoundException (org.apache.storm.generated.KeyNotFoundException): 24
IOException (java.io.IOException): 23
AuthorizationException (org.apache.storm.generated.AuthorizationException): 21
KeyAlreadyExistsException (org.apache.storm.generated.KeyAlreadyExistsException): 21
NotAliveException (org.apache.storm.generated.NotAliveException): 21
InterruptedIOException (java.io.InterruptedIOException): 20
BindException (java.net.BindException): 20
AlreadyAliveException (org.apache.storm.generated.AlreadyAliveException): 20
InvalidTopologyException (org.apache.storm.generated.InvalidTopologyException): 20
WrappedNotAliveException (org.apache.storm.utils.WrappedNotAliveException): 20
HashMap (java.util.HashMap): 19
TException (org.apache.storm.thrift.TException): 19
IllegalStateException (org.apache.storm.generated.IllegalStateException): 18
WrappedAlreadyAliveException (org.apache.storm.utils.WrappedAlreadyAliveException): 18
WrappedAuthorizationException (org.apache.storm.utils.WrappedAuthorizationException): 18
WrappedIllegalStateException (org.apache.storm.utils.WrappedIllegalStateException): 18
WrappedInvalidTopologyException (org.apache.storm.utils.WrappedInvalidTopologyException): 18
ArrayList (java.util.ArrayList): 12
HashSet (java.util.HashSet): 10