
Example 1 with DefaultWatcherCallBack

Use of org.apache.storm.callback.DefaultWatcherCallBack in project storm by apache.

From the class AdminCommands, method initialize:

private static void initialize() {
    conf = ConfigUtils.readStormConfig();
    nimbusBlobStore = Utils.getNimbusBlobStore(conf, NimbusInfo.fromConf(conf));
    List<String> servers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
    Object port = conf.get(Config.STORM_ZOOKEEPER_PORT);
    List<ACL> acls = null;
    if (Utils.isZkAuthenticationConfiguredStormServer(conf)) {
        acls = adminZkAcls();
    }
    try {
        stormClusterState = ClusterUtils.mkStormClusterState(conf, acls, new ClusterStateContext(DaemonType.NIMBUS));
    } catch (Exception e) {
        LOG.error("admin can't create stormClusterState", e);
        // propagate the failure instead of silently swallowing it
        throw new RuntimeException(e);
    }
    CuratorFramework zk = Zookeeper.mkClient(conf, servers, port, "", new DefaultWatcherCallBack(), conf);
}
Also used: CuratorFramework (org.apache.curator.framework.CuratorFramework), DefaultWatcherCallBack (org.apache.storm.callback.DefaultWatcherCallBack), ACL (org.apache.zookeeper.data.ACL), ClusterStateContext (org.apache.storm.cluster.ClusterStateContext)
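The CuratorFramework handle created at the end of initialize is not closed in this snippet. Below is a minimal usage sketch of the same mkClient call that does close the client; the helper name inspectStormRoot, the "/storm" path, and the try/finally shape are assumptions made for illustration, not part of the AdminCommands source.

// Hypothetical usage sketch, not from the Storm source: the handle returned by mkClient
// is an ordinary Curator client, so close it once the admin work is done.
private static void inspectStormRoot(Map<String, Object> conf, List<String> servers, Object port) throws Exception {
    CuratorFramework zk = Zookeeper.mkClient(conf, servers, port, "", new DefaultWatcherCallBack(), conf);
    try {
        // "/storm" is a placeholder root path used only for this illustration
        if (zk.checkExists().forPath("/storm") != null) {
            // inspect or repair nodes under the root here
        }
    } finally {
        zk.close(); // release the underlying ZooKeeper connection
    }
}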

Example 2 with DefaultWatcherCallBack

Use of org.apache.storm.callback.DefaultWatcherCallBack in project storm by apache.

From the class AclEnforcement, method verifyAcls:

/**
 * Verify the ZK ACLs are correct and optionally fix them if needed.
 * @param conf the cluster config.
 * @param fixUp true if we want to fix the ACLs else false.
 * @throws Exception on any error.
 */
public static void verifyAcls(Map<String, Object> conf, final boolean fixUp) throws Exception {
    if (!Utils.isZkAuthenticationConfiguredStormServer(conf)) {
        LOG.info("SECURITY IS DISABLED NO FURTHER CHECKS...");
        // There is no security so we are done.
        return;
    }
    ACL superUserAcl = Utils.getSuperUserAcl(conf);
    List<ACL> superAcl = new ArrayList<>(1);
    superAcl.add(superUserAcl);
    List<ACL> drpcFullAcl = new ArrayList<>(2);
    drpcFullAcl.add(superUserAcl);
    String drpcAclString = (String) conf.get(Config.STORM_ZOOKEEPER_DRPC_ACL);
    if (drpcAclString != null) {
        Id drpcAclId = Utils.parseZkId(drpcAclString, Config.STORM_ZOOKEEPER_DRPC_ACL);
        ACL drpcUserAcl = new ACL(ZooDefs.Perms.READ, drpcAclId);
        drpcFullAcl.add(drpcUserAcl);
    }
    List<String> zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
    int port = ObjectReader.getInt(conf.get(Config.STORM_ZOOKEEPER_PORT));
    String stormRoot = (String) conf.get(Config.STORM_ZOOKEEPER_ROOT);
    try (CuratorFramework zk = ClientZookeeper.mkClient(conf, zkServers, port, "", new DefaultWatcherCallBack(), conf, DaemonType.NIMBUS)) {
        if (zk.checkExists().forPath(stormRoot) != null) {
            // First off we want to verify that ROOT is good
            verifyAclStrict(zk, superAcl, stormRoot, fixUp);
        } else {
            LOG.warn("{} does not exist no need to check any more...", stormRoot);
            return;
        }
    }
    // Now that the root is fine we can start to look at the other paths under it.
    try (CuratorFramework zk = ClientZookeeper.mkClient(conf, zkServers, port, stormRoot, new DefaultWatcherCallBack(), conf, DaemonType.NIMBUS)) {
        // Next verify that the blob store is correct before we start it up.
        if (zk.checkExists().forPath(ClusterUtils.BLOBSTORE_SUBTREE) != null) {
            verifyAclStrictRecursive(zk, superAcl, ClusterUtils.BLOBSTORE_SUBTREE, fixUp);
        }
        if (zk.checkExists().forPath(ClusterUtils.BLOBSTORE_MAX_KEY_SEQUENCE_NUMBER_SUBTREE) != null) {
            verifyAclStrict(zk, superAcl, ClusterUtils.BLOBSTORE_MAX_KEY_SEQUENCE_NUMBER_SUBTREE, fixUp);
        }
        // The blobstore is good, now let's get the list of all topo ids
        Set<String> topoIds = new HashSet<>();
        if (zk.checkExists().forPath(ClusterUtils.STORMS_SUBTREE) != null) {
            topoIds.addAll(zk.getChildren().forPath(ClusterUtils.STORMS_SUBTREE));
        }
        Map<String, Id> topoToZkCreds = new HashMap<>();
        // Now let's get the creds for the topos so we can verify those as well.
        BlobStore bs = ServerUtils.getNimbusBlobStore(conf, NimbusInfo.fromConf(conf), null);
        try {
            Subject nimbusSubject = new Subject();
            nimbusSubject.getPrincipals().add(new NimbusPrincipal());
            for (String topoId : topoIds) {
                try {
                    String blobKey = topoId + "-stormconf.ser";
                    Map<String, Object> topoConf = Utils.fromCompressedJsonConf(bs.readBlob(blobKey, nimbusSubject));
                    String payload = (String) topoConf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD);
                    try {
                        topoToZkCreds.put(topoId, new Id("digest", DigestAuthenticationProvider.generateDigest(payload)));
                    } catch (NoSuchAlgorithmException e) {
                        throw new RuntimeException(e);
                    }
                } catch (KeyNotFoundException knf) {
                    LOG.debug("topo removed {}", topoId, knf);
                }
            }
        } finally {
            if (bs != null) {
                bs.shutdown();
            }
        }
        verifyParentWithReadOnlyTopoChildren(zk, superUserAcl, ClusterUtils.STORMS_SUBTREE, topoToZkCreds, fixUp);
        verifyParentWithReadOnlyTopoChildren(zk, superUserAcl, ClusterUtils.ASSIGNMENTS_SUBTREE, topoToZkCreds, fixUp);
        // There is a race on credentials where they can be leaked in some versions of storm.
        verifyParentWithReadOnlyTopoChildrenDeleteDead(zk, superUserAcl, ClusterUtils.CREDENTIALS_SUBTREE, topoToZkCreds, fixUp);
        // There is a race on logconfig where they can be leaked in some versions of storm.
        verifyParentWithReadOnlyTopoChildrenDeleteDead(zk, superUserAcl, ClusterUtils.LOGCONFIG_SUBTREE, topoToZkCreds, fixUp);
        // There is a race on backpressure too...
        verifyParentWithReadWriteTopoChildrenDeleteDead(zk, superUserAcl, ClusterUtils.BACKPRESSURE_SUBTREE, topoToZkCreds, fixUp);
        if (zk.checkExists().forPath(ClusterUtils.ERRORS_SUBTREE) != null) {
            // The per-topology error parents may be missing, so we need to auto create at least
            // the topo-id directory for all running topos.
            for (String topoId : topoToZkCreds.keySet()) {
                String path = ClusterUtils.errorStormRoot(topoId);
                if (zk.checkExists().forPath(path) == null) {
                    LOG.warn("Creating missing errors location {}", path);
                    zk.create().withACL(getTopoReadWrite(path, topoId, topoToZkCreds, superUserAcl, fixUp)).forPath(path);
                }
            }
        }
        // Errors should not be leaked according to the code, but they are not important enough to fail
        // the check if for some odd reason they are leaked.
        verifyParentWithReadWriteTopoChildrenDeleteDead(zk, superUserAcl, ClusterUtils.ERRORS_SUBTREE, topoToZkCreds, fixUp);
        if (zk.checkExists().forPath(ClusterUtils.SECRET_KEYS_SUBTREE) != null) {
            verifyAclStrict(zk, superAcl, ClusterUtils.SECRET_KEYS_SUBTREE, fixUp);
            verifyAclStrictRecursive(zk, superAcl, ClusterUtils.secretKeysPath(WorkerTokenServiceType.NIMBUS), fixUp);
            verifyAclStrictRecursive(zk, drpcFullAcl, ClusterUtils.secretKeysPath(WorkerTokenServiceType.DRPC), fixUp);
        }
        if (zk.checkExists().forPath(ClusterUtils.NIMBUSES_SUBTREE) != null) {
            verifyAclStrictRecursive(zk, superAcl, ClusterUtils.NIMBUSES_SUBTREE, fixUp);
        }
        if (zk.checkExists().forPath("/leader-lock") != null) {
            verifyAclStrictRecursive(zk, superAcl, "/leader-lock", fixUp);
        }
        if (zk.checkExists().forPath(ClusterUtils.PROFILERCONFIG_SUBTREE) != null) {
            verifyAclStrictRecursive(zk, superAcl, ClusterUtils.PROFILERCONFIG_SUBTREE, fixUp);
        }
        if (zk.checkExists().forPath(ClusterUtils.SUPERVISORS_SUBTREE) != null) {
            verifyAclStrictRecursive(zk, superAcl, ClusterUtils.SUPERVISORS_SUBTREE, fixUp);
        }
        // When moving to pacemaker workerbeats can be leaked too...
        verifyParentWithReadWriteTopoChildrenDeleteDead(zk, superUserAcl, ClusterUtils.WORKERBEATS_SUBTREE, topoToZkCreds, fixUp);
    }
}
Also used: HashMap (java.util.HashMap), DefaultWatcherCallBack (org.apache.storm.callback.DefaultWatcherCallBack), ArrayList (java.util.ArrayList), ACL (org.apache.storm.shade.org.apache.zookeeper.data.ACL), NoSuchAlgorithmException (java.security.NoSuchAlgorithmException), Subject (javax.security.auth.Subject), NimbusPrincipal (org.apache.storm.security.auth.NimbusPrincipal), CuratorFramework (org.apache.storm.shade.org.apache.curator.framework.CuratorFramework), List (java.util.List), Id (org.apache.storm.shade.org.apache.zookeeper.data.Id), BlobStore (org.apache.storm.blobstore.BlobStore), KeyNotFoundException (org.apache.storm.generated.KeyNotFoundException), HashSet (java.util.HashSet)
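As a rough sketch of how verifyAcls might be driven from an admin entry point: the wiring below and the "-fixup" flag name are assumptions for illustration, not the actual Storm CLI; only the readStormConfig and verifyAcls calls shown in these examples are used.

// Hypothetical driver, not the actual Storm CLI wiring: load the cluster config and
// run the ACL verification, optionally fixing anything that is found to be wrong.
public static void main(String[] args) throws Exception {
    Map<String, Object> conf = ConfigUtils.readStormConfig();
    // the "-fixup" flag name is an assumption made for this sketch
    boolean fixUp = args.length > 0 && "-fixup".equals(args[0]);
    AclEnforcement.verifyAcls(conf, fixUp);
}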

Example 3 with DefaultWatcherCallBack

Use of org.apache.storm.callback.DefaultWatcherCallBack in project storm by apache.

From the class Nimbus, method makeZKClient:

@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
private static CuratorFramework makeZKClient(Map<String, Object> conf) {
    List<String> servers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
    Object port = conf.get(Config.STORM_ZOOKEEPER_PORT);
    String root = (String) conf.get(Config.STORM_ZOOKEEPER_ROOT);
    CuratorFramework ret = null;
    if (servers != null && port != null) {
        ret = ClientZookeeper.mkClient(conf, servers, port, root, new DefaultWatcherCallBack(), conf, DaemonType.NIMBUS);
    }
    return ret;
}
Also used: CuratorFramework (org.apache.storm.shade.org.apache.curator.framework.CuratorFramework), DefaultWatcherCallBack (org.apache.storm.callback.DefaultWatcherCallBack), ArrayList (java.util.ArrayList), List (java.util.List)
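Because makeZKClient returns null when the ZooKeeper servers or port are not configured, a caller has to guard the result before using it. A minimal sketch of that guard follows; the surrounding context and the close-on-completion handling are assumptions for illustration, not the actual Nimbus code.

// Hypothetical caller sketch: guard against the null return and close the client when done.
CuratorFramework zkClient = makeZKClient(conf);
if (zkClient != null) {
    try {
        // use the client here, e.g. to read or watch cluster state
    } finally {
        zkClient.close(); // release the ZooKeeper connection
    }
}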

Aggregations

DefaultWatcherCallBack (org.apache.storm.callback.DefaultWatcherCallBack): 3
ArrayList (java.util.ArrayList): 2
List (java.util.List): 2
CuratorFramework (org.apache.storm.shade.org.apache.curator.framework.CuratorFramework): 2
NoSuchAlgorithmException (java.security.NoSuchAlgorithmException): 1
HashMap (java.util.HashMap): 1
HashSet (java.util.HashSet): 1
Subject (javax.security.auth.Subject): 1
CuratorFramework (org.apache.curator.framework.CuratorFramework): 1
BlobStore (org.apache.storm.blobstore.BlobStore): 1
ClusterStateContext (org.apache.storm.cluster.ClusterStateContext): 1
KeyNotFoundException (org.apache.storm.generated.KeyNotFoundException): 1
NimbusPrincipal (org.apache.storm.security.auth.NimbusPrincipal): 1
ACL (org.apache.storm.shade.org.apache.zookeeper.data.ACL): 1
Id (org.apache.storm.shade.org.apache.zookeeper.data.Id): 1
ACL (org.apache.zookeeper.data.ACL): 1