Search in sources:

Example 1 with SaslDataTransferClient

Use of org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient in project hbase by apache.

From the class FanOutOneBlockAsyncDFSOutputSaslHelper, method createSaslAdaptor:

private static SaslAdaptor createSaslAdaptor() throws NoSuchFieldException, NoSuchMethodException {
    // saslPropsResolver, trustedChannelResolver and fallbackToSimpleAuth are private fields of
    // SaslDataTransferClient, so grab their Field handles reflectively once and capture them in
    // the adaptor returned below.
    Field saslPropsResolverField = SaslDataTransferClient.class.getDeclaredField("saslPropsResolver");
    saslPropsResolverField.setAccessible(true);
    Field trustedChannelResolverField = SaslDataTransferClient.class.getDeclaredField("trustedChannelResolver");
    trustedChannelResolverField.setAccessible(true);
    Field fallbackToSimpleAuthField = SaslDataTransferClient.class.getDeclaredField("fallbackToSimpleAuth");
    fallbackToSimpleAuthField.setAccessible(true);
    return new SaslAdaptor() {

        @Override
        public TrustedChannelResolver getTrustedChannelResolver(SaslDataTransferClient saslClient) {
            try {
                return (TrustedChannelResolver) trustedChannelResolverField.get(saslClient);
            } catch (IllegalAccessException e) {
                throw new RuntimeException(e);
            }
        }

        @Override
        public SaslPropertiesResolver getSaslPropsResolver(SaslDataTransferClient saslClient) {
            try {
                return (SaslPropertiesResolver) saslPropsResolverField.get(saslClient);
            } catch (IllegalAccessException e) {
                throw new RuntimeException(e);
            }
        }

        @Override
        public AtomicBoolean getFallbackToSimpleAuth(SaslDataTransferClient saslClient) {
            try {
                return (AtomicBoolean) fallbackToSimpleAuthField.get(saslClient);
            } catch (IllegalAccessException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
Also used: Field (java.lang.reflect.Field), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), TrustedChannelResolver (org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver), SaslPropertiesResolver (org.apache.hadoop.security.SaslPropertiesResolver), SaslDataTransferClient (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient)
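
For reference, the SaslAdaptor type implemented by the anonymous class above has the shape sketched below. This is a reconstruction inferred from the three overridden methods; the actual interface is a private member of FanOutOneBlockAsyncDFSOutputSaslHelper, so the exact declaration may differ.

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.apache.hadoop.security.SaslPropertiesResolver;

// Sketch of the adaptor contract: each method exposes one private field of SaslDataTransferClient.
interface SaslAdaptor {

    TrustedChannelResolver getTrustedChannelResolver(SaslDataTransferClient saslClient);

    SaslPropertiesResolver getSaslPropsResolver(SaslDataTransferClient saslClient);

    AtomicBoolean getFallbackToSimpleAuth(SaslDataTransferClient saslClient);
}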

Example 2 with SaslDataTransferClient

Use of org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient in project hbase by apache.

From the class FanOutOneBlockAsyncDFSOutputSaslHelper, method trySaslNegotiate:

static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo, int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Void> saslPromise) throws IOException {
    SaslDataTransferClient saslClient = client.getSaslDataTransferClient();
    SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient);
    TrustedChannelResolver trustedChannelResolver = SASL_ADAPTOR.getTrustedChannelResolver(saslClient);
    AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient);
    InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress();
    if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) {
        saslPromise.trySuccess(null);
        return;
    }
    DataEncryptionKey encryptionKey = client.newDataEncryptionKey();
    if (encryptionKey != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client doing encrypted handshake for addr = " + addr + ", datanodeId = " + dnInfo);
        }
        doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey), encryptionKeyToPassword(encryptionKey.encryptionKey), createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise);
    } else if (!UserGroupInformation.isSecurityEnabled()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr + ", datanodeId = " + dnInfo);
        }
        saslPromise.trySuccess(null);
    } else if (dnInfo.getXferPort() < 1024) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client skipping handshake in secured configuration with " + "privileged port for addr = " + addr + ", datanodeId = " + dnInfo);
        }
        saslPromise.trySuccess(null);
    } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client skipping handshake in secured configuration with " + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo);
        }
        saslPromise.trySuccess(null);
    } else if (saslPropsResolver != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client doing general handshake for addr = " + addr + ", datanodeId = " + dnInfo);
        }
        doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken), buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise);
    } else {
        // Secured cluster, non-privileged port and no SASL protection configured; a rare edge
        // case, so skip the handshake.
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client skipping handshake in secured configuration with no SASL " + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
        }
        saslPromise.trySuccess(null);
    }
}
Also used: AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), DataEncryptionKey (org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey), InetSocketAddress (java.net.InetSocketAddress), TrustedChannelResolver (org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver), SaslPropertiesResolver (org.apache.hadoop.security.SaslPropertiesResolver), InetAddress (java.net.InetAddress), SaslDataTransferClient (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient)
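
Example 2 drives the adaptor from Example 1: the resolvers and fallback flag pulled out of the DFSClient's SaslDataTransferClient decide whether the handshake is skipped or doSaslNegotiation is called. Below is a minimal sketch of how a caller might invoke trySaslNegotiate. The wiring (creating the Promise on the channel's event loop and reacting to its completion), the class name SaslNegotiateCallerSketch and the method negotiate are assumptions for illustration, not code from the HBase source; trySaslNegotiate is package-private, so such a caller would live in the same package as FanOutOneBlockAsyncDFSOutputSaslHelper, and the netty types may be shaded in HBase (plain io.netty imports are used here).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

import io.netty.channel.Channel;
import io.netty.util.concurrent.Promise;

final class SaslNegotiateCallerSketch {

    // Hypothetical helper: start SASL negotiation on an already-connected channel and return the
    // promise that completes once the handshake finishes or is skipped.
    static Promise<Void> negotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo,
            int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken) throws IOException {
        // Create the promise on the channel's event loop so listeners run on the I/O thread.
        Promise<Void> saslPromise = channel.eventLoop().newPromise();
        FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate(conf, channel, dnInfo, timeoutMs,
            client, accessToken, saslPromise);
        saslPromise.addListener(f -> {
            if (!f.isSuccess()) {
                // Negotiation failed; close the channel so the pending output fails fast.
                channel.close();
            }
            // On success (or a skipped handshake) the caller can start streaming block data.
        });
        return saslPromise;
    }
}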

Example 3 with SaslDataTransferClient

Use of org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient in project hadoop by apache.

From the class DataNode, method startDataNode:

/**
   * This method starts the data node with the specified conf.
   * 
   * If conf's CONFIG_PROPERTY_SIMULATED property is set
   * then a simulated storage based data node is created.
   * 
   * @param dataDirectories - only for a non-simulated storage data node
   * @param resources - secure resources needed when running with privileged ports; may be null
   * @throws IOException
   */
void startDataNode(List<StorageLocation> dataDirectories, SecureResources resources) throws IOException {
    // settings global for all BPs in the Data Node
    this.secureResources = resources;
    synchronized (this) {
        this.dataDirs = dataDirectories;
    }
    this.dnConf = new DNConf(this);
    checkSecureConfig(dnConf, getConf(), resources);
    if (dnConf.maxLockedMemory > 0) {
        if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
            throw new RuntimeException(String.format("Cannot start datanode because the configured max locked memory" + " size (%s) is greater than zero and native code is not available.", DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
        }
        if (Path.WINDOWS) {
            NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
        } else {
            long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
            if (dnConf.maxLockedMemory > ulimit) {
                throw new RuntimeException(String.format("Cannot start datanode because the configured max locked memory" + " size (%s) of %d bytes is more than the datanode's available" + " RLIMIT_MEMLOCK ulimit of %d bytes.", DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, dnConf.maxLockedMemory, ulimit));
            }
        }
    }
    LOG.info("Starting DataNode with maxLockedMemory = " + dnConf.maxLockedMemory);
    int volFailuresTolerated = dnConf.getVolFailuresTolerated();
    int volsConfigured = dnConf.getVolsConfigured();
    if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
        throw new DiskErrorException("Invalid value configured for " + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated + ". Value configured is either less than 0 or >= " + "to the number of configured volumes (" + volsConfigured + ").");
    }
    storage = new DataStorage();
    // global DN settings
    registerMXBean();
    initDataXceiver();
    startInfoServer();
    pauseMonitor = new JvmPauseMonitor();
    pauseMonitor.init(getConf());
    pauseMonitor.start();
    // BlockPoolTokenSecretManager is required to create ipc server.
    this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
    // Login is done by now. Set the DN user name.
    dnUserName = UserGroupInformation.getCurrentUser().getUserName();
    LOG.info("dnUserName = " + dnUserName);
    LOG.info("supergroup = " + supergroup);
    initIpcServer();
    metrics = DataNodeMetrics.create(getConf(), getDisplayName());
    peerMetrics = dnConf.peerStatsEnabled ? DataNodePeerMetrics.create(getConf(), getDisplayName()) : null;
    metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
    ecWorker = new ErasureCodingWorker(getConf(), this);
    blockRecoveryWorker = new BlockRecoveryWorker(this);
    blockPoolManager = new BlockPoolManager(this);
    blockPoolManager.refreshNamenodes(getConf());
    // Create the ReadaheadPool from the DataNode context so we can
    // exit without having to explicitly shutdown its thread pool.
    readaheadPool = ReadaheadPool.getInstance();
    saslClient = new SaslDataTransferClient(dnConf.getConf(), dnConf.saslPropsResolver, dnConf.trustedChannelResolver);
    saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
    startMetricsLogger();
    if (dnConf.diskStatsEnabled) {
        diskMetrics = new DataNodeDiskMetrics(this, dnConf.outliersReportIntervalMs);
    }
}
Also used: DataNodeDiskMetrics (org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeDiskMetrics), ErasureCodingWorker (org.apache.hadoop.hdfs.server.datanode.erasurecode.ErasureCodingWorker), DiskErrorException (org.apache.hadoop.util.DiskChecker.DiskErrorException), SaslDataTransferServer (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer), BlockPoolTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager), JvmPauseMonitor (org.apache.hadoop.util.JvmPauseMonitor), SaslDataTransferClient (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient)
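
The saslClient/saslServer pair created in Example 3 is driven by dnConf.saslPropsResolver and dnConf.trustedChannelResolver, which DNConf derives from the HDFS configuration. A minimal sketch of the relevant settings follows; the key names are standard HDFS configuration properties, but the values and the class name DataTransferSaslConfSketch are illustrative assumptions, not taken from the example above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class DataTransferSaslConfSketch {

    public static Configuration create() {
        Configuration conf = new HdfsConfiguration();
        // Require SASL protection on the data-transfer protocol; accepted values are
        // "authentication", "integrity" and "privacy".
        conf.set("dfs.data.transfer.protection", "privacy");
        // Block access tokens are generally required alongside SASL data transfer protection
        // in a secured cluster.
        conf.setBoolean("dfs.block.access.token.enable", true);
        // Wire-encryption alternative; when enabled, clients take the encrypted-handshake branch
        // seen in trySaslNegotiate above instead of the general SASL handshake.
        conf.setBoolean("dfs.encrypt.data.transfer", false);
        return conf;
    }
}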

Aggregations

SaslDataTransferClient (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient): 3
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2
TrustedChannelResolver (org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver): 2
SaslPropertiesResolver (org.apache.hadoop.security.SaslPropertiesResolver): 2
Field (java.lang.reflect.Field): 1
InetAddress (java.net.InetAddress): 1
InetSocketAddress (java.net.InetSocketAddress): 1
SaslDataTransferServer (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer): 1
BlockPoolTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager): 1
DataEncryptionKey (org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey): 1
ErasureCodingWorker (org.apache.hadoop.hdfs.server.datanode.erasurecode.ErasureCodingWorker): 1
DataNodeDiskMetrics (org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeDiskMetrics): 1
DiskErrorException (org.apache.hadoop.util.DiskChecker.DiskErrorException): 1
JvmPauseMonitor (org.apache.hadoop.util.JvmPauseMonitor): 1