Example 1 with PrivilegedAction

Use of java.security.PrivilegedAction in project elasticsearch by elastic: class LeafDocLookup, method get.

@Override
public ScriptDocValues<?> get(Object key) {
    // assume it's a string...
    String fieldName = key.toString();
    ScriptDocValues<?> scriptValues = localCacheFieldData.get(fieldName);
    if (scriptValues == null) {
        final MappedFieldType fieldType = mapperService.fullName(fieldName);
        if (fieldType == null) {
            throw new IllegalArgumentException("No field found for [" + fieldName + "] in mapping with types " + Arrays.toString(types) + "");
        }
        // load fielddata on behalf of the script: otherwise it would need additional permissions
        // to deal with pagedbytes/ramusagestimator/etc
        scriptValues = AccessController.doPrivileged(new PrivilegedAction<ScriptDocValues<?>>() {

            @Override
            public ScriptDocValues<?> run() {
                return fieldDataService.getForField(fieldType).load(reader).getScriptValues();
            }
        });
        localCacheFieldData.put(fieldName, scriptValues);
    }
    scriptValues.setNextDocId(docId);
    return scriptValues;
}
Also used: PrivilegedAction (java.security.PrivilegedAction), MappedFieldType (org.elasticsearch.index.mapper.MappedFieldType)
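
On Java 8 and later, the anonymous PrivilegedAction above can be written as a lambda; a cast is needed because doPrivileged is overloaded for both PrivilegedAction and PrivilegedExceptionAction. A minimal sketch of the same pattern, where loadValues is a hypothetical stand-in for the fielddata load:

import java.security.AccessController;
import java.security.PrivilegedAction;

public class PrivilegedLookupSketch {

    // hypothetical stand-in for the fielddata load in the example above
    private static String loadValues(String fieldName) {
        return "values for " + fieldName;
    }

    public static String loadPrivileged(final String fieldName) {
        // the cast selects the PrivilegedAction overload of doPrivileged
        return AccessController.doPrivileged(
                (PrivilegedAction<String>) () -> loadValues(fieldName));
    }
}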

Example 2 with PrivilegedAction

Use of java.security.PrivilegedAction in project weave by continuuity: class AMRMClientImpl, method start.

@Override
public synchronized void start() {
    final YarnConfiguration conf = new YarnConfiguration(getConfig());
    final YarnRPC rpc = YarnRPC.create(conf);
    final InetSocketAddress rmAddress = conf.getSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
    UserGroupInformation currentUser;
    try {
        currentUser = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        throw new YarnException(e);
    }
    if (UserGroupInformation.isSecurityEnabled()) {
        String tokenURLEncodedStr = System.getenv().get(ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME);
        Token<? extends TokenIdentifier> token = new Token<TokenIdentifier>();
        try {
            token.decodeFromUrlString(tokenURLEncodedStr);
        } catch (IOException e) {
            throw new YarnException(e);
        }
        SecurityUtil.setTokenService(token, rmAddress);
        if (LOG.isDebugEnabled()) {
            LOG.debug("AppMasterToken is " + token);
        }
        currentUser.addToken(token);
    }
    rmClient = currentUser.doAs(new PrivilegedAction<AMRMProtocol>() {

        @Override
        public AMRMProtocol run() {
            return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rmAddress, conf);
        }
    });
    LOG.debug("Connecting to ResourceManager at " + rmAddress);
    super.start();
}
Also used: YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), PrivilegedAction (java.security.PrivilegedAction), InetSocketAddress (java.net.InetSocketAddress), Token (org.apache.hadoop.security.token.Token), AMRMProtocol (org.apache.hadoop.yarn.api.AMRMProtocol), YarnRPC (org.apache.hadoop.yarn.ipc.YarnRPC), IOException (java.io.IOException), YarnException (org.apache.hadoop.yarn.YarnException), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
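
UserGroupInformation.doAs also has an overload that takes a PrivilegedExceptionAction, which is the more natural choice when the wrapped work throws checked exceptions such as IOException. A minimal sketch under that assumption; readStatus is a hypothetical helper:

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {

    // hypothetical helper that performs work which may throw IOException
    private static String readStatus() throws IOException {
        return "ok";
    }

    public static String readAsCurrentUser() throws IOException, InterruptedException {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // doAs(PrivilegedExceptionAction) lets checked exceptions propagate
        // instead of forcing a wrap-and-rethrow inside run()
        return ugi.doAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws IOException {
                return readStatus();
            }
        });
    }
}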

Example 3 with PrivilegedAction

Use of java.security.PrivilegedAction in project hbase by apache: class SecureBulkLoadManager, method secureBulkLoadHFiles.

public Map<byte[], List<Path>> secureBulkLoadHFiles(final Region region, final BulkLoadHFileRequest request) throws IOException {
    final List<Pair<byte[], String>> familyPaths = new ArrayList<>(request.getFamilyPathCount());
    for (ClientProtos.BulkLoadHFileRequest.FamilyPath el : request.getFamilyPathList()) {
        familyPaths.add(new Pair<>(el.getFamily().toByteArray(), el.getPath()));
    }
    Token userToken = null;
    if (userProvider.isHadoopSecurityEnabled()) {
        userToken = new Token(request.getFsToken().getIdentifier().toByteArray(), request.getFsToken().getPassword().toByteArray(), new Text(request.getFsToken().getKind()), new Text(request.getFsToken().getService()));
    }
    final String bulkToken = request.getBulkToken();
    User user = getActiveUser();
    final UserGroupInformation ugi = user.getUGI();
    if (userProvider.isHadoopSecurityEnabled()) {
        try {
            Token tok = TokenUtil.obtainToken(conn);
            if (tok != null) {
                boolean b = ugi.addToken(tok);
                LOG.debug("token added " + tok + " for user " + ugi + " return=" + b);
            }
        } catch (IOException ioe) {
            LOG.warn("unable to add token", ioe);
        }
    }
    if (userToken != null) {
        ugi.addToken(userToken);
    } else if (userProvider.isHadoopSecurityEnabled()) {
        // a null user token is only allowed when Hadoop security is disabled (e.g. mini cluster testing)
        throw new DoNotRetryIOException("User token cannot be null");
    }
    boolean bypass = false;
    if (region.getCoprocessorHost() != null) {
        bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
    }
    boolean loaded = false;
    Map<byte[], List<Path>> map = null;
    try {
        if (!bypass) {
            // after this point the doAs user holds two tokens: one for the source fs
            // ('request user'), another for the target fs (HBase region server principal).
            if (userProvider.isHadoopSecurityEnabled()) {
                FsDelegationToken targetfsDelegationToken = new FsDelegationToken(userProvider, "renewer");
                targetfsDelegationToken.acquireDelegationToken(fs);
                Token<?> targetFsToken = targetfsDelegationToken.getUserToken();
                if (targetFsToken != null && (userToken == null || !targetFsToken.getService().equals(userToken.getService()))) {
                    ugi.addToken(targetFsToken);
                }
            }
            map = ugi.doAs(new PrivilegedAction<Map<byte[], List<Path>>>() {

                @Override
                public Map<byte[], List<Path>> run() {
                    FileSystem fs = null;
                    try {
                        fs = FileSystem.get(conf);
                        for (Pair<byte[], String> el : familyPaths) {
                            Path stageFamily = new Path(bulkToken, Bytes.toString(el.getFirst()));
                            if (!fs.exists(stageFamily)) {
                                fs.mkdirs(stageFamily);
                                fs.setPermission(stageFamily, PERM_ALL_ACCESS);
                            }
                        }
                        // run bulkLoadHFiles as the requesting user to enable access prior to staging
                        return region.bulkLoadHFiles(familyPaths, true, new SecureBulkLoadListener(fs, bulkToken, conf), request.getCopyFile());
                    } catch (Exception e) {
                        LOG.error("Failed to complete bulk load", e);
                    }
                    return null;
                }
            });
            if (map != null) {
                loaded = true;
            }
        }
    } finally {
        if (region.getCoprocessorHost() != null) {
            region.getCoprocessorHost().postBulkLoadHFile(familyPaths, map, loaded);
        }
    }
    return map;
}
Also used: Path (org.apache.hadoop.fs.Path), User (org.apache.hadoop.hbase.security.User), BulkLoadHFileRequest (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), ArrayList (java.util.ArrayList), Token (org.apache.hadoop.security.token.Token), FsDelegationToken (org.apache.hadoop.hbase.security.token.FsDelegationToken), Text (org.apache.hadoop.io.Text), IOException (java.io.IOException), PrivilegedAction (java.security.PrivilegedAction), FileSystem (org.apache.hadoop.fs.FileSystem), List (java.util.List), Pair (org.apache.hadoop.hbase.util.Pair), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
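
The core pattern in this example is running filesystem work under the caller's identity: attach the caller's token to a UserGroupInformation and perform the FileSystem calls inside doAs. A stripped-down sketch of that pattern, assuming the token and staging path are supplied by the caller:

import java.security.PrivilegedAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class StagingDirSketch {

    // create the staging directory as the requesting user, not as the server principal
    public static boolean createStagingDir(final Configuration conf,
                                           String requestUser,
                                           Token<?> userToken,
                                           final Path stagingDir) {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(requestUser);
        if (userToken != null) {
            // the token lets the remote-user UGI authenticate to the filesystem
            ugi.addToken(userToken);
        }
        return ugi.doAs(new PrivilegedAction<Boolean>() {
            @Override
            public Boolean run() {
                try {
                    FileSystem fs = FileSystem.get(conf);
                    return fs.mkdirs(stagingDir);
                } catch (Exception e) {
                    // a real implementation would log the failure
                    return false;
                }
            }
        });
    }
}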

Example 4 with PrivilegedAction

Use of java.security.PrivilegedAction in project hadoop by apache: class TestAMRMClient, method testAMRMClientOnAMRMTokenRollOver.

@Test(timeout = 60000)
public void testAMRMClientOnAMRMTokenRollOver() throws YarnException, IOException {
    AMRMClient<ContainerRequest> amClient = null;
    try {
        AMRMTokenSecretManager amrmTokenSecretManager = yarnCluster.getResourceManager().getRMContext().getAMRMTokenSecretManager();
        // start am rm client
        amClient = AMRMClient.<ContainerRequest>createAMRMClient();
        amClient.init(conf);
        amClient.start();
        Long startTime = System.currentTimeMillis();
        amClient.registerApplicationMaster("Host", 10000, "");
        org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken_1 = getAMRMToken();
        Assert.assertNotNull(amrmToken_1);
        Assert.assertEquals(amrmToken_1.decodeIdentifier().getKeyId(), amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId());
        // In the meantime, the old AMRMToken should continue to work
        while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) {
            amClient.allocate(0.1f);
            sleep(1000);
        }
        amClient.allocate(0.1f);
        org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken_2 = getAMRMToken();
        Assert.assertNotNull(amrmToken_2);
        Assert.assertEquals(amrmToken_2.decodeIdentifier().getKeyId(), amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId());
        Assert.assertNotEquals(amrmToken_1, amrmToken_2);
        // make the allocate call with the latest AMRMToken
        AllocateResponse response = amClient.allocate(0.1f);
        // Verify the latest AMRMToken can be used to send an allocation request.
        UserGroupInformation testUser1 = UserGroupInformation.createRemoteUser("testUser1");
        AMRMTokenIdentifierForTest newVersionTokenIdentifier = new AMRMTokenIdentifierForTest(amrmToken_2.decodeIdentifier(), "message");
        Assert.assertEquals("Message is changed after set to newVersionTokenIdentifier", "message", newVersionTokenIdentifier.getMessage());
        org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> newVersionToken = new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(newVersionTokenIdentifier.getBytes(), amrmTokenSecretManager.retrievePassword(newVersionTokenIdentifier), newVersionTokenIdentifier.getKind(), new Text());
        SecurityUtil.setTokenService(newVersionToken, yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress());
        testUser1.addToken(newVersionToken);
        AllocateRequest request = Records.newRecord(AllocateRequest.class);
        request.setResponseId(response.getResponseId());
        testUser1.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {

            @Override
            public ApplicationMasterProtocol run() {
                return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy(ApplicationMasterProtocol.class, yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress(), conf);
            }
        }).allocate(request);
        // wait until the rollover completes; the rolled-over (old) token should then fail allocate calls
        while (true) {
            if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getCurrnetMasterKeyData().getMasterKey().getKeyId()) {
                if (amrmTokenSecretManager.getNextMasterKeyData() == null) {
                    break;
                } else if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getNextMasterKeyData().getMasterKey().getKeyId()) {
                    break;
                }
            }
            amClient.allocate(0.1f);
            sleep(1000);
        }
        try {
            UserGroupInformation testUser2 = UserGroupInformation.createRemoteUser("testUser2");
            SecurityUtil.setTokenService(amrmToken_2, yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress());
            testUser2.addToken(amrmToken_2);
            testUser2.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {

                @Override
                public ApplicationMasterProtocol run() {
                    return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy(ApplicationMasterProtocol.class, yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress(), conf);
                }
            }).allocate(Records.newRecord(AllocateRequest.class));
            Assert.fail("The old Token should not work");
        } catch (Exception ex) {
            Assert.assertTrue(ex instanceof InvalidToken);
            Assert.assertTrue(ex.getMessage().contains("Invalid AMRMToken from " + amrmToken_2.decodeIdentifier().getApplicationAttemptId()));
        }
        amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
    } finally {
        if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
            amClient.stop();
        }
    }
}
Also used: AllocateRequest (org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest), InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken), Text (org.apache.hadoop.io.Text), ApplicationMasterProtocol (org.apache.hadoop.yarn.api.ApplicationMasterProtocol), AMRMTokenSecretManager (org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), IOException (java.io.IOException), InvalidContainerRequestException (org.apache.hadoop.yarn.client.api.InvalidContainerRequestException), AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse), AMRMTokenIdentifier (org.apache.hadoop.yarn.security.AMRMTokenIdentifier), PrivilegedAction (java.security.PrivilegedAction), ContainerRequest (org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
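
The distinctive step in this test is probing a specific token: bind it to the service address, attach it to a throwaway remote-user UGI, and open a proxy as that user. A condensed sketch of that step, with the configuration, service address, token, and user name passed in as assumptions:

import java.net.InetSocketAddress;
import java.security.PrivilegedAction;

import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;

public class TokenProbeSketch {

    // open an ApplicationMasterProtocol proxy as a throwaway user that holds only the given token
    public static ApplicationMasterProtocol proxyWithToken(final YarnConfiguration conf,
                                                           final InetSocketAddress amService,
                                                           Token<AMRMTokenIdentifier> token,
                                                           String userName) {
        // bind the token to the service it will be presented to
        SecurityUtil.setTokenService(token, amService);
        UserGroupInformation user = UserGroupInformation.createRemoteUser(userName);
        user.addToken(token);
        // the cast resolves the PrivilegedAction overload of doAs
        return user.doAs((PrivilegedAction<ApplicationMasterProtocol>) () ->
                (ApplicationMasterProtocol) YarnRPC.create(conf)
                        .getProxy(ApplicationMasterProtocol.class, amService, conf));
    }
}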

Example 5 with PrivilegedAction

Use of java.security.PrivilegedAction in project hadoop by apache: class TestAMRMClientOnRMRestart, method testAMRMClientOnAMRMTokenRollOverOnRMRestart.

// Test verifies that an AM issued a rolled-over AMRMToken
// is still able to communicate with the restarted RM.
@Test(timeout = 30000)
public void testAMRMClientOnAMRMTokenRollOverOnRMRestart() throws Exception {
    conf.setLong(YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, rolling_interval_sec);
    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
    MemoryRMStateStore memStore = new MemoryRMStateStore();
    memStore.init(conf);
    // start first RM
    MyResourceManager2 rm1 = new MyResourceManager2(conf, memStore);
    rm1.start();
    DrainDispatcher dispatcher = (DrainDispatcher) rm1.getRMContext().getDispatcher();
    Long startTime = System.currentTimeMillis();
    // Submit the application
    RMApp app = rm1.submitApp(1024);
    dispatcher.await();
    MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
    nm1.registerNode();
    // Node heartbeat
    nm1.nodeHeartbeat(true);
    dispatcher.await();
    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
    rm1.sendAMLaunched(appAttemptId);
    dispatcher.await();
    AMRMTokenSecretManager amrmTokenSecretManagerForRM1 = rm1.getRMContext().getAMRMTokenSecretManager();
    org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> token = amrmTokenSecretManagerForRM1.createAndGetAMRMToken(appAttemptId);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    ugi.addTokenIdentifier(token.decodeIdentifier());
    AMRMClient<ContainerRequest> amClient = new MyAMRMClientImpl(rm1);
    amClient.init(conf);
    amClient.start();
    amClient.registerApplicationMaster("h1", 10000, "");
    amClient.allocate(0.1f);
    // In the meantime, the old AMRMToken should continue to work
    while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) {
        amClient.allocate(0.1f);
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        // DO NOTHING
        }
    }
    Assert.assertTrue(amrmTokenSecretManagerForRM1.getMasterKey().getMasterKey().getKeyId() != token.decodeIdentifier().getKeyId());
    amClient.allocate(0.1f);
    // activate the nextMasterKey and replace the currentMasterKey
    org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> newToken = amrmTokenSecretManagerForRM1.createAndGetAMRMToken(appAttemptId);
    int waitCount = 0;
    while (waitCount++ <= 50) {
        if (amrmTokenSecretManagerForRM1.getCurrnetMasterKeyData().getMasterKey().getKeyId() != token.decodeIdentifier().getKeyId()) {
            break;
        }
        try {
            amClient.allocate(0.1f);
        } catch (Exception ex) {
            break;
        }
        Thread.sleep(500);
    }
    Assert.assertTrue(amrmTokenSecretManagerForRM1.getNextMasterKeyData() == null);
    Assert.assertTrue(amrmTokenSecretManagerForRM1.getCurrnetMasterKeyData().getMasterKey().getKeyId() == newToken.decodeIdentifier().getKeyId());
    // start 2nd RM
    conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, "0.0.0.0:" + ServerSocketUtil.getPort(45020, 10));
    final MyResourceManager2 rm2 = new MyResourceManager2(conf, memStore);
    rm2.start();
    nm1.setResourceTrackerService(rm2.getResourceTrackerService());
    ((MyAMRMClientImpl) amClient).updateRMProxy(rm2);
    dispatcher = (DrainDispatcher) rm2.getRMContext().getDispatcher();
    AMRMTokenSecretManager amrmTokenSecretManagerForRM2 = rm2.getRMContext().getAMRMTokenSecretManager();
    Assert.assertTrue(amrmTokenSecretManagerForRM2.getCurrnetMasterKeyData().getMasterKey().getKeyId() == newToken.decodeIdentifier().getKeyId());
    Assert.assertTrue(amrmTokenSecretManagerForRM2.getNextMasterKeyData() == null);
    try {
        UserGroupInformation testUser = UserGroupInformation.createRemoteUser("testUser");
        SecurityUtil.setTokenService(token, rm2.getApplicationMasterService().getBindAddress());
        testUser.addToken(token);
        testUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {

            @Override
            public ApplicationMasterProtocol run() {
                return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy(ApplicationMasterProtocol.class, rm2.getApplicationMasterService().getBindAddress(), conf);
            }
        }).allocate(Records.newRecord(AllocateRequest.class));
        Assert.fail("The old Token should not work");
    } catch (Exception ex) {
        Assert.assertTrue(ex instanceof InvalidToken);
        Assert.assertTrue(ex.getMessage().contains("Invalid AMRMToken from " + token.decodeIdentifier().getApplicationAttemptId()));
    }
    // make sure the recovered AMRMToken works with the new RM
    amClient.allocate(0.1f);
    amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
    amClient.stop();
    rm1.stop();
    rm2.stop();
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), AllocateRequest (org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest), ApplicationMasterProtocol (org.apache.hadoop.yarn.api.ApplicationMasterProtocol), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), AMRMTokenSecretManager (org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager), IOException (java.io.IOException), MemoryRMStateStore (org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore), AMRMTokenIdentifier (org.apache.hadoop.yarn.security.AMRMTokenIdentifier), PrivilegedAction (java.security.PrivilegedAction), InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken), ContainerRequest (org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest), UpdateContainerRequest (org.apache.hadoop.yarn.api.records.UpdateContainerRequest), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)

Aggregations

PrivilegedAction (java.security.PrivilegedAction): 358
IOException (java.io.IOException): 85
Subject (javax.security.auth.Subject): 61
AccessControlContext (java.security.AccessControlContext): 31
File (java.io.File): 29
HashMap (java.util.HashMap): 29
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 29
Method (java.lang.reflect.Method): 23
ArrayList (java.util.ArrayList): 23
ClientResponse (com.sun.jersey.api.client.ClientResponse): 21
InputStream (java.io.InputStream): 21
URL (java.net.URL): 21
FileNotFoundException (java.io.FileNotFoundException): 18
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 18
Iterator (java.util.Iterator): 18
MalformedURLException (java.net.MalformedURLException): 17
List (java.util.List): 17
UnknownHostException (java.net.UnknownHostException): 16
Principal (java.security.Principal): 15
PrivilegedActionException (java.security.PrivilegedActionException): 15
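
After IOException, Subject (javax.security.auth.Subject, 61 occurrences) is the most frequent co-used class in the aggregation above; outside Hadoop, a PrivilegedAction is typically run under a JAAS identity via Subject.doAs. A minimal sketch, assuming an already-populated Subject:

import java.security.PrivilegedAction;
import javax.security.auth.Subject;

public class SubjectDoAsSketch {

    // run a PrivilegedAction with the given Subject bound to the access-control context
    public static String describe(Subject subject) {
        return Subject.doAs(subject, new PrivilegedAction<String>() {
            @Override
            public String run() {
                // report how many principals the subject carries
                return "principals: " + subject.getPrincipals().size();
            }
        });
    }
}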