Use of org.apache.hadoop.security.authorize.AuthorizationException in project hadoop by apache.
The class DelegationTokenAuthenticationFilter, method doFilter.
@Override
protected void doFilter(FilterChain filterChain, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
  boolean requestCompleted = false;
  UserGroupInformation ugi = null;
  AuthenticationToken authToken = (AuthenticationToken) request.getUserPrincipal();
  if (authToken != null && authToken != AuthenticationToken.ANONYMOUS) {
    // if the request was authenticated because of a delegation token,
    // then we ignore proxyuser (this is the same as the RPC behavior).
    ugi = (UserGroupInformation) request.getAttribute(DelegationTokenAuthenticationHandler.DELEGATION_TOKEN_UGI_ATTRIBUTE);
    if (ugi == null) {
      String realUser = request.getUserPrincipal().getName();
      ugi = UserGroupInformation.createRemoteUser(realUser, handlerAuthMethod);
      String doAsUser = getDoAs(request);
      if (doAsUser != null) {
        ugi = UserGroupInformation.createProxyUser(doAsUser, ugi);
        try {
          ProxyUsers.authorize(ugi, request.getRemoteAddr());
        } catch (AuthorizationException ex) {
          HttpExceptionUtils.createServletExceptionResponse(response, HttpServletResponse.SC_FORBIDDEN, ex);
          requestCompleted = true;
          if (LOG.isDebugEnabled()) {
            LOG.debug("Authentication exception: " + ex.getMessage(), ex);
          } else {
            LOG.warn("Authentication exception: " + ex.getMessage());
          }
        }
      }
    }
    UGI_TL.set(ugi);
  }
  if (!requestCompleted) {
    final UserGroupInformation ugiF = ugi;
    try {
      request = new HttpServletRequestWrapper(request) {
        @Override
        public String getAuthType() {
          return (ugiF != null) ? handlerAuthMethod.toString() : null;
        }
        @Override
        public String getRemoteUser() {
          return (ugiF != null) ? ugiF.getShortUserName() : null;
        }
        @Override
        public Principal getUserPrincipal() {
          return (ugiF != null) ? new Principal() {
            @Override
            public String getName() {
              return ugiF.getUserName();
            }
          } : null;
        }
      };
      super.doFilter(filterChain, request, response);
    } finally {
      UGI_TL.remove();
    }
  }
}
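The ProxyUsers.authorize call above succeeds only if impersonation is enabled for the real (authenticated) user in the server's Configuration. A minimal sketch of the setup it expects, using the standard hadoop.proxyuser.* keys; the user names "httpfs" and "alice" and the address are illustrative assumptions, not taken from the filter.
// Sketch only: configure impersonation so ProxyUsers.authorize() passes.
// "httpfs" and "alice" are assumed names used purely for illustration.
Configuration conf = new Configuration();
conf.set("hadoop.proxyuser.httpfs.hosts", "*");   // hosts allowed to impersonate
conf.set("hadoop.proxyuser.httpfs.groups", "*");  // groups whose users may be impersonated
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
UserGroupInformation realUgi = UserGroupInformation.createRemoteUser("httpfs");
UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser("alice", realUgi);
try {
  // Same check the filter performs; throws AuthorizationException when not allowed.
  ProxyUsers.authorize(proxyUgi, "127.0.0.1");
} catch (AuthorizationException ex) {
  // The filter maps this case to an HTTP 403 via HttpExceptionUtils.
}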
Use of org.apache.hadoop.security.authorize.AuthorizationException in project hadoop by apache.
The class TestRPC, method doRPCs.
private void doRPCs(Configuration myConf, boolean expectFailure) throws Exception {
  Server server;
  TestRpcService proxy = null;
  server = setupTestServer(myConf, 5);
  server.refreshServiceAcl(myConf, new TestPolicyProvider());
  TestProtos.EmptyRequestProto emptyRequestProto = TestProtos.EmptyRequestProto.newBuilder().build();
  try {
    proxy = getClient(addr, conf);
    proxy.ping(null, emptyRequestProto);
    if (expectFailure) {
      fail("Expect RPC.getProxy to fail with AuthorizationException!");
    }
  } catch (ServiceException e) {
    if (expectFailure) {
      RemoteException re = (RemoteException) e.getCause();
      assertTrue(re.unwrapRemoteException() instanceof AuthorizationException);
      assertEquals("RPC error code should be UNAUTHORIZED", RpcErrorCodeProto.FATAL_UNAUTHORIZED, re.getErrorCode());
    } else {
      throw e;
    }
  } finally {
    MetricsRecordBuilder rb = getMetrics(server.rpcMetrics.name());
    if (expectFailure) {
      assertCounter("RpcAuthorizationFailures", 1L, rb);
    } else {
      assertCounter("RpcAuthorizationSuccesses", 1L, rb);
    }
    // since we don't have authentication turned ON, we should see
    // 0 for the authentication successes and 0 for failure
    assertCounter("RpcAuthenticationFailures", 0L, rb);
    assertCounter("RpcAuthenticationSuccesses", 0L, rb);
    stop(server, proxy);
  }
}
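A hedged sketch of how a test might drive doRPCs so the server-side ACL check trips: enable hadoop.security.authorization and set the protocol's ACL to exclude the caller. The ACL key below is an illustrative assumption; the real key is whatever TestPolicyProvider maps TestRpcService to.
// Sketch only: "security.test.protocol.acl" is an assumed key for illustration.
Configuration authConf = new Configuration();
authConf.setBoolean("hadoop.security.authorization", true);
authConf.set("security.test.protocol.acl", "some-other-user"); // caller not in the ACL
doRPCs(authConf, true);   // ping should fail with FATAL_UNAUTHORIZED / AuthorizationException
authConf.set("security.test.protocol.acl", "*");               // everyone allowed
doRPCs(authConf, false);  // ping should succeed and bump RpcAuthorizationSuccesses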
Use of org.apache.hadoop.security.authorize.AuthorizationException in project hadoop by apache.
The class RpcProgramNfs3, method access.
@VisibleForTesting
ACCESS3Response access(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
  ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK);
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  ACCESS3Request request;
  try {
    request = ACCESS3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid ACCESS request");
    return new ACCESS3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle handle = request.getHandle();
  Nfs3FileAttributes attrs;
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS ACCESS fileId: " + handle.getFileId() + " client: " + remoteAddress);
  }
  try {
    attrs = writeManager.getFileAttr(dfsClient, handle, iug);
    if (attrs == null) {
      LOG.error("Can't get path for fileId: " + handle.getFileId());
      return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE);
    }
    if (iug.getUserName(securityHandler.getUid(), "unknown").equals(superuser)) {
      int access = Nfs3Constant.ACCESS3_LOOKUP | Nfs3Constant.ACCESS3_DELETE | Nfs3Constant.ACCESS3_EXECUTE | Nfs3Constant.ACCESS3_EXTEND | Nfs3Constant.ACCESS3_MODIFY | Nfs3Constant.ACCESS3_READ;
      return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access);
    }
    int access = Nfs3Utils.getAccessRightsForUserGroup(securityHandler.getUid(), securityHandler.getGid(), securityHandler.getAuxGids(), attrs);
    return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access);
  } catch (RemoteException r) {
    LOG.warn("Exception ", r);
    IOException io = r.unwrapRemoteException();
    /**
     * AuthorizationException can be thrown if the user can't be proxy'ed.
     */
    if (io instanceof AuthorizationException) {
      return new ACCESS3Response(Nfs3Status.NFS3ERR_ACCES);
    } else {
      return new ACCESS3Response(Nfs3Status.NFS3ERR_IO);
    }
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    int status = mapErrorStatus(e);
    return new ACCESS3Response(status);
  }
}
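The RemoteException handling above (repeated in fsstat below) reduces to one decision: unwrap the server-side exception and treat an AuthorizationException, raised when the gateway cannot impersonate the user, as a permission error rather than a generic I/O failure. A minimal sketch of that pattern as a standalone helper; the helper itself is hypothetical and not part of RpcProgramNfs3.
// Hypothetical helper illustrating the pattern above; not defined in RpcProgramNfs3.
static int statusFromRemoteException(RemoteException r) {
  IOException unwrapped = r.unwrapRemoteException();
  if (unwrapped instanceof AuthorizationException) {
    // The gateway could not proxy the NFS user: report "access denied".
    return Nfs3Status.NFS3ERR_ACCES;
  }
  return Nfs3Status.NFS3ERR_IO;
}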
Use of org.apache.hadoop.security.authorize.AuthorizationException in project hadoop by apache.
The class RpcProgramNfs3, method fsstat.
@VisibleForTesting
FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
  FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK);
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  FSSTAT3Request request;
  try {
    request = FSSTAT3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid FSSTAT request");
    return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client: " + remoteAddress);
  }
  try {
    FsStatus fsStatus = dfsClient.getDiskStatus();
    long totalBytes = fsStatus.getCapacity();
    long freeBytes = fsStatus.getRemaining();
    Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle, iug);
    if (attrs == null) {
      LOG.info("Can't get path for fileId: " + handle.getFileId());
      return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE);
    }
    long maxFsObjects = config.getLong("dfs.max.objects", 0);
    if (maxFsObjects == 0) {
      // A value of zero in HDFS indicates no limit to the number
      // of objects that dfs supports. Using Integer.MAX_VALUE instead of
      // Long.MAX_VALUE so 32bit client won't complain.
      maxFsObjects = Integer.MAX_VALUE;
    }
    return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs, totalBytes, freeBytes, freeBytes, maxFsObjects, maxFsObjects, maxFsObjects, 0);
  } catch (RemoteException r) {
    LOG.warn("Exception ", r);
    IOException io = r.unwrapRemoteException();
    /**
     * AuthorizationException can be thrown if the user can't be proxy'ed.
     */
    if (io instanceof AuthorizationException) {
      return new FSSTAT3Response(Nfs3Status.NFS3ERR_ACCES);
    } else {
      return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO);
    }
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    int status = mapErrorStatus(e);
    return new FSSTAT3Response(status);
  }
}
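Both NFS handlers fall into the AuthorizationException branch when the gateway's system user is not allowed to impersonate the NFS client's user. A minimal sketch of the proxy-user settings that avoid this, assuming a gateway account named "nfsserver"; the user name and host are illustrative assumptions.
// Sketch only: core-site style settings letting the assumed "nfsserver" account
// impersonate NFS client users; adjust the names to the gateway's actual account and host.
Configuration coreSite = new Configuration();
coreSite.set("hadoop.proxyuser.nfsserver.hosts", "nfs-gateway.example.com");
coreSite.set("hadoop.proxyuser.nfsserver.groups", "*");
// Without entries like these, DFSClient calls made on behalf of the NFS user can fail
// with an AuthorizationException, which access() and fsstat() map to NFS3ERR_ACCES.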
Use of org.apache.hadoop.security.authorize.AuthorizationException in project hadoop by apache.
The class TestKMS, method testACLs.
@Test
@SuppressWarnings("checkstyle:methodlength")
public void testACLs() throws Exception {
  Configuration conf = new Configuration();
  conf.set("hadoop.security.authentication", "kerberos");
  final File testDir = getTestDir();
  conf = createBaseKMSConf(testDir, conf);
  conf.set("hadoop.kms.authentication.type", "kerberos");
  conf.set("hadoop.kms.authentication.kerberos.keytab", keytab.getAbsolutePath());
  conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
  conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
  for (KMSACLs.Type type : KMSACLs.Type.values()) {
    conf.set(type.getAclConfigKey(), type.toString());
  }
  conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
  conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(), KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
  conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
  conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
  writeConf(testDir, conf);
  runServer(null, null, testDir, new KMSCallable<Void>() {
    @Override
    public Void call() throws Exception {
      final Configuration conf = new Configuration();
      conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
      final URI uri = createKMSUri(getKMSUrl());
      // nothing allowed
      doAs("client", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            kp.createKey("k", new KeyProvider.Options(conf));
            Assert.fail();
          } catch (AuthorizationException ex) {
            //NOP
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          try {
            kp.createKey("k", new byte[16], new KeyProvider.Options(conf));
            Assert.fail();
          } catch (AuthorizationException ex) {
            //NOP
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          try {
            kp.rollNewVersion("k");
            Assert.fail();
          } catch (AuthorizationException ex) {
            //NOP
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          try {
            kp.rollNewVersion("k", new byte[16]);
            Assert.fail();
          } catch (AuthorizationException ex) {
            //NOP
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          try {
            kp.getKeys();
            Assert.fail();
          } catch (AuthorizationException ex) {
            //NOP
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          try {
            kp.getKeysMetadata("k");
            Assert.fail();
          } catch (AuthorizationException ex) {
            //NOP
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          try {
            // we are using JavaKeyStoreProvider for testing, so we know how
            // the keyversion is created.
            kp.getKeyVersion("k@0");
            Assert.fail();
          } catch (AuthorizationException ex) {
            //NOP
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          try {
            kp.getCurrentKey("k");
            Assert.fail();
          } catch (AuthorizationException ex) {
            //NOP
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          try {
            kp.getMetadata("k");
            Assert.fail();
          } catch (AuthorizationException ex) {
            //NOP
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          try {
            kp.getKeyVersions("k");
            Assert.fail();
          } catch (AuthorizationException ex) {
            //NOP
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      doAs("CREATE", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            KeyProvider.KeyVersion kv = kp.createKey("k0", new KeyProvider.Options(conf));
            Assert.assertNull(kv.getMaterial());
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      doAs("DELETE", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            kp.deleteKey("k0");
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            KeyProvider.KeyVersion kv = kp.createKey("k1", new byte[16], new KeyProvider.Options(conf));
            Assert.assertNull(kv.getMaterial());
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            KeyProvider.KeyVersion kv = kp.rollNewVersion("k1");
            Assert.assertNull(kv.getMaterial());
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            KeyProvider.KeyVersion kv = kp.rollNewVersion("k1", new byte[16]);
            Assert.assertNull(kv.getMaterial());
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      final KeyVersion currKv = doAs("GET", new PrivilegedExceptionAction<KeyVersion>() {
        @Override
        public KeyVersion run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            kp.getKeyVersion("k1@0");
            KeyVersion kv = kp.getCurrentKey("k1");
            return kv;
          } catch (Exception ex) {
            Assert.fail(ex.toString());
          }
          return null;
        }
      });
      final EncryptedKeyVersion encKv = doAs("GENERATE_EEK", new PrivilegedExceptionAction<EncryptedKeyVersion>() {
        @Override
        public EncryptedKeyVersion run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
            EncryptedKeyVersion ek1 = kpCE.generateEncryptedKey(currKv.getName());
            return ek1;
          } catch (Exception ex) {
            Assert.fail(ex.toString());
          }
          return null;
        }
      });
      doAs("GENERATE_EEK", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
          kpCE.reencryptEncryptedKey(encKv);
          return null;
        }
      });
      doAs("DECRYPT_EEK", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
            kpCE.decryptEncryptedKey(encKv);
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      doAs("GET_KEYS", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            kp.getKeys();
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      doAs("GET_METADATA", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            kp.getMetadata("k1");
            kp.getKeysMetadata("k1");
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      // stop the reloader, to avoid running while we are writing the new file
      KMSWebApp.getACLs().stopReloader();
      // test ACL reloading
      // to ensure the ACLs file modifiedTime is newer
      Thread.sleep(10);
      conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "foo");
      conf.set(KMSACLs.Type.GENERATE_EEK.getAclConfigKey(), "foo");
      writeConf(testDir, conf);
      Thread.sleep(1000);
      // forcing a reload by hand.
      KMSWebApp.getACLs().run();
      // should not be able to create a key now
      doAs("CREATE", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            KeyProvider kp = createProvider(uri, conf);
            KeyProvider.KeyVersion kv = kp.createKey("k2", new KeyProvider.Options(conf));
            Assert.fail();
          } catch (AuthorizationException ex) {
            //NOP
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      doAs("GENERATE_EEK", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
            kpCE.generateEncryptedKey("k1");
          } catch (IOException ex) {
            // The AuthorizationException arrives wrapped in an IOException because generate goes
            // through the ValueQueue. See KMSCP#generateEncryptedKey.
            if (ex.getCause().getCause() instanceof AuthorizationException) {
              LOG.info("Caught expected exception.", ex);
            } else {
              throw ex;
            }
          }
          return null;
        }
      });
      doAs("GENERATE_EEK", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          KeyProvider kp = createProvider(uri, conf);
          try {
            KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
            kpCE.reencryptEncryptedKey(encKv);
          } catch (AuthorizationException ex) {
            LOG.info("Caught expected exception.", ex);
          }
          return null;
        }
      });
      return null;
    }
  });
}
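The test repeats the same try / Assert.fail / catch (AuthorizationException) idiom for every operation the unprivileged "client" user attempts. A hedged sketch of how that idiom could be factored out; expectUnauthorized is a hypothetical helper name, not something TestKMS defines.
// Hypothetical helper (not in TestKMS): assert that an operation is rejected
// by the KMS with an AuthorizationException and fail on any other outcome.
static void expectUnauthorized(java.util.concurrent.Callable<?> op) {
  try {
    op.call();
    Assert.fail("expected AuthorizationException");
  } catch (AuthorizationException ex) {
    // expected: the caller is not in the relevant KMS ACL
  } catch (Exception ex) {
    Assert.fail(ex.getMessage());
  }
}
// Usage inside a doAs("client", ...) block, for example:
//   expectUnauthorized(() -> kp.createKey("k", new KeyProvider.Options(conf)));
//   expectUnauthorized(() -> kp.getKeys());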