Use of org.apache.hadoop.security.authentication.client.AuthenticationException in project hadoop by apache.
The class TransferFsImage, method uploadImage.
/**
 * Uploads the image file using the HTTP PUT method.
 */
private static void uploadImage(URL url, Configuration conf, NNStorage storage,
    NameNodeFile nnf, long txId, Canceler canceler) throws IOException {
  File imageFile = storage.findImageFile(nnf, txId);
  if (imageFile == null) {
    throw new IOException("Could not find image with txid " + txId);
  }
  HttpURLConnection connection = null;
  try {
    URIBuilder uriBuilder = new URIBuilder(url.toURI());
    // Write all parameters for the image upload request into the query
    // string; the request body carries the image itself.
    Map<String, String> params =
        ImageServlet.getParamsForPutImage(storage, txId, imageFile.length(), nnf);
    for (Entry<String, String> entry : params.entrySet()) {
      uriBuilder.addParameter(entry.getKey(), entry.getValue());
    }
    URL urlWithParams = uriBuilder.build().toURL();
    connection = (HttpURLConnection) connectionFactory.openConnection(
        urlWithParams, UserGroupInformation.isSecurityEnabled());
    // Set the request method to PUT.
    connection.setRequestMethod("PUT");
    connection.setDoOutput(true);
    int chunkSize = conf.getInt(
        DFSConfigKeys.DFS_IMAGE_TRANSFER_CHUNKSIZE_KEY,
        DFSConfigKeys.DFS_IMAGE_TRANSFER_CHUNKSIZE_DEFAULT);
    if (imageFile.length() > chunkSize) {
      // Use chunked streaming mode to support uploads of 2 GB and larger and
      // to avoid internal buffering. This mode should be used only when more
      // than chunkSize bytes are to be uploaded; otherwise the upload may
      // not happen at all.
      connection.setChunkedStreamingMode(chunkSize);
    }
    setTimeout(connection);
    // Set headers for verification.
    ImageServlet.setVerificationHeadersForPut(connection, imageFile);
    // Write the file to the output stream.
    writeFileToPutRequest(conf, connection, imageFile, canceler);
    int responseCode = connection.getResponseCode();
    if (responseCode != HttpURLConnection.HTTP_OK) {
      throw new HttpPutFailedException(String.format(
          "Image uploading failed, status: %d, url: %s, message: %s",
          responseCode, urlWithParams, connection.getResponseMessage()),
          responseCode);
    }
  } catch (AuthenticationException | URISyntaxException e) {
    throw new IOException(e);
  } finally {
    if (connection != null) {
      connection.disconnect();
    }
  }
}
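Note the catch clause: the connection factory's openConnection declares the checked AuthenticationException, but uploadImage itself only throws IOException, so the authentication failure is wrapped and rethrown with the original exception as the cause. Below is a minimal, self-contained sketch of the same wrap-and-rethrow pattern using the public AuthenticatedURL client; the class and helper names are illustrative, not part of Hadoop.

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;

public class AuthenticatedPutSketch {
  // Open a SPNEGO-authenticated connection and rethrow the checked
  // AuthenticationException as an IOException, preserving it as the
  // cause, just as uploadImage() does with its connection factory.
  static HttpURLConnection openAuthenticated(URL url) throws IOException {
    try {
      AuthenticatedURL.Token token = new AuthenticatedURL.Token();
      return new AuthenticatedURL().openConnection(url, token);
    } catch (AuthenticationException e) {
      throw new IOException(e);
    }
  }
}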
Use of org.apache.hadoop.security.authentication.client.AuthenticationException in project hadoop by apache.
The class DFSck, method doWork.
private int doWork(final String[] args) throws IOException {
  final StringBuilder url = new StringBuilder();
  url.append("/fsck?ugi=").append(ugi.getShortUserName());
  String dir = null;
  boolean doListCorruptFileBlocks = false;
  for (int idx = 0; idx < args.length; idx++) {
    if (args[idx].equals("-move")) {
      url.append("&move=1");
    } else if (args[idx].equals("-delete")) {
      url.append("&delete=1");
    } else if (args[idx].equals("-files")) {
      url.append("&files=1");
    } else if (args[idx].equals("-openforwrite")) {
      url.append("&openforwrite=1");
    } else if (args[idx].equals("-blocks")) {
      url.append("&blocks=1");
    } else if (args[idx].equals("-locations")) {
      url.append("&locations=1");
    } else if (args[idx].equals("-racks")) {
      url.append("&racks=1");
    } else if (args[idx].equals("-replicaDetails")) {
      url.append("&replicadetails=1");
    } else if (args[idx].equals("-upgradedomains")) {
      url.append("&upgradedomains=1");
    } else if (args[idx].equals("-storagepolicies")) {
      url.append("&storagepolicies=1");
    } else if (args[idx].equals("-showprogress")) {
      url.append("&showprogress=1");
    } else if (args[idx].equals("-list-corruptfileblocks")) {
      url.append("&listcorruptfileblocks=1");
      doListCorruptFileBlocks = true;
    } else if (args[idx].equals("-includeSnapshots")) {
      url.append("&includeSnapshots=1");
    } else if (args[idx].equals("-maintenance")) {
      url.append("&maintenance=1");
    } else if (args[idx].equals("-blockId")) {
      StringBuilder sb = new StringBuilder();
      idx++;
      while (idx < args.length && !args[idx].startsWith("-")) {
        sb.append(args[idx]);
        sb.append(" ");
        idx++;
      }
      url.append("&blockId=").append(URLEncoder.encode(sb.toString(), "UTF-8"));
    } else if (!args[idx].startsWith("-")) {
      if (null == dir) {
        dir = args[idx];
      } else {
        System.err.println("fsck: can only operate on one path at a time '"
            + args[idx] + "'");
        printUsage(System.err);
        return -1;
      }
    } else {
      System.err.println("fsck: Illegal option '" + args[idx] + "'");
      printUsage(System.err);
      return -1;
    }
  }
  if (null == dir) {
    dir = "/";
  }
  Path dirpath = null;
  URI namenodeAddress = null;
  try {
    dirpath = getResolvedPath(dir);
    namenodeAddress = getCurrentNamenodeAddress(dirpath);
  } catch (IOException ioe) {
    System.err.println("FileSystem is inaccessible due to:\n" + ioe.toString());
  }
  if (namenodeAddress == null) {
    // Error message already output in {@link #getCurrentNamenodeAddress()}.
    System.err.println("DFSck exiting.");
    return 0;
  }
  url.insert(0, namenodeAddress.toString());
  url.append("&path=").append(URLEncoder.encode(
      Path.getPathWithoutSchemeAndAuthority(dirpath).toString(), "UTF-8"));
  System.err.println("Connecting to namenode via " + url.toString());
  if (doListCorruptFileBlocks) {
    return listCorruptFileBlocks(dir, url.toString());
  }
  URL path = new URL(url.toString());
  URLConnection connection;
  try {
    connection = connectionFactory.openConnection(path, isSpnegoEnabled);
  } catch (AuthenticationException e) {
    throw new IOException(e);
  }
  InputStream stream = connection.getInputStream();
  BufferedReader input = new BufferedReader(new InputStreamReader(stream, "UTF-8"));
  String line = null;
  String lastLine = null;
  int errCode = -1;
  try {
    while ((line = input.readLine()) != null) {
      out.println(line);
      lastLine = line;
    }
  } finally {
    input.close();
  }
  if (lastLine.endsWith(NamenodeFsck.HEALTHY_STATUS)) {
    errCode = 0;
  } else if (lastLine.endsWith(NamenodeFsck.CORRUPT_STATUS)) {
    errCode = 1;
  } else if (lastLine.endsWith(NamenodeFsck.NONEXISTENT_STATUS)) {
    errCode = 0;
  } else if (lastLine.contains("Incorrect blockId format:")) {
    errCode = 0;
  } else if (lastLine.endsWith(NamenodeFsck.DECOMMISSIONED_STATUS)) {
    errCode = 2;
  } else if (lastLine.endsWith(NamenodeFsck.DECOMMISSIONING_STATUS)) {
    errCode = 3;
  } else if (lastLine.endsWith(NamenodeFsck.IN_MAINTENANCE_STATUS)) {
    errCode = 4;
  } else if (lastLine.endsWith(NamenodeFsck.ENTERING_MAINTENANCE_STATUS)) {
    errCode = 5;
  }
  return errCode;
}
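The trailing if/else chain maps the last line of the fsck servlet's output to the command's exit code. The sketch below is a hypothetical extraction of that mapping into a standalone helper, not part of DFSck: it performs the same suffix checks in the same order, and adds a null guard for an empty servlet response, a case in which doWork() above would dereference a null lastLine.

import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;

public class FsckExitCodeSketch {
  // Same suffix checks, same order as the chain in doWork(), plus a null
  // guard for an empty response (doWork() would otherwise throw
  // NullPointerException when dereferencing lastLine).
  static int statusToErrCode(String lastLine) {
    if (lastLine == null) {
      return -1;
    }
    if (lastLine.endsWith(NamenodeFsck.HEALTHY_STATUS)) {
      return 0;
    }
    if (lastLine.endsWith(NamenodeFsck.CORRUPT_STATUS)) {
      return 1;
    }
    if (lastLine.endsWith(NamenodeFsck.NONEXISTENT_STATUS)
        || lastLine.contains("Incorrect blockId format:")) {
      return 0;
    }
    if (lastLine.endsWith(NamenodeFsck.DECOMMISSIONED_STATUS)) {
      return 2;
    }
    if (lastLine.endsWith(NamenodeFsck.DECOMMISSIONING_STATUS)) {
      return 3;
    }
    if (lastLine.endsWith(NamenodeFsck.IN_MAINTENANCE_STATUS)) {
      return 4;
    }
    if (lastLine.endsWith(NamenodeFsck.ENTERING_MAINTENANCE_STATUS)) {
      return 5;
    }
    return -1;
  }
}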
Use of org.apache.hadoop.security.authentication.client.AuthenticationException in project hadoop by apache.
The class TestTimelineAuthenticationFilter, method testDelegationTokenOperations.
@Test
public void testDelegationTokenOperations() throws Exception {
  TimelineClient httpUserClient =
      KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<TimelineClient>() {
        @Override
        public TimelineClient call() throws Exception {
          return createTimelineClientForUGI();
        }
      });
  UserGroupInformation httpUser =
      KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<UserGroupInformation>() {
        @Override
        public UserGroupInformation call() throws Exception {
          return UserGroupInformation.getCurrentUser();
        }
      });
  // Let the HTTP user get a delegation token for itself.
  Token<TimelineDelegationTokenIdentifier> token =
      httpUserClient.getDelegationToken(httpUser.getShortUserName());
  Assert.assertNotNull(token);
  TimelineDelegationTokenIdentifier tDT = token.decodeIdentifier();
  Assert.assertNotNull(tDT);
  Assert.assertEquals(new Text(HTTP_USER), tDT.getOwner());
  // Renew the token from the token service address.
  Assert.assertFalse(token.getService().toString().isEmpty());
  long renewTime1 = httpUserClient.renewDelegationToken(token);
  Thread.sleep(100);
  token.setService(new Text());
  Assert.assertTrue(token.getService().toString().isEmpty());
  // Even if the token service address is not available, the token can
  // still be renewed from the configured address.
  long renewTime2 = httpUserClient.renewDelegationToken(token);
  Assert.assertTrue(renewTime1 < renewTime2);
  // Cancel the token.
  Assert.assertTrue(token.getService().toString().isEmpty());
  // Even if the token service address is not available, the token can
  // still be canceled from the configured address.
  httpUserClient.cancelDelegationToken(token);
  // Renewal should fail because the token has been canceled.
  try {
    httpUserClient.renewDelegationToken(token);
    Assert.fail();
  } catch (Exception e) {
    Assert.assertTrue(e.getMessage().contains("Renewal request for unknown token"));
  }
  // Let the HTTP user get a delegation token for the FOO user.
  UserGroupInformation fooUgi = UserGroupInformation.createProxyUser(FOO_USER, httpUser);
  TimelineClient fooUserClient =
      fooUgi.doAs(new PrivilegedExceptionAction<TimelineClient>() {
        @Override
        public TimelineClient run() throws Exception {
          return createTimelineClientForUGI();
        }
      });
  token = fooUserClient.getDelegationToken(httpUser.getShortUserName());
  Assert.assertNotNull(token);
  tDT = token.decodeIdentifier();
  Assert.assertNotNull(tDT);
  Assert.assertEquals(new Text(FOO_USER), tDT.getOwner());
  Assert.assertEquals(new Text(HTTP_USER), tDT.getRealUser());
  // Renew the token as the renewer.
  final Token<TimelineDelegationTokenIdentifier> tokenToRenew = token;
  renewTime1 = httpUserClient.renewDelegationToken(tokenToRenew);
  renewTime2 = httpUserClient.renewDelegationToken(tokenToRenew);
  Assert.assertTrue(renewTime1 < renewTime2);
  // Cancel the token from the token service address.
  Assert.assertFalse(tokenToRenew.getService().toString().isEmpty());
  fooUserClient.cancelDelegationToken(tokenToRenew);
  // Renewal should fail because the token has been canceled.
  try {
    httpUserClient.renewDelegationToken(tokenToRenew);
    Assert.fail();
  } catch (Exception e) {
    Assert.assertTrue(e.getMessage().contains("Renewal request for unknown token"));
  }
  // Let the HTTP user try to get a delegation token for the BAR user;
  // this request is expected to fail.
  UserGroupInformation barUgi = UserGroupInformation.createProxyUser(BAR_USER, httpUser);
  TimelineClient barUserClient =
      barUgi.doAs(new PrivilegedExceptionAction<TimelineClient>() {
        @Override
        public TimelineClient run() {
          return createTimelineClientForUGI();
        }
      });
  try {
    barUserClient.getDelegationToken(httpUser.getShortUserName());
    Assert.fail();
  } catch (Exception e) {
    Assert.assertTrue(e.getCause() instanceof AuthorizationException
        || e.getCause() instanceof AuthenticationException);
  }
}
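The mechanism this test exercises is UGI impersonation: createProxyUser builds a UserGroupInformation for FOO_USER or BAR_USER whose real user is the authenticated HTTP principal, and doAs runs the client code under that identity. Whether impersonation is permitted is enforced server-side, which is why the BAR_USER request ends in an AuthorizationException or AuthenticationException. A minimal, self-contained sketch of the pattern follows; the class and method names are illustrative.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserSketch {
  // Run a block of code as "user", impersonated by the current (real,
  // authenticated) user. The server decides, from its proxy-user
  // configuration, whether this impersonation is allowed.
  static String whoAmIAs(String user) throws IOException, InterruptedException {
    UserGroupInformation real = UserGroupInformation.getCurrentUser();
    UserGroupInformation proxy = UserGroupInformation.createProxyUser(user, real);
    return proxy.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() {
        // Code here executes with the proxy user's identity.
        return UserGroupInformation.createRemoteUser(user).getUserName();
      }
    });
  }
}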
Use of org.apache.hadoop.security.authentication.client.AuthenticationException in project hadoop by apache.
The class AppBlock, method render.
@Override
protected void render(Block html) {
  String webUiType = $(WEB_UI_TYPE);
  String aid = $(APPLICATION_ID);
  if (aid.isEmpty()) {
    puts("Bad request: requires Application ID");
    return;
  }
  try {
    appID = Apps.toAppID(aid);
  } catch (Exception e) {
    puts("Invalid Application ID: " + aid);
    return;
  }
  UserGroupInformation callerUGI = getCallerUGI();
  ApplicationReport appReport;
  try {
    final GetApplicationReportRequest request =
        GetApplicationReportRequest.newInstance(appID);
    if (callerUGI == null) {
      throw new AuthenticationException("Failed to get user name from request");
    } else {
      appReport = callerUGI.doAs(new PrivilegedExceptionAction<ApplicationReport>() {
        @Override
        public ApplicationReport run() throws Exception {
          return appBaseProt.getApplicationReport(request).getApplicationReport();
        }
      });
    }
  } catch (Exception e) {
    String message = "Failed to read the application " + appID + ".";
    LOG.error(message, e);
    html.p()._(message)._();
    return;
  }
  if (appReport == null) {
    puts("Application not found: " + aid);
    return;
  }
  AppInfo app = new AppInfo(appReport);
  setTitle(join("Application ", aid));
  if (webUiType != null && webUiType.equals(YarnWebParams.RM_WEB_UI)
      && conf.getBoolean(YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED,
          YarnConfiguration.DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED)) {
    // Application Kill
    html.div().button().$onclick("confirmAction()").b("Kill Application")._()._();
    StringBuilder script = new StringBuilder();
    script.append("function confirmAction() {")
        .append(" b = confirm(\"Are you sure?\");")
        .append(" if (b == true) {")
        .append(" $.ajax({")
        .append(" type: 'PUT',")
        .append(" url: '/ws/v1/cluster/apps/").append(aid).append("/state',")
        .append(" contentType: 'application/json',")
        .append(getCSRFHeaderString(conf))
        .append(" data: '{\"state\":\"KILLED\"}',")
        .append(" dataType: 'json'")
        .append(" }).done(function(data){")
        .append(" setTimeout(function(){")
        .append(" location.href = '/cluster/app/").append(aid).append("';")
        .append(" }, 1000);")
        .append(" }).fail(function(data){")
        .append(" console.log(data);")
        .append(" });")
        .append(" }")
        .append("}");
    html.script().$type("text/javascript")._(script.toString())._();
  }
  String schedulerPath = WebAppUtils.getResolvedRMWebAppURLWithScheme(conf)
      + "/cluster/scheduler?openQueues=" + app.getQueue();
  generateOverviewTable(app, schedulerPath, webUiType, appReport);
  Collection<ApplicationAttemptReport> attempts;
  try {
    final GetApplicationAttemptsRequest request =
        GetApplicationAttemptsRequest.newInstance(appID);
    attempts = callerUGI.doAs(
        new PrivilegedExceptionAction<Collection<ApplicationAttemptReport>>() {
          @Override
          public Collection<ApplicationAttemptReport> run() throws Exception {
            return appBaseProt.getApplicationAttempts(request)
                .getApplicationAttemptList();
          }
        });
  } catch (Exception e) {
    String message = "Failed to read the attempts of the application " + appID + ".";
    LOG.error(message, e);
    html.p()._(message)._();
    return;
  }
  createApplicationMetricsTable(html);
  html._(InfoBlock.class);
  generateApplicationTable(html, callerUGI, attempts);
}
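render() treats a null result from getCallerUGI() as an unauthenticated request and converts it into an AuthenticationException, which the surrounding catch then reports as a generic failure. The sketch below shows what such a helper typically does in a YARN web block, deriving the caller's identity from the servlet request's remote user; this is an illustrative assumption about the helper, not the exact Hadoop implementation.

import javax.servlet.http.HttpServletRequest;

import org.apache.hadoop.security.UserGroupInformation;

public class CallerUgiSketch {
  // Derive the caller's UGI from the servlet request. Returning null for
  // an unauthenticated request matches the guard in render() above, which
  // then raises AuthenticationException.
  static UserGroupInformation getCallerUGI(HttpServletRequest request) {
    String remoteUser = request.getRemoteUser();
    if (remoteUser == null) {
      return null;
    }
    return UserGroupInformation.createRemoteUser(remoteUser);
  }
}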
Use of org.apache.hadoop.security.authentication.client.AuthenticationException in project incubator-atlas by apache.
The class AtlasAuthenticationFilter, method getToken.
@Override
protected AuthenticationToken getToken(HttpServletRequest request)
    throws IOException, AuthenticationException {
  AuthenticationToken token = null;
  String tokenStr = null;
  Cookie[] cookies = request.getCookies();
  if (cookies != null) {
    for (Cookie cookie : cookies) {
      if (cookie.getName().equals(AuthenticatedURL.AUTH_COOKIE)) {
        tokenStr = cookie.getValue();
        try {
          tokenStr = this.signer.verifyAndExtract(tokenStr);
        } catch (SignerException ex) {
          throw new AuthenticationException(ex);
        }
      }
    }
  }
  if (tokenStr != null) {
    token = AuthenticationToken.parse(tokenStr);
    if (token != null) {
      AuthenticationHandler authHandler = getAuthenticationHandler();
      if (!token.getType().equals(authHandler.getType())) {
        throw new AuthenticationException("Invalid AuthenticationToken type");
      }
      if (token.isExpired()) {
        throw new AuthenticationException("AuthenticationToken expired");
      }
    }
  }
  return token;
}
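The filter's contract is that any signature problem on the authentication cookie surfaces as an AuthenticationException rather than a raw SignerException. Below is a minimal sketch of that conversion, assuming a Signer configured with the same secret that signed the cookie; the class and method names are illustrative.

import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.security.authentication.util.SignerException;

public class SignedCookieSketch {
  // Verify the cookie's signature and return the embedded token string.
  // A tampered or mis-signed cookie becomes an AuthenticationException,
  // matching the behavior of getToken() above.
  static String extractVerifiedToken(Signer signer, String cookieValue)
      throws AuthenticationException {
    try {
      return signer.verifyAndExtract(cookieValue);
    } catch (SignerException ex) {
      throw new AuthenticationException(ex);
    }
  }
}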