Use of org.apache.hadoop.security.Credentials in project hadoop by apache.
From the class TestRMWebServicesAppsModification, method testAppSubmit:
public void testAppSubmit(String acceptMedia, String contentMedia) throws Exception {
// create a test app and submit it via rest(after getting an app-id) then
// get the app details from the rmcontext and check that everything matches
client().addFilter(new LoggingFilter(System.out));
String lrKey = "example";
String queueName = "testqueue";
// create the queue
String[] queues = { "default", "testqueue" };
CapacitySchedulerConfiguration csconf = new CapacitySchedulerConfiguration();
csconf.setQueues("root", queues);
csconf.setCapacity("root.default", 50.0f);
csconf.setCapacity("root.testqueue", 50.0f);
rm.getResourceScheduler().reinitialize(csconf, rm.getRMContext());
String appName = "test";
String appType = "test-type";
String urlPath = "apps";
String appId = testGetNewApplication(acceptMedia);
List<String> commands = new ArrayList<>();
commands.add("/bin/sleep 5");
HashMap<String, String> environment = new HashMap<>();
environment.put("APP_VAR", "ENV_SETTING");
HashMap<ApplicationAccessType, String> acls = new HashMap<>();
acls.put(ApplicationAccessType.MODIFY_APP, "testuser1, testuser2");
acls.put(ApplicationAccessType.VIEW_APP, "testuser3, testuser4");
Set<String> tags = new HashSet<>();
tags.add("tag1");
tags.add("tag 2");
CredentialsInfo credentials = new CredentialsInfo();
HashMap<String, String> tokens = new HashMap<>();
HashMap<String, String> secrets = new HashMap<>();
secrets.put("secret1", Base64.encodeBase64String("mysecret".getBytes("UTF8")));
credentials.setSecrets(secrets);
credentials.setTokens(tokens);
ApplicationSubmissionContextInfo appInfo = new ApplicationSubmissionContextInfo();
appInfo.setApplicationId(appId);
appInfo.setApplicationName(appName);
appInfo.setMaxAppAttempts(2);
appInfo.setQueue(queueName);
appInfo.setApplicationType(appType);
appInfo.setPriority(0);
HashMap<String, LocalResourceInfo> lr = new HashMap<>();
LocalResourceInfo y = new LocalResourceInfo();
y.setUrl(new URI("http://www.test.com/file.txt"));
y.setSize(100);
y.setTimestamp(System.currentTimeMillis());
y.setType(LocalResourceType.FILE);
y.setVisibility(LocalResourceVisibility.APPLICATION);
lr.put(lrKey, y);
appInfo.getContainerLaunchContextInfo().setResources(lr);
appInfo.getContainerLaunchContextInfo().setCommands(commands);
appInfo.getContainerLaunchContextInfo().setEnvironment(environment);
appInfo.getContainerLaunchContextInfo().setAcls(acls);
appInfo.getContainerLaunchContextInfo().getAuxillaryServiceData().put("test", Base64.encodeBase64URLSafeString("value12".getBytes("UTF8")));
appInfo.getContainerLaunchContextInfo().setCredentials(credentials);
appInfo.getResource().setMemory(1024);
appInfo.getResource().setvCores(1);
appInfo.setApplicationTags(tags);
// Set LogAggregationContextInfo
String includePattern = "file1";
String excludePattern = "file2";
String rolledLogsIncludePattern = "file3";
String rolledLogsExcludePattern = "file4";
String className = "policy_class";
String parameters = "policy_parameter";
LogAggregationContextInfo logAggregationContextInfo = new LogAggregationContextInfo();
logAggregationContextInfo.setIncludePattern(includePattern);
logAggregationContextInfo.setExcludePattern(excludePattern);
logAggregationContextInfo.setRolledLogsIncludePattern(rolledLogsIncludePattern);
logAggregationContextInfo.setRolledLogsExcludePattern(rolledLogsExcludePattern);
logAggregationContextInfo.setLogAggregationPolicyClassName(className);
logAggregationContextInfo.setLogAggregationPolicyParameters(parameters);
appInfo.setLogAggregationContextInfo(logAggregationContextInfo);
// Set attemptFailuresValidityInterval
long attemptFailuresValidityInterval = 5000;
appInfo.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
// Set ReservationId
String reservationId = ReservationId.newInstance(System.currentTimeMillis(), 1).toString();
appInfo.setReservationId(reservationId);
ClientResponse response = this.constructWebResource(urlPath).accept(acceptMedia).entity(appInfo, contentMedia).post(ClientResponse.class);
if (!this.isAuthenticationEnabled()) {
  assertResponseStatusCode(Status.UNAUTHORIZED, response.getStatusInfo());
  return;
}
assertResponseStatusCode(Status.ACCEPTED, response.getStatusInfo());
assertTrue(!response.getHeaders().getFirst(HttpHeaders.LOCATION).isEmpty());
String locURL = response.getHeaders().getFirst(HttpHeaders.LOCATION);
assertTrue(locURL.contains("/apps/application"));
appId = locURL.substring(locURL.indexOf("/apps/") + "/apps/".length());
WebResource res = resource().uri(new URI(locURL));
res = res.queryParam("user.name", webserviceUserName);
response = res.get(ClientResponse.class);
assertResponseStatusCode(Status.OK, response.getStatusInfo());
RMApp app = rm.getRMContext().getRMApps().get(ApplicationId.fromString(appId));
assertEquals(appName, app.getName());
assertEquals(webserviceUserName, app.getUser());
assertEquals(2, app.getMaxAppAttempts());
if (app.getQueue().contains("root.")) {
  queueName = "root." + queueName;
}
assertEquals(queueName, app.getQueue());
assertEquals(appType, app.getApplicationType());
assertEquals(tags, app.getApplicationTags());
ContainerLaunchContext ctx = app.getApplicationSubmissionContext().getAMContainerSpec();
assertEquals(commands, ctx.getCommands());
assertEquals(environment, ctx.getEnvironment());
assertEquals(acls, ctx.getApplicationACLs());
Map<String, LocalResource> appLRs = ctx.getLocalResources();
assertTrue(appLRs.containsKey(lrKey));
LocalResource exampleLR = appLRs.get(lrKey);
assertEquals(URL.fromURI(y.getUrl()), exampleLR.getResource());
assertEquals(y.getSize(), exampleLR.getSize());
assertEquals(y.getTimestamp(), exampleLR.getTimestamp());
assertEquals(y.getType(), exampleLR.getType());
assertEquals(y.getPattern(), exampleLR.getPattern());
assertEquals(y.getVisibility(), exampleLR.getVisibility());
Credentials cs = new Credentials();
ByteArrayInputStream str = new ByteArrayInputStream(app.getApplicationSubmissionContext().getAMContainerSpec().getTokens().array());
DataInputStream di = new DataInputStream(str);
cs.readTokenStorageStream(di);
Text key = new Text("secret1");
assertTrue("Secrets missing from credentials object", cs.getAllSecretKeys().contains(key));
assertEquals("mysecret", new String(cs.getSecretKey(key), "UTF-8"));
// Check LogAggregationContext
ApplicationSubmissionContext asc = app.getApplicationSubmissionContext();
LogAggregationContext lac = asc.getLogAggregationContext();
assertEquals(includePattern, lac.getIncludePattern());
assertEquals(excludePattern, lac.getExcludePattern());
assertEquals(rolledLogsIncludePattern, lac.getRolledLogsIncludePattern());
assertEquals(rolledLogsExcludePattern, lac.getRolledLogsExcludePattern());
assertEquals(className, lac.getLogAggregationPolicyClassName());
assertEquals(parameters, lac.getLogAggregationPolicyParameters());
// Check attemptFailuresValidityInterval
assertEquals(attemptFailuresValidityInterval, asc.getAttemptFailuresValidityInterval());
// Check ReservationId
assertEquals(reservationId, app.getReservationId().toString());
response = this.constructWebResource("apps", appId).accept(acceptMedia).get(ClientResponse.class);
assertResponseStatusCode(Status.OK, response.getStatusInfo());
}
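The secret assertions at the end of this test rely on the token-storage round trip that Credentials provides: the REST payload carries the secret Base64-encoded in CredentialsInfo, the server stores it in a Credentials object serialized into the AM container tokens, and the test reads it back via readTokenStorageStream. A minimal self-contained sketch of that round trip (the class name is illustrative, not part of the test):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.nio.charset.StandardCharsets;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;

public class CredentialsRoundTrip {
  public static void main(String[] args) throws Exception {
    // What the REST client sends: a Base64-encoded secret, as in the test above.
    String encoded = Base64.encodeBase64String("mysecret".getBytes(StandardCharsets.UTF_8));

    // What gets stored: the decoded bytes under a named secret key.
    Credentials written = new Credentials();
    written.addSecretKey(new Text("secret1"), Base64.decodeBase64(encoded));

    // Serialize to the token-storage format carried in the AM container spec.
    DataOutputBuffer out = new DataOutputBuffer();
    written.writeTokenStorageToStream(out);

    // Read it back, mirroring the assertions in testAppSubmit.
    Credentials read = new Credentials();
    read.readTokenStorageStream(new DataInputStream(
        new ByteArrayInputStream(out.getData(), 0, out.getLength())));
    System.out.println(new String(read.getSecretKey(new Text("secret1")), StandardCharsets.UTF_8));
  }
}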
Use of org.apache.hadoop.security.Credentials in project hadoop by apache.
From the class TokenCache, method loadTokens:
/**
 * Load the job token from a file.
 * @deprecated Use {@link Credentials#readTokenStorageFile} instead;
 * this method is included for compatibility against Hadoop-1.
 * @param jobTokenFile path of the local file holding the serialized tokens
 * @param conf the job configuration
 * @return the credentials read from the file
 * @throws IOException
 */
@InterfaceAudience.Private
@Deprecated
public static Credentials loadTokens(String jobTokenFile, JobConf conf) throws IOException {
Path localJobTokenFile = new Path("file:///" + jobTokenFile);
Credentials ts = Credentials.readTokenStorageFile(localJobTokenFile, conf);
if (LOG.isDebugEnabled()) {
  LOG.debug("Task: Loaded jobTokenFile from: " + localJobTokenFile.toUri().getPath() + "; num of sec keys = " + ts.numberOfSecretKeys() + " Number of tokens " + ts.numberOfTokens());
}
return ts;
}
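As the deprecation note says, new code should call Credentials#readTokenStorageFile directly. A minimal sketch of the equivalent, non-deprecated call (the helper name is illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;

// Equivalent of loadTokens() without the deprecated wrapper.
static Credentials loadJobCredentials(String jobTokenFile, Configuration conf) throws IOException {
  return Credentials.readTokenStorageFile(new Path("file:///" + jobTokenFile), conf);
}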
Use of org.apache.hadoop.security.Credentials in project hadoop by apache.
From the class TestAMRMClient, method allocateAndStartContainers:
private List<Container> allocateAndStartContainers(final AMRMClient<ContainerRequest> amClient, final NMClient nmClient, int num) throws YarnException, IOException {
// set up allocation requests
for (int i = 0; i < num; ++i) {
  amClient.addContainerRequest(new ContainerRequest(capability, nodes, racks, priority));
}
// send allocation requests
amClient.allocate(0.1f);
// let NM heartbeat to RM and trigger allocations
triggerSchedulingWithNMHeartBeat();
// get allocations
AllocateResponse allocResponse = amClient.allocate(0.1f);
List<Container> containers = allocResponse.getAllocatedContainers();
Assert.assertEquals(num, containers.size());
// build container launch context
Credentials ts = new Credentials();
DataOutputBuffer dob = new DataOutputBuffer();
ts.writeTokenStorageToStream(dob);
ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
// start a process long enough for increase/decrease action to take effect
ContainerLaunchContext clc = BuilderUtils.newContainerLaunchContext(Collections.<String, LocalResource>emptyMap(), new HashMap<String, String>(), Arrays.asList("sleep", "100"), new HashMap<String, ByteBuffer>(), securityTokens, new HashMap<ApplicationAccessType, String>());
// start the containers and make sure they are in RUNNING state
try {
  for (int i = 0; i < num; i++) {
    Container container = containers.get(i);
    nmClient.startContainer(container, clc);
    // container status
    while (true) {
      ContainerStatus status = nmClient.getContainerStatus(container.getId(), container.getNodeId());
      if (status.getState() == ContainerState.RUNNING) {
        break;
      }
      sleep(10);
    }
  }
} catch (YarnException e) {
  throw new AssertionError("Exception is not expected: " + e);
}
// let NM's heartbeat to RM to confirm container launch
triggerSchedulingWithNMHeartBeat();
return containers;
}
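Outside of this test, the same (possibly empty) Credentials-to-ByteBuffer pattern is typically paired with the public ContainerLaunchContext.newInstance factory rather than the internal BuilderUtils helper. A hedged sketch under that assumption (the command and the empty maps are illustrative):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;

// Serialize Credentials into the tokens field of a launch context built
// through the public records API. Command and maps are illustrative.
static ContainerLaunchContext sleepContainerContext() throws IOException {
  Credentials ts = new Credentials();
  DataOutputBuffer dob = new DataOutputBuffer();
  ts.writeTokenStorageToStream(dob);
  ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  return ContainerLaunchContext.newInstance(
      Collections.emptyMap(),            // local resources
      Collections.emptyMap(),            // environment
      Arrays.asList("sleep", "100"),     // commands
      Collections.emptyMap(),            // auxiliary service data
      securityTokens,                    // serialized Credentials
      Collections.emptyMap());           // application ACLs
}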
Use of org.apache.hadoop.security.Credentials in project hadoop by apache.
From the class TestAMRMClient, method getAMRMToken:
@SuppressWarnings("unchecked")
private org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> getAMRMToken() throws IOException {
Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
Iterator<org.apache.hadoop.security.token.Token<?>> iter = credentials.getAllTokens().iterator();
org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> result = null;
while (iter.hasNext()) {
  org.apache.hadoop.security.token.Token<?> token = iter.next();
  if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
    if (result != null) {
      Assert.fail("credentials has more than one AMRM token." + " token1: " + result + " token2: " + token);
    }
    result = (org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>) token;
  }
}
return result;
}
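A typical follow-up once the AMRM token has been located is to attach it to the UserGroupInformation that will talk to the ResourceManager, for example in unmanaged-AM style code. A hedged sketch (the user name and helper name are illustrative):

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;

// Hand the retrieved AMRM token to a fresh UGI so RPC calls made as that
// user can authenticate to the ResourceManager.
static UserGroupInformation ugiWithAMRMToken(Token<AMRMTokenIdentifier> amrmToken) {
  UserGroupInformation appUgi = UserGroupInformation.createRemoteUser("am-user");
  appUgi.addToken(amrmToken);
  return appUgi;
}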
Use of org.apache.hadoop.security.Credentials in project hadoop by apache.
From the class ContainerManagerImpl, method startContainerInternal:
@SuppressWarnings("unchecked")
protected void startContainerInternal(ContainerTokenIdentifier containerTokenIdentifier, StartContainerRequest request) throws YarnException, IOException {
ContainerId containerId = containerTokenIdentifier.getContainerID();
String containerIdStr = containerId.toString();
String user = containerTokenIdentifier.getApplicationSubmitter();
LOG.info("Start request for " + containerIdStr + " by user " + user);
ContainerLaunchContext launchContext = request.getContainerLaunchContext();
Credentials credentials = YarnServerSecurityUtils.parseCredentials(launchContext);
Container container = new ContainerImpl(getConfig(), this.dispatcher, launchContext, credentials, metrics, containerTokenIdentifier, context);
ApplicationId applicationID = containerId.getApplicationAttemptId().getApplicationId();
if (context.getContainers().putIfAbsent(containerId, container) != null) {
  NMAuditLogger.logFailure(user, AuditConstants.START_CONTAINER, "ContainerManagerImpl", "Container already running on this node!", applicationID, containerId);
  throw RPCUtil.getRemoteException("Container " + containerIdStr + " already is running on this node!!");
}
this.readLock.lock();
try {
  if (!isServiceStopped()) {
    // Create the application
    // populate the flow context from the launch context if the timeline
    // service v.2 is enabled
    FlowContext flowContext = null;
    if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
      String flowName = launchContext.getEnvironment().get(TimelineUtils.FLOW_NAME_TAG_PREFIX);
      String flowVersion = launchContext.getEnvironment().get(TimelineUtils.FLOW_VERSION_TAG_PREFIX);
      String flowRunIdStr = launchContext.getEnvironment().get(TimelineUtils.FLOW_RUN_ID_TAG_PREFIX);
      long flowRunId = 0L;
      if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
        flowRunId = Long.parseLong(flowRunIdStr);
      }
      flowContext = new FlowContext(flowName, flowVersion, flowRunId);
    }
    if (!context.getApplications().containsKey(applicationID)) {
      Application application = new ApplicationImpl(dispatcher, user, flowContext, applicationID, credentials, context);
      if (context.getApplications().putIfAbsent(applicationID, application) == null) {
        LOG.info("Creating a new application reference for app " + applicationID);
        LogAggregationContext logAggregationContext = containerTokenIdentifier.getLogAggregationContext();
        Map<ApplicationAccessType, String> appAcls = container.getLaunchContext().getApplicationACLs();
        context.getNMStateStore().storeApplication(applicationID, buildAppProto(applicationID, user, credentials, appAcls, logAggregationContext));
        dispatcher.getEventHandler().handle(new ApplicationInitEvent(applicationID, appAcls, logAggregationContext));
      }
    }
    this.context.getNMStateStore().storeContainer(containerId, containerTokenIdentifier.getVersion(), request);
    dispatcher.getEventHandler().handle(new ApplicationContainerInitEvent(container));
    this.context.getContainerTokenSecretManager().startContainerSuccessful(containerTokenIdentifier);
    NMAuditLogger.logSuccess(user, AuditConstants.START_CONTAINER, "ContainerManageImpl", applicationID, containerId);
    // TODO launchedContainer misplaced -> doesn't necessarily mean a container
    // launch. A finished Application will not launch containers.
    metrics.launchedContainer();
    metrics.allocateContainer(containerTokenIdentifier.getResource());
  } else {
    throw new YarnException("Container start failed as the NodeManager is " + "in the process of shutting down");
  }
} finally {
  this.readLock.unlock();
}
}
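The Credentials parsed at the top of this method come from the tokens ByteBuffer of the ContainerLaunchContext, i.e. the AM-side serialization shown in the earlier examples read back on the NodeManager. A rough, hedged sketch of that deserialization step (the helper name is ours, not the YarnServerSecurityUtils API):

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;

// Turn the launch context's serialized tokens back into a Credentials object.
static Credentials credentialsFromLaunchContext(ContainerLaunchContext launchContext) throws IOException {
  Credentials credentials = new Credentials();
  ByteBuffer tokens = launchContext.getTokens();
  if (tokens != null) {
    DataInputByteBuffer dib = new DataInputByteBuffer();
    tokens.rewind();
    dib.reset(tokens);
    credentials.readTokenStorageStream(dib);
    tokens.rewind(); // leave the buffer readable for later callers
  }
  return credentials;
}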