Use of org.apache.hadoop.yarn.api.records.URL in the Apache Hadoop project:
class TestContainerManager, method setupLocalResources.
/**
 * Creates a single-entry local-resource map for a container launch test.
 * Writes a small file under {@code tmpDir/dir/<fileName>} and wraps it in a
 * {@link LocalResource} keyed by {@code symLink}.
 *
 * @param fileName name of the file to create under the temp directory
 * @param symLink  key (link name) under which the resource is registered
 * @return map from symlink name to the created APPLICATION-visibility FILE resource
 * @throws Exception if the file cannot be written or qualified
 */
private Map<String, LocalResource> setupLocalResources(String fileName, String symLink) throws Exception {
// ////// Create the resources for the container
File dir = new File(tmpDir, "dir");
dir.mkdirs();
File file = new File(dir, fileName);
// try-with-resources guarantees the writer is closed even if write() throws,
// so the test JVM never leaks a file handle (original leaked on exception).
try (PrintWriter fileWriter = new PrintWriter(file)) {
    fileWriter.write("Hello World!");
}
URL resourceURL = URL.fromPath(FileContext.getLocalFSFileContext().makeQualified(new Path(file.getAbsolutePath())));
LocalResource resource = recordFactory.newRecordInstance(LocalResource.class);
resource.setResource(resourceURL);
// -1 size means "unknown"; the localizer determines the real size.
resource.setSize(-1);
resource.setVisibility(LocalResourceVisibility.APPLICATION);
resource.setType(LocalResourceType.FILE);
resource.setTimestamp(file.lastModified());
Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
localResources.put(symLink, resource);
return localResources;
}
Use of org.apache.hadoop.yarn.api.records.URL in the Apache Hadoop project:
class TestNodeManagerReboot, method testClearLocalDirWhenNodeReboot.
// Verifies that a NodeManager restart cleans up per-container local directories:
// launches a container that localizes one directory resource, waits for it to
// finish, then restarts the NM twice and checks that nm-private, filecache and
// usercache contents are scheduled for deletion.
@Test(timeout = 2000000)
public void testClearLocalDirWhenNodeReboot() throws IOException, YarnException, InterruptedException {
nm = new MyNodeManager();
nm.start();
final ContainerManagementProtocol containerManager = nm.getContainerManager();
// create files under fileCache
createFiles(nmLocalDir.getAbsolutePath(), ContainerLocalizer.FILECACHE, 100);
localResourceDir.mkdirs();
ContainerLaunchContext containerLaunchContext = Records.newRecord(ContainerLaunchContext.class);
// Construct the Container-id
ContainerId cId = createContainerId();
// Localize the whole directory as a single FILE resource named "dest_file".
URL localResourceUri = URL.fromPath(localFS.makeQualified(new Path(localResourceDir.getAbsolutePath())));
// Size -1 lets the localizer compute the actual size on download.
LocalResource localResource = LocalResource.newInstance(localResourceUri, LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, -1, localResourceDir.lastModified());
String destinationFile = "dest_file";
Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
localResources.put(destinationFile, localResource);
containerLaunchContext.setLocalResources(localResources);
// Empty command list: the container only localizes, runs nothing, and exits.
List<String> commands = new ArrayList<String>();
containerLaunchContext.setCommands(commands);
NodeId nodeId = nm.getNMContext().getNodeId();
StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, TestContainerManager.createContainerToken(cId, 0, nodeId, destinationFile, nm.getNMContext().getContainerTokenSecretManager()));
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
final StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
// The NM authenticates the caller via an NMToken, so the request must run
// as a UGI carrying a valid NMTokenIdentifier for this attempt.
final UserGroupInformation currentUser = UserGroupInformation.createRemoteUser(cId.getApplicationAttemptId().toString());
NMTokenIdentifier nmIdentifier = new NMTokenIdentifier(cId.getApplicationAttemptId(), nodeId, user, 123);
currentUser.addTokenIdentifier(nmIdentifier);
currentUser.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws YarnException, IOException {
nm.getContainerManager().startContainers(allRequests);
return null;
}
});
List<ContainerId> containerIds = new ArrayList<ContainerId>();
containerIds.add(cId);
GetContainerStatusesRequest request = GetContainerStatusesRequest.newInstance(containerIds);
Container container = nm.getNMContext().getContainers().get(request.getContainerIds().get(0));
// Poll (up to ~10s) for the container to reach DONE before asserting.
final int MAX_TRIES = 20;
int numTries = 0;
while (!container.getContainerState().equals(ContainerState.DONE) && numTries <= MAX_TRIES) {
try {
Thread.sleep(500);
} catch (InterruptedException ex) {
// Do nothing
}
numTries++;
}
Assert.assertEquals(ContainerState.DONE, container.getContainerState());
// NOTE(review): the message below is missing a space before "under" — cosmetic only.
Assert.assertTrue("The container should create a subDir named currentUser: " + user + "under localDir/usercache", numOfLocalDirs(nmLocalDir.getAbsolutePath(), ContainerLocalizer.USERCACHE) > 0);
Assert.assertTrue("There should be files or Dirs under nm_private when " + "container is launched", numOfLocalDirs(nmLocalDir.getAbsolutePath(), ResourceLocalizationService.NM_PRIVATE_DIR) > 0);
// restart the NodeManager
restartNM(MAX_TRIES);
checkNumOfLocalDirs();
// On restart the NM renames stale dirs with a "_DEL_" suffix and hands them
// to the deletion service; verify each category was scheduled exactly once.
verify(delService, times(1)).delete((String) isNull(), argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_")));
verify(delService, times(1)).delete((String) isNull(), argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_")));
verify(delService, times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(user, null, new String[] { destinationFile })));
verify(delService, times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(null, ContainerLocalizer.USERCACHE + "_DEL_", new String[] {})));
// restart the NodeManager again
// this time usercache directory should be empty
restartNM(MAX_TRIES);
checkNumOfLocalDirs();
}
Use of org.apache.hadoop.yarn.api.records.URL in the Apache Samza project:
class YarnContainerRunner, method startContainer.
/**
* Runs a command as a process on the container. All binaries needed by the physical process are packaged in the URL
* specified by packagePath.
*/
/**
 * Runs a command as a process on the container. All binaries needed by the physical process are packaged in the URL
 * specified by packagePath.
 *
 * @param packagePath HDFS (or other FS) path of the job package archive
 * @param container   YARN container allocated for this process
 * @param env         environment variables for the launched process
 * @param cmd         shell command to execute inside the container
 * @throws SamzaContainerLaunchException if the package cannot be stat'ed,
 *         credentials cannot be serialized, or the NM rejects the launch
 */
private void startContainer(Path packagePath, Container container, Map<String, String> env, final String cmd) throws SamzaContainerLaunchException {
log.info("starting container {} {} {} {}", new Object[] { packagePath, container, env, cmd });
// TODO: SAMZA-1144 remove the customized approach for package resource and use the common one.
// But keep it now for backward compatibility.
// set the local package so that the containers and app master are provisioned with it
LocalResource packageResource = Records.newRecord(LocalResource.class);
URL packageUrl = ConverterUtils.getYarnUrlFromPath(packagePath);
FileStatus fileStatus;
try {
    fileStatus = packagePath.getFileSystem(yarnConfiguration).getFileStatus(packagePath);
} catch (IOException ioe) {
    log.error("IO Exception when accessing the package status from the filesystem", ioe);
    // Preserve the cause so the real filesystem error is not lost.
    throw new SamzaContainerLaunchException("IO Exception when accessing the package status from the filesystem", ioe);
}
packageResource.setResource(packageUrl);
log.info("set package Resource in YarnContainerRunner for {}", packageUrl);
// Size and timestamp must match the remote file exactly or localization fails.
packageResource.setSize(fileStatus.getLen());
packageResource.setTimestamp(fileStatus.getModificationTime());
packageResource.setType(LocalResourceType.ARCHIVE);
packageResource.setVisibility(LocalResourceVisibility.APPLICATION);
ByteBuffer allTokens;
// copy tokens (copied from dist shell example)
try {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // now remove the AM->RM token so that containers cannot access it
    Iterator<Token<? extends TokenIdentifier>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        TokenIdentifier token = iter.next().decodeIdentifier();
        if (token != null && token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
} catch (IOException ioe) {
    log.error("IOException when writing credentials.", ioe);
    throw new SamzaContainerLaunchException("IO Exception when writing credentials to output buffer");
}
Map<String, LocalResource> localResourceMap = new HashMap<>();
localResourceMap.put("__package", packageResource);
// include the resources from the universal resource configurations
LocalizerResourceMapper resourceMapper = new LocalizerResourceMapper(new LocalizerResourceConfig(config), yarnConfiguration);
localResourceMap.putAll(resourceMapper.getResourceMap());
ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);
context.setEnvironment(env);
context.setTokens(allTokens.duplicate());
context.setCommands(new ArrayList<String>() {
    {
        add(cmd);
    }
});
context.setLocalResources(localResourceMap);
log.debug("setting localResourceMap to {}", localResourceMap);
log.debug("setting context to {}", context);
// NOTE: the original built an unused StartContainerRequest here; NMClient
// takes the container and launch context directly, so it was dead code.
try {
    nmClient.startContainer(container, context);
} catch (YarnException ye) {
    log.error("Received YarnException when starting container: " + container.getId(), ye);
    throw new SamzaContainerLaunchException("Received YarnException when starting container: " + container.getId(), ye);
} catch (IOException ioe) {
    log.error("Received IOException when starting container: " + container.getId(), ioe);
    throw new SamzaContainerLaunchException("Received IOException when starting container: " + container.getId(), ioe);
}
}
Use of org.apache.hadoop.yarn.api.records.URL in the Apache Hive project:
class DagUtils, method createLocalResource.
/*
* Helper method to create a yarn local resource.
*/
/*
 * Helper method to create a yarn local resource.
 *
 * Stats the file on the remote filesystem and wraps it in a LocalResource
 * carrying the given type and visibility.
 *
 * Fails fast (RuntimeException wrapping the IOException) if the file status
 * cannot be read. The original caught the IOException, printed the stack
 * trace, and then dereferenced the null FileStatus — guaranteeing an NPE
 * that hid the real filesystem error.
 */
private LocalResource createLocalResource(FileSystem remoteFs, Path file, LocalResourceType type, LocalResourceVisibility visibility) {
FileStatus fstat;
try {
    fstat = remoteFs.getFileStatus(file);
} catch (IOException e) {
    throw new RuntimeException("Failed to get file status for " + file, e);
}
URL resourceURL = ConverterUtils.getYarnUrlFromPath(file);
long resourceSize = fstat.getLen();
long resourceModificationTime = fstat.getModificationTime();
LOG.info("Resource modification time: " + resourceModificationTime + " for " + file);
LocalResource lr = Records.newRecord(LocalResource.class);
lr.setResource(resourceURL);
lr.setType(type);
lr.setSize(resourceSize);
lr.setVisibility(visibility);
lr.setTimestamp(resourceModificationTime);
return lr;
}
Use of org.apache.hadoop.yarn.api.records.URL in the Apache Hadoop project:
class YARNRunner, method setupLocalResources.
/**
 * Builds the local-resource map for an MR application master launch:
 * the serialized job configuration, the job jar (when one is configured,
 * localized as a PATTERN resource), and the split metadata files.
 *
 * @param jobConf      job configuration (consulted for the job jar path)
 * @param jobSubmitDir staging directory holding the submitted job artifacts
 * @return map from resource name to its LocalResource descriptor
 * @throws IOException if a path cannot be resolved or stat'ed
 */
private Map<String, LocalResource> setupLocalResources(Configuration jobConf, String jobSubmitDir) throws IOException {
Map<String, LocalResource> resources = new HashMap<>();
Path confPath = new Path(jobSubmitDir, MRJobConfig.JOB_CONF_FILE);
Path qualifiedSubmitDir = defaultFileContext.makeQualified(new Path(jobSubmitDir));
URL yarnUrlForJobSubmitDir = URL.fromPath(defaultFileContext.getDefaultFileSystem().resolvePath(qualifiedSubmitDir));
LOG.debug("Creating setup context, jobSubmitDir url is " + yarnUrlForJobSubmitDir);
resources.put(MRJobConfig.JOB_CONF_FILE, createApplicationResource(defaultFileContext, confPath, LocalResourceType.FILE));
String jobJar = jobConf.get(MRJobConfig.JAR);
if (jobJar == null) {
    // Job jar may be null. For e.g, for pipes, the job jar is the hadoop
    // mapreduce jar itself which is already on the classpath.
    LOG.info("Job jar is not present. " + "Not adding any jar to the list of resources.");
} else {
    Path jarPath = new Path(jobJar);
    FileContext jarContext = FileContext.getFileContext(jarPath.toUri(), jobConf);
    // PATTERN resources are partially unpacked; the pattern selects which entries.
    LocalResource jarResource = createApplicationResource(jarContext, jarPath, LocalResourceType.PATTERN);
    jarResource.setPattern(conf.getPattern(JobContext.JAR_UNPACK_PATTERN, JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern());
    resources.put(MRJobConfig.JOB_JAR, jarResource);
}
// TODO gross hack
for (String splitFile : new String[] { MRJobConfig.JOB_SPLIT, MRJobConfig.JOB_SPLIT_METAINFO }) {
    Path splitPath = new Path(jobSubmitDir, splitFile);
    resources.put(MRJobConfig.JOB_SUBMIT_DIR + "/" + splitFile, createApplicationResource(defaultFileContext, splitPath, LocalResourceType.FILE));
}
return resources;
}
Aggregations