Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
The class JobImpl, method loadConfFile.
@Override
public Configuration loadConfFile() throws IOException {
  Path confPath = getConfFile();
  FileContext fc = FileContext.getFileContext(confPath.toUri(), conf);
  Configuration jobConf = new Configuration(false);
  jobConf.addResource(fc.open(confPath), confPath.toString());
  return jobConf;
}
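For context, here is a minimal sketch (not from the Hadoop source) of how a caller might consume this FileContext-backed loader through the Job interface; the property name is only an example.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.app.job.Job;

public class LoadConfExample {
  /**
   * Hypothetical caller (e.g. a handler inside the MR AppMaster): loads the
   * submitted job.xml via the FileContext-backed loadConfFile() and reads one
   * property from it.
   */
  static String readReducesSetting(Job job) throws IOException {
    // The returned Configuration holds only the job.xml contents, because
    // loadConfFile() builds it with 'new Configuration(false)' (no defaults).
    Configuration jobConf = job.loadConfFile();
    return jobConf.get("mapreduce.job.reduces", "<not set>");
  }
}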
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
The class MockJobs, method newJob.
public static Job newJob(ApplicationId appID, int i, int n, int m,
    Path confFile, boolean hasFailedTasks) {
  final JobId id = newJobID(appID, i);
  final String name = newJobName();
  final JobReport report = newJobReport(id);
  final Map<TaskId, Task> tasks = newTasks(id, n, m, hasFailedTasks);
  final TaskCount taskCount = getTaskCount(tasks.values());
  final Counters counters = getCounters(tasks.values());
  final Path configFile = confFile;
  Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
  final Configuration conf = new Configuration();
  conf.set(JobACL.VIEW_JOB.getAclName(), "testuser");
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  JobACLsManager aclsManager = new JobACLsManager(conf);
  tmpJobACLs = aclsManager.constructJobACLs(conf);
  final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
  return new Job() {
    @Override
    public JobId getID() {
      return id;
    }
    @Override
    public String getName() {
      return name;
    }
    @Override
    public JobState getState() {
      return report.getJobState();
    }
    @Override
    public JobReport getReport() {
      return report;
    }
    @Override
    public float getProgress() {
      return 0;
    }
    @Override
    public Counters getAllCounters() {
      return counters;
    }
    @Override
    public Map<TaskId, Task> getTasks() {
      return tasks;
    }
    @Override
    public Task getTask(TaskId taskID) {
      return tasks.get(taskID);
    }
    @Override
    public int getTotalMaps() {
      return taskCount.maps;
    }
    @Override
    public int getTotalReduces() {
      return taskCount.reduces;
    }
    @Override
    public int getCompletedMaps() {
      return taskCount.completedMaps;
    }
    @Override
    public int getCompletedReduces() {
      return taskCount.completedReduces;
    }
    @Override
    public boolean isUber() {
      return false;
    }
    @Override
    public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
        int fromEventId, int maxEvents) {
      return null;
    }
    @Override
    public TaskCompletionEvent[] getMapAttemptCompletionEvents(
        int startIndex, int maxEvents) {
      return null;
    }
    @Override
    public Map<TaskId, Task> getTasks(TaskType taskType) {
      throw new UnsupportedOperationException("Not supported yet.");
    }
    @Override
    public List<String> getDiagnostics() {
      return Collections.<String>emptyList();
    }
    @Override
    public boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
      return true;
    }
    @Override
    public String getUserName() {
      return "mock";
    }
    @Override
    public String getQueueName() {
      return "mockqueue";
    }
    @Override
    public Path getConfFile() {
      return configFile;
    }
    @Override
    public Map<JobACL, AccessControlList> getJobACLs() {
      return jobACLs;
    }
    @Override
    public List<AMInfo> getAMInfos() {
      List<AMInfo> amInfoList = new LinkedList<AMInfo>();
      amInfoList.add(createAMInfo(1));
      amInfoList.add(createAMInfo(2));
      return amInfoList;
    }
    @Override
    public Configuration loadConfFile() throws IOException {
      FileContext fc = FileContext.getFileContext(configFile.toUri(), conf);
      Configuration jobConf = new Configuration(false);
      jobConf.addResource(fc.open(configFile), configFile.toString());
      return jobConf;
    }
    @Override
    public void setQueueName(String queueName) {
      // do nothing
    }
    @Override
    public void setJobPriority(Priority priority) {
      // do nothing
    }
  };
}
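To show how such a mock might be consumed, here is a hypothetical test fragment (not from the Hadoop source). It assumes MockJobs is visible on the test classpath and that the conf file path exists on the local file system.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class MockJobExample {
  public static void main(String[] args) throws Exception {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    // Hypothetical conf file; it must exist for loadConfFile() to succeed.
    Path confFile = new Path("file:///tmp/job.xml");
    // 4 map tasks, 1 reduce task, no failed tasks.
    Job job = MockJobs.newJob(appId, 0, 4, 1, confFile, false);
    Configuration jobConf = job.loadConfFile();
    System.out.println(job.getID() + " loaded " + jobConf.size()
        + " properties from " + job.getConfFile());
  }
}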
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
The class LocalDistributedCacheManager, method setup.
/**
 * Set up the distributed cache by localizing the resources, and updating
 * the configuration with references to the localized resources.
 * @param conf the job configuration to update with the localized resource locations
 * @throws IOException if a resource cannot be localized
 */
public void setup(JobConf conf) throws IOException {
  File workDir = new File(System.getProperty("user.dir"));
  // Generate YARN local resources objects corresponding to the distributed
  // cache configuration
  Map<String, LocalResource> localResources = new LinkedHashMap<String, LocalResource>();
  MRApps.setupDistributedCache(conf, localResources);
  // Generating unique numbers for FSDownload.
  AtomicLong uniqueNumberGenerator = new AtomicLong(System.currentTimeMillis());
  // Find which resources are to be put on the local classpath
  Map<String, Path> classpaths = new HashMap<String, Path>();
  Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
  if (archiveClassPaths != null) {
    for (Path p : archiveClassPaths) {
      classpaths.put(p.toUri().getPath().toString(), p);
    }
  }
  Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
  if (fileClassPaths != null) {
    for (Path p : fileClassPaths) {
      classpaths.put(p.toUri().getPath().toString(), p);
    }
  }
  // Localize the resources
  LocalDirAllocator localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
  FileContext localFSFileContext = FileContext.getLocalFSFileContext();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  ExecutorService exec = null;
  try {
    ThreadFactory tf = new ThreadFactoryBuilder()
        .setNameFormat("LocalDistributedCacheManager Downloader #%d").build();
    exec = HadoopExecutors.newCachedThreadPool(tf);
    Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
    Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
    for (LocalResource resource : localResources.values()) {
      Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
          new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())),
          resource);
      Future<Path> future = exec.submit(download);
      resourcesToPaths.put(resource, future);
    }
    for (Entry<String, LocalResource> entry : localResources.entrySet()) {
      LocalResource resource = entry.getValue();
      Path path;
      try {
        path = resourcesToPaths.get(resource).get();
      } catch (InterruptedException e) {
        throw new IOException(e);
      } catch (ExecutionException e) {
        throw new IOException(e);
      }
      String pathString = path.toUri().toString();
      String link = entry.getKey();
      String target = new File(path.toUri()).getPath();
      symlink(workDir, target, link);
      if (resource.getType() == LocalResourceType.ARCHIVE) {
        localArchives.add(pathString);
      } else if (resource.getType() == LocalResourceType.FILE) {
        localFiles.add(pathString);
      } else if (resource.getType() == LocalResourceType.PATTERN) {
        // PATTERN is not currently used in local mode
        throw new IllegalArgumentException("Resource type PATTERN is not "
            + "implemented yet. " + resource.getResource());
      }
      Path resourcePath;
      try {
        resourcePath = resource.getResource().toPath();
      } catch (URISyntaxException e) {
        throw new IOException(e);
      }
      LOG.info(String.format("Localized %s as %s", resourcePath, path));
      String cp = resourcePath.toUri().getPath();
      if (classpaths.keySet().contains(cp)) {
        localClasspaths.add(path.toUri().getPath().toString());
      }
    }
  } finally {
    if (exec != null) {
      exec.shutdown();
    }
  }
  // Update the configuration object with localized data.
  if (!localArchives.isEmpty()) {
    conf.set(MRJobConfig.CACHE_LOCALARCHIVES,
        StringUtils.arrayToString(localArchives.toArray(new String[localArchives.size()])));
  }
  if (!localFiles.isEmpty()) {
    // Size the array from localFiles so toArray() does not leave trailing nulls.
    conf.set(MRJobConfig.CACHE_LOCALFILES,
        StringUtils.arrayToString(localFiles.toArray(new String[localFiles.size()])));
  }
  setupCalled = true;
}
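The core of the method above is the FSDownload call driven by an executor. Below is a minimal sketch of that download step, reduced to one resource and a single-threaded executor; the class and method names are illustrative, and the Configuration, LocalResource and destination directory are assumed to be prepared elsewhere.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.util.FSDownload;

public class SingleResourceLocalizer {
  /** Downloads one LocalResource into destDir and returns the localized path. */
  public static Path localize(Configuration conf, LocalResource resource, Path destDir)
      throws IOException {
    FileContext localFC = FileContext.getLocalFSFileContext();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    ExecutorService exec = Executors.newSingleThreadExecutor();
    try {
      Callable<Path> download = new FSDownload(localFC, ugi, conf, destDir, resource);
      return exec.submit(download).get(); // blocks until the copy (and any unpack) finishes
    } catch (InterruptedException | ExecutionException e) {
      throw new IOException(e);
    } finally {
      exec.shutdown();
    }
  }
}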
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
The class JobHistoryUtils, method getDefaultFileContext.
/**
 * Get the default FileContext for the cluster (used to keep the history
 * done/staging locations consistent across different FileContexts).
 *
 * @return the default FileContext, or null if fs.defaultFS is set only by core-default.xml
 */
private static FileContext getDefaultFileContext() {
  // If FS_DEFAULT_NAME_KEY was set solely by core-default.xml then we ignore
  // it. This prevents defaulting history paths to the file system specified
  // by core-default.xml, which would not make sense in any case. A test case
  // that wants to exploit this behaviour should create a core-site.xml.
  FileContext fc = null;
  Configuration defaultConf = new Configuration();
  String[] sources;
  sources = defaultConf.getPropertySources(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
  if (sources != null
      && (!Arrays.asList(sources).contains("core-default.xml") || sources.length > 1)) {
    try {
      fc = FileContext.getFileContext(defaultConf);
      LOG.info("Default file system [" + fc.getDefaultFileSystem().getUri() + "]");
    } catch (UnsupportedFileSystemException e) {
      LOG.error("Unable to create default file context ["
          + defaultConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) + "]", e);
    }
  } else {
    LOG.info("Default file system is set solely by core-default.xml, therefore - ignoring");
  }
  return fc;
}
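The interesting piece here is Configuration.getPropertySources(), which reports which resource supplied a key. A small, self-contained illustration of that check (not from the Hadoop source; class and method names are made up):

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class DefaultFsSourceCheck {
  /** Returns true if fs.defaultFS came from somewhere other than core-default.xml. */
  public static boolean isExplicitlyConfigured(Configuration conf) {
    String[] sources =
        conf.getPropertySources(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
    // Explicit if any source other than the built-in core-default.xml contributed the value.
    return sources != null
        && (!Arrays.asList(sources).contains("core-default.xml") || sources.length > 1);
  }
}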
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
The class JobHistoryUtils, method getPreviousJobHistoryPath.
public static Path getPreviousJobHistoryPath(Configuration conf,
    ApplicationAttemptId applicationAttemptId) throws IOException {
  String jobId =
      TypeConverter.fromYarn(applicationAttemptId.getApplicationId()).toString();
  String jobhistoryDir =
      JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
  Path histDirPath = FileContext.getFileContext(conf).makeQualified(new Path(jobhistoryDir));
  FileContext fc = FileContext.getFileContext(histDirPath.toUri(), conf);
  return fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(histDirPath, jobId,
      (applicationAttemptId.getAttemptId() - 1)));
}
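As a hedged usage sketch (not from the Hadoop source): a caller such as an AppMaster recovering from a previous attempt could resolve the prior attempt's partial history file and open it through the same FileContext machinery. The wrapper class is hypothetical.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;

public class PreviousHistoryReader {
  /**
   * Opens the job-history file written by the previous AM attempt,
   * e.g. so its events can be replayed during recovery.
   */
  public static FSDataInputStream openPreviousHistory(Configuration conf,
      ApplicationAttemptId attemptId) throws IOException {
    Path prevHistory = JobHistoryUtils.getPreviousJobHistoryPath(conf, attemptId);
    // Resolve the FileContext from the path's own scheme and open the file.
    FileContext fc = FileContext.getFileContext(prevHistory.toUri(), conf);
    return fc.open(prevHistory);
  }
}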