
Example 61 with FileContext

use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

the class JobHistoryUtils method ensurePathInDefaultFileSystem.

/**
   * Ensure that the path belongs to the cluster's default file system unless
   * 1. it is already fully qualified,
   * 2. the current job configuration uses the default file system, or
   * 3. it is running from a test case without core-site.xml.
   *
   * @param sourcePath source path
   * @param conf the job configuration
   * @return fully qualified path (if necessary) in the default file system
   */
private static String ensurePathInDefaultFileSystem(String sourcePath, Configuration conf) {
    Path path = new Path(sourcePath);
    FileContext fc = getDefaultFileContext();
    if (fc == null // case 3: no default FileContext, e.g. a test run without core-site.xml
            || fc.getDefaultFileSystem().getUri().toString()
                .equals(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "")) // case 2: job conf already uses the default file system
            || path.toUri().getAuthority() != null
            || path.toUri().getScheme() != null) { // case 1: path is already fully qualified
        return sourcePath;
    }
    return fc.makeQualified(path).toString();
}
Also used : Path(org.apache.hadoop.fs.Path) FileContext(org.apache.hadoop.fs.FileContext)

Example 62 with FileContext

use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

the class LoadGenerator method loadScriptFile.

/**
   * Read a script file of the form: lines of text with a duration in seconds,
   * a read probability and a write probability, separated by whitespace.
   *
   * @param filename Script file
   * @param readLocally if true, read the script from the local file system
   * @return 0 if successful, -1 if not
   * @throws IOException if an error occurs reading the file
   */
protected static int loadScriptFile(String filename, boolean readLocally) throws IOException {
    FileContext fc;
    if (readLocally) {
        // read locally - program is run without MR
        fc = FileContext.getLocalFSFileContext();
    } else {
        // use default file system
        fc = FileContext.getFileContext();
    }
    DataInputStream in = null;
    try {
        in = fc.open(new Path(filename));
    } catch (IOException e) {
        System.err.println("Unable to open scriptFile: " + filename);
        System.exit(-1);
    }
    InputStreamReader inr = new InputStreamReader(in);
    BufferedReader br = new BufferedReader(inr);
    ArrayList<Long> duration = new ArrayList<Long>();
    ArrayList<Double> readProb = new ArrayList<Double>();
    ArrayList<Double> writeProb = new ArrayList<Double>();
    int lineNum = 0;
    String line;
    try {
        while ((line = br.readLine()) != null) {
            lineNum++;
            // skip comments and blank lines
            if (line.startsWith("#") || line.isEmpty())
                continue;
            parseScriptLine(line, duration, readProb, writeProb);
        }
    } catch (IllegalArgumentException e) {
        System.err.println("Line: " + lineNum + ", " + e.getMessage());
        return -1;
    } finally {
        IOUtils.cleanup(LOG, br);
    }
    // Copy vectors to arrays of values, to avoid autoboxing overhead later
    durations = new long[duration.size()];
    readProbs = new double[readProb.size()];
    writeProbs = new double[writeProb.size()];
    for (int i = 0; i < durations.length; i++) {
        durations[i] = duration.get(i);
        readProbs[i] = readProb.get(i);
        writeProbs[i] = writeProb.get(i);
    }
    if (durations[0] == 0)
        System.err.println("Initial duration set to 0.  " + "Will loop until stopped manually.");
    return 0;
}
Also used : Path(org.apache.hadoop.fs.Path) InputStreamReader(java.io.InputStreamReader) ArrayList(java.util.ArrayList) IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) BufferedReader(java.io.BufferedReader) FileContext(org.apache.hadoop.fs.FileContext)

Example 63 with FileContext

use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

the class ViewFsTestSetup method tearDownForViewFsLocalFs.

/**
   * Delete the test directory in the target local file system.
   */
public static void tearDownForViewFsLocalFs(FileContextTestHelper helper) throws Exception {
    FileContext fclocal = FileContext.getLocalFSFileContext();
    Path targetOfTests = helper.getTestRootPath(fclocal);
    fclocal.delete(targetOfTests, true);
}
Also used : Path(org.apache.hadoop.fs.Path) FileContext(org.apache.hadoop.fs.FileContext)

Example 64 with FileContext

use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

the class ViewFsTestSetup method setupForViewFsLocalFs.

/*
   * Return the ViewFs FileContext to be used for tests.
   */
public static FileContext setupForViewFsLocalFs(FileContextTestHelper helper) throws Exception {
    // Create the test root on the local fs - the mount table will point here.
    FileContext fsTarget = FileContext.getLocalFSFileContext();
    Path targetOfTests = helper.getTestRootPath(fsTarget);
    // In case previous test was killed before cleanup
    fsTarget.delete(targetOfTests, true);
    fsTarget.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
    Configuration conf = new Configuration();
    // Set up viewfs link for test dir as described above
    String testDir = helper.getTestRootPath(fsTarget).toUri().getPath();
    linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
    // Set up viewfs link for home dir as described above
    setUpHomeDir(conf, fsTarget);
    // the test path may be relative to working dir - we need to make that work:
    // Set up viewfs link for wd as described above
    String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
    linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");
    FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
    // in case testdir relative to wd.
    fc.setWorkingDirectory(new Path(wdDir));
    Log.getLog().info("Working dir is: " + fc.getWorkingDirectory());
    //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
    return fc;
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FileContext(org.apache.hadoop.fs.FileContext)

Example 65 with FileContext

use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

the class TestJobHistoryParsing method testDiagnosticsForKilledJob.

@Test(timeout = 60000)
public void testDiagnosticsForKilledJob() throws Exception {
    LOG.info("STARTING testDiagnosticsForKilledJob");
    try {
        final Configuration conf = new Configuration();
        conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
        RackResolver.init(conf);
        MRApp app = new MRAppWithHistoryWithJobKilled(2, 1, true, this.getClass().getName(), true);
        app.submit(conf);
        Job job = app.getContext().getAllJobs().values().iterator().next();
        JobId jobId = job.getID();
        app.waitForState(job, JobState.KILLED);
        // make sure all events are flushed
        app.waitForState(Service.STATE.STOPPED);
        JobHistory jobHistory = new JobHistory();
        jobHistory.init(conf);
        HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
        JobHistoryParser parser;
        JobInfo jobInfo;
        synchronized (fileInfo) {
            Path historyFilePath = fileInfo.getHistoryFile();
            FSDataInputStream in = null;
            FileContext fc = null;
            try {
                fc = FileContext.getFileContext(conf);
                in = fc.open(fc.makeQualified(historyFilePath));
            } catch (IOException ioe) {
                LOG.info("Can not open history file: " + historyFilePath, ioe);
                throw (new Exception("Can not open History File"));
            }
            parser = new JobHistoryParser(in);
            jobInfo = parser.parse();
        }
        Exception parseException = parser.getParseException();
        assertNull("Caught an expected exception " + parseException, parseException);
        final List<String> originalDiagnostics = job.getDiagnostics();
        final String historyError = jobInfo.getErrorInfo();
        assertTrue("No original diagnostics for a failed job", originalDiagnostics != null && !originalDiagnostics.isEmpty());
        assertNotNull("No history error info for a failed job ", historyError);
        for (String diagString : originalDiagnostics) {
            assertTrue(historyError.contains(diagString));
        }
        assertTrue("No killed message in diagnostics", historyError.contains(JobImpl.JOB_KILLED_DIAG));
    } finally {
        LOG.info("FINISHED testDiagnosticsForKilledJob");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HistoryFileInfo(org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo) Configuration(org.apache.hadoop.conf.Configuration) IOException(java.io.IOException) IOException(java.io.IOException) JobHistoryParser(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser) JobInfo(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) FileContext(org.apache.hadoop.fs.FileContext) MRApp(org.apache.hadoop.mapreduce.v2.app.MRApp) Test(org.junit.Test)

Aggregations

FileContext (org.apache.hadoop.fs.FileContext): 84
Path (org.apache.hadoop.fs.Path): 71
Test (org.junit.Test): 34
Configuration (org.apache.hadoop.conf.Configuration): 33
IOException (java.io.IOException): 29
File (java.io.File): 16
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 14
FileStatus (org.apache.hadoop.fs.FileStatus): 13
HashMap (java.util.HashMap): 12
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
ArrayList (java.util.ArrayList): 9
FileSystem (org.apache.hadoop.fs.FileSystem): 8
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 8
ExecutorService (java.util.concurrent.ExecutorService): 7
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 7
URISyntaxException (java.net.URISyntaxException): 6
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 6
ExecutionException (java.util.concurrent.ExecutionException): 6
Future (java.util.concurrent.Future): 6
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 6