
Example 41 with IgfsPath

Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.

From the class HadoopClientProtocolMultipleServersSelfTest, method beforeJob.

/**
 * @throws Exception If failed.
 */
private void beforeJob() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
    igfs.clear();
    igfs.mkdirs(new IgfsPath(PATH_INPUT));
    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("word");
    }
}
Also used: IgfsPath (org.apache.ignite.igfs.IgfsPath), IgniteFileSystem (org.apache.ignite.IgniteFileSystem), OutputStreamWriter (java.io.OutputStreamWriter), BufferedWriter (java.io.BufferedWriter)
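
Since beforeJob() only writes the input, a hedged companion sketch may help; it is not part of the Ignite sources and assumes the same grid(0), igfsName, and PATH_INPUT as above. It reads the written file back through the same IGFS API:

import java.io.BufferedReader;
import java.io.InputStreamReader;

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsPath;

/** Hypothetical helper, not in the Ignite sources: reads back the file written by beforeJob(). */
private void verifyJobInput() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    // igfs.open() returns an input stream over the IGFS file created above.
    try (BufferedReader br = new BufferedReader(new InputStreamReader(igfs.open(new IgfsPath(PATH_INPUT + "/test.file"))))) {
        // Single token written by beforeJob().
        assert "word".equals(br.readLine());
    }
}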

Example 42 with IgfsPath

Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.

From the class HadoopTaskExecutionSelfTest, method prepareFile.

/**
 * @param fileName File name.
 * @param lineCnt Line count.
 * @throws Exception If failed.
 */
private void prepareFile(String fileName, int lineCnt) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(igfsName);
    try (OutputStream os = igfs.create(new IgfsPath(fileName), true)) {
        PrintWriter w = new PrintWriter(new OutputStreamWriter(os));
        for (int i = 0; i < lineCnt; i++) w.print("Hello, Hadoop map-reduce!\n");
        w.flush();
    }
}
Also used: IgfsPath (org.apache.ignite.igfs.IgfsPath), OutputStream (java.io.OutputStream), IgniteFileSystem (org.apache.ignite.IgniteFileSystem), OutputStreamWriter (java.io.OutputStreamWriter), PrintWriter (java.io.PrintWriter)
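
Every snippet on this page builds IgfsPath from a raw string. As a brief standalone sketch (not from the test class; it only uses the public IgfsPath constructors and accessors), the parent/child form is often cleaner:

import org.apache.ignite.igfs.IgfsPath;

// IgfsPath is an immutable path value. Besides new IgfsPath(String) there is
// a parent/child constructor, plus name() and parent() accessors.
IgfsPath dir = new IgfsPath("/input");
IgfsPath file = new IgfsPath(dir, "test.file"); // Same as new IgfsPath("/input/test.file").

assert "test.file".equals(file.name());
assert dir.equals(file.parent());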

Example 43 with IgfsPath

Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.

From the class HadoopClientProtocolSelfTest, method dumpIgfs.

/**
 * Dump IGFS content.
 *
 * @param igfs IGFS.
 * @param path Path.
 * @throws Exception If failed.
 */
@SuppressWarnings("ConstantConditions")
private static void dumpIgfs(IgniteFileSystem igfs, IgfsPath path) throws Exception {
    IgfsFile file = igfs.info(path);
    assert file != null;
    System.out.println(file.path());
    if (file.isDirectory()) {
        for (IgfsPath child : igfs.listPaths(path)) dumpIgfs(igfs, child);
    } else {
        try (BufferedReader br = new BufferedReader(new InputStreamReader(igfs.open(path)))) {
            String line = br.readLine();
            while (line != null) {
                System.out.println(line);
                line = br.readLine();
            }
        }
    }
}
Also used: IgfsPath (org.apache.ignite.igfs.IgfsPath), InputStreamReader (java.io.InputStreamReader), BufferedReader (java.io.BufferedReader), IgfsFile (org.apache.ignite.igfs.IgfsFile)
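
Usage is straightforward; checkJobSubmit in Example 45 below invokes it on the job output directory:

// As in checkJobSubmit(): recursively print everything under the output path.
dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));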

Example 44 with IgfsPath

Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.

From the class HadoopClientProtocolSelfTest, method testJobCounters.

/**
 * Tests job counters retrieval.
 *
 * @throws Exception If failed.
 */
public void testJobCounters() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
    igfs.mkdirs(new IgfsPath(PATH_INPUT));
    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n");
    }
    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
    final Job job = Job.getInstance(conf);
    try {
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(TestCountingMapper.class);
        job.setReducerClass(TestCountingReducer.class);
        job.setCombinerClass(TestCountingCombiner.class);
        FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@" + PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path("igfs://" + igfsName + "@" + PATH_OUTPUT));
        job.submit();
        final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);
        assertEquals(0, cntr.getValue());
        cntr.increment(10);
        assertEquals(10, cntr.getValue());
        // Transferring to map phase.
        setupLockFile.delete();
        // Transferring to reduce phase.
        mapLockFile.delete();
        job.waitForCompletion(false);
        assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());
        final Counters counters = job.getCounters();
        assertNotNull("counters cannot be null", counters);
        assertEquals("wrong counters count", 3, counters.countCounters());
        assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
    } catch (Throwable t) {
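        // Note: logging instead of rethrowing means a failed assertion above will not fail the test.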
        log.error("Unexpected exception", t);
    } finally {
        job.getCluster().close();
    }
}
Also used: IgfsPath (org.apache.ignite.igfs.IgfsPath), Path (org.apache.hadoop.fs.Path), Counter (org.apache.hadoop.mapreduce.Counter), Configuration (org.apache.hadoop.conf.Configuration), IgniteFileSystem (org.apache.ignite.IgniteFileSystem), OutputStreamWriter (java.io.OutputStreamWriter), Counters (org.apache.hadoop.mapreduce.Counters), Job (org.apache.hadoop.mapreduce.Job), BufferedWriter (java.io.BufferedWriter)
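
TestCountingMapper, TestCountingReducer, and TestCountingCombiner are defined elsewhere in the test class and are not shown on this page. As a hedged illustration only (the real classes and their exact counter increments differ), a mapper built on the standard Hadoop Counter API could look like this:

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/** Hypothetical sketch, not the real TestCountingMapper: bumps a custom counter per record. */
public static class CountingMapperSketch extends Mapper<LongWritable, Text, Text, IntWritable> {
    /** Reused output value. */
    private static final IntWritable ONE = new IntWritable(1);

    /** {@inheritDoc} */
    @Override protected void map(LongWritable key, Text val, Context ctx) throws IOException, InterruptedException {
        // Enum-based counters (here the TestCounter enum from the test) are aggregated
        // by the framework and surface via job.getCounters() as asserted above.
        ctx.getCounter(TestCounter.COUNTER1).increment(1);

        ctx.write(val, ONE);
    }
}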

Example 45 with IgfsPath

Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.

From the class HadoopClientProtocolSelfTest, method checkJobSubmit.

/**
 * Test job submission.
 *
 * @param noCombiners Whether there are no combiners.
 * @param noReducers Whether there are no reducers.
 * @throws Exception If failed.
 */
public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
    igfs.mkdirs(new IgfsPath(PATH_INPUT));
    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("word");
    }
    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
    final Job job = Job.getInstance(conf);
    try {
        job.setJobName(JOB_NAME);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(TestMapper.class);
        job.setReducerClass(TestReducer.class);
        if (!noCombiners)
            job.setCombinerClass(TestCombiner.class);
        if (noReducers)
            job.setNumReduceTasks(0);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TestOutputFormat.class);
        FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));
        job.submit();
        JobID jobId = job.getJobID();
        // Setup phase.
        JobStatus jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
        assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
        assert jobStatus.getMapProgress() == 0.0f;
        assert jobStatus.getReduceProgress() == 0.0f;
        U.sleep(2100);
        JobStatus recentJobStatus = job.getStatus();
        assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() : "Old=" + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();
        // Transferring to map phase.
        setupLockFile.delete();
        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getSetupProgress());
                } catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);
        // Map phase.
        jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
        assert jobStatus.getReduceProgress() == 0.0f;
        U.sleep(2100);
        recentJobStatus = job.getStatus();
        assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() : "Old=" + jobStatus.getMapProgress() + ", new=" + recentJobStatus.getMapProgress();
        // Transferring to reduce phase.
        mapLockFile.delete();
        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getMapProgress());
                } catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);
        if (!noReducers) {
            // Reduce phase.
            jobStatus = job.getStatus();
            checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
            assert jobStatus.getSetupProgress() == 1.0f;
            assert jobStatus.getMapProgress() == 1.0f;
            assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;
            // Ensure that reduce progress increases.
            U.sleep(2100);
            recentJobStatus = job.getStatus();
            assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() : "Old=" + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();
            reduceLockFile.delete();
        }
        job.waitForCompletion(false);
        jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);
        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() == 1.0f;
        assert jobStatus.getReduceProgress() == 1.0f;
        dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));
    } finally {
        job.getCluster().close();
    }
}
Also used: IgfsPath (org.apache.ignite.igfs.IgfsPath), Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), GridAbsPredicate (org.apache.ignite.internal.util.lang.GridAbsPredicate), IgniteFileSystem (org.apache.ignite.IgniteFileSystem), IOException (java.io.IOException), BufferedWriter (java.io.BufferedWriter), JobStatus (org.apache.hadoop.mapreduce.JobStatus), OutputStreamWriter (java.io.OutputStreamWriter), Job (org.apache.hadoop.mapreduce.Job), JobID (org.apache.hadoop.mapreduce.JobID)
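
One style note on the two anonymous GridAbsPredicate blocks above: the interface has a single apply() method, so on Java 8+ the same waits can be written as lambdas. A behavior-equivalent sketch of the first wait:

// Equivalent to the setup-progress wait above, assuming Java 8+.
assert GridTestUtils.waitForCondition(() -> {
    try {
        return F.eq(1.0f, job.getStatus().getSetupProgress());
    } catch (Exception e) {
        throw new RuntimeException("Unexpected exception.", e);
    }
}, 5000L);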

Aggregations

IgfsPath (org.apache.ignite.igfs.IgfsPath): 161
IgfsOutputStream (org.apache.ignite.igfs.IgfsOutputStream): 23
IOException (java.io.IOException): 22
ArrayList (java.util.ArrayList): 15
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 14
HashMap (java.util.HashMap): 13
IgniteException (org.apache.ignite.IgniteException): 13
IgniteFileSystem (org.apache.ignite.IgniteFileSystem): 13
IgfsFile (org.apache.ignite.igfs.IgfsFile): 13
IgfsException (org.apache.ignite.igfs.IgfsException): 12
IgniteUuid (org.apache.ignite.lang.IgniteUuid): 11
IgfsBlockLocation (org.apache.ignite.igfs.IgfsBlockLocation): 10
IgfsInputStream (org.apache.ignite.igfs.IgfsInputStream): 10
Map (java.util.Map): 9
Path (org.apache.hadoop.fs.Path): 9
IgfsPathNotFoundException (org.apache.ignite.igfs.IgfsPathNotFoundException): 9
FileNotFoundException (java.io.FileNotFoundException): 6
OutputStream (java.io.OutputStream): 6
IgfsDirectoryNotEmptyException (org.apache.ignite.igfs.IgfsDirectoryNotEmptyException): 6
IgfsParentNotDirectoryException (org.apache.ignite.igfs.IgfsParentNotDirectoryException): 6