Example 11 with IgniteFileSystem

Use of org.apache.ignite.IgniteFileSystem in the apache/ignite project.

Source: class HadoopClientProtocolSelfTest, method testJobCounters.

/**
 * Tests job counters retrieval.
 *
 * @throws Exception If failed.
 */
public void testJobCounters() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
    igfs.mkdirs(new IgfsPath(PATH_INPUT));
    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n");
    }
    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
    final Job job = Job.getInstance(conf);
    try {
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(TestCountingMapper.class);
        job.setReducerClass(TestCountingReducer.class);
        job.setCombinerClass(TestCountingCombiner.class);
        FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@" + PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path("igfs://" + igfsName + "@" + PATH_OUTPUT));
        job.submit();
        final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);
        assertEquals(0, cntr.getValue());
        cntr.increment(10);
        assertEquals(10, cntr.getValue());
        // Transferring to map phase.
        setupLockFile.delete();
        // Transferring to reduce phase.
        mapLockFile.delete();
        job.waitForCompletion(false);
        assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());
        final Counters counters = job.getCounters();
        assertNotNull("counters cannot be null", counters);
        assertEquals("wrong counters count", 3, counters.countCounters());
        assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
    } catch (Throwable t) {
        log.error("Unexpected exception", t);

        // Fail explicitly so assertion errors caught above still fail the test.
        fail("Unexpected exception: " + t.getMessage());
    } finally {
        job.getCluster().close();
    }
}
Also used: IgfsPath(org.apache.ignite.igfs.IgfsPath) Path(org.apache.hadoop.fs.Path) Counter(org.apache.hadoop.mapreduce.Counter) Configuration(org.apache.hadoop.conf.Configuration) IgniteFileSystem(org.apache.ignite.IgniteFileSystem) OutputStreamWriter(java.io.OutputStreamWriter) Counters(org.apache.hadoop.mapreduce.Counters) Job(org.apache.hadoop.mapreduce.Job) BufferedWriter(java.io.BufferedWriter)
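
To verify the generated input, the file can be read back through the same API. The following is a minimal sketch, not part of the original test: countLines is a hypothetical helper, and it assumes the igfs instance and paths from the example above. IgniteFileSystem.open() returns an IgfsInputStream, which extends java.io.InputStream.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsInputStream;
import org.apache.ignite.igfs.IgfsPath;

/** Hypothetical helper: counts lines in an IGFS file, e.g. to sanity-check test input. */
static long countLines(IgniteFileSystem igfs, IgfsPath path) throws IOException {
    // open() returns an IgfsInputStream positioned at the start of the file.
    try (IgfsInputStream in = igfs.open(path);
         BufferedReader rdr = new BufferedReader(new InputStreamReader(in))) {
        long cnt = 0;

        while (rdr.readLine() != null)
            cnt++;

        return cnt;
    }
}

For the nine-line input written above, countLines(igfs, new IgfsPath(PATH_INPUT + "/test.file")) would be expected to return 9.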

Example 12 with IgniteFileSystem

Use of org.apache.ignite.IgniteFileSystem in the apache/ignite project.

Source: class HadoopClientProtocolSelfTest, method checkJobSubmit.

/**
 * Test job submission.
 *
 * @param noCombiners Whether to skip setting a combiner class.
 * @param noReducers Whether to run the job with zero reduce tasks.
 * @throws Exception If failed.
 */
public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
    igfs.mkdirs(new IgfsPath(PATH_INPUT));
    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("word");
    }
    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
    final Job job = Job.getInstance(conf);
    try {
        job.setJobName(JOB_NAME);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(TestMapper.class);
        job.setReducerClass(TestReducer.class);
        if (!noCombiners)
            job.setCombinerClass(TestCombiner.class);
        if (noReducers)
            job.setNumReduceTasks(0);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TestOutputFormat.class);
        FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));
        job.submit();
        JobID jobId = job.getJobID();
        // Setup phase.
        JobStatus jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
        assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
        assert jobStatus.getMapProgress() == 0.0f;
        assert jobStatus.getReduceProgress() == 0.0f;
        U.sleep(2100);
        JobStatus recentJobStatus = job.getStatus();
        assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() : "Old=" + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();
        // Transferring to map phase.
        setupLockFile.delete();
        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getSetupProgress());
                } catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);
        // Map phase.
        jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
        assert jobStatus.getReduceProgress() == 0.0f;
        U.sleep(2100);
        recentJobStatus = job.getStatus();
        assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() : "Old=" + jobStatus.getMapProgress() + ", new=" + recentJobStatus.getMapProgress();
        // Transferring to reduce phase.
        mapLockFile.delete();
        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getMapProgress());
                } catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);
        if (!noReducers) {
            // Reduce phase.
            jobStatus = job.getStatus();
            checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
            assert jobStatus.getSetupProgress() == 1.0f;
            assert jobStatus.getMapProgress() == 1.0f;
            assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;
            // Ensure that reduces progress increases.
            U.sleep(2100);
            recentJobStatus = job.getStatus();
            assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() : "Old=" + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();
            reduceLockFile.delete();
        }
        job.waitForCompletion(false);
        jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);
        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() == 1.0f;
        assert jobStatus.getReduceProgress() == 1.0f;
        dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));
    } finally {
        job.getCluster().close();
    }
}
Also used: IgfsPath(org.apache.ignite.igfs.IgfsPath) Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) GridAbsPredicate(org.apache.ignite.internal.util.lang.GridAbsPredicate) IgniteFileSystem(org.apache.ignite.IgniteFileSystem) IOException(java.io.IOException) BufferedWriter(java.io.BufferedWriter) JobStatus(org.apache.hadoop.mapreduce.JobStatus) OutputStreamWriter(java.io.OutputStreamWriter) Job(org.apache.hadoop.mapreduce.Job) JobID(org.apache.hadoop.mapreduce.JobID)
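
The two waitForCondition blocks above differ only in which progress value they poll. Since GridAbsPredicate has a single apply() method, the pattern can be factored into one helper. The sketch below is an illustration rather than the test's actual code; awaitPhaseComplete and its Callable parameter are assumed names:

import java.util.concurrent.Callable;
import org.apache.ignite.internal.util.lang.GridAbsPredicate;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.testframework.GridTestUtils;

/** Hypothetical helper: waits up to 5 seconds for the supplied progress value to reach 1.0. */
private static void awaitPhaseComplete(final Callable<Float> progress) throws Exception {
    assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            try {
                return F.eq(1.0f, progress.call());
            } catch (Exception e) {
                throw new RuntimeException("Unexpected exception.", e);
            }
        }
    }, 5000L);
}

With this helper, the setup-phase wait collapses to awaitPhaseComplete(() -> job.getStatus().getSetupProgress()) on Java 8, and the map-phase wait to awaitPhaseComplete(() -> job.getStatus().getMapProgress()).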

Example 13 with IgniteFileSystem

Use of org.apache.ignite.IgniteFileSystem in the apache/ignite project.

Source: class HadoopTaskExecutionSelfTest, method prepareFile.

/**
 * @param fileName File name.
 * @param lineCnt Line count.
 * @throws Exception If failed.
 */
private void prepareFile(String fileName, int lineCnt) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(igfsName);
    try (OutputStream os = igfs.create(new IgfsPath(fileName), true);
         PrintWriter w = new PrintWriter(new OutputStreamWriter(os))) {
        for (int i = 0; i < lineCnt; i++)
            w.print("Hello, Hadoop map-reduce!\n");
    }
}
Also used: IgfsPath(org.apache.ignite.igfs.IgfsPath) OutputStream(java.io.OutputStream) IgniteFileSystem(org.apache.ignite.IgniteFileSystem) OutputStreamWriter(java.io.OutputStreamWriter) PrintWriter(java.io.PrintWriter)
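
A natural counterpart for tests consuming such a file is a quick metadata check through the same API. This is a sketch, not project code: verifyFile is a hypothetical name and the fixture calls (grid(0), igfsName) are assumed from the test above, while info(), isFile() and length() are part of the public IgniteFileSystem/IgfsFile API.

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsFile;
import org.apache.ignite.igfs.IgfsPath;

/** Hypothetical check: asserts that a prepared IGFS file exists and is non-empty. */
private void verifyFile(String fileName) {
    IgniteFileSystem igfs = grid(0).fileSystem(igfsName);

    // info() returns file metadata, or null if the path does not exist.
    IgfsFile info = igfs.info(new IgfsPath(fileName));

    assert info != null && info.isFile() : "File not found: " + fileName;
    assert info.length() > 0 : "File is empty: " + fileName;
}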

Example 14 with IgniteFileSystem

Use of org.apache.ignite.IgniteFileSystem in the apache/ignite project.

Source: class IgniteKernal, method fileSystem.

/**
 * {@inheritDoc}
 */
@Override
public IgniteFileSystem fileSystem(String name) {
    if (name == null)
        throw new IllegalArgumentException("IGFS name cannot be null");
    guard();
    try {
        checkClusterState();
        IgniteFileSystem fs = ctx.igfs().igfs(name);
        if (fs == null)
            throw new IllegalArgumentException("IGFS is not configured: " + name);
        return fs;
    } finally {
        unguard();
    }
}
Also used: IgniteFileSystem(org.apache.ignite.IgniteFileSystem) CacheConfigurationOverride(org.apache.ignite.internal.processors.cache.CacheConfigurationOverride)
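
From the caller's perspective, the contract above is that fileSystem(name) either returns a running IGFS instance or throws IllegalArgumentException; it never returns null. A minimal usage sketch, assuming a started node with an IGFS named "igfs" as in the other examples (useIgfs is a hypothetical method name):

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.Ignition;
import org.apache.ignite.igfs.IgfsPath;

static void useIgfs() {
    Ignite ignite = Ignition.ignite();

    try {
        // Never null: a missing or null name raises IllegalArgumentException instead.
        IgniteFileSystem fs = ignite.fileSystem("igfs");

        fs.mkdirs(new IgfsPath("/data"));
    } catch (IllegalArgumentException e) {
        // Reached when the name is null or no IGFS with that name is configured.
    }
}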

Example 15 with IgniteFileSystem

Use of org.apache.ignite.IgniteFileSystem in the apache/ignite project.

Source: class IgfsStartCacheTest, method testCacheStart.

/**
 * @throws Exception If failed.
 */
public void testCacheStart() throws Exception {
    Ignite g0 = G.start(config(true, 0));
    String dataCacheName = ((IgniteEx) g0).igfsx("igfs").configuration().getDataCacheConfiguration().getName();
    String metaCacheName = ((IgniteEx) g0).igfsx("igfs").configuration().getMetaCacheConfiguration().getName();
    checkIgfsCaches(g0, dataCacheName, metaCacheName);
    Ignite g1 = G.start(config(false, 1));
    checkIgfsCaches(g1, dataCacheName, metaCacheName);
    IgniteFileSystem igfs = g0.fileSystem("igfs");
    igfs.mkdirs(new IgfsPath("/test"));
    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath("/test/test.file"), true)))) {
        for (int i = 0; i < 1000; i++) bw.write("test-" + i);
    }
}
Also used: IgfsPath(org.apache.ignite.igfs.IgfsPath) Ignite(org.apache.ignite.Ignite) IgniteFileSystem(org.apache.ignite.IgniteFileSystem) OutputStreamWriter(java.io.OutputStreamWriter) BufferedWriter(java.io.BufferedWriter)
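
To extend this test with a read-back check, something like the following could be appended after the write loop. This is a sketch: exists() and open() are public IgniteFileSystem methods, and the BufferedReader/InputStreamReader imports are assumed to be added.

IgfsPath file = new IgfsPath("/test/test.file");

assert igfs.exists(file) : "File was not created";

// Read through the second node to confirm both nodes share the same IGFS caches.
try (BufferedReader rdr = new BufferedReader(
    new InputStreamReader(g1.fileSystem("igfs").open(file)))) {
    // The writer emitted "test-0test-1..." with no newlines, so one readLine() returns it all.
    assert rdr.readLine().startsWith("test-0");
}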

Aggregations

IgniteFileSystem (org.apache.ignite.IgniteFileSystem): 26 usages
IgfsPath (org.apache.ignite.igfs.IgfsPath): 14 usages
OutputStreamWriter (java.io.OutputStreamWriter): 6 usages
Ignite (org.apache.ignite.Ignite): 6 usages
Path (org.apache.hadoop.fs.Path): 5 usages
BufferedWriter (java.io.BufferedWriter): 4 usages
IgfsInputStream (org.apache.ignite.igfs.IgfsInputStream): 4 usages
IgfsOutputStream (org.apache.ignite.igfs.IgfsOutputStream): 4 usages
IOException (java.io.IOException): 3 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 3 usages
IgniteException (org.apache.ignite.IgniteException): 3 usages
IgfsBlockLocation (org.apache.ignite.igfs.IgfsBlockLocation): 3 usages
IgfsFile (org.apache.ignite.igfs.IgfsFile): 3 usages
IgfsMetrics (org.apache.ignite.igfs.IgfsMetrics): 3 usages
PrintWriter (java.io.PrintWriter): 2 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages
CreateFlag (org.apache.hadoop.fs.CreateFlag): 2 usages
Job (org.apache.hadoop.mapreduce.Job): 2 usages
IgniteLogger (org.apache.ignite.IgniteLogger): 2 usages
IgfsProcessorAdapter (org.apache.ignite.internal.processors.igfs.IgfsProcessorAdapter): 2 usages