Use of org.apache.ignite.IgniteFileSystem in project ignite by apache.
The class HadoopClientProtocolSelfTest, method testJobCounters.
/**
 * Tests job counters retrieval.
 *
 * @throws Exception If failed.
 */
public void testJobCounters() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n");
    }

    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    try {
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(TestCountingMapper.class);
        job.setReducerClass(TestCountingReducer.class);
        job.setCombinerClass(TestCountingCombiner.class);

        FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@" + PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path("igfs://" + igfsName + "@" + PATH_OUTPUT));

        job.submit();

        final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);

        assertEquals(0, cntr.getValue());

        cntr.increment(10);

        assertEquals(10, cntr.getValue());

        // Transferring to map phase.
        setupLockFile.delete();

        // Transferring to reduce phase.
        mapLockFile.delete();

        job.waitForCompletion(false);

        assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());

        final Counters counters = job.getCounters();

        assertNotNull("counters cannot be null", counters);
        assertEquals("wrong counters count", 3, counters.countCounters());
        assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
    }
    catch (Throwable t) {
        log.error("Unexpected exception", t);
    }
    finally {
        job.getCluster().close();
    }
}
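The mapper, combiner, and reducer classes wired into this job (TestCountingMapper and friends) are not shown on this page. As a rough illustration only, a mapper that feeds such a user-defined counter could look like the sketch below; the class and enum names, key/value types, and increment amounts are assumptions, not the actual Ignite test classes, and the usual org.apache.hadoop.io and org.apache.hadoop.mapreduce imports are assumed.

// Illustrative enum of user-defined counters, mirroring the TestCounter references above.
enum TestCounterSketch {
    COUNTER1, COUNTER2, COUNTER3
}

// Hypothetical counting mapper: every processed record bumps COUNTER1 via the task context.
public static class CountingMapperSketch extends Mapper<Object, Text, Text, IntWritable> {
    /** Reusable output value. */
    private static final IntWritable ONE = new IntWritable(1);

    /** Reusable output key. */
    private final Text word = new Text();

    /** {@inheritDoc} */
    @Override protected void map(Object key, Text val, Context ctx) throws IOException, InterruptedException {
        // Counter values accumulated here are what findCounter(...) returns on the client side.
        ctx.getCounter(TestCounterSketch.COUNTER1).increment(1);

        word.set(val.toString());

        ctx.write(word, ONE);
    }
}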
Use of org.apache.ignite.IgniteFileSystem in project ignite by apache.
The class HadoopClientProtocolSelfTest, method checkJobSubmit.
/**
 * Tests job submission.
 *
 * @param noCombiners Whether there are no combiners.
 * @param noReducers Whether there are no reducers.
 * @throws Exception If failed.
 */
public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("word");
    }

    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    try {
        job.setJobName(JOB_NAME);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(TestMapper.class);
        job.setReducerClass(TestReducer.class);

        if (!noCombiners)
            job.setCombinerClass(TestCombiner.class);

        if (noReducers)
            job.setNumReduceTasks(0);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TestOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));

        job.submit();

        JobID jobId = job.getJobID();

        // Setup phase.
        JobStatus jobStatus = job.getStatus();

        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);

        assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
        assert jobStatus.getMapProgress() == 0.0f;
        assert jobStatus.getReduceProgress() == 0.0f;

        U.sleep(2100);

        JobStatus recentJobStatus = job.getStatus();

        assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() : "Old=" + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();

        // Transferring to map phase.
        setupLockFile.delete();

        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getSetupProgress());
                }
                catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);

        // Map phase.
        jobStatus = job.getStatus();

        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);

        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
        assert jobStatus.getReduceProgress() == 0.0f;

        U.sleep(2100);

        recentJobStatus = job.getStatus();

        assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() : "Old=" + jobStatus.getMapProgress() + ", new=" + recentJobStatus.getMapProgress();

        // Transferring to reduce phase.
        mapLockFile.delete();

        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getMapProgress());
                }
                catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);

        if (!noReducers) {
            // Reduce phase.
            jobStatus = job.getStatus();

            checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);

            assert jobStatus.getSetupProgress() == 1.0f;
            assert jobStatus.getMapProgress() == 1.0f;
            assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;

            // Ensure that reduce progress increases.
            U.sleep(2100);

            recentJobStatus = job.getStatus();

            assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() : "Old=" + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();

            reduceLockFile.delete();
        }

        job.waitForCompletion(false);

        jobStatus = job.getStatus();

        checkJobStatus(job.getStatus(), jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);

        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() == 1.0f;
        assert jobStatus.getReduceProgress() == 1.0f;

        dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));
    }
    finally {
        job.getCluster().close();
    }
}
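The setupLockFile, mapLockFile and reduceLockFile deletions above gate the job's phases: the test's tasks are expected to block until the corresponding file disappears. The exact mechanism lives in the test's mapper/reducer classes, which are not shown on this page; a minimal sketch of such a gate, assuming plain java.io.File polling (the method name, timeout, and poll interval are illustrative), might be:

/** Blocks the calling task until the given lock file has been deleted by the test thread. */
private static void awaitLockRemoved(File lockFile, long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;

    // Poll the file system; the test side deletes the file to let the phase proceed.
    while (lockFile.exists()) {
        if (System.currentTimeMillis() > deadline)
            throw new IllegalStateException("Lock file was not removed in time: " + lockFile);

        Thread.sleep(50);
    }
}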
Use of org.apache.ignite.IgniteFileSystem in project ignite by apache.
The class HadoopTaskExecutionSelfTest, method prepareFile.
/**
 * @param fileName File name.
 * @param lineCnt Line count.
 * @throws Exception If failed.
 */
private void prepareFile(String fileName, int lineCnt) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(igfsName);

    try (OutputStream os = igfs.create(new IgfsPath(fileName), true)) {
        PrintWriter w = new PrintWriter(new OutputStreamWriter(os));

        for (int i = 0; i < lineCnt; i++)
            w.print("Hello, Hadoop map-reduce!\n");

        w.flush();
    }
}
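For completeness, reading such a file back goes through IgniteFileSystem.open(IgfsPath), which returns a stream that can be wrapped in a regular reader. A hedged companion sketch (the helper name is illustrative and not part of the test; java.io.BufferedReader and InputStreamReader imports assumed):

/** Counts the lines of a previously prepared IGFS file. */
private int countLines(IgniteFileSystem igfs, String fileName) throws Exception {
    int cnt = 0;

    // IgfsInputStream extends InputStream, so the usual reader chain applies.
    try (BufferedReader rdr = new BufferedReader(new InputStreamReader(igfs.open(new IgfsPath(fileName))))) {
        while (rdr.readLine() != null)
            cnt++;
    }

    return cnt;
}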
Use of org.apache.ignite.IgniteFileSystem in project ignite by apache.
The class IgniteKernal, method fileSystem.
/**
 * {@inheritDoc}
 */
@Override public IgniteFileSystem fileSystem(String name) {
    if (name == null)
        throw new IllegalArgumentException("IGFS name cannot be null");

    guard();

    try {
        checkClusterState();

        IgniteFileSystem fs = ctx.igfs().igfs(name);

        if (fs == null)
            throw new IllegalArgumentException("IGFS is not configured: " + name);

        return fs;
    }
    finally {
        unguard();
    }
}
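From the caller's side this implementation is reached through the public Ignite API. A minimal usage sketch, assuming a node with an IGFS instance named "igfs" is already started (the method name, path, and content are illustrative; java.io.OutputStream, java.nio.charset.StandardCharsets, org.apache.ignite.Ignition and org.apache.ignite.igfs.IgfsPath imports assumed):

/** Illustrative caller-side round trip against a configured IGFS instance. */
public static void igfsRoundTrip() throws Exception {
    Ignite ignite = Ignition.ignite();

    // Throws IllegalArgumentException if no IGFS with this name is configured (see above).
    IgniteFileSystem fs = ignite.fileSystem("igfs");

    fs.mkdirs(new IgfsPath("/examples"));

    try (OutputStream os = fs.create(new IgfsPath("/examples/hello.txt"), true)) {
        os.write("hello".getBytes(StandardCharsets.UTF_8));
    }
}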
Use of org.apache.ignite.IgniteFileSystem in project ignite by apache.
The class IgfsStartCacheTest, method testCacheStart.
/**
 * @throws Exception If failed.
 */
public void testCacheStart() throws Exception {
    Ignite g0 = G.start(config(true, 0));

    String dataCacheName = ((IgniteEx) g0).igfsx("igfs").configuration().getDataCacheConfiguration().getName();
    String metaCacheName = ((IgniteEx) g0).igfsx("igfs").configuration().getMetaCacheConfiguration().getName();

    checkIgfsCaches(g0, dataCacheName, metaCacheName);

    Ignite g1 = G.start(config(false, 1));

    checkIgfsCaches(g1, dataCacheName, metaCacheName);

    IgniteFileSystem igfs = g0.fileSystem("igfs");

    igfs.mkdirs(new IgfsPath("/test"));

    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath("/test/test.file"), true)))) {
        for (int i = 0; i < 1000; i++)
            bw.write("test-" + i);
    }
}
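A hedged follow-up sketch of how the written file could be verified from the second node, continuing the method body above; the checks below are illustrative and not part of the original test:

IgniteFileSystem igfs1 = g1.fileSystem("igfs");

// The file created through g0 should be visible through g1, since both nodes share the IGFS caches.
assert igfs1.exists(new IgfsPath("/test/test.file"));

IgfsFile info = igfs1.info(new IgfsPath("/test/test.file"));

assert info != null && info.isFile() && info.length() > 0;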