Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
The class HadoopClientProtocolMultipleServersSelfTest, method beforeJob:
/**
 * @throws Exception If failed.
 */
private void beforeJob() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    igfs.clear();

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(
        new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("word");
    }
}
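For orientation, the same IgfsPath write pattern can be exercised outside the test harness. The sketch below is illustrative only: the Ignition.ignite() lookup and the IGFS instance name "igfs" are assumptions, not part of the test above.

// Minimal sketch: create a directory and write a file through IGFS.
// Assumes a running Ignite node with an IGFS instance named "igfs".
IgniteFileSystem igfs = Ignition.ignite().fileSystem("igfs");

IgfsPath dir = new IgfsPath("/input");
igfs.mkdirs(dir);

// IgfsPath(parent, child) resolves a child path under an existing path.
IgfsPath file = new IgfsPath(dir, "test.file");

try (Writer w = new OutputStreamWriter(igfs.create(file, /*overwrite*/ true))) {
    w.write("word");
}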
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
The class HadoopClientProtocolSelfTest, method testJobCounters:
/**
 * Tests job counters retrieval.
 *
 * @throws Exception If failed.
 */
public void testJobCounters() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(
        new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n");
    }

    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    try {
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(TestCountingMapper.class);
        job.setReducerClass(TestCountingReducer.class);
        job.setCombinerClass(TestCountingCombiner.class);

        FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@" + PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path("igfs://" + igfsName + "@" + PATH_OUTPUT));

        job.submit();

        final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);

        assertEquals(0, cntr.getValue());

        cntr.increment(10);

        assertEquals(10, cntr.getValue());

        // Transferring to map phase.
        setupLockFile.delete();

        // Transferring to reduce phase.
        mapLockFile.delete();

        job.waitForCompletion(false);

        assertEquals("Job must end successfully.", JobStatus.State.SUCCEEDED, job.getStatus().getState());

        final Counters counters = job.getCounters();

        assertNotNull("Counters cannot be null.", counters);
        assertEquals("Wrong counters count.", 3, counters.countCounters());
        assertEquals("Wrong counter value.", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
        assertEquals("Wrong counter value.", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
        assertEquals("Wrong counter value.", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
    } catch (Throwable t) {
        log.error("Unexpected exception", t);

        // Rethrow so that assertion failures are not silently swallowed.
        throw new AssertionError("Job execution failed.", t);
    } finally {
        job.getCluster().close();
    }
}
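The TestCountingMapper, TestCountingReducer, TestCountingCombiner and TestCounter types referenced above are defined elsewhere in the test class and not shown on this page. As a rough sketch of the shape such a mapper might take (the real implementation in Ignite may differ; TestCounter is assumed here to be an enum with members COUNTER1 through COUNTER3):

// Hypothetical sketch of a mapper that increments a custom enum counter per record.
public static class TestCountingMapper extends Mapper<Object, Text, Text, IntWritable> {
    @Override protected void map(Object key, Text val, Context ctx)
        throws IOException, InterruptedException {
        // Each processed record bumps COUNTER1 by one.
        ctx.getCounter(TestCounter.COUNTER1).increment(1);

        ctx.write(val, new IntWritable(1));
    }
}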
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
The class HadoopClientProtocolSelfTest, method checkJobSubmit:
/**
 * Tests job submission.
 *
 * @param noCombiners Whether there are no combiners.
 * @param noReducers Whether there are no reducers.
 * @throws Exception If failed.
 */
public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(
        new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("word");
    }

    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    try {
        job.setJobName(JOB_NAME);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(TestMapper.class);
        job.setReducerClass(TestReducer.class);

        if (!noCombiners)
            job.setCombinerClass(TestCombiner.class);

        if (noReducers)
            job.setNumReduceTasks(0);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TestOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));

        job.submit();

        JobID jobId = job.getJobID();

        // Setup phase.
        JobStatus jobStatus = job.getStatus();

        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);

        assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
        assert jobStatus.getMapProgress() == 0.0f;
        assert jobStatus.getReduceProgress() == 0.0f;

        U.sleep(2100);

        JobStatus recentJobStatus = job.getStatus();

        assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() :
            "Old=" + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();

        // Transferring to map phase.
        setupLockFile.delete();

        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getSetupProgress());
                } catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);

        // Map phase.
        jobStatus = job.getStatus();

        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);

        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
        assert jobStatus.getReduceProgress() == 0.0f;

        U.sleep(2100);

        recentJobStatus = job.getStatus();

        assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() :
            "Old=" + jobStatus.getMapProgress() + ", new=" + recentJobStatus.getMapProgress();

        // Transferring to reduce phase.
        mapLockFile.delete();

        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getMapProgress());
                } catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);

        if (!noReducers) {
            // Reduce phase.
            jobStatus = job.getStatus();

            checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);

            assert jobStatus.getSetupProgress() == 1.0f;
            assert jobStatus.getMapProgress() == 1.0f;
            assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;

            // Ensure that reduce progress increases.
            U.sleep(2100);

            recentJobStatus = job.getStatus();

            assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() :
                "Old=" + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();

            reduceLockFile.delete();
        }

        job.waitForCompletion(false);

        jobStatus = job.getStatus();

        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);

        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() == 1.0f;
        assert jobStatus.getReduceProgress() == 1.0f;

        dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));
    } finally {
        job.getCluster().close();
    }
}
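The setupLockFile, mapLockFile and reduceLockFile fields above let the test hold each job phase open until the driver deletes the corresponding file. The waiting side is not shown on this page; a plausible minimal shape, assuming the tasks simply poll for the file's removal, is:

// Hypothetical sketch: a task blocks until the test driver deletes its lock file.
private static void awaitLockRemoved(File lockFile) throws InterruptedException {
    while (lockFile.exists())
        Thread.sleep(100);
}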
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
The class HadoopClientProtocolSelfTest, method dumpIgfs:
/**
 * Dump IGFS content.
 *
 * @param igfs IGFS.
 * @param path Path.
 * @throws Exception If failed.
 */
@SuppressWarnings("ConstantConditions")
private static void dumpIgfs(IgniteFileSystem igfs, IgfsPath path) throws Exception {
    IgfsFile file = igfs.info(path);

    assert file != null;

    System.out.println(file.path());

    if (file.isDirectory()) {
        for (IgfsPath child : igfs.listPaths(path))
            dumpIgfs(igfs, child);
    } else {
        try (BufferedReader br = new BufferedReader(new InputStreamReader(igfs.open(path)))) {
            String line;

            while ((line = br.readLine()) != null)
                System.out.println(line);
        }
    }
}
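dumpIgfs is invoked at the end of checkJobSubmit above to print the job output directory. It can equally be pointed at the file system root; the IGFS instance name "igfs" below is an illustrative assumption:

// Dump the entire IGFS namespace, starting from the root path.
IgniteFileSystem igfs = Ignition.ignite().fileSystem("igfs");

dumpIgfs(igfs, new IgfsPath("/"));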
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
The class IgniteHadoopFileSystem, method open:
/** {@inheritDoc} */
@Override public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);

        HadoopIgfsStreamDelegate stream = seqReadsBeforePrefetchOverride ?
            rmtClient.open(path, seqReadsBeforePrefetch) : rmtClient.open(path);

        long logId = -1;

        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logOpen(logId, path, bufSize, stream.length());
        }

        if (LOG.isDebugEnabled())
            LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() +
                ", path=" + path + ", bufSize=" + bufSize + ']');

        HadoopIgfsInputStream igfsIn = new HadoopIgfsInputStream(stream, stream.length(), bufSize, LOG, clientLog, logId);

        if (LOG.isDebugEnabled())
            LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');

        return new FSDataInputStream(igfsIn);
    } finally {
        leaveBusy();
    }
}
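On the client side this override is reached through the standard Hadoop FileSystem API. A minimal read sketch, assuming an IGFS instance named "igfs" registered under the igfs:// scheme and a 4 KB buffer (both illustrative):

// Read a file via the Ignite Hadoop file system; fs.open(...) ends up in open() above.
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(URI.create("igfs://igfs@/"), conf);

try (FSDataInputStream in = fs.open(new Path("/input/test.file"), 4096)) {
    byte[] buf = new byte[4096];

    // Assumes a non-empty file; a full reader would loop until read returns -1.
    int read = in.read(buf);

    System.out.println(new String(buf, 0, read, StandardCharsets.UTF_8));
}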