Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
From the class HadoopMapReduceTest, method testWholeMapReduceExecution.
/**
 * Tests whole job execution with all phases in all combinations of the new and old versions of the API.
 *
 * @throws Exception If failed.
 */
public void testWholeMapReduceExecution() throws Exception {
    IgfsPath inDir = new IgfsPath(PATH_INPUT);

    igfs.mkdirs(inDir);

    IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");

    generateTestFile(inFile.toString(), "red", red, "blue", blue, "green", green, "yellow", yellow);

    for (boolean[] apiMode : getApiModes()) {
        assert apiMode.length == 3;

        boolean useNewMapper = apiMode[0];
        boolean useNewCombiner = apiMode[1];
        boolean useNewReducer = apiMode[2];

        doTest(inFile, useNewMapper, useNewCombiner, useNewReducer);
    }
}
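The loop above relies on getApiModes() to supply every mapper/combiner/reducer API combination. A minimal standalone sketch of what such a provider could look like, assuming it simply enumerates all eight {old API, new API} triples (the actual Ignite test may restrict or order these differently):

import java.util.Arrays;

public class ApiModes {
    /** Hypothetical provider: all 2^3 combinations of the three API flags. */
    static boolean[][] getApiModes() {
        boolean[][] modes = new boolean[8][3];

        for (int i = 0; i < 8; i++) {
            modes[i][0] = (i & 4) != 0; // useNewMapper
            modes[i][1] = (i & 2) != 0; // useNewCombiner
            modes[i][2] = (i & 1) != 0; // useNewReducer
        }

        return modes;
    }

    public static void main(String[] args) {
        for (boolean[] m : getApiModes())
            System.out.println(Arrays.toString(m));
    }
}

Run standalone, this prints eight rows of three flags each, consistent with the assert apiMode.length == 3 check in the loop.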
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
From the class HadoopCommandLineTest, method testHadoopCommandLine.
/**
 * Tests Hadoop command line integration.
 *
 * @throws Exception If failed.
 */
public void testHadoopCommandLine() throws Exception {
    assertEquals(0, executeHadoopCmd("fs", "-ls", "/"));
    assertEquals(0, executeHadoopCmd("fs", "-mkdir", "/input"));
    assertEquals(0, executeHadoopCmd("fs", "-put", new File(testWorkDir, "test-data").getAbsolutePath(), "/input"));

    assertTrue(igfs.exists(new IgfsPath("/input/test-data")));

    assertEquals(0, executeHadoopCmd("jar", examplesJar.getAbsolutePath(), "wordcount", "/input", "/output"));

    IgfsPath path = new IgfsPath("/user/" + System.getProperty("user.name") + "/");

    assertTrue(igfs.exists(path));

    IgfsPath jobStatPath = null;

    // Exactly one job status directory is expected under the user's home.
    for (IgfsPath jobPath : igfs.listPaths(path)) {
        assertNull(jobStatPath);

        jobStatPath = jobPath;
    }

    File locStatFile = new File(testWorkDir, "performance");

    assertEquals(0, executeHadoopCmd("fs", "-get", jobStatPath.toString() + "/performance", locStatFile.toString()));

    long evtCnt = HadoopTestUtils.simpleCheckJobStatFile(new BufferedReader(new FileReader(locStatFile)));

    // This is the minimum number of events for a job with a combiner.
    assertTrue(evtCnt >= 22);

    assertTrue(igfs.exists(new IgfsPath("/output")));

    BufferedReader in = new BufferedReader(new InputStreamReader(igfs.open(new IgfsPath("/output/part-r-00000"))));

    List<String> res = new ArrayList<>();

    String line;

    while ((line = in.readLine()) != null)
        res.add(line);

    Collections.sort(res);

    assertEquals("[blue\t150, green\t200, red\t100, yellow\t50]", res.toString());
}
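executeHadoopCmd(...) is a test helper that runs the hadoop command-line launcher and returns its exit code, which the assertions above compare to 0. A stripped-down sketch of that pattern, assuming the launcher script is on PATH (the real helper in HadoopCommandLineTest also wires up classpath and configuration directories):

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class HadoopCmd {
    /** Runs the hadoop launcher with the given arguments and returns its exit code. */
    static int executeHadoopCmd(String... args) throws IOException, InterruptedException {
        List<String> cmd = new ArrayList<>();

        cmd.add("hadoop"); // Assumption: the launcher script is on PATH.
        cmd.addAll(Arrays.asList(args));

        Process proc = new ProcessBuilder(cmd)
            .inheritIO() // Stream the child's output to the test console.
            .start();

        return proc.waitFor(); // 0 on success, as the assertions above expect.
    }
}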
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
From the class HadoopTasksAllVersionsTest, method testMapTask.
/**
 * Tests map task execution.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("ConstantConditions")
public void testMapTask() throws Exception {
    IgfsPath inDir = new IgfsPath(PATH_INPUT);

    igfs.mkdirs(inDir);

    IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");

    URI inFileUri = URI.create(igfsScheme() + inFile.toString());

    try (PrintWriter pw = new PrintWriter(igfs.create(inFile, true))) {
        pw.println("hello0 world0");
        pw.println("world1 hello1");
    }

    HadoopFileBlock fileBlock1 = new HadoopFileBlock(HOSTS, inFileUri, 0, igfs.info(inFile).length() - 1);

    try (PrintWriter pw = new PrintWriter(igfs.append(inFile, false))) {
        pw.println("hello2 world2");
        pw.println("world3 hello3");
    }

    HadoopFileBlock fileBlock2 = new HadoopFileBlock(HOSTS, inFileUri, fileBlock1.length(),
        igfs.info(inFile).length() - fileBlock1.length());

    HadoopJobEx gridJob = getHadoopJob(igfsScheme() + inFile.toString(), igfsScheme() + PATH_OUTPUT);

    HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock1);

    HadoopTestTaskContext ctx = new HadoopTestTaskContext(taskInfo, gridJob);

    ctx.mockOutput().clear();

    ctx.run();

    assertEquals("hello0,1; world0,1; world1,1; hello1,1", Joiner.on("; ").join(ctx.mockOutput()));

    ctx.mockOutput().clear();

    ctx.taskInfo(new HadoopTaskInfo(HadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock2));

    ctx.run();

    assertEquals("hello2,1; world2,1; world3,1; hello3,1", Joiner.on("; ").join(ctx.mockOutput()));
}
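A small standalone sketch of the split arithmetic used above. Each println writes "helloN worldN" plus a newline, which is 14 bytes assuming a one-byte line separator; the byte counts below are illustrative assumptions, while the test itself reads the real lengths from igfs.info(inFile):

public class SplitArithmetic {
    public static void main(String[] args) {
        long lenAfterCreate = 28; // Assumed: "hello0 world0\n" + "world1 hello1\n".
        long lenAfterAppend = 56; // Assumed: two more 14-byte lines appended.

        // Mirrors: new HadoopFileBlock(HOSTS, uri, 0, len - 1).
        long block1Off = 0, block1Len = lenAfterCreate - 1;

        // Mirrors: new HadoopFileBlock(HOSTS, uri, block1.length(), total - block1.length()).
        long block2Off = block1Len, block2Len = lenAfterAppend - block1Len;

        System.out.println("block1: offset=" + block1Off + ", length=" + block1Len);
        System.out.println("block2: offset=" + block2Off + ", length=" + block2Len);
    }
}

Note that the first block is deliberately one byte short, placing the split boundary mid-record; Hadoop-style line readers assign each line to the split in which it starts, which is consistent with the asserted outputs (all four words of the first two lines come from block 1, the rest from block 2).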
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
From the class IgniteHadoopFileSystemAbstractSelfTest, method testZeroReplicationFactor.
/** @throws Exception If failed. */
public void testZeroReplicationFactor() throws Exception {
    // This test only makes sense in PRIMARY mode.
    if (mode == PRIMARY) {
        Path igfsHome = new Path(PRIMARY_URI);

        Path file = new Path(igfsHome, "someFile");

        try (FSDataOutputStream out = fs.create(file, (short)0)) {
            out.write(new byte[1024 * 1024]);
        }

        IgniteFileSystem igfs = grid(0).fileSystem("igfs");

        IgfsPath filePath = new IgfsPath("/someFile");

        IgfsFile fileInfo = igfs.info(filePath);

        awaitPartitionMapExchange();

        Collection<IgfsBlockLocation> locations = igfs.affinity(filePath, 0, fileInfo.length());

        assertEquals(1, locations.size());

        IgfsBlockLocation location = F.first(locations);

        assertEquals(1, location.nodeIds().size());
    }
}
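The affinity check above generalizes into a small helper. A minimal sketch, using only the IgniteFileSystem API already shown in the test, that asserts every block of a file is placed on exactly one node (the class name and method name are illustrative):

import java.util.Collection;

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsBlockLocation;
import org.apache.ignite.igfs.IgfsFile;
import org.apache.ignite.igfs.IgfsPath;

public class PlacementCheck {
    /** Fails if any block of the file is placed on more than one node. */
    static void assertSingleNodePlacement(IgniteFileSystem igfs, String path) {
        IgfsPath filePath = new IgfsPath(path);
        IgfsFile fileInfo = igfs.info(filePath);

        // Resolve block locations for the whole file, as in the test above.
        Collection<IgfsBlockLocation> locs = igfs.affinity(filePath, 0, fileInfo.length());

        for (IgfsBlockLocation loc : locs) {
            if (loc.nodeIds().size() != 1)
                throw new AssertionError("Block replicated to nodes: " + loc.nodeIds());
        }
    }
}

With replication factor 0 the data is written without redundant copies, so a single owning node per block is exactly what the test expects.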
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
From the class IgniteHadoopFileSystemLoggerSelfTest, method testLogMisc.
/**
 * Tests miscellaneous operations logging.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("TooBroadScope")
public void testLogMisc() throws Exception {
    IgfsLogger log = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);

    String newFile = "/dir3/file.test";
    String file1 = "/dir3/file1.test";
    String file2 = "/dir3/file2.test";

    log.logMakeDirectory(PATH);
    log.logRename(PATH, new IgfsPath(newFile));
    log.logListDirectory(PATH, new String[] {file1, file2});
    log.logDelete(PATH, false);

    log.close();

    checkLog(
        new SB().a(U.jvmPid() + d() + TYPE_DIR_MAKE + d() + PATH_STR_ESCAPED + d() + d(17)).toString(),
        new SB().a(U.jvmPid() + d() + TYPE_RENAME + d() + PATH_STR_ESCAPED + d() + d(15) + newFile + d(2)).toString(),
        new SB().a(U.jvmPid() + d() + TYPE_DIR_LIST + d() + PATH_STR_ESCAPED + d() + d(17) + file1 + DELIM_FIELD_VAL + file2).toString(),
        new SB().a(U.jvmPid() + d() + TYPE_DELETE + d(1) + PATH_STR_ESCAPED + d() + d(16) + 0 + d()).toString());
}
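The checkLog(...) expectations are built with a d(...) helper. A plausible reading, assuming d() emits one field delimiter and d(n) emits n of them to stand in for runs of empty log fields; this is an inferred sketch, not the actual test utility, and the delimiter value is an assumption (DELIM_FIELD_VAL, used between the two listed files, would be the separate delimiter for multiple values within a single field):

public class LogDelims {
    static final String DELIM_FIELD = ";"; // Assumed field delimiter value.

    /** One delimiter: separates two adjacent populated fields. */
    static String d() {
        return d(1);
    }

    /** N consecutive delimiters: skips over empty (unused) fields. */
    static String d(int cnt) {
        StringBuilder sb = new StringBuilder();

        for (int i = 0; i < cnt; i++)
            sb.append(DELIM_FIELD);

        return sb.toString();
    }
}

Under this reading, a record like jvmPid + d() + TYPE_DIR_MAKE + d() + path + d() + d(17) is a fixed-width delimited line whose trailing 17 delimiters cover fields that a directory-make event never populates.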