Use of org.apache.hadoop.fs.RawLocalFileSystem in project elasticsearch by elastic.
Class TestingFs, method wrap.
// wrap hadoop rawlocalfilesystem to behave less crazy
static RawLocalFileSystem wrap(final Path base) {
    final FileSystemProvider baseProvider = base.getFileSystem().provider();
    return new RawLocalFileSystem() {

        private org.apache.hadoop.fs.Path box(Path path) {
            return new org.apache.hadoop.fs.Path(path.toUri());
        }

        private Path unbox(org.apache.hadoop.fs.Path path) {
            return baseProvider.getPath(path.toUri());
        }

        @Override
        protected org.apache.hadoop.fs.Path getInitialWorkingDirectory() {
            return box(base);
        }

        @Override
        public void setPermission(org.apache.hadoop.fs.Path path, FsPermission permission) {
            // no execution, thank you very much!
        }

        // pretend we don't support symlinks (which causes hadoop to want to do crazy things),
        // returning the boolean does not seem to really help, link-related operations are still called.
        @Override
        public boolean supportsSymlinks() {
            return false;
        }

        @Override
        public FileStatus getFileLinkStatus(org.apache.hadoop.fs.Path path) throws IOException {
            return getFileStatus(path);
        }

        @Override
        public org.apache.hadoop.fs.Path getLinkTarget(org.apache.hadoop.fs.Path path) throws IOException {
            return path;
        }

        @Override
        public FileStatus getFileStatus(org.apache.hadoop.fs.Path path) throws IOException {
            BasicFileAttributes attributes;
            try {
                attributes = Files.readAttributes(unbox(path), BasicFileAttributes.class);
            } catch (NoSuchFileException e) {
                // unfortunately, specific exceptions are not guaranteed. don't wrap hadoop over a zip filesystem or something.
                FileNotFoundException fnfe = new FileNotFoundException("File " + path + " does not exist");
                fnfe.initCause(e);
                throw fnfe;
            }
            // we set similar values to raw local filesystem, except we are never a symlink
            long length = attributes.size();
            boolean isDir = attributes.isDirectory();
            int blockReplication = 1;
            long blockSize = getDefaultBlockSize(path);
            long modificationTime = attributes.creationTime().toMillis();
            return new FileStatus(length, isDir, blockReplication, blockSize, modificationTime, path);
        }
    };
}
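A minimal usage sketch follows (the temp directory, variable names, and the close() call are illustrative assumptions, not part of the Elasticsearch source); like any FileSystem, the wrapped instance must be initialized before use:

RawLocalFileSystem fs = wrap(java.nio.file.Files.createTempDirectory("testingfs"));
fs.initialize(org.apache.hadoop.fs.FsConstants.LOCAL_FS_URI, new org.apache.hadoop.conf.Configuration());
// link-related calls now degrade to plain file-status lookups instead of symlink handling
FileStatus status = fs.getFileLinkStatus(fs.getWorkingDirectory());
fs.close();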
Use of org.apache.hadoop.fs.RawLocalFileSystem in project hadoop by apache.
Class TestFileSystemApplicationHistoryStore, method testInitExistingWorkingDirectoryInSafeMode.
@Test
public void testInitExistingWorkingDirectoryInSafeMode() throws Exception {
    LOG.info("Starting testInitExistingWorkingDirectoryInSafeMode");
    tearDown();
    // Set up the file system to inject startup conditions
    FileSystem fs = spy(new RawLocalFileSystem());
    FileStatus fileStatus = Mockito.mock(FileStatus.class);
    doReturn(true).when(fileStatus).isDirectory();
    doReturn(fileStatus).when(fs).getFileStatus(any(Path.class));
    try {
        initAndStartStore(fs);
    } catch (Exception e) {
        Assert.fail("Exception should not be thrown: " + e);
    }
    // Make sure that directory creation was not attempted
    verify(fileStatus, never()).isDirectory();
    verify(fs, times(1)).mkdirs(any(Path.class));
}
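Note the stubbing style here: with a spy, doReturn(...).when(fs) configures the stub without invoking the real method, whereas the more common when(...).thenReturn(...) form would execute the real RawLocalFileSystem.getFileStatus during stubbing. A brief sketch of the distinction (stub values as above):

FileSystem fs = spy(new RawLocalFileSystem());
// safe with spies: the real getFileStatus is never called while stubbing
doReturn(fileStatus).when(fs).getFileStatus(any(Path.class));
// risky with spies: the line below would call the real getFileStatus first,
// and RawLocalFileSystem throws FileNotFoundException for a missing path
// when(fs.getFileStatus(any(Path.class))).thenReturn(fileStatus);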
Use of org.apache.hadoop.fs.RawLocalFileSystem in project hadoop by apache.
Class TestFileSystemApplicationHistoryStore, method setup.
@Before
public void setup() throws Exception {
    fs = new RawLocalFileSystem();
    initAndStartStore(fs);
}
Use of org.apache.hadoop.fs.RawLocalFileSystem in project hadoop by apache.
Class TestFileOutputCommitter, method testConcurrentCommitTaskWithSubDir.
private void testConcurrentCommitTaskWithSubDir(int version) throws Exception {
    final Job job = Job.getInstance();
    FileOutputFormat.setOutputPath(job, outDir);
    final Configuration conf = job.getConfiguration();
    conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
    conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
    conf.setClass("fs.file.impl", RLFS.class, FileSystem.class);
    FileSystem.closeAll();

    final JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
    final FileOutputCommitter amCommitter = new FileOutputCommitter(outDir, jContext);
    amCommitter.setupJob(jContext);

    final TaskAttemptContext[] taCtx = new TaskAttemptContextImpl[2];
    taCtx[0] = new TaskAttemptContextImpl(conf, taskID);
    taCtx[1] = new TaskAttemptContextImpl(conf, taskID1);

    final TextOutputFormat[] tof = new TextOutputFormat[2];
    for (int i = 0; i < tof.length; i++) {
        tof[i] = new TextOutputFormat() {
            @Override
            public Path getDefaultWorkFile(TaskAttemptContext context, String extension) throws IOException {
                final FileOutputCommitter foc = (FileOutputCommitter) getOutputCommitter(context);
                return new Path(new Path(foc.getWorkPath(), SUB_DIR),
                    getUniqueFile(context, getOutputName(context), extension));
            }
        };
    }

    final ExecutorService executor = HadoopExecutors.newFixedThreadPool(2);
    try {
        for (int i = 0; i < taCtx.length; i++) {
            final int taskIdx = i;
            executor.submit(new Callable<Void>() {
                @Override
                public Void call() throws IOException, InterruptedException {
                    final OutputCommitter outputCommitter = tof[taskIdx].getOutputCommitter(taCtx[taskIdx]);
                    outputCommitter.setupTask(taCtx[taskIdx]);
                    final RecordWriter rw = tof[taskIdx].getRecordWriter(taCtx[taskIdx]);
                    writeOutput(rw, taCtx[taskIdx]);
                    outputCommitter.commitTask(taCtx[taskIdx]);
                    return null;
                }
            });
        }
    } finally {
        executor.shutdown();
        while (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
            LOG.info("Awaiting thread termination!");
        }
    }

    amCommitter.commitJob(jContext);
    final RawLocalFileSystem lfs = new RawLocalFileSystem();
    lfs.setConf(conf);
    assertFalse("Must not end up with sub_dir/sub_dir", lfs.exists(new Path(OUT_SUB_DIR, SUB_DIR)));
    // validate output
    validateContent(OUT_SUB_DIR);
    FileUtil.fullyDelete(new File(outDir.toString()));
}
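The RLFS class registered above via conf.setClass("fs.file.impl", RLFS.class, FileSystem.class) is defined elsewhere in TestFileOutputCommitter. A hedged sketch of what that registration requires (the class body here is an assumption, not the actual test code):

// Sketch only: FileSystem instantiates the "fs.file.impl" class reflectively,
// so it must be public with a public no-arg constructor (and static if nested).
public static class RLFS extends RawLocalFileSystem {
    public RLFS() {
    }
    // the real test class overrides RawLocalFileSystem methods here to set up
    // the filesystem-level conditions the concurrent-commit scenario exercises
}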
Use of org.apache.hadoop.fs.RawLocalFileSystem in project hadoop by apache.
Class TestPipeApplication, method testApplication.
/**
 * Tests org.apache.hadoop.mapred.pipes.Application.
 * Exercises internal message types: MessageType.REGISTER_COUNTER, INCREMENT_COUNTER, STATUS, PROGRESS...
 *
 * @throws Throwable
 */
@Test
public void testApplication() throws Throwable {
    JobConf conf = new JobConf();
    RecordReader<FloatWritable, NullWritable> rReader = new Reader();
    // client for test
    File fCommand = getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationStub");
    TestTaskReporter reporter = new TestTaskReporter();
    File[] psw = cleanTokenPasswordFile();
    try {
        conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskName);
        conf.set(MRJobConfig.CACHE_LOCALFILES, fCommand.getAbsolutePath());
        // token for authorization
        Token<AMRMTokenIdentifier> token = new Token<AMRMTokenIdentifier>("user".getBytes(),
            "password".getBytes(), new Text("kind"), new Text("service"));
        TokenCache.setJobToken(token, conf.getCredentials());
        FakeCollector output = new FakeCollector(new Counters.Counter(), new Progress());
        FileSystem fs = new RawLocalFileSystem();
        fs.initialize(FsConstants.LOCAL_FS_URI, conf);
        Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf,
            fs.create(new Path(workSpace.getAbsolutePath() + File.separator + "outfile")),
            IntWritable.class, Text.class, null, null, true);
        output.setWriter(wr);
        conf.set(Submitter.PRESERVE_COMMANDFILE, "true");
        initStdOut(conf);
        Application<WritableComparable<IntWritable>, Writable, IntWritable, Text> application =
            new Application<WritableComparable<IntWritable>, Writable, IntWritable, Text>(
                conf, rReader, output, reporter, IntWritable.class, Text.class);
        application.getDownlink().flush();
        application.getDownlink().mapItem(new IntWritable(3), new Text("txt"));
        application.getDownlink().flush();
        application.waitForFinish();
        wr.close();
        // test getDownlink().mapItem();
        String stdOut = readStdOut(conf);
        assertTrue(stdOut.contains("key:3"));
        assertTrue(stdOut.contains("value:txt"));
        // the reporter should have sent counter and status updates:
        // test MessageType.REGISTER_COUNTER and INCREMENT_COUNTER
        assertEquals(1.0, reporter.getProgress(), 0.01);
        assertNotNull(reporter.getCounter("group", "name"));
        // test status MessageType.STATUS
        assertEquals(reporter.getStatus(), "PROGRESS");
        stdOut = readFile(new File(workSpace.getAbsolutePath() + File.separator + "outfile"));
        // check MessageType.PROGRESS
        assertEquals(0.55f, rReader.getProgress(), 0.001);
        application.getDownlink().close();
        // test MessageType.OUTPUT
        Entry<IntWritable, Text> entry = output.getCollect().entrySet().iterator().next();
        assertEquals(123, entry.getKey().get());
        assertEquals("value", entry.getValue().toString());
        try {
            // try to abort
            application.abort(new Throwable());
            fail();
        } catch (IOException e) {
            // abort should surface the child's failure
            assertEquals("pipe child exception", e.getMessage());
        }
    } finally {
        if (psw != null) {
            // remove password files
            for (File file : psw) {
                file.deleteOnExit();
            }
        }
    }
}
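A pattern these snippets share: each RawLocalFileSystem is constructed directly rather than obtained from the FileSystem cache (note the FileSystem.closeAll() call and fs.file.impl override in the committer test), so it must be configured by hand. A minimal sketch of the two setup styles seen above:

RawLocalFileSystem lfs = new RawLocalFileSystem();
// full initialization against the local URI, as in the pipes test:
lfs.initialize(FsConstants.LOCAL_FS_URI, new Configuration());
// or just wiring in a Configuration, as in the committer test:
// lfs.setConf(new Configuration());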