use of org.apache.hadoop.fs.RawLocalFileSystem in project hadoop by apache.
the class TestPipeApplication method testRunner.
/**
 * Test PipesMapRunner: verifies the transfer of data from the reader.
 *
 * @throws Exception
 */
@Test
public void testRunner() throws Exception {
  // clean up old password files
  File[] psw = cleanTokenPasswordFile();
  try {
    RecordReader<FloatWritable, NullWritable> rReader = new ReaderPipesMapRunner();
    JobConf conf = new JobConf();
    conf.set(Submitter.IS_JAVA_RR, "true");
    // for stdout and stderr
    conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskName);
    CombineOutputCollector<IntWritable, Text> output = new CombineOutputCollector<IntWritable, Text>(
        new Counters.Counter(), new Progress());
    FileSystem fs = new RawLocalFileSystem();
    fs.initialize(FsConstants.LOCAL_FS_URI, conf);
    Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf,
        fs.create(new Path(workSpace + File.separator + "outfile")),
        IntWritable.class, Text.class, null, null, true);
    output.setWriter(wr);
    // stub for the client
    File fCommand = getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationRunnableStub");
    conf.set(MRJobConfig.CACHE_LOCALFILES, fCommand.getAbsolutePath());
    // token for authorization
    Token<AMRMTokenIdentifier> token = new Token<AMRMTokenIdentifier>("user".getBytes(),
        "password".getBytes(), new Text("kind"), new Text("service"));
    TokenCache.setJobToken(token, conf.getCredentials());
    conf.setBoolean(MRJobConfig.SKIP_RECORDS, true);
    TestTaskReporter reporter = new TestTaskReporter();
    PipesMapRunner<FloatWritable, NullWritable, IntWritable, Text> runner =
        new PipesMapRunner<FloatWritable, NullWritable, IntWritable, Text>();
    initStdOut(conf);
    runner.configure(conf);
    runner.run(rReader, output, reporter);
    String stdOut = readStdOut(conf);
    // check part of the transferred data; the client's stdout file is shared
    // between the client and the test
    // check the protocol version
    assertTrue(stdOut.contains("CURRENT_PROTOCOL_VERSION:0"));
    // check the key and value classes
    assertTrue(stdOut.contains("Key class:org.apache.hadoop.io.FloatWritable"));
    assertTrue(stdOut.contains("Value class:org.apache.hadoop.io.NullWritable"));
    // verify that all data from the reader has been sent
    assertTrue(stdOut.contains("value:0.0"));
    assertTrue(stdOut.contains("value:9.0"));
  } finally {
    if (psw != null) {
      // remove password files
      for (File file : psw) {
        file.deleteOnExit();
      }
    }
  }
}
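The test above uses RawLocalFileSystem the way most snippets on this page do: the class is instantiated directly and then initialized with a file:/// URI before any I/O. A minimal sketch of that pattern in isolation, assuming only the Hadoop common APIs shown above (the class name and output path are hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class RawLocalFileSystemDemo {

  public static void main(String[] args) throws IOException {
    // Unlike FileSystem.get(), RawLocalFileSystem is constructed directly
    // and must be initialized with a URI and Configuration before use.
    RawLocalFileSystem fs = new RawLocalFileSystem();
    fs.initialize(FsConstants.LOCAL_FS_URI, new Configuration());
    // /tmp/raw-local-demo is a hypothetical path for illustration only.
    try (FSDataOutputStream out = fs.create(new Path("/tmp/raw-local-demo"))) {
      out.writeBytes("hello\n");
    }
  }
}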
use of org.apache.hadoop.fs.RawLocalFileSystem in project accumulo by apache.
the class HadoopLogCloser method close.
@Override
public long close(AccumuloConfiguration conf, VolumeManager fs, Path source) throws IOException {
  FileSystem ns = fs.getVolumeByPath(source).getFileSystem();
  // if path points to a viewfs path, then resolve to underlying filesystem
  if (ViewFSUtils.isViewFS(ns)) {
    Path newSource = ns.resolvePath(source);
    if (!newSource.equals(source) && newSource.toUri().getScheme() != null) {
      ns = newSource.getFileSystem(CachedConfiguration.getInstance());
      source = newSource;
    }
  }
  if (ns instanceof DistributedFileSystem) {
    DistributedFileSystem dfs = (DistributedFileSystem) ns;
    try {
      if (!dfs.recoverLease(source)) {
        log.info("Waiting for file to be closed {}", source.toString());
        return conf.getTimeInMillis(Property.MASTER_LEASE_RECOVERY_WAITING_PERIOD);
      }
      log.info("Recovered lease on {}", source.toString());
    } catch (FileNotFoundException ex) {
      throw ex;
    } catch (Exception ex) {
      log.warn("Error recovering lease on " + source.toString(), ex);
      ns.append(source).close();
      log.info("Recovered lease on {} using append", source.toString());
    }
  } else if (ns instanceof LocalFileSystem || ns instanceof RawLocalFileSystem) {
    // ignore
  } else {
    throw new IllegalStateException("Don't know how to recover a lease for " + ns.getClass().getName());
  }
  return 0;
}
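Note the contract here: close() returns 0 once the lease is recovered, or a wait period when HDFS has not yet closed the file, so callers are expected to poll. A minimal sketch of the underlying polling pattern against the plain HDFS API, assuming an already-initialized DistributedFileSystem and a hypothetical one-second retry interval:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoveryUtil {

  // recoverLease() returns true once the file's lease is released and the
  // file is closed; until then, back off and try again.
  public static void recoverLease(DistributedFileSystem dfs, Path source)
      throws IOException, InterruptedException {
    while (!dfs.recoverLease(source)) {
      Thread.sleep(1000L); // hypothetical retry interval
    }
  }
}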
use of org.apache.hadoop.fs.RawLocalFileSystem in project incubator-gobblin by apache.
the class HadoopUtils method copyPath.
private static void copyPath(FileSystem srcFs, Path src, FileSystem dstFs, Path dst,
    boolean deleteSource, boolean overwrite, Configuration conf) throws IOException {
  Preconditions.checkArgument(srcFs.exists(src),
      String.format("Cannot copy from %s to %s because src does not exist", src, dst));
  Preconditions.checkArgument(overwrite || !dstFs.exists(dst),
      String.format("Cannot copy from %s to %s because dst exists", src, dst));
  try {
    boolean isSourceFileSystemLocal = srcFs instanceof LocalFileSystem || srcFs instanceof RawLocalFileSystem;
    if (isSourceFileSystemLocal) {
      try {
        dstFs.copyFromLocalFile(deleteSource, overwrite, src, dst);
      } catch (IOException e) {
        throw new IOException(String.format("Failed to copy %s to %s", src, dst), e);
      }
    } else if (!FileUtil.copy(srcFs, src, dstFs, dst, deleteSource, overwrite, conf)) {
      throw new IOException(String.format("Failed to copy %s to %s", src, dst));
    }
  } catch (Throwable t1) {
    try {
      deleteIfExists(dstFs, dst, true);
    } catch (Throwable t2) {
      // Do nothing
    }
    throw t1;
  }
}
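The key idea in copyPath() is the branch on the source filesystem: local sources (LocalFileSystem or RawLocalFileSystem) go through copyFromLocalFile(), everything else through the generic FileUtil.copy(). A stripped-down sketch of just that branch, assuming already-initialized filesystems and fixed deleteSource/overwrite flags (the class name is hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public final class CopyBranchSketch {

  static void copy(FileSystem srcFs, Path src, FileSystem dstFs, Path dst,
      Configuration conf) throws IOException {
    if (srcFs instanceof LocalFileSystem || srcFs instanceof RawLocalFileSystem) {
      // local source: let the destination filesystem pull the file directly
      dstFs.copyFromLocalFile(false /* deleteSource */, true /* overwrite */, src, dst);
    } else if (!FileUtil.copy(srcFs, src, dstFs, dst, false, true, conf)) {
      // FileUtil.copy() reports failure via its return value
      throw new IOException(String.format("Failed to copy %s to %s", src, dst));
    }
  }
}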
use of org.apache.hadoop.fs.RawLocalFileSystem in project elephant-bird by twitter.
the class TestLzoTextOutputFormat method setUp.
@Before
public void setUp() throws Exception {
  outputDir_ = new Path(System.getProperty("test.build.data", "data"), "outputDir");
  conf_ = new Configuration();
  conf_.setBoolean(LzoUtils.LZO_OUTPUT_INDEX, true);
  lfs_ = new RawLocalFileSystem();
  lfs_.initialize(URI.create("file:///"), conf_);
  // purge the FileSystem cache
  FileSystem.closeAll();
}
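Instantiating RawLocalFileSystem directly, as this setup does, bypasses the checksumming wrapper that FileSystem.getLocal() returns: LocalFileSystem writes .crc sidecar files, while the raw filesystem does not. The raw instance can also be reached from the checksummed one, as in this short sketch (the class name is hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;

public class LocalVsRawDemo {

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // checksummed local filesystem: writes .crc sidecar files
    LocalFileSystem local = FileSystem.getLocal(conf);
    // the RawLocalFileSystem underneath it: no checksum files
    FileSystem raw = local.getRawFileSystem();
    System.out.println(raw.getClass().getName());
  }
}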
use of org.apache.hadoop.fs.RawLocalFileSystem in project incubator-gobblin by apache.
the class GobblinYarnAppLauncher method buildLogCopier.
private LogCopier buildLogCopier(Config config, Path sinkLogDir, Path appWorkDir) throws IOException {
  FileSystem rawLocalFs = this.closer.register(new RawLocalFileSystem());
  rawLocalFs.initialize(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration());
  LogCopier.Builder builder = LogCopier.newBuilder()
      .useSrcFileSystem(this.fs)
      .useDestFileSystem(rawLocalFs)
      .readFrom(getHdfsLogDir(appWorkDir))
      .writeTo(sinkLogDir)
      .acceptsLogFileExtensions(ImmutableSet.of(ApplicationConstants.STDOUT, ApplicationConstants.STDERR));
  if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE)) {
    builder.useMaxBytesPerLogFile(config.getBytes(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE));
  }
  if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER)) {
    builder.useScheduler(config.getString(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER));
  }
  return builder.build();
}
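One detail worth noting above: the RawLocalFileSystem instance is registered with a Guava Closer so that it is closed along with the launcher's other resources. A minimal sketch of that lifecycle pattern, assuming Guava on the classpath (the class name is hypothetical):

import java.io.IOException;
import java.net.URI;

import com.google.common.io.Closer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class CloserDemo {

  public static void main(String[] args) throws IOException {
    Closer closer = Closer.create();
    try {
      // register() returns its argument, so the filesystem can be
      // initialized and used inline; closer.close() will close it later.
      RawLocalFileSystem fs = closer.register(new RawLocalFileSystem());
      fs.initialize(URI.create("file:///"), new Configuration());
      // ... use fs ...
    } finally {
      closer.close();
    }
  }
}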