use of org.apache.hadoop.fs.LocalFileSystem in project hbase by apache.
the class TestStore method testHandleErrorsInFlush.
@Test
public void testHandleErrorsInFlush() throws Exception {
  LOG.info("Setting up a faulty file system that cannot write");
  final Configuration conf = HBaseConfiguration.create();
  User user = User.createUserForTesting(conf, "testhandleerrorsinflush", new String[] { "foo" });
  // Inject our faulty LocalFileSystem
  conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
  user.runAs(new PrivilegedExceptionAction<Object>() {

    @Override
    public Object run() throws Exception {
      // Make sure it worked (above is sensitive to caching details in hadoop core)
      FileSystem fs = FileSystem.get(conf);
      Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
      // Initialize region
      init(name.getMethodName(), conf);
      LOG.info("Adding some data");
      store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), null);
      store.add(new KeyValue(row, family, qf2, 1, (byte[]) null), null);
      store.add(new KeyValue(row, family, qf3, 1, (byte[]) null), null);
      LOG.info("Before flush, we should have no files");
      Collection<StoreFileInfo> files =
          store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
      Assert.assertEquals(0, files != null ? files.size() : 0);
      // flush
      try {
        LOG.info("Flushing");
        flush(1);
        Assert.fail("Didn't bubble up IOE!");
      } catch (IOException ioe) {
        Assert.assertTrue(ioe.getMessage().contains("Fault injected"));
      }
      LOG.info("After failed flush, we should still have no files!");
      files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
      Assert.assertEquals(0, files != null ? files.size() : 0);
      store.getHRegion().getWAL().close();
      return null;
    }
  });
  FileSystem.closeAllForUGI(user.getUGI());
}
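The FaultyFileSystem injected above is defined elsewhere in TestStore and is not shown in this snippet. As a hedged sketch of the technique (the real class differs in detail): wrap a LocalFileSystem in a FilterFileSystem whose output streams throw the "Fault injected" IOException the test asserts on. The no-arg constructor matters, because conf.setClass("fs.file.impl", ...) causes the class to be instantiated reflectively.

import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;

/** Minimal fault-injecting file system sketch; the real TestStore.FaultyFileSystem differs. */
public class FaultyFileSystem extends FilterFileSystem {

  public FaultyFileSystem() {
    // fs.file.impl instantiates the class via reflection, so a no-arg constructor is required
    super(new LocalFileSystem());
  }

  @Override
  public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
      int bufferSize, short replication, long blockSize, Progressable progress)
      throws IOException {
    // Hand back a stream that fails on the first write attempt
    return new FSDataOutputStream(new FaultyOutputStream(), null);
  }

  private static class FaultyOutputStream extends OutputStream {
    @Override
    public void write(int b) throws IOException {
      throw new IOException("Fault injected"); // the message the test asserts on
    }
  }
}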
use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
the class TestIFile method testIFileReaderWithCodec.
/** Same as above but create a reader. */
@Test
public void testIFileReaderWithCodec() throws Exception {
  Configuration conf = new Configuration();
  FileSystem localFs = FileSystem.getLocal(conf);
  FileSystem rfs = ((LocalFileSystem) localFs).getRaw();
  Path path = new Path(new Path("build/test.ifile"), "data");
  DefaultCodec codec = new GzipCodec();
  codec.setConf(conf);
  FSDataOutputStream out = rfs.create(path);
  IFile.Writer<Text, Text> writer =
      new IFile.Writer<Text, Text>(conf, out, Text.class, Text.class, codec, null);
  writer.close();
  FSDataInputStream in = rfs.open(path);
  IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(
      conf, in, rfs.getFileStatus(path).getLen(), codec, null);
  reader.close();
  // test check sum
  byte[] ab = new byte[100];
  int readed = reader.checksumIn.readWithChecksum(ab, 0, ab.length);
  assertEquals(readed, reader.checksumIn.getChecksum().length);
}
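The test above only exercises creating and reopening an empty compressed IFile plus its trailing checksum. A hedged sketch of a fuller round trip, not part of the original test and assuming the org.apache.hadoop.mapred.IFile API (Writer#append, Reader#nextRawKey/nextRawValue); the method name is hypothetical:

@Test
public void testIFileRoundTripWithCodec() throws Exception { // hypothetical test name
  Configuration conf = new Configuration();
  FileSystem rfs = FileSystem.getLocal(conf).getRaw();
  Path path = new Path(new Path("build/test.ifile"), "roundtrip");
  GzipCodec codec = new GzipCodec();
  codec.setConf(conf);
  // Write one record through the codec
  FSDataOutputStream out = rfs.create(path);
  IFile.Writer<Text, Text> writer =
      new IFile.Writer<Text, Text>(conf, out, Text.class, Text.class, codec, null);
  writer.append(new Text("key"), new Text("value"));
  writer.close();
  // Read it back with the raw-record API
  FSDataInputStream in = rfs.open(path);
  IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(
      conf, in, rfs.getFileStatus(path).getLen(), codec, null);
  DataInputBuffer keyBuf = new DataInputBuffer();
  DataInputBuffer valBuf = new DataInputBuffer();
  while (reader.nextRawKey(keyBuf)) {
    reader.nextRawValue(valBuf);
    // keyBuf/valBuf now hold the serialized Text bytes of one record
  }
  reader.close();
}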
use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
the class TestHadoopArchives method testCopyToLocal.
/*
 * Tests copying from archive file system to a local file system
 */
@Test
public void testCopyToLocal() throws Exception {
  final String fullHarPathStr = makeArchive();
  // make path to copy the file to:
  final String tmpDir =
      System.getProperty("test.build.data", "build/test/data") + "/work-dir/har-fs-tmp";
  final Path tmpPath = new Path(tmpDir);
  final LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  localFs.delete(tmpPath, true);
  localFs.mkdirs(tmpPath);
  assertTrue(localFs.exists(tmpPath));
  // Create fresh HarFs:
  final HarFileSystem harFileSystem = new HarFileSystem(fs);
  try {
    final URI harUri = new URI(fullHarPathStr);
    harFileSystem.initialize(harUri, fs.getConf());
    final Path sourcePath = new Path(fullHarPathStr + Path.SEPARATOR + "a");
    final Path targetPath = new Path(tmpPath, "straus");
    // copy the Har file to a local file system:
    harFileSystem.copyToLocalFile(false, sourcePath, targetPath);
    FileStatus straus = localFs.getFileStatus(targetPath);
    // the file should contain just 1 character:
    assertEquals(1, straus.getLen());
  } finally {
    harFileSystem.close();
    localFs.delete(tmpPath, true);
  }
}
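makeArchive() is a helper of TestHadoopArchives that is not shown in this snippet. A hedged sketch of what such a helper typically does: run the HadoopArchives tool via ToolRunner and return the fully qualified har:// path. The field names (fs, conf, inputPath, archivePath) are assumptions, not taken from the source:

// Hedged sketch; fields fs, conf, inputPath and archivePath are assumed to
// exist on the test class.
private String makeArchive() throws Exception {
  final String harName = "foo.har";
  final URI uri = fs.getUri();
  // har URIs embed the underlying scheme and authority: har://hdfs-host:port/path/foo.har
  final String fullHarPathStr = "har://hdfs-" + uri.getHost() + ":" + uri.getPort()
      + archivePath.toUri().getPath() + Path.SEPARATOR + harName;
  final String[] args = {
      "-archiveName", harName,
      "-p", inputPath.toUri().getPath(), // parent of the files to archive
      "*",                               // glob, relative to the parent
      archivePath.toString()             // destination directory for the .har
  };
  assertEquals(0, ToolRunner.run(new HadoopArchives(conf), args));
  return fullHarPathStr;
}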
use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
the class TestTaskProgressReporter method testBytesWrittenLimit.
/**
 * This is to test the limit on BYTES_WRITTEN. The test is limited in that
 * the check is done only once, in the first loop of TaskReporter#run.
 * @param limit the limit on BYTES_WRITTEN in the local file system
 * @param failFast should the task fail fast with such a limit?
 * @throws Exception
 */
public void testBytesWrittenLimit(long limit, boolean failFast) throws Exception {
  ExitUtil.disableSystemExit();
  threadExited = false;
  Thread.UncaughtExceptionHandler h = new Thread.UncaughtExceptionHandler() {

    public void uncaughtException(Thread th, Throwable ex) {
      System.out.println("Uncaught exception: " + ex);
      if (ex instanceof ExitUtil.ExitException) {
        threadExited = true;
      }
    }
  };
  JobConf conf = new JobConf();
  // Set the report interval to 0 to disable task reporter sleeping
  conf.setLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL, 0);
  conf.setLong(MRJobConfig.TASK_LOCAL_WRITE_LIMIT_BYTES, limit);
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  Path tmpPath = new Path("/tmp/testBytesWrittenLimit-tmpFile-"
      + new Random(System.currentTimeMillis()).nextInt());
  FSDataOutputStream out = localFS.create(tmpPath, true);
  out.write(new byte[LOCAL_BYTES_WRITTEN]);
  out.close();
  Task task = new DummyTask();
  task.setConf(conf);
  DummyTaskReporter reporter = new DummyTaskReporter(task);
  Thread t = new Thread(reporter);
  t.setUncaughtExceptionHandler(h);
  reporter.setProgressFlag();
  t.start();
  while (!reporter.taskLimitIsChecked) {
    Thread.yield();
  }
  task.setTaskDone();
  reporter.resetDoneFlag();
  t.join();
  Assert.assertEquals(failFast, threadExited);
}
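Note that testBytesWrittenLimit is a parameterized helper rather than a @Test method itself. A hedged sketch of how such a helper is typically driven, probing the limit from both sides of LOCAL_BYTES_WRITTEN (the wrapper names are illustrative):

@Test(timeout = 10000)
public void testBytesWrittenRespectingLimit() throws Exception {
  // limit above what the task writes: the reporter must not trigger fail-fast
  testBytesWrittenLimit(LOCAL_BYTES_WRITTEN + 1, false);
}

@Test(timeout = 10000)
public void testBytesWrittenLimitExceeded() throws Exception {
  // limit below what the task writes: the reporter should exit the VM (fail fast)
  testBytesWrittenLimit(LOCAL_BYTES_WRITTEN - 1, true);
}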
use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
the class JHEventHandlerForSigtermTest method testDefaultFsIsUsedForHistory.
@Test(timeout = 50000)
public void testDefaultFsIsUsedForHistory() throws Exception {
  // Create default configuration pointing to the minicluster
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, dfsCluster.getURI().toString());
  FileOutputStream os = new FileOutputStream(coreSitePath);
  conf.writeXml(os);
  os.close();
  // simulate execution under a non-default namenode
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "file:///");
  TestParams t = new TestParams();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);
  JHEvenHandlerForTest realJheh = new JHEvenHandlerForTest(t.mockAppContext, 0, false);
  JHEvenHandlerForTest jheh = spy(realJheh);
  jheh.init(conf);
  try {
    jheh.start();
    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new AMStartedEvent(t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
    handleEvent(jheh, new JobHistoryEvent(t.jobId,
        new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0,
            new Counters(), new Counters(), new Counters())));
    // If we got here, the event handler worked, but we don't know with which
    // file system. Now check that the history files were written to the minicluster
    FileSystem dfsFileSystem = dfsCluster.getFileSystem();
    assertTrue("Minicluster contains some history files",
        dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
    FileSystem localFileSystem = FileSystem.getLocal(conf);
    assertFalse("No history directory on non-default file system",
        localFileSystem.exists(new Path(t.dfsWorkDir)));
  } finally {
    jheh.stop();
    purgeHdfsHistoryIntermediateDoneDirectory(conf);
  }
}
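The setup above hinges on Configuration's default-resource loading: writing the minicluster URI into core-site.xml (coreSitePath) makes it the default for any code that constructs its own Configuration, even though the runtime conf object was repointed at file:///. A hedged illustration of that resolution rule, assuming coreSitePath ends up on the test classpath as the surrounding class presumably arranges:

// Conf handed around at runtime, repointed at the local file system:
Configuration runtimeConf = new Configuration();
runtimeConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "file:///");
FileSystem localFs = FileSystem.get(runtimeConf);  // LocalFileSystem

// A component that builds a fresh Configuration re-reads core-site.xml from
// the classpath, so it sees the minicluster URI written earlier:
Configuration freshConf = new Configuration();
FileSystem historyFs = FileSystem.get(freshConf);  // minicluster file system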