
Example 1 with LocalFileSystem

Use of org.apache.hadoop.fs.LocalFileSystem in project hbase by apache.

From class TestStore, method testHandleErrorsInFlush.

@Test
public void testHandleErrorsInFlush() throws Exception {
    LOG.info("Setting up a faulty file system that cannot write");
    final Configuration conf = HBaseConfiguration.create();
    User user = User.createUserForTesting(conf, "testhandleerrorsinflush", new String[] { "foo" });
    // Inject our faulty LocalFileSystem
    conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
    user.runAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            // Make sure it worked (above is sensitive to caching details in hadoop core)
            FileSystem fs = FileSystem.get(conf);
            Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
            // Initialize region
            init(name.getMethodName(), conf);
            LOG.info("Adding some data");
            store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), null);
            store.add(new KeyValue(row, family, qf2, 1, (byte[]) null), null);
            store.add(new KeyValue(row, family, qf3, 1, (byte[]) null), null);
            LOG.info("Before flush, we should have no files");
            Collection<StoreFileInfo> files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
            Assert.assertEquals(0, files != null ? files.size() : 0);
            // Flush; the injected fault should surface as an IOException
            try {
                LOG.info("Flushing");
                flush(1);
                Assert.fail("Didn't bubble up IOE!");
            } catch (IOException ioe) {
                Assert.assertTrue(ioe.getMessage().contains("Fault injected"));
            }
            LOG.info("After failed flush, we should still have no files!");
            files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
            Assert.assertEquals(0, files != null ? files.size() : 0);
            store.getHRegion().getWAL().close();
            return null;
        }
    });
    FileSystem.closeAllForUGI(user.getUGI());
}
Also used: User (org.apache.hadoop.hbase.security.User), KeyValue (org.apache.hadoop.hbase.KeyValue), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), CompactionConfiguration (org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), FilterFileSystem (org.apache.hadoop.fs.FilterFileSystem), LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem), Collection (java.util.Collection), IOException (java.io.IOException), Test (org.junit.Test)
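
The test hinges on a FaultyFileSystem class whose definition is not shown. A minimal sketch along these lines would satisfy the assertions above; it is illustrative only, and HBase's actual test class is more selective (it faults the returned output stream rather than create() itself, so that region setup can still succeed):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;

/** Wraps LocalFileSystem and injects a fault on every file creation. */
public class FaultyFileSystem extends FilterFileSystem {

    public FaultyFileSystem() {
        // Registered via conf.setClass("fs.file.impl", ...), so it needs a
        // no-arg constructor; everything else is delegated to the local FS.
        super(new LocalFileSystem());
    }

    @Override
    public FSDataOutputStream create(Path f, FsPermission permission,
            boolean overwrite, int bufferSize, short replication,
            long blockSize, Progressable progress) throws IOException {
        // The message must match the assertion in testHandleErrorsInFlush
        throw new IOException("Fault injected");
    }
}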

Example 2 with LocalFileSystem

Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.

From class TestIFile, method testIFileReaderWithCodec.

/** Same as above but create a reader. */
@Test
public void testIFileReaderWithCodec() throws Exception {
    Configuration conf = new Configuration();
    FileSystem localFs = FileSystem.getLocal(conf);
    FileSystem rfs = ((LocalFileSystem) localFs).getRaw();
    Path path = new Path(new Path("build/test.ifile"), "data");
    // GzipCodec extends DefaultCodec, so the narrower declared type still compiles
    DefaultCodec codec = new GzipCodec();
    codec.setConf(conf);
    FSDataOutputStream out = rfs.create(path);
    IFile.Writer<Text, Text> writer = new IFile.Writer<Text, Text>(conf, out, Text.class, Text.class, codec, null);
    writer.close();
    FSDataInputStream in = rfs.open(path);
    IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(conf, in, rfs.getFileStatus(path).getLen(), codec, null);
    reader.close();
    // Test the checksum: the bytes read back should match the stored checksum length
    byte[] ab = new byte[100];
    int bytesRead = reader.checksumIn.readWithChecksum(ab, 0, ab.length);
    assertEquals(bytesRead, reader.checksumIn.getChecksum().length);
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), GzipCodec (org.apache.hadoop.io.compress.GzipCodec), DefaultCodec (org.apache.hadoop.io.compress.DefaultCodec), Text (org.apache.hadoop.io.Text), LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
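
The cast to LocalFileSystem matters: FileSystem.getLocal(conf) returns a checksumming wrapper around the raw local file system, and getRaw() bypasses it. A small sketch of the difference, assuming a writable build/ directory (file names are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class RawVsChecksummed {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        LocalFileSystem localFs = FileSystem.getLocal(conf);
        // Checksummed: also writes a hidden .crc sidecar file next to the data
        localFs.create(new Path("build/with-checksum.dat")).close();
        // Raw: no .crc sidecar is written and no verification happens on read
        FileSystem rawFs = localFs.getRaw();
        rawFs.create(new Path("build/no-checksum.dat")).close();
    }
}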

Example 3 with LocalFileSystem

Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.

From class TestHadoopArchives, method testCopyToLocal.

/*
 * Tests copying from archive file system to a local file system.
 */
@Test
public void testCopyToLocal() throws Exception {
    final String fullHarPathStr = makeArchive();
    // make path to copy the file to:
    final String tmpDir = System.getProperty("test.build.data", "build/test/data") + "/work-dir/har-fs-tmp";
    final Path tmpPath = new Path(tmpDir);
    final LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    localFs.delete(tmpPath, true);
    localFs.mkdirs(tmpPath);
    assertTrue(localFs.exists(tmpPath));
    // Create fresh HarFs:
    final HarFileSystem harFileSystem = new HarFileSystem(fs);
    try {
        final URI harUri = new URI(fullHarPathStr);
        harFileSystem.initialize(harUri, fs.getConf());
        final Path sourcePath = new Path(fullHarPathStr + Path.SEPARATOR + "a");
        final Path targetPath = new Path(tmpPath, "straus");
        // copy the Har file to a local file system:
        harFileSystem.copyToLocalFile(false, sourcePath, targetPath);
        FileStatus straus = localFs.getFileStatus(targetPath);
        // the file should contain just 1 character:
        assertEquals(1, straus.getLen());
    } finally {
        harFileSystem.close();
        localFs.delete(tmpPath, true);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus), CapacitySchedulerConfiguration (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration), Configuration (org.apache.hadoop.conf.Configuration), LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem), HarFileSystem (org.apache.hadoop.fs.HarFileSystem), URI (java.net.URI), Test (org.junit.Test)
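
HarFileSystem is read-only and mounts an existing archive: the har:// URI encodes the underlying file system's scheme and authority in its own authority, and entry paths include the full archive path, as in the fullHarPathStr + "/a" lookup above. A hedged sketch of opening one entry directly (the cluster address and archive path are illustrative):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.HarFileSystem;
import org.apache.hadoop.fs.Path;

public class HarReadSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        HarFileSystem harFs = new HarFileSystem();
        // har://hdfs-namenode:8020/user/test/foo.har maps onto
        // hdfs://namenode:8020/user/test/foo.har
        harFs.initialize(new URI("har://hdfs-namenode:8020/user/test/foo.har"), conf);
        // Entry paths are rooted at the archive path itself
        FSDataInputStream in = harFs.open(new Path("/user/test/foo.har/a"));
        in.close();
        harFs.close();
    }
}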

Example 4 with LocalFileSystem

Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.

From class TestTaskProgressReporter, method testBytesWrittenLimit.

/**
   * This is to test the limit on BYTES_WRITTEN. The test is limited in that
   * the check is done only once at the first loop of TaskReport#run.
   * @param limit the limit on BYTES_WRITTEN in the local file system
   * @param failFast whether the task should fail fast when the limit is exceeded
   * @throws Exception
   */
public void testBytesWrittenLimit(long limit, boolean failFast) throws Exception {
    ExitUtil.disableSystemExit();
    threadExited = false;
    Thread.UncaughtExceptionHandler h = new Thread.UncaughtExceptionHandler() {

        public void uncaughtException(Thread th, Throwable ex) {
            System.out.println("Uncaught exception: " + ex);
            if (ex instanceof ExitUtil.ExitException) {
                threadExited = true;
            }
        }
    };
    JobConf conf = new JobConf();
    // Disable task reporter sleeping between progress checks
    conf.setLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL, 0);
    conf.setLong(MRJobConfig.TASK_LOCAL_WRITE_LIMIT_BYTES, limit);
    LocalFileSystem localFS = FileSystem.getLocal(conf);
    Path tmpPath = new Path("/tmp/testBytesWrittenLimit-tmpFile-" + new Random(System.currentTimeMillis()).nextInt());
    FSDataOutputStream out = localFS.create(tmpPath, true);
    out.write(new byte[LOCAL_BYTES_WRITTEN]);
    out.close();
    Task task = new DummyTask();
    task.setConf(conf);
    DummyTaskReporter reporter = new DummyTaskReporter(task);
    Thread t = new Thread(reporter);
    t.setUncaughtExceptionHandler(h);
    reporter.setProgressFlag();
    t.start();
    while (!reporter.taskLimitIsChecked) {
        Thread.yield();
    }
    task.setTaskDone();
    reporter.resetDoneFlag();
    t.join();
    Assert.assertEquals(failFast, threadExited);
}
Also used: Path (org.apache.hadoop.fs.Path), Random (java.util.Random), LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)
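
The write limit here is checked against Hadoop's per-scheme FileSystem statistics, which can also be inspected directly. A sketch under that assumption (the probe path is illustrative; getAllStatistics() is deprecated in newer Hadoop releases but still present):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalBytesWrittenProbe {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        LocalFileSystem localFs = FileSystem.getLocal(conf);
        FSDataOutputStream out = localFs.create(new Path("/tmp/stats-probe"), true);
        out.write(new byte[1024]);
        out.close();
        // Counters accumulate per scheme across the whole JVM
        for (FileSystem.Statistics stats : FileSystem.getAllStatistics()) {
            if ("file".equals(stats.getScheme())) {
                System.out.println("file:// bytes written: " + stats.getBytesWritten());
            }
        }
    }
}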

Example 5 with LocalFileSystem

Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.

From class JHEventHandlerForSigtermTest, method testDefaultFsIsUsedForHistory.

@Test(timeout = 50000)
public void testDefaultFsIsUsedForHistory() throws Exception {
    // Create default configuration pointing to the minicluster
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, dfsCluster.getURI().toString());
    FileOutputStream os = new FileOutputStream(coreSitePath);
    conf.writeXml(os);
    os.close();
    // simulate execution under a non-default namenode
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "file:///");
    TestParams t = new TestParams();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);
    JHEvenHandlerForTest realJheh = new JHEvenHandlerForTest(t.mockAppContext, 0, false);
    JHEvenHandlerForTest jheh = spy(realJheh);
    jheh.init(conf);
    try {
        jheh.start();
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(), new Counters(), new Counters())));
        // If we got here, the event handler worked, but we don't know against which
        // file system. Check that the history files were written to the minicluster.
        FileSystem dfsFileSystem = dfsCluster.getFileSystem();
        assertTrue("Minicluster contains some history files", dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
        // The inherited static get() resolves via fs.defaultFS, which is file:/// here
        FileSystem localFileSystem = LocalFileSystem.get(conf);
        assertFalse("No history directory on non-default file system", localFileSystem.exists(new Path(t.dfsWorkDir)));
    } finally {
        jheh.stop();
        purgeHdfsHistoryIntermediateDoneDirectory(conf);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), FileOutputStream (java.io.FileOutputStream), FileSystem (org.apache.hadoop.fs.FileSystem), LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem), Counters (org.apache.hadoop.mapreduce.Counters), Test (org.junit.Test)
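
The test works because FileSystem.get(conf) and scheme-less Paths resolve against fs.defaultFS, so flipping the key between the minicluster URI and file:/// changes where unqualified paths land. A minimal sketch of that resolution:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DefaultFsResolution {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "file:///");
        FileSystem fs = FileSystem.get(conf);
        // With file:/// as the default, this prints LocalFileSystem
        System.out.println(fs.getClass().getSimpleName());
        // An unqualified path is qualified against the default file system
        System.out.println(fs.makeQualified(new Path("/tmp/history")));
    }
}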

Aggregations

LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem): 121
Path (org.apache.hadoop.fs.Path): 77
Test (org.junit.Test): 64
Configuration (org.apache.hadoop.conf.Configuration): 57
FileSystem (org.apache.hadoop.fs.FileSystem): 35
IOException (java.io.IOException): 33
File (java.io.File): 23
NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration): 23
SamplerConfiguration (org.apache.accumulo.core.client.sample.SamplerConfiguration): 23
SummarizerConfiguration (org.apache.accumulo.core.client.summary.SummarizerConfiguration): 23
DefaultConfiguration (org.apache.accumulo.core.conf.DefaultConfiguration): 23
Key (org.apache.accumulo.core.data.Key): 22
Value (org.apache.accumulo.core.data.Value): 22
ArrayList (java.util.ArrayList): 19
ExecutorService (java.util.concurrent.ExecutorService): 15
Future (java.util.concurrent.Future): 15
Scanner (org.apache.accumulo.core.client.Scanner): 14
DataSegment (org.apache.druid.timeline.DataSegment): 13
DataSegmentPusher (org.apache.druid.segment.loading.DataSegmentPusher): 8
HdfsDataSegmentPusher (org.apache.druid.storage.hdfs.HdfsDataSegmentPusher): 8