Search in sources :

Example 51 with Configuration

use of org.apache.hadoop.conf.Configuration in project flink by apache.

Source: the testConfigure method of the HadoopOutputFormatTest class.

@Test
public void testConfigure() throws Exception {
    // Mock the wrapped Hadoop output format so the interaction can be verified.
    ConfigurableDummyOutputFormat mockedOutputFormat = mock(ConfigurableDummyOutputFormat.class);
    Job job = Job.getInstance();
    HadoopOutputFormat<String, Long> wrapper =
            setupHadoopOutputFormat(mockedOutputFormat, job, null, null, new Configuration());

    wrapper.configure(new org.apache.flink.configuration.Configuration());

    // Configuring the Flink wrapper must push a Hadoop Configuration into the
    // wrapped format exactly once.
    verify(mockedOutputFormat, times(1)).setConf(any(Configuration.class));
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) Mockito.anyLong(org.mockito.Mockito.anyLong) Mockito.anyString(org.mockito.Mockito.anyString) Test(org.junit.Test)

Example 52 with Configuration

use of org.apache.hadoop.conf.Configuration in project flink by apache.

Source: the getHadoopWrapperClassNameForFileSystem method of the HadoopFileSystem class.

@Override
public Class<?> getHadoopWrapperClassNameForFileSystem(String scheme) {
    // Resolve the FileSystem implementation registered for this scheme under
    // the standard Hadoop configuration key "fs.<scheme>.impl".
    final Configuration hadoopConf = getHadoopConfiguration();

    // NOTE: once Hadoop 1 support is dropped, this lookup could instead call
    // org.apache.hadoop.fs.FileSystem.getFileSystemClass(scheme, hadoopConf)
    // (Hadoop 2 only), logging and returning null on IOException.
    final Class<? extends org.apache.hadoop.fs.FileSystem> implClass =
            hadoopConf.getClass("fs." + scheme + ".impl", null, org.apache.hadoop.fs.FileSystem.class);

    if (implClass != null && LOG.isDebugEnabled()) {
        LOG.debug("Flink supports {} with the Hadoop file system wrapper, impl {}", scheme, implClass);
    }
    // May be null when no implementation is configured for the scheme.
    return implClass;
}
Also used : GlobalConfiguration(org.apache.flink.configuration.GlobalConfiguration) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.flink.core.fs.FileSystem)

Example 53 with Configuration

use of org.apache.hadoop.conf.Configuration in project hadoop by apache.

Source: the main method of the MapFile class.

public static void main(String[] args) throws Exception {
    // Command-line utility: copy every entry of an existing MapFile into a new one.
    String usage = "Usage: MapFile inFile outFile";
    if (args.length != 2) {
        System.err.println(usage);
        System.exit(-1);
    }
    final String inFile = args[0];
    final String outFile = args[1];
    final Configuration conf = new Configuration();
    final FileSystem localFs = FileSystem.getLocal(conf);

    MapFile.Reader reader = null;
    MapFile.Writer writer = null;
    try {
        reader = new MapFile.Reader(localFs, inFile, conf);
        // The output file reuses the key/value classes of the input file.
        Class<? extends WritableComparable> keyClass =
                reader.getKeyClass().asSubclass(WritableComparable.class);
        writer = new MapFile.Writer(conf, localFs, outFile, keyClass, reader.getValueClass());

        WritableComparable<?> key = ReflectionUtils.newInstance(keyClass, conf);
        Writable value =
                ReflectionUtils.newInstance(reader.getValueClass().asSubclass(Writable.class), conf);

        // Copy all entries from reader to writer.
        while (reader.next(key, value)) {
            writer.append(key, value);
        }
    } finally {
        // Close both files; close failures are logged rather than propagated.
        IOUtils.cleanup(LOG, writer, reader);
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem)

Example 54 with Configuration

use of org.apache.hadoop.conf.Configuration in project hadoop by apache.

Source: the testCopy5 method of the TestFileUtil class.

/*
 * Test method copy(FileSystem srcFS, Path src, File dst, boolean deleteSource, Configuration conf)
 */
@Test(timeout = 30000)
public void testCopy5() throws IOException {
    setupDirs();
    URI uri = tmp.toURI();
    Configuration conf = new Configuration();
    // FileSystem.newInstance() returns a fresh, uncached instance that the
    // caller owns; close it in a finally block so the test does not leak it.
    FileSystem fs = FileSystem.newInstance(uri, conf);
    try {
        final String content = "some-content";
        File srcFile = createFile(tmp, "src", content);
        Path srcPath = new Path(srcFile.toURI());

        // copy regular file:
        final File dest = new File(del, "dest");
        boolean result = FileUtil.copy(fs, srcPath, dest, false, conf);
        assertTrue(result);
        assertTrue(dest.exists());
        // createFile appends a platform line separator, hence the extra bytes.
        assertEquals(content.getBytes().length
                + System.getProperty("line.separator").getBytes().length, dest.length());
        // source must survive a non-deleting copy
        assertTrue(srcFile.exists());

        // copy regular file, delete src:
        dest.delete();
        assertTrue(!dest.exists());
        result = FileUtil.copy(fs, srcPath, dest, true, conf);
        assertTrue(result);
        assertTrue(dest.exists());
        assertEquals(content.getBytes().length
                + System.getProperty("line.separator").getBytes().length, dest.length());
        // source must be removed by a deleting copy
        assertTrue(!srcFile.exists());

        // copy a dir:
        dest.delete();
        assertTrue(!dest.exists());
        srcPath = new Path(partitioned.toURI());
        result = FileUtil.copy(fs, srcPath, dest, true, conf);
        assertTrue(result);
        assertTrue(dest.exists() && dest.isDirectory());
        File[] files = dest.listFiles();
        assertTrue(files != null);
        assertEquals(2, files.length);
        for (File f : files) {
            // each partition file holds 3 payload bytes plus a line separator
            assertEquals(3 + System.getProperty("line.separator").getBytes().length, f.length());
        }
        // source directory must be removed by a deleting copy
        assertTrue(!partitioned.exists());
    } finally {
        fs.close();
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) URI(java.net.URI) JarFile(java.util.jar.JarFile) File(java.io.File) Test(org.junit.Test)

Example 55 with Configuration

use of org.apache.hadoop.conf.Configuration in project hadoop by apache.

Source: the testShouldNotthrowNPE method of the TestLocalDirAllocator class.

/*
 * Test when mapred.local.dir not configured and called
 * getLocalPathForWrite
 */
@Test(timeout = 30000)
public void testShouldNotthrowNPE() throws Exception {
    Configuration unsetConf = new Configuration();

    // An entirely unset context must surface a descriptive IOException,
    // never a NullPointerException.
    try {
        dirAllocator.getLocalPathForWrite("/test", unsetConf);
        fail("Exception not thrown when " + CONTEXT + " is not set");
    } catch (IOException expected) {
        assertEquals(CONTEXT + " not configured", expected.getMessage());
    } catch (NullPointerException npe) {
        fail("Lack of configuration should not have thrown a NPE.");
    }

    // A context explicitly set to the empty string must yield a
    // DiskErrorException for both write and read path lookups.
    String NEW_CONTEXT = CONTEXT + ".new";
    unsetConf.set(NEW_CONTEXT, "");
    LocalDirAllocator emptyCtxAllocator = new LocalDirAllocator(NEW_CONTEXT);

    try {
        emptyCtxAllocator.getLocalPathForWrite("/test", unsetConf);
        fail("Exception not thrown when " + NEW_CONTEXT + " is set to empty string");
    } catch (IOException expected) {
        assertTrue(expected instanceof DiskErrorException);
    } catch (NullPointerException npe) {
        fail("Wrong configuration should not have thrown a NPE.");
    }

    try {
        emptyCtxAllocator.getLocalPathToRead("/test", unsetConf);
        fail("Exception not thrown when " + NEW_CONTEXT + " is set to empty string");
    } catch (IOException expected) {
        assertTrue(expected instanceof DiskErrorException);
    } catch (NullPointerException npe) {
        fail("Wrong configuration should not have thrown a NPE.");
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) DiskErrorException(org.apache.hadoop.util.DiskChecker.DiskErrorException) IOException(java.io.IOException) Test(org.junit.Test)

Aggregations

Configuration (org.apache.hadoop.conf.Configuration)5973 Test (org.junit.Test)3243 Path (org.apache.hadoop.fs.Path)1602 FileSystem (org.apache.hadoop.fs.FileSystem)903 IOException (java.io.IOException)850 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)727 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)517 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)502 File (java.io.File)499 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)388 ArrayList (java.util.ArrayList)360 URI (java.net.URI)319 BeforeClass (org.junit.BeforeClass)275 Job (org.apache.hadoop.mapreduce.Job)272 Before (org.junit.Before)264 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)219 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)203 HashMap (java.util.HashMap)192 FileStatus (org.apache.hadoop.fs.FileStatus)190 Properties (java.util.Properties)187