Use of org.apache.hadoop.conf.Configuration in the Apache Flink project.
From class HadoopOutputFormatTest, method testConfigure.
/**
 * Verifies that {@code HadoopOutputFormat#configure} hands the Hadoop
 * configuration to a wrapped output format that implements
 * {@code Configurable}, and does so exactly once.
 */
@Test
public void testConfigure() throws Exception {
    ConfigurableDummyOutputFormat mockedFormat = mock(ConfigurableDummyOutputFormat.class);
    HadoopOutputFormat<String, Long> format =
            setupHadoopOutputFormat(mockedFormat, Job.getInstance(), null, null, new Configuration());

    format.configure(new org.apache.flink.configuration.Configuration());

    // The wrapped format must receive the Hadoop Configuration via setConf exactly once.
    verify(mockedFormat, times(1)).setConf(any(Configuration.class));
}
Use of org.apache.hadoop.conf.Configuration in the Apache Flink project.
From class HadoopFileSystem, method getHadoopWrapperClassNameForFileSystem.
/**
 * Looks up the Hadoop {@code FileSystem} implementation class registered for
 * the given URI scheme under the configuration key {@code fs.<scheme>.impl}.
 *
 * @param scheme the file system URI scheme, e.g. "hdfs"
 * @return the implementing class, or {@code null} if none is configured
 */
@Override
public Class<?> getHadoopWrapperClassNameForFileSystem(String scheme) {
    final Configuration hadoopConf = getHadoopConfiguration();
    // NOTE: once Hadoop 1 support is dropped, this manual key lookup can be
    // replaced by org.apache.hadoop.fs.FileSystem.getFileSystemClass(scheme, hadoopConf),
    // which only exists in Hadoop 2.
    final Class<? extends org.apache.hadoop.fs.FileSystem> fsClass =
            hadoopConf.getClass("fs." + scheme + ".impl", null, org.apache.hadoop.fs.FileSystem.class);
    if (fsClass != null && LOG.isDebugEnabled()) {
        LOG.debug("Flink supports {} with the Hadoop file system wrapper, impl {}", scheme, fsClass);
    }
    return fsClass;
}
Use of org.apache.hadoop.conf.Configuration in the Apache Hadoop project.
From class MapFile, method main.
/**
 * Command-line entry point that copies every entry of an input MapFile to a
 * new output MapFile on the local file system.
 *
 * <p>Usage: {@code MapFile inFile outFile}
 */
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("Usage: MapFile inFile outFile");
        System.exit(-1);
    }
    final String in = args[0];
    final String out = args[1];
    final Configuration conf = new Configuration();
    final FileSystem fs = FileSystem.getLocal(conf);
    MapFile.Reader reader = null;
    MapFile.Writer writer = null;
    try {
        reader = new MapFile.Reader(fs, in, conf);
        // The writer is created with the reader's key/value classes so the
        // output file has the same schema as the input.
        writer = new MapFile.Writer(conf, fs, out,
                reader.getKeyClass().asSubclass(WritableComparable.class),
                reader.getValueClass());
        WritableComparable<?> key = ReflectionUtils.newInstance(
                reader.getKeyClass().asSubclass(WritableComparable.class), conf);
        Writable value = ReflectionUtils.newInstance(
                reader.getValueClass().asSubclass(Writable.class), conf);
        // Copy all entries from the input file into the output file.
        while (reader.next(key, value)) {
            writer.append(key, value);
        }
    } finally {
        // Close both files even if the copy fails part-way through.
        IOUtils.cleanup(LOG, writer, reader);
    }
}
Use of org.apache.hadoop.conf.Configuration in the Apache Hadoop project.
From class TestFileUtil, method testCopy5.
/**
 * Tests {@code FileUtil.copy(FileSystem srcFS, Path src, File dst,
 * boolean deleteSource, Configuration conf)}: copying a regular file with
 * and without source deletion, and copying a directory with source deletion.
 */
@Test(timeout = 30000)
public void testCopy5() throws IOException {
    setupDirs();
    URI uri = tmp.toURI();
    Configuration conf = new Configuration();
    // FileSystem.newInstance() always returns a fresh instance (never the
    // shared cached one), so the caller is responsible for closing it.
    FileSystem fs = FileSystem.newInstance(uri, conf);
    try {
        final String content = "some-content";
        // Copied text files gain one trailing line separator; hoist its length
        // instead of recomputing it before every assertion.
        final int lineSepLen = System.getProperty("line.separator").getBytes().length;
        File srcFile = createFile(tmp, "src", content);
        Path srcPath = new Path(srcFile.toURI());
        // copy regular file:
        final File dest = new File(del, "dest");
        boolean result = FileUtil.copy(fs, srcPath, dest, false, conf);
        assertTrue(result);
        assertTrue(dest.exists());
        assertEquals(content.getBytes().length + lineSepLen, dest.length());
        // source should not be deleted
        assertTrue(srcFile.exists());
        // copy regular file, delete src:
        dest.delete();
        assertTrue(!dest.exists());
        result = FileUtil.copy(fs, srcPath, dest, true, conf);
        assertTrue(result);
        assertTrue(dest.exists());
        assertEquals(content.getBytes().length + lineSepLen, dest.length());
        // source should be deleted
        assertTrue(!srcFile.exists());
        // copy a dir:
        dest.delete();
        assertTrue(!dest.exists());
        srcPath = new Path(partitioned.toURI());
        result = FileUtil.copy(fs, srcPath, dest, true, conf);
        assertTrue(result);
        assertTrue(dest.exists() && dest.isDirectory());
        File[] files = dest.listFiles();
        assertTrue(files != null);
        assertEquals(2, files.length);
        for (File f : files) {
            // Each partition file holds 3 bytes of payload plus the separator.
            assertEquals(3 + lineSepLen, f.length());
        }
        // source directory should be deleted
        assertTrue(!partitioned.exists());
    } finally {
        // Fix: the original test leaked this FileSystem instance.
        fs.close();
    }
}
Use of org.apache.hadoop.conf.Configuration in the Apache Hadoop project.
From class TestLocalDirAllocator, method testShouldNotthrowNPE.
/**
 * Verifies that {@code getLocalPathForWrite}/{@code getLocalPathToRead}
 * report a missing or empty local-dir configuration with an
 * {@code IOException} (specifically {@code DiskErrorException} for the empty
 * case) rather than throwing a {@code NullPointerException}.
 */
@Test(timeout = 30000)
public void testShouldNotthrowNPE() throws Exception {
    Configuration conf1 = new Configuration();
    try {
        dirAllocator.getLocalPathForWrite("/test", conf1);
        fail("Exception not thrown when " + CONTEXT + " is not set");
    } catch (IOException e) {
        assertEquals(CONTEXT + " not configured", e.getMessage());
    } catch (NullPointerException e) {
        fail("Lack of configuration should not have thrown a NPE.");
    }
    String NEW_CONTEXT = CONTEXT + ".new";
    conf1.set(NEW_CONTEXT, "");
    LocalDirAllocator newDirAllocator = new LocalDirAllocator(NEW_CONTEXT);
    try {
        newDirAllocator.getLocalPathForWrite("/test", conf1);
        fail("Exception not thrown when " + NEW_CONTEXT + " is set to empty string");
    } catch (DiskErrorException e) {
        // Expected: an empty dir list is a disk error. Catching the precise
        // subtype replaces the original catch-IOException-then-assert-instanceof
        // pattern; any other IOException now propagates and fails the test.
    } catch (NullPointerException e) {
        fail("Wrong configuration should not have thrown a NPE.");
    }
    try {
        newDirAllocator.getLocalPathToRead("/test", conf1);
        fail("Exception not thrown when " + NEW_CONTEXT + " is set to empty string");
    } catch (DiskErrorException e) {
        // Expected: same contract as getLocalPathForWrite above.
    } catch (NullPointerException e) {
        fail("Wrong configuration should not have thrown a NPE.");
    }
}
Aggregations