Use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
In the class TestGetSpaceUsed, method testBuilderConf:
/**
 * Test that the builder can create the implementation class specified through
 * the configuration key "fs.getspaceused.classname".
 */
@Test
public void testBuilderConf() throws Exception {
  File file = new File(DIR, "testBuilderConf");
  assertTrue(file.createNewFile());
  Configuration conf = new Configuration();
  conf.set("fs.getspaceused.classname", DummyDU.class.getName());
  CachingGetSpaceUsed instance =
      (CachingGetSpaceUsed) new CachingGetSpaceUsed.Builder()
          .setPath(file)
          .setInterval(0)
          .setConf(conf)
          .build();
  assertNotNull(instance);
  assertTrue(instance instanceof DummyDU);
  assertFalse(instance.running());
  instance.close();
}
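For context, a hypothetical caller-side use of the same builder outside the test (the directory path and refresh interval below are made up; getUsed() is the accessor the caching implementation serves):

// Illustrative only: the directory and interval are placeholders.
GetSpaceUsed spaceUsed = new CachingGetSpaceUsed.Builder()
    .setPath(new File("/data/dfs/dn"))
    .setInterval(600000)              // refresh the cached value every 10 minutes
    .setConf(new Configuration())
    .build();
long usedBytes = spaceUsed.getUsed(); // served from the cache between refreshes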
Use of org.apache.hadoop.conf.Configuration in project flink by apache.
In the class HDFSCopyFromLocal, method copyFromLocal:
public static void copyFromLocal(final File localPath, final URI remotePath) throws Exception {
  // Do it in another Thread because HDFS can deadlock if being interrupted while copying
  String threadName = "HDFS Copy from " + localPath + " to " + remotePath;
  final Tuple1<Exception> asyncException = Tuple1.of(null);
  Thread copyThread = new Thread(threadName) {
    @Override
    public void run() {
      try {
        Configuration hadoopConf = HadoopFileSystem.getHadoopConfiguration();
        FileSystem fs = FileSystem.get(remotePath, hadoopConf);
        fs.copyFromLocalFile(new Path(localPath.getAbsolutePath()), new Path(remotePath));
      } catch (Exception t) {
        asyncException.f0 = t;
      }
    }
  };
  copyThread.setDaemon(true);
  copyThread.start();
  copyThread.join();
  if (asyncException.f0 != null) {
    throw asyncException.f0;
  }
}
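A hypothetical call site, for context (the local path and HDFS URI are placeholders, not taken from the Flink code):

// Illustrative only: both paths are made up; call from a method that declares throws Exception.
File localFile = new File("/tmp/job.jar");
URI remoteUri = new URI("hdfs://namenode:8020/user/flink/job.jar");
HDFSCopyFromLocal.copyFromLocal(localFile, remoteUri);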
Use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
In the class ITUseMiniCluster, method clusterUp:
@Before
public void clusterUp() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
}
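The matching teardown is not shown in this snippet; a minimal sketch of what it typically looks like (the method name clusterDown is illustrative):

@After
public void clusterDown() {
  if (cluster != null) {
    // Stops the in-process NameNode and DataNodes started by clusterUp().
    cluster.shutdown();
  }
}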
Use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
In the class JvmPauseMonitor, method main:
/**
 * Simple 'main' to facilitate manual testing of the pause monitor.
 *
 * This main function just leaks memory into a list. Running this class
 * with a 1GB heap will very quickly go into "GC hell" and result in
 * log messages about the GC pauses.
 */
@SuppressWarnings("resource")
public static void main(String[] args) throws Exception {
  JvmPauseMonitor monitor = new JvmPauseMonitor();
  monitor.init(new Configuration());
  monitor.start();
  List<String> list = Lists.newArrayList();
  int i = 0;
  while (true) {
    list.add(String.valueOf(i++));
  }
}
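Outside of this test main, the monitor is normally embedded in a long-running daemon. A sketch of that pattern, assuming the service-lifecycle stop() of the same class (conf stands in for the daemon's existing Configuration):

// Sketch: keep GC-pause logging alive for the lifetime of a daemon.
JvmPauseMonitor pauseMonitor = new JvmPauseMonitor();
pauseMonitor.init(conf);   // conf: the daemon's existing Configuration (assumed)
pauseMonitor.start();
try {
  // ... run the daemon's main loop ...
} finally {
  pauseMonitor.stop();     // assumed service-lifecycle stop on shutdown
}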
Use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
In the class TestDeprecatedKeys, method testIteratorWithDeprecatedKeysMappedToMultipleNewKeys:
@Test
public void testIteratorWithDeprecatedKeysMappedToMultipleNewKeys() {
  Configuration conf = new Configuration();
  Configuration.addDeprecation("dK", new String[] { "nK1", "nK2" });
  conf.set("k", "v");
  conf.set("dK", "V");
  assertEquals("V", conf.get("dK"));
  assertEquals("V", conf.get("nK1"));
  assertEquals("V", conf.get("nK2"));
  conf.set("nK1", "VV");
  assertEquals("VV", conf.get("dK"));
  assertEquals("VV", conf.get("nK1"));
  assertEquals("VV", conf.get("nK2"));
  conf.set("nK2", "VVV");
  assertEquals("VVV", conf.get("dK"));
  assertEquals("VVV", conf.get("nK2"));
  assertEquals("VVV", conf.get("nK1"));
  boolean kFound = false;
  boolean dKFound = false;
  boolean nK1Found = false;
  boolean nK2Found = false;
  for (Map.Entry<String, String> entry : conf) {
    if (entry.getKey().equals("k")) {
      assertEquals("v", entry.getValue());
      kFound = true;
    }
    if (entry.getKey().equals("dK")) {
      assertEquals("VVV", entry.getValue());
      dKFound = true;
    }
    if (entry.getKey().equals("nK1")) {
      assertEquals("VVV", entry.getValue());
      nK1Found = true;
    }
    if (entry.getKey().equals("nK2")) {
      assertEquals("VVV", entry.getValue());
      nK2Found = true;
    }
  }
  assertTrue("regular Key not found", kFound);
  assertTrue("deprecated Key not found", dKFound);
  assertTrue("new Key 1 not found", nK1Found);
  assertTrue("new Key 2 not found", nK2Found);
}
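The same mechanism is what applications rely on when renaming a configuration key. A minimal sketch of that migration pattern (the key names my.old.key and my.new.key are made up for illustration):

// Register the rename once, early in startup, before configurations are read.
Configuration.addDeprecation("my.old.key", "my.new.key");

Configuration conf = new Configuration();
conf.set("my.old.key", "42");                 // legacy callers keep working...
assertEquals("42", conf.get("my.new.key"));   // ...while new code reads the new name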