use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
the class TestHistoryFileManager method testScanDirectory.
@Test
public void testScanDirectory() throws Exception {
  Path p = new Path("any");
  FileContext fc = mock(FileContext.class);
  when(fc.makeQualified(p)).thenReturn(p);
  when(fc.listStatus(p)).thenThrow(new FileNotFoundException());
  List<FileStatus> lfs = HistoryFileManager.scanDirectory(p, fc, null);
  // primarily, success is that an exception was not thrown. Also nice to
  // check this.
  Assert.assertNotNull(lfs);
}
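The assertion only passes if scanDirectory catches the FileNotFoundException internally and still returns a list. A minimal sketch of that defensive pattern, assuming a signature matching the call above; this is an illustration, not the actual HistoryFileManager implementation:

// Hypothetical sketch: list a directory, treating a missing directory as
// an empty result rather than an error. Mirrors the behavior the test
// asserts. (imports: org.apache.hadoop.fs.*, java.io.FileNotFoundException,
// java.io.IOException, java.util.ArrayList, java.util.List)
static List<FileStatus> scanDirectory(Path path, FileContext fc,
    PathFilter filter) throws IOException {
  path = fc.makeQualified(path);
  List<FileStatus> result = new ArrayList<>();
  try {
    RemoteIterator<FileStatus> it = fc.listStatus(path);
    while (it.hasNext()) {
      FileStatus stat = it.next();
      if (filter == null || filter.accept(stat.getPath())) {
        result.add(stat);
      }
    }
  } catch (FileNotFoundException fnf) {
    // a missing directory is not an error here; return an empty list
  }
  return result;
}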
use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
the class TestDistributedShell method tearDown.
@After
public void tearDown() throws IOException {
  if (yarnCluster != null) {
    try {
      yarnCluster.stop();
    } finally {
      yarnCluster = null;
    }
  }
  if (hdfsCluster != null) {
    try {
      hdfsCluster.shutdown();
    } finally {
      hdfsCluster = null;
    }
  }
  FileContext fsContext = FileContext.getLocalFSFileContext();
  fsContext.delete(
      new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH)),
      true);
}
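Note that the final delete assumes TIMELINE_SERVICE_LEVELDB_PATH was set during setup; if conf.get returned null, new Path(null) would throw. Also, unlike FileSystem#delete, FileContext#delete declares FileNotFoundException, so standalone cleanup code may want a guard. A minimal sketch of the cleanup idiom with a hypothetical directory:

// Hypothetical cleanup sketch: recursively delete a local directory tree
// through FileContext. The path below is an example, not the test's value.
FileContext localFc = FileContext.getLocalFSFileContext();
Path storeDir = new Path("/tmp/timeline-leveldb");
try {
  localFc.delete(storeDir, true);  // true = recursive
} catch (FileNotFoundException alreadyGone) {
  // nothing to clean up
}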
use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
the class TestDistributedShell method setupInternal.
private void setupInternal(int numNodeManager, float timelineVersion)
    throws Exception {
  LOG.info("Starting up YARN cluster");
  conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
  // reduce the teardown waiting time
  conf.setLong(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 1000);
  conf.set("yarn.log.dir", "target");
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  // mark if we need to launch the v1 timeline server
  // disable aux-service based timeline aggregators
  conf.set(YarnConfiguration.NM_AUX_SERVICES, "");
  conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, true);
  conf.set(YarnConfiguration.NM_VMEM_PMEM_RATIO, "8");
  conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getName());
  conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
  conf.set("mapreduce.jobhistory.address",
      "0.0.0.0:" + ServerSocketUtil.getPort(10021, 10));
  // Enable ContainersMonitorImpl
  conf.set(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR,
      LinuxResourceCalculatorPlugin.class.getName());
  conf.set(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE,
      ProcfsBasedProcessTree.class.getName());
  conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, true);
  conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true);
  conf.setBoolean(
      YarnConfiguration.YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING, true);
  conf.setBoolean(YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_ENABLED, true);
  // ATS version specific settings
  if (timelineVersion == 1.0f) {
    conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.0f);
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT);
  } else if (timelineVersion == 1.5f) {
    if (hdfsCluster == null) {
      HdfsConfiguration hdfsConfig = new HdfsConfiguration();
      hdfsCluster =
          new MiniDFSCluster.Builder(hdfsConfig).numDataNodes(1).build();
    }
    fs = hdfsCluster.getFileSystem();
    PluginStoreTestUtils.prepareFileSystemForPluginStore(fs);
    PluginStoreTestUtils.prepareConfiguration(conf, hdfsCluster);
    conf.set(YarnConfiguration.TIMELINE_SERVICE_ENTITY_GROUP_PLUGIN_CLASSES,
        DistributedShellTimelinePlugin.class.getName());
  } else if (timelineVersion == 2.0f) {
    // set version to 2
    conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
    // disable v1 timeline server since we no longer have a server here
    // enable aux-service based timeline aggregators
    conf.set(YarnConfiguration.NM_AUX_SERVICES, TIMELINE_AUX_SERVICE_NAME);
    conf.set(YarnConfiguration.NM_AUX_SERVICES + "."
        + TIMELINE_AUX_SERVICE_NAME + ".class",
        PerNodeTimelineCollectorsAuxService.class.getName());
    conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
        FileSystemTimelineWriterImpl.class,
        org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter.class);
    timelineV2StorageDir = tmpFolder.newFolder().getAbsolutePath();
    // set the file system timeline writer storage directory
    conf.set(FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_DIR_ROOT,
        timelineV2StorageDir);
  } else {
    Assert.fail("Wrong timeline version number: " + timelineVersion);
  }
  if (yarnCluster == null) {
    yarnCluster = new MiniYARNCluster(
        TestDistributedShell.class.getSimpleName(), 1, numNodeManager, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();
    conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
        MiniYARNCluster.getHostname() + ":"
            + yarnCluster.getApplicationHistoryServer().getPort());
    waitForNMsToRegister();
    URL url = Thread.currentThread().getContextClassLoader()
        .getResource("yarn-site.xml");
    if (url == null) {
      throw new RuntimeException(
          "Could not find 'yarn-site.xml' dummy file in classpath");
    }
    Configuration yarnClusterConfig = yarnCluster.getConfig();
    yarnClusterConfig.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
        new File(url.getPath()).getParent());
    // write the document to a buffer (not directly to the file, as that
    // can cause the file being written to get read, which will then fail)
    ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
    yarnClusterConfig.writeXml(bytesOut);
    bytesOut.close();
    // write the bytes to the file in the classpath
    OutputStream os = new FileOutputStream(new File(url.getPath()));
    os.write(bytesOut.toByteArray());
    os.close();
  }
  FileContext fsContext = FileContext.getLocalFSFileContext();
  fsContext.delete(
      new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH)),
      true);
  try {
    Thread.sleep(2000);
  } catch (InterruptedException e) {
    LOG.info("setup thread sleep interrupted. message=" + e.getMessage());
  }
}
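The cluster bootstrap above follows the standard YARN service lifecycle: construct, init(conf), start(), and eventually stop(). A stripped-down sketch of that lifecycle with hypothetical names, mirroring the constructor arguments used above (one RM, one NM, one local dir, one log dir):

// Minimal MiniYARNCluster lifecycle sketch (hypothetical test scaffolding).
YarnConfiguration conf = new YarnConfiguration();
MiniYARNCluster cluster = new MiniYARNCluster("SketchCluster", 1, 1, 1, 1);
try {
  cluster.init(conf);   // init: services are wired up with the config
  cluster.start();      // start: RM and NM run as in-process threads
  // ... run the test against cluster.getConfig() ...
} finally {
  cluster.stop();       // stop: shuts services down and releases ports
}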
use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
the class TestNodeStatusUpdater method testNodeStatusUpdaterRetryAndNMShutdown.
@Test(timeout = 200000)
public void testNodeStatusUpdaterRetryAndNMShutdown() throws Exception {
  final long connectionWaitSecs = 1000;
  final long connectionRetryIntervalMs = 1000;
  int port = ServerSocketUtil.getPort(49156, 10);
  YarnConfiguration conf = createNMConfig(port);
  conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
      connectionWaitSecs);
  conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
      connectionRetryIntervalMs);
  conf.setLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS, 5000);
  conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
  CyclicBarrier syncBarrier = new CyclicBarrier(2);
  nm = new MyNodeManager2(syncBarrier, conf);
  nm.init(conf);
  nm.start();
  // start a container
  ContainerId cId = TestNodeManagerShutdown.createContainerId();
  FileContext localFS = FileContext.getLocalFSFileContext();
  TestNodeManagerShutdown.startContainer(nm, cId, localFS, nmLocalDir,
      new File("start_file.txt"), port);
  try {
    // Wait until we start stopping
    syncBarrier.await(10000, TimeUnit.MILLISECONDS);
    // Wait until we finish stopping
    syncBarrier.await(10000, TimeUnit.MILLISECONDS);
  } catch (Exception e) {
  }
  Assert.assertFalse("Containers not cleaned up when NM stopped",
      assertionFailedInThread.get());
  Assert.assertTrue(((MyNodeManager2) nm).isStopped);
  Assert.assertTrue("calculate heartBeatCount based on"
      + " connectionWaitSecs and RetryIntervalSecs", heartBeatID == 2);
}
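The two await calls pair with matching awaits inside MyNodeManager2, so the test thread and the node manager's stop path rendezvous twice: once when stopping begins and once when it completes. A self-contained sketch of that CyclicBarrier pattern (hypothetical names, not the Hadoop test code):

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

// Hypothetical rendezvous sketch: two threads meet at the barrier twice,
// once when shutdown begins and once when it ends.
public class BarrierRendezvous {
  public static void main(String[] args) throws Exception {
    final CyclicBarrier barrier = new CyclicBarrier(2);
    Thread worker = new Thread(() -> {
      try {
        barrier.await();   // signal: stopping has started
        // ... perform shutdown work ...
        barrier.await();   // signal: stopping has finished
      } catch (InterruptedException | BrokenBarrierException ignored) {
      }
    });
    worker.start();
    barrier.await();       // wait until the worker starts stopping
    barrier.await();       // wait until the worker finishes
    worker.join();
  }
}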
use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
the class TestNodeStatusUpdater method deleteBaseDir.
@After
public void deleteBaseDir() throws IOException {
  FileContext lfs = FileContext.getLocalFSFileContext();
  lfs.delete(new Path(basedir.getPath()), true);
}