Use of org.apache.hadoop.fs.AbstractFileSystem in project ignite by apache.
Class HadoopSortingTest, method testSortSimple:
/**
 * @throws Exception If failed.
 */
public void testSortSimple() throws Exception {
    // Generate test data.
    Job job = Job.getInstance();

    job.setInputFormatClass(InFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);
    job.setMapperClass(Mapper.class);
    job.setNumReduceTasks(0);

    setupFileSystems(job.getConfiguration());

    FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_INPUT));

    X.printerrln("Data generation started.");

    grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1),
        createJobInfo(job.getConfiguration(), null)).get(180000);

    X.printerrln("Data generation complete.");

    // Run main map-reduce job.
    job = Job.getInstance();

    setupFileSystems(job.getConfiguration());

    job.getConfiguration().set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
        JavaSerialization.class.getName() + "," + WritableSerialization.class.getName());

    FileInputFormat.setInputPaths(job, new Path(igfsScheme() + PATH_INPUT));
    FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT));

    job.setSortComparatorClass(JavaSerializationComparator.class);

    job.setMapperClass(MyMapper.class);
    job.setReducerClass(MyReducer.class);

    job.setNumReduceTasks(2);

    job.setMapOutputKeyClass(UUID.class);
    job.setMapOutputValueClass(NullWritable.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);

    X.printerrln("Job started.");

    grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 2),
        createJobInfo(job.getConfiguration(), null)).get(180000);

    X.printerrln("Job complete.");

    // Check result.
    Path outDir = new Path(igfsScheme() + PATH_OUTPUT);

    AbstractFileSystem fs = AbstractFileSystem.get(new URI(igfsScheme()), job.getConfiguration());

    for (FileStatus file : fs.listStatus(outDir)) {
        X.printerrln("__ file: " + file);

        if (file.getLen() == 0)
            continue;

        FSDataInputStream in = fs.open(file.getPath());

        Scanner sc = new Scanner(in);

        UUID prev = null;

        while (sc.hasNextLine()) {
            UUID next = UUID.fromString(sc.nextLine());

            if (prev != null)
                assertTrue(prev.compareTo(next) < 0);

            prev = next;
        }
    }
}
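The snippet references InFormat, MyMapper and MyReducer, whose bodies are not shown on this page. As a hedged sketch (only the class names and the UUID/Text key types come from the job setup above; the bodies below are assumptions, not the actual Ignite test code, and assume the standard org.apache.hadoop.mapreduce imports), the map phase could emit UUID keys for JavaSerializationComparator to sort:

    // Hypothetical sketch: emit each input line as a Java-serialized UUID key,
    // which the job's JavaSerializationComparator can then order.
    static class MyMapper extends Mapper<LongWritable, Text, UUID, NullWritable> {
        @Override
        protected void map(LongWritable key, Text val, Context ctx)
            throws IOException, InterruptedException {
            ctx.write(UUID.fromString(val.toString()), NullWritable.get());
        }
    }

    static class MyReducer extends Reducer<UUID, NullWritable, Text, NullWritable> {
        @Override
        protected void reduce(UUID key, Iterable<NullWritable> vals, Context ctx)
            throws IOException, InterruptedException {
            // Keys arrive in sorted order; write them back out as text lines.
            ctx.write(new Text(key.toString()), NullWritable.get());
        }
    }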
Use of org.apache.hadoop.fs.AbstractFileSystem in project elasticsearch by elastic.
Class HdfsRepository, method createContext:
// Create the Hadoop FileContext.
@SuppressForbidden(reason = "lesser of two evils (the other being a bunch of JNI/classloader nightmares)")
private static FileContext createContext(URI uri, Settings repositorySettings) {
    Configuration cfg = new Configuration(repositorySettings.getAsBoolean("load_defaults", true));
    cfg.setClassLoader(HdfsRepository.class.getClassLoader());
    cfg.reloadConfiguration();

    Map<String, String> map = repositorySettings.getByPrefix("conf.").getAsMap();
    for (Entry<String, String> entry : map.entrySet()) {
        cfg.set(entry.getKey(), entry.getValue());
    }

    // Create a Hadoop user. If we want some auth, it must be done differently anyway, and tested.
    Subject subject;
    try {
        Class<?> clazz = Class.forName("org.apache.hadoop.security.User");
        Constructor<?> ctor = clazz.getConstructor(String.class);
        ctor.setAccessible(true);
        Principal principal = (Principal) ctor.newInstance(System.getProperty("user.name"));
        subject = new Subject(false, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet());
    } catch (ReflectiveOperationException e) {
        throw new RuntimeException(e);
    }

    // Disable the FS cache.
    cfg.setBoolean("fs.hdfs.impl.disable.cache", true);

    // Create the FileContext with our user.
    return Subject.doAs(subject, (PrivilegedAction<FileContext>) () -> {
        try {
            AbstractFileSystem fs = AbstractFileSystem.get(uri, cfg);
            return FileContext.getFileContext(fs, cfg);
        } catch (UnsupportedFileSystemException e) {
            throw new RuntimeException(e);
        }
    });
}
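A hedged usage sketch for the returned FileContext (the URI, the repositorySettings variable, and the repository path below are hypothetical; the real repository wires the FileContext into its blob store rather than using it directly like this):

    // Minimal sketch, assuming a reachable HDFS namenode and an existing
    // Settings instance; the URI and path are illustrative only.
    FileContext fc = createContext(URI.create("hdfs://namenode:8020"), repositorySettings);
    Path root = new Path("/elasticsearch/repositories/my_repo"); // hypothetical path
    fc.mkdir(root, FsPermission.getDirDefault(), true); // create parent dirs as needed
    for (FileStatus status : fc.util().listStatus(root)) {
        System.out.println(status.getPath());
    }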
Use of org.apache.hadoop.fs.AbstractFileSystem in project hadoop by apache.
Class TestNonAggregatingLogHandler, method testFailedDirLogDeletion:
/*
 * Test to ensure that we handle the cleanup of directories that may not have
 * the application log dirs we're trying to delete, or may have other problems.
 * The test creates 7 log dirs, fails the directory check for 4 of them, and
 * then checks that we tried to delete only the ones that passed the check.
 */
@Test
public void testFailedDirLogDeletion() throws Exception {
    File[] localLogDirs = getLocalLogDirFiles(this.getClass().getName(), 7);
    final List<String> localLogDirPaths = new ArrayList<String>(localLogDirs.length);
    for (int i = 0; i < localLogDirs.length; i++) {
        localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
    }

    String localLogDirsString = StringUtils.join(localLogDirPaths, ",");

    conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 0L);

    LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class);

    NonAggregatingLogHandler rawLogHandler = new NonAggregatingLogHandler(dispatcher, mockDelService,
        mockDirsHandler, new NMNullStateStoreService());
    NonAggregatingLogHandler logHandler = spy(rawLogHandler);

    AbstractFileSystem spylfs = spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
    FileContext lfs = FileContext.getFileContext(spylfs, conf);
    doReturn(lfs).when(logHandler).getLocalFileContext(isA(Configuration.class));

    logHandler.init(conf);
    logHandler.start();

    runMockedFailedDirs(logHandler, appId, user, mockDelService, mockDirsHandler, conf, spylfs, lfs, localLogDirs);

    logHandler.close();
}
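runMockedFailedDirs (a helper in this test class, not shown here) drives the failure scenario against the spied file system. A hedged illustration of the spy technique it builds on, not the helper's actual body:

    // Illustrative sketch only: force the spied AbstractFileSystem to fail
    // for one log dir, so the handler must skip it during cleanup.
    Path badDir = new Path(localLogDirs[0].getAbsolutePath());
    doThrow(new RuntimeException("simulated disk failure")).when(spylfs).getFileStatus(badDir);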
Use of org.apache.hadoop.fs.AbstractFileSystem in project hadoop by apache.
Class TestLogAggregationService, method testFailedDirsLocalFileDeletionAfterUpload:
/*
 * Test to make sure we handle cases where the directories we get back from
 * the LocalDirsHandler may have issues, including the log dir not being
 * present. The test uses helper functions from TestNonAggregatingLogHandler.
 */
@Test
public void testFailedDirsLocalFileDeletionAfterUpload() throws Exception {
    // Set up conf and services.
    DeletionService mockDelService = mock(DeletionService.class);

    File[] localLogDirs = TestNonAggregatingLogHandler.getLocalLogDirFiles(this.getClass().getName(), 7);
    final List<String> localLogDirPaths = new ArrayList<String>(localLogDirs.length);
    for (int i = 0; i < localLogDirs.length; i++) {
        localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
    }

    String localLogDirsString = StringUtils.join(localLogDirPaths, ",");

    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, this.remoteRootLogDir.getAbsolutePath());
    this.conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, 500);

    ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(application1, 1);

    this.dirsHandler = new LocalDirsHandlerService();
    LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class);

    LogAggregationService logAggregationService =
        spy(new LogAggregationService(dispatcher, this.context, mockDelService, mockDirsHandler));

    AbstractFileSystem spylfs = spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
    FileContext lfs = FileContext.getFileContext(spylfs, conf);
    doReturn(lfs).when(logAggregationService).getLocalFileContext(isA(Configuration.class));

    logAggregationService.init(this.conf);
    logAggregationService.start();

    TestNonAggregatingLogHandler.runMockedFailedDirs(logAggregationService, application1, user,
        mockDelService, mockDirsHandler, conf, spylfs, lfs, localLogDirs);

    logAggregationService.stop();
    assertEquals(0, logAggregationService.getNumAggregators());
    verify(logAggregationService).closeFileSystems(any(UserGroupInformation.class));

    ApplicationEvent[] expectedEvents = new ApplicationEvent[] {
        new ApplicationEvent(appAttemptId.getApplicationId(), ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
        new ApplicationEvent(appAttemptId.getApplicationId(), ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED) };

    checkEvents(appEventHandler, expectedEvents, true, "getType", "getApplicationID");
}
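The failed-dirs behavior itself is stubbed inside TestNonAggregatingLogHandler.runMockedFailedDirs. A hedged sketch of the kind of stubbing involved (the method choice and the healthy/failed split below are assumptions, not the helper's actual code):

    // Hypothetical sketch: report all dirs as readable but only some as healthy,
    // so the service must cope with dirs that fail the health check.
    when(mockDirsHandler.getLogDirsForRead()).thenReturn(localLogDirPaths);
    when(mockDirsHandler.getLogDirs()).thenReturn(localLogDirPaths.subList(0, 3));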
Use of org.apache.hadoop.fs.AbstractFileSystem in project hadoop by apache.
Class ViewFsBaseTest, method testGetFileChecksum:
@Test
public void testGetFileChecksum() throws AccessControlException, UnresolvedLinkException, IOException {
    AbstractFileSystem mockAFS = mock(AbstractFileSystem.class);
    InodeTree.ResolveResult<AbstractFileSystem> res =
        new InodeTree.ResolveResult<AbstractFileSystem>(null, mockAFS, null, new Path("someFile"));
    @SuppressWarnings("unchecked")
    InodeTree<AbstractFileSystem> fsState = mock(InodeTree.class);
    when(fsState.resolve(anyString(), anyBoolean())).thenReturn(res);

    ViewFs vfs = mock(ViewFs.class);
    vfs.fsState = fsState;

    when(vfs.getFileChecksum(new Path("/tmp/someFile"))).thenCallRealMethod();
    vfs.getFileChecksum(new Path("/tmp/someFile"));

    verify(mockAFS).getFileChecksum(new Path("someFile"));
}
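For clarity, a hedged restatement of what the resolve stub encodes (this assumes the same mocks as above and that ResolveResult's fields are accessible from the test's package, as they are for ViewFsBaseTest itself):

    // The ViewFs path /tmp/someFile resolves to the mocked target file system
    // with only the suffix "someFile" remaining, which is why the verify above
    // expects getFileChecksum(new Path("someFile")) on mockAFS.
    InodeTree.ResolveResult<AbstractFileSystem> r = fsState.resolve("/tmp/someFile", true);
    assertSame(mockAFS, r.targetFileSystem);
    assertEquals(new Path("someFile"), r.remainingPath);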