Use of org.apache.hadoop.fs.FileContext in project apex-malhar by apache: class FileSystemWALTest, method testFinalizeWithDelete.
@Test
public void testFinalizeWithDelete() throws IOException {
  testMeta.fsWAL.setMaxLength(2 * 1024);
  testMeta.fsWAL.setup();

  FileSystemWAL.FileSystemWALWriter fsWALWriter = testMeta.fsWAL.getWriter();
  write1KRecords(fsWALWriter, 2);
  testMeta.fsWAL.beforeCheckpoint(0);
  write1KRecords(fsWALWriter, 2);
  testMeta.fsWAL.beforeCheckpoint(1);
  write1KRecords(fsWALWriter, 2);
  testMeta.fsWAL.beforeCheckpoint(2);

  FileSystemWAL.FileSystemWALReader fsWALReader = testMeta.fsWAL.getReader();
  assertNumTuplesRead(fsWALReader, 6);

  testMeta.fsWAL.committed(0);
  fsWALWriter.delete(new FileSystemWAL.FileSystemWALPointer(2, 0));
  FileContext fileContext = FileContextUtils.getFileContext(testMeta.fsWAL.getFilePath());
  Assert.assertFalse("part 0 should have been deleted",
      fileContext.util().exists(new Path(testMeta.fsWAL.getPartFilePath(0))));

  testMeta.fsWAL.committed(1);
  Assert.assertFalse("part 1 should have been deleted",
      fileContext.util().exists(new Path(testMeta.fsWAL.getPartFilePath(1))));

  fsWALReader.seek(fsWALReader.getStartPointer());
  assertNumTuplesRead(fsWALReader, 2);
}
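For reference, the following is a minimal standalone sketch of the FileContext pattern the assertions above rely on: obtain a context, check whether a part file still exists, and delete it. It assumes the local file system and a hypothetical /tmp/wal/part_0 path; the test itself resolves its context from the WAL's file path via the project's FileContextUtils helper.

import java.io.IOException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class PartFileCheckSketch {
  public static void main(String[] args) throws IOException {
    // Local-FS context for illustration only; the test derives its context from the WAL path.
    FileContext fc = FileContext.getLocalFSFileContext();
    Path part0 = new Path("/tmp/wal/part_0"); // hypothetical part-file location
    if (fc.util().exists(part0)) {
      fc.delete(part0, false); // non-recursive delete of the single part file
    }
  }
}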
Use of org.apache.hadoop.fs.FileContext in project elasticsearch by elastic: class HdfsRepository, method doStart.
@Override
protected void doStart() {
  String uriSetting = getMetadata().settings().get("uri");
  if (Strings.hasText(uriSetting) == false) {
    throw new IllegalArgumentException("No 'uri' defined for hdfs snapshot/restore");
  }
  URI uri = URI.create(uriSetting);
  if ("hdfs".equalsIgnoreCase(uri.getScheme()) == false) {
    throw new IllegalArgumentException(String.format(Locale.ROOT,
        "Invalid scheme [%s] specified in uri [%s]; only 'hdfs' uri allowed for hdfs snapshot/restore",
        uri.getScheme(), uriSetting));
  }
  if (Strings.hasLength(uri.getPath()) && uri.getPath().equals("/") == false) {
    throw new IllegalArgumentException(String.format(Locale.ROOT,
        "Use 'path' option to specify a path [%s], not the uri [%s] for hdfs snapshot/restore",
        uri.getPath(), uriSetting));
  }
  String pathSetting = getMetadata().settings().get("path");
  if (pathSetting == null) {
    throw new IllegalArgumentException("No 'path' defined for hdfs snapshot/restore");
  }
  int bufferSize = getMetadata().settings().getAsBytesSize("buffer_size", DEFAULT_BUFFER_SIZE).bytesAsInt();
  try {
    // initialize our filecontext
    SpecialPermission.check();
    FileContext fileContext = AccessController.doPrivileged(
        (PrivilegedAction<FileContext>) () -> createContext(uri, getMetadata().settings()));
    blobStore = new HdfsBlobStore(fileContext, pathSetting, bufferSize);
    logger.debug("Using file-system [{}] for URI [{}], path [{}]",
        fileContext.getDefaultFileSystem(), fileContext.getDefaultFileSystem().getUri(), pathSetting);
  } catch (IOException e) {
    throw new ElasticsearchGenerationException(String.format(Locale.ROOT,
        "Cannot create HDFS repository for uri [%s]", uri), e);
  }
  super.doStart();
}
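The FileContext is created inside AccessController.doPrivileged so the Hadoop calls run with the plugin's own permissions; the PrivilegedAction&lt;FileContext&gt; cast selects the doPrivileged overload for the lambda. A minimal sketch of that wrapping, using the local file system in place of the repository's createContext helper (the helper and settings above are Elasticsearch-specific):

import java.security.AccessController;
import java.security.PrivilegedAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.UnsupportedFileSystemException;

final class PrivilegedFileContextSketch {
  // Wraps FileContext creation in a privileged block, mirroring the doStart call above.
  static FileContext create(Configuration cfg) {
    return AccessController.doPrivileged((PrivilegedAction<FileContext>) () -> {
      try {
        return FileContext.getLocalFSFileContext(cfg);
      } catch (UnsupportedFileSystemException e) {
        throw new RuntimeException(e);
      }
    });
  }
}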
Use of org.apache.hadoop.fs.FileContext in project elasticsearch by elastic: class HdfsRepository, method createContext.
// create hadoop filecontext
@SuppressForbidden(reason = "lesser of two evils (the other being a bunch of JNI/classloader nightmares)")
private static FileContext createContext(URI uri, Settings repositorySettings) {
  Configuration cfg = new Configuration(repositorySettings.getAsBoolean("load_defaults", true));
  cfg.setClassLoader(HdfsRepository.class.getClassLoader());
  cfg.reloadConfiguration();
  Map<String, String> map = repositorySettings.getByPrefix("conf.").getAsMap();
  for (Entry<String, String> entry : map.entrySet()) {
    cfg.set(entry.getKey(), entry.getValue());
  }
  // create a hadoop user; if we want real auth, it must be done differently anyway, and tested.
  Subject subject;
  try {
    Class<?> clazz = Class.forName("org.apache.hadoop.security.User");
    Constructor<?> ctor = clazz.getConstructor(String.class);
    ctor.setAccessible(true);
    Principal principal = (Principal) ctor.newInstance(System.getProperty("user.name"));
    subject = new Subject(false, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet());
  } catch (ReflectiveOperationException e) {
    throw new RuntimeException(e);
  }
  // disable FS cache
  cfg.setBoolean("fs.hdfs.impl.disable.cache", true);
  // create the filecontext with our user
  return Subject.doAs(subject, (PrivilegedAction<FileContext>) () -> {
    try {
      AbstractFileSystem fs = AbstractFileSystem.get(uri, cfg);
      return FileContext.getFileContext(fs, cfg);
    } catch (UnsupportedFileSystemException e) {
      throw new RuntimeException(e);
    }
  });
}
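createContext builds the context through AbstractFileSystem.get so that resolution can run under a specific Subject. When no custom user is required, a FileContext for an HDFS URI can also be obtained from the static factory directly; the sketch below assumes a placeholder namenode URI and a default configuration.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.UnsupportedFileSystemException;

public class SimpleHdfsContextSketch {
  static FileContext hdfsContext() throws UnsupportedFileSystemException {
    Configuration cfg = new Configuration(true);        // load default resources
    cfg.setBoolean("fs.hdfs.impl.disable.cache", true); // same cache opt-out as above
    return FileContext.getFileContext(URI.create("hdfs://namenode:8020"), cfg); // placeholder URI
  }
}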
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache: class TestEntityGroupFSTimelineStore, method tearDownClass.
@AfterClass
public static void tearDownClass() throws Exception {
  hdfsCluster.shutdown();
  FileContext fileContext = FileContext.getLocalFSFileContext();
  fileContext.delete(new Path(config.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH)), true);
}
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache: class TestFSDownload, method deleteTestDir.
@AfterClass
public static void deleteTestDir() throws IOException {
  FileContext fs = FileContext.getLocalFSFileContext();
  fs.delete(new Path("target", TestFSDownload.class.getSimpleName()), true);
}
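Both cleanup hooks above rely on FileContext.getLocalFSFileContext() plus a recursive delete. A small self-contained sketch of that pattern (the scratch directory name is a placeholder): create a directory, then remove the whole tree by passing the recursive flag.

import java.io.IOException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class RecursiveCleanupSketch {
  public static void main(String[] args) throws IOException {
    FileContext fc = FileContext.getLocalFSFileContext();
    Path dir = new Path("target", "fc-cleanup-sketch"); // placeholder scratch directory
    fc.mkdir(dir, FsPermission.getDirDefault(), true);  // create, including parents
    boolean deleted = fc.delete(dir, true);             // recursive: removes the whole tree
    System.out.println("deleted: " + deleted);
  }
}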