use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
the class TestSymlinkHdfsDisable method testSymlinkHdfsDisable.
@Test(timeout = 60000)
public void testSymlinkHdfsDisable() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // disable symlink resolution
  conf.setBoolean(CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY, false);
  // spin up minicluster, get dfs and filecontext
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
  // Create test files/links
  FileContextTestHelper helper = new FileContextTestHelper("/tmp/TestSymlinkHdfsDisable");
  Path root = helper.getTestRootPath(fc);
  Path target = new Path(root, "target");
  Path link = new Path(root, "link");
  DFSTestUtil.createFile(dfs, target, 4096, (short) 1, 0xDEADDEAD);
  fc.createSymlink(target, link, false);
  // Try to resolve links with FileContext and FileSystem
  try {
    fc.open(link);
    fail("Expected error when attempting to resolve link");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("resolution is disabled", e);
  }
  try {
    dfs.open(link);
    fail("Expected error when attempting to resolve link");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("resolution is disabled", e);
  }
}
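For contrast, with the default client configuration (FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY left at its default of true) the same kind of link resolves transparently. A minimal sketch, assuming a running MiniDFSCluster named cluster; the /tmp/symlink-demo paths are placeholders, not part of the test above:

// Sketch only: default client configuration, so symlink resolution stays enabled.
// "cluster" is assumed to be a running MiniDFSCluster; paths are placeholders.
Configuration conf = new HdfsConfiguration();
DistributedFileSystem dfs = cluster.getFileSystem();
FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
Path target = new Path("/tmp/symlink-demo/target");
Path link = new Path("/tmp/symlink-demo/link");
DFSTestUtil.createFile(dfs, target, 4096, (short) 1, 0L);
fc.createSymlink(target, link, true);
// With resolution enabled, opening the link reads the target's data.
fc.open(link).close();
// getFileLinkStatus does not follow the link, so it reports the link itself.
FileStatus stat = fc.getFileLinkStatus(link);
System.out.println(stat.isSymlink() + " -> " + stat.getSymlink());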
use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
the class TestUrlStreamHandler method testFileUrls.
/**
 * Test opening and reading from an InputStream through a file:// URL.
 *
 * @throws IOException
 * @throws URISyntaxException
 */
@Test
public void testFileUrls() throws IOException, URISyntaxException {
  // URLStreamHandler is already set in JVM by testDfsUrls()
  Configuration conf = new HdfsConfiguration();
  // Locate the test temporary directory.
  if (!TEST_ROOT_DIR.exists()) {
    if (!TEST_ROOT_DIR.mkdirs()) {
      throw new IOException("Cannot create temporary directory: " + TEST_ROOT_DIR);
    }
  }
  File tmpFile = new File(TEST_ROOT_DIR, "thefile");
  URI uri = tmpFile.toURI();
  FileSystem fs = FileSystem.get(uri, conf);
  try {
    byte[] fileContent = new byte[1024];
    for (int i = 0; i < fileContent.length; ++i) {
      fileContent[i] = (byte) i;
    }
    // First create the file through the FileSystem API
    OutputStream os = fs.create(new Path(uri.getPath()));
    os.write(fileContent);
    os.close();
    // Second, open and read the file content through the URL API.
    URL fileURL = uri.toURL();
    InputStream is = fileURL.openStream();
    assertNotNull(is);
    byte[] bytes = new byte[4096];
    assertEquals(1024, is.read(bytes));
    is.close();
    for (int i = 0; i < fileContent.length; ++i) {
      assertEquals(fileContent[i], bytes[i]);
    }
    // Cleanup: delete the file
    fs.delete(new Path(uri.getPath()), false);
  } finally {
    fs.close();
  }
}
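One caveat in the read step above: InputStream.read(byte[]) is not guaranteed to fill the buffer in a single call, even though it does here for a small local file. A more defensive sketch of the same check, using Hadoop's org.apache.hadoop.io.IOUtils and JUnit's assertArrayEquals (both would need imports not shown in the snippet):

// Sketch: read exactly fileContent.length bytes, failing if the stream ends early.
InputStream is = fileURL.openStream();
try {
  byte[] bytes = new byte[fileContent.length];
  IOUtils.readFully(is, bytes, 0, bytes.length);
  assertArrayEquals(fileContent, bytes);
} finally {
  is.close();
}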
use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
the class TestUrlStreamHandler method testDfsUrls.
/**
 * Test opening and reading from an InputStream through a hdfs:// URL.
 * <p>
 * First generate a file with some content through the FileSystem API, then
 * try to open and read the file through the URL stream API.
 *
 * @throws IOException
 */
@Test
public void testDfsUrls() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  // Set up our own factory.
  // setURLStreamHandlerFactory can be called at most once per JVM, so the
  // new URLStreamHandler remains in effect for all test cases in
  // TestUrlStreamHandler.
  FsUrlStreamHandlerFactory factory = new org.apache.hadoop.fs.FsUrlStreamHandlerFactory();
  java.net.URL.setURLStreamHandlerFactory(factory);
  Path filePath = new Path("/thefile");
  try {
    byte[] fileContent = new byte[1024];
    for (int i = 0; i < fileContent.length; ++i) {
      fileContent[i] = (byte) i;
    }
    // First create the file through the FileSystem API
    OutputStream os = fs.create(filePath);
    os.write(fileContent);
    os.close();
    // Second, open and read the file content through the URL API
    URI uri = fs.getUri();
    URL fileURL = new URL(uri.getScheme(), uri.getHost(), uri.getPort(), filePath.toString());
    InputStream is = fileURL.openStream();
    assertNotNull(is);
    byte[] bytes = new byte[4096];
    assertEquals(1024, is.read(bytes));
    is.close();
    for (int i = 0; i < fileContent.length; ++i) {
      assertEquals(fileContent[i], bytes[i]);
    }
    // Cleanup: delete the file
    fs.delete(filePath, false);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
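Outside of tests, the same one-shot registration is how an application makes java.net.URL understand hdfs:// (and other Hadoop filesystem) schemes. A minimal sketch, typically done once at startup; the NameNode host/port and path are placeholders:

// Sketch only: register the Hadoop URL handler once, early in application startup.
import java.io.InputStream;
import java.net.URL;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;

public class HdfsUrlReader {
  static {
    // setURLStreamHandlerFactory may be called at most once per JVM;
    // it throws an Error if a factory has already been set.
    URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
  }

  public static void main(String[] args) throws Exception {
    // After registration, hdfs:// URLs can be read like any other URL.
    try (InputStream in = new URL("hdfs://namenode:8020/thefile").openStream()) {
      System.out.println("first byte: " + in.read());
    }
  }
}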
use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
the class TestResolveHdfsSymlink method setUp.
@BeforeClass
public static void setUp() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
}
use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
the class TestSymlinkHdfs method beforeClassSetup.
@BeforeClass
public static void beforeClassSetup() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.set(FsPermission.UMASK_LABEL, "000");
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
  cluster = new MiniDFSCluster.Builder(conf).build();
  webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
  dfs = cluster.getFileSystem();
}
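The matching teardown is not part of this snippet; a sketch of a typical @AfterClass companion, assuming the same static cluster, dfs, and webhdfs fields, would be:

// Sketch of a companion teardown; field names are assumed from the setup above.
@AfterClass
public static void afterClassTeardown() throws Exception {
  // Close the clients first, then stop the minicluster.
  if (webhdfs != null) {
    webhdfs.close();
  }
  if (dfs != null) {
    dfs.close();
  }
  if (cluster != null) {
    cluster.shutdown();
  }
}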