Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
From the class TestOfflineImageViewerForContentSummary, method createOriginalFSImage.
/**
 * Create a populated namespace for later testing. Save its contents to a
 * data structure and store its fsimage location. We only want to generate
 * the fsimage file once and use it for multiple tests.
 */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();
    Path parentDir = new Path("/parentDir");
    Path childDir1 = new Path(parentDir, "childDir1");
    Path childDir2 = new Path(parentDir, "childDir2");
    Path dirForLinks = new Path("/dirForLinks");
    hdfs.mkdirs(parentDir);
    hdfs.mkdirs(childDir1);
    hdfs.mkdirs(childDir2);
    hdfs.mkdirs(dirForLinks);
    hdfs.setQuota(parentDir, 10, 1024 * 1024 * 1024);
    Path file1OnParentDir = new Path(parentDir, "file1");
    try (FSDataOutputStream o = hdfs.create(file1OnParentDir)) {
      o.write("123".getBytes());
    }
    try (FSDataOutputStream o = hdfs.create(new Path(parentDir, "file2"))) {
      o.write("1234".getBytes());
    }
    try (FSDataOutputStream o = hdfs.create(new Path(childDir1, "file3"))) {
      o.write("123".getBytes());
    }
    try (FSDataOutputStream o = hdfs.create(new Path(parentDir, "file4"))) {
      o.write("123".getBytes());
    }
    Path link1 = new Path("/link1");
    Path link2 = new Path("/dirForLinks/linkfordir1");
    hdfs.createSymlink(new Path("/parentDir/file4"), link1, true);
    summaryFromDFS = hdfs.getContentSummary(parentDir);
    emptyDirSummaryFromDFS = hdfs.getContentSummary(childDir2);
    fileSummaryFromDFS = hdfs.getContentSummary(file1OnParentDir);
    symLinkSummaryFromDFS = hdfs.getContentSummary(link1);
    hdfs.createSymlink(childDir1, link2, true);
    symLinkSummaryForDirContainsFromDFS =
        hdfs.getContentSummary(new Path("/dirForLinks"));
    // Write results to the fsimage file
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();
    // Determine the location of the fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
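The summaries captured above feed the individual test cases. As a minimal sketch of what those assertions can look like (the method name is hypothetical and not part of the original class; the expected numbers follow directly from the namespace built above):

@Test
public void verifyParentDirSummary() {
  // Hypothetical check, assumed to live in the same test class so it can
  // see the static summaryFromDFS captured in createOriginalFSImage().
  // /parentDir recursively holds file1 (3 B), file2 (4 B), file4 (3 B),
  // and childDir1/file3 (3 B); the directory count includes parentDir
  // itself plus childDir1 and childDir2.
  assertEquals(4, summaryFromDFS.getFileCount());
  assertEquals(3, summaryFromDFS.getDirectoryCount());
  assertEquals(13, summaryFromDFS.getLength());
  // Quotas were set via hdfs.setQuota(parentDir, 10, 1024 * 1024 * 1024).
  assertEquals(10, summaryFromDFS.getQuota());
  assertEquals(1024 * 1024 * 1024, summaryFromDFS.getSpaceQuota());
}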
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
From the class TestOfflineImageViewerForXAttr, method createOriginalFSImage.
/**
 * Create a populated namespace for later testing. Save its contents to a
 * data structure and store its fsimage location. We only want to generate
 * the fsimage file once and use it for multiple tests.
 */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();
    // Create a namespace with XAttributes
    Path dir = new Path("/dir1");
    hdfs.mkdirs(dir);
    hdfs.setXAttr(dir, "user.attr1", "value1".getBytes());
    hdfs.setXAttr(dir, "user.attr2", "value2".getBytes());
    // Write results to the fsimage file
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();
    // Build the expected JSON, first for one attribute, then for both
    List<XAttr> attributes = new ArrayList<XAttr>();
    attributes.add(XAttrHelper.buildXAttr("user.attr1", "value1".getBytes()));
    attr1JSon = JsonUtil.toJsonString(attributes, null);
    attributes.add(XAttrHelper.buildXAttr("user.attr2", "value2".getBytes()));
    attr2JSon = JsonUtil.toJsonString(attributes, null);
    // Determine the location of the fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
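Before the cluster is torn down, the xattrs can also be sanity-checked straight through the live filesystem. A minimal sketch, assuming it runs inside the try block above while hdfs is still usable (FileSystem.getXAttrs returns a Map<String, byte[]>):

// Read back the xattrs that were just set on /dir1.
Map<String, byte[]> xattrs = hdfs.getXAttrs(dir);
assertEquals(2, xattrs.size());
assertArrayEquals("value1".getBytes(), xattrs.get("user.attr1"));
assertArrayEquals("value2".getBytes(), xattrs.get("user.attr2"));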
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
From the class TestWebHDFS, method testWebHdfsNoRedirect.
/**
 * Test that when "&noredirect=true" is added to operations CREATE, APPEND,
 * OPEN, and GETFILECHECKSUM, the response (which is usually a 307 temporary
 * redirect) is a 200 with JSON that contains the redirected location.
 */
@Test
public void testWebHdfsNoRedirect() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    LOG.info("Started cluster");
    InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
    URL url = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirectCreate"
            + "?op=CREATE"
            + Param.toSortedString("&", new NoRedirectParam(true)));
    LOG.info("Sending create request " + url);
    checkResponseContainsLocation(url, "PUT");
    // Write a file that we can read
    final WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
    final String PATH = "/testWebHdfsNoRedirect";
    byte[] CONTENTS = new byte[1024];
    RANDOM.nextBytes(CONTENTS);
    try (OutputStream os = fs.create(new Path(PATH))) {
      os.write(CONTENTS);
    }
    url = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirect"
            + "?op=OPEN"
            + Param.toSortedString("&", new NoRedirectParam(true)));
    LOG.info("Sending open request " + url);
    checkResponseContainsLocation(url, "GET");
    url = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirect"
            + "?op=GETFILECHECKSUM"
            + Param.toSortedString("&", new NoRedirectParam(true)));
    LOG.info("Sending getfilechecksum request " + url);
    checkResponseContainsLocation(url, "GET");
    url = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirect"
            + "?op=APPEND"
            + Param.toSortedString("&", new NoRedirectParam(true)));
    LOG.info("Sending append request " + url);
    checkResponseContainsLocation(url, "POST");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
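checkResponseContainsLocation is a helper defined elsewhere in TestWebHDFS and not shown here. A plausible shape for it, as a sketch rather than the actual implementation, using only the JDK (java.net, java.io, java.nio.charset) plus JUnit asserts: issue the request without following redirects, expect a 200, and expect a "Location" entry in the JSON body.

private static void checkResponseContainsLocation(URL url, String method)
    throws IOException {
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.setRequestMethod(method);
  conn.setInstanceFollowRedirects(false);
  // With noredirect=true the server answers 200 instead of 307.
  assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  StringBuilder body = new StringBuilder();
  try (BufferedReader reader = new BufferedReader(
      new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
    for (String line; (line = reader.readLine()) != null; ) {
      body.append(line);
    }
  }
  // The would-be redirect target is returned in the JSON payload.
  assertTrue(body.toString().contains("Location"));
}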
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
From the class TestWebHDFS, method testWebHdfsAppend.
@Test
public void testWebHdfsAppend() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  final int dnNumber = 3;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNumber).build();
    final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path appendFile = new Path("/testAppend.txt");
    final String content = "hello world";
    DFSTestUtil.writeFile(fs, appendFile, content);
    // Take down all datanodes but one
    for (int index = 0; index < dnNumber - 1; index++) {
      cluster.shutdownDataNode(index);
    }
    cluster.restartNameNodes();
    cluster.waitActive();
    try {
      DFSTestUtil.appendFile(webFS, appendFile, content);
      fail("Should fail to append file since datanode number is 1 "
          + "and replication is 3");
    } catch (IOException ignored) {
      // The failed append must not have corrupted the original content
      String resultContent = DFSTestUtil.readFile(fs, appendFile);
      assertEquals(content, resultContent);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown(true);
    }
  }
}
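For contrast, a hypothetical variation (not part of the original test) that should let the append go through on the single surviving datanode: lower the file's replication factor to 1 before appending, so the write pipeline no longer needs three nodes.

// Sketch only: with replication 1, one live datanode satisfies the pipeline.
fs.setReplication(appendFile, (short) 1);
DFSTestUtil.appendFile(webFS, appendFile, content);
assertEquals(content + content, DFSTestUtil.readFile(fs, appendFile));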
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
From the class TestWebHDFSForHA, method testHA.
@Test
public void testHA() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();
    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(0);
    final Path dir = new Path("/test");
    Assert.assertTrue(fs.mkdirs(dir));
    // Fail over to the second namenode; the webhdfs client should follow
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    final Path dir2 = new Path("/test2");
    Assert.assertTrue(fs.mkdirs(dir2));
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
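The test relies on LOGICAL_NAME, WEBHDFS_URI, and topo being defined as static fields elsewhere in TestWebHDFSForHA. They presumably look roughly like this (the literal values are assumptions; the topology is the standard two-NameNode HA layout that MiniDFSNNTopology.simpleHATopology() also builds):

private static final String LOGICAL_NAME = "minidfs";
private static final URI WEBHDFS_URI = URI.create(
    WebHdfsConstants.WEBHDFS_SCHEME + "://" + LOGICAL_NAME);
// Two NameNodes under one nameservice, so the cluster can fail over.
private static final MiniDFSNNTopology topo = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf(LOGICAL_NAME)
        .addNN(new MiniDFSNNTopology.NNConf("nn1"))
        .addNN(new MiniDFSNNTopology.NNConf("nn2")));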