Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
Class TestWebHdfsFileSystemContract, method testSeek.
public void testSeek() throws IOException {
  final Path dir = new Path("/test/testSeek");
  assertTrue(fs.mkdirs(dir));

  {
    //test zero file size
    final Path zero = new Path(dir, "zero");
    fs.create(zero).close();
    int count = 0;
    final FSDataInputStream in = fs.open(zero);
    for (; in.read() != -1; count++);
    in.close();
    assertEquals(0, count);
  }

  final byte[] mydata = new byte[1 << 20];
  new Random().nextBytes(mydata);

  final Path p = new Path(dir, "file");
  FSDataOutputStream out = fs.create(p, false, 4096, (short) 3, 1L << 17);
  out.write(mydata, 0, mydata.length);
  out.close();

  final int one_third = mydata.length / 3;
  final int two_third = one_third * 2;

  {
    //test seek
    final int offset = one_third;
    final int len = mydata.length - offset;
    final byte[] buf = new byte[len];

    final FSDataInputStream in = fs.open(p);
    in.seek(offset);
    //read all remaining data
    in.readFully(buf);
    in.close();

    for (int i = 0; i < buf.length; i++) {
      assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
          mydata[i + offset], buf[i]);
    }
  }

  {
    //test position read (read the data after the two_third location)
    final int offset = two_third;
    final int len = mydata.length - offset;
    final byte[] buf = new byte[len];

    final FSDataInputStream in = fs.open(p);
    in.readFully(offset, buf);
    in.close();

    for (int i = 0; i < buf.length; i++) {
      assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
          mydata[i + offset], buf[i]);
    }
  }
}
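A minimal sketch (not part of the Hadoop test above) contrasting the two read styles testSeek exercises: seek() repositions the stream before an ordinary read, while the positioned readFully(long, byte[]) overload reads at an absolute offset without moving the stream position. The fs variable is assumed to be the same FileSystem fixture used by the test; the path and offsets are hypothetical.

  try (FSDataInputStream in = fs.open(new Path("/test/testSeek/file"))) {
    byte[] buf = new byte[1024];
    in.seek(4096);            // move the stream position, then read from it
    in.readFully(buf);        // fills buf starting at byte 4096 of the file
    in.readFully(8192, buf);  // positioned read; the stream position stays at 4096 + 1024
  }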
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
Class TestWebHdfsFileSystemContract, method testLengthParamLongerThanFile.
/**
 * Test get with length parameter greater than actual file length.
 */
public void testLengthParamLongerThanFile() throws IOException {
  WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) fs;
  Path dir = new Path("/test");
  assertTrue(webhdfs.mkdirs(dir));

  // Create a file with some content.
  Path testFile = new Path("/test/testLengthParamLongerThanFile");
  String content = "testLengthParamLongerThanFile";
  FSDataOutputStream testFileOut = webhdfs.create(testFile);
  try {
    testFileOut.write(content.getBytes("US-ASCII"));
  } finally {
    IOUtils.closeStream(testFileOut);
  }

  // Open the file, but request length longer than actual file length by 1.
  HttpOpParam.Op op = GetOpParam.Op.OPEN;
  URL url = webhdfs.toUrl(op, testFile,
      new LengthParam((long) (content.length() + 1)));
  HttpURLConnection conn = null;
  InputStream is = null;
  try {
    conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.setDoOutput(op.getDoOutput());
    conn.setInstanceFollowRedirects(true);

    // Expect OK response and Content-Length header equal to actual length.
    assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
    assertEquals(String.valueOf(content.length()),
        conn.getHeaderField("Content-Length"));

    // Check content matches.
    byte[] respBody = new byte[content.length()];
    is = conn.getInputStream();
    IOUtils.readFully(is, respBody, 0, content.length());
    assertEquals(content, new String(respBody, "US-ASCII"));
  } finally {
    IOUtils.closeStream(is);
    if (conn != null) {
      conn.disconnect();
    }
  }
}
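For reference, the same behavior can be observed against the WebHDFS REST endpoint directly: the OPEN operation accepts optional offset and length query parameters, and the server caps the response at the bytes actually available. A hedged sketch, assuming a NameNode web address of localhost:9870 and the file created by the test above; adjust host, port and authentication for a real cluster.

  URL restUrl = new URL("http://localhost:9870/webhdfs/v1/test/testLengthParamLongerThanFile"
      + "?op=OPEN&length=1000000");           // length far beyond the file size
  HttpURLConnection restConn = (HttpURLConnection) restUrl.openConnection();
  restConn.setRequestMethod("GET");
  restConn.setInstanceFollowRedirects(true);   // OPEN redirects from the NameNode to a DataNode
  System.out.println(restConn.getResponseCode());                 // 200 despite the oversized length
  System.out.println(restConn.getHeaderField("Content-Length"));  // capped at the actual file length
  restConn.disconnect();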
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
Class TestWebHdfsFileSystemContract, method testResponseCode.
public void testResponseCode() throws IOException {
  final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) fs;
  final Path root = new Path("/");
  final Path dir = new Path("/test/testUrl");
  assertTrue(webhdfs.mkdirs(dir));

  final Path file = new Path("/test/file");
  final FSDataOutputStream out = webhdfs.create(file);
  out.write(1);
  out.close();

  {
    //test GETHOMEDIRECTORY
    final URL url = webhdfs.toUrl(GetOpParam.Op.GETHOMEDIRECTORY, root);
    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    final Map<?, ?> m = WebHdfsTestUtil.connectAndGetJson(conn, HttpServletResponse.SC_OK);
    assertEquals(webhdfs.getHomeDirectory().toUri().getPath(),
        m.get(Path.class.getSimpleName()));
    conn.disconnect();
  }

  {
    //test GETHOMEDIRECTORY with unauthorized doAs
    final URL url = webhdfs.toUrl(GetOpParam.Op.GETHOMEDIRECTORY, root,
        new DoAsParam(ugi.getShortUserName() + "proxy"));
    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.connect();
    assertEquals(HttpServletResponse.SC_FORBIDDEN, conn.getResponseCode());
    conn.disconnect();
  }

  {
    //test set owner with empty parameters
    final URL url = webhdfs.toUrl(PutOpParam.Op.SETOWNER, dir);
    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.connect();
    assertEquals(HttpServletResponse.SC_BAD_REQUEST, conn.getResponseCode());
    conn.disconnect();
  }

  {
    //test set replication on a directory
    final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
    final URL url = webhdfs.toUrl(op, dir);
    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.connect();
    assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
    assertFalse(webhdfs.setReplication(dir, (short) 1));
    conn.disconnect();
  }

  {
    //test get file status for a non-exist file.
    final Path p = new Path(dir, "non-exist");
    final URL url = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, p);
    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.connect();
    assertEquals(HttpServletResponse.SC_NOT_FOUND, conn.getResponseCode());
    conn.disconnect();
  }

  {
    //test set permission with empty parameters
    final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION;
    final URL url = webhdfs.toUrl(op, dir);
    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.connect();
    assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
    assertEquals(0, conn.getContentLength());
    assertEquals(MediaType.APPLICATION_OCTET_STREAM, conn.getContentType());
    assertEquals((short) 0755, webhdfs.getFileStatus(dir).getPermission().toShort());
    conn.disconnect();
  }

  {
    //test append.
    AppendTestUtil.testAppend(fs, new Path(dir, "append"));
  }

  {
    //test NamenodeAddressParam not set.
    final HttpOpParam.Op op = PutOpParam.Op.CREATE;
    final URL url = webhdfs.toUrl(op, dir);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.setDoOutput(false);
    conn.setInstanceFollowRedirects(false);
    conn.connect();
    final String redirect = conn.getHeaderField("Location");
    conn.disconnect();

    //remove NamenodeAddressParam
    WebHdfsFileSystem.LOG.info("redirect = " + redirect);
    final int i = redirect.indexOf(NamenodeAddressParam.NAME);
    final int j = redirect.indexOf("&", i);
    String modified = redirect.substring(0, i - 1) + redirect.substring(j);
    WebHdfsFileSystem.LOG.info("modified = " + modified);

    //connect to datanode
    conn = (HttpURLConnection) new URL(modified).openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.setDoOutput(op.getDoOutput());
    conn.connect();
    assertEquals(HttpServletResponse.SC_BAD_REQUEST, conn.getResponseCode());
  }

  {
    //test jsonParse with non-json type.
    final HttpOpParam.Op op = GetOpParam.Op.OPEN;
    final URL url = webhdfs.toUrl(op, file);
    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.connect();
    try {
      WebHdfsFileSystem.jsonParse(conn, false);
      fail();
    } catch (IOException ioe) {
      WebHdfsFileSystem.LOG.info("GOOD", ioe);
    }
    conn.disconnect();
  }

  {
    //test create with path containing spaces
    HttpOpParam.Op op = PutOpParam.Op.CREATE;
    Path path = new Path("/test/path with spaces");
    URL url = webhdfs.toUrl(op, path);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.setDoOutput(false);
    conn.setInstanceFollowRedirects(false);
    final String redirect;
    try {
      conn.connect();
      assertEquals(HttpServletResponse.SC_TEMPORARY_REDIRECT, conn.getResponseCode());
      redirect = conn.getHeaderField("Location");
    } finally {
      conn.disconnect();
    }

    conn = (HttpURLConnection) new URL(redirect).openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.setDoOutput(op.getDoOutput());
    try {
      conn.connect();
      assertEquals(HttpServletResponse.SC_CREATED, conn.getResponseCode());
    } finally {
      conn.disconnect();
    }
  }
}
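The append sub-test above delegates to AppendTestUtil; for context, appending through the FileSystem API also yields an FSDataOutputStream, just like create. A minimal sketch, assuming the target file already exists and the file system supports append; webhdfs is the fixture from the test and the path is hypothetical.

  Path appendFile = new Path("/test/testUrl/append");   // hypothetical existing file
  try (FSDataOutputStream appendOut = webhdfs.append(appendFile)) {
    appendOut.write("more bytes".getBytes(StandardCharsets.UTF_8));
  }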
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
Class TestWebHdfsFileSystemContract, method testOffsetPlusLengthParamsLongerThanFile.
/**
 * Test get with offset and length parameters that combine to request a length
 * greater than actual file length.
 */
public void testOffsetPlusLengthParamsLongerThanFile() throws IOException {
  WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) fs;
  Path dir = new Path("/test");
  assertTrue(webhdfs.mkdirs(dir));

  // Create a file with some content.
  Path testFile = new Path("/test/testOffsetPlusLengthParamsLongerThanFile");
  String content = "testOffsetPlusLengthParamsLongerThanFile";
  FSDataOutputStream testFileOut = webhdfs.create(testFile);
  try {
    testFileOut.write(content.getBytes("US-ASCII"));
  } finally {
    IOUtils.closeStream(testFileOut);
  }

  // Open the file, but request offset starting at 1 and length equal to file
  // length. Considering the offset, this is longer than the actual content.
  HttpOpParam.Op op = GetOpParam.Op.OPEN;
  URL url = webhdfs.toUrl(op, testFile,
      new LengthParam(Long.valueOf(content.length())), new OffsetParam(1L));
  HttpURLConnection conn = null;
  InputStream is = null;
  try {
    conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.setDoOutput(op.getDoOutput());
    conn.setInstanceFollowRedirects(true);

    // Expect OK response and Content-Length header equal to actual length.
    assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
    assertEquals(String.valueOf(content.length() - 1),
        conn.getHeaderField("Content-Length"));

    // Check content matches.
    byte[] respBody = new byte[content.length() - 1];
    is = conn.getInputStream();
    IOUtils.readFully(is, respBody, 0, content.length() - 1);
    assertEquals(content.substring(1), new String(respBody, "US-ASCII"));
  } finally {
    IOUtils.closeStream(is);
    if (conn != null) {
      conn.disconnect();
    }
  }
}
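The same contract can be observed through the client API instead of a raw HTTP connection: a positioned read past the first byte returns at most the bytes that remain, no matter how large the supplied buffer is. A sketch reusing the webhdfs, testFile and content variables from the test above.

  try (FSDataInputStream in = webhdfs.open(testFile)) {
    byte[] buf = new byte[content.length() + 100];   // deliberately larger than the file
    int n = in.read(1, buf, 0, buf.length);          // positioned read starting at offset 1
    // n is at most content.length() - 1; a read never returns more than the remaining bytes
  }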
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
Class PlanCommand, method execute.
/**
 * Runs the plan command. This command can be run with various options like
 * <p>
 * -plan -node IP, -plan -node hostName, -plan -node DatanodeUUID
 *
 * @param cmd - CommandLine
 * @throws Exception
 */
@Override
public void execute(CommandLine cmd) throws Exception {
  StrBuilder result = new StrBuilder();
  String outputLine = "";
  LOG.debug("Processing Plan Command.");
  Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.PLAN));
  verifyCommandOptions(DiskBalancerCLI.PLAN, cmd);

  if (cmd.getOptionValue(DiskBalancerCLI.PLAN) == null) {
    throw new IllegalArgumentException("A node name is required to create a"
        + " plan.");
  }

  if (cmd.hasOption(DiskBalancerCLI.BANDWIDTH)) {
    this.bandwidth = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI.BANDWIDTH));
  }

  if (cmd.hasOption(DiskBalancerCLI.MAXERROR)) {
    this.maxError = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI.MAXERROR));
  }

  readClusterInfo(cmd);
  String output = null;
  if (cmd.hasOption(DiskBalancerCLI.OUTFILE)) {
    output = cmd.getOptionValue(DiskBalancerCLI.OUTFILE);
  }
  setOutputPath(output);

  // -plan nodename is the command line argument.
  DiskBalancerDataNode node = getNode(cmd.getOptionValue(DiskBalancerCLI.PLAN));
  if (node == null) {
    throw new IllegalArgumentException("Unable to find the specified node. "
        + cmd.getOptionValue(DiskBalancerCLI.PLAN));
  }

  this.thresholdPercentage = getThresholdPercentage(cmd);
  LOG.debug("threshold Percentage is {}", this.thresholdPercentage);
  setNodesToProcess(node);
  populatePathNames(node);

  NodePlan plan = null;
  List<NodePlan> plans = getCluster().computePlan(this.thresholdPercentage);
  setPlanParams(plans);
  if (plans.size() > 0) {
    plan = plans.get(0);
  }

  try (FSDataOutputStream beforeStream = create(String.format(
      DiskBalancerCLI.BEFORE_TEMPLATE,
      cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
    beforeStream.write(getCluster().toJson().getBytes(StandardCharsets.UTF_8));
  }

  try {
    if (plan != null && plan.getVolumeSetPlans().size() > 0) {
      outputLine = String.format("Writing plan to:");
      recordOutput(result, outputLine);

      final String planFileName = String.format(
          DiskBalancerCLI.PLAN_TEMPLATE,
          cmd.getOptionValue(DiskBalancerCLI.PLAN));
      final String planFileFullName =
          new Path(getOutputPath(), planFileName).toString();
      recordOutput(result, planFileFullName);

      try (FSDataOutputStream planStream = create(planFileName)) {
        planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
      }
    } else {
      outputLine = String.format(
          "No plan generated. DiskBalancing not needed for node: %s"
              + " threshold used: %s",
          cmd.getOptionValue(DiskBalancerCLI.PLAN), this.thresholdPercentage);
      recordOutput(result, outputLine);
    }

    if (cmd.hasOption(DiskBalancerCLI.VERBOSE) && plans.size() > 0) {
      printToScreen(plans);
    }
  } catch (Exception e) {
    final String errMsg = "Errors while recording the output of plan command.";
    LOG.error(errMsg, e);
    result.appendln(errMsg);
    result.appendln(Throwables.getStackTraceAsString(e));
  }

  getPrintStream().print(result.toString());
}
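For context, this method is typically reached from the disk balancer CLI, e.g. hdfs diskbalancer -plan <datanode-host>, optionally with -bandwidth, -maxerror, -out and -v. The before-snapshot and plan files are persisted with the same FSDataOutputStream idiom shown above; a minimal standalone sketch of that write pattern follows, using only org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem/Path/FSDataOutputStream and java.nio.charset.StandardCharsets. The configuration, path and JSON content are hypothetical placeholders, not values taken from PlanCommand.

  Configuration conf = new Configuration();            // assumed default client configuration
  FileSystem fs = FileSystem.get(conf);
  String planJson = "{\"volumeSetPlans\":[]}";          // hypothetical plan JSON
  Path planFile = new Path("/system/diskbalancer/datanode1.plan.json");   // hypothetical output path
  try (FSDataOutputStream planOut = fs.create(planFile, true)) {
    // try-with-resources guarantees the stream is closed even if the write fails
    planOut.write(planJson.getBytes(StandardCharsets.UTF_8));
  }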