Use of org.apache.hadoop.fs.FSDataInputStream in the Apache Hadoop project.
From the class TestWebHDFS, method verifyPread.
static void verifyPread(FileSystem fs, Path p, long offset, long length,
    byte[] buf, byte[] expected) throws IOException {
  long remaining = length - offset;
  long checked = 0;
  LOG.info("XXX PREAD: offset=" + offset + ", remaining=" + remaining);
  final Ticker t = new Ticker("PREAD", "offset=%d, remaining=%d", offset, remaining);
  final FSDataInputStream in = fs.open(p, 64 << 10);
  for (; remaining > 0; ) {
    t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
    // positional read: fill buf from the absolute file offset without moving the stream position
    final int n = (int) Math.min(remaining, buf.length);
    in.readFully(offset, buf, 0, n);
    checkData(offset, remaining, n, buf, expected);
    offset += n;
    remaining -= n;
    checked += n;
  }
  in.close();
  t.end(checked);
}
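verifyPread drives FSDataInputStream's positional-read API: readFully(position, buffer, offset, length) reads at an absolute file offset and, unlike a plain read(), does not advance the stream's current position. A minimal sketch of the same call outside the test harness, assuming an already-initialized FileSystem fs and a hypothetical file /tmp/sample of at least 8 KB:

Path sample = new Path("/tmp/sample");        // hypothetical path
byte[] chunk = new byte[4096];
try (FSDataInputStream in = fs.open(sample)) {
  // Positional read: fill `chunk` from absolute offset 4096.
  in.readFully(4096L, chunk, 0, chunk.length);
  // The sequential position is still 0, so this reads the first byte of the file.
  int firstByte = in.read();
}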
Use of org.apache.hadoop.fs.FSDataInputStream in the Apache Hadoop project.
From the class TestWebHdfsFileSystemContract, method testRootDir.
public void testRootDir() throws IOException {
  final Path root = new Path("/");
  final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) fs;
  final URL url = webhdfs.toUrl(GetOpParam.Op.NULL, root);
  WebHdfsFileSystem.LOG.info("null url=" + url);
  Assert.assertTrue(url.toString().contains("v1"));

  //test root permission
  final FileStatus status = fs.getFileStatus(root);
  assertTrue(status != null);
  assertEquals(0777, status.getPermission().toShort());

  //delete root
  assertFalse(fs.delete(root, true));

  //create file using root path
  try {
    final FSDataOutputStream out = fs.create(root);
    out.write(1);
    out.close();
    fail();
  } catch (IOException e) {
    WebHdfsFileSystem.LOG.info("This is expected.", e);
  }

  //open file using root path
  try {
    final FSDataInputStream in = fs.open(root);
    in.read();
    fail();
  } catch (IOException e) {
    WebHdfsFileSystem.LOG.info("This is expected.", e);
  }
}
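Both negative cases follow the same pattern: the operation on the root path must throw IOException. A more compact variant using try-with-resources (a sketch against the generic FileSystem API with JUnit's fail(), not the project's test code) could look like this:

// Expect open() on the root directory to fail; the stream is closed
// automatically if it is ever created.
try (FSDataInputStream in = fs.open(new Path("/"))) {
  in.read();
  fail("open() on the root path should have thrown an IOException");
} catch (IOException e) {
  // expected: "/" is a directory and cannot be opened as a file
}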
Use of org.apache.hadoop.fs.FSDataInputStream in the Apache Hadoop project.
From the class TestWebHdfsFileSystemContract, method testSeek.
public void testSeek() throws IOException {
  final Path dir = new Path("/test/testSeek");
  assertTrue(fs.mkdirs(dir));

  {
    //test zero file size
    final Path zero = new Path(dir, "zero");
    fs.create(zero).close();
    int count = 0;
    final FSDataInputStream in = fs.open(zero);
    for (; in.read() != -1; count++);
    in.close();
    assertEquals(0, count);
  }

  final byte[] mydata = new byte[1 << 20];
  new Random().nextBytes(mydata);
  final Path p = new Path(dir, "file");
  FSDataOutputStream out = fs.create(p, false, 4096, (short) 3, 1L << 17);
  out.write(mydata, 0, mydata.length);
  out.close();

  final int one_third = mydata.length / 3;
  final int two_third = one_third * 2;

  {
    //test seek
    final int offset = one_third;
    final int len = mydata.length - offset;
    final byte[] buf = new byte[len];
    final FSDataInputStream in = fs.open(p);
    in.seek(offset);
    //read all remaining data
    in.readFully(buf);
    in.close();
    for (int i = 0; i < buf.length; i++) {
      assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
          mydata[i + offset], buf[i]);
    }
  }

  {
    //test position read (read the data after the two_third location)
    final int offset = two_third;
    final int len = mydata.length - offset;
    final byte[] buf = new byte[len];
    final FSDataInputStream in = fs.open(p);
    in.readFully(offset, buf);
    in.close();
    for (int i = 0; i < buf.length; i++) {
      assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
          mydata[i + offset], buf[i]);
    }
  }
}
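testSeek exercises both read paths FSDataInputStream offers: seek() followed by readFully(byte[]) consumes the stream from the new position, while readFully(long, byte[]) reads at an absolute offset and leaves getPos() untouched. A small sketch of the difference, assuming a hypothetical file /tmp/data of at least 1 MB and an initialized FileSystem fs:

Path data = new Path("/tmp/data");            // hypothetical file, >= 1 MB
byte[] a = new byte[512];
byte[] b = new byte[512];
try (FSDataInputStream in = fs.open(data)) {
  in.seek(1 << 19);                 // move the stream to offset 512 KB
  in.readFully(a);                  // sequential read; getPos() advances by 512
  long posAfterSeekRead = in.getPos();

  in.readFully(1 << 18, b);         // positional read at 256 KB
  assert in.getPos() == posAfterSeekRead;   // position unchanged by the pread
}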
Use of org.apache.hadoop.fs.FSDataInputStream in the Apache Hadoop project.
From the class CancelCommand, method execute.
/**
 * Executes the Client Calls.
 *
 * @param cmd - CommandLine
 */
@Override
public void execute(CommandLine cmd) throws Exception {
  LOG.info("Executing \"Cancel plan\" command.");
  Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.CANCEL));
  verifyCommandOptions(DiskBalancerCLI.CANCEL, cmd);

  // A plan can be cancelled with the datanode address and the plan hash,
  // which you can read from a datanode using queryStatus.
  if (cmd.hasOption(DiskBalancerCLI.NODE)) {
    String nodeAddress = cmd.getOptionValue(DiskBalancerCLI.NODE);
    String planHash = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
    cancelPlanUsingHash(nodeAddress, planHash);
  } else {
    // Or you can cancel a plan using the plan file. If the user
    // points us to the plan file, we can compute the hash as well as read
    // the address of the datanode from the plan file.
    String planFile = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
    Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
        "Invalid plan file specified.");
    String planData = null;
    try (FSDataInputStream plan = open(planFile)) {
      planData = IOUtils.toString(plan);
    }
    cancelPlan(planData);
  }
}
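For context, these code paths are reached from the hdfs CLI. Per the HDFS Disk Balancer documentation, a plan can be cancelled either with the plan file or with the plan ID plus the datanode address, matching the two branches above:

  hdfs diskbalancer -cancel <planfile>
  hdfs diskbalancer -cancel <planID> -node <hostname>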
Use of org.apache.hadoop.fs.FSDataInputStream in the Apache Hadoop project.
From the class ExecuteCommand, method execute.
/**
 * Executes the Client Calls.
 *
 * @param cmd - CommandLine
 */
@Override
public void execute(CommandLine cmd) throws Exception {
  LOG.info("Executing \"execute plan\" command");
  Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.EXECUTE));
  verifyCommandOptions(DiskBalancerCLI.EXECUTE, cmd);

  String planFile = cmd.getOptionValue(DiskBalancerCLI.EXECUTE);
  Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
      "Invalid plan file specified.");
  String planData = null;
  try (FSDataInputStream plan = open(planFile)) {
    planData = IOUtils.toString(plan);
  }
  submitPlan(planFile, planData);
}
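Both CancelCommand and ExecuteCommand read the whole plan file into a String through the command's open() helper and Apache Commons IO. A small sketch of the same pattern against a plain FileSystem (the path is hypothetical, and the charset-explicit IOUtils overload is used here, whereas the snippets above use the no-charset variant):

// Requires org.apache.commons.io.IOUtils, java.nio.charset.StandardCharsets,
// and an initialized org.apache.hadoop.fs.FileSystem `fs`.
Path planPath = new Path("/system/diskbalancer/plan.json");   // hypothetical location
String planData;
try (FSDataInputStream plan = fs.open(planPath)) {
  planData = IOUtils.toString(plan, StandardCharsets.UTF_8);
}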