
Example 26 with FSDataInputStream

Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

From the class TestWebHDFS, method verifyPread.

static void verifyPread(FileSystem fs, Path p, long offset, long length, byte[] buf, byte[] expected) throws IOException {
    long remaining = length - offset;
    long checked = 0;
    LOG.info("XXX PREAD: offset=" + offset + ", remaining=" + remaining);
    final Ticker t = new Ticker("PREAD", "offset=%d, remaining=%d", offset, remaining);
    // Open with a 64 KB buffer and verify the file contents chunk by chunk
    // using positional reads, which do not move the stream's own position.
    final FSDataInputStream in = fs.open(p, 64 << 10);
    while (remaining > 0) {
        t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
        final int n = (int) Math.min(remaining, buf.length);
        // Positional read: fills buf[0..n) starting at the absolute file offset.
        in.readFully(offset, buf, 0, n);
        checkData(offset, remaining, n, buf, expected);
        offset += n;
        remaining -= n;
        checked += n;
    }
    in.close();
    t.end(checked);
}
Also used : FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)
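
The workhorse here is the positional read, readFully(position, buffer, offset, length): it reads from an absolute file offset and, unlike seek() followed by a relative read, does not move the stream's own position. A minimal standalone sketch of that property (the path is hypothetical, and the file is assumed to hold at least 16 bytes):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PreadSketch {
    public static void main(String[] args) throws Exception {
        final FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical file, assumed to contain at least 16 bytes.
        final Path p = new Path("/tmp/pread-demo");
        final byte[] buf = new byte[8];
        try (FSDataInputStream in = fs.open(p)) {
            // Positional read at absolute offset 8.
            in.readFully(8, buf, 0, buf.length);
            // The stream position is untouched: this prints 0.
            System.out.println("position after pread: " + in.getPos());
        }
    }
}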

Example 27 with FSDataInputStream

Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

From the class TestWebHdfsFileSystemContract, method testRootDir.

public void testRootDir() throws IOException {
    final Path root = new Path("/");
    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) fs;
    final URL url = webhdfs.toUrl(GetOpParam.Op.NULL, root);
    WebHdfsFileSystem.LOG.info("null url=" + url);
    Assert.assertTrue(url.toString().contains("v1"));
    // test root permission
    final FileStatus status = fs.getFileStatus(root);
    assertNotNull(status);
    assertEquals(0777, status.getPermission().toShort());
    // the root directory cannot be deleted
    assertFalse(fs.delete(root, true));
    // creating a file at the root path must fail: "/" is a directory
    try {
        final FSDataOutputStream out = fs.create(root);
        out.write(1);
        out.close();
        fail();
    } catch (IOException e) {
        WebHdfsFileSystem.LOG.info("This is expected.", e);
    }
    // opening the root path for read must fail for the same reason
    try {
        final FSDataInputStream in = fs.open(root);
        in.read();
        fail();
    } catch (IOException e) {
        WebHdfsFileSystem.LOG.info("This is expected.", e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) URL(java.net.URL)
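
Both negative cases follow the same pattern: create() and open() on the root path must throw IOException, because "/" is a directory. A sketch of the same check written with try-with-resources, so the stream is closed even if the call unexpectedly succeeds (assuming JUnit's fail() and an initialized FileSystem fs, as in the test above):

try (FSDataOutputStream out = fs.create(new Path("/"))) {
    out.write(1);
    fail("create() on the root directory should have thrown IOException");
} catch (IOException e) {
    // expected: "/" is a directory, not a file
}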

Example 28 with FSDataInputStream

Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

From the class TestWebHdfsFileSystemContract, method testSeek.

public void testSeek() throws IOException {
    final Path dir = new Path("/test/testSeek");
    assertTrue(fs.mkdirs(dir));
    {
        // a zero-length file: the very first read returns EOF
        final Path zero = new Path(dir, "zero");
        fs.create(zero).close();
        int count = 0;
        final FSDataInputStream in = fs.open(zero);
        while (in.read() != -1) {
            count++;
        }
        in.close();
        assertEquals(0, count);
    }
    // write 1 MB of random data with a 128 KB block size
    final byte[] mydata = new byte[1 << 20];
    new Random().nextBytes(mydata);
    final Path p = new Path(dir, "file");
    FSDataOutputStream out = fs.create(p, false, 4096, (short) 3, 1L << 17);
    out.write(mydata, 0, mydata.length);
    out.close();
    final int one_third = mydata.length / 3;
    final int two_third = one_third * 2;
    {
        // test seek: move the stream position, then read to the end
        final int offset = one_third;
        final int len = mydata.length - offset;
        final byte[] buf = new byte[len];
        final FSDataInputStream in = fs.open(p);
        in.seek(offset);
        // read all remaining data
        in.readFully(buf);
        in.close();
        for (int i = 0; i < buf.length; i++) {
            assertEquals("Position " + i + ", offset=" + offset + ", length=" + len, mydata[i + offset], buf[i]);
        }
    }
    {
        // test positional read (read the data after the two_third location)
        final int offset = two_third;
        final int len = mydata.length - offset;
        final byte[] buf = new byte[len];
        final FSDataInputStream in = fs.open(p);
        in.readFully(offset, buf);
        in.close();
        for (int i = 0; i < buf.length; i++) {
            assertEquals("Position " + i + ", offset=" + offset + ", length=" + len, mydata[i + offset], buf[i]);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Random(java.util.Random) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
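
The two inner blocks exercise the two read styles FSDataInputStream offers, and the difference is worth spelling out: seek() moves the stream's position before a relative read, while the positional readFully(offset, buf) reads at an absolute offset and leaves the position alone. A compact sketch of the contrast, reusing fs, p, offset, and buf from the test above:

// Seek-based read: seek() sets the position; readFully then advances it.
try (FSDataInputStream in = fs.open(p)) {
    in.seek(offset);
    in.readFully(buf);
}
// Positional read: same bytes, but the stream position is left at 0.
try (FSDataInputStream in = fs.open(p)) {
    in.readFully(offset, buf);
}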

Example 29 with FSDataInputStream

Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

From the class CancelCommand, method execute.

/**
   * Executes the Client Calls.
   *
   * @param cmd - CommandLine
   */
@Override
public void execute(CommandLine cmd) throws Exception {
    LOG.info("Executing \"Cancel plan\" command.");
    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.CANCEL));
    verifyCommandOptions(DiskBalancerCLI.CANCEL, cmd);
    // We can cancel a plan using the datanode address and the plan ID,
    // which you can read from a datanode using queryStatus.
    if (cmd.hasOption(DiskBalancerCLI.NODE)) {
        String nodeAddress = cmd.getOptionValue(DiskBalancerCLI.NODE);
        String planHash = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
        cancelPlanUsingHash(nodeAddress, planHash);
    } else {
        // Or you can cancel a plan using the plan file. If the user
        // points us to the plan file, we can compute the hash as well as read
        // the address of the datanode from the plan file.
        String planFile = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
        Preconditions.checkArgument(planFile != null && !planFile.isEmpty(), "Invalid plan file specified.");
    String planData = null;
    try (FSDataInputStream plan = open(planFile)) {
        // Slurp the whole plan file into a String; try-with-resources
        // closes the stream even if reading fails.
        planData = IOUtils.toString(plan);
    }
        cancelPlan(planData);
    }
}
Also used : FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)
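
The plan file is slurped into a String with IOUtils.toString; ExecuteCommand below reads its plan file the same way. The no-charset overload relies on the platform default encoding and is deprecated in Commons IO, so a standalone sketch with an explicit charset looks like this (the plan path is hypothetical; org.apache.commons.io.IOUtils is assumed, matching the DiskBalancer commands):

import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadPlanSketch {
    public static void main(String[] args) throws Exception {
        final FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical plan location.
        final Path planPath = new Path("/system/diskbalancer/plan.json");
        final String planData;
        try (FSDataInputStream plan = fs.open(planPath)) {
            planData = IOUtils.toString(plan, StandardCharsets.UTF_8);
        }
        System.out.println("plan bytes read: " + planData.length());
    }
}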

Example 30 with FSDataInputStream

Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

From the class ExecuteCommand, method execute.

/**
   * Executes the Client Calls.
   *
   * @param cmd - CommandLine
   */
@Override
public void execute(CommandLine cmd) throws Exception {
    LOG.info("Executing \"execute plan\" command");
    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.EXECUTE));
    verifyCommandOptions(DiskBalancerCLI.EXECUTE, cmd);
    String planFile = cmd.getOptionValue(DiskBalancerCLI.EXECUTE);
    Preconditions.checkArgument(planFile != null && !planFile.isEmpty(), "Invalid plan file specified.");
    String planData = null;
    try (FSDataInputStream plan = open(planFile)) {
        // Read the plan into a String; the stream is closed automatically.
        planData = IOUtils.toString(plan);
    }
    submitPlan(planFile, planData);
}
Also used : FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)

Aggregations

Classes used together with FSDataInputStream across the indexed examples, with occurrence counts:

FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 431
Path (org.apache.hadoop.fs.Path): 271
FileSystem (org.apache.hadoop.fs.FileSystem): 143
Test (org.junit.Test): 135
IOException (java.io.IOException): 125
Configuration (org.apache.hadoop.conf.Configuration): 94
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 93
FileStatus (org.apache.hadoop.fs.FileStatus): 62
InputStreamReader (java.io.InputStreamReader): 37
BufferedReader (java.io.BufferedReader): 36
FileNotFoundException (java.io.FileNotFoundException): 26
IgfsPath (org.apache.ignite.igfs.IgfsPath): 26
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 21
ArrayList (java.util.ArrayList): 20
Random (java.util.Random): 19
EOFException (java.io.EOFException): 18
HashMap (java.util.HashMap): 16
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 15
URI (java.net.URI): 14
File (java.io.File): 13