
Example 71 with BlockLocation

Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache, in the class TestSwiftFileSystemRead, method testRWJson.

/**
   * Read and write some JSON
   * @throws IOException
   */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRWJson() throws IOException {
    final String message = "{" + " 'json': { 'i':43, 'b':true}," + " 's':'string'" + "}";
    final Path filePath = new Path("/test/file.json");
    writeTextFile(fs, filePath, message, false);
    String readJson = readBytesToString(fs, filePath, message.length());
    assertEquals(message, readJson);
    //now find out where it is
    FileStatus status = fs.getFileStatus(filePath);
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, 10);
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), SwiftTestUtils.readBytesToString (org.apache.hadoop.fs.swift.util.SwiftTestUtils.readBytesToString), BlockLocation (org.apache.hadoop.fs.BlockLocation), Test (org.junit.Test)
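
The test above retrieves the block locations but never asserts on them. Below is a minimal sketch of how the returned array could be inspected once the file exists; the configuration, path and console output are illustrative and not part of the test itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PrintBlockLocations {

    public static void main(String[] args) throws IOException {
        // Illustrative setup; in the test above the file is /test/file.json
        // on a Swift-backed FileSystem instance.
        FileSystem fs = FileSystem.get(new Configuration());
        Path filePath = new Path("/test/file.json");
        FileStatus status = fs.getFileStatus(filePath);
        // Ask for the locations covering the whole file rather than a fixed 10 bytes.
        BlockLocation[] locations =
            fs.getFileBlockLocations(status, 0, status.getLen());
        for (BlockLocation location : locations) {
            System.out.println("offset=" + location.getOffset()
                + " length=" + location.getLength()
                + " hosts=" + String.join(",", location.getHosts()));
        }
    }
}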

Example 72 with BlockLocation

Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache, in the class TestSwiftFileSystemBlockLocation, method testLocateSingleFileBlocks.

@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateSingleFileBlocks() throws Throwable {
    describe("verify that a file returns 1+ blocks");
    FileStatus fileStatus = createFileAndGetStatus();
    BlockLocation[] locations = getFs().getFileBlockLocations(fileStatus, 0, 1);
    assertNotEqual("No block locations supplied for " + fileStatus, 0, locations.length);
    for (BlockLocation location : locations) {
        assertLocationValid(location);
    }
}
Also used: FileStatus (org.apache.hadoop.fs.FileStatus), BlockLocation (org.apache.hadoop.fs.BlockLocation), Test (org.junit.Test)
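
assertLocationValid is a private helper of TestSwiftFileSystemBlockLocation whose body is not shown in this example. The sketch below is an assumption about the kind of per-location checks such a helper could perform, not the actual implementation.

import java.io.IOException;
import org.apache.hadoop.fs.BlockLocation;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

public class BlockLocationChecks {

    // Hypothetical stand-in for the test's private assertLocationValid helper;
    // the real method may check different things.
    public static void assertLocationValid(BlockLocation location) throws IOException {
        // Every location should name at least one host serving the block...
        String[] hosts = location.getHosts();
        assertNotNull("null hosts in " + location, hosts);
        assertTrue("no hosts in " + location, hosts.length > 0);
        // ...and a hostname:port name for each of those hosts.
        String[] names = location.getNames();
        assertNotNull("null names in " + location, names);
        assertTrue("no names in " + location, names.length > 0);
        // Offset and length should describe a plausible byte range.
        assertTrue("negative offset in " + location, location.getOffset() >= 0);
        assertTrue("zero-length block in " + location, location.getLength() > 0);
    }
}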

Example 73 with BlockLocation

Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache, in the class TestSwiftFileSystemBlockLocation, method testLocateRootDirectory.

@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateRootDirectory() throws Throwable {
    describe("verify that locating the root directory is an error");
    FileStatus status = fs.getFileStatus(path("/"));
    SwiftTestUtils.assertIsDirectory(status);
    BlockLocation[] locations;
    locations = getFs().getFileBlockLocations(status, 0, 1);
    assertEmptyBlockLocations(locations);
}
Also used: FileStatus (org.apache.hadoop.fs.FileStatus), BlockLocation (org.apache.hadoop.fs.BlockLocation), Test (org.junit.Test)

Example 74 with BlockLocation

Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache, in the class TestSwiftFileSystemBlockLocation, method testLocateDirectory.

@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testLocateDirectory() throws Throwable {
    describe("verify that locating a directory is an error");
    createFile(path("/test/filename"));
    FileStatus status = fs.getFileStatus(path("/test"));
    LOG.info("Filesystem is " + fs + "; target is " + status);
    SwiftTestUtils.assertIsDirectory(status);
    BlockLocation[] locations;
    locations = getFs().getFileBlockLocations(status, 0, 1);
    assertEmptyBlockLocations(locations);
}
Also used: FileStatus (org.apache.hadoop.fs.FileStatus), BlockLocation (org.apache.hadoop.fs.BlockLocation), Test (org.junit.Test)
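
Both directory tests above (the root and a regular directory) assert the same behaviour via assertEmptyBlockLocations: requesting block locations for a directory returns an empty array. Below is a minimal sketch of calling code that respects that behaviour before doing any locality-aware work; the path is illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LocateIfFile {

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path target = new Path("/test");  // illustrative path
        FileStatus status = fs.getFileStatus(target);
        if (status.isDirectory()) {
            // Directories have no blocks; the tests above expect an empty array here.
            System.out.println(target + " is a directory, no block locations");
            return;
        }
        BlockLocation[] locations =
            fs.getFileBlockLocations(status, 0, status.getLen());
        System.out.println(target + " spans " + locations.length + " block location(s)");
    }
}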

Example 75 with BlockLocation

Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache, in the class TestDFSUtil, method testLocatedBlocks2Locations.

/**
   * Test conversion of LocatedBlock to BlockLocation
   */
@Test
public void testLocatedBlocks2Locations() {
    DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] ds = new DatanodeInfo[1];
    ds[0] = d;
    // ok
    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
    LocatedBlock l1 = new LocatedBlock(b1, ds);
    l1.setStartOffset(0);
    l1.setCorrupt(false);
    // corrupt
    ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
    LocatedBlock l2 = new LocatedBlock(b2, ds);
    l2.setStartOffset(0);
    l2.setCorrupt(true);
    List<LocatedBlock> ls = Arrays.asList(l1, l2);
    LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null, null);
    BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);
    assertTrue("expected 2 blocks but got " + bs.length, bs.length == 2);
    int corruptCount = 0;
    for (BlockLocation b : bs) {
        if (b.isCorrupt()) {
            corruptCount++;
        }
    }
    assertTrue("expected 1 corrupt files but got " + corruptCount, corruptCount == 1);
    // test an empty location
    bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
    assertEquals(0, bs.length);
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), BlockLocation (org.apache.hadoop.fs.BlockLocation), Test (org.junit.Test)
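
The conversion under test carries each LocatedBlock's corrupt flag through to the resulting BlockLocation, which is what the corrupt count above verifies. A small sketch of how a caller might walk such an array and report corrupt ranges; the class and method names here are hypothetical, and the array could come from FileSystem.getFileBlockLocations or from DFSUtilClient.locatedBlocks2Locations as in the test.

import java.io.IOException;
import org.apache.hadoop.fs.BlockLocation;

public class CorruptRangeReport {

    /** Prints each corrupt range and returns how many were found. */
    public static int reportCorruptRanges(BlockLocation[] locations) throws IOException {
        int corruptCount = 0;
        for (BlockLocation location : locations) {
            if (location.isCorrupt()) {
                corruptCount++;
                System.out.println("corrupt range at offset " + location.getOffset()
                    + " length " + location.getLength()
                    + " on hosts " + String.join(",", location.getHosts()));
            }
        }
        return corruptCount;
    }
}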

Aggregations

BlockLocation (org.apache.hadoop.fs.BlockLocation): 88
Path (org.apache.hadoop.fs.Path): 41
FileStatus (org.apache.hadoop.fs.FileStatus): 30
Test (org.junit.Test): 29
FileSystem (org.apache.hadoop.fs.FileSystem): 16
ArrayList (java.util.ArrayList): 14
Configuration (org.apache.hadoop.conf.Configuration): 14
IOException (java.io.IOException): 10
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 10
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 7
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 7
InetSocketAddress (java.net.InetSocketAddress): 5
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 5
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 5
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
IgfsBlockLocation (org.apache.ignite.igfs.IgfsBlockLocation): 5
IgfsPath (org.apache.ignite.igfs.IgfsPath): 5
HashMap (java.util.HashMap): 4
Random (java.util.Random): 4