
Example 36 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

From the class TestReplaceDatanodeOnFailure, method testAppend:

@Test
public void testAppend() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final short REPLICATION = (short) 3;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        final DistributedFileSystem fs = cluster.getFileSystem();
        final Path f = new Path(DIR, "testAppend");
        {
            LOG.info("create an empty file " + f);
            fs.create(f, REPLICATION).close();
            final FileStatus status = fs.getFileStatus(f);
            Assert.assertEquals(REPLICATION, status.getReplication());
            Assert.assertEquals(0L, status.getLen());
        }
        final byte[] bytes = new byte[1000];
        {
            LOG.info("append " + bytes.length + " bytes to " + f);
            final FSDataOutputStream out = fs.append(f);
            out.write(bytes);
            out.close();
            final FileStatus status = fs.getFileStatus(f);
            Assert.assertEquals(REPLICATION, status.getReplication());
            Assert.assertEquals(bytes.length, status.getLen());
        }
        {
            LOG.info("append another " + bytes.length + " bytes to " + f);
            try {
                final FSDataOutputStream out = fs.append(f);
                out.write(bytes);
                out.close();
                Assert.fail();
            } catch (IOException ioe) {
                LOG.info("This exception is expected", ioe);
            }
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), Configuration (org.apache.hadoop.conf.Configuration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), Test (org.junit.Test)
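The assertions above exercise the two core FileStatus accessors, getReplication() and getLen(). The following standalone sketch shows the same metadata check in isolation; it is illustrative only and assumes fs is an already-initialized FileSystem and f an existing file:

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: print the namenode's metadata snapshot for a file.
// Assumes fs (an initialized FileSystem) and f (an existing Path).
static void printReplicationAndLength(FileSystem fs, Path f) throws java.io.IOException {
    FileStatus status = fs.getFileStatus(f); // point-in-time metadata snapshot
    System.out.println(f + ": replication=" + status.getReplication()
            + ", length=" + status.getLen() + " bytes");
}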

Example 37 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

From the class TestReplication, method checkFile:

/* check if there are at least two nodes on the same rack */
private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(), ClientProtocol.class).getProxy();
    waitForBlockReplication(name.toString(), namenode, Math.min(numDatanodes, repl), -1);
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0, Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L, Long.MAX_VALUE);
    // verify that rack locations match
    assertEquals(locations.locatedBlockCount(), blockLocations.length);
    for (int i = 0; i < blockLocations.length; i++) {
        LocatedBlock blk = locations.get(i);
        DatanodeInfo[] datanodes = blk.getLocations();
        String[] topologyPaths = blockLocations[i].getTopologyPaths();
        assertEquals(datanodes.length, topologyPaths.length);
        for (int j = 0; j < topologyPaths.length; j++) {
            boolean found = false;
            for (int k = 0; k < racks.length; k++) {
                if (topologyPaths[j].startsWith(racks[k])) {
                    found = true;
                    break;
                }
            }
            assertTrue(found);
        }
    }
    // Verify placement: at least one pair of replicas shares a rack
    // (isOnSameRack) and at least one pair sits on different racks
    // (isNotOnSameRack).
    boolean isOnSameRack = true, isNotOnSameRack = true;
    for (LocatedBlock blk : locations.getLocatedBlocks()) {
        DatanodeInfo[] datanodes = blk.getLocations();
        if (datanodes.length <= 1)
            break;
        if (datanodes.length == 2) {
            isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(datanodes[1].getNetworkLocation()));
            break;
        }
        isOnSameRack = false;
        isNotOnSameRack = false;
        for (int i = 0; i < datanodes.length - 1; i++) {
            LOG.info("datanode " + i + ": " + datanodes[i]);
            boolean onRack = false;
            for (int j = i + 1; j < datanodes.length; j++) {
                if (datanodes[i].getNetworkLocation().equals(datanodes[j].getNetworkLocation())) {
                    onRack = true;
                }
            }
            if (onRack) {
                isOnSameRack = true;
            }
            if (!onRack) {
                isNotOnSameRack = true;
            }
            if (isOnSameRack && isNotOnSameRack)
                break;
        }
        if (!isOnSameRack || !isNotOnSameRack)
            break;
    }
    assertTrue(isOnSameRack);
    assertTrue(isNotOnSameRack);
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), FileStatus (org.apache.hadoop.fs.FileStatus), Configuration (org.apache.hadoop.conf.Configuration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), BlockLocation (org.apache.hadoop.fs.BlockLocation), ClientProtocol (org.apache.hadoop.hdfs.protocol.ClientProtocol)
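The rack verification above is built on FileSystem.getFileBlockLocations(), which maps a FileStatus to the hosts and network-topology paths storing each block. A minimal sketch of that lookup, assuming fs is an initialized FileSystem and name an existing file (names are illustrative, not taken from the test):

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void printBlockTopology(FileSystem fs, Path name) throws java.io.IOException {
    FileStatus stat = fs.getFileStatus(name);
    // One BlockLocation per block in the requested byte range (here, the whole file).
    BlockLocation[] blocks = fs.getFileBlockLocations(stat, 0L, stat.getLen());
    for (int i = 0; i < blocks.length; i++) {
        // Topology paths have the form /rack/host; the test above matches them
        // against its configured rack prefixes.
        for (String topologyPath : blocks[i].getTopologyPaths()) {
            System.out.println("block " + i + " replica at " + topologyPath);
        }
    }
}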

Example 38 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

From the class TestReservedRawPaths, method testListDotReserved:

@Test(timeout = 120000)
public void testListDotReserved() throws Exception {
    // Create a base file for comparison
    final Path baseFileRaw = new Path("/.reserved/raw/base");
    final int len = 8192;
    DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED);
    /*
     * Ensure that you can list /.reserved, with results: raw and .inodes
     */
    FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
    assertEquals(2, stats.length);
    assertEquals(FSDirectory.DOT_INODES_STRING, stats[0].getPath().getName());
    assertEquals("raw", stats[1].getPath().getName());
    try {
        fs.listStatus(new Path("/.reserved/.inodes"));
        fail("expected FNFE");
    } catch (FileNotFoundException e) {
        assertExceptionContains("/.reserved/.inodes does not exist", e);
    }
    final FileStatus[] fileStatuses = fs.listStatus(new Path("/.reserved/raw"));
    assertEquals("expected 1 entry", fileStatuses.length, 1);
    assertMatches(fileStatuses[0].getPath().toString(), "/.reserved/raw/base");
}
Also used: Path (org.apache.hadoop.fs.Path), INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath), FileStatus (org.apache.hadoop.fs.FileStatus), FileNotFoundException (java.io.FileNotFoundException), Test (org.junit.Test)
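listStatus() is the directory counterpart of getFileStatus(): it returns one FileStatus per child entry and throws FileNotFoundException for a nonexistent path, which is exactly what the test provokes for /.reserved/.inodes. A hedged sketch of the listing pattern, where fs is again an assumed, initialized FileSystem:

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void listChildren(FileSystem fs, Path dir) throws java.io.IOException {
    // Throws FileNotFoundException if dir does not exist.
    for (FileStatus child : fs.listStatus(dir)) {
        System.out.println(child.getPath().getName()
                + (child.isDirectory() ? " (dir)" : " (file)"));
    }
}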

Example 39 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

From the class TestReservedRawPaths, method assertPathEquals:

private void assertPathEquals(Path p1, Path p2) throws IOException {
    final FileStatus p1Stat = fs.getFileStatus(p1);
    final FileStatus p2Stat = fs.getFileStatus(p2);
    /*
     * Use accessTime and modificationTime as substitutes for INode to check
     * for resolution to the same underlying file.
     */
    assertEquals("Access times not equal", p1Stat.getAccessTime(), p2Stat.getAccessTime());
    assertEquals("Modification times not equal", p1Stat.getModificationTime(), p2Stat.getModificationTime());
    assertEquals("pathname1 not equal", p1, Path.getPathWithoutSchemeAndAuthority(p1Stat.getPath()));
    assertEquals("pathname1 not equal", p2, Path.getPathWithoutSchemeAndAuthority(p2Stat.getPath()));
}
Also used: FileStatus (org.apache.hadoop.fs.FileStatus)
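The helper compares getAccessTime() and getModificationTime() as a proxy for INode identity, since two paths that resolve to the same underlying file must report identical timestamps. The same idea as a standalone boolean check (illustrative names, not from the test):

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Returns true when both paths report identical access and modification times,
// the heuristic the test uses to argue they resolve to one underlying file.
static boolean sameTimestamps(FileSystem fs, Path p1, Path p2) throws java.io.IOException {
    FileStatus s1 = fs.getFileStatus(p1);
    FileStatus s2 = fs.getFileStatus(p2);
    return s1.getAccessTime() == s2.getAccessTime()
            && s1.getModificationTime() == s2.getModificationTime();
}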

Example 40 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

From the class TestSetTimes, method testTimesAtClose:

/**
   * Tests mod time change at close in DFS.
   */
@Test
public void testTimesAtClose() throws IOException {
    Configuration conf = new HdfsConfiguration();
    final int MAX_IDLE_TIME = 2000; // 2 seconds
    int replicas = 1;
    // parameter initialization
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    assertTrue(fileSys instanceof DistributedFileSystem);
    try {
        // create a new file and write to it
        Path file1 = new Path("/simple.dat");
        FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
        System.out.println("Created and wrote file simple.dat");
        FileStatus statBeforeClose = fileSys.getFileStatus(file1);
        long mtimeBeforeClose = statBeforeClose.getModificationTime();
        String mdateBeforeClose = dateForm.format(new Date(mtimeBeforeClose));
        System.out.println("mtime on " + file1 + " before close is " + mdateBeforeClose + " (" + mtimeBeforeClose + ")");
        assertTrue(mtimeBeforeClose != 0);
        //close file after writing
        stm.close();
        System.out.println("Closed file.");
        FileStatus statAfterClose = fileSys.getFileStatus(file1);
        long mtimeAfterClose = statAfterClose.getModificationTime();
        String mdateAfterClose = dateForm.format(new Date(mtimeAfterClose));
        System.out.println("mtime on " + file1 + " after close is " + mdateAfterClose + " (" + mtimeAfterClose + ")");
        assertTrue(mtimeAfterClose != 0);
        assertTrue(mtimeBeforeClose != mtimeAfterClose);
        cleanupFile(fileSys, file1);
    } catch (IOException e) {
        info = client.datanodeReport(DatanodeReportType.ALL);
        printDatanodeReport(info);
        throw e;
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), FileStatus (org.apache.hadoop.fs.FileStatus), Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), IOException (java.io.IOException), Date (java.util.Date), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
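The essential behavior this test pins down, that HDFS updates a file's modification time when the writer closes the stream, can be observed with plain FileSystem calls. A minimal sketch under the assumption of an initialized FileSystem fs (the path and payload are illustrative):

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void showMtimeAtClose(FileSystem fs) throws java.io.IOException {
    Path file = new Path("/mtime-demo.dat"); // illustrative path
    FSDataOutputStream out = fs.create(file);
    out.write(new byte[1024]);
    long before = fs.getFileStatus(file).getModificationTime();
    out.close(); // the namenode records a new modification time at close
    long after = fs.getFileStatus(file).getModificationTime();
    System.out.println("mtime before close: " + before + ", after close: " + after);
}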

Aggregations

FileStatus (org.apache.hadoop.fs.FileStatus): 1156 uses
Path (org.apache.hadoop.fs.Path): 910 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 417 uses
Test (org.junit.Test): 372 uses
IOException (java.io.IOException): 296 uses
Configuration (org.apache.hadoop.conf.Configuration): 187 uses
ArrayList (java.util.ArrayList): 175 uses
FileNotFoundException (java.io.FileNotFoundException): 136 uses
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 105 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 86 uses
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 67 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 65 uses
HashMap (java.util.HashMap): 54 uses
File (java.io.File): 41 uses
URI (java.net.URI): 41 uses
PathFilter (org.apache.hadoop.fs.PathFilter): 38 uses
BufferedReader (java.io.BufferedReader): 30 uses
InputStreamReader (java.io.InputStreamReader): 30 uses
BlockLocation (org.apache.hadoop.fs.BlockLocation): 30 uses
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 30 uses