
Example 56 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileAppend4 method testAppendInsufficientLocations.

/**
   * Test that an append with no locations fails with an exception
   * showing insufficient locations.
   */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
    Configuration conf = new Configuration();
    // lower heartbeat interval for fast recognition of DN death
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    DistributedFileSystem fileSystem = null;
    try {
        // create a file with replication 2
        fileSystem = cluster.getFileSystem();
        Path f = new Path("/testAppend");
        FSDataOutputStream create = fileSystem.create(f, (short) 2);
        create.write("/testAppend".getBytes());
        create.close();
        // Wait until the file reaches replication 2
        DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
        // Shut down all DNs that have the last block location for the file
        LocatedBlocks lbs = fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
        List<DataNode> dnsOfCluster = cluster.getDataNodes();
        DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
        for (DataNode dn : dnsOfCluster) {
            for (DatanodeInfo loc : dnsWithLocations) {
                if (dn.getDatanodeId().equals(loc)) {
                    dn.shutdown();
                    DFSTestUtil.waitForDatanodeDeath(dn);
                }
            }
        }
        // Wait till 0 replication is recognized
        DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
        // Append to the file; at this point none of the remaining live DNs have the block.
        try {
            fileSystem.append(f);
            fail("Append should fail because insufficient locations");
        } catch (IOException e) {
            LOG.info("Expected exception: ", e);
        }
        FSDirectory dir = cluster.getNamesystem().getFSDirectory();
        final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
        assertTrue("File should remain closed", !inode.isUnderConstruction());
    } finally {
        if (null != fileSystem) {
            fileSystem.close();
        }
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) IOException(java.io.IOException) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
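The test only verifies that the append is rejected; caller code hitting the same condition sees an IOException from FileSystem#append because the NameNode cannot build a write pipeline for the last block. A minimal sketch of the caller-side pattern (not part of the test; the method name and its parameters are illustrative):

private static void appendOrReport(FileSystem fs, Path path, byte[] data) throws IOException {
    FSDataOutputStream out = null;
    try {
        // append() fails with an IOException when no live datanode holds the
        // file's last block, as exercised by the test above
        out = fs.append(path);
        out.write(data);
    } catch (IOException e) {
        // the file stays closed on the NameNode side (no lease was granted),
        // so the caller can retry later
        throw new IOException("Append to " + path + " failed: insufficient locations?", e);
    } finally {
        // org.apache.hadoop.io.IOUtils.closeStream(...) is null-safe
        IOUtils.closeStream(out);
    }
}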

Example 57 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileAppendRestart method testAppendRestart.

/**
   * Regression test for HDFS-2991. Creates and appends to files
   * where blocks start/end on block boundaries.
   */
@Test
public void testAppendRestart() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // Turn off persistent IPC, so that the DFSClient can survive NN restart
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
    MiniDFSCluster cluster = null;
    FSDataOutputStream stream = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        FileSystem fs = cluster.getFileSystem();
        File editLog = new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0), NNStorage.getInProgressEditsFileName(1));
        EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
        Path p1 = new Path("/block-boundaries");
        writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);
        counts = FSImageTestUtil.countEditLogOpTypes(editLog);
        // OP_ADD to create file
        // OP_ADD_BLOCK for first block
        // OP_CLOSE to close file
        // OP_APPEND to reopen file
        // OP_ADD_BLOCK for second block
        // OP_CLOSE to close file
        assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_ADD).held);
        assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_APPEND).held);
        assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
        assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_CLOSE).held);
        Path p2 = new Path("/not-block-boundaries");
        writeAndAppend(fs, p2, BLOCK_SIZE / 2, BLOCK_SIZE);
        counts = FSImageTestUtil.countEditLogOpTypes(editLog);
        // OP_ADD to create file
        // OP_ADD_BLOCK for first block
        // OP_CLOSE to close file
        // OP_APPEND to re-establish the lease
        // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
        // OP_ADD_BLOCK at the start of the second block
        // OP_CLOSE to close file
        // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
        //       in addition to the ones above
        assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_ADD).held);
        assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_APPEND).held);
        assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
        assertEquals(2 + 2, (int) counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
        assertEquals(2 + 2, (int) counts.get(FSEditLogOpCodes.OP_CLOSE).held);
        cluster.restartNameNode();
        AppendTestUtil.check(fs, p1, 2 * BLOCK_SIZE);
        AppendTestUtil.check(fs, p2, 3 * BLOCK_SIZE / 2);
    } finally {
        IOUtils.closeStream(stream);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FSEditLogOpCodes(org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) Holder(org.apache.hadoop.hdfs.util.Holder) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) File(java.io.File) Test(org.junit.Test)
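The test drives everything through a private writeAndAppend helper that is not shown in this snippet. A plausible reconstruction from the way it is called (the exact body in TestFileAppendRestart may differ; BLOCK_SIZE and AppendTestUtil are the same constant and utility used above):

private void writeAndAppend(FileSystem fs, Path p, int lengthForCreate, int lengthForAppend) throws IOException {
    // write the first chunk into a fresh single-replica file and close it
    FSDataOutputStream stream = fs.create(p, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
    try {
        AppendTestUtil.write(stream, 0, lengthForCreate);
        stream.close();
        // reopen the file for append and write the second chunk
        stream = fs.append(p);
        AppendTestUtil.write(stream, lengthForCreate, lengthForAppend);
        stream.close();
    } finally {
        // harmless if the stream was already closed
        IOUtils.closeStream(stream);
    }
}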

Example 58 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileAppendRestart method testAppendWithPipelineRecovery.

/**
   * Test appending to a file when one of the datanodes in the existing
   * pipeline is down.
   */
@Test
public void testAppendWithPipelineRecovery() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    FSDataOutputStream out = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).manageDataDfsDirs(true).manageNameDfsDirs(true).numDataNodes(4).racks(new String[] { "/rack1", "/rack1", "/rack2", "/rack2" }).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        Path path = new Path("/test1");
        out = fs.create(path, true, BLOCK_SIZE, (short) 3, BLOCK_SIZE);
        AppendTestUtil.write(out, 0, 1024);
        out.close();
        cluster.stopDataNode(3);
        out = fs.append(path);
        AppendTestUtil.write(out, 1024, 1024);
        out.close();
        cluster.restartNameNode(true);
        AppendTestUtil.check(fs, path, 2048);
    } finally {
        IOUtils.closeStream(out);
        if (null != cluster) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)

Example 59 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileAppend method testComplexFlush.

/**
   * Test that file data can be flushed.
   * @throws IOException an exception might be thrown
   */
@Test
public void testComplexFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    try {
        // create a new file.
        Path file1 = new Path("/complexFlush.dat");
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
        System.out.println("Created file complexFlush.dat");
        int start = 0;
        for (start = 0; (start + 29) < AppendTestUtil.FILE_SIZE; ) {
            stm.write(fileContents, start, 29);
            stm.hflush();
            start += 29;
        }
        stm.write(fileContents, start, AppendTestUtil.FILE_SIZE - start);
        // need to make sure we completely write out all full blocks before
        // the checkFile() call (see FSOutputSummer#flush)
        stm.flush();
        // verify that full blocks are sane
        checkFile(fs, file1, 1);
        stm.close();
        // verify that entire file is good
        AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
    } catch (IOException e) {
        System.out.println("Exception :" + e);
        throw e;
    } catch (Throwable e) {
        System.out.println("Throwable :" + e);
        e.printStackTrace();
        throw new IOException("Throwable : " + e);
    } finally {
        fs.close();
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) Test(org.junit.Test)
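The comment pointing at FSOutputSummer#flush above hinges on the difference between the plain flush() and the Syncable methods that FSDataOutputStream exposes. A minimal sketch of that distinction (fs, path, and data are assumed to exist; this is not part of the test):

// assumes an initialized FileSystem fs, a Path path, and a byte[] data
try (FSDataOutputStream out = fs.create(path)) {
    out.write(data);
    // flush(): pushes buffered bytes to the underlying stream, but gives no
    // guarantee about what readers or datanodes have seen
    out.flush();
    // hflush(): after it returns, new readers are guaranteed to see the data
    // written so far (it is not necessarily on disk at the datanodes yet)
    out.hflush();
    // hsync(): like hflush(), but also asks the datanodes to persist the data
    out.hsync();
}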

Example 60 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileAppend method testAppendAfterSoftLimit.

/** Tests appending after soft-limit expires. */
@Test
public void testAppendAfterSoftLimit() throws IOException, InterruptedException {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    // Set a small soft limit for the lease
    final long softLimit = 1L;
    final long hardLimit = 9999999L;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.setLeasePeriod(softLimit, hardLimit);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    FileSystem fs2 = new DistributedFileSystem();
    fs2.initialize(fs.getUri(), conf);
    final Path testPath = new Path("/testAppendAfterSoftLimit");
    final byte[] fileContents = AppendTestUtil.initBuffer(32);
    // create a new file without closing
    FSDataOutputStream out = fs.create(testPath);
    out.write(fileContents);
    // Wait for longer than the soft limit
    Thread.sleep(250);
    try {
        FSDataOutputStream appendStream2 = fs2.append(testPath);
        appendStream2.write(fileContents);
        appendStream2.close();
        assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
    } finally {
        fs.close();
        fs2.close();
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)

Aggregations

FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 789
Path (org.apache.hadoop.fs.Path): 618
Test (org.junit.Test): 345
FileSystem (org.apache.hadoop.fs.FileSystem): 248
Configuration (org.apache.hadoop.conf.Configuration): 190
IOException (java.io.IOException): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 94
IgfsPath (org.apache.ignite.igfs.IgfsPath): 78
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 66
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 65
FileStatus (org.apache.hadoop.fs.FileStatus): 57
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 45
CreateFlag (org.apache.hadoop.fs.CreateFlag): 43
FileNotFoundException (java.io.FileNotFoundException): 40
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 40
ArrayList (java.util.ArrayList): 38
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 33
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 30
Random (java.util.Random): 28