Example 61 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache, in class TestFileAppend, method testAppend2AfterSoftLimit.

/** Tests appending after soft-limit expires. */
@Test
public void testAppend2AfterSoftLimit() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    //Set small soft-limit for lease
    final long softLimit = 1L;
    final long hardLimit = 9999999L;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.setLeasePeriod(softLimit, hardLimit);
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    DistributedFileSystem fs2 = new DistributedFileSystem();
    fs2.initialize(fs.getUri(), conf);
    final Path testPath = new Path("/testAppendAfterSoftLimit");
    final byte[] fileContents = AppendTestUtil.initBuffer(32);
    // create a new file without closing
    FSDataOutputStream out = fs.create(testPath);
    out.write(fileContents);
    //Wait for > soft-limit
    Thread.sleep(250);
    try {
        FSDataOutputStream appendStream2 = fs2.append(testPath, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
        appendStream2.write(fileContents);
        appendStream2.close();
        assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
        // make sure we now have 1 block since the first writer was revoked
        LocatedBlocks blks = fs.getClient().getLocatedBlocks(testPath.toString(), 0L);
        assertEquals(1, blks.getLocatedBlocks().size());
        for (LocatedBlock blk : blks.getLocatedBlocks()) {
            assertEquals(fileContents.length, blk.getBlockSize());
        }
    } finally {
        fs.close();
        fs2.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
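
As a simpler point of comparison, FileSystem.append also has a single-argument overload. Below is a minimal sketch of plain create-then-append usage, not taken from the Hadoop test suite; it assumes fs.defaultFS points at a running HDFS cluster (the local filesystem may not support append), and the path /tmp/append-demo.txt is hypothetical.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            Path p = new Path("/tmp/append-demo.txt"); // hypothetical path
            if (!fs.exists(p)) {
                try (FSDataOutputStream out = fs.create(p)) {
                    out.write("first".getBytes(StandardCharsets.UTF_8));
                }
            }
            // Single-argument append; the EnumSet overload used in the test
            // above additionally requests CreateFlag.NEW_BLOCK so that the
            // appended data starts a fresh block.
            try (FSDataOutputStream out = fs.append(p)) {
                out.write("second".getBytes(StandardCharsets.UTF_8));
            }
        }
    }
}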

Example 62 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache, in class TestFileAppend, method testSimpleFlush.

/**
   * Test a simple flush on a simple HDFS file.
   * @throws IOException an exception might be thrown
   */
@Test
public void testSimpleFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    try {
        // create a new file.
        Path file1 = new Path("/simpleFlush.dat");
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
        System.out.println("Created file simpleFlush.dat");
        // write to file
        int mid = AppendTestUtil.FILE_SIZE / 2;
        stm.write(fileContents, 0, mid);
        stm.hflush();
        System.out.println("Wrote and Flushed first part of file.");
        // write the remainder of the file
        stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
        System.out.println("Written second part of file");
        stm.hflush();
        stm.hflush();
        System.out.println("Wrote and Flushed second part of file.");
        // verify that full blocks are sane
        checkFile(fs, file1, 1);
        stm.close();
        System.out.println("Closed file.");
        // verify that entire file is good
        AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
    } catch (IOException e) {
        System.out.println("Exception :" + e);
        throw e;
    } catch (Throwable e) {
        System.out.println("Throwable :" + e);
        e.printStackTrace();
        throw new IOException("Throwable : " + e);
    } finally {
        fs.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), Test (org.junit.Test)
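
The point of testSimpleFlush is that hflush makes written bytes visible to new readers while the file is still open for write. Here is a minimal sketch of that guarantee, not from the test suite; it assumes a running HDFS cluster, and the path /tmp/hflush-demo.dat is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            Path p = new Path("/tmp/hflush-demo.dat"); // hypothetical path
            byte[] data = {1, 2, 3, 4};
            try (FSDataOutputStream out = fs.create(p)) {
                out.write(data);
                // Push the bytes to the datanode pipeline; a new reader can
                // now see them even though the file is not yet closed.
                out.hflush();
                try (FSDataInputStream in = fs.open(p)) {
                    byte[] buf = new byte[data.length];
                    in.readFully(0, buf); // positional readFully from offset 0
                }
            }
        }
    }
}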

Example 63 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache, in class TestFileAppend, method testBreakHardlinksIfNeeded.

@Test
public void testBreakHardlinksIfNeeded() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    try {
        // create a new file, write to it and close it.
        Path file1 = new Path("/filestatus.dat");
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
        writeFile(stm);
        stm.close();
        // Get a handle to the datanode
        DataNode[] dn = cluster.listDataNodes();
        assertTrue("There should be only one datanode but found " + dn.length, dn.length == 1);
        LocatedBlocks locations = client.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        List<LocatedBlock> blocks = locations.getLocatedBlocks();
        final FsDatasetSpi<?> fsd = dn[0].getFSDataset();
        // Create hard links for a subset of the blocks (every other one)
        for (int i = 0; i < blocks.size(); i = i + 2) {
            ExtendedBlock b = blocks.get(i).getBlock();
            final File f = FsDatasetTestUtil.getBlockFile(fsd, b.getBlockPoolId(), b.getLocalBlock());
            File link = new File(f.toString() + ".link");
            System.out.println("Creating hardlink for File " + f + " to " + link);
            HardLink.createHardLink(f, link);
        }
        // Detach all blocks. This should remove hardlinks (if any)
        for (int i = 0; i < blocks.size(); i++) {
            ExtendedBlock b = blocks.get(i).getBlock();
            System.out.println("breakHardlinksIfNeeded detaching block " + b);
            assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned true", FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
        }
        // Since the blocks were already detached, these calls should return false
        for (int i = 0; i < blocks.size(); i++) {
            ExtendedBlock b = blocks.get(i).getBlock();
            System.out.println("breakHardlinksIfNeeded re-attempting to " + "detach block " + b);
            assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned false", FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
        }
    } finally {
        client.close();
        fs.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), File (java.io.File), Test (org.junit.Test)
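
The hardlink mechanics this test exercises can be illustrated outside HDFS with plain java.nio. This is a standalone sketch with hypothetical /tmp paths, not Hadoop code: two directory entries share one inode, so a datanode must break such a link (copy the block into a fresh file) before modifying the block in place, which is what breakHardlinksIfNeeded checks.

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class HardlinkSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical local files, standing in for block files on a datanode.
        Path original = Paths.get("/tmp/blk_demo");
        Path link = Paths.get("/tmp/blk_demo.link");
        Files.write(original, new byte[]{1, 2, 3});
        Files.createLink(link, original); // both names now point at one inode
        // A write through either name would be visible through the other,
        // which is why an in-place block update must first break the link.
        System.out.println("size via link: " + Files.size(link));
        Files.delete(link);
        Files.delete(original);
    }
}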

Example 64 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache, in class TestFileAppend2, method testAppendLessThanChecksumChunk.

/**
   * Make sure when the block length after appending is less than 512 bytes, the
   * checksum re-calculation and overwrite are performed correctly.
   */
@Test
public void testAppendLessThanChecksumChunk() throws Exception {
    final byte[] buf = new byte[1024];
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
    cluster.waitActive();
    try (DistributedFileSystem fs = cluster.getFileSystem()) {
        final int len1 = 200;
        final int len2 = 300;
        final Path p = new Path("/foo");
        FSDataOutputStream out = fs.create(p);
        out.write(buf, 0, len1);
        out.close();
        out = fs.append(p);
        out.write(buf, 0, len2);
        // flush but leave open
        out.hflush();
        // read data to verify the replica's content and checksum are correct
        FSDataInputStream in = fs.open(p);
        final int length = in.read(0, buf, 0, len1 + len2);
        assertTrue(length > 0);
        in.close();
        out.close();
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
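
The verification step above relies on FSDataInputStream's positional read, which verifies the replica's checksums as a side effect. A minimal sketch of that API follows, assuming a running HDFS cluster; the path is hypothetical and not part of the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PreadSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            Path p = new Path("/tmp/pread-demo.dat"); // hypothetical path
            try (FSDataOutputStream out = fs.create(p)) {
                out.write(new byte[300]); // less than one 512-byte checksum chunk
            }
            byte[] buf = new byte[300];
            try (FSDataInputStream in = fs.open(p)) {
                // Positional read: leaves the stream's own offset untouched
                // and checks checksums against the replica on the way in.
                int n = in.read(0L, buf, 0, buf.length);
                System.out.println("read " + n + " bytes");
            }
        }
    }
}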

Example 65 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache, in class TestFileConcurrentReader, method testUnfinishedBlockPacketBufferOverrun.

/**
   * test case: if the BlockSender decides there is only one packet to send,
   * the previous computation of the pktSize based on transferToAllowed
   * would result in too small a buffer to do the buffer-copy needed
   * for partial chunks.
   */
@Test(timeout = 30000)
public void testUnfinishedBlockPacketBufferOverrun() throws IOException {
    // check that / exists
    Path path = new Path("/");
    System.out.println("Path : \"" + path.toString() + "\"");
    // create a new file in the root, write data, do not close it
    Path file1 = new Path("/unfinished-block");
    final FSDataOutputStream stm = TestFileCreation.createFile(fileSystem, file1, 1);
    // write partial block and sync
    final int bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
    final int partialBlockSize = bytesPerChecksum - 1;
    writeFileAndSync(stm, partialBlockSize);
    // Make sure a client can read it before it is closed
    checkCanRead(fileSystem, file1, partialBlockSize);
    stm.close();
}
Also used: Path (org.apache.hadoop.fs.Path), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
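
The partial-chunk size in this test comes straight from configuration. A small sketch of that computation (no cluster needed); note that current Hadoop spells the key dfs.bytes-per-checksum, while the test reads the legacy io.bytes.per.checksum name with the same 512-byte default.

import org.apache.hadoop.conf.Configuration;

public class ChecksumChunkSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        int bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
        // One byte short of a full chunk forces BlockSender's single packet
        // to carry a partial chunk, the buffer-sizing case the test targets.
        int partialBlockSize = bytesPerChecksum - 1;
        System.out.println("partial write size: " + partialBlockSize);
    }
}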

Aggregations

FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 789
Path (org.apache.hadoop.fs.Path): 618
Test (org.junit.Test): 345
FileSystem (org.apache.hadoop.fs.FileSystem): 248
Configuration (org.apache.hadoop.conf.Configuration): 190
IOException (java.io.IOException): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 94
IgfsPath (org.apache.ignite.igfs.IgfsPath): 78
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 66
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 65
FileStatus (org.apache.hadoop.fs.FileStatus): 57
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 45
CreateFlag (org.apache.hadoop.fs.CreateFlag): 43
FileNotFoundException (java.io.FileNotFoundException): 40
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 40
ArrayList (java.util.ArrayList): 38
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 33
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 30
Random (java.util.Random): 28