Example 81 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileAppend3 method doSmallAppends.

// Do small appends.
void doSmallAppends(Path file, DistributedFileSystem fs, int iterations) throws IOException {
    for (int i = 0; i < iterations; i++) {
        FSDataOutputStream stm;
        try {
            stm = fs.append(file);
        } catch (IOException e) {
            // If another thread is already appending, skip this time.
            continue;
        }
        // Failure in write or close will be terminal.
        AppendTestUtil.write(stm, 0, 123);
        stm.close();
    }
}
Also used : FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException)
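
The skip-on-contention pattern above tolerates HDFS's single-writer append lease: whichever caller loses the race for the lease simply skips that iteration. A minimal sketch of driving the method from several threads follows; the pool size, path, and iteration count are illustrative assumptions, not part of the original test, and it assumes the usual java.util.concurrent imports (ExecutorService, Executors, TimeUnit).

// Hedged sketch: calling doSmallAppends concurrently. Pool size, path, and
// iteration count are illustrative assumptions.
void doConcurrentSmallAppends(final DistributedFileSystem fs) throws IOException, InterruptedException {
    final Path file = new Path("/testConcurrentSmallAppends");
    // the file must exist before it can be appended to
    fs.create(file).close();
    ExecutorService pool = Executors.newFixedThreadPool(4);
    for (int t = 0; t < 4; t++) {
        pool.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    // each thread attempts 20 appends; contended attempts are skipped
                    doSmallAppends(file, fs, 20);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        });
    }
    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.MINUTES);
}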

Example 82 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileAppend4 method recoverFile.

/*
   * Recover the file.
   * Try to open the file in append mode.  Doing this, we get a
   * hold of the file that the crashed writer was writing to.
   * Once we have it, close it.  This allows a subsequent reader
   * to see everything up to the last sync.
   * NOTE: This is the same algorithm that HBase uses for file recovery
   * @param fs
   * @throws Exception
   */
private void recoverFile(final FileSystem fs) throws Exception {
    LOG.info("Recovering File Lease");
    // set the soft limit to be 1 second so that the
    // namenode triggers lease recovery upon append request
    cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD);
    // Trying recovery
    int tries = 60;
    boolean recovered = false;
    FSDataOutputStream out = null;
    while (!recovered && tries-- > 0) {
        try {
            out = fs.append(file1);
            LOG.info("Successfully opened for append");
            recovered = true;
        } catch (IOException e) {
            LOG.info("Failed open for append, waiting on lease recovery");
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ex) {
            // ignore it and try again
            }
        }
    }
    if (out != null) {
        out.close();
    }
    if (!recovered) {
        fail("Recovery should take < 1 min");
    }
    LOG.info("Past out lease recovery");
}
Also used : FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException)
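
The append-retry loop is one way to force lease recovery; DistributedFileSystem also exposes it directly through recoverLease(Path), which asks the NameNode to begin recovery without opening the file for writing. A minimal sketch of the same wait using that API, with the retry count and sleep interval mirroring the test above:

// Hedged sketch: explicit lease recovery via DistributedFileSystem#recoverLease,
// an alternative to the append-retry loop above.
private void recoverLeaseDirectly(final DistributedFileSystem dfs, final Path path) throws Exception {
    int tries = 60;
    boolean recovered = false;
    while (!recovered && tries-- > 0) {
        // recoverLease returns true once the file is closed and the lease released
        recovered = dfs.recoverLease(path);
        if (!recovered) {
            Thread.sleep(1000);
        }
    }
    if (!recovered) {
        fail("Lease recovery should take < 1 min");
    }
}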

Example 83 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileAppend4 method testUpdateNeededReplicationsForAppendedFile.

/**
   * Test the update of neededReplications for the appended block.
   */
@Test(timeout = 60000)
public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    DistributedFileSystem fileSystem = null;
    try {
        // create a file.
        fileSystem = cluster.getFileSystem();
        Path f = new Path("/testAppend");
        FSDataOutputStream create = fileSystem.create(f, (short) 2);
        create.write("/testAppend".getBytes());
        create.close();
        // Append to the file.
        FSDataOutputStream append = fileSystem.append(f);
        append.write("/testAppend".getBytes());
        append.close();
        // Start a new datanode
        cluster.startDataNodes(conf, 1, true, null, null);
        // Check for replications
        DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
    } finally {
        if (null != fileSystem) {
            fileSystem.close();
        }
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
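
DFSTestUtil.waitReplication is a test-only helper; the same check can be expressed against the public FileSystem API by polling the block locations until every block reports the expected number of replica hosts. A hedged sketch, where the retry bound and sleep interval are arbitrary assumptions:

// Hedged sketch: waiting for replication through the public API instead of
// the test-only DFSTestUtil helper. Retry bounds are arbitrary assumptions.
private static void waitForReplication(FileSystem fs, Path f, int expected) throws IOException, InterruptedException {
    for (int i = 0; i < 100; i++) {
        FileStatus stat = fs.getFileStatus(f);
        BlockLocation[] blocks = fs.getFileBlockLocations(stat, 0, stat.getLen());
        boolean allReplicated = true;
        for (BlockLocation block : blocks) {
            // getHosts lists the DataNodes currently holding replicas of this block
            if (block.getHosts().length < expected) {
                allReplicated = false;
                break;
            }
        }
        if (allReplicated) {
            return;
        }
        Thread.sleep(100);
    }
    throw new IOException("Replication of " + f + " did not reach " + expected);
}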

Example 84 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestBlockToken method testBlockTokenInLastLocatedBlock.

/**
   * This test writes a file and gets the block locations without closing the
   * file, and tests the block token in the last block. Block token is verified
   * by ensuring it is of correct kind.
   *
   * @throws IOException
   * @throws InterruptedException
   */
private void testBlockTokenInLastLocatedBlock(boolean enableProtobuf) throws IOException, InterruptedException {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE, enableProtobuf);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    try {
        FileSystem fs = cluster.getFileSystem();
        String fileName = "/testBlockTokenInLastLocatedBlock";
        Path filePath = new Path(fileName);
        FSDataOutputStream out = fs.create(filePath, (short) 1);
        out.write(new byte[1000]);
        // ensure that the first block is written out (see FSOutputSummer#flush)
        out.flush();
        LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0, 1000);
        while (locatedBlocks.getLastLocatedBlock() == null) {
            Thread.sleep(100);
            locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0, 1000);
        }
        Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock().getBlockToken();
        Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
        out.close();
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
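
The assertion above only checks the token's kind; actually reading the data back forces the client to present the block token to the DataNode, exercising it end to end. A hedged sketch of such a read-back, assuming the same fs and filePath from the test and org.apache.hadoop.io.IOUtils:

// Hedged sketch: reading the file back makes the client present the block
// token to the DataNode, exercising it beyond the kind check above.
try (FSDataInputStream in = fs.open(filePath)) {
    byte[] buffer = new byte[1000];
    // readFully fails if the DataNode rejects the block token
    IOUtils.readFully(in, buffer, 0, buffer.length);
}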

Example 85 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestMover method testWithinSameNode.

private void testWithinSameNode(Configuration conf) throws Exception {
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testScheduleWithinSameNode/file";
        Path dir = new Path("/testScheduleWithinSameNode");
        dfs.mkdirs(dir);
        // write to DISK
        dfs.setStoragePolicy(dir, "HOT");
        final FSDataOutputStream out = dfs.create(new Path(file));
        out.writeChars("testScheduleWithinSameNode");
        out.close();
        // verify before movement
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        StorageType[] storageTypes = lb.getStorageTypes();
        for (StorageType storageType : storageTypes) {
            Assert.assertTrue(StorageType.DISK == storageType);
        }
        // move to ARCHIVE
        dfs.setStoragePolicy(dir, "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", dir.toString() });
        Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
        // Wait till namenode notified about the block location details
        waitForLocatedBlockWithArchiveStorageType(dfs, file, 3);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem)
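
The helper waitForLocatedBlockWithArchiveStorageType is referenced above but not shown. A hedged sketch of what such a wait can look like, polling the first block's storage types until the expected number of ARCHIVE replicas appears; the actual implementation in TestMover may differ:

// Hedged sketch of the polling helper referenced above; the actual
// implementation in TestMover may differ.
private void waitForLocatedBlockWithArchiveStorageType(final DistributedFileSystem dfs, final String file, final int expectedArchiveCount) throws Exception {
    for (int i = 0; i < 60; i++) {
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        int archiveCount = 0;
        for (StorageType type : lb.getStorageTypes()) {
            if (type == StorageType.ARCHIVE) {
                archiveCount++;
            }
        }
        if (archiveCount == expectedArchiveCount) {
            return;
        }
        Thread.sleep(1000);
    }
    Assert.fail("Block was not moved to ARCHIVE within 60 seconds");
}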

Aggregations

FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 789
Path (org.apache.hadoop.fs.Path): 618
Test (org.junit.Test): 345
FileSystem (org.apache.hadoop.fs.FileSystem): 248
Configuration (org.apache.hadoop.conf.Configuration): 190
IOException (java.io.IOException): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 94
IgfsPath (org.apache.ignite.igfs.IgfsPath): 78
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 66
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 65
FileStatus (org.apache.hadoop.fs.FileStatus): 57
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 45
CreateFlag (org.apache.hadoop.fs.CreateFlag): 43
FileNotFoundException (java.io.FileNotFoundException): 40
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 40
ArrayList (java.util.ArrayList): 38
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 33
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 30
Random (java.util.Random): 28