Example 31 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

The class TestReplication, method testReplicationWhenBlockCorruption.

/**
   * Test that blocks get replicated when there are corrupted blocks, provided
   * the number of good replicas is at least the minimum replication.
   *
   * Simulates rbw blocks by creating dummy copies, then restarts the DataNodes
   * so that the corrupted blocks are detected as soon as possible.
   */
@Test(timeout = 30000)
public void testReplicationWhenBlockCorruption() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new HdfsConfiguration();
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storagesPerDatanode(1).build();
        FileSystem fs = cluster.getFileSystem();
        Path filePath = new Path("/test");
        FSDataOutputStream create = fs.create(filePath);
        fs.setReplication(filePath, (short) 1);
        create.write(new byte[1024]);
        create.close();
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
        int numReplicaCreated = 0;
        for (final DataNode dn : cluster.getDataNodes()) {
            if (!dn.getFSDataset().contains(block)) {
                cluster.getFsDatasetTestUtils(dn).injectCorruptReplica(block);
                numReplicaCreated++;
            }
        }
        assertEquals(2, numReplicaCreated);
        fs.setReplication(filePath, (short) 3);
        // Restart the DataNodes so they all detect the dummy (corrupt) copies
        cluster.restartDataNodes();
        cluster.waitActive();
        cluster.triggerBlockReports();
        DFSTestUtil.waitReplication(fs, filePath, (short) 3);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), FileSystem (org.apache.hadoop.fs.FileSystem), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
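
The key wait at the end relies on DFSTestUtil.waitReplication. As a rough illustration of the idea behind that wait (a sketch only, not Hadoop's actual implementation; the waitForReplication name is invented here), one can poll the block locations reported for the file until every block shows the expected number of replica hosts:

// Sketch only. Needs imports: org.apache.hadoop.fs.BlockLocation,
// org.apache.hadoop.fs.FileStatus, org.apache.hadoop.fs.FileSystem,
// org.apache.hadoop.fs.Path, java.io.IOException.
static void waitForReplication(FileSystem fs, Path path, short expected) throws IOException, InterruptedException {
    FileStatus stat = fs.getFileStatus(path);
    while (true) {
        boolean satisfied = true;
        for (BlockLocation loc : fs.getFileBlockLocations(stat, 0, stat.getLen())) {
            if (loc.getHosts().length < expected) {
                satisfied = false;
                break;
            }
        }
        if (satisfied) {
            return;
        }
        // back off before asking the NameNode again
        Thread.sleep(500);
    }
}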

Example 32 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

The class TestRollingUpgrade, method testRollback.

@Test
public void testRollback() throws Exception {
    // start a cluster
    final Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        final Path foo = new Path("/foo");
        final Path bar = new Path("/bar");
        cluster.getFileSystem().mkdirs(foo);
        final Path file = new Path(foo, "file");
        final byte[] data = new byte[1024];
        ThreadLocalRandom.current().nextBytes(data);
        final FSDataOutputStream out = cluster.getFileSystem().create(file);
        out.write(data, 0, data.length);
        out.close();
        checkMxBeanIsNull();
        startRollingUpgrade(foo, bar, file, data, cluster);
        checkMxBean();
        cluster.getFileSystem().rollEdits();
        cluster.getFileSystem().rollEdits();
        rollbackRollingUpgrade(foo, bar, file, data, cluster);
        checkMxBeanIsNull();
        startRollingUpgrade(foo, bar, file, data, cluster);
        cluster.getFileSystem().rollEdits();
        cluster.getFileSystem().rollEdits();
        rollbackRollingUpgrade(foo, bar, file, data, cluster);
        startRollingUpgrade(foo, bar, file, data, cluster);
        cluster.restartNameNode();
        rollbackRollingUpgrade(foo, bar, file, data, cluster);
        startRollingUpgrade(foo, bar, file, data, cluster);
        cluster.restartNameNode();
        rollbackRollingUpgrade(foo, bar, file, data, cluster);
        startRollingUpgrade(foo, bar, file, data, cluster);
        rollbackRollingUpgrade(foo, bar, file, data, cluster);
        startRollingUpgrade(foo, bar, file, data, cluster);
        rollbackRollingUpgrade(foo, bar, file, data, cluster);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
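
The startRollingUpgrade and rollbackRollingUpgrade helpers (defined elsewhere in TestRollingUpgrade) verify that the data written above survives each rollback. Their data check amounts to a round trip like the following sketch (the writeAndVerify name is invented; assumes fs and file as in the example):

// Sketch only. Needs imports: java.util.Arrays,
// java.util.concurrent.ThreadLocalRandom, org.apache.hadoop.fs.FSDataInputStream,
// org.apache.hadoop.fs.FSDataOutputStream, org.apache.hadoop.fs.FileSystem,
// org.apache.hadoop.fs.Path.
static void writeAndVerify(FileSystem fs, Path file) throws Exception {
    byte[] data = new byte[1024];
    ThreadLocalRandom.current().nextBytes(data);
    FSDataOutputStream out = fs.create(file);
    out.write(data, 0, data.length);
    out.close();
    byte[] readBack = new byte[data.length];
    FSDataInputStream in = fs.open(file);
    // readFully fills the whole buffer or throws EOFException
    in.readFully(readBack);
    in.close();
    if (!Arrays.equals(data, readBack)) {
        throw new AssertionError("data read back does not match data written");
    }
}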

Example 33 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

The class TestSetTimes, method writeFile.

private FSDataOutputStream writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) repl, blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    return stm;
}
Also used: Random (java.util.Random), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)
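
Note that writeFile returns the stream still open, so the caller controls when the file is closed. A typical call looks like the following sketch (path and replication are illustrative):

// The stream comes back open so the test can observe pre-close state;
// testTimesAtClose below depends on exactly this.
FSDataOutputStream stm = writeFile(fileSys, new Path("/simple.dat"), 1);
// ... inspect the file while it is still being written ...
stm.close(); // completing the file is what fixes its modification time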

Example 34 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

The class TestSetTimes, method testTimesAtClose.

/**
   * Tests mod time change at close in DFS.
   */
@Test
public void testTimesAtClose() throws IOException {
    Configuration conf = new HdfsConfiguration();
    final int MAX_IDLE_TIME = 2000; // 2s
    int replicas = 1;
    // parameter initialization
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    assertTrue(fileSys instanceof DistributedFileSystem);
    try {
        // create a new file and write to it
        Path file1 = new Path("/simple.dat");
        FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
        System.out.println("Created and wrote file simple.dat");
        FileStatus statBeforeClose = fileSys.getFileStatus(file1);
        long mtimeBeforeClose = statBeforeClose.getModificationTime();
        String mdateBeforeClose = dateForm.format(new Date(mtimeBeforeClose));
        System.out.println("mtime on " + file1 + " before close is " + mdateBeforeClose + " (" + mtimeBeforeClose + ")");
        assertTrue(mtimeBeforeClose != 0);
        // close file after writing
        stm.close();
        System.out.println("Closed file.");
        FileStatus statAfterClose = fileSys.getFileStatus(file1);
        long mtimeAfterClose = statAfterClose.getModificationTime();
        String mdateAfterClose = dateForm.format(new Date(mtimeAfterClose));
        System.out.println("mtime on " + file1 + " after close is " + mdateAfterClose + " (" + mtimeAfterClose + ")");
        assertTrue(mtimeAfterClose != 0);
        assertTrue(mtimeBeforeClose != mtimeAfterClose);
        cleanupFile(fileSys, file1);
    } catch (IOException e) {
        info = client.datanodeReport(DatanodeReportType.ALL);
        printDatanodeReport(info);
        throw e;
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), FileStatus (org.apache.hadoop.fs.FileStatus), Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), IOException (java.io.IOException), Date (java.util.Date), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
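
Close is only one of the points where FSDataOutputStream interacts with file state. As a side note (a sketch, not part of this test), the stream also exposes hflush and hsync for visibility and durability before close:

// Sketch only. Needs imports: org.apache.hadoop.fs.FSDataOutputStream,
// org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path.
static void writeDurably(FileSystem fs, Path path, byte[] data) throws Exception {
    FSDataOutputStream out = fs.create(path);
    out.write(data);
    out.hflush(); // make written bytes visible to new readers
    out.hsync();  // additionally ask DataNodes to persist replicas to disk
    out.close();  // complete the file; the NameNode sets mtime at close
}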

Example 35 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

The class TestSetTimes, method testTimes.

/**
   * Tests mod & access time in DFS.
   */
@Test
public void testTimes() throws IOException {
    Configuration conf = new HdfsConfiguration();
    final int MAX_IDLE_TIME = 2000; // 2s
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    final int nnport = cluster.getNameNodePort();
    InetSocketAddress addr = new InetSocketAddress("localhost", nnport);
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    int replicas = 1;
    assertTrue(fileSys instanceof DistributedFileSystem);
    try {
        //
        // create file and record atime/mtime
        //
        System.out.println("Creating testdir1 and testdir1/test1.dat.");
        Path dir1 = new Path("testdir1");
        Path file1 = new Path(dir1, "test1.dat");
        FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
        FileStatus stat = fileSys.getFileStatus(file1);
        long atimeBeforeClose = stat.getAccessTime();
        String adate = dateForm.format(new Date(atimeBeforeClose));
        System.out.println("atime on " + file1 + " before close is " + adate + " (" + atimeBeforeClose + ")");
        assertTrue(atimeBeforeClose != 0);
        stm.close();
        stat = fileSys.getFileStatus(file1);
        long atime1 = stat.getAccessTime();
        long mtime1 = stat.getModificationTime();
        adate = dateForm.format(new Date(atime1));
        String mdate = dateForm.format(new Date(mtime1));
        System.out.println("atime on " + file1 + " is " + adate + " (" + atime1 + ")");
        System.out.println("mtime on " + file1 + " is " + mdate + " (" + mtime1 + ")");
        assertTrue(atime1 != 0);
        //
        // record dir times
        //
        stat = fileSys.getFileStatus(dir1);
        long mdir1 = stat.getAccessTime();
        assertTrue(mdir1 == 0);
        // set the access time to be one day in the past
        long atime2 = atime1 - (24L * 3600L * 1000L);
        fileSys.setTimes(file1, -1, atime2);
        // check new access time on file
        stat = fileSys.getFileStatus(file1);
        long atime3 = stat.getAccessTime();
        String adate3 = dateForm.format(new Date(atime3));
        System.out.println("new atime on " + file1 + " is " + adate3 + " (" + atime3 + ")");
        assertTrue(atime2 == atime3);
        assertTrue(mtime1 == stat.getModificationTime());
        // set the modification time to be 1 hour in the past
        long mtime2 = mtime1 - (3600L * 1000L);
        fileSys.setTimes(file1, mtime2, -1);
        // check new modification time on file
        stat = fileSys.getFileStatus(file1);
        long mtime3 = stat.getModificationTime();
        String mdate3 = dateForm.format(new Date(mtime3));
        System.out.println("new mtime on " + file1 + " is " + mdate3 + " (" + mtime3 + ")");
        assertTrue(atime2 == stat.getAccessTime());
        assertTrue(mtime2 == mtime3);
        long mtime4 = Time.now() - (3600L * 1000L);
        long atime4 = Time.now();
        fileSys.setTimes(dir1, mtime4, atime4);
        // check the new modification and access times on the directory
        stat = fileSys.getFileStatus(dir1);
        assertTrue("Not matching the modification times", mtime4 == stat.getModificationTime());
        assertTrue("Not matching the access times", atime4 == stat.getAccessTime());
        Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
        try {
            fileSys.setTimes(nonExistingDir, mtime4, atime4);
            fail("Expecting FileNotFoundException");
        } catch (FileNotFoundException e) {
            assertTrue(e.getMessage().contains("File/Directory " + nonExistingDir.toString() + " does not exist."));
        }
        // shutdown cluster and restart
        cluster.shutdown();
        try {
            Thread.sleep(2 * MAX_IDLE_TIME);
        } catch (InterruptedException e) {
            // ignore; the sleep only gives idle IPC connections time to expire
        }
        cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        // verify that access times and modification times persist after a
        // cluster restart.
        System.out.println("Verifying times after cluster restart");
        stat = fileSys.getFileStatus(file1);
        assertTrue(atime2 == stat.getAccessTime());
        assertTrue(mtime3 == stat.getModificationTime());
        cleanupFile(fileSys, file1);
        cleanupFile(fileSys, dir1);
    } catch (IOException e) {
        info = client.datanodeReport(DatanodeReportType.ALL);
        printDatanodeReport(info);
        throw e;
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), FileStatus (org.apache.hadoop.fs.FileStatus), Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException), Date (java.util.Date), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
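
The -1 passed to setTimes throughout is the sentinel that leaves one of the two timestamps untouched. A compact sketch of the semantics the test exercises (assumes fs and path are already set up):

long now = System.currentTimeMillis();
// set atime one day back, leave mtime unchanged
fs.setTimes(path, -1, now - 24L * 3600L * 1000L);
// set mtime one hour back, leave atime unchanged
fs.setTimes(path, now - 3600L * 1000L, -1);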

Aggregations

FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 789
Path (org.apache.hadoop.fs.Path): 618
Test (org.junit.Test): 345
FileSystem (org.apache.hadoop.fs.FileSystem): 248
Configuration (org.apache.hadoop.conf.Configuration): 190
IOException (java.io.IOException): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 94
IgfsPath (org.apache.ignite.igfs.IgfsPath): 78
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 66
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 65
FileStatus (org.apache.hadoop.fs.FileStatus): 57
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 45
CreateFlag (org.apache.hadoop.fs.CreateFlag): 43
FileNotFoundException (java.io.FileNotFoundException): 40
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 40
ArrayList (java.util.ArrayList): 38
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 33
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 30
Random (java.util.Random): 28
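
Taken together, the most frequent co-occurring classes above make up the canonical create-write-close pattern. A minimal, self-contained sketch (path and payload are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsWriteExample {
    public static void main(String[] args) throws Exception {
        // picks up core-site.xml / hdfs-site.xml from the classpath
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.dat");
        // overwrite an existing file if present
        FSDataOutputStream out = fs.create(path, true);
        try {
            out.write("hello".getBytes("UTF-8"));
        } finally {
            // the file is only complete (and its mtime set) after close
            out.close();
        }
        fs.close();
    }
}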