Example 16 with HdfsDataOutputStream

use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.

the class TestWrites method testCheckCommitFromRead.

@Test
// Validate the commit check return codes, including COMMIT_INACTIVE_CTX,
// COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC.
public void testCheckCommitFromRead() throws IOException {
    DFSClient dfsClient = Mockito.mock(DFSClient.class);
    Nfs3FileAttributes attr = new Nfs3FileAttributes();
    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
    Mockito.when(fos.getPos()).thenReturn((long) 0);
    NfsConfiguration config = new NfsConfiguration();
    config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
    OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(config), false, config);
    // fake handle for "/dumpFilePath"
    FileHandle h = new FileHandle(1);
    COMMIT_STATUS ret;
    WriteManager wm = new WriteManager(new ShellBasedIdMapping(config), config, false);
    assertTrue(wm.addOpenFileStream(h, ctx));
    // Test inactive open file context
    ctx.setActiveStatusForTest(false);
    Channel ch = Mockito.mock(Channel.class);
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret);
    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
    ctx.getPendingWritesForTest().put(new OffsetRange(10, 15), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret);
    assertEquals(Nfs3Status.NFS3ERR_IO, wm.commitBeforeRead(dfsClient, h, 0));
    // Test request with non-zero commit offset
    ctx.setActiveStatusForTest(true);
    Mockito.when(fos.getPos()).thenReturn((long) 10);
    ctx.setNextOffsetForTest((long) 10);
    COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false);
    assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
    // The DO_SYNC state is updated to COMMIT_FINISHED after the data sync
    ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 5));
    status = ctx.checkCommitInternal(10, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
    ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 10));
    ConcurrentNavigableMap<Long, CommitCtx> commits = ctx.getPendingCommitsForTest();
    assertEquals(0, commits.size());
    ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
    // commit triggered by read doesn't wait
    assertEquals(0, commits.size());
    assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 11));
    // Test request with zero commit offset
    // There is one pending write [5,10]
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
    assertEquals(0, commits.size());
    assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0));
    // Empty pending writes
    ctx.getPendingWritesForTest().remove(new OffsetRange(10, 15));
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) CommitCtx(org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) Channel(org.jboss.netty.channel.Channel) NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration) COMMIT_STATUS(org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS) ShellBasedIdMapping(org.apache.hadoop.security.ShellBasedIdMapping) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) Test(org.junit.Test)
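
The test above never touches a real cluster: the HdfsDataOutputStream is a Mockito mock whose getPos() is stubbed to simulate write progress. A minimal, self-contained sketch of that mocking pattern (the class name and stubbed offset are illustrative, not from the test) looks like this:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.junit.Test;
import org.mockito.Mockito;

public class HdfsDataOutputStreamMockSketch {

    @Test
    public void stubbedPositionIsVisible() throws Exception {
        // Mock the concrete stream class so no MiniDFSCluster is needed.
        HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
        // getPos() normally reports the number of bytes written so far;
        // stubbing it lets commit-offset logic be tested deterministically.
        Mockito.when(fos.getPos()).thenReturn(10L);
        assertEquals(10L, fos.getPos());
    }
}

This is the same trick OpenFileCtx relies on above: checkCommit() compares the requested commit offset against fos.getPos(), so moving the stubbed position between calls walks the context through the different COMMIT_STATUS values.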

Example 17 with HdfsDataOutputStream

use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.

the class TestDecommission method testPendingNodes.

@Test(timeout = 120000)
public void testPendingNodes() throws Exception {
    org.apache.log4j.Logger.getLogger(DecommissionManager.class).setLevel(Level.TRACE);
    // Only allow one node to be decom'd at a time
    getConf().setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES, 1);
    // Disable the normal monitor runs
    getConf().setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, Integer.MAX_VALUE);
    startCluster(1, 3);
    final FileSystem fs = getCluster().getFileSystem();
    final DatanodeManager datanodeManager = getCluster().getNamesystem().getBlockManager().getDatanodeManager();
    final DecommissionManager decomManager = datanodeManager.getDecomManager();
    // Keep a file open to prevent decom from progressing
    HdfsDataOutputStream open1 = (HdfsDataOutputStream) fs.create(new Path("/openFile1"), (short) 3);
    // Flush and trigger block reports so the block definitely shows up on NN
    open1.write(123);
    open1.hflush();
    for (DataNode d : getCluster().getDataNodes()) {
        DataNodeTestUtils.triggerBlockReport(d);
    }
    // Decom two nodes, so one is still alive
    ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
    for (int i = 0; i < 2; i++) {
        final DataNode d = getCluster().getDataNodes().get(i);
        DatanodeInfo dn = takeNodeOutofService(0, d.getDatanodeUuid(), 0, decommissionedNodes, AdminStates.DECOMMISSION_INPROGRESS);
        decommissionedNodes.add(dn);
    }
    for (int i = 2; i >= 0; i--) {
        assertTrackedAndPending(decomManager, 0, i);
        BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
    }
    // Close file, try to decom the last node, should get stuck in tracked
    open1.close();
    final DataNode d = getCluster().getDataNodes().get(2);
    DatanodeInfo dn = takeNodeOutofService(0, d.getDatanodeUuid(), 0, decommissionedNodes, AdminStates.DECOMMISSION_INPROGRESS);
    decommissionedNodes.add(dn);
    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
    assertTrackedAndPending(decomManager, 1, 0);
}
Also used : DecommissionManager(org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager) Path(org.apache.hadoop.fs.Path) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) Test(org.junit.Test)
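
What keeps decommissioning pending here is the combination of write(), hflush(), and not closing the stream: hflush() pushes the data to the datanodes but leaves the last block under construction. A minimal sketch of that open-file pattern (path and replication factor are illustrative; the cast only succeeds when the underlying filesystem is a DistributedFileSystem):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class OpenFileForDecomSketch {

    public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS in the Configuration points at an HDFS cluster.
        FileSystem fs = FileSystem.get(new Configuration());
        HdfsDataOutputStream out =
                (HdfsDataOutputStream) fs.create(new Path("/openFile1"), (short) 3);
        out.write(123);
        // Data is now visible to readers, but the block stays under
        // construction until close(), which is what stalls decommissioning.
        out.hflush();
        // ... keep the stream open while exercising decommissioning ...
        out.close();
    }
}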

Example 18 with HdfsDataOutputStream

use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.

the class TestFileCreation method testFileCreationNamenodeRestart.

/**
   * Test that file leases are persisted across namenode restarts.
   */
@Test
public void testFileCreationNamenodeRestart() throws IOException, NoSuchFieldException, IllegalAccessException {
    Configuration conf = new HdfsConfiguration();
    final int MAX_IDLE_TIME = 2000; // 2s
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
    if (simulatedStorage) {
        SimulatedFSDataset.setFactory(conf);
    }
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    DistributedFileSystem fs = null;
    try {
        cluster.waitActive();
        fs = cluster.getFileSystem();
        final int nnport = cluster.getNameNodePort();
        // create a new file.
        Path file1 = new Path("/filestatus.dat");
        HdfsDataOutputStream stm = create(fs, file1, 1);
        System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file1);
        assertEquals(file1 + " should be replicated to 1 datanode.", 1, stm.getCurrentBlockReplication());
        // write two full blocks.
        writeFile(stm, numBlocks * blockSize);
        stm.hflush();
        assertEquals(file1 + " should still be replicated to 1 datanode.", 1, stm.getCurrentBlockReplication());
        // rename file while keeping it open.
        Path fileRenamed = new Path("/filestatusRenamed.dat");
        fs.rename(file1, fileRenamed);
        System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file1 + " to " + fileRenamed);
        file1 = fileRenamed;
        // create another new file.
        //
        Path file2 = new Path("/filestatus2.dat");
        FSDataOutputStream stm2 = createFile(fs, file2, 1);
        System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file2);
        // create yet another new file with full path name. 
        // rename it while open
        //
        Path file3 = new Path("/user/home/fullpath.dat");
        FSDataOutputStream stm3 = createFile(fs, file3, 1);
        System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file3);
        Path file4 = new Path("/user/home/fullpath4.dat");
        FSDataOutputStream stm4 = createFile(fs, file4, 1);
        System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file4);
        fs.mkdirs(new Path("/bin"));
        fs.rename(new Path("/user/home"), new Path("/bin"));
        Path file3new = new Path("/bin/home/fullpath.dat");
        System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file3 + " to " + file3new);
        Path file4new = new Path("/bin/home/fullpath4.dat");
        System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file4 + " to " + file4new);
        // restart cluster with the same namenode port as before.
        // This ensures that leases are persisted in fsimage.
        cluster.shutdown(false, false);
        try {
            Thread.sleep(2 * MAX_IDLE_TIME);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
        cluster.waitActive();
        // restart cluster yet again. This triggers the code to read in
        // persistent leases from fsimage.
        cluster.shutdown(false, false);
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        // instruct the dfsclient to use a new filename when it requests
        // new blocks for files that were renamed.
        DFSOutputStream dfstream = (DFSOutputStream) (stm.getWrappedStream());
        Field f = DFSOutputStream.class.getDeclaredField("src");
        Field modifiersField = Field.class.getDeclaredField("modifiers");
        modifiersField.setAccessible(true);
        modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL);
        f.setAccessible(true);
        f.set(dfstream, file1.toString());
        dfstream = (DFSOutputStream) (stm3.getWrappedStream());
        f.set(dfstream, file3new.toString());
        dfstream = (DFSOutputStream) (stm4.getWrappedStream());
        f.set(dfstream, file4new.toString());
        // write 1 byte to file.  This should succeed because the 
        // namenode should have persisted leases.
        byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
        stm.write(buffer);
        stm.close();
        stm2.write(buffer);
        stm2.close();
        stm3.close();
        stm4.close();
        // verify that new block is associated with this file
        DFSClient client = fs.dfs;
        LocatedBlocks locations = client.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        System.out.println("locations = " + locations.locatedBlockCount());
        assertTrue("Error blocks were not cleaned up for file " + file1, locations.locatedBlockCount() == 3);
        // verify filestatus2.dat
        locations = client.getNamenode().getBlockLocations(file2.toString(), 0, Long.MAX_VALUE);
        System.out.println("locations = " + locations.locatedBlockCount());
        assertTrue("Error blocks were not cleaned up for file " + file2, locations.locatedBlockCount() == 1);
    } finally {
        IOUtils.closeStream(fs);
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) Field(java.lang.reflect.Field) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
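
Two HdfsDataOutputStream-specific calls carry this test: getCurrentBlockReplication() reports how many datanodes are in the current write pipeline, and getWrappedStream() exposes the underlying DFSOutputStream that the reflection hack above rewrites. A minimal sketch of both calls (class and path names are illustrative; a running cluster is assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class WrappedStreamSketch {

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        HdfsDataOutputStream out =
                (HdfsDataOutputStream) fs.create(new Path("/filestatus.dat"), (short) 1);
        // Width of the live write pipeline for the block being written.
        System.out.println("replication = " + out.getCurrentBlockReplication());
        // The wrapped stream is the low-level DFSOutputStream; the test
        // above mutates its private "src" field through reflection.
        DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
        System.out.println("wrapped stream = " + dfsOut.getClass().getName());
        out.close();
    }
}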

Example 19 with HdfsDataOutputStream

use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.

the class TestFSImageWithSnapshot method testSaveLoadImageWithAppending.

/**
   * Test the fsimage saving/loading while file appending.
   */
@Test(timeout = 60000)
public void testSaveLoadImageWithAppending() throws Exception {
    Path sub1 = new Path(dir, "sub1");
    Path sub1file1 = new Path(sub1, "sub1file1");
    Path sub1file2 = new Path(sub1, "sub1file2");
    DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, (short) 1, seed);
    DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, (short) 1, seed);
    // 1. create snapshot s0
    hdfs.allowSnapshot(dir);
    hdfs.createSnapshot(dir, "s0");
    // 2. create snapshot s1 before appending sub1file1 finishes
    HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    // also append sub1file2
    DFSTestUtil.appendFile(hdfs, sub1file2, BLOCKSIZE);
    hdfs.createSnapshot(dir, "s1");
    out.close();
    // 3. create snapshot s2 before appending finishes
    out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    hdfs.createSnapshot(dir, "s2");
    out.close();
    // 4. save fsimage before appending finishes
    out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    // dump fsdir
    File fsnBefore = dumpTree2File("before");
    // save the namesystem to a temp file
    File imageFile = saveFSImageToTempFile();
    // 5. load fsimage and compare
    // first restart the cluster, and format the cluster
    out.close();
    cluster.shutdown();
    cluster = new MiniDFSCluster.Builder(conf).format(true).numDataNodes(NUM_DATANODES).build();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    hdfs = cluster.getFileSystem();
    // then load the fsimage
    loadFSImageFromTempFile(imageFile);
    // dump the fsdir tree again
    File fsnAfter = dumpTree2File("after");
    // compare two dumped tree
    SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
}
Also used : Path(org.apache.hadoop.fs.Path) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) File(java.io.File) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) Test(org.junit.Test)
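
The hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)) calls are the crux: a plain hflush() persists the bytes to the datanodes but leaves the NameNode's recorded file length stale, while UPDATE_LENGTH also refreshes that length, so the snapshot and the saved fsimage capture the appended data. A minimal sketch of the flag's effect (path illustrative, cluster assumed):

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class UpdateLengthSketch {

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/sub1/sub1file1");
        HdfsDataOutputStream out = (HdfsDataOutputStream) fs.create(file);
        out.write(new byte[1024]);
        // Persist to the datanodes AND update the length on the NameNode,
        // so getFileStatus() and snapshots see the 1024 new bytes.
        out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        System.out.println("visible length = " + fs.getFileStatus(file).getLen());
        out.close();
    }
}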

Example 20 with HdfsDataOutputStream

use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.

the class TestFSImageWithSnapshot method appendFileWithoutClosing.

/** Append a file without closing the output stream */
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length) throws IOException {
    byte[] toAppend = new byte[length];
    Random random = new Random();
    random.nextBytes(toAppend);
    HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
    out.write(toAppend);
    return out;
}
Also used : Random(java.util.Random) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream)
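
A typical call site appears in Example 19 above: the helper returns while the file is still under construction, the caller hsync()s the length and takes a snapshot, and only then closes. As a usage fragment (reusing the test's hdfs, dir, sub1file1, and BLOCKSIZE fixtures from Example 19):

// The stream is deliberately left open so sub1file1 is still under
// construction when snapshot s1 is taken.
HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
hdfs.createSnapshot(dir, "s1");
out.close();

The cast inside the helper works because DistributedFileSystem.append() returns an HdfsDataOutputStream wrapped around the low-level DFSOutputStream.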

Aggregations

HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 23 uses
Test (org.junit.Test): 17 uses
Path (org.apache.hadoop.fs.Path): 10 uses
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 10 uses
DFSClient (org.apache.hadoop.hdfs.DFSClient): 9 uses
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 8 uses
ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping): 8 uses
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 6 uses
COMMIT_STATUS (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS): 5 uses
CommitCtx (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx): 5 uses
IOException (java.io.IOException): 4 uses
Channel (org.jboss.netty.channel.Channel): 4 uses
Configuration (org.apache.hadoop.conf.Configuration): 3 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 3 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 3 uses
Random (java.util.Random): 2 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 2 uses
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 2 uses