
Example 16 with DFSOutputStream

Use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.

From the class TestBlockManager, method testNeededReconstructionWhileAppending.

@Test(timeout = 60000)
public void testNeededReconstructionWhileAppending() throws IOException {
    Configuration conf = new HdfsConfiguration();
    String src = "/test-file";
    Path file = new Path(src);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    try {
        BlockManager bm = cluster.getNamesystem().getBlockManager();
        FileSystem fs = cluster.getFileSystem();
        NamenodeProtocols namenode = cluster.getNameNodeRpc();
        DFSOutputStream out = null;
        try {
            out = (DFSOutputStream) (fs.create(file).getWrappedStream());
            out.write(1);
            out.hflush();
            out.close();
            FSDataInputStream in = null;
            ExtendedBlock oldBlock = null;
            try {
                in = fs.open(file);
                oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
            } finally {
                IOUtils.closeStream(in);
            }
            // Re-open the file for append and set up a new pipeline for its block,
            // which bumps the block's generation stamp on the NameNode.
            String clientName = ((DistributedFileSystem) fs).getClient().getClientName();
            namenode.append(src, clientName, new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
            LocatedBlock newLocatedBlock = namenode.updateBlockForPipeline(oldBlock, clientName);
            ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(), oldBlock.getBlockId(), oldBlock.getNumBytes(), newLocatedBlock.getBlock().getGenerationStamp());
            namenode.updatePipeline(clientName, oldBlock, newBlock, newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
            // The block is now under construction again, so it must not be queued for reconstruction.
            BlockInfo bi = bm.getStoredBlock(newBlock.getLocalBlock());
            assertFalse(bm.isNeededReconstruction(bi, bm.countNodes(bi, cluster.getNamesystem().isInStartupSafeMode())));
        } finally {
            IOUtils.closeStream(out);
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream), Test (org.junit.Test)
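
Most of the examples on this page rely on the same unwrapping step: the public FileSystem API hands back an FSDataOutputStream, while the HDFS-specific durability calls (hflush, hsync) live on the DFSOutputStream wrapped inside it. The following is a minimal sketch of that pattern against a single-node MiniDFSCluster; it is illustrative only, not taken from the Hadoop test suite, and the file name is made up (imports are the same ones listed above).

Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    try (FSDataOutputStream out = fs.create(new Path("/sketch.dat"))) {
        out.writeBytes("payload");
        // hflush() pushes buffered bytes down the write pipeline so new readers
        // can see them; hsync() additionally asks the datanodes to sync to disk.
        ((DFSOutputStream) out.getWrappedStream()).hsync();
    }
} finally {
    cluster.shutdown();
}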

Example 17 with DFSOutputStream

Use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.

From the class TestDataNodeRollingUpgrade, method testDatanodePeersXceiver.

@Test(timeout = 600000)
// Test DatanodeXceiver has correct peer-dataxceiver pairs for sending OOB message
public void testDatanodePeersXceiver() throws Exception {
    try {
        startCluster();
        // Create files in DFS.
        String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
        String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
        String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";
        DFSClient client1 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
        DFSClient client2 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
        DFSClient client3 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
        DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
        DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
        DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);
        byte[] toWrite = new byte[1024 * 1024 * 8];
        Random rb = new Random(1111);
        rb.nextBytes(toWrite);
        s1.write(toWrite, 0, 1024 * 1024 * 8);
        s1.flush();
        s2.write(toWrite, 0, 1024 * 1024 * 8);
        s2.flush();
        s3.write(toWrite, 0, 1024 * 1024 * 8);
        s3.flush();
        // Every open write pipeline should have a matching peer/DataXceiver pair
        assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer().getNumPeersXceiver());
        s1.close();
        s2.close();
        s3.close();
        // After the streams are closed the two counts should still agree
        assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer().getNumPeersXceiver());
        client1.close();
        client2.close();
        client3.close();
    } finally {
        shutdownCluster();
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), Random (java.util.Random), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream), Test (org.junit.Test)
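
Per the comment on the test, the assertions are about the bookkeeping inside the datanode's DataXceiverServer: each open DFSOutputStream holds a peer connection, and the server keeps a matching DataXceiver per peer so that out-of-band messages (for example the restart notice sent during a rolling upgrade) can be delivered down every active write pipeline. Checking that the peer count and the peer-xceiver count agree both while the three streams are open and again after they are closed confirms that the pairs are created and torn down together.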

Example 18 with DFSOutputStream

Use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.

From the class TestDataNodeVolumeMetrics, method testVolumeMetrics.

@Test
public void testVolumeMetrics() throws Exception {
    MiniDFSCluster cluster = setupClusterForVolumeMetrics();
    try {
        FileSystem fs = cluster.getFileSystem();
        final Path fileName = new Path("/test.dat");
        final long fileLen = Integer.MAX_VALUE + 1L;
        DFSTestUtil.createFile(fs, fileName, false, BLOCK_SIZE, fileLen, fs.getDefaultBlockSize(fileName), REPL, 1L, true);
        try (FSDataOutputStream out = fs.append(fileName)) {
            out.writeBytes("hello world");
            ((DFSOutputStream) out.getWrappedStream()).hsync();
        }
        verifyDataNodeVolumeMetrics(fs, cluster, fileName);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream), Test (org.junit.Test)
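
The append followed by hsync() is presumably there to exercise the datanode's flush and sync path before the metrics are read; without at least one synced write, the per-volume flush and sync statistics that the helper verifyDataNodeVolumeMetrics is expected to check would have nothing to report.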

Example 19 with DFSOutputStream

Use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.

From the class TestStorageMover, method testMigrateOpenFileToArchival.

/**
   * Move an open file into archival storage
   */
@Test
public void testMigrateOpenFileToArchival() throws Exception {
    LOG.info("testMigrateOpenFileToArchival");
    final Path fooDir = new Path("/foo");
    Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
    policyMap.put(fooDir, COLD);
    NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(fooDir), null, BLOCK_SIZE, null, policyMap);
    ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF, NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
    MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
    test.setupCluster();
    // create an open file
    banner("writing to file /foo/bar");
    final Path barFile = new Path(fooDir, "bar");
    DFSTestUtil.createFile(test.dfs, barFile, BLOCK_SIZE, (short) 1, 0L);
    FSDataOutputStream out = test.dfs.append(barFile);
    out.writeBytes("hello, ");
    ((DFSOutputStream) out.getWrappedStream()).hsync();
    try {
        banner("start data migration");
        // set /foo to COLD
        test.setStoragePolicy();
        test.migrate(ExitStatus.SUCCESS);
        // make sure the under construction block has not been migrated
        LocatedBlocks lbs = test.dfs.getClient().getLocatedBlocks(barFile.toString(), BLOCK_SIZE);
        LOG.info("Locations: " + lbs);
        List<LocatedBlock> blks = lbs.getLocatedBlocks();
        Assert.assertEquals(1, blks.size());
        Assert.assertEquals(1, blks.get(0).getLocations().length);
        banner("finish the migration, continue writing");
        // make sure the writing can continue
        out.writeBytes("world!");
        ((DFSOutputStream) out.getWrappedStream()).hsync();
        IOUtils.cleanup(LOG, out);
        lbs = test.dfs.getClient().getLocatedBlocks(barFile.toString(), BLOCK_SIZE);
        LOG.info("Locations: " + lbs);
        blks = lbs.getLocatedBlocks();
        Assert.assertEquals(1, blks.size());
        Assert.assertEquals(1, blks.get(0).getLocations().length);
        banner("finish writing, starting reading");
        // check the content of /foo/bar
        FSDataInputStream in = test.dfs.open(barFile);
        byte[] buf = new byte[13];
        // read from offset 1024
        in.readFully(BLOCK_SIZE, buf, 0, buf.length);
        IOUtils.cleanup(LOG, in);
        Assert.assertEquals("hello, world!", new String(buf));
    } finally {
        test.shutdownCluster();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream), Test (org.junit.Test)
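
Two details make the checks work: the file is created with replication 1 and then re-opened for append, so its trailing block stays under construction, and getLocatedBlocks(barFile.toString(), BLOCK_SIZE) asks only for blocks starting at offset BLOCK_SIZE, i.e. that appended last block. The assertions around the mover run mirror the comments in the code (the under-construction block should not be migrated, and writing must be able to continue), and the final readFully at offset BLOCK_SIZE confirms that both appended writes ("hello, " and "world!") landed in that block. Outside the MigrationTest harness, pinning a directory to archival storage is just a policy call on the file system; a one-line sketch, assuming dfs is a DistributedFileSystem and /foo already exists:

dfs.setStoragePolicy(new Path("/foo"), "COLD");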

Example 20 with DFSOutputStream

Use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.

From the class TestDiskspaceQuotaUpdate, method testUpdateQuotaForFSync.

/**
   * Test if the quota can be correctly updated when file length is updated
   * through fsync
   */
@Test(timeout = 60000)
public void testUpdateQuotaForFSync() throws Exception {
    final Path foo = new Path(getParent(GenericTestUtils.getMethodName()), "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(getDFS(), bar, BLOCKSIZE, REPLICATION, 0L);
    getDFS().setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    FSDataOutputStream out = getDFS().append(bar);
    out.write(new byte[BLOCKSIZE / 4]);
    ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
    INodeDirectory fooNode = getFSDirectory().getINode4Write(foo.toString()).asDirectory();
    QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
    long ns = quota.getNameSpace();
    long ds = quota.getStorageSpace();
    // foo and bar
    assertEquals(2, ns);
    // file is under construction
    assertEquals(BLOCKSIZE * 2 * REPLICATION, ds);
    out.write(new byte[BLOCKSIZE / 4]);
    out.close();
    fooNode = getFSDirectory().getINode4Write(foo.toString()).asDirectory();
    quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
    ns = quota.getNameSpace();
    ds = quota.getStorageSpace();
    assertEquals(2, ns);
    assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);
    // append another block
    DFSTestUtil.appendFile(getDFS(), bar, BLOCKSIZE);
    quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
    ns = quota.getNameSpace();
    ds = quota.getStorageSpace();
    // foo and bar
    assertEquals(2, ns);
    assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
}
Also used: Path (org.apache.hadoop.fs.Path), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream), Test (org.junit.Test)
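
The space numbers follow from how HDFS charges quota against a file that is still open: the last, under-construction block is charged at the full preferred block size, so after the first quarter-block write and the UPDATE_LENGTH hsync the consumed space is BLOCKSIZE * 2 * REPLICATION (one finalized block plus one open block). Once the stream is closed the last block is charged at its actual half-block length, and appending another full block via DFSTestUtil.appendFile raises the usage to BLOCKSIZE * 2 + BLOCKSIZE / 2 per replica. The UPDATE_LENGTH flag matters here because a plain hsync() persists the data on the datanodes without telling the NameNode the new length; passing the flag makes the NameNode record the length as part of the fsync, which is the code path whose quota handling this test exercises.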

Aggregations

DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream): 20
Test (org.junit.Test): 16
Path (org.apache.hadoop.fs.Path): 15
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9
FileSystem (org.apache.hadoop.fs.FileSystem): 6
Configuration (org.apache.hadoop.conf.Configuration): 5
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
IOException (java.io.IOException): 3
DFSClient (org.apache.hadoop.hdfs.DFSClient): 3
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 2
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 2
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 2
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 2
File (java.io.File): 1