
Example 6 with DFSOutputStream

use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.

the class TestRenameWithSnapshots method testAppendFileAfterRenameInSnapshot.

/**
   * Similar to testRenameUCFileInSnapshot, but do the renaming first and then
   * append to the file without closing it. Unit test for HDFS-5425.
   */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
    final Path test = new Path("/test");
    final Path foo = new Path(test, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
    // rename bar --> bar2
    final Path bar2 = new Path(foo, "bar2");
    hdfs.rename(bar, bar2);
    // append to the file and keep it under construction.
    FSDataOutputStream out = hdfs.append(bar2);
    out.writeByte(0);
    ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    // save namespace and restart
    restartClusterAndCheckImage(true);
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream) Test(org.junit.Test)
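
A small follow-on sketch, not part of the original test: because the hsync above passes SyncFlag.UPDATE_LENGTH, the NameNode-visible file length should already include the appended byte while the stream is still open, so it could be checked with a plain getFileStatus call. The assertion and the expected value below are assumptions for illustration.

// Hedged sketch: confirm the appended byte is visible to the NameNode before close.
long visibleLength = hdfs.getFileStatus(bar2).getLen();
assertEquals(BLOCKSIZE + 1, visibleLength);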

Example 7 with DFSOutputStream

use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.

the class TestOpenFilesWithSnapshot method doWriteAndAbort.

private void doWriteAndAbort(DistributedFileSystem fs, Path path) throws IOException {
    fs.mkdirs(path);
    fs.allowSnapshot(path);
    DFSTestUtil.createFile(fs, new Path("/test/test1"), 100, (short) 2, 100024L);
    DFSTestUtil.createFile(fs, new Path("/test/test2"), 100, (short) 2, 100024L);
    Path file = new Path("/test/test/test2");
    FSDataOutputStream out = fs.create(file);
    for (int i = 0; i < 2; i++) {
        long count = 0;
        while (count < 1048576) {
            out.writeBytes("hell");
            count += 4;
        }
    }
    ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream());
    Path file2 = new Path("/test/test/test3");
    FSDataOutputStream out2 = fs.create(file2);
    for (int i = 0; i < 2; i++) {
        long count = 0;
        while (count < 1048576) {
            out2.writeBytes("hell");
            count += 4;
        }
    }
    ((DFSOutputStream) out2.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    DFSTestUtil.abortStream((DFSOutputStream) out2.getWrappedStream());
    fs.createSnapshot(path, "s1");
}
Also used : Path(org.apache.hadoop.fs.Path) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream)
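
For context, a hypothetical caller sketch (the cluster variable and the invocation are assumptions, not shown in this excerpt); the helper effectively expects the snapshottable directory to be /test, since the file paths inside it are hardcoded under that root.

// Hypothetical invocation; 'cluster' is assumed to be a running MiniDFSCluster.
DistributedFileSystem fs = cluster.getFileSystem();
doWriteAndAbort(fs, new Path("/test"));
// At this point the directory holds two aborted, still-open files plus snapshot "s1".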

Example 8 with DFSOutputStream

use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.

the class HdfsDataOutputStream method hsync.

/**
   * Sync buffered data to DataNodes (flush to disk devices).
   *
   * @param syncFlags
   *          Indicate the detailed semantics and actions of the hsync.
   * @throws IOException
   * @see FSDataOutputStream#hsync()
   */
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
    OutputStream wrappedStream = getWrappedStream();
    if (wrappedStream instanceof CryptoOutputStream) {
        wrappedStream.flush();
        wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
    }
    ((DFSOutputStream) wrappedStream).hsync(syncFlags);
}
Also used : OutputStream(java.io.OutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) CryptoOutputStream(org.apache.hadoop.crypto.CryptoOutputStream) DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream)
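
A minimal usage sketch of this overload, assuming a DistributedFileSystem client named dfs (the path and payload are illustrative): the stream returned by create can be checked for HdfsDataOutputStream and asked for a length-updating sync.

// Minimal usage sketch; 'dfs', the path and the payload are assumptions for illustration.
try (FSDataOutputStream out = dfs.create(new Path("/tmp/hsync-demo"))) {
    out.writeBytes("buffered data");
    if (out instanceof HdfsDataOutputStream) {
        // Sync to DataNodes and ask the NameNode to record the new length.
        ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    }
}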

Example 9 with DFSOutputStream

use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.

the class TestDataNodeVolumeMetrics method testVolumeMetricsWithVolumeDepartureArrival.

@Test
public void testVolumeMetricsWithVolumeDepartureArrival() throws Exception {
    MiniDFSCluster cluster = setupClusterForVolumeMetrics();
    try {
        FileSystem fs = cluster.getFileSystem();
        final Path fileName = new Path("/test.dat");
        final long fileLen = Integer.MAX_VALUE + 1L;
        DFSTestUtil.createFile(fs, fileName, false, BLOCK_SIZE, fileLen, fs.getDefaultBlockSize(fileName), REPL, 1L, true);
        try (FSDataOutputStream out = fs.append(fileName)) {
            out.writeBytes("hello world");
            ((DFSOutputStream) out.getWrappedStream()).hsync();
        }
        ArrayList<DataNode> dns = cluster.getDataNodes();
        assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
        final String dataDir = cluster.getDataDirectory();
        final File dn1Vol2 = new File(dataDir, "data2");
        DataNodeTestUtils.injectDataDirFailure(dn1Vol2);
        verifyDataNodeVolumeMetrics(fs, cluster, fileName);
        DataNodeTestUtils.restoreDataDirFromFailure(dn1Vol2);
        DataNodeTestUtils.reconfigureDataNode(dns.get(0), dn1Vol2);
        verifyDataNodeVolumeMetrics(fs, cluster, fileName);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream) File(java.io.File) Test(org.junit.Test)
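
The cast inside the try-with-resources block above assumes the wrapped stream really is a DFSOutputStream; a slightly more defensive variant (a style suggestion, not taken from the test) guards the cast and falls back to the generic hsync:

// Defensive variant sketch (not from the original test); OutputStream is java.io.OutputStream.
OutputStream wrapped = out.getWrappedStream();
if (wrapped instanceof DFSOutputStream) {
    ((DFSOutputStream) wrapped).hsync();
} else {
    // Fall back to the flag-less hsync exposed by FSDataOutputStream.
    out.hsync();
}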

Example 10 with DFSOutputStream

use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.

the class TestDataNodeMetrics method testRoundTripAckMetric.

/**
   * Tests that round-trip acks in a datanode write pipeline are correctly 
   * measured. 
   */
@Test
public void testRoundTripAckMetric() throws Exception {
    final int datanodeCount = 2;
    final int interval = 1;
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(datanodeCount).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        // Open a file and get the head of the pipeline
        Path testFile = new Path("/testRoundTripAckMetric.txt");
        FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
        DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
        // Slow down the writes to catch the write pipeline
        dout.setChunksPerPacket(5);
        dout.setArtificialSlowdown(3000);
        fsout.write(new byte[10000]);
        DatanodeInfo[] pipeline = null;
        int count = 0;
        while (pipeline == null && count < 5) {
            pipeline = dout.getPipeline();
            System.out.println("Waiting for pipeline to be created.");
            Thread.sleep(1000);
            count++;
        }
        // Get the head node that should be receiving downstream acks
        DatanodeInfo headInfo = pipeline[0];
        DataNode headNode = null;
        for (DataNode datanode : cluster.getDataNodes()) {
            if (datanode.getDatanodeId().equals(headInfo)) {
                headNode = datanode;
                break;
            }
        }
        assertNotNull("Could not find the head of the datanode write pipeline", headNode);
        // Close the file and wait for the metrics to rollover
        Thread.sleep((interval + 1) * 1000);
        // Check the ack was received
        MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics().name());
        assertTrue("Expected non-zero number of acks", getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
        assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s", dnMetrics);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream) Test(org.junit.Test)
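
As a side note, the quantile assertion above only works because a percentiles interval is configured; a minimal configuration sketch (the 60-second interval is an assumption) shows how the key relates to the gauge name:

// Minimal configuration sketch; the interval value is an assumption for illustration.
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "60");
// Quantile gauges are then published with the interval suffix, e.g.
// "PacketAckRoundTripTimeNanos60s", matching the name pattern asserted above.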

Aggregations

DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream): 20
Test (org.junit.Test): 16
Path (org.apache.hadoop.fs.Path): 15
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9
FileSystem (org.apache.hadoop.fs.FileSystem): 6
Configuration (org.apache.hadoop.conf.Configuration): 5
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
IOException (java.io.IOException): 3
DFSClient (org.apache.hadoop.hdfs.DFSClient): 3
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 2
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 2
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 2
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 2
File (java.io.File): 1