Example 86 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestMover method testMoverFailedRetryWithPinnedBlocks.

/**
   * Verify that the Mover works with pinned blocks as well as failed
   * blocks, and that it keeps retrying only the failed blocks.
   */
@Test(timeout = 90000)
public void testMoverFailedRetryWithPinnedBlocks() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .storageTypes(new StorageType[][] {
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE } })
        .build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String parentDir = "/parent";
        dfs.mkdirs(new Path(parentDir));
        final String file1 = "/parent/testMoverFailedRetryWithPinnedBlocks1";
        // write to DISK
        final FSDataOutputStream out = dfs.create(new Path(file1), (short) 2);
        byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
        out.write(fileData);
        out.close();
        // Adding pinned blocks.
        createFileWithFavoredDatanodes(conf, cluster, dfs);
        // Delete the block file so the block move fails with FileNotFoundException.
        LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
        Assert.assertEquals("Wrong block count", 2, locatedBlocks.locatedBlockCount());
        LocatedBlock lb = locatedBlocks.get(0);
        cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
        // move to ARCHIVE
        dfs.setStoragePolicy(new Path(parentDir), "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", parentDir });
        Assert.assertEquals("Movement should fail after some retry", ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
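
Stripped of the test harness, the pattern this example exercises is: write a file through FSDataOutputStream, tag its directory with the COLD storage policy, and let the Mover migrate the blocks to ARCHIVE. A minimal sketch, assuming Hadoop 2.8+ (where FileSystem itself exposes setStoragePolicy) and a reachable cluster; the paths are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveWriteSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path dir = new Path("/parent");            // illustrative path
        fs.mkdirs(dir);
        Path file = new Path(dir, "data.bin");     // illustrative file name
        // With no policy set, the blocks land on the default (DISK) tier.
        try (FSDataOutputStream out = fs.create(file)) {
            out.write(new byte[] { 1, 2, 3 });
        }
        // Mark the directory COLD so the Mover will relocate its blocks to ARCHIVE.
        fs.setStoragePolicy(dir, "COLD");
        // The actual migration is performed by the Mover tool, e.g.: hdfs mover -p /parent
    }
}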

Example 87 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestSpaceReservation method testRBWFileCreationError.

@SuppressWarnings("unchecked")
@Test(timeout = 30000)
public void testRBWFileCreationError() throws Exception {
    final short replication = 1;
    startCluster(BLOCK_SIZE, replication, -1);
    final FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes().get(0)
        .getFSDataset().getFsVolumeReferences().get(0);
    final String methodName = GenericTestUtils.getMethodName();
    final Path file = new Path("/" + methodName + ".01.dat");
    // Mock BlockPoolSlice so that RBW file creation throws an IOException.
    BlockPoolSlice blockPoolSlice = Mockito.mock(BlockPoolSlice.class);
    Mockito.when(blockPoolSlice.createRbwFile((Block) Mockito.any())).thenThrow(new IOException("Synthetic IO Exception Through MOCK"));
    Field field = FsVolumeImpl.class.getDeclaredField("bpSlices");
    field.setAccessible(true);
    Map<String, BlockPoolSlice> bpSlices = (Map<String, BlockPoolSlice>) field.get(fsVolumeImpl);
    bpSlices.put(fsVolumeImpl.getBlockPoolList()[0], blockPoolSlice);
    try {
        // Write 1 byte to the file
        FSDataOutputStream os = fs.create(file, replication);
        os.write(new byte[1]);
        os.hsync();
        os.close();
        fail("Expecting IOException file creation failure");
    } catch (IOException e) {
    // Exception can be ignored (expected)
    }
    // Ensure RBW space reserved is released
    assertTrue("Expected ZERO but got " + fsVolumeImpl.getReservedForReplicas(), fsVolumeImpl.getReservedForReplicas() == 0);
    // Reserve some bytes to verify that the space is not cleared twice.
    fsVolumeImpl.reserveSpaceForReplica(1000);
    try {
        // Write 1 byte to the file
        FSDataOutputStream os = fs.create(new Path("/" + methodName + ".02.dat"), replication);
        os.write(new byte[1]);
        os.hsync();
        os.close();
        fail("Expecting IOException file creation failure");
    } catch (IOException e) {
    // Exception can be ignored (expected)
    }
    // Ensure RBW space reserved is released only once
    assertTrue(fsVolumeImpl.getReservedForReplicas() == 1000);
}
Also used : Path(org.apache.hadoop.fs.Path) Field(java.lang.reflect.Field) IOException(java.io.IOException) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Map(java.util.Map) Test(org.junit.Test)
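
The setup worth noting here is the injection technique: a Mockito mock of BlockPoolSlice is planted into FsVolumeImpl's private bpSlices map via reflection, so the next RBW file creation throws. Reduced to a self-contained sketch with illustrative stand-in types (Volume and Slice below are not Hadoop classes):

import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
import org.mockito.Mockito;

public class ReflectionInjectionSketch {
    // Illustrative stand-ins for FsVolumeImpl and BlockPoolSlice.
    interface Slice { String createFile() throws Exception; }
    static class Volume {
        private final Map<String, Slice> slices = new HashMap<>();
        String create(String pool) throws Exception { return slices.get(pool).createFile(); }
    }

    public static void main(String[] args) throws Exception {
        Volume volume = new Volume();
        // Mock the collaborator so it fails when used.
        Slice failing = Mockito.mock(Slice.class);
        Mockito.when(failing.createFile()).thenThrow(new Exception("synthetic failure"));
        // Reach into the private field and install the mock.
        Field field = Volume.class.getDeclaredField("slices");
        field.setAccessible(true);
        @SuppressWarnings("unchecked")
        Map<String, Slice> slices = (Map<String, Slice>) field.get(volume);
        slices.put("pool-1", failing);
        try {
            volume.create("pool-1");  // now throws the synthetic exception
        } catch (Exception expected) {
            System.out.println("caught: " + expected.getMessage());
        }
    }
}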

Example 88 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestSpaceReservation method createFileAndTestSpaceReservation.

private void createFileAndTestSpaceReservation(final String fileNamePrefix, final int fileBlockSize) throws IOException, InterruptedException {
    // Enough for 1 block + meta files + some delta.
    final long configuredCapacity = fileBlockSize * 2 - 1;
    startCluster(BLOCK_SIZE, 1, configuredCapacity);
    FSDataOutputStream out = null;
    Path path = new Path("/" + fileNamePrefix + ".dat");
    try {
        out = fs.create(path, false, 4096, (short) 1, fileBlockSize);
        byte[] buffer = new byte[rand.nextInt(fileBlockSize / 4)];
        out.write(buffer);
        out.hsync();
        int bytesWritten = buffer.length;
        // Check that space was reserved for a full block minus the bytesWritten.
        assertThat(singletonVolume.getReservedForReplicas(), is((long) fileBlockSize - bytesWritten));
        out.close();
        out = null;
        // Check that the reserved space has been released since we closed the
        // file.
        assertThat(singletonVolume.getReservedForReplicas(), is(0L));
        // Reopen the file for appends and write 1 more byte.
        out = fs.append(path);
        out.write(buffer);
        out.hsync();
        bytesWritten += buffer.length;
        // Check that space was again reserved for a full block minus the
        // bytesWritten so far.
        assertThat(singletonVolume.getReservedForReplicas(), is((long) fileBlockSize - bytesWritten));
        // Write once again and again verify the available space. This ensures
        // that the reserved space is progressively adjusted to account for bytes
        // written to disk.
        out.write(buffer);
        out.hsync();
        bytesWritten += buffer.length;
        assertThat(singletonVolume.getReservedForReplicas(), is((long) fileBlockSize - bytesWritten));
    } finally {
        if (out != null) {
            out.close();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
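
The assertions above hinge on how hsync() interacts with per-block space reservation: a full block's worth of space is reserved when the stream is opened for create or append, the reservation shrinks as synced bytes reach disk, and it is released entirely on close. For reference, the same create/hsync/append sequence against a plain FileSystem looks like this; a minimal sketch with an illustrative path and block size, assuming an HDFS cluster that permits append:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HsyncAppendSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path path = new Path("/tmp/reservation-demo.dat");  // illustrative
        long blockSize = 1024 * 1024;
        // create(path, overwrite, bufferSize, replication, blockSize)
        try (FSDataOutputStream out =
                 fs.create(path, false, 4096, (short) 1, blockSize)) {
            out.write(new byte[128]);
            out.hsync();  // persist to the DataNode; the reservation shrinks by 128 bytes
        }                 // close() releases the rest of the reservation
        // Reopen for append; a fresh reservation covers the reopened block.
        try (FSDataOutputStream out = fs.append(path)) {
            out.write(new byte[1]);
            out.hsync();
        }
    }
}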

Example 89 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestSpaceReservation method testReservedSpaceForPipelineRecovery.

@Test(timeout = 30000)
public void testReservedSpaceForPipelineRecovery() throws Exception {
    final short replication = 3;
    startCluster(BLOCK_SIZE, replication, -1);
    final String methodName = GenericTestUtils.getMethodName();
    final Path file = new Path("/" + methodName + ".01.dat");
    old = DataNodeFaultInjector.get();
    // Fault injector to fail connection to mirror first time.
    DataNodeFaultInjector.set(new DataNodeFaultInjector() {

        private int tries = 0;

        @Override
        public void failMirrorConnection() throws IOException {
            if (tries++ == 0) {
                throw new IOException("Failing Mirror for space reservation");
            }
        }
    });
    // Write 1 byte to the file. The injector fails the first mirror
    // connection, forcing pipeline recovery.
    FSDataOutputStream os = fs.create(file, replication);
    os.write(new byte[1]);
    os.close();
    // Ensure all space reserved for the replica was released on each
    // DataNode.
    cluster.triggerBlockReports();
    for (final DataNode dn : cluster.getDataNodes()) {
        try (FsDatasetSpi.FsVolumeReferences volumes = dn.getFSDataset().getFsVolumeReferences()) {
            final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
            GenericTestUtils.waitFor(new Supplier<Boolean>() {

                @Override
                public Boolean get() {
                    LOG.info("dn " + dn.getDisplayName() + " space : " + volume.getReservedForReplicas());
                    return (volume.getReservedForReplicas() == 0);
                }
            }, 100, Integer.MAX_VALUE); // Poll every 100 ms until the test times out.
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DataNodeFaultInjector(org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) IOException(java.io.IOException) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
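
GenericTestUtils.waitFor simply polls the supplied check until it returns true or the timeout elapses, so the anonymous Supplier above collapses to a lambda on Java 8+ (the Supplier type is Guava's in older Hadoop lines and java.util.function in newer ones; both are functional interfaces). A self-contained sketch of the same polling pattern with a toy condition; the 100 ms interval mirrors the call above, and a 10 s cap stands in for Integer.MAX_VALUE:

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForSketch {
    public static void main(String[] args) throws Exception {
        AtomicInteger reserved = new AtomicInteger(3);
        // Background work that gradually releases the "reservation".
        new Thread(() -> {
            while (reserved.get() > 0) {
                reserved.decrementAndGet();
                try { Thread.sleep(50); } catch (InterruptedException ignored) { }
            }
        }).start();
        // Poll every 100 ms; fail with TimeoutException after 10 s.
        GenericTestUtils.waitFor(() -> reserved.get() == 0, 100, 10_000);
        System.out.println("reservation released");
    }
}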

Example 90 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestSpaceReservation method testWithLimitedSpace.

@Test(timeout = 300000)
public void testWithLimitedSpace() throws IOException {
    // Cluster with just enough space for a full block + meta.
    startCluster(BLOCK_SIZE, 1, 2 * BLOCK_SIZE - 1);
    final String methodName = GenericTestUtils.getMethodName();
    Path file1 = new Path("/" + methodName + ".01.dat");
    Path file2 = new Path("/" + methodName + ".02.dat");
    // Create two files.
    FSDataOutputStream os1 = null, os2 = null;
    try {
        os1 = fs.create(file1);
        os2 = fs.create(file2);
        // Write one byte to the first file.
        byte[] data = new byte[1];
        os1.write(data);
        os1.hsync();
        // Try to write one byte to the second file.
        // The block allocation must fail.
        thrown.expect(RemoteException.class);
        os2.write(data);
        os2.hsync();
    } finally {
        if (os1 != null) {
            os1.close();
        }
    // os2.close() will fail as no block was allocated.
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
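
The thrown field used above is a JUnit 4 ExpectedException rule declared elsewhere in TestSpaceReservation; arming it with RemoteException.class before the second write is what lets the test pass when block allocation fails. A minimal sketch of the rule in isolation, with a toy IOException standing in for the failing hsync:

import java.io.IOException;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

public class ExpectedExceptionSketch {
    @Rule
    public ExpectedException thrown = ExpectedException.none();

    @Test
    public void failsAsExpected() throws IOException {
        // Arm the rule: the test now passes only if an IOException escapes.
        thrown.expect(IOException.class);
        throw new IOException("out of space");  // stands in for os2.hsync()
    }
}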

Aggregations

FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 789
Path (org.apache.hadoop.fs.Path): 618
Test (org.junit.Test): 345
FileSystem (org.apache.hadoop.fs.FileSystem): 248
Configuration (org.apache.hadoop.conf.Configuration): 190
IOException (java.io.IOException): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 94
IgfsPath (org.apache.ignite.igfs.IgfsPath): 78
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 66
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 65
FileStatus (org.apache.hadoop.fs.FileStatus): 57
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 45
CreateFlag (org.apache.hadoop.fs.CreateFlag): 43
FileNotFoundException (java.io.FileNotFoundException): 40
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 40
ArrayList (java.util.ArrayList): 38
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 33
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 30
Random (java.util.Random): 28