
Example 31 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From class TestDatanodeRestart, method testRbwReplicas.

private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt) throws IOException {
    FSDataOutputStream out = null;
    FileSystem fs = cluster.getFileSystem();
    final Path src = new Path("/test.txt");
    try {
        final int fileLen = 515;
        // create some rbw replicas on disk
        byte[] writeBuf = new byte[fileLen];
        new Random().nextBytes(writeBuf);
        out = fs.create(src);
        out.write(writeBuf);
        out.hflush();
        DataNode dn = cluster.getDataNodes().get(0);
        try (FsDatasetSpi.FsVolumeReferences volumes = dataset(dn).getFsVolumeReferences()) {
            for (FsVolumeSpi vol : volumes) {
                final FsVolumeImpl volume = (FsVolumeImpl) vol;
                File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
                File rbwDir = new File(currentDir, "rbw");
                for (File file : rbwDir.listFiles()) {
                    if (isCorrupt && Block.isBlockFilename(file)) {
                        // corrupt
                        new RandomAccessFile(file, "rw").setLength(fileLen - 1);
                    }
                }
            }
        }
        cluster.restartDataNodes();
        cluster.waitActive();
        dn = cluster.getDataNodes().get(0);
        // check volumeMap: one rwr replica
        String bpid = cluster.getNamesystem().getBlockPoolId();
        ReplicaMap replicas = dataset(dn).volumeMap;
        Assert.assertEquals(1, replicas.size(bpid));
        ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
        Assert.assertEquals(ReplicaState.RWR, replica.getState());
        if (isCorrupt) {
            Assert.assertEquals((fileLen - 1) / 512 * 512, replica.getNumBytes());
        } else {
            Assert.assertEquals(fileLen, replica.getNumBytes());
        }
        dataset(dn).invalidate(bpid, new Block[] { replica });
    } finally {
        IOUtils.closeStream(out);
        if (fs.exists(src)) {
            fs.delete(src, false);
        }
        fs.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) Random(java.util.Random) RandomAccessFile(java.io.RandomAccessFile) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File)
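
The dataset(dn) helper used above is not part of the snippet. A minimal sketch of what it could look like, assuming it simply unwraps the datanode's dataset through DataNodeTestUtils and that the concrete dataset type is FsDatasetImpl (both are assumptions, not confirmed by the example):

// Hypothetical helper: fetch the datanode's dataset and cast it to the
// default implementation so that volumeMap and FsVolumeImpl are accessible.
private static FsDatasetImpl dataset(DataNode dn) {
    return (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
}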

Example 32 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From class TestDatasetVolumeCheckerTimeout, method testDiskCheckTimeout.

@Test(timeout = 1000)
public void testDiskCheckTimeout() throws Exception {
    LOG.info("Executing {}", testName.getMethodName());
    final FsVolumeSpi volume = makeSlowVolume();
    final DatasetVolumeChecker checker = new DatasetVolumeChecker(conf, new FakeTimer());
    final AtomicLong numCallbackInvocations = new AtomicLong(0);
    lock.lock();
    /**
     * Request a check and ensure it triggered {@link FsVolumeSpi#check}.
     */
    boolean result = checker.checkVolume(volume, new DatasetVolumeChecker.Callback() {

        @Override
        public void call(Set<FsVolumeSpi> healthyVolumes, Set<FsVolumeSpi> failedVolumes) {
            numCallbackInvocations.incrementAndGet();
            // Assert that the disk check registers a failed volume due to
            // timeout
            assertThat(healthyVolumes.size(), is(0));
            assertThat(failedVolumes.size(), is(1));
        }
    });
    // Wait for the callback
    Thread.sleep(DISK_CHECK_TIME);
    // Release lock
    lock.unlock();
    // Ensure that the check was invoked only once.
    verify(volume, times(1)).check(anyObject());
    assertThat(numCallbackInvocations.get(), is(1L));
}
Also used : AtomicLong(java.util.concurrent.atomic.AtomicLong) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) FakeTimer(org.apache.hadoop.util.FakeTimer) Test(org.junit.Test)
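
The makeSlowVolume() helper is not shown in the snippet. A minimal sketch under the assumption that it builds a Mockito mock of FsVolumeSpi whose check() blocks on the test's ReentrantLock, so the disk check overruns the checker's timeout and the volume is reported as failed (VolumeCheckResult and obtainReference() are assumptions about how DatasetVolumeChecker interacts with the volume):

// Hypothetical sketch: a mocked volume whose disk check hangs until the
// test releases the lock it acquired before calling checkVolume().
private FsVolumeSpi makeSlowVolume() throws Exception {
    final FsVolumeSpi volume = mock(FsVolumeSpi.class);
    final FsVolumeReference reference = mock(FsVolumeReference.class);
    when(reference.getVolume()).thenReturn(volume);
    when(volume.obtainReference()).thenReturn(reference);
    when(volume.check(anyObject())).thenAnswer(invocation -> {
        // Block until the test calls lock.unlock(), simulating a hung disk.
        lock.lock();
        try {
            return VolumeCheckResult.HEALTHY;
        } finally {
            lock.unlock();
        }
    });
    return volume;
}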

Example 33 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From class TestWriteToReplica, method testAppend.

private void testAppend(String bpid, FsDatasetSpi<?> dataSet, ExtendedBlock[] blocks) throws IOException {
    long newGS = blocks[FINALIZED].getGenerationStamp() + 1;
    final FsVolumeSpi v = dataSet.getVolume(blocks[FINALIZED]);
    if (v instanceof FsVolumeImpl) {
        FsVolumeImpl fvi = (FsVolumeImpl) v;
        long available = fvi.getCapacity() - fvi.getDfsUsed();
        long expectedLen = blocks[FINALIZED].getNumBytes();
        try {
            fvi.onBlockFileDeletion(bpid, -available);
            blocks[FINALIZED].setNumBytes(expectedLen + 100);
            dataSet.append(blocks[FINALIZED], newGS, expectedLen);
            Assert.fail("Should not have space to append to an RWR replica" + blocks[RWR]);
        } catch (DiskOutOfSpaceException e) {
            Assert.assertTrue(e.getMessage().startsWith("Insufficient space for appending to "));
        }
        fvi.onBlockFileDeletion(bpid, available);
        blocks[FINALIZED].setNumBytes(expectedLen);
    }
    newGS = blocks[RBW].getGenerationStamp() + 1;
    // successful
    dataSet.append(blocks[FINALIZED], newGS, blocks[FINALIZED].getNumBytes());
    blocks[FINALIZED].setGenerationStamp(newGS);
    try {
        dataSet.append(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp() + 1, blocks[TEMPORARY].getNumBytes());
        Assert.fail("Should not have appended to a temporary replica " + blocks[TEMPORARY]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[TEMPORARY], e.getMessage());
    }
    try {
        dataSet.append(blocks[RBW], blocks[RBW].getGenerationStamp() + 1, blocks[RBW].getNumBytes());
        Assert.fail("Should not have appended to an RBW replica" + blocks[RBW]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RBW], e.getMessage());
    }
    try {
        dataSet.append(blocks[RWR], blocks[RWR].getGenerationStamp() + 1, blocks[RBW].getNumBytes());
        Assert.fail("Should not have appended to an RWR replica" + blocks[RWR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RWR], e.getMessage());
    }
    try {
        dataSet.append(blocks[RUR], blocks[RUR].getGenerationStamp() + 1, blocks[RUR].getNumBytes());
        Assert.fail("Should not have appended to an RUR replica" + blocks[RUR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RUR], e.getMessage());
    }
    try {
        dataSet.append(blocks[NON_EXISTENT], blocks[NON_EXISTENT].getGenerationStamp(), blocks[NON_EXISTENT].getNumBytes());
        Assert.fail("Should not have appended to a non-existent replica " + blocks[NON_EXISTENT]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.NON_EXISTENT_REPLICA + blocks[NON_EXISTENT], e.getMessage());
    }
    newGS = blocks[FINALIZED].getGenerationStamp() + 1;
    // successful
    dataSet.recoverAppend(blocks[FINALIZED], newGS, blocks[FINALIZED].getNumBytes());
    blocks[FINALIZED].setGenerationStamp(newGS);
    try {
        dataSet.recoverAppend(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp() + 1, blocks[TEMPORARY].getNumBytes());
        Assert.fail("Should not have appended to a temporary replica " + blocks[TEMPORARY]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
    }
    newGS = blocks[RBW].getGenerationStamp() + 1;
    dataSet.recoverAppend(blocks[RBW], newGS, blocks[RBW].getNumBytes());
    blocks[RBW].setGenerationStamp(newGS);
    try {
        dataSet.recoverAppend(blocks[RWR], blocks[RWR].getGenerationStamp() + 1, blocks[RBW].getNumBytes());
        Assert.fail("Should not have appended to an RWR replica" + blocks[RWR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
    }
    try {
        dataSet.recoverAppend(blocks[RUR], blocks[RUR].getGenerationStamp() + 1, blocks[RUR].getNumBytes());
        Assert.fail("Should not have appended to an RUR replica" + blocks[RUR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
    }
    try {
        dataSet.recoverAppend(blocks[NON_EXISTENT], blocks[NON_EXISTENT].getGenerationStamp(), blocks[NON_EXISTENT].getNumBytes());
        Assert.fail("Should not have appended to a non-existent replica " + blocks[NON_EXISTENT]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.NON_EXISTENT_REPLICA));
    }
}
Also used : DiskOutOfSpaceException(org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi)
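
The FINALIZED, TEMPORARY, RBW, RWR, RUR and NON_EXISTENT indices refer to an ExtendedBlock array that the test prepares beforehand, one block per replica state. A minimal illustrative sketch of such a setup, assuming simple index constants and arbitrary block ids, lengths and generation stamps (the real test additionally drives the dataset through the corresponding write paths to put each replica into its state):

// Hypothetical index constants, one per replica state exercised above.
private static final int FINALIZED = 0;
private static final int TEMPORARY = 1;
private static final int RBW = 2;
private static final int RWR = 3;
private static final int RUR = 4;
private static final int NON_EXISTENT = 5;

// Hypothetical helper that allocates one ExtendedBlock per state; ids,
// lengths and generation stamps are arbitrary test values.
private static ExtendedBlock[] makeBlocks(String bpid) {
    ExtendedBlock[] blocks = new ExtendedBlock[NON_EXISTENT + 1];
    for (int i = 0; i < blocks.length; i++) {
        blocks[i] = new ExtendedBlock(bpid, i + 1, 100, 2000 + i);
    }
    return blocks;
}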

Aggregations

FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) 33
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) 15
Test (org.junit.Test) 10
IOException (java.io.IOException) 8
File (java.io.File) 7
HashSet (java.util.HashSet) 7
Path (org.apache.hadoop.fs.Path) 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 6
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) 6
Configuration (org.apache.hadoop.conf.Configuration) 5
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock) 5
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 4
ArrayList (java.util.ArrayList) 3
FileSystem (org.apache.hadoop.fs.FileSystem) 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 3
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation) 3
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) 3
FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) 3
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) 3
HashMap (java.util.HashMap) 2