Example 1 with FsVolumeReferences

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.

From class FsDatasetImplTestUtils, method injectCorruptReplica:

@Override
public void injectCorruptReplica(ExtendedBlock block) throws IOException {
    Preconditions.checkState(!dataset.contains(block), "Block " + block + " already exists on dataset.");
    try (FsVolumeReferences volRef = dataset.getFsVolumeReferences()) {
        FsVolumeImpl volume = (FsVolumeImpl) volRef.get(0);
        FinalizedReplica finalized = new FinalizedReplica(block.getLocalBlock(), volume, volume.getFinalizedDir(block.getBlockPoolId()));
        File blockFile = finalized.getBlockFile();
        if (!blockFile.createNewFile()) {
            throw new FileExistsException("Block file " + blockFile + " already exists.");
        }
        File metaFile = FsDatasetUtil.getMetaFile(blockFile, 1000);
        if (!metaFile.createNewFile()) {
            throw new FileExistsException("Meta file " + metaFile + " already exists.");
        }
        dataset.volumeMap.add(block.getBlockPoolId(), finalized);
    }
}
Also used: FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), FileExistsException (org.apache.commons.io.FileExistsException)
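
The example above picks the first volume by index. When a test only needs to visit every volume, the same try-with-resources idiom applies, since FsVolumeReferences is both Iterable of FsVolumeSpi and Closeable (both uses appear elsewhere on this page). A minimal sketch, assuming the same dataset field used in these examples; the helper name listVolumeStorageIds is illustrative, not a Hadoop API:

// Illustrative helper (not part of Hadoop): collect the storage ID of every volume
// while the references are held, then release them all when the try block exits.
public List<String> listVolumeStorageIds() throws IOException {
    List<String> ids = new ArrayList<>();
    try (FsVolumeReferences volRefs = dataset.getFsVolumeReferences()) {
        for (FsVolumeSpi vol : volRefs) {
            ids.add(vol.getStorageID());
        }
    }
    return ids;
}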

Example 2 with FsVolumeReferences

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.

From class TestFsDatasetImpl, method testRemoveVolumes:

@Test(timeout = 30000)
public void testRemoveVolumes() throws IOException {
    // Feed FsDataset with block metadata.
    final int NUM_BLOCKS = 100;
    for (int i = 0; i < NUM_BLOCKS; i++) {
        String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length];
        ExtendedBlock eb = new ExtendedBlock(bpid, i);
        try (ReplicaHandler replica = dataset.createRbw(StorageType.DEFAULT, eb, false)) {
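            // The handler is closed immediately; creating the RBW replica is enough
            // to register it in the volumeMap for this test.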
        }
    }
    final String[] dataDirs = conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
    final String volumePathToRemove = dataDirs[0];
    Set<StorageLocation> volumesToRemove = new HashSet<>();
    volumesToRemove.add(StorageLocation.parse(volumePathToRemove));
    FsVolumeReferences volReferences = dataset.getFsVolumeReferences();
    FsVolumeImpl volumeToRemove = null;
    for (FsVolumeSpi vol : volReferences) {
        if (vol.getStorageLocation().equals(volumesToRemove.iterator().next())) {
            volumeToRemove = (FsVolumeImpl) vol;
        }
    }
    assertTrue(volumeToRemove != null);
    volReferences.close();
    dataset.removeVolumes(volumesToRemove, true);
    int expectedNumVolumes = dataDirs.length - 1;
    assertEquals("The volume has been removed from the volumeList.", expectedNumVolumes, getNumVolumes());
    assertEquals("The volume has been removed from the storageMap.", expectedNumVolumes, dataset.storageMap.size());
    try {
        dataset.asyncDiskService.execute(volumeToRemove, new Runnable() {

            @Override
            public void run() {
            }
        });
        fail("Expect RuntimeException: the volume has been removed from the " + "AsyncDiskService.");
    } catch (RuntimeException e) {
        GenericTestUtils.assertExceptionContains("Cannot find volume", e);
    }
    int totalNumReplicas = 0;
    for (String bpid : dataset.volumeMap.getBlockPoolList()) {
        totalNumReplicas += dataset.volumeMap.size(bpid);
    }
    assertEquals("The replica infos on this volume has been removed from the " + "volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES, totalNumReplicas);
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), Matchers.anyString (org.mockito.Matchers.anyString), ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation), HashSet (java.util.HashSet), Test (org.junit.Test)
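
Example 2 closes volReferences explicitly before calling removeVolumes, because an open FsVolumeReferences holds a reference on each volume, including the one being removed. The same lookup can be written with try-with-resources so the release cannot be forgotten. A minimal sketch under the same assumptions (the dataset field from this test); findVolumeByLocation is a hypothetical helper, not a Hadoop API:

// Hypothetical helper: find the FsVolumeImpl backing a StorageLocation and release
// the volume references on exit, so a subsequent removeVolumes() is not held up.
private FsVolumeImpl findVolumeByLocation(StorageLocation location) throws IOException {
    try (FsVolumeReferences volRefs = dataset.getFsVolumeReferences()) {
        for (FsVolumeSpi vol : volRefs) {
            if (vol.getStorageLocation().equals(location)) {
                return (FsVolumeImpl) vol;
            }
        }
    }
    return null;
}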

Example 3 with FsVolumeReferences

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.

From class FsDatasetImplTestUtils, method verifyBlockPoolMissing:

@Override
public void verifyBlockPoolMissing(String bpid) throws IOException {
    FsVolumeImpl volume;
    try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
        volume = (FsVolumeImpl) references.get(0);
    }
    File bpDir = new File(volume.getCurrentDir(), bpid);
    if (bpDir.exists()) {
        throw new IOException(String.format("Block pool directory %s exists", bpDir));
    }
}
Also used: FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), IOException (java.io.IOException), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File)
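
The inverse check, asserting that a block pool directory does exist, follows the same shape. The sketch below is illustrative rather than the exact Hadoop implementation, and assumes the same dataset field and the getCurrentDir()/bpid layout shown above:

// Hedged sketch: fail if the block pool directory is missing on the first volume.
public void verifyBlockPoolExists(String bpid) throws IOException {
    FsVolumeImpl volume;
    try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
        volume = (FsVolumeImpl) references.get(0);
    }
    File bpDir = new File(volume.getCurrentDir(), bpid);
    if (!bpDir.exists()) {
        throw new IOException(String.format("Block pool directory %s does not exist", bpDir));
    }
}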

Example 4 with FsVolumeReferences

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.

From class FsDatasetImplTestUtils, method createReplicaUnderRecovery:

@Override
public Replica createReplicaUnderRecovery(ExtendedBlock block, long recoveryId) throws IOException {
    try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
        ReplicaUnderRecovery rur = new ReplicaUnderRecovery(new FinalizedReplica(block.getLocalBlock(), volume, volume.getCurrentDir().getParentFile()), recoveryId);
        dataset.volumeMap.add(block.getBlockPoolId(), rur);
        return rur;
    }
}
Also used: FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), ReplicaUnderRecovery (org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica)
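
A hedged sketch of how a test might call this utility and check the result; it assumes a FsDatasetImplTestUtils instance named testUtils, an ExtendedBlock named block that already exists on the dataset, and static JUnit assertions, none of which appear in the example itself:

// Illustrative usage only: create a replica under recovery and verify its state
// and recovery id. The variable names are assumptions, not part of the example above.
Replica rur = testUtils.createReplicaUnderRecovery(block, 1001L);
assertEquals(HdfsServerConstants.ReplicaState.RUR, rur.getState());
assertEquals(1001L, ((ReplicaUnderRecovery) rur).getRecoveryID());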

Example 5 with FsVolumeReferences

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.

From class FsDatasetImplTestUtils, method getRawCapacity:

@Override
public long getRawCapacity() throws IOException {
    try (FsVolumeReferences volRefs = dataset.getFsVolumeReferences()) {
        Preconditions.checkState(volRefs.size() != 0);
        DF df = volRefs.get(0).getUsageStats(dataset.datanode.getConf());
        if (df != null) {
            return df.getCapacity();
        } else {
            return -1;
        }
    }
}
Also used: DF (org.apache.hadoop.fs.DF), FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences)
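
getRawCapacity inspects only the first volume. When the combined raw capacity of every volume is wanted, the same pattern can iterate over all references. A minimal sketch under the same assumptions (the dataset field used above); getTotalRawCapacity is an illustrative name, not a Hadoop API:

// Hypothetical variant: sum the DF capacity of every volume instead of only volRefs.get(0).
public long getTotalRawCapacity() throws IOException {
    long total = 0;
    try (FsVolumeReferences volRefs = dataset.getFsVolumeReferences()) {
        for (int i = 0; i < volRefs.size(); i++) {
            DF df = volRefs.get(i).getUsageStats(dataset.datanode.getConf());
            if (df != null) {
                total += df.getCapacity();
            }
        }
    }
    return total;
}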

Aggregations

FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences): 10 uses
File (java.io.File): 5 uses
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 4 uses
Test (org.junit.Test): 4 uses
IOException (java.io.IOException): 3 uses
RandomAccessFile (java.io.RandomAccessFile): 3 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3 uses
FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica): 3 uses
ArrayList (java.util.ArrayList): 2 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
Path (org.apache.hadoop.fs.Path): 2 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 2 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 2 uses
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 2 uses
FileOutputStream (java.io.FileOutputStream): 1 use
HashSet (java.util.HashSet): 1 use
Iterator (java.util.Iterator): 1 use
FileExistsException (org.apache.commons.io.FileExistsException): 1 use