Example 36 with DatanodeStorage

use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.

the class TestIncrementalBlockReports method injectBlockReceived.

/**
   * Inject a fake 'received' block into the BPServiceActor state.
   */
private void injectBlockReceived() {
    ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(getDummyBlock(), BlockStatus.RECEIVED_BLOCK, null);
    DatanodeStorage s = singletonDn.getFSDataset().getStorage(storageUuid);
    actor.getIbrManager().notifyNamenodeBlock(rdbi, s, false);
}
Also used : DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
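
To see those moving parts in isolation, here is a minimal sketch, assuming only the constructors used above: it fabricates a block, wraps it in the same 'received, no delete hint' notification, and binds it to a made-up storage. The class name, block values, and the use of DatanodeStorage.generateUuid() for a throwaway storage ID are illustrative; nothing here touches a real BPServiceActor or FsDataset.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;

public class FakeReceivedBlockSketch {
    public static void main(String[] args) {
        // Fabricated block: id, length, and generation stamp are arbitrary values.
        Block fakeBlock = new Block(1000L, 100L, 1001L);
        // The same 'received, no delete hint' shape the test injects.
        ReceivedDeletedBlockInfo rdbi =
            new ReceivedDeletedBlockInfo(fakeBlock, BlockStatus.RECEIVED_BLOCK, null);
        // A stand-in storage; the real test resolves this from the FsDataset.
        DatanodeStorage storage = new DatanodeStorage(DatanodeStorage.generateUuid());
        System.out.println(storage.getStorageID() + " -> " + rdbi);
    }
}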

Example 37 with DatanodeStorage

use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.

the class TestIncrementalBrVariations method verifyIncrementalBlockReports.

public void verifyIncrementalBlockReports(boolean splitReports) throws IOException {
    // Get the block list for the file with the block locations.
    LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());
    try (FsDatasetSpi.FsVolumeReferences volumes = dn0.getFSDataset().getFsVolumeReferences()) {
        // We will send 'fake' incremental block reports to the NN that look
        // like they originated from DN 0.
        StorageReceivedDeletedBlocks[] reports = new StorageReceivedDeletedBlocks[volumes.size()];
        // Lie to the NN that one block on each storage has been deleted.
        for (int i = 0; i < reports.length; ++i) {
            FsVolumeSpi volume = volumes.get(i);
            boolean foundBlockOnStorage = false;
            ReceivedDeletedBlockInfo[] rdbi = new ReceivedDeletedBlockInfo[1];
            // Find the first block on this storage and mark it as deleted for the report.
            for (LocatedBlock block : blocks.getLocatedBlocks()) {
                if (block.getStorageIDs()[0].equals(volume.getStorageID())) {
                    rdbi[0] = new ReceivedDeletedBlockInfo(block.getBlock().getLocalBlock(), ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null);
                    foundBlockOnStorage = true;
                    break;
                }
            }
            assertTrue(foundBlockOnStorage);
            reports[i] = new StorageReceivedDeletedBlocks(new DatanodeStorage(volume.getStorageID()), rdbi);
            if (splitReports) {
                // If we are splitting reports then send the report for this storage now.
                StorageReceivedDeletedBlocks[] singletonReport = { reports[i] };
                cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, singletonReport);
            }
        }
        if (!splitReports) {
            // Send a combined report.
            cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);
        }
        // Make sure that the deleted block from each storage was picked up
        // by the NameNode.  IBRs are async, make sure the NN processes
        // all of them.
        cluster.getNamesystem().getBlockManager().flushBlockOps();
        assertThat(cluster.getNamesystem().getMissingBlocksCount(), is((long) reports.length));
    }
}
Also used : FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi)
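
The heart of this test is the per-storage report it fabricates. Here is a hedged, self-contained sketch of just that assembly step, with a made-up storage ID and block and no MiniDFSCluster; the class name and values are illustrative only.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;

public class DeletionReportSketch {
    public static void main(String[] args) {
        // One 'deleted' entry, as the test fakes for each storage.
        ReceivedDeletedBlockInfo[] deleted = {
            new ReceivedDeletedBlockInfo(
                new Block(42L, 0L, 1L), BlockStatus.DELETED_BLOCK, null)
        };
        // Wrap it in a per-storage report, keyed by a fabricated storage ID.
        StorageReceivedDeletedBlocks report = new StorageReceivedDeletedBlocks(
            new DatanodeStorage("DS-fake-storage-uuid"), deleted);
        System.out.println(report.getStorage().getStorageID()
            + ": " + report.getBlocks().length + " deleted block(s)");
    }
}

Whether such reports go out one RPC at a time (splitReports) or as a single combined array only changes how many blockReceivedAndDeleted() calls the NameNode sees; each per-storage payload is identical.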

Example 38 with DatanodeStorage

use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.

the class DatanodeProtocolServerSideTranslatorPB method blockReceivedAndDeleted.

@Override
public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(RpcController controller, BlockReceivedAndDeletedRequestProto request) throws ServiceException {
    List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
    StorageReceivedDeletedBlocks[] info = new StorageReceivedDeletedBlocks[sBlocks.size()];
    for (int i = 0; i < sBlocks.size(); i++) {
        StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
        List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
        ReceivedDeletedBlockInfo[] rdBlocks = new ReceivedDeletedBlockInfo[list.size()];
        for (int j = 0; j < list.size(); j++) {
            rdBlocks[j] = PBHelper.convert(list.get(j));
        }
        if (sBlock.hasStorage()) {
            info[i] = new StorageReceivedDeletedBlocks(PBHelperClient.convert(sBlock.getStorage()), rdBlocks);
        } else {
            info[i] = new StorageReceivedDeletedBlocks(new DatanodeStorage(sBlock.getStorageUuid()), rdBlocks);
        }
    }
    try {
        impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()), request.getBlockPoolId(), info);
    } catch (IOException e) {
        throw new ServiceException(e);
    }
    return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
}
Also used : ReceivedDeletedBlockInfoProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto), ServiceException(com.google.protobuf.ServiceException), StorageReceivedDeletedBlocksProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto), DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), IOException(java.io.IOException), ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
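
The interesting branch is the compatibility fallback: when a report carries only a bare storage UUID (an older wire format), the translator rebuilds a DatanodeStorage from it, which the single-argument constructor fills in with default state and type. A small sketch of that decision follows; storageFor is a hypothetical helper for illustration, not a Hadoop method.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

public class StorageFallbackSketch {
    // Hypothetical helper, for illustration only: prefer the full DatanodeStorage
    // when the proto carried one, else rebuild a default one from the bare UUID.
    static DatanodeStorage storageFor(String uuid, DatanodeStorage fromProto) {
        return (fromProto != null)
            ? fromProto
            : new DatanodeStorage(uuid); // defaults to State.NORMAL, StorageType.DEFAULT
    }

    public static void main(String[] args) {
        DatanodeStorage legacy = storageFor("DS-legacy-uuid", null);
        DatanodeStorage modern = storageFor("DS-modern-uuid",
            new DatanodeStorage("DS-modern-uuid",
                DatanodeStorage.State.NORMAL, StorageType.SSD));
        System.out.println(legacy + " / " + modern);
    }
}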

Example 39 with DatanodeStorage

use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.

the class IncrementalBlockReportManager method generateIBRs.

private synchronized StorageReceivedDeletedBlocks[] generateIBRs() {
    final List<StorageReceivedDeletedBlocks> reports = new ArrayList<>(pendingIBRs.size());
    for (Map.Entry<DatanodeStorage, PerStorageIBR> entry : pendingIBRs.entrySet()) {
        final PerStorageIBR perStorage = entry.getValue();
        // Send newly-received and deleted block IDs to the NameNode.
        final ReceivedDeletedBlockInfo[] rdbi = perStorage.removeAll();
        if (rdbi != null) {
            reports.add(new StorageReceivedDeletedBlocks(entry.getKey(), rdbi));
        }
    }
    readyToSend = false;
    return reports.toArray(new StorageReceivedDeletedBlocks[reports.size()]);
}
Also used : DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), ArrayList(java.util.ArrayList), StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), Map(java.util.Map), ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
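
The pattern is generic: accumulate notifications in per-storage buckets, then drain every non-empty bucket into one report apiece under the same lock. A hedged, Hadoop-free sketch of the same drain pattern, with all class and method names illustrative:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PendingDrainSketch {
    private final Map<String, List<String>> pending = new HashMap<>();

    synchronized void notifyBlock(String storageId, String blockId) {
        // Bucket each notification under its storage key.
        pending.computeIfAbsent(storageId, k -> new ArrayList<>()).add(blockId);
    }

    synchronized Map<String, List<String>> drain() {
        // Snapshot and empty every non-empty bucket in one locked pass,
        // mirroring generateIBRs() building one report per storage.
        Map<String, List<String>> reports = new HashMap<>();
        for (Map.Entry<String, List<String>> e : pending.entrySet()) {
            if (!e.getValue().isEmpty()) {
                reports.put(e.getKey(), new ArrayList<>(e.getValue()));
                e.getValue().clear();
            }
        }
        return reports;
    }

    public static void main(String[] args) {
        PendingDrainSketch ibr = new PendingDrainSketch();
        ibr.notifyBlock("DS-1", "blk_1001");
        ibr.notifyBlock("DS-1", "blk_1002");
        ibr.notifyBlock("DS-2", "blk_2001");
        // e.g. {DS-1=[blk_1001, blk_1002], DS-2=[blk_2001]} (map order not guaranteed)
        System.out.println(ibr.drain());
    }
}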

Example 40 with DatanodeStorage

use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.

the class FsDatasetImpl method activateVolume.

/**
   * Activate a volume to serve requests.
   * @throws IOException if the storage UUID already exists.
   */
private void activateVolume(ReplicaMap replicaMap, Storage.StorageDirectory sd, StorageType storageType, FsVolumeReference ref) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        DatanodeStorage dnStorage = storageMap.get(sd.getStorageUuid());
        if (dnStorage != null) {
            final String errorMsg = String.format("Found duplicated storage UUID: %s in %s.", sd.getStorageUuid(), sd.getVersionFile());
            LOG.error(errorMsg);
            throw new IOException(errorMsg);
        }
        volumeMap.addAll(replicaMap);
        storageMap.put(sd.getStorageUuid(), new DatanodeStorage(sd.getStorageUuid(), DatanodeStorage.State.NORMAL, storageType));
        asyncDiskService.addVolume((FsVolumeImpl) ref.getVolume());
        volumes.addVolume(ref);
    }
}
Also used : AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock), DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException)
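
A reduced sketch of the registration step above: reject duplicate storage UUIDs, then record the volume under a fresh DatanodeStorage in NORMAL state. The map and method here are stand-ins for FsDatasetImpl's internals, not its actual API.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

public class ActivateSketch {
    private final Map<String, DatanodeStorage> storageMap = new HashMap<>();

    synchronized void register(String storageUuid, StorageType type) throws IOException {
        // Same guard as activateVolume(): a storage UUID may be registered once.
        if (storageMap.containsKey(storageUuid)) {
            throw new IOException("Found duplicated storage UUID: " + storageUuid);
        }
        storageMap.put(storageUuid,
            new DatanodeStorage(storageUuid, DatanodeStorage.State.NORMAL, type));
    }

    public static void main(String[] args) throws IOException {
        ActivateSketch dataset = new ActivateSketch();
        dataset.register("DS-vol-1", StorageType.DISK);
        try {
            dataset.register("DS-vol-1", StorageType.DISK); // duplicate: rejected
        } catch (IOException expected) {
            System.out.println(expected.getMessage());
        }
    }
}

The guard matters because the map is keyed by storage UUID: without it, a second volume claiming the same UUID would silently overwrite the first entry.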

Aggregations

DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 47 uses
Test (org.junit.Test): 27 uses
ArrayList (java.util.ArrayList): 16 uses
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 13 uses
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 13 uses
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 12 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 11 uses
Path (org.apache.hadoop.fs.Path): 10 uses
Configuration (org.apache.hadoop.conf.Configuration): 8 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 uses
StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks): 8 uses
Map (java.util.Map): 7 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 7 uses
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 7 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 5 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5 uses
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 5 uses
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 5 uses