Example 1 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class VolumeScanner, method scanBlock.

/**
   * Scan a block.
   *
   * @param cblock               The block to scan.
   * @param bytesPerSec          The bytes per second to scan at.
   *
   * @return                     The length of the block that was scanned, or
   *                               -1 if the block could not be scanned.
   */
private long scanBlock(ExtendedBlock cblock, long bytesPerSec) {
    // 'cblock' has a valid blockId and block pool id, but we don't yet know the
    // genstamp the block is supposed to have.  Ask the FsDatasetImpl for this
    // information.
    ExtendedBlock block = null;
    try {
        Block b = volume.getDataset().getStoredBlock(cblock.getBlockPoolId(), cblock.getBlockId());
        if (b == null) {
            LOG.info("Replica {} was not found in the VolumeMap for volume {}", cblock, volume);
        } else {
            block = new ExtendedBlock(cblock.getBlockPoolId(), b);
        }
    } catch (FileNotFoundException e) {
        LOG.info("FileNotFoundException while finding block {} on volume {}", cblock, volume);
    } catch (IOException e) {
        LOG.warn("I/O error while finding block {} on volume {}", cblock, volume);
    }
    if (block == null) {
        // block not found.
        return -1;
    }
    LOG.debug("start scanning block {}", block);
    BlockSender blockSender = null;
    try {
        blockSender = new BlockSender(block, 0, -1, false, true, true, datanode, null, CachingStrategy.newDropBehind());
        throttler.setBandwidth(bytesPerSec);
        long bytesRead = blockSender.sendBlock(nullStream, null, throttler);
        resultHandler.handle(block, null);
        return bytesRead;
    } catch (IOException e) {
        resultHandler.handle(block, e);
    } finally {
        IOUtils.cleanup(null, blockSender);
    }
    return -1;
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException)
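
For context, the scan path above first resolves a bare Block (block id, byte length, generation stamp) from the dataset and then pairs it with the block pool id to form the ExtendedBlock that BlockSender works on. A minimal standalone sketch of that wrapping, using made-up ids chosen purely for illustration:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class BlockWrappingSketch {
    public static void main(String[] args) {
        // Illustrative values only; VolumeScanner obtains them from FsDatasetImpl.
        Block stored = new Block(1073741825L, 134217728L, 1001L); // id, numBytes, genStamp
        ExtendedBlock eb = new ExtendedBlock("BP-0-127.0.0.1-1500000000000", stored);
        System.out.println(eb); // block pool id plus block identity, as handed to BlockSender
    }
}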

Example 2 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class BlockPoolSlice, method addToReplicasMap.

/**
   * Add replicas under the given directory to the volume map
   * @param volumeMap the replicas map
   * @param dir an input directory
   * @param lazyWriteReplicaMap Map of replicas on transient
   *                                storage.
   * @param isFinalized true if the directory has finalized replicas;
   *                    false if the directory has rbw replicas
   */
void addToReplicasMap(ReplicaMap volumeMap, File dir, final RamDiskReplicaTracker lazyWriteReplicaMap, boolean isFinalized) throws IOException {
    File[] files = fileIoProvider.listFiles(volume, dir);
    for (File file : files) {
        if (file.isDirectory()) {
            addToReplicasMap(volumeMap, file, lazyWriteReplicaMap, isFinalized);
        }
        if (isFinalized && FsDatasetUtil.isUnlinkTmpFile(file)) {
            file = recoverTempUnlinkedBlock(file);
            if (file == null) {
                // the original block still exists, so we cover it
                // in another iteration and can continue here
                continue;
            }
        }
        if (!Block.isBlockFilename(file)) {
            continue;
        }
        long genStamp = FsDatasetUtil.getGenerationStampFromFile(files, file);
        long blockId = Block.filename2id(file.getName());
        Block block = new Block(blockId, file.length(), genStamp);
        addReplicaToReplicasMap(block, volumeMap, lazyWriteReplicaMap, isFinalized);
    }
}
Also used: Block (org.apache.hadoop.hdfs.protocol.Block), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File)
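
addToReplicasMap relies on Block's static helpers to recognize block files and to recover the block id from the on-disk file name (the blk_<id> convention). A small sketch of that mapping; the path and generation stamp below are hypothetical:

import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;

public class BlockFilenameSketch {
    public static void main(String[] args) {
        // Hypothetical finalized block file, named blk_<blockId>.
        File f = new File("/data/dn/current/finalized/blk_1073741825");
        if (Block.isBlockFilename(f)) {
            long blockId = Block.filename2id(f.getName()); // 1073741825
            // The length comes from the file itself; the generation stamp would be
            // read from the matching meta file (getGenerationStampFromFile above).
            Block block = new Block(blockId, f.length(), 1001L);
            System.out.println(block);
        }
    }
}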

Example 3 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestDFSClientRetries, method testClientDNProtocolTimeout.

/** Test that timeout occurs when DN does not respond to RPC.
   * Start up a server and ask it to sleep for n seconds. Make an
   * RPC to the server and set rpcTimeout to less than n and ensure
   * that a SocketTimeoutException is obtained.
   */
@Test
public void testClientDNProtocolTimeout() throws IOException {
    final Server server = new TestServer(1, true);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
    ClientDatanodeProtocol proxy = null;
    try {
        proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 500, false, fakeBlock);
        proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
        fail("Did not get expected exception: SocketTimeoutException");
    } catch (SocketTimeoutException e) {
        LOG.info("Got the expected Exception: SocketTimeoutException");
    } finally {
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        server.stop();
    }
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), SocketTimeoutException (java.net.SocketTimeoutException), Server (org.apache.hadoop.ipc.Server), InetSocketAddress (java.net.InetSocketAddress), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), Test (org.junit.Test)

Example 4 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class DataNodeCluster, method main.

public static void main(String[] args) throws InterruptedException {
    int numDataNodes = 0;
    int numRacks = 0;
    boolean inject = false;
    long startingBlockId = 1;
    int numBlocksPerDNtoInject = 0;
    int replication = 1;
    boolean checkDataNodeAddrConfig = false;
    long simulatedCapacityPerDn = SimulatedFSDataset.DEFAULT_CAPACITY;
    String bpid = null;
    Configuration conf = new HdfsConfiguration();
    for (int i = 0; i < args.length; i++) {
        // parse command line
        if (args[i].equals("-n")) {
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("missing number of nodes");
            }
            numDataNodes = Integer.parseInt(args[i]);
        } else if (args[i].equals("-racks")) {
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing number of racks");
            }
            numRacks = Integer.parseInt(args[i]);
        } else if (args[i].equals("-r")) {
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing replication factor");
            }
            replication = Integer.parseInt(args[i]);
        } else if (args[i].equals("-d")) {
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing datanode dirs parameter");
            }
            dataNodeDirs = args[i];
        } else if (args[i].equals("-simulated")) {
            SimulatedFSDataset.setFactory(conf);
            if ((i + 1) < args.length && !args[i + 1].startsWith("-")) {
                simulatedCapacityPerDn = Long.parseLong(args[++i]);
            }
        } else if (args[i].equals("-bpid")) {
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing blockpoolid parameter");
            }
            bpid = args[i];
        } else if (args[i].equals("-inject")) {
            if (!FsDatasetSpi.Factory.getFactory(conf).isSimulated()) {
                System.out.print("-inject is valid only for simulated");
                printUsageExit();
            }
            inject = true;
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing starting block and number of blocks per DN to inject");
            }
            startingBlockId = Integer.parseInt(args[i]);
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing number of blocks to inject");
            }
            numBlocksPerDNtoInject = Integer.parseInt(args[i]);
        } else if (args[i].equals("-checkDataNodeAddrConfig")) {
            checkDataNodeAddrConfig = true;
        } else {
            printUsageExit();
        }
    }
    if (numDataNodes <= 0 || replication <= 0) {
        printUsageExit("numDataNodes and replication have to be greater than zero");
    }
    if (replication > numDataNodes) {
        printUsageExit("Replication must be less than or equal to numDataNodes");
    }
    if (bpid == null) {
        printUsageExit("BlockPoolId must be provided");
    }
    String nameNodeAdr = FileSystem.getDefaultUri(conf).getAuthority();
    if (nameNodeAdr == null) {
        System.out.println("No name node address and port in config");
        System.exit(-1);
    }
    boolean simulated = FsDatasetSpi.Factory.getFactory(conf).isSimulated();
    System.out.println("Starting " + numDataNodes + (simulated ? " Simulated " : " ") + " Data Nodes that will connect to Name Node at " + nameNodeAdr);
    System.setProperty("test.build.data", dataNodeDirs);
    long[] simulatedCapacities = new long[numDataNodes];
    for (int i = 0; i < numDataNodes; ++i) {
        simulatedCapacities[i] = simulatedCapacityPerDn;
    }
    MiniDFSCluster mc = new MiniDFSCluster();
    try {
        mc.formatDataNodeDirs();
    } catch (IOException e) {
        System.out.println("Error formating data node dirs:" + e);
    }
    String[] rack4DataNode = null;
    if (numRacks > 0) {
        System.out.println("Using " + numRacks + " racks: ");
        String rackPrefix = getUniqueRackPrefix();
        rack4DataNode = new String[numDataNodes];
        for (int i = 0; i < numDataNodes; ++i) {
            //rack4DataNode[i] = racks[i%numRacks];
            rack4DataNode[i] = rackPrefix + "-" + i % numRacks;
            System.out.println("Data Node " + i + " using " + rack4DataNode[i]);
        }
    }
    try {
        mc.startDataNodes(conf, numDataNodes, true, StartupOption.REGULAR, rack4DataNode, null, simulatedCapacities, false, checkDataNodeAddrConfig);
        // Give the DN some time to connect to NN and init storage directories.
        Thread.sleep(10 * 1000);
        if (inject) {
            long blockSize = 10;
            System.out.println("Injecting " + numBlocksPerDNtoInject + " blocks in each DN starting at blockId " + startingBlockId + " with blocksize of " + blockSize);
            Block[] blocks = new Block[numBlocksPerDNtoInject];
            long blkid = startingBlockId;
            for (int i_dn = 0; i_dn < numDataNodes; ++i_dn) {
                for (int i = 0; i < blocks.length; ++i) {
                    blocks[i] = new Block(blkid++, blockSize, CreateEditsLog.BLOCK_GENERATION_STAMP);
                }
                for (int i = 1; i <= replication; ++i) {
                    // inject blocks for dn_i into dn_i and replica in dn_i's neighbors 
                    mc.injectBlocks((i_dn + i - 1) % numDataNodes, Arrays.asList(blocks), bpid);
                    System.out.println("Injecting blocks of dn " + i_dn + " into dn" + ((i_dn + i - 1) % numDataNodes));
                }
            }
            System.out.println("Created blocks from Bids " + startingBlockId + " to " + (blkid - 1));
        }
    } catch (IOException e) {
        System.out.println("Error creating data node:" + e);
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), Block (org.apache.hadoop.hdfs.protocol.Block), IOException (java.io.IOException)
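
The replica placement in the -inject branch is plain modular arithmetic over DataNode indices: the block set generated for dn i_dn is injected into dn i_dn and its next replication-1 neighbours. A tiny sketch, with assumed values for numDataNodes and replication, that prints the same targets the loop above selects:

public class InjectTargetsSketch {
    public static void main(String[] args) {
        int numDataNodes = 4;  // assumed value for illustration
        int replication = 2;   // assumed value for illustration
        for (int i_dn = 0; i_dn < numDataNodes; ++i_dn) {
            for (int i = 1; i <= replication; ++i) {
                int target = (i_dn + i - 1) % numDataNodes; // same expression as injectBlocks above
                System.out.println("blocks of dn " + i_dn + " -> dn " + target);
            }
        }
        // With 4 DataNodes and replication 2: dn 0 -> {0, 1}, dn 1 -> {1, 2}, dn 2 -> {2, 3}, dn 3 -> {3, 0}.
    }
}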

Example 5 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestTriggerBlockReport, method testTriggerBlockReport.

private void testTriggerBlockReport(boolean incremental) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Set a really long value for dfs.blockreport.intervalMsec and
    // dfs.heartbeat.interval, so that incremental block reports and heartbeats
    // won't be sent during this test unless they're triggered
    // manually.
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    DatanodeProtocolClientSideTranslatorPB spy = InternalDataNodeTestUtils.spyOnBposToNN(cluster.getDataNodes().get(0), cluster.getNameNode());
    DFSTestUtil.createFile(fs, new Path("/abc"), 16, (short) 1, 1L);
    // We should get 1 incremental block report.
    Mockito.verify(spy, timeout(60000).times(1)).blockReceivedAndDeleted(any(DatanodeRegistration.class), anyString(), any(StorageReceivedDeletedBlocks[].class));
    // We should not receive any more full or incremental block reports,
    // since the interval we configured is so long.
    for (int i = 0; i < 3; i++) {
        Thread.sleep(10);
        Mockito.verify(spy, times(0)).blockReport(any(DatanodeRegistration.class), anyString(), any(StorageBlockReport[].class), Mockito.<BlockReportContext>anyObject());
        Mockito.verify(spy, times(1)).blockReceivedAndDeleted(any(DatanodeRegistration.class), anyString(), any(StorageReceivedDeletedBlocks[].class));
    }
    // Create a fake block deletion notification on the DataNode.
    // This will be sent with the next incremental block report.
    ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(new Block(5678, 512, 1000), BlockStatus.DELETED_BLOCK, null);
    DataNode datanode = cluster.getDataNodes().get(0);
    BPServiceActor actor = datanode.getAllBpOs().get(0).getBPServiceActors().get(0);
    final FsDatasetSpi<?> dataset = datanode.getFSDataset();
    final DatanodeStorage storage;
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        storage = dataset.getStorage(volumes.get(0).getStorageID());
    }
    actor.getIbrManager().addRDBI(rdbi, storage);
    // Manually trigger a block report.
    datanode.triggerBlockReport(new BlockReportOptions.Factory().setIncremental(incremental).build());
    // triggerBlockReport returns before the block report is
    // actually sent.  Wait for it to be sent here.
    if (incremental) {
        Mockito.verify(spy, timeout(60000).times(2)).blockReceivedAndDeleted(any(DatanodeRegistration.class), anyString(), any(StorageReceivedDeletedBlocks[].class));
    } else {
        Mockito.verify(spy, timeout(60000)).blockReport(any(DatanodeRegistration.class), anyString(), any(StorageBlockReport[].class), Mockito.<BlockReportContext>anyObject());
    }
    cluster.shutdown();
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), FileSystem (org.apache.hadoop.fs.FileSystem), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), Block (org.apache.hadoop.hdfs.protocol.Block), BlockReportOptions (org.apache.hadoop.hdfs.client.BlockReportOptions), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
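
Which report gets sent is controlled by the BlockReportOptions passed to triggerBlockReport: incremental sends the queued received/deleted block info, non-incremental requests a full per-storage block report. A minimal sketch of building both variants used by the test; nothing here is DataNode-specific:

import org.apache.hadoop.hdfs.client.BlockReportOptions;

public class BlockReportOptionsSketch {
    public static void main(String[] args) {
        BlockReportOptions incremental =
            new BlockReportOptions.Factory().setIncremental(true).build();
        BlockReportOptions full =
            new BlockReportOptions.Factory().setIncremental(false).build();
        // Either object is then passed to DataNode#triggerBlockReport, as in the test above.
        System.out.println(incremental + " / " + full);
    }
}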

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block) 155
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 79
Test (org.junit.Test) 77
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 74
Path (org.apache.hadoop.fs.Path) 28
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) 26
IOException (java.io.IOException) 24
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) 22
Configuration (org.apache.hadoop.conf.Configuration) 20
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 17
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) 17
CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock) 17
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) 15
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 14
ArrayList (java.util.ArrayList) 12
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) 11
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission) 10
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) 10