
Example 1 with BlockIterator

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator in the Apache Hadoop project.

From the class VolumeScanner, the method enableBlockPoolId:

/**
   * Allow the scanner to scan the given block pool.
   *
   * @param bpid       The block pool id.
   */
public synchronized void enableBlockPoolId(String bpid) {
    for (BlockIterator iter : blockIters) {
        if (iter.getBlockPoolId().equals(bpid)) {
            LOG.warn("{}: already enabled scanning on block pool {}", this, bpid);
            return;
        }
    }
    BlockIterator iter = null;
    try {
        // Load a block iterator for the next block pool on the volume.
        iter = volume.loadBlockIterator(bpid, BLOCK_ITERATOR_NAME);
        LOG.trace("{}: loaded block iterator for {}.", this, bpid);
    } catch (FileNotFoundException e) {
        LOG.debug("{}: failed to load block iterator: " + e.getMessage(), this);
    } catch (IOException e) {
        LOG.warn("{}: failed to load block iterator.", this, e);
    }
    if (iter == null) {
        iter = volume.newBlockIterator(bpid, BLOCK_ITERATOR_NAME);
        LOG.trace("{}: created new block iterator for {}.", this, bpid);
    }
    iter.setMaxStalenessMs(conf.maxStalenessMs);
    blockIters.add(iter);
    notify();
}
Also used: BlockIterator (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException)
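
The heart of this method is a load-or-create fallback: the scanner first tries to resume the cursor persisted on disk, and only starts from the beginning of the block pool when no usable cursor exists. Below is a minimal sketch of that pattern in isolation, assuming an FsVolumeSpi handle is in scope; the iterator name "scanner" and the class name CursorBootstrap are illustrative, not the Hadoop constants:

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;

class CursorBootstrap {
    // Resume the persisted scan cursor if one exists; otherwise start a
    // fresh iterator at the beginning of the block pool.
    static BlockIterator loadOrCreate(FsVolumeSpi volume, String bpid) {
        BlockIterator iter = null;
        try {
            iter = volume.loadBlockIterator(bpid, "scanner"); // name is illustrative
        } catch (FileNotFoundException e) {
            // No cursor has been saved yet for this block pool.
        } catch (IOException e) {
            // A cursor exists but could not be read; rescan from scratch.
        }
        return (iter != null) ? iter : volume.newBlockIterator(bpid, "scanner");
    }
}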

Example 2 with BlockIterator

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator in the Apache Hadoop project.

From the class VolumeScanner, the method disableBlockPoolId:

/**
   * Disallow the scanner from scanning the given block pool.
   *
   * @param bpid       The block pool id.
   */
public synchronized void disableBlockPoolId(String bpid) {
    Iterator<BlockIterator> i = blockIters.iterator();
    while (i.hasNext()) {
        BlockIterator iter = i.next();
        if (iter.getBlockPoolId().equals(bpid)) {
            LOG.trace("{}: disabling scanning on block pool {}", this, bpid);
            i.remove();
            IOUtils.cleanup(null, iter);
            if (curBlockIter == iter) {
                curBlockIter = null;
            }
            notify();
            return;
        }
    }
    LOG.warn("{}: can't remove block pool {}, because it was never " + "added.", this, bpid);
}
Also used: BlockIterator (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator)
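
Note that removal goes through the Iterator rather than the list itself; that is the standard idiom for dropping an element mid-iteration without risking a ConcurrentModificationException. A self-contained illustration of the idiom, with made-up pool names:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class RemoveWhileIterating {
    public static void main(String[] args) {
        List<String> pools = new ArrayList<>();
        pools.add("BP-1");
        pools.add("BP-2");
        pools.add("BP-3");
        // i.remove() deletes the element last returned by next() and keeps
        // the iterator valid; calling pools.remove(...) here and then
        // continuing to iterate would throw ConcurrentModificationException.
        for (Iterator<String> i = pools.iterator(); i.hasNext(); ) {
            if (i.next().equals("BP-2")) {
                i.remove();
            }
        }
        System.out.println(pools); // prints [BP-1, BP-3]
    }
}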

Example 3 with BlockIterator

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator in the Apache Hadoop project.

From the class VolumeScanner, the method run:

@Override
public void run() {
    // Record the minute on which the scanner started.
    this.startMinute = TimeUnit.MINUTES.convert(Time.monotonicNow(), TimeUnit.MILLISECONDS);
    this.curMinute = startMinute;
    try {
        LOG.trace("{}: thread starting.", this);
        resultHandler.setup(this);
        try {
            long timeout = 0;
            while (true) {
                ExtendedBlock suspectBlock = null;
                // Take the lock to check if we should stop, and access the
                // suspect block list.
                synchronized (this) {
                    if (stopping) {
                        break;
                    }
                    if (timeout > 0) {
                        LOG.debug("{}: wait for {} milliseconds", this, timeout);
                        wait(timeout);
                        if (stopping) {
                            break;
                        }
                    }
                    suspectBlock = popNextSuspectBlock();
                }
                timeout = runLoop(suspectBlock);
            }
        } catch (InterruptedException e) {
            // We are exiting because of an InterruptedException,
            // probably sent by VolumeScanner#shutdown.
            LOG.trace("{} exiting because of InterruptedException.", this);
        } catch (Throwable e) {
            LOG.error("{} exiting because of exception ", this, e);
        }
        LOG.info("{} exiting.", this);
        // Save the current position of all block iterators and close them.
        for (BlockIterator iter : blockIters) {
            saveBlockIterator(iter);
            IOUtils.cleanup(null, iter);
        }
    } finally {
        // When the VolumeScanner exits, release the reference we were holding
        // on the volume.  This will allow the volume to be removed later.
        IOUtils.cleanup(null, ref);
    }
}
Also used: BlockIterator (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)
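
Structurally, the loop is a guarded wait: the scanner sleeps on its own monitor between passes, and the enable/disable/shutdown paths call notify() so a sleeping thread re-checks its state right away. Here is a stripped-down, runnable sketch of the same pattern; all names are illustrative, not the actual Hadoop classes:

public class StoppableWorker implements Runnable {
    private boolean stopping = false;

    public synchronized void shutdown() {
        stopping = true;
        notify(); // wake the worker so it observes the flag promptly
    }

    @Override
    public void run() {
        long timeout = 0;
        try {
            while (true) {
                synchronized (this) {
                    if (stopping) {
                        break;
                    }
                    if (timeout > 0) {
                        wait(timeout); // releases the monitor while sleeping
                        if (stopping) {
                            break;
                        }
                    }
                }
                timeout = doOneUnitOfWork();
            }
        } catch (InterruptedException e) {
            // Treated as a shutdown request, mirroring VolumeScanner#run.
        }
        System.out.println("worker exiting");
    }

    private long doOneUnitOfWork() {
        System.out.println("scanning...");
        return 500; // next delay, in milliseconds
    }

    public static void main(String[] args) throws InterruptedException {
        StoppableWorker w = new StoppableWorker();
        Thread t = new Thread(w);
        t.start();
        Thread.sleep(1600); // let a few work/wait cycles run
        w.shutdown();
        t.join();
    }
}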

Example 4 with BlockIterator

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator in the Apache Hadoop project.

From the class VolumeScanner, the method findNextUsableBlockIter:

/**
   * Find a usable block iterator.<p/>
   *
   * We will consider available block iterators in order.  This property is
   * important so that we don't keep rescanning the same block pool id over
   * and over, while other block pools stay unscanned.<p/>
   *
   * A block pool is always ready to scan if the iterator is not at EOF.  If
   * the iterator is at EOF, the block pool will be ready to scan when
   * conf.scanPeriodMs milliseconds have elapsed since the iterator was last
   * rewound.<p/>
   *
   * @return                     0 if we found a usable block iterator; the
   *                               length of time we should delay before
   *                               checking again otherwise.
   */
private synchronized long findNextUsableBlockIter() {
    int numBlockIters = blockIters.size();
    if (numBlockIters == 0) {
        LOG.debug("{}: no block pools are registered.", this);
        return Long.MAX_VALUE;
    }
    int curIdx;
    if (curBlockIter == null) {
        curIdx = 0;
    } else {
        curIdx = blockIters.indexOf(curBlockIter);
        Preconditions.checkState(curIdx >= 0);
    }
    // Note that this has to be wall-clock time, not monotonic time.  This is
    // because the time saved in the cursor file is a wall-clock time.  We do
    // not want to save a monotonic time in the cursor file, because it resets
    // every time the machine reboots (on most platforms).
    long nowMs = Time.now();
    long minTimeoutMs = Long.MAX_VALUE;
    for (int i = 0; i < numBlockIters; i++) {
        int idx = (curIdx + i + 1) % numBlockIters;
        BlockIterator iter = blockIters.get(idx);
        if (!iter.atEnd()) {
            LOG.info("Now scanning bpid {} on volume {}", iter.getBlockPoolId(), volume);
            curBlockIter = iter;
            return 0L;
        }
        long iterStartMs = iter.getIterStartMs();
        long waitMs = (iterStartMs + conf.scanPeriodMs) - nowMs;
        if (waitMs <= 0) {
            iter.rewind();
            LOG.info("Now rescanning bpid {} on volume {}, after more than " + "{} hour(s)", iter.getBlockPoolId(), volume, TimeUnit.HOURS.convert(conf.scanPeriodMs, TimeUnit.MILLISECONDS));
            curBlockIter = iter;
            return 0L;
        }
        minTimeoutMs = Math.min(minTimeoutMs, waitMs);
    }
    LOG.info("{}: no suitable block pools found to scan.  Waiting {} ms.", this, minTimeoutMs);
    return minTimeoutMs;
}
Also used: BlockIterator (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator)
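
The fairness guarantee in the javadoc comes down to the modular index arithmetic: probing starts at the entry just after curBlockIter and wraps around, so every other block pool gets a turn before the current one is reconsidered. A tiny standalone demonstration of the probe order:

public class RoundRobinDemo {
    public static void main(String[] args) {
        int numBlockIters = 3;
        int curIdx = 1; // position of the current iterator
        for (int i = 0; i < numBlockIters; i++) {
            int idx = (curIdx + i + 1) % numBlockIters;
            System.out.print(idx + " ");
        }
        // prints: 2 0 1  (starts just after curIdx, wraps, ends on curIdx)
    }
}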

Example 5 with BlockIterator

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator in the Apache Hadoop project.

From the class TestBlockScanner, the method testVolumeIteratorImpl:

/**
   * Test iterating through a bunch of blocks in a volume using a volume
   * iterator.<p/>
   *
   * We will rewind the iterator when about halfway through the blocks.
   *
   * @param numFiles        The number of files to create.
   * @param maxStaleness    The maximum staleness to allow with the iterator.
   * @throws Exception
   */
private void testVolumeIteratorImpl(int numFiles, long maxStaleness) throws Exception {
    Configuration conf = new Configuration();
    disableBlockScanner(conf);
    TestContext ctx = new TestContext(conf, 1);
    ctx.createFiles(0, numFiles, 1);
    assertEquals(1, ctx.volumes.size());
    FsVolumeSpi volume = ctx.volumes.get(0);
    ExtendedBlock savedBlock = null, loadedBlock = null;
    boolean testedRewind = false, testedSave = false, testedLoad = false;
    int blocksProcessed = 0, savedBlocksProcessed = 0;
    try {
        List<BPOfferService> bpos = ctx.datanode.getAllBpOs();
        assertEquals(1, bpos.size());
        BlockIterator iter = volume.newBlockIterator(ctx.bpids[0], "test");
        assertEquals(ctx.bpids[0], iter.getBlockPoolId());
        iter.setMaxStalenessMs(maxStaleness);
        while (true) {
            HashSet<ExtendedBlock> blocks = new HashSet<ExtendedBlock>();
            for (int blockIdx = 0; blockIdx < numFiles; blockIdx++) {
                blocks.add(ctx.getFileBlock(0, blockIdx));
            }
            while (true) {
                ExtendedBlock block = iter.nextBlock();
                if (block == null) {
                    break;
                }
                blocksProcessed++;
                LOG.info("BlockIterator for {} found block {}, blocksProcessed = {}", volume, block, blocksProcessed);
                if (testedSave && (savedBlock == null)) {
                    savedBlock = block;
                }
                if (testedLoad && (loadedBlock == null)) {
                    loadedBlock = block;
                    // The block that we get back right after loading the iterator
                    // should be the same block we got back right after saving
                    // the iterator.
                    assertEquals(savedBlock, loadedBlock);
                }
                boolean blockRemoved = blocks.remove(block);
                assertTrue("Found unknown block " + block, blockRemoved);
                if (blocksProcessed > (numFiles / 3)) {
                    if (!testedSave) {
                        LOG.info("Processed {} blocks out of {}.  Saving iterator.", blocksProcessed, numFiles);
                        iter.save();
                        testedSave = true;
                        savedBlocksProcessed = blocksProcessed;
                    }
                }
                if (blocksProcessed > (numFiles / 2)) {
                    if (!testedRewind) {
                        LOG.info("Processed {} blocks out of {}.  Rewinding iterator.", blocksProcessed, numFiles);
                        iter.rewind();
                        break;
                    }
                }
                if (blocksProcessed > ((2 * numFiles) / 3)) {
                    if (!testedLoad) {
                        LOG.info("Processed {} blocks out of {}.  Loading iterator.", blocksProcessed, numFiles);
                        iter = volume.loadBlockIterator(ctx.bpids[0], "test");
                        iter.setMaxStalenessMs(maxStaleness);
                        break;
                    }
                }
            }
            if (!testedRewind) {
                testedRewind = true;
                blocksProcessed = 0;
                LOG.info("Starting again at the beginning...");
                continue;
            }
            if (!testedLoad) {
                testedLoad = true;
                blocksProcessed = savedBlocksProcessed;
                LOG.info("Starting again at the load point...");
                continue;
            }
            assertEquals(numFiles, blocksProcessed);
            break;
        }
    } finally {
        ctx.close();
    }
}
Also used: BlockIterator (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator), Configuration (org.apache.hadoop.conf.Configuration), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), HashSet (java.util.HashSet)
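
Condensed, the lifecycle this test exercises is: create an iterator, walk a few blocks, save() the cursor, rewind() to the start, and later loadBlockIterator() to resume from the save point. A short sketch under the assumption that a volume and block pool id are already set up; assertions and error handling are omitted, and the class name IteratorLifecycleSketch is made up:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;

class IteratorLifecycleSketch {
    static void walk(FsVolumeSpi volume, String bpid) throws IOException {
        BlockIterator iter = volume.newBlockIterator(bpid, "test");
        ExtendedBlock block = iter.nextBlock(); // advance past the first block
        iter.save();                            // persist the cursor to disk
        iter.rewind();                          // jump back to the beginning
        iter = volume.loadBlockIterator(bpid, "test"); // resume at the save point
        while ((block = iter.nextBlock()) != null) {
            // ... scan the remaining blocks; nextBlock() returns null at EOF ...
        }
        iter.close(); // BlockIterator is Closeable (hence IOUtils.cleanup above)
    }
}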

Aggregations

- BlockIterator (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator): 5 uses
- ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2 uses
- FileNotFoundException (java.io.FileNotFoundException): 1 use
- IOException (java.io.IOException): 1 use
- HashSet (java.util.HashSet): 1 use
- Configuration (org.apache.hadoop.conf.Configuration): 1 use
- FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 1 use