Search in sources:

Example 16 with FsVolumeReference

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

From the class FsDatasetImpl, method recoverRbwImpl.

private ReplicaHandler recoverRbwImpl(ReplicaInPipeline rbw, ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        // check generation stamp
        long replicaGenerationStamp = rbw.getGenerationStamp();
        if (replicaGenerationStamp < b.getGenerationStamp() || replicaGenerationStamp > newGS) {
            throw new ReplicaNotFoundException(ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + b + ". Expected GS range is [" + b.getGenerationStamp() + ", " + newGS + "].");
        }
        // check replica length
        long bytesAcked = rbw.getBytesAcked();
        long numBytes = rbw.getNumBytes();
        if (bytesAcked < minBytesRcvd || numBytes > maxBytesRcvd) {
            throw new ReplicaNotFoundException("Unmatched length replica " + rbw + ": BytesAcked = " + bytesAcked + " BytesRcvd = " + numBytes + " are not in the range of [" + minBytesRcvd + ", " + maxBytesRcvd + "].");
        }
        FsVolumeReference ref = rbw.getReplicaInfo().getVolume().obtainReference();
        try {
            // Truncate the replica to the acked length, since any corrupt
            // data written after the acked length can go unnoticed.
            if (numBytes > bytesAcked) {
                rbw.getReplicaInfo().truncateBlock(bytesAcked);
                rbw.setNumBytes(bytesAcked);
                rbw.setLastChecksumAndDataLen(bytesAcked, null);
            }
            // bump the replica's generation stamp to newGS
            rbw.getReplicaInfo().bumpReplicaGS(newGS);
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
        return new ReplicaHandler(rbw, ref);
    }
}
Also used : FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
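
The pattern above is the core FsVolumeReference idiom: obtain the reference before touching the replica's volume, release it on any failure, and otherwise hand ownership to the ReplicaHandler, whose close() releases it for the caller. A minimal sketch of that hand-off, with a hypothetical prepareReplica() standing in for the truncate-and-bump logic:

private ReplicaHandler obtainHandler(ReplicaInPipeline replica) throws IOException {
    // obtainReference() pins the volume so it cannot be removed mid-operation;
    // it may throw ClosedChannelException if the volume is shutting down.
    FsVolumeReference ref = replica.getReplicaInfo().getVolume().obtainReference();
    try {
        // hypothetical fallible work on the replica
        prepareReplica(replica);
    } catch (IOException e) {
        // never leak the reference: release it before propagating the failure
        IOUtils.cleanup(null, ref);
        throw e;
    }
    // on success the ReplicaHandler takes ownership; its close() releases the reference
    return new ReplicaHandler(replica, ref);
}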

Example 17 with FsVolumeReference

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

From the class FsDatasetImpl, method moveBlockAcrossVolumes.

/**
   * Moves a given block from one volume to another volume. This is used by disk
   * balancer.
   *
   * @param block       - ExtendedBlock
   * @param destination - Destination volume
   * @return Old replica info
   */
@Override
public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block, FsVolumeSpi destination) throws IOException {
    ReplicaInfo replicaInfo = getReplicaInfo(block);
    if (replicaInfo.getState() != ReplicaState.FINALIZED) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
    }
    FsVolumeReference volumeRef = null;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        volumeRef = destination.obtainReference();
    }
    try {
        moveBlock(block, replicaInfo, volumeRef);
    } finally {
        if (volumeRef != null) {
            volumeRef.close();
        }
    }
    return replicaInfo;
}
Also used : FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock)
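
Since FsVolumeReference extends java.io.Closeable, the explicit try/finally above can also be written with try-with-resources. A sketch of the equivalent structure, assuming the same datasetLock field and moveBlock() helper as the method above:

FsVolumeReference volumeRef;
try (AutoCloseableLock lock = datasetLock.acquire()) {
    // take the reference under the dataset lock, as above
    volumeRef = destination.obtainReference();
}
// the reference is released automatically, even if moveBlock() throws
try (FsVolumeReference ref = volumeRef) {
    moveBlock(block, replicaInfo, ref);
}
return replicaInfo;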

Example 18 with FsVolumeReference

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

From the class FsDatasetImpl, method addVolume.

private void addVolume(Collection<StorageLocation> dataLocations, Storage.StorageDirectory sd) throws IOException {
    final StorageLocation storageLocation = sd.getStorageLocation();
    // If IOException raises from FsVolumeImpl() or getVolumeMap(), there is
    // nothing needed to be rolled back to make various data structures, e.g.,
    // storageMap and asyncDiskService, consistent.
    FsVolumeImpl fsVolume = new FsVolumeImplBuilder()
            .setDataset(this)
            .setStorageID(sd.getStorageUuid())
            .setStorageDirectory(sd)
            .setFileIoProvider(datanode.getFileIoProvider())
            .setConf(this.conf)
            .build();
    FsVolumeReference ref = fsVolume.obtainReference();
    ReplicaMap tempVolumeMap = new ReplicaMap(datasetLock);
    fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);
    activateVolume(tempVolumeMap, sd, storageLocation.getStorageType(), ref);
    LOG.info("Added volume - " + storageLocation + ", StorageType: " + storageLocation.getStorageType());
}
Also used : FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation)
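
For context, the contract these examples exercise is small. Paraphrased from the Hadoop sources (the comments here are a summary, not the original javadoc):

public interface FsVolumeReference extends Closeable {

    // Decrements the volume's reference count; a volume cannot be
    // removed while any reference to it is outstanding.
    @Override
    void close() throws IOException;

    // Returns the referenced volume, or null if the reference
    // has already been released.
    FsVolumeSpi getVolume();
}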

Example 19 with FsVolumeReference

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

From the class DatasetVolumeChecker, method checkAllVolumes.

/**
   * Run checks against all volumes of a dataset.
   *
   * This check may be performed at service startup and subsequently at
   * regular intervals to detect and handle failed volumes.
   *
   * @param dataset - FsDatasetSpi to be checked.
   * @return set of failed volumes.
   */
public Set<FsVolumeSpi> checkAllVolumes(final FsDatasetSpi<? extends FsVolumeSpi> dataset) throws InterruptedException {
    final long gap = timer.monotonicNow() - lastAllVolumesCheck;
    if (gap < minDiskCheckGapMs) {
        numSkippedChecks.incrementAndGet();
        LOG.trace("Skipped checking all volumes, time since last check {} is less " + "than the minimum gap between checks ({} ms).", gap, minDiskCheckGapMs);
        return Collections.emptySet();
    }
    final FsDatasetSpi.FsVolumeReferences references = dataset.getFsVolumeReferences();
    if (references.size() == 0) {
        LOG.warn("checkAllVolumesAsync - no volumes can be referenced");
        return Collections.emptySet();
    }
    lastAllVolumesCheck = timer.monotonicNow();
    final Set<FsVolumeSpi> healthyVolumes = new HashSet<>();
    final Set<FsVolumeSpi> failedVolumes = new HashSet<>();
    final Set<FsVolumeSpi> allVolumes = new HashSet<>();
    final AtomicLong numVolumes = new AtomicLong(references.size());
    final CountDownLatch latch = new CountDownLatch(1);
    for (int i = 0; i < references.size(); ++i) {
        final FsVolumeReference reference = references.getReference(i);
        Optional<ListenableFuture<VolumeCheckResult>> olf = delegateChecker.schedule(reference.getVolume(), IGNORED_CONTEXT);
        LOG.info("Scheduled health check for volume {}", reference.getVolume());
        if (olf.isPresent()) {
            allVolumes.add(reference.getVolume());
            Futures.addCallback(olf.get(), new ResultHandler(reference, healthyVolumes, failedVolumes, numVolumes, new Callback() {

                @Override
                public void call(Set<FsVolumeSpi> ignored1, Set<FsVolumeSpi> ignored2) {
                    latch.countDown();
                }
            }));
        } else {
            IOUtils.cleanup(null, reference);
            if (numVolumes.decrementAndGet() == 0) {
                latch.countDown();
            }
        }
    }
    // Wait until the timeout elapses, after which we give up on
    // the remaining volumes.
    if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) {
        LOG.warn("checkAllVolumes timed out after {} ms" + maxAllowedTimeForCheckMs);
    }
    numSyncDatasetChecks.incrementAndGet();
    synchronized (this) {
        // Make a copy under the mutex, since Sets.difference() returns a
        // view of a potentially changing set.
        return new HashSet<>(Sets.difference(allVolumes, healthyVolumes));
    }
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicLong(java.util.concurrent.atomic.AtomicLong) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) FutureCallback(com.google.common.util.concurrent.FutureCallback) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) HashSet(java.util.HashSet)
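
A hypothetical caller, for example at DataNode startup, would use the returned set to take bad volumes out of service. In this sketch, checker is a DatasetVolumeChecker and handleVolumeFailures() is an assumed follow-up action, not part of the class above:

void runStartupDiskCheck(FsDatasetSpi<? extends FsVolumeSpi> dataset) throws InterruptedException {
    // synchronous check; blocks for up to maxAllowedTimeForCheckMs
    Set<FsVolumeSpi> failedVolumes = checker.checkAllVolumes(dataset);
    if (!failedVolumes.isEmpty()) {
        LOG.warn("{} volume(s) failed the disk check", failedVolumes.size());
        // hypothetical: unregister the failed volumes from the dataset
        handleVolumeFailures(failedVolumes);
    }
}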

Example 20 with FsVolumeReference

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

From the class TestFsVolumeList, method testReleaseVolumeRefIfNoBlockScanner.

@Test(timeout = 30000)
public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
    FsVolumeList volumeList = new FsVolumeList(Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);
    File volDir = new File(baseDir, "volume-0");
    volDir.mkdirs();
    FsVolumeImpl volume = new FsVolumeImplBuilder()
            .setConf(conf)
            .setDataset(dataset)
            .setStorageID("storage-id")
            .setStorageDirectory(new StorageDirectory(StorageLocation.parse(volDir.getPath())))
            .build();
    FsVolumeReference ref = volume.obtainReference();
    volumeList.addVolume(ref);
    assertNull(ref.getVolume());
}
Also used : FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File) Test(org.junit.Test)
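
The final assertNull holds because FsVolumeList.addVolume() takes ownership of the reference: with no BlockScanner configured there is no long-lived consumer for it, so the list releases the reference immediately and getVolume() then returns null. The branch under test, paraphrased from FsVolumeList.addVolume():

if (blockScanner != null) {
    // the volume scanner holds the reference for as long as it scans the volume
    blockScanner.addVolumeScanner(ref);
} else {
    // no scanner: release the reference right away
    IOUtils.cleanup(null, ref);
}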

Aggregations

FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 20
IOException (java.io.IOException): 12
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 10
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 9
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 7
ClosedChannelException (java.nio.channels.ClosedChannelException): 6
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 5
ArrayList (java.util.ArrayList): 4
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 4
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 4
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 3
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 3
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 2
File (java.io.File): 2
HashSet (java.util.HashSet): 2
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 2
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 2
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 2
Test (org.junit.Test): 2