Example 6 with FsVolumeReference

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

From the class FsDatasetImpl, method createRbw.

// FsDatasetSpi
@Override
public ReplicaHandler createRbw(StorageType storageType, ExtendedBlock b, boolean allowLazyPersist) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
        if (replicaInfo != null) {
            throw new ReplicaAlreadyExistsException("Block " + b + " already exists in state " + replicaInfo.getState() + " and thus cannot be created.");
        }
        // create a new block
        FsVolumeReference ref = null;
        // Use ramdisk only if block size is a multiple of OS page size.
        // This simplifies reservation for partially used replicas
        // significantly.
        if (allowLazyPersist && lazyWriter != null
                && b.getNumBytes() % cacheManager.getOsPageSize() == 0
                && reserveLockedMemory(b.getNumBytes())) {
            try {
                // First try to place the block on a transient volume.
                ref = volumes.getNextTransientVolume(b.getNumBytes());
                datanode.getMetrics().incrRamDiskBlocksWrite();
            } catch (DiskOutOfSpaceException de) {
                // Ignore the exception since we just fall back to persistent storage.
            } finally {
                if (ref == null) {
                    cacheManager.release(b.getNumBytes());
                }
            }
        }
        if (ref == null) {
            ref = volumes.getNextVolume(storageType, b.getNumBytes());
        }
        FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
        if (allowLazyPersist && !v.isTransientStorage()) {
            datanode.getMetrics().incrRamDiskBlocksWriteFallback();
        }
        ReplicaInPipeline newReplicaInfo;
        try {
            newReplicaInfo = v.createRbw(b);
            if (newReplicaInfo.getReplicaInfo().getState() != ReplicaState.RBW) {
                throw new IOException("CreateRBW returned a replica of state " + newReplicaInfo.getReplicaInfo().getState() + " for block " + b.getBlockId());
            }
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
        volumeMap.add(b.getBlockPoolId(), newReplicaInfo.getReplicaInfo());
        return new ReplicaHandler(newReplicaInfo, ref);
    }
}
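
The ReplicaHandler returned here bundles the new replica with the FsVolumeReference that pins its volume; closing the handler releases that reference. As a hedged illustration of the calling side (the dataset variable and the block values below are hypothetical, and the fragment assumes a surrounding method that throws IOException):

ExtendedBlock block = new ExtendedBlock("BP-1", 1001L);
try (ReplicaHandler handler =
        dataset.createRbw(StorageType.DISK, block, false)) {
    // While the handler is open, the chosen volume cannot be removed.
    ReplicaInPipeline replica = handler.getReplica();
    // ... stream the block's data into the RBW replica ...
}
// Closing the handler released the FsVolumeReference.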
Also used : DiskOutOfSpaceException(org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaAlreadyExistsException(org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)

Example 7 with FsVolumeReference

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

From the class FsDatasetImpl, method recoverAppend.

// FsDatasetSpi
@Override
public ReplicaHandler recoverAppend(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
    LOG.info("Recover failed append to " + b);
    while (true) {
        try {
            try (AutoCloseableLock lock = datasetLock.acquire()) {
                ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
                FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
                ReplicaInPipeline replica;
                try {
                    // change the replica's state/gs etc.
                    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
                        replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes());
                    } else {
                        // The replica is already RBW; just bump its generation stamp in place.
                        replicaInfo.bumpReplicaGS(newGS);
                        replica = (ReplicaInPipeline) replicaInfo;
                    }
                } catch (IOException e) {
                    IOUtils.cleanup(null, ref);
                    throw e;
                }
                return new ReplicaHandler(replica, ref);
            }
        } catch (MustStopExistingWriter e) {
            e.getReplicaInPipeline().stopWriter(datanode.getDnConf().getXceiverStopTimeout());
        }
    }
}
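
The outer while (true) loop is worth a note: recoverCheck can throw MustStopExistingWriter when another thread still has the replica open for writing. The catch block stops that writer, bounded by the configured xceiver stop timeout, and then retries the whole recovery under a freshly acquired dataset lock.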
Also used : FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)

Example 8 with FsVolumeReference

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

From the class FsDatasetImpl, method getStorageReports.

// FsDatasetSpi
@Override
public StorageReport[] getStorageReports(String bpid) throws IOException {
    List<StorageReport> reports;
    synchronized (statsLock) {
        List<FsVolumeImpl> curVolumes = volumes.getVolumes();
        reports = new ArrayList<>(curVolumes.size());
        for (FsVolumeImpl volume : curVolumes) {
            try (FsVolumeReference ref = volume.obtainReference()) {
                StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
                        false, volume.getCapacity(), volume.getDfsUsed(),
                        volume.getAvailable(), volume.getBlockPoolUsed(bpid),
                        volume.getNonDfsUsed());
                reports.add(sr);
            } catch (ClosedChannelException e) {
                // The volume was removed while iterating; skip its report.
                continue;
            }
        }
    }
    return reports.toArray(new StorageReport[reports.size()]);
}
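
The pattern to note is the try-with-resources on obtainReference(): it increments the volume's reference count so the volume cannot be released mid-read, and it throws ClosedChannelException if the volume was already removed. Stripped to its core (volume standing in for any FsVolumeImpl):

try (FsVolumeReference ref = volume.obtainReference()) {
    // Safe: the volume stays alive while ref is open.
    FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
    // ... read capacity and usage statistics from v ...
} catch (ClosedChannelException e) {
    // The volume was removed concurrently; skip it.
}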
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) StorageReport(org.apache.hadoop.hdfs.server.protocol.StorageReport)

Example 9 with FsVolumeReference

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

From the class FsDatasetImpl, method append.

// FsDatasetSpi
@Override
public ReplicaHandler append(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        // check the validity of the parameter
        if (newGS < b.getGenerationStamp()) {
            throw new IOException("The new generation stamp " + newGS + " should be greater than the replica " + b + "'s generation stamp");
        }
        ReplicaInfo replicaInfo = getReplicaInfo(b);
        LOG.info("Appending to " + replicaInfo);
        if (replicaInfo.getState() != ReplicaState.FINALIZED) {
            throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
        }
        if (replicaInfo.getNumBytes() != expectedBlockLen) {
            throw new IOException("Corrupted replica " + replicaInfo + " with a length of " + replicaInfo.getNumBytes() + " expected length is " + expectedBlockLen);
        }
        FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
        ReplicaInPipeline replica = null;
        try {
            replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes());
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
        return new ReplicaHandler(replica, ref);
    }
}
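
As with createRbw, the caller owns the returned ReplicaHandler and releases the volume reference by closing it. A sketch under the same assumptions as before (block is a hypothetical ExtendedBlock for an already finalized replica):

long newGS = block.getGenerationStamp() + 1; // must not be older than the current stamp
try (ReplicaHandler handler =
        dataset.append(block, newGS, block.getNumBytes())) {
    ReplicaInPipeline out = handler.getReplica();
    // ... write the appended bytes through the pipeline ...
}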
Also used : FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)

Example 10 with FsVolumeReference

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

From the class FsDatasetImpl, method addVolume.

@Override
public void addVolume(final StorageLocation location, final List<NamespaceInfo> nsInfos) throws IOException {
    // Prepare volume in DataStorage
    final DataStorage.VolumeBuilder builder;
    try {
        builder = dataStorage.prepareVolume(datanode, location, nsInfos);
    } catch (IOException e) {
        volumes.addVolumeFailureInfo(new VolumeFailureInfo(location, Time.now()));
        throw e;
    }
    final Storage.StorageDirectory sd = builder.getStorageDirectory();
    StorageType storageType = location.getStorageType();
    final FsVolumeImpl fsVolume = createFsVolume(sd.getStorageUuid(), sd, location);
    final ReplicaMap tempVolumeMap = new ReplicaMap(new AutoCloseableLock());
    ArrayList<IOException> exceptions = Lists.newArrayList();
    for (final NamespaceInfo nsInfo : nsInfos) {
        String bpid = nsInfo.getBlockPoolID();
        try {
            fsVolume.addBlockPool(bpid, this.conf, this.timer);
            fsVolume.getVolumeMap(bpid, tempVolumeMap, ramDiskReplicaTracker);
        } catch (IOException e) {
            LOG.warn("Caught exception when adding " + fsVolume + ". Will throw later.", e);
            exceptions.add(e);
        }
    }
    if (!exceptions.isEmpty()) {
        try {
            sd.unlock();
        } catch (IOException e) {
            exceptions.add(e);
        }
        throw MultipleIOException.createIOException(exceptions);
    }
    final FsVolumeReference ref = fsVolume.obtainReference();
    setupAsyncLazyPersistThread(fsVolume);
    builder.build();
    activateVolume(tempVolumeMap, sd, storageType, ref);
    LOG.info("Added volume - " + location + ", StorageType: " + storageType);
}
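
The ordering here is deliberate: each block pool is loaded into the private tempVolumeMap first, so the new volume becomes visible to the rest of the dataset only when activateVolume publishes it. If any pool fails to load, the storage directory is unlocked and a MultipleIOException carrying all accumulated errors is thrown instead. Note also that the FsVolumeReference obtained just before activation is handed off to activateVolume rather than closed in this method.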
Also used : DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageType(org.apache.hadoop.fs.StorageType) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) Storage(org.apache.hadoop.hdfs.server.common.Storage) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo)

Aggregations

FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 20 uses
IOException (java.io.IOException): 12 uses
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 10 uses
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 9 uses
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 7 uses
ClosedChannelException (java.nio.channels.ClosedChannelException): 6 uses
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 5 uses
ArrayList (java.util.ArrayList): 4 uses
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 4 uses
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 4 uses
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 3 uses
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 3 uses
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 2 uses
File (java.io.File): 2 uses
HashSet (java.util.HashSet): 2 uses
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 2 uses
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2 uses
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 2 uses
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 2 uses
Test (org.junit.Test): 2 uses