Example 11 with FsVolumeReference

use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

the class FsDatasetImpl method createTemporary.

// FsDatasetSpi
@Override
public ReplicaHandler createTemporary(StorageType storageType, ExtendedBlock b) throws IOException {
    long startTimeMs = Time.monotonicNow();
    long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
    ReplicaInfo lastFoundReplicaInfo = null;
    do {
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            ReplicaInfo currentReplicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
            if (currentReplicaInfo == lastFoundReplicaInfo) {
                if (lastFoundReplicaInfo != null) {
                    invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo });
                }
                FsVolumeReference ref = volumes.getNextVolume(storageType, b.getNumBytes());
                FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
                ReplicaInPipeline newReplicaInfo;
                try {
                    newReplicaInfo = v.createTemporary(b);
                } catch (IOException e) {
                    IOUtils.cleanup(null, ref);
                    throw e;
                }
                volumeMap.add(b.getBlockPoolId(), newReplicaInfo.getReplicaInfo());
                return new ReplicaHandler(newReplicaInfo, ref);
            } else {
                if (!(currentReplicaInfo.getGenerationStamp() < b.getGenerationStamp()
                        && (currentReplicaInfo.getState() == ReplicaState.TEMPORARY
                            || currentReplicaInfo.getState() == ReplicaState.RBW))) {
                    throw new ReplicaAlreadyExistsException("Block " + b
                        + " already exists in state " + currentReplicaInfo.getState()
                        + " and thus cannot be created.");
                }
                lastFoundReplicaInfo = currentReplicaInfo;
            }
        }
        // Hang too long, just bail out. This is not supposed to happen.
        long writerStopMs = Time.monotonicNow() - startTimeMs;
        if (writerStopMs > writerStopTimeoutMs) {
            LOG.warn("Unable to stop existing writer for block " + b + " after " + writerStopMs + " miniseconds.");
            throw new IOException("Unable to stop existing writer for block " + b + " after " + writerStopMs + " miniseconds.");
        }
        // Stop the previous writer
        ((ReplicaInPipeline) lastFoundReplicaInfo).stopWriter(writerStopTimeoutMs);
    } while (true);
}
Also used : FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaAlreadyExistsException(org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
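The detail worth copying from this example is the release-on-failure handling of the volume reference: once getNextVolume() hands back an FsVolumeReference, every failure path before the ReplicaHandler hand-off must close it, or the volume's reference count leaks and the volume can never be drained for removal. Below is a minimal, self-contained sketch of that idiom; Volume, VolumeRef and TemporaryWriter are hypothetical stand-ins, not Hadoop classes.

import java.io.Closeable;
import java.io.IOException;

class Volume {
    private int refCount;

    synchronized VolumeRef obtainReference() {
        refCount++;
        return new VolumeRef(this);
    }

    synchronized void release() {
        refCount--;
    }
}

class VolumeRef implements Closeable {
    private final Volume volume;

    VolumeRef(Volume volume) {
        this.volume = volume;
    }

    Volume getVolume() {
        return volume;
    }

    @Override
    public void close() {
        volume.release();
    }
}

class TemporaryWriter {
    // Same shape as createTemporary: on failure the reference is closed
    // before the exception propagates; on success the caller owns it.
    static VolumeRef createOnVolume(Volume v) throws IOException {
        VolumeRef ref = v.obtainReference();
        try {
            createReplica(ref.getVolume());
            return ref;
        } catch (IOException e) {
            ref.close();
            throw e;
        }
    }

    private static void createReplica(Volume v) throws IOException {
        // placeholder for the actual on-disk replica creation
    }
}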

Example 12 with FsVolumeReference

use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

the class FsDatasetImpl method getTmpInputStreams.

/**
   * Returns handles to the block file and its metadata file
   */
// FsDatasetSpi
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkOffset, long metaOffset) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo info = getReplicaInfo(b);
        FsVolumeReference ref = info.getVolume().obtainReference();
        try {
            InputStream blockInStream = info.getDataInputStream(blkOffset);
            try {
                InputStream metaInStream = info.getMetadataInputStream(metaOffset);
                return new ReplicaInputStreams(blockInStream, metaInStream, ref, datanode.getFileIoProvider());
            } catch (IOException e) {
                IOUtils.cleanup(null, blockInStream);
                throw e;
            }
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
    }
}
Also used : ReplicaInputStreams(org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) LengthInputStream(org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
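The nested try blocks implement staged acquisition: each resource opened so far is cleaned up if a later acquisition fails, and on success all of them are handed to ReplicaInputStreams, which owns them from then on. Here is a hedged sketch of the same shape using only standard streams; TmpStreams and StreamOpener are made-up names, not Hadoop APIs.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

class TmpStreams implements AutoCloseable {
    final InputStream blockIn;
    final InputStream metaIn;

    TmpStreams(InputStream blockIn, InputStream metaIn) {
        this.blockIn = blockIn;
        this.metaIn = metaIn;
    }

    @Override
    public void close() throws IOException {
        try {
            metaIn.close();
        } finally {
            blockIn.close();
        }
    }
}

class StreamOpener {
    // Same nested acquire/cleanup shape as getTmpInputStreams: if opening
    // the second resource fails, the first is closed before the exception
    // propagates, so nothing is leaked.
    static TmpStreams open(String blockPath, String metaPath) throws IOException {
        InputStream blockIn = new FileInputStream(blockPath);
        try {
            InputStream metaIn = new FileInputStream(metaPath);
            return new TmpStreams(blockIn, metaIn);
        } catch (IOException e) {
            blockIn.close(); // undo the partial acquisition
            throw e;
        }
    }
}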

Example 13 with FsVolumeReference

use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

the class FsVolumeList method addBlockPool.

void addBlockPool(final String bpid, final Configuration conf) throws IOException {
    long totalStartTime = Time.monotonicNow();
    final List<IOException> exceptions = Collections.synchronizedList(new ArrayList<IOException>());
    List<Thread> blockPoolAddingThreads = new ArrayList<Thread>();
    for (final FsVolumeImpl v : volumes) {
        Thread t = new Thread() {

            @Override
            public void run() {
                try (FsVolumeReference ref = v.obtainReference()) {
                    FsDatasetImpl.LOG.info("Scanning block pool " + bpid + " on volume " + v + "...");
                    long startTime = Time.monotonicNow();
                    v.addBlockPool(bpid, conf);
                    long timeTaken = Time.monotonicNow() - startTime;
                    FsDatasetImpl.LOG.info("Time taken to scan block pool " + bpid + " on " + v + ": " + timeTaken + "ms");
                } catch (ClosedChannelException e) {
                    // ignore.
                } catch (IOException ioe) {
                    FsDatasetImpl.LOG.info("Caught exception while scanning " + v + ". Will throw later.", ioe);
                    exceptions.add(ioe);
                }
            }
        };
        blockPoolAddingThreads.add(t);
        t.start();
    }
    for (Thread t : blockPoolAddingThreads) {
        try {
            t.join();
        } catch (InterruptedException ie) {
            throw new IOException(ie);
        }
    }
    if (!exceptions.isEmpty()) {
        throw exceptions.get(0);
    }
    long totalTimeTaken = Time.monotonicNow() - totalStartTime;
    FsDatasetImpl.LOG.info("Total time to scan all replicas for block pool " + bpid + ": " + totalTimeTaken + "ms");
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ArrayList(java.util.ArrayList) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) IOException(java.io.IOException)
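The concurrency pattern here is fan-out/join: one thread per volume, failures collected into a synchronized list, and the first recorded exception rethrown only after every thread has joined, so a single bad volume cannot abort the scan of the others. A generic sketch of that shape, assuming a hypothetical scanOneVolume standing in for v.addBlockPool(bpid, conf):

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class ParallelScan {
    static void scanAll(List<String> volumes) throws IOException {
        final List<IOException> exceptions =
            Collections.synchronizedList(new ArrayList<IOException>());
        List<Thread> threads = new ArrayList<>();
        for (final String v : volumes) {
            Thread t = new Thread(() -> {
                try {
                    scanOneVolume(v);
                } catch (IOException ioe) {
                    exceptions.add(ioe); // remember the failure, rethrow after join
                }
            });
            threads.add(t);
            t.start();
        }
        for (Thread t : threads) {
            try {
                t.join();
            } catch (InterruptedException ie) {
                throw new IOException(ie);
            }
        }
        if (!exceptions.isEmpty()) {
            throw exceptions.get(0); // surface the first failure only
        }
    }

    private static void scanOneVolume(String volume) throws IOException {
        // placeholder for per-volume work, e.g. scanning one block pool
    }
}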

Example 14 with FsVolumeReference

use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

the class FsVolumeList method handleVolumeFailures.

/**
   * Records each volume in {@code failedVolumes} as failed and removes it
   * from this volume list, provided a reference to the volume can still be
   * obtained; volumes that are already closed are skipped.
   *
   * Acquires {@link #checkDirsLock} so that only one failure-handling pass
   * runs at a time.
   *
   * @param failedVolumes the volumes to record as failed and remove
   */
void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes) {
    try (AutoCloseableLock lock = checkDirsLock.acquire()) {
        for (FsVolumeSpi vol : failedVolumes) {
            FsVolumeImpl fsv = (FsVolumeImpl) vol;
            try (FsVolumeReference ref = fsv.obtainReference()) {
                addVolumeFailureInfo(fsv);
                removeVolume(fsv);
            } catch (ClosedChannelException e) {
                FsDatasetImpl.LOG.debug("Caught exception when obtaining " + "reference count on closed volume", e);
            } catch (IOException e) {
                FsDatasetImpl.LOG.error("Unexpected IOException", e);
            }
        }
        waitVolumeRemoved(5000, checkDirsLockCondition);
    }
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) IOException(java.io.IOException)
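Note how obtainReference() doubles as a liveness probe: a volume that has already been closed refuses new references with ClosedChannelException, which the caller treats as "already gone" rather than as an error. A self-contained sketch of that contract under assumed names (GuardedVolume and FailureHandler are hypothetical, not Hadoop classes):

import java.io.Closeable;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;

class GuardedVolume {
    private boolean closed;
    private int refCount;

    // Mirror of the obtainReference() contract: once the volume is closed,
    // asking for a reference fails with ClosedChannelException.
    synchronized Closeable obtainReference() throws ClosedChannelException {
        if (closed) {
            throw new ClosedChannelException();
        }
        refCount++;
        return this::release;
    }

    private synchronized void release() {
        refCount--;
    }

    synchronized void close() {
        closed = true;
    }
}

class FailureHandler {
    // Same shape as handleVolumeFailures: obtaining a reference doubles as
    // a liveness check, and an already-closed volume is silently skipped.
    static void handle(GuardedVolume vol) {
        try (Closeable ref = vol.obtainReference()) {
            // volume still live: record the failure, then remove the volume
        } catch (ClosedChannelException e) {
            // already closed elsewhere; nothing left to do
        } catch (IOException e) {
            // unexpected failure while releasing the reference
        }
    }
}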

Example 15 with FsVolumeReference

use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.

the class FsVolumeList method getAllVolumesMap.

void getAllVolumesMap(final String bpid, final ReplicaMap volumeMap, final RamDiskReplicaTracker ramDiskReplicaMap) throws IOException {
    long totalStartTime = Time.monotonicNow();
    final List<IOException> exceptions = Collections.synchronizedList(new ArrayList<IOException>());
    List<Thread> replicaAddingThreads = new ArrayList<Thread>();
    for (final FsVolumeImpl v : volumes) {
        Thread t = new Thread() {

            @Override
            public void run() {
                try (FsVolumeReference ref = v.obtainReference()) {
                    FsDatasetImpl.LOG.info("Adding replicas to map for block pool " + bpid + " on volume " + v + "...");
                    long startTime = Time.monotonicNow();
                    v.getVolumeMap(bpid, volumeMap, ramDiskReplicaMap);
                    long timeTaken = Time.monotonicNow() - startTime;
                    FsDatasetImpl.LOG.info("Time to add replicas to map for block pool" + " " + bpid + " on volume " + v + ": " + timeTaken + "ms");
                } catch (ClosedChannelException e) {
                    FsDatasetImpl.LOG.info("The volume " + v + " is closed while " + "adding replicas, ignored.");
                } catch (IOException ioe) {
                    FsDatasetImpl.LOG.info("Caught exception while adding replicas " + "from " + v + ". Will throw later.", ioe);
                    exceptions.add(ioe);
                }
            }
        };
        replicaAddingThreads.add(t);
        t.start();
    }
    for (Thread t : replicaAddingThreads) {
        try {
            t.join();
        } catch (InterruptedException ie) {
            throw new IOException(ie);
        }
    }
    if (!exceptions.isEmpty()) {
        throw exceptions.get(0);
    }
    long totalTimeTaken = Time.monotonicNow() - totalStartTime;
    FsDatasetImpl.LOG.info("Total time to add all replicas to map: " + totalTimeTaken + "ms");
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ArrayList(java.util.ArrayList) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) IOException(java.io.IOException)
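This is the same fan-out/join shape as addBlockPool in Example 13, except that each worker also publishes results into the shared volumeMap. The sketch below mirrors that with a ConcurrentHashMap so that concurrent writers need no extra locking; MapBuilder and loadReplicas are invented for illustration and are not part of Hadoop.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class MapBuilder {
    static Map<Long, String> buildMap(List<String> volumes) throws IOException {
        final Map<Long, String> replicaMap = new ConcurrentHashMap<>();
        List<Thread> threads = new ArrayList<>();
        for (final String v : volumes) {
            Thread t = new Thread(() -> loadReplicas(v, replicaMap));
            threads.add(t);
            t.start();
        }
        for (Thread t : threads) {
            try {
                t.join();
            } catch (InterruptedException ie) {
                throw new IOException(ie);
            }
        }
        return replicaMap; // safe to read: all writer threads have joined
    }

    private static void loadReplicas(String volume, Map<Long, String> map) {
        // placeholder: scan the volume and add (blockId -> replica path) entries
    }
}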

Aggregations

FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 20
IOException (java.io.IOException): 12
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 10
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 9
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 7
ClosedChannelException (java.nio.channels.ClosedChannelException): 6
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 5
ArrayList (java.util.ArrayList): 4
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 4
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 4
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 3
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 3
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 2
File (java.io.File): 2
HashSet (java.util.HashSet): 2
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 2
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 2
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 2
Test (org.junit.Test): 2