Example 6 with ClosedChannelException

use of java.nio.channels.ClosedChannelException in project hadoop by apache.

the class FsDatasetImpl method invalidate.

/**
   * We're informed that a block is no longer valid. Delete it.
   */
// FsDatasetSpi
@Override
public void invalidate(String bpid, Block[] invalidBlks) throws IOException {
    final List<String> errors = new ArrayList<String>();
    for (int i = 0; i < invalidBlks.length; i++) {
        final ReplicaInfo removing;
        final FsVolumeImpl v;
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
            if (info == null) {
                ReplicaInfo infoByBlockId = volumeMap.get(bpid, invalidBlks[i].getBlockId());
                if (infoByBlockId == null) {
                    // It is okay if the block is not found -- it may have
                    // been deleted earlier.
                    LOG.info("Failed to delete replica " + invalidBlks[i] + ": ReplicaInfo not found.");
                } else {
                    errors.add("Failed to delete replica " + invalidBlks[i] + ": GenerationStamp not matched, existing replica is " + Block.toString(infoByBlockId));
                }
                continue;
            }
            v = (FsVolumeImpl) info.getVolume();
            if (v == null) {
                errors.add("Failed to delete replica " + invalidBlks[i] + ". No volume for replica " + info);
                continue;
            }
            try {
                File blockFile = new File(info.getBlockURI());
                if (blockFile != null && blockFile.getParentFile() == null) {
                    errors.add("Failed to delete replica " + invalidBlks[i] + ". Parent not found for block file: " + blockFile);
                    continue;
                }
            } catch (IllegalArgumentException e) {
                LOG.warn("Parent directory check failed; replica " + info + " is not backed by a local file");
            }
            removing = volumeMap.remove(bpid, invalidBlks[i]);
            addDeletingBlock(bpid, removing.getBlockId());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Block file " + removing.getBlockURI() + " is to be deleted");
            }
            if (removing instanceof ReplicaInPipeline) {
                ((ReplicaInPipeline) removing).releaseAllBytesReserved();
            }
        }
        if (v.isTransientStorage()) {
            RamDiskReplica replicaInfo = ramDiskReplicaTracker.getReplica(bpid, invalidBlks[i].getBlockId());
            if (replicaInfo != null) {
                if (!replicaInfo.getIsPersisted()) {
                    datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
                }
                ramDiskReplicaTracker.discardReplica(replicaInfo.getBlockPoolId(), replicaInfo.getBlockId(), true);
            }
        }
        // If a DFSClient has the replica in its cache of short-circuit file
        // descriptors (and the client is using ShortCircuitShm), invalidate it.
        datanode.getShortCircuitRegistry().processBlockInvalidation(new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));
        // If the block is cached, start uncaching it.
        cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());
        // Delete the block asynchronously to make sure it can be done fast
        // enough. It is okay to unlink the block file before the uncache
        // operation finishes.
        try {
            asyncDiskService.deleteAsync(v.obtainReference(), removing, new ExtendedBlock(bpid, invalidBlks[i]), dataStorage.getTrashDirectoryForReplica(bpid, removing));
        } catch (ClosedChannelException e) {
            LOG.warn("Volume " + v + " is closed, ignore the deletion task for " + "block " + invalidBlks[i]);
        }
    }
    if (!errors.isEmpty()) {
        StringBuilder b = new StringBuilder("Failed to delete ").append(errors.size()).append(" (out of ").append(invalidBlks.length).append(") replica(s):");
        for (int i = 0; i < errors.size(); i++) {
            b.append("\n").append(i).append(") ").append(errors.get(i));
        }
        throw new IOException(b.toString());
    }
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ExtendedBlockId(org.apache.hadoop.hdfs.ExtendedBlockId) ArrayList(java.util.ArrayList) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline) File(java.io.File) RamDiskReplica(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica)
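
A pattern worth calling out in this example: the deletion task is only scheduled after obtainReference() succeeds, and a ClosedChannelException from a closed volume is logged and skipped rather than treated as a failure, since a concurrently removed volume has nothing left to delete. Below is a minimal, self-contained sketch of that idea; the Volume and InvalidateSketch classes are hypothetical stand-ins, not Hadoop's FsVolumeImpl or FsVolumeReference.

import java.io.Closeable;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;

// Hypothetical stand-in for a volume: refuses new references once closed.
// (Hadoop's FsVolumeImpl reference-counts; this sketch only models the
// closed check that produces the ClosedChannelException.)
class Volume {
    private volatile boolean closed;

    Closeable obtainReference() throws ClosedChannelException {
        if (closed) {
            throw new ClosedChannelException();
        }
        return () -> { /* release the reference here */ };
    }

    void close() {
        closed = true;
    }
}

public class InvalidateSketch {
    static void scheduleDelete(Volume v, String block) throws IOException {
        try (Closeable ref = v.obtainReference()) {
            // In the Hadoop code the reference is handed to an async disk
            // service; here we just pretend to delete synchronously.
            System.out.println("deleting " + block);
        } catch (ClosedChannelException e) {
            // Benign race: the volume went away, so there is nothing to do.
            System.out.println("volume closed, skipping " + block);
        }
    }

    public static void main(String[] args) throws IOException {
        Volume v = new Volume();
        scheduleDelete(v, "blk_1");
        v.close();                  // the volume is removed
        scheduleDelete(v, "blk_2"); // logged and skipped, not an error
    }
}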

Example 7 with ClosedChannelException

use of java.nio.channels.ClosedChannelException in project hadoop by apache.

the class FsDatasetImpl method getStorageReports.

// FsDatasetSpi
@Override
public StorageReport[] getStorageReports(String bpid) throws IOException {
    List<StorageReport> reports;
    synchronized (statsLock) {
        List<FsVolumeImpl> curVolumes = volumes.getVolumes();
        reports = new ArrayList<>(curVolumes.size());
        for (FsVolumeImpl volume : curVolumes) {
            try (FsVolumeReference ref = volume.obtainReference()) {
                StorageReport sr = new StorageReport(volume.toDatanodeStorage(), false, volume.getCapacity(), volume.getDfsUsed(), volume.getAvailable(), volume.getBlockPoolUsed(bpid), volume.getNonDfsUsed());
                reports.add(sr);
            } catch (ClosedChannelException e) {
                // The volume was removed while the report was being built;
                // skip it.
                continue;
            }
        }
    }
    return reports.toArray(new StorageReport[reports.size()]);
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) StorageReport(org.apache.hadoop.hdfs.server.protocol.StorageReport)
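
getStorageReports shows the other common shape for this exception: iterate over a snapshot of resources and simply continue past any that were closed in the meantime. Plain NIO channels behave the same way, since FileChannel.size() throws ClosedChannelException once the channel is closed; here is a small self-contained sketch of that skip-on-closed loop (the file names are made up).

import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.List;

public class SizeReportSketch {
    // Sum the sizes of the channels that are still open, skipping closed ones.
    static long totalSize(List<FileChannel> channels) throws IOException {
        long total = 0;
        for (FileChannel ch : channels) {
            try {
                total += ch.size(); // throws ClosedChannelException when closed
            } catch (ClosedChannelException e) {
                continue; // closed concurrently; skip, exactly as in example 7
            }
        }
        return total;
    }

    public static void main(String[] args) throws IOException {
        List<FileChannel> channels = new ArrayList<>();
        channels.add(FileChannel.open(Paths.get("a.tmp"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE));
        channels.add(FileChannel.open(Paths.get("b.tmp"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE));
        channels.get(0).close(); // simulate a concurrent close
        System.out.println("bytes across open channels: " + totalSize(channels));
    }
}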

Example 8 with ClosedChannelException

use of java.nio.channels.ClosedChannelException in project hadoop by apache.

the class FsVolumeImpl method setClosed.

/**
   * Close this volume.
   * @throws IOException if the volume has already been closed.
   */
void setClosed() throws IOException {
    try {
        this.reference.setClosed();
        dataset.stopAllDataxceiverThreads(this);
    } catch (ClosedChannelException e) {
        throw new IOException("The volume has already been closed.", e);
    }
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) IOException(java.io.IOException)
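
The useful idiom in setClosed is the translation step: the low-level ClosedChannelException is rethrown as an IOException with a human-readable message, while the original exception is kept as the cause so a double close remains visible in stack traces. A minimal sketch of the same translation, using a hypothetical OnceClosable class rather than Hadoop's volume types:

import java.io.IOException;
import java.nio.channels.ClosedChannelException;

// Hypothetical resource; not Hadoop's FsVolumeImpl.
class OnceClosable {
    private boolean closed;

    void setClosed() throws IOException {
        try {
            markClosed();
        } catch (ClosedChannelException e) {
            // Re-throw with a clearer message; the original exception stays
            // attached as the cause, so double-close is easy to diagnose.
            throw new IOException("The volume has already been closed.", e);
        }
    }

    private void markClosed() throws ClosedChannelException {
        if (closed) {
            throw new ClosedChannelException();
        }
        closed = true;
    }
}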

Example 9 with ClosedChannelException

use of java.nio.channels.ClosedChannelException in project hadoop by apache.

the class DataStreamer method waitForAckedSeqno.

/**
   * Wait until the given sequence number has been acknowledged by the
   * pipeline.
   *
   * @param seqno the sequence number to wait for
   * @throws IOException if the wait is interrupted or the streamer has failed
   */
void waitForAckedSeqno(long seqno) throws IOException {
    try (TraceScope ignored = dfsClient.getTracer().newScope("waitForAckedSeqno")) {
        LOG.debug("{} waiting for ack for: {}", this, seqno);
        long begin = Time.monotonicNow();
        try {
            synchronized (dataQueue) {
                while (!streamerClosed) {
                    checkClosed();
                    if (lastAckedSeqno >= seqno) {
                        break;
                    }
                    try {
                        // When we receive an ack, we are notified on dataQueue.
                        dataQueue.wait(1000);
                    } catch (InterruptedException ie) {
                        throw new InterruptedIOException("Interrupted while waiting for data to be acknowledged by pipeline");
                    }
                }
            }
            checkClosed();
        } catch (ClosedChannelException cce) {
            // The stream was closed while waiting; treat this as a normal
            // exit from the wait rather than an error.
        }
        long duration = Time.monotonicNow() - begin;
        if (duration > dfsclientSlowLogThresholdMs) {
            LOG.warn("Slow waitForAckedSeqno took {}ms (threshold={}ms). File being" + " written: {}, block: {}, Write pipeline datanodes: {}.", duration, dfsclientSlowLogThresholdMs, src, block, nodes);
        }
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) ClosedChannelException(java.nio.channels.ClosedChannelException) TraceScope(org.apache.htrace.core.TraceScope)
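
Two details of waitForAckedSeqno generalize well beyond HDFS: the bounded dataQueue.wait(1000) lets the loop re-check its exit conditions about once a second even if a notification is missed, and InterruptedException is converted into InterruptedIOException so callers see an I/O-flavored interruption. A self-contained sketch of that wait loop; the class and field names are illustrative, not DataStreamer's:

import java.io.IOException;
import java.io.InterruptedIOException;

public class AckWaiterSketch {
    private final Object dataQueue = new Object();
    private long lastAckedSeqno = -1;
    private boolean closed;

    // Block until seqno is acked, the waiter is closed, or we are interrupted.
    void waitForAckedSeqno(long seqno) throws IOException {
        synchronized (dataQueue) {
            while (!closed && lastAckedSeqno < seqno) {
                try {
                    // Bounded wait: re-check the loop condition at least once
                    // a second even if a notification is lost.
                    dataQueue.wait(1000);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt(); // preserve the flag
                    throw new InterruptedIOException(
                        "Interrupted while waiting for acks");
                }
            }
        }
    }

    // Called by the ack-receiving thread.
    void ackUpTo(long seqno) {
        synchronized (dataQueue) {
            lastAckedSeqno = seqno;
            dataQueue.notifyAll(); // wake every waiter
        }
    }
}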

Example 10 with ClosedChannelException

use of java.nio.channels.ClosedChannelException in project hadoop by apache.

the class DFSStripedOutputStream method closeImpl.

@Override
protected synchronized void closeImpl() throws IOException {
    if (isClosed()) {
        final MultipleIOException.Builder b = new MultipleIOException.Builder();
        for (int i = 0; i < streamers.size(); i++) {
            final StripedDataStreamer si = getStripedDataStreamer(i);
            try {
                si.getLastException().check(true);
            } catch (IOException e) {
                b.add(e);
            }
        }
        final IOException ioe = b.build();
        if (ioe != null) {
            throw ioe;
        }
        return;
    }
    try {
        try {
            // flush from all upper layers
            flushBuffer();
            // if the last stripe is incomplete, generate and write parity cells
            if (generateParityCellsForLastStripe()) {
                writeParityCells();
            }
            enqueueAllCurrentPackets();
            // flush all the data packets
            flushAllInternals();
            // check failures
            checkStreamerFailures();
            for (int i = 0; i < numAllBlocks; i++) {
                final StripedDataStreamer s = setCurrentStreamer(i);
                if (s.isHealthy()) {
                    try {
                        if (s.getBytesCurBlock() > 0) {
                            setCurrentPacketToEmpty();
                        }
                        // flush the last "close" packet to Datanode
                        flushInternal();
                    } catch (Exception e) {
                    // TODO for both close and endBlock, we currently do not handle
                    // failures when sending the last packet. We actually do not need to
                    // bump GS for this kind of failure. Thus counting the total number
                    // of failures may be good enough.
                    }
                }
            }
        } finally {
            // Failures may happen when flushing data/parity data out. Exceptions
            // may be thrown if more than 3 streamers fail, or updatePipeline RPC
            // fails. Streamers may keep waiting for the new block/GS information.
            // Thus need to force closing these threads.
            closeThreads(true);
        }
        try (TraceScope ignored = dfsClient.getTracer().newScope("completeFile")) {
            completeFile(currentBlockGroup);
        }
        logCorruptBlocks();
    } catch (ClosedChannelException ignored) {
        // The stream was already closed; fall through to the cleanup below.
    } finally {
        setClosed();
        // shutdown executor of flushAll tasks
        flushAllExecutor.shutdownNow();
    }
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) TraceScope(org.apache.htrace.core.TraceScope) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException) ExecutionException(java.util.concurrent.ExecutionException)
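
The first branch of closeImpl collects the last exception from every streamer and throws them together via MultipleIOException, so one bad streamer cannot hide the others. Outside Hadoop, the same collect-then-throw-once shape can be built with the JDK's suppressed exceptions; a minimal sketch, with an invented Checker interface standing in for the streamers:

import java.io.IOException;
import java.util.List;

public class CollectFailuresSketch {
    interface Checker {
        void check() throws IOException;
    }

    // Run every checker; if any failed, throw one IOException that carries
    // the rest as suppressed exceptions (Hadoop uses MultipleIOException).
    static void checkAll(List<Checker> checkers) throws IOException {
        IOException first = null;
        for (Checker c : checkers) {
            try {
                c.check();
            } catch (IOException e) {
                if (first == null) {
                    first = e;
                } else {
                    first.addSuppressed(e);
                }
            }
        }
        if (first != null) {
            throw first;
        }
    }

    public static void main(String[] args) {
        try {
            checkAll(List.of(
                () -> { throw new IOException("streamer 0 failed"); },
                () -> { },
                () -> { throw new IOException("streamer 2 failed"); }));
        } catch (IOException e) {
            System.out.println(e.getMessage() + ", suppressed="
                + e.getSuppressed().length);
        }
    }
}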

Aggregations

ClosedChannelException (java.nio.channels.ClosedChannelException) 211
ByteBuffer (java.nio.ByteBuffer) 67
IOException (java.io.IOException) 60
Test (org.junit.Test) 23
InetSocketAddress (java.net.InetSocketAddress) 19
SelectionKey (java.nio.channels.SelectionKey) 18
SocketChannel (java.nio.channels.SocketChannel) 15
ArrayList (java.util.ArrayList) 13
NotYetConnectedException (java.nio.channels.NotYetConnectedException) 11
InterruptedIOException (java.io.InterruptedIOException) 10
CancelledKeyException (java.nio.channels.CancelledKeyException) 10
ShutdownCommand (com.cloud.agent.api.ShutdownCommand) 9
File (java.io.File) 9
ServerSocketChannel (java.nio.channels.ServerSocketChannel) 9
PooledByteBuffer (io.undertow.connector.PooledByteBuffer) 8
FileChannel (java.nio.channels.FileChannel) 8
ConnectException (java.net.ConnectException) 7
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) 6
AgentControlCommand (com.cloud.agent.api.AgentControlCommand) 5
Command (com.cloud.agent.api.Command) 5