Example 21 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class BlockReceiver, method receiveBlock.

void receiveBlock(
    DataOutputStream mirrOut,   // output to next datanode
    DataInputStream mirrIn,     // input from next datanode
    DataOutputStream replyOut,  // output to previous datanode
    String mirrAddr, DataTransferThrottler throttlerArg,
    DatanodeInfo[] downstreams, boolean isReplaceBlock) throws IOException {
    syncOnClose = datanode.getDnConf().syncOnClose;
    boolean responderClosed = false;
    mirrorOut = mirrOut;
    mirrorAddr = mirrAddr;
    isPenultimateNode = ((downstreams != null) && (downstreams.length == 1));
    if (isPenultimateNode) {
        mirrorNameForMetrics = (downstreams[0].getInfoSecurePort() != 0 ? downstreams[0].getInfoSecureAddr() : downstreams[0].getInfoAddr());
        LOG.debug("Will collect peer metrics for downstream node {}", mirrorNameForMetrics);
    }
    throttler = throttlerArg;
    this.replyOut = replyOut;
    this.isReplaceBlock = isReplaceBlock;
    try {
        if (isClient && !isTransfer) {
            responder = new Daemon(datanode.threadGroup, new PacketResponder(replyOut, mirrIn, downstreams));
            // start thread to process responses
            responder.start();
        }
        while (receivePacket() >= 0) {
        /* Receive until the last packet */
        }
        // Mark that responder has been closed for future processing
        if (responder != null) {
            ((PacketResponder) responder.getRunnable()).close();
            responderClosed = true;
        }
        // For client-writes, the block is finalized in the PacketResponder.
        if (isDatanode || isTransfer) {
            // Hold a volume reference to finalize block.
            try (ReplicaHandler handler = claimReplicaHandler()) {
                // close the block/crc files
                close();
                block.setNumBytes(replicaInfo.getNumBytes());
                if (stage == BlockConstructionStage.TRANSFER_RBW) {
                    // for TRANSFER_RBW, convert temporary to RBW
                    datanode.data.convertTemporaryToRbw(block);
                } else {
                    // for isDatanode or TRANSFER_FINALIZED
                    // Finalize the block.
                    datanode.data.finalizeBlock(block);
                }
            }
            datanode.metrics.incrBlocksWritten();
        }
    } catch (IOException ioe) {
        replicaInfo.releaseAllBytesReserved();
        if (datanode.isRestarting()) {
            // Do not throw if shutting down for restart. Otherwise, it will cause
            // premature termination of responder.
            LOG.info("Shutting down for restart (" + block + ").");
        } else {
            LOG.info("Exception for " + block, ioe);
            throw ioe;
        }
    } finally {
        // Clear the previous interrupt state of this thread.
        Thread.interrupted();
        // If a shutdown for restart was initiated, upstream needs to be
        // notified. There is no need to do anything special if the responder
        // was closed normally.
        if (!responderClosed) {
            // Data transfer was not complete.
            if (responder != null) {
                // send a special ack upstream.
                if (datanode.isRestarting() && isClient && !isTransfer) {
                    try (Writer out = new OutputStreamWriter(replicaInfo.createRestartMetaStream(), "UTF-8")) {
                        // write out the current time.
                        out.write(Long.toString(Time.now() + restartBudget));
                        out.flush();
                    } catch (IOException ioe) {
                    // The worst case is not recovering this RBW replica. 
                    // Client will fall back to regular pipeline recovery.
                    } finally {
                        IOUtils.closeStream(streams.getDataOut());
                    }
                    try {
                        // Even if the connection is closed after the ack packet is
                        // flushed, the client can react to the connection closure 
                        // first. Insert a delay to lower the chance of client 
                        // missing the OOB ack.
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                    // It is already going down. Ignore this.
                    }
                }
                responder.interrupt();
            }
            IOUtils.closeStream(this);
            cleanupBlock();
        }
        if (responder != null) {
            try {
                responder.interrupt();
                // join() on the responder should timeout a bit earlier than the
                // configured deadline. Otherwise, the join() on this thread will
                // likely timeout as well.
                long joinTimeout = datanode.getDnConf().getXceiverStopTimeout();
                joinTimeout = joinTimeout > 1 ? joinTimeout * 8 / 10 : joinTimeout;
                responder.join(joinTimeout);
                if (responder.isAlive()) {
                    String msg = "Join on responder thread " + responder + " timed out";
                    LOG.warn(msg + "\n" + StringUtils.getStackTrace(responder));
                    throw new IOException(msg);
                }
            } catch (InterruptedException e) {
                responder.interrupt();
                // do not throw if shutting down for restart.
                if (!datanode.isRestarting()) {
                    throw new IOException("Interrupted receiveBlock");
                }
            }
            responder = null;
        }
    }
}
Also used: Daemon(org.apache.hadoop.util.Daemon) OutputStreamWriter(java.io.OutputStreamWriter) Writer(java.io.Writer) IOException(java.io.IOException)
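
The finally block above encodes a reusable shutdown idiom: interrupt the Daemon, then join with a deadline set to roughly 80% of the xceiver stop timeout, so that a join on this thread with the full timeout does not also expire. A minimal self-contained sketch of that idiom (class name, timeout value, and the sleeping task are illustrative, not from Hadoop):

import java.io.IOException;

import org.apache.hadoop.util.Daemon;

public class BoundedJoinSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        Daemon responder = new Daemon(new Runnable() {
            @Override
            public void run() {
                try {
                    while (true) {
                        Thread.sleep(100); // stand-in for ack processing
                    }
                } catch (InterruptedException ie) {
                    // interrupted during shutdown; exit
                }
            }
        });
        responder.start();

        // stand-in for datanode.getDnConf().getXceiverStopTimeout()
        long stopTimeout = 60000;
        // join() should time out a bit earlier than the configured deadline,
        // mirroring the arithmetic in receiveBlock() above
        long joinTimeout = stopTimeout > 1 ? stopTimeout * 8 / 10 : stopTimeout;
        responder.interrupt();
        responder.join(joinTimeout);
        if (responder.isAlive()) {
            throw new IOException("Join on responder thread " + responder + " timed out");
        }
    }
}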

Example 22 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class DataStorage, method doFinalize.

/**
   * Finalize procedure deletes an existing snapshot.
   * <ol>
   * <li>Rename previous to finalized.tmp directory</li>
   * <li>Fully delete the finalized.tmp directory</li>
   * </ol>
   * 
   * Do nothing if the previous directory does not exist.
   */
void doFinalize(StorageDirectory sd) throws IOException {
    File prevDir = sd.getPreviousDir();
    if (!prevDir.exists())
        // already discarded
        return;
    final String dataDirPath = sd.getRoot().getCanonicalPath();
    LOG.info("Finalizing upgrade for storage directory " + dataDirPath + ".\n   cur LV = " + this.getLayoutVersion() + "; cur CTime = " + this.getCTime());
    assert sd.getCurrentDir().exists() : "Current directory must exist.";
    // finalized.tmp directory
    final File tmpDir = sd.getFinalizedTmp();
    final File bbwDir = new File(sd.getRoot(), Storage.STORAGE_1_BBW);
    // 1. rename previous to finalized.tmp
    rename(prevDir, tmpDir);
    // 2. delete finalized.tmp dir in a separate thread
    // Also delete the blocksBeingWritten from HDFS 1.x and earlier, if
    // it exists.
    new Daemon(new Runnable() {

        @Override
        public void run() {
            try {
                deleteDir(tmpDir);
                if (bbwDir.exists()) {
                    deleteDir(bbwDir);
                }
            } catch (IOException ex) {
                LOG.error("Finalize upgrade for " + dataDirPath + " failed", ex);
            }
            LOG.info("Finalize upgrade for " + dataDirPath + " is complete");
        }

        @Override
        public String toString() {
            return "Finalize " + dataDirPath;
        }
    }).start();
}
Also used: Daemon(org.apache.hadoop.util.Daemon) IOException(java.io.IOException) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File)
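
The toString() override here is not cosmetic: Daemon's Runnable constructor names the new thread after runnable.toString() (and marks it a daemon thread), so the background delete thread shows up as "Finalize <dataDirPath>" in thread dumps instead of a generic Thread-N. A compact illustration (the path is hypothetical):

import org.apache.hadoop.util.Daemon;

public class DaemonNamingSketch {
    public static void main(String[] args) {
        Runnable task = new Runnable() {
            @Override
            public void run() {
                // background cleanup would go here
            }

            @Override
            public String toString() {
                return "Finalize /data/dn1"; // hypothetical directory path
            }
        };
        // Daemon(Runnable) calls setDaemon(true) and setName(runnable.toString())
        Daemon d = new Daemon(task);
        System.out.println(d.getName()); // prints: Finalize /data/dn1
    }
}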

Example 23 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class PendingReconstructionBlocks, method start.

void start() {
    timerThread = new Daemon(new PendingReconstructionMonitor());
    timerThread.start();
}
Also used: Daemon(org.apache.hadoop.util.Daemon)
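
The matching shutdown for this pattern interrupts the Daemon and then does a bounded join. A hedged sketch of what that looks like (the field name mirrors start() above; the body is an assumption, not necessarily the verbatim Hadoop method):

void stop() {
    if (timerThread == null) {
        return;
    }
    // wake the monitor out of its sleep
    timerThread.interrupt();
    try {
        // bounded wait so a stuck monitor cannot hang shutdown
        timerThread.join(3000);
    } catch (InterruptedException ie) {
        // shutting down anyway; ignore
    }
}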

Example 24 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

The class LeaseManager, method startMonitor.

void startMonitor() {
    Preconditions.checkState(lmthread == null, "Lease Monitor already running");
    shouldRunMonitor = true;
    lmthread = new Daemon(new Monitor());
    lmthread.start();
}
Also used: Daemon(org.apache.hadoop.util.Daemon)
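
The Runnable that lmthread wraps is a loop gated on the shouldRunMonitor flag, so the monitor can be stopped by clearing the flag and interrupting the Daemon. A hedged, self-contained sketch of that loop (the lease-check body and the 2-second interval are stand-ins, not the verbatim LeaseManager.Monitor; in Hadoop the flag lives on the enclosing LeaseManager):

class Monitor implements Runnable {
    private volatile boolean shouldRunMonitor = true;

    @Override
    public void run() {
        while (shouldRunMonitor) {
            // stand-in for the real per-iteration lease check
            System.out.println("checking leases");
            try {
                Thread.sleep(2000); // stand-in recheck interval
            } catch (InterruptedException ie) {
                // an interrupt just re-tests the flag and exits if cleared
            }
        }
    }

    void stopMonitor() {
        shouldRunMonitor = false;
    }
}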

Example 25 with Daemon

Use of org.apache.hadoop.util.Daemon in project SSM by Intel-bigdata.

The class CommandExecutor, method start.

/**
   * Start CommandExecutor.
   */
public boolean start() throws IOException {
    // TODO add recovery code
    commandExecutorThread = new Daemon(this);
    commandExecutorThread.setName(this.getClass().getCanonicalName());
    commandExecutorThread.start();
    running = true;
    return true;
}
Also used: Daemon(org.apache.hadoop.util.Daemon)
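
Across Examples 23-25 the pattern is the same: the service wraps a Runnable in a Daemon, optionally names the thread, and starts it. One subtlety: start() above sets running = true only after commandExecutorThread.start(), so if run() loops on that flag, a fast-starting loop could observe false and exit immediately; setting the flag before start() avoids the race. A self-contained sketch of the full pattern with that ordering (all names are illustrative, not from any of the projects above):

import org.apache.hadoop.util.Daemon;

public class PollingService implements Runnable {
    private volatile boolean running;
    private Daemon worker;

    public boolean start() {
        worker = new Daemon(this);
        worker.setName(this.getClass().getCanonicalName());
        // set the flag before start() so run() cannot see a stale false
        running = true;
        worker.start();
        return true;
    }

    public void stop() throws InterruptedException {
        running = false;
        worker.interrupt();
        worker.join(3000); // bounded wait, as in the examples above
    }

    @Override
    public void run() {
        while (running) {
            try {
                Thread.sleep(500); // stand-in for real periodic work
            } catch (InterruptedException ie) {
                // stop() interrupts us; the loop condition ends the thread
            }
        }
    }
}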

Aggregations

Daemon (org.apache.hadoop.util.Daemon): 25
IOException (java.io.IOException): 11
File (java.io.File): 3
Method (java.lang.reflect.Method): 2
Test (org.junit.Test): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 1
EventLoop (io.netty.channel.EventLoop): 1
EOFException (java.io.EOFException): 1
FileNotFoundException (java.io.FileNotFoundException): 1
OutputStreamWriter (java.io.OutputStreamWriter): 1
RandomAccessFile (java.io.RandomAccessFile): 1
Writer (java.io.Writer): 1
Field (java.lang.reflect.Field): 1
SocketTimeoutException (java.net.SocketTimeoutException): 1
AsynchronousCloseException (java.nio.channels.AsynchronousCloseException): 1
ArrayList (java.util.ArrayList): 1
ExecutionException (java.util.concurrent.ExecutionException): 1
HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException): 1
Path (org.apache.hadoop.fs.Path): 1