
Example 6 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

From the class DataNode, method transferBlock.

@VisibleForTesting
void transferBlock(ExtendedBlock block, DatanodeInfo[] xferTargets, StorageType[] xferTargetStorageTypes) throws IOException {
    BPOfferService bpos = getBPOSForBlock(block);
    DatanodeRegistration bpReg = getDNRegistrationForBP(block.getBlockPoolId());
    boolean replicaNotExist = false;
    boolean replicaStateNotFinalized = false;
    boolean blockFileNotExist = false;
    boolean lengthTooShort = false;
    try {
        data.checkBlock(block, block.getNumBytes(), ReplicaState.FINALIZED);
    } catch (ReplicaNotFoundException e) {
        replicaNotExist = true;
    } catch (UnexpectedReplicaStateException e) {
        replicaStateNotFinalized = true;
    } catch (FileNotFoundException e) {
        blockFileNotExist = true;
    } catch (EOFException e) {
        lengthTooShort = true;
    } catch (IOException e) {
        // An IOException here means the block file could not be accessed;
        // treat it the same as blockFileNotExist, so the block is
        // reported as bad.
        blockFileNotExist = true;
    }
    if (replicaNotExist || replicaStateNotFinalized) {
        String errStr = "Can't send invalid block " + block;
        LOG.info(errStr);
        bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errStr);
        return;
    }
    if (blockFileNotExist) {
        // Report back to NN bad block caused by non-existent block file.
        reportBadBlock(bpos, block, "Can't replicate block " + block + " because the block file doesn't exist, or is not accessible");
        return;
    }
    if (lengthTooShort) {
        // The NameNode-recorded length does not match the on-disk length;
        // a shorter on-disk length indicates corruption, so report the
        // corrupt block to the NameNode.
        reportBadBlock(bpos, block, "Can't replicate block " + block + " because on-disk length " + data.getLength(block) + " is shorter than NameNode recorded length " + block.getNumBytes());
        return;
    }
    int numTargets = xferTargets.length;
    if (numTargets > 0) {
        StringBuilder xfersBuilder = new StringBuilder();
        for (int i = 0; i < numTargets; i++) {
            xfersBuilder.append(xferTargets[i]);
            xfersBuilder.append(" ");
        }
        LOG.info(bpReg + " Starting thread to transfer " + block + " to " + xfersBuilder);
        new Daemon(new DataTransfer(xferTargets, xferTargetStorageTypes, block, BlockConstructionStage.PIPELINE_SETUP_CREATE, "")).start();
    }
}
Also used: FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), Daemon (org.apache.hadoop.util.Daemon), EOFException (java.io.EOFException), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
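
The pattern above is Daemon wrapping a Runnable: Daemon extends Thread, marks itself a daemon in its constructor, and names its thread after the runnable's toString(), so an in-flight transfer never keeps the JVM alive and shows up legibly in thread dumps. A minimal standalone sketch of that shape, assuming hadoop-common (which provides org.apache.hadoop.util.Daemon) is on the classpath; FakeTransfer and blk_123 are hypothetical stand-ins for DataNode.DataTransfer:

import org.apache.hadoop.util.Daemon;

public class TransferSketch {

    // Hypothetical stand-in for DataNode.DataTransfer; any Runnable works.
    static class FakeTransfer implements Runnable {
        private final String block;

        FakeTransfer(String block) {
            this.block = block;
        }

        @Override
        public void run() {
            System.out.println("transferring " + block);
        }

        // Daemon names its thread after the runnable's toString(),
        // so this string appears in thread dumps.
        @Override
        public String toString() {
            return "FakeTransfer for " + block;
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // Daemon extends Thread and calls setDaemon(true) in its
        // constructor, so a hung transfer never blocks JVM shutdown.
        Daemon d = new Daemon(new FakeTransfer("blk_123"));
        d.start();
        d.join();
    }
}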

Example 7 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

From the class BlockManager, method processMisReplicatedBlocks.

/**
   * For each block in the name-node, verify whether it belongs to any file
   * and whether it has extra or low redundancy, then place it into the
   * respective queue.
   */
public void processMisReplicatedBlocks() {
    assert namesystem.hasWriteLock();
    stopReconstructionInitializer();
    neededReconstruction.clear();
    reconstructionQueuesInitializer = new Daemon() {

        @Override
        public void run() {
            try {
                processMisReplicatesAsync();
            } catch (InterruptedException ie) {
                LOG.info("Interrupted while processing reconstruction queues.");
            } catch (Exception e) {
                LOG.error("Error while processing reconstruction queues async", e);
            }
        }
    };
    reconstructionQueuesInitializer.setName("Reconstruction Queue Initializer");
    reconstructionQueuesInitializer.start();
}
Also used: Daemon (org.apache.hadoop.util.Daemon), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException), UnregisteredNodeException (org.apache.hadoop.hdfs.protocol.UnregisteredNodeException)
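
Example 7 takes the other route: subclassing Daemon anonymously instead of passing a Runnable, then naming and keeping the thread handle so it can be stopped and restarted. A minimal sketch of that shape under the same hadoop-common classpath assumption; the sleep is a placeholder for processMisReplicatesAsync():

import org.apache.hadoop.util.Daemon;

public class InitializerSketch {
    public static void main(String[] args) throws InterruptedException {
        // Anonymous Daemon subclass, mirroring reconstructionQueuesInitializer.
        Daemon initializer = new Daemon() {
            @Override
            public void run() {
                try {
                    Thread.sleep(100); // placeholder for processMisReplicatesAsync()
                } catch (InterruptedException ie) {
                    System.out.println("Interrupted while initializing queues.");
                }
            }
        };
        initializer.setName("Reconstruction Queue Initializer");
        initializer.start();
        // join() only so this demo's main thread outlives the daemon;
        // BlockManager instead keeps the handle to interrupt and join it later.
        initializer.join();
    }
}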

Example 8 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

From the class BlockPoolSliceStorage, method clearTrash.

/**
   * Delete all files and directories in the trash directories.
   */
public void clearTrash() {
    final List<File> trashRoots = new ArrayList<>();
    for (StorageDirectory sd : getStorageDirs()) {
        File trashRoot = getTrashRootDir(sd);
        if (trashRoot.exists() && sd.getPreviousDir().exists()) {
            LOG.error("Trash and PreviousDir shouldn't both exist for storage " + "directory " + sd);
            assert false;
        } else {
            trashRoots.add(trashRoot);
        }
    }
    stopTrashCleaner();
    trashCleaner = new Daemon(new Runnable() {

        @Override
        public void run() {
            for (File trashRoot : trashRoots) {
                FileUtil.fullyDelete(trashRoot);
                LOG.info("Cleared trash for storage directory " + trashRoot);
            }
        }

        @Override
        public String toString() {
            return "clearTrash() for " + blockpoolID;
        }
    });
    trashCleaner.start();
}
Also used: Daemon (org.apache.hadoop.util.Daemon), ArrayList (java.util.ArrayList), File (java.io.File)
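
The toString() override in this example is not cosmetic: Daemon copies the runnable's toString() into the thread name, so the block pool ID becomes visible in jstack output while the cleaner runs. A sketch illustrating just that mechanism, with a hypothetical blockpoolID:

import org.apache.hadoop.util.Daemon;

public class TrashCleanerSketch {
    public static void main(String[] args) throws InterruptedException {
        final String blockpoolID = "BP-0000000000-example"; // hypothetical ID
        Daemon cleaner = new Daemon(new Runnable() {
            @Override
            public void run() {
                System.out.println("deleting trash directories...");
            }

            // Daemon uses this as the thread name, so a thread dump
            // shows which block pool the cleaner belongs to.
            @Override
            public String toString() {
                return "clearTrash() for " + blockpoolID;
            }
        });
        System.out.println(cleaner.getName()); // clearTrash() for BP-0000000000-example
        cleaner.start();
        cleaner.join();
    }
}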

Example 9 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

From the class BlockRecoveryWorker, method recoverBlocks.

public Daemon recoverBlocks(final String who, final Collection<RecoveringBlock> blocks) {
    Daemon d = new Daemon(datanode.threadGroup, new Runnable() {

        @Override
        public void run() {
            for (RecoveringBlock b : blocks) {
                try {
                    logRecoverBlock(who, b);
                    if (b.isStriped()) {
                        new RecoveryTaskStriped((RecoveringStripedBlock) b).recover();
                    } else {
                        new RecoveryTaskContiguous(b).recover();
                    }
                } catch (IOException e) {
                    LOG.warn("recoverBlocks FAILED: " + b, e);
                }
            }
        }
    });
    d.start();
    return d;
}
Also used: Daemon (org.apache.hadoop.util.Daemon), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), IOException (java.io.IOException)
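
Example 9 uses the Daemon(ThreadGroup, Runnable) constructor so the recovery thread is parented under the DataNode's thread group, and it returns the started thread so callers can wait on or inspect it. A sketch of that shape; the group and block names are hypothetical stand-ins for datanode.threadGroup and a RecoveringBlock:

import org.apache.hadoop.util.Daemon;

public class RecoverySketch {

    // Mirrors recoverBlocks(): run the work on a daemon thread parented
    // under a given group, and hand the thread back to the caller.
    static Daemon recover(ThreadGroup group, final String block) {
        Daemon d = new Daemon(group, new Runnable() {
            @Override
            public void run() {
                System.out.println("recovering " + block + " in group "
                        + Thread.currentThread().getThreadGroup().getName());
            }
        });
        d.start();
        return d;
    }

    public static void main(String[] args) throws InterruptedException {
        // Hypothetical stand-in for datanode.threadGroup.
        ThreadGroup group = new ThreadGroup("dataNodeThreads");
        Daemon d = recover(group, "blk_42");
        d.join(); // the returned handle lets callers, e.g. tests, wait on it
    }
}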

Example 10 with Daemon

Use of org.apache.hadoop.util.Daemon in project hadoop by apache.

From the class OpenFileCtx, method waitForDump.

// Check whether the new writes need to be dumped and, if so, wait
// until the in-memory data drops below the water mark.
private void waitForDump() {
    if (!enabledDump) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Do nothing, dump is disabled.");
        }
        return;
    }
    if (nonSequentialWriteInMemory.get() < DUMP_WRITE_WATER_MARK) {
        return;
    }
    // wake up the dumper thread to dump the data
    synchronized (this) {
        if (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Asking dumper to dump...");
            }
            if (dumpThread == null) {
                dumpThread = new Daemon(new Dumper());
                dumpThread.start();
            } else {
                this.notifyAll();
            }
        }
        while (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
            try {
                this.wait();
            } catch (InterruptedException ignored) {
            }
        }
    }
}
Also used: Daemon (org.apache.hadoop.util.Daemon)
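
Here the Daemon is started lazily on first overflow and then coordinated with through the object's monitor: notifyAll() wakes the dumper, wait() blocks the writer until the backlog drains. A trimmed, self-contained sketch of that handshake; the water mark, backlog counter, and "dump" are stand-ins for DUMP_WRITE_WATER_MARK, nonSequentialWriteInMemory, and the Dumper:

import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.util.Daemon;

public class DumpSketch {
    private static final long WATER_MARK = 1024;               // stand-in threshold
    private final AtomicLong inMemory = new AtomicLong(4096);  // pretend backlog
    private Daemon dumpThread;

    private synchronized void waitForDump() throws InterruptedException {
        if (inMemory.get() >= WATER_MARK) {
            if (dumpThread == null) {
                // Lazily start a single daemon dumper on first overflow.
                dumpThread = new Daemon(new Runnable() {
                    @Override
                    public void run() {
                        inMemory.set(0); // pretend to spill the writes to disk
                        synchronized (DumpSketch.this) {
                            DumpSketch.this.notifyAll(); // wake blocked writers
                        }
                    }
                });
                dumpThread.start();
            } else {
                notifyAll(); // nudge an already-running dumper
            }
        }
        // Block the writer until the backlog drops below the water mark;
        // wait() releases the monitor so the dumper can enter and notify.
        while (inMemory.get() >= WATER_MARK) {
            wait();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        new DumpSketch().waitForDump();
        System.out.println("below water mark, writer continues");
    }
}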

Aggregations

Daemon (org.apache.hadoop.util.Daemon): 27
IOException (java.io.IOException): 12
File (java.io.File): 3
Method (java.lang.reflect.Method): 3
Test (org.junit.Test): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 1
EventLoop (io.netty.channel.EventLoop): 1
EOFException (java.io.EOFException): 1
FileNotFoundException (java.io.FileNotFoundException): 1
OutputStreamWriter (java.io.OutputStreamWriter): 1
RandomAccessFile (java.io.RandomAccessFile): 1
Writer (java.io.Writer): 1
Field (java.lang.reflect.Field): 1
SocketTimeoutException (java.net.SocketTimeoutException): 1
AsynchronousCloseException (java.nio.channels.AsynchronousCloseException): 1
ArrayList (java.util.ArrayList): 1
ExecutionException (java.util.concurrent.ExecutionException): 1
HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException): 1
Path (org.apache.hadoop.fs.Path): 1