
Example 1 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

From the class DFSInputStream, method readBlockLength:

/** Read the block length from one of the datanodes. */
private long readBlockLength(LocatedBlock locatedblock) throws IOException {
    assert locatedblock != null : "LocatedBlock cannot be null";
    int replicaNotFoundCount = locatedblock.getLocations().length;
    final DfsClientConf conf = dfsClient.getConf();
    final int timeout = conf.getSocketTimeout();
    LinkedList<DatanodeInfo> nodeList = new LinkedList<DatanodeInfo>(Arrays.asList(locatedblock.getLocations()));
    LinkedList<DatanodeInfo> retryList = new LinkedList<DatanodeInfo>();
    boolean isRetry = false;
    StopWatch sw = new StopWatch();
    while (nodeList.size() > 0) {
        DatanodeInfo datanode = nodeList.pop();
        ClientDatanodeProtocol cdp = null;
        try {
            cdp = DFSUtilClient.createClientDatanodeProtocolProxy(datanode, dfsClient.getConfiguration(), timeout, conf.isConnectToDnViaHostname(), locatedblock);
            final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
            if (n >= 0) {
                return n;
            }
        } catch (IOException ioe) {
            checkInterrupted(ioe);
            if (ioe instanceof RemoteException) {
                if (((RemoteException) ioe).unwrapRemoteException() instanceof ReplicaNotFoundException) {
                    // replica is not on the DN. We will treat it as 0 length
                    // if no one actually has a replica.
                    replicaNotFoundCount--;
                } else if (((RemoteException) ioe).unwrapRemoteException() instanceof RetriableException) {
                    // add to the list to be retried if necessary.
                    retryList.add(datanode);
                }
            }
            DFSClient.LOG.debug("Failed to getReplicaVisibleLength from datanode {} for block {}", datanode, locatedblock.getBlock(), ioe);
        } finally {
            if (cdp != null) {
                RPC.stopProxy(cdp);
            }
        }
        // Ran out of nodes, but there are retriable nodes.
        if (nodeList.size() == 0 && retryList.size() > 0) {
            nodeList.addAll(retryList);
            retryList.clear();
            isRetry = true;
        }
        if (isRetry) {
            // start the stop watch if not already running.
            if (!sw.isRunning()) {
                sw.start();
            }
            try {
                // delay between retries.
                Thread.sleep(500);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new InterruptedIOException("Interrupted while getting the length.");
            }
        }
        // see if we ran out of retry time
        if (sw.isRunning() && sw.now(TimeUnit.MILLISECONDS) > timeout) {
            break;
        }
    }
    // The namenode reported these locations, but none of the datanodes knows
    // about the replica, which means we hit the race between pipeline creation
    // start and end. We only treat the block as zero-length if every location
    // reported ReplicaNotFoundException; if some other exception happened on a
    // DN that has it, we want to report that error.
    if (replicaNotFoundCount == 0) {
        return 0;
    }
    throw new IOException("Cannot obtain block length for " + locatedblock);
}
Also used: InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), LinkedList (java.util.LinkedList), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), StopWatch (org.apache.hadoop.util.StopWatch), DfsClientConf (org.apache.hadoop.hdfs.client.impl.DfsClientConf), RemoteException (org.apache.hadoop.ipc.RemoteException), RetriableException (org.apache.hadoop.ipc.RetriableException)
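
The pattern worth lifting out of Example 1 is the retry shape: pop datanodes off a work list, requeue the retriable ones once the list drains, and bound the retry passes with a stopwatch. Below is a minimal, self-contained sketch of that shape with the Hadoop specifics (proxy creation, RemoteException unwrapping, RPC.stopProxy cleanup) stubbed out behind a hypothetical NodeCall interface; the class and method names here are invented for illustration, not Hadoop API.

import java.util.Arrays;
import java.util.LinkedList;
import java.util.concurrent.TimeUnit;

public class ReplicaLengthRetrySketch {

    /** Hypothetical stand-in for the per-datanode RPC in the real code. */
    interface NodeCall {
        long visibleLength(String node) throws Exception;
    }

    static long firstKnownLength(String[] nodes, NodeCall rpc, long timeoutMs)
            throws Exception {
        LinkedList<String> nodeList = new LinkedList<>(Arrays.asList(nodes));
        LinkedList<String> retryList = new LinkedList<>();
        boolean isRetry = false;
        long retryStart = -1; // the "stop watch": starts on the first retry pass
        while (!nodeList.isEmpty()) {
            String node = nodeList.pop();
            try {
                long n = rpc.visibleLength(node);
                if (n >= 0) {
                    return n; // first node that answers wins
                }
            } catch (Exception e) {
                retryList.add(node); // in this sketch, every failure counts as retriable
            }
            // Ran out of nodes, but some are retriable: queue them up again.
            if (nodeList.isEmpty() && !retryList.isEmpty()) {
                nodeList.addAll(retryList);
                retryList.clear();
                isRetry = true;
            }
            if (isRetry) {
                if (retryStart < 0) {
                    retryStart = System.nanoTime();
                }
                Thread.sleep(500); // delay between retry passes
            }
            // Stop retrying once the retry window exceeds the timeout.
            if (retryStart >= 0 && TimeUnit.NANOSECONDS
                    .toMillis(System.nanoTime() - retryStart) > timeoutMs) {
                break;
            }
        }
        throw new Exception("no node returned a length within " + timeoutMs + " ms");
    }

    public static void main(String[] args) throws Exception {
        String[] nodes = {"dn1", "dn2", "dn3"};
        // dn1 and dn2 report no usable length; dn3 answers with 1024.
        long len = firstKnownLength(nodes,
                node -> node.equals("dn3") ? 1024L : -1L, 5_000L);
        System.out.println("visible length: " + len);
    }
}

The real method additionally distinguishes ReplicaNotFoundException (decrement a counter and, if every location reports it, treat the block as zero-length) from RetriableException (requeue the node), and always releases the proxy via RPC.stopProxy in a finally block.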

Example 2 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

From the class DFSAdmin, method shutdownDatanode:

private int shutdownDatanode(String[] argv, int i) throws IOException {
    final String dn = argv[i];
    ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
    boolean upgrade = false;
    if (argv.length - 1 == i + 1) {
        if ("upgrade".equalsIgnoreCase(argv[i + 1])) {
            upgrade = true;
        } else {
            printUsage("-shutdownDatanode");
            return -1;
        }
    }
    dnProxy.shutdownDatanode(upgrade);
    System.out.println("Submitted a shutdown request to datanode " + dn);
    return 0;
}
Also used: ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
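
This method backs the -shutdownDatanode subcommand; per Hadoop's documented dfsadmin CLI, the invocation takes the form:

hdfs dfsadmin -shutdownDatanode <datanode_host:ipc_port> [upgrade]

The optional upgrade argument advises clients of the datanode to wait for it to restart (the rolling-upgrade fast-restart mode); as the code above shows, any other trailing argument triggers the usage message and a -1 return.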

Example 3 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

From the class DFSAdmin, method deleteBlockPool:

private int deleteBlockPool(String[] argv, int i) throws IOException {
    ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[i]);
    boolean force = false;
    if (argv.length - 1 == i + 2) {
        if ("force".equals(argv[i + 2])) {
            force = true;
        } else {
            printUsage("-deleteBlockPool");
            return -1;
        }
    }
    dnProxy.deleteBlockPool(argv[i + 1], force);
    return 0;
}
Also used: ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
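
This corresponds to the -deleteBlockPool subcommand, whose documented form is:

hdfs dfsadmin -deleteBlockPool <datanode_host:ipc_port> <blockPoolId> [force]

Here argv[i] is the datanode address and argv[i + 1] the block pool id; without force, the block pool directory on the datanode is deleted only if it is empty, while force deletes it along with its contents, which is why the flag must be spelled out explicitly.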

Example 4 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

From the class DFSAdmin, method startReconfigurationDispatch:

int startReconfigurationDispatch(final String nodeType, final String address, final PrintStream out, final PrintStream err) throws IOException {
    if ("namenode".equals(nodeType)) {
        ReconfigurationProtocol reconfProxy = getNameNodeProxy(address);
        reconfProxy.startReconfiguration();
        return 0;
    } else if ("datanode".equals(nodeType)) {
        ClientDatanodeProtocol reconfProxy = getDataNodeProxy(address);
        reconfProxy.startReconfiguration();
        return 0;
    } else {
        System.err.println("Node type " + nodeType + " does not support reconfiguration.");
        return 1;
    }
}
Also used: ReconfigurationProtocol (org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
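
This dispatch method is reached through the -reconfig subcommand; in recent Hadoop releases its documented form is:

hdfs dfsadmin -reconfig <namenode|datanode> <host:ipc_port> <start|status|properties>

start triggers the reconfiguration shown here, while status and properties (handled by sibling methods in DFSAdmin) poll its progress and list the reconfigurable properties.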

Example 5 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

From the class DFSAdmin, method evictWriters:

private int evictWriters(String[] argv, int i) throws IOException {
    final String dn = argv[i];
    ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
    try {
        dnProxy.evictWriters();
        System.out.println("Requested writer eviction to datanode " + dn);
    } catch (IOException ioe) {
        throw new IOException("Datanode unreachable. " + ioe, ioe);
    }
    return 0;
}
Also used: IOException (java.io.IOException), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
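
This backs the -evictWriters subcommand:

hdfs dfsadmin -evictWriters <datanode_host:ipc_port>

It makes the datanode evict all clients that are currently writing a block, which is useful when decommissioning hangs because of slow writers.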

Aggregations

ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 22
IOException (java.io.IOException): 7
InetSocketAddress (java.net.InetSocketAddress): 5
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3
Server (org.apache.hadoop.ipc.Server): 3
Test (org.junit.Test): 3
LinkedList (java.util.LinkedList): 2
Path (org.apache.hadoop.fs.Path): 2
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2
Block (org.apache.hadoop.hdfs.protocol.Block): 2
BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo): 2
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 2
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2
BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier): 2
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 2