Use of org.apache.hadoop.hdfs.client.impl.DfsClientConf in project hadoop by apache.
The class NameNodeProxiesClient, method createHAProxy.
/**
* Creates an explicitly HA-enabled proxy object.
*
* @param conf the configuration object
* @param nameNodeUri the URI pointing either to a specific NameNode or to a
* logical nameservice.
* @param xface the IPC interface which should be created
* @param failoverProxyProvider Failover proxy provider
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
*/
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createHAProxy(
    Configuration conf, URI nameNodeUri, Class<T> xface,
    AbstractNNFailoverProxyProvider<T> failoverProxyProvider) {
  Preconditions.checkNotNull(failoverProxyProvider);
  // HA case
  DfsClientConf config = new DfsClientConf(conf);
  T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
      RetryPolicies.failoverOnNetworkException(
          RetryPolicies.TRY_ONCE_THEN_FAIL,
          config.getMaxFailoverAttempts(),
          config.getMaxRetryAttempts(),
          config.getFailoverSleepBaseMillis(),
          config.getFailoverSleepMaxMillis()));
  Text dtService;
  if (failoverProxyProvider.useLogicalURI()) {
    dtService = HAUtilClient.buildTokenServiceForLogicalUri(
        nameNodeUri, HdfsConstants.HDFS_URI_SCHEME);
  } else {
    dtService = SecurityUtil.buildTokenService(
        DFSUtilClient.getNNAddress(nameNodeUri));
  }
  return new ProxyAndInfo<>(proxy, dtService,
      DFSUtilClient.getNNAddressCheckLogical(conf, nameNodeUri));
}
Use of org.apache.hadoop.hdfs.client.impl.DfsClientConf in project hadoop by apache.
The class TestDFSOutputStream, method testCongestionBackoff.
@Test
public void testCongestionBackoff() throws IOException {
  DfsClientConf dfsClientConf = mock(DfsClientConf.class);
  DFSClient client = mock(DFSClient.class);
  when(client.getConf()).thenReturn(dfsClientConf);
  when(client.getTracer()).thenReturn(FsTracer.get(new Configuration()));
  client.clientRunning = true;
  DataStreamer stream = new DataStreamer(mock(HdfsFileStatus.class),
      mock(ExtendedBlock.class), client, "foo", null, null, null, null, null, null);
  DataOutputStream blockStream = mock(DataOutputStream.class);
  doThrow(new IOException()).when(blockStream).flush();
  Whitebox.setInternalState(stream, "blockStream", blockStream);
  Whitebox.setInternalState(stream, "stage", BlockConstructionStage.PIPELINE_CLOSE);
  @SuppressWarnings("unchecked")
  LinkedList<DFSPacket> dataQueue = (LinkedList<DFSPacket>)
      Whitebox.getInternalState(stream, "dataQueue");
  @SuppressWarnings("unchecked")
  ArrayList<DatanodeInfo> congestedNodes = (ArrayList<DatanodeInfo>)
      Whitebox.getInternalState(stream, "congestedNodes");
  congestedNodes.add(mock(DatanodeInfo.class));
  DFSPacket packet = mock(DFSPacket.class);
  when(packet.getTraceParents()).thenReturn(new SpanId[] {});
  dataQueue.add(packet);
  stream.run();
  Assert.assertTrue(congestedNodes.isEmpty());
}
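Note that the test stubs DfsClientConf with Mockito instead of constructing one from a Configuration, so only the getters the code under test touches need canned answers. A minimal sketch of that pattern; the stubbed return values are illustrative assumptions:

DfsClientConf dfsClientConf = mock(DfsClientConf.class);
// Stub only what the code under test reads; these return values are illustrative.
when(dfsClientConf.getSocketTimeout()).thenReturn(60_000);
when(dfsClientConf.isConnectToDnViaHostname()).thenReturn(false);
DFSClient client = mock(DFSClient.class);
when(client.getConf()).thenReturn(dfsClientConf);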
Use of org.apache.hadoop.hdfs.client.impl.DfsClientConf in project hadoop by apache.
The class DFSInputStream, method readBlockLength.
/** Read the block length from one of the datanodes. */
private long readBlockLength(LocatedBlock locatedblock) throws IOException {
  assert locatedblock != null : "LocatedBlock cannot be null";
  int replicaNotFoundCount = locatedblock.getLocations().length;
  final DfsClientConf conf = dfsClient.getConf();
  final int timeout = conf.getSocketTimeout();
  LinkedList<DatanodeInfo> nodeList =
      new LinkedList<DatanodeInfo>(Arrays.asList(locatedblock.getLocations()));
  LinkedList<DatanodeInfo> retryList = new LinkedList<DatanodeInfo>();
  boolean isRetry = false;
  StopWatch sw = new StopWatch();
  while (nodeList.size() > 0) {
    DatanodeInfo datanode = nodeList.pop();
    ClientDatanodeProtocol cdp = null;
    try {
      cdp = DFSUtilClient.createClientDatanodeProtocolProxy(datanode,
          dfsClient.getConfiguration(), timeout,
          conf.isConnectToDnViaHostname(), locatedblock);
      final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
      if (n >= 0) {
        return n;
      }
    } catch (IOException ioe) {
      checkInterrupted(ioe);
      if (ioe instanceof RemoteException) {
        if (((RemoteException) ioe).unwrapRemoteException()
            instanceof ReplicaNotFoundException) {
          // replica is not on the DN. We will treat it as 0 length
          // if no one actually has a replica.
          replicaNotFoundCount--;
        } else if (((RemoteException) ioe).unwrapRemoteException()
            instanceof RetriableException) {
          // add to the list to be retried if necessary.
          retryList.add(datanode);
        }
      }
      DFSClient.LOG.debug("Failed to getReplicaVisibleLength from datanode {}"
          + " for block {}", datanode, locatedblock.getBlock(), ioe);
    } finally {
      if (cdp != null) {
        RPC.stopProxy(cdp);
      }
    }
    // Ran out of nodes, but there are retriable nodes.
    if (nodeList.size() == 0 && retryList.size() > 0) {
      nodeList.addAll(retryList);
      retryList.clear();
      isRetry = true;
    }
    if (isRetry) {
      // start the stop watch if not already running.
      if (!sw.isRunning()) {
        sw.start();
      }
      try {
        // delay between retries.
        Thread.sleep(500);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException("Interrupted while getting the length.");
      }
    }
    // see if we ran out of retry time
    if (sw.isRunning() && sw.now(TimeUnit.MILLISECONDS) > timeout) {
      break;
    }
  }
  // Every datanode explicitly reported that it has no replica, so treat the
  // block as zero-length; any other failure is surfaced as an error because a
  // node we could not reach may still hold the replica.
  if (replicaNotFoundCount == 0) {
    return 0;
  }
  throw new IOException("Cannot obtain block length for " + locatedblock);
}
Use of org.apache.hadoop.hdfs.client.impl.DfsClientConf in project hadoop by apache.
The class DFSOutputStream, method completeFile.
// should be called holding (this) lock since setTestFilename() may
// be called during unit tests
protected void completeFile(ExtendedBlock last) throws IOException {
  long localstart = Time.monotonicNow();
  final DfsClientConf conf = dfsClient.getConf();
  long sleeptime = conf.getBlockWriteLocateFollowingInitialDelayMs();
  boolean fileComplete = false;
  int retries = conf.getNumBlockWriteLocateFollowingRetry();
  while (!fileComplete) {
    fileComplete = dfsClient.namenode.complete(src, dfsClient.clientName,
        last, fileId);
    if (!fileComplete) {
      final int hdfsTimeout = conf.getHdfsTimeout();
      if (!dfsClient.clientRunning
          || (hdfsTimeout > 0 && localstart + hdfsTimeout < Time.monotonicNow())) {
        String msg = "Unable to close file because dfsclient was unable to"
            + " contact the HDFS servers. clientRunning " + dfsClient.clientRunning
            + " hdfsTimeout " + hdfsTimeout;
        DFSClient.LOG.info(msg);
        throw new IOException(msg);
      }
      try {
        if (retries == 0) {
          throw new IOException("Unable to close file because the last block "
              + last + " does not have enough replicas.");
        }
        retries--;
        Thread.sleep(sleeptime);
        sleeptime *= 2;
        if (Time.monotonicNow() - localstart > 5000) {
          DFSClient.LOG.info("Could not complete " + src + " retrying...");
        }
      } catch (InterruptedException ie) {
        DFSClient.LOG.warn("Caught exception ", ie);
      }
    }
  }
}
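The loop above is a capped exponential backoff around namenode.complete(). A standalone sketch of the same shape; tryComplete() is a hypothetical stand-in for the namenode call, and the starting delay and retry count are assumptions rather than the client defaults:

static void completeWithBackoff() throws IOException, InterruptedException {
  long sleepMs = 400;              // illustrative initial delay
  int retries = 5;                 // illustrative retry budget
  boolean done = false;
  while (!done) {
    done = tryComplete();          // hypothetical stand-in for namenode.complete(...)
    if (!done) {
      if (retries-- == 0) {
        throw new IOException("still not complete after all retries");
      }
      Thread.sleep(sleepMs);       // back off before asking again
      sleepMs *= 2;                // double the delay on each retry
    }
  }
}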
Use of org.apache.hadoop.hdfs.client.impl.DfsClientConf in project hadoop by apache.
The class DataStreamer, method createSocketForPipeline.
/**
* Create a socket for a write pipeline
*
* @param first the first datanode
* @param length the pipeline length
* @param client client
* @return the socket connected to the first datanode
*/
static Socket createSocketForPipeline(final DatanodeInfo first,
    final int length, final DFSClient client) throws IOException {
  final DfsClientConf conf = client.getConf();
  final String dnAddr = first.getXferAddr(conf.isConnectToDnViaHostname());
  LOG.debug("Connecting to datanode {}", dnAddr);
  final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
  final Socket sock = client.socketFactory.createSocket();
  final int timeout = client.getDatanodeReadTimeout(length);
  NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(),
      conf.getSocketTimeout());
  sock.setTcpNoDelay(conf.getDataTransferTcpNoDelay());
  sock.setSoTimeout(timeout);
  sock.setKeepAlive(true);
  if (conf.getSocketSendBufferSize() > 0) {
    sock.setSendBufferSize(conf.getSocketSendBufferSize());
  }
  LOG.debug("Send buf size {}", sock.getSendBufferSize());
  return sock;
}