Use of java.io.InterruptedIOException in project hadoop by apache.
The class DFSInputStream, method readBlockLength.
/** Read the block length from one of the datanodes. */
private long readBlockLength(LocatedBlock locatedblock) throws IOException {
  assert locatedblock != null : "LocatedBlock cannot be null";
  int replicaNotFoundCount = locatedblock.getLocations().length;
  final DfsClientConf conf = dfsClient.getConf();
  final int timeout = conf.getSocketTimeout();
  LinkedList<DatanodeInfo> nodeList =
      new LinkedList<DatanodeInfo>(Arrays.asList(locatedblock.getLocations()));
  LinkedList<DatanodeInfo> retryList = new LinkedList<DatanodeInfo>();
  boolean isRetry = false;
  StopWatch sw = new StopWatch();
  while (nodeList.size() > 0) {
    DatanodeInfo datanode = nodeList.pop();
    ClientDatanodeProtocol cdp = null;
    try {
      cdp = DFSUtilClient.createClientDatanodeProtocolProxy(datanode,
          dfsClient.getConfiguration(), timeout,
          conf.isConnectToDnViaHostname(), locatedblock);
      final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
      if (n >= 0) {
        return n;
      }
    } catch (IOException ioe) {
      checkInterrupted(ioe);
      if (ioe instanceof RemoteException) {
        if (((RemoteException) ioe).unwrapRemoteException()
            instanceof ReplicaNotFoundException) {
          // replica is not on the DN. We will treat it as 0 length
          // if no one actually has a replica.
          replicaNotFoundCount--;
        } else if (((RemoteException) ioe).unwrapRemoteException()
            instanceof RetriableException) {
          // add to the list to be retried if necessary.
          retryList.add(datanode);
        }
      }
      DFSClient.LOG.debug("Failed to getReplicaVisibleLength from datanode {}"
          + " for block {}", datanode, locatedblock.getBlock(), ioe);
    } finally {
      if (cdp != null) {
        RPC.stopProxy(cdp);
      }
    }
    // Ran out of nodes, but there are retriable nodes.
    if (nodeList.size() == 0 && retryList.size() > 0) {
      nodeList.addAll(retryList);
      retryList.clear();
      isRetry = true;
    }
    if (isRetry) {
      // start the stop watch if not already running.
      if (!sw.isRunning()) {
        sw.start();
      }
      try {
        // delay between retries.
        Thread.sleep(500);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException("Interrupted while getting the length.");
      }
    }
    // see if we ran out of retry time
    if (sw.isRunning() && sw.now(TimeUnit.MILLISECONDS) > timeout) {
      break;
    }
  }
  // Every datanode the namenode listed reported ReplicaNotFoundException,
  // so treat the block as zero length. Any other failure on a datanode that
  // does hold the replica must be reported as an error instead.
  if (replicaNotFoundCount == 0) {
    return 0;
  }
  throw new IOException("Cannot obtain block length for " + locatedblock);
}
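The retry loop above also shows the common HDFS-client idiom for interruption during a retry delay: the thread's interrupt flag is re-asserted, and the InterruptedException is rethrown as an InterruptedIOException so that callers handling only IOException still observe the interruption. A minimal standalone sketch of that conversion (the class and method names here are illustrative, not from Hadoop):

import java.io.InterruptedIOException;

public final class RetryDelays {

  private RetryDelays() {}

  /** Sleep between retries, surfacing interruption as an IOException subtype. */
  static void sleepBeforeRetry(long millis) throws InterruptedIOException {
    try {
      Thread.sleep(millis);
    } catch (InterruptedException e) {
      // Restore the interrupt status so outer code can still observe it,
      // then rethrow as an IOException that IO-oriented callers expect.
      Thread.currentThread().interrupt();
      throw new InterruptedIOException("Interrupted while waiting to retry");
    }
  }
}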
Use of java.io.InterruptedIOException in project hadoop by apache.
The class DataStreamer, method waitForAckedSeqno.
/**
* wait for the ack of seqno
*
* @param seqno the sequence number to be acked
* @throws IOException
*/
void waitForAckedSeqno(long seqno) throws IOException {
  try (TraceScope ignored =
      dfsClient.getTracer().newScope("waitForAckedSeqno")) {
    LOG.debug("{} waiting for ack for: {}", this, seqno);
    long begin = Time.monotonicNow();
    try {
      synchronized (dataQueue) {
        while (!streamerClosed) {
          checkClosed();
          if (lastAckedSeqno >= seqno) {
            break;
          }
          try {
            // when we receive an ack, we notify on dataQueue
            dataQueue.wait(1000);
          } catch (InterruptedException ie) {
            throw new InterruptedIOException(
                "Interrupted while waiting for data to be acknowledged by pipeline");
          }
        }
      }
      checkClosed();
    } catch (ClosedChannelException cce) {
      // ignore: the stream was closed while waiting.
    }
    long duration = Time.monotonicNow() - begin;
    if (duration > dfsclientSlowLogThresholdMs) {
      LOG.warn("Slow waitForAckedSeqno took {}ms (threshold={}ms). File being"
          + " written: {}, block: {}, Write pipeline datanodes: {}.",
          duration, dfsclientSlowLogThresholdMs, src, block, nodes);
    }
  }
}
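The method blocks on dataQueue with a one-second bounded wait so that a closed streamer is noticed even when no ack ever arrives. A simplified sketch of the same wait-until-acked shape, with hypothetical lock, lastAcked, and closed members standing in for the DataStreamer state:

import java.io.IOException;
import java.io.InterruptedIOException;

class AckWaiter {

  private final Object lock = new Object();
  private long lastAcked = -1;
  private boolean closed = false;

  /** Called by the ack-receiving thread. */
  void onAck(long seqno) {
    synchronized (lock) {
      lastAcked = Math.max(lastAcked, seqno);
      lock.notifyAll();
    }
  }

  /** Block until seqno is acknowledged, re-checking closure once a second. */
  void waitForAcked(long seqno) throws IOException {
    synchronized (lock) {
      while (!closed && lastAcked < seqno) {
        try {
          // woken by onAck(), or times out so the closed flag is re-checked
          lock.wait(1000);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new InterruptedIOException(
              "Interrupted while waiting for ack of seqno " + seqno);
        }
      }
    }
  }
}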
Use of java.io.InterruptedIOException in project hadoop by apache.
The class DFSOutputStream, method createPacket.
/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets. */
protected DFSPacket createPacket(int packetSize, int chunksPerPkt,
    long offsetInBlock, long seqno, boolean lastPacketInBlock)
    throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;
  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe =
        new InterruptedIOException("seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }
  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
      getChecksumSize(), lastPacketInBlock);
}
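createPacket shows the other half of the idiom: InterruptedIOException has only a message constructor, so the original InterruptedException is attached afterwards with initCause to preserve its stack trace. A compact illustration of the same wrapping, using a SynchronousQueue as a hypothetical stand-in for the ByteArrayManager pool:

import java.io.InterruptedIOException;
import java.util.concurrent.SynchronousQueue;

class BufferSource {

  private final SynchronousQueue<byte[]> pool = new SynchronousQueue<>();

  /** Take a buffer from the pool, translating interruption into IO terms. */
  byte[] takeBuffer() throws InterruptedIOException {
    try {
      return pool.take();
    } catch (InterruptedException ie) {
      // InterruptedIOException has no (message, cause) constructor, so the
      // cause is chained explicitly to keep the original stack trace.
      InterruptedIOException iioe =
          new InterruptedIOException("interrupted while taking a buffer");
      iioe.initCause(ie);
      throw iioe;
    }
  }
}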
Use of java.io.InterruptedIOException in project hadoop by apache.
The class RecoveredContainerLaunch, method call.
/**
 * Wait on the process specified in the pid file and return its exit code.
 */
@SuppressWarnings("unchecked")
@Override
public Integer call() {
  int retCode = ExitCode.LOST.getExitCode();
  ContainerId containerId = container.getContainerId();
  String appIdStr =
      containerId.getApplicationAttemptId().getApplicationId().toString();
  String containerIdStr = containerId.toString();
  dispatcher.getEventHandler().handle(
      new ContainerEvent(containerId, ContainerEventType.CONTAINER_LAUNCHED));
  boolean notInterrupted = true;
  try {
    File pidFile = locatePidFile(appIdStr, containerIdStr);
    if (pidFile != null) {
      String pidPathStr = pidFile.getPath();
      pidFilePath = new Path(pidPathStr);
      exec.activateContainer(containerId, pidFilePath);
      retCode = exec.reacquireContainer(
          new ContainerReacquisitionContext.Builder()
              .setContainer(container)
              .setUser(container.getUser())
              .setContainerId(containerId)
              .build());
    } else {
      LOG.warn("Unable to locate pid file for container " + containerIdStr);
    }
  } catch (InterruptedException | InterruptedIOException e) {
    LOG.warn("Interrupted while waiting for exit code from " + containerId);
    notInterrupted = false;
  } catch (IOException e) {
    LOG.error("Unable to recover container " + containerIdStr, e);
  } finally {
    if (notInterrupted) {
      this.completed.set(true);
      exec.deactivateContainer(containerId);
      try {
        getContext().getNMStateStore()
            .storeContainerCompleted(containerId, retCode);
      } catch (IOException e) {
        LOG.error("Unable to set exit code for container " + containerId);
      }
    }
  }
  if (retCode != 0) {
    LOG.warn("Recovered container exited with a non-zero exit code " + retCode);
    this.dispatcher.getEventHandler().handle(new ContainerExitEvent(
        containerId, ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
        retCode, "Container exited with a non-zero exit code " + retCode));
    return retCode;
  }
  LOG.info("Recovered container " + containerId + " succeeded");
  dispatcher.getEventHandler().handle(new ContainerEvent(containerId,
      ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS));
  return 0;
}
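Because the blocking work inside call() can surface interruption either as an InterruptedException (from Java concurrency APIs) or as an InterruptedIOException (from IO-layer code like the snippets above), the method multi-catches both and uses a flag to skip the completion bookkeeping. A trimmed sketch of that control flow, with a hypothetical doBlockingWork() standing in for reacquireContainer:

import java.io.IOException;
import java.io.InterruptedIOException;

class ReacquireTask {

  private boolean completed = false;

  int run() {
    int exitCode = -1;
    boolean notInterrupted = true;
    try {
      exitCode = doBlockingWork();
    } catch (InterruptedException | InterruptedIOException e) {
      // Interruption can arrive through either exception type; treat both
      // the same and skip the completion bookkeeping below.
      notInterrupted = false;
    } catch (IOException e) {
      // A non-interrupt IO failure: fall through with the failure exit code.
    } finally {
      if (notInterrupted) {
        completed = true;
      }
    }
    return exitCode;
  }

  private int doBlockingWork() throws IOException, InterruptedException {
    Thread.sleep(10); // placeholder for the real blocking reacquisition
    return 0;
  }
}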
Use of java.io.InterruptedIOException in project hadoop by apache.
The class S3AFileSystem, method copyFile.
/**
* Copy a single object in the bucket via a COPY operation.
* @param srcKey source object path
* @param dstKey destination object path
* @param size object size
* @throws AmazonClientException on failures inside the AWS SDK
* @throws InterruptedIOException the operation was interrupted
* @throws IOException Other IO problems
*/
private void copyFile(String srcKey, String dstKey, long size)
    throws IOException, InterruptedIOException, AmazonClientException {
  LOG.debug("copyFile {} -> {} ", srcKey, dstKey);
  try {
    ObjectMetadata srcom = getObjectMetadata(srcKey);
    ObjectMetadata dstom = cloneObjectMetadata(srcom);
    setOptionalObjectMetadata(dstom);
    CopyObjectRequest copyObjectRequest =
        new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    setOptionalCopyObjectRequestParameters(copyObjectRequest);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);
    ProgressListener progressListener = new ProgressListener() {
      public void progressChanged(ProgressEvent progressEvent) {
        switch (progressEvent.getEventType()) {
        case TRANSFER_PART_COMPLETED_EVENT:
          incrementWriteOperations();
          break;
        default:
          break;
        }
      }
    };
    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
      copy.waitForCopyResult();
      incrementWriteOperations();
      instrumentation.filesCopied(1, size);
    } catch (InterruptedException e) {
      throw new InterruptedIOException(
          "Interrupted copying " + srcKey + " to " + dstKey + ", cancelling");
    }
  } catch (AmazonClientException e) {
    throw translateException("copyFile(" + srcKey + ", " + dstKey + ")",
        srcKey, e);
  }
}
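The S3A copy follows the same convention: waitForCopyResult() blocks until the transfer manager finishes, and an InterruptedException during that wait is reported as an InterruptedIOException naming the source and destination keys. A generic sketch of waiting on a blocking transfer handle (the TransferHandle interface is illustrative, not part of the AWS SDK):

import java.io.InterruptedIOException;

final class TransferWaiter {

  /** Illustrative stand-in for an SDK object whose wait can be interrupted. */
  interface TransferHandle {
    void waitForCompletion() throws InterruptedException;
  }

  /** Wait for the transfer, reporting interruption in IO terms. */
  static void awaitCopy(TransferHandle handle, String src, String dst)
      throws InterruptedIOException {
    try {
      handle.waitForCompletion();
    } catch (InterruptedException e) {
      // Re-assert the interrupt flag before switching exception hierarchies.
      Thread.currentThread().interrupt();
      throw new InterruptedIOException(
          "Interrupted copying " + src + " to " + dst + ", cancelling");
    }
  }

  private TransferWaiter() {}
}

Since TransferHandle has a single abstract method, a caller can exercise the sketch with a lambda, e.g. awaitCopy(() -> Thread.sleep(100), "srcKey", "dstKey").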