Use of java.io.InterruptedIOException in project tomcat by apache.
The class AjpProcessor, method service:
@Override
public SocketState service(SocketWrapperBase<?> socket) throws IOException {
    RequestInfo rp = request.getRequestProcessor();
    rp.setStage(org.apache.coyote.Constants.STAGE_PARSE);
    // Setting up the socket
    this.socketWrapper = socket;
    boolean cping = false;
    boolean keptAlive = false;
    while (!getErrorState().isError() && !protocol.isPaused()) {
        // Parsing the request header
        try {
            // Get first message of the request
            if (!readMessage(requestHeaderMessage, !keptAlive)) {
                break;
            }
            // Processing the request so make sure the connection rather
            // than keep-alive timeout is used
            socketWrapper.setReadTimeout(protocol.getConnectionTimeout());
            // Check message type, process right away and break if
            // not regular request processing
            int type = requestHeaderMessage.getByte();
            if (type == Constants.JK_AJP13_CPING_REQUEST) {
                if (protocol.isPaused()) {
                    recycle();
                    break;
                }
                cping = true;
                try {
                    socketWrapper.write(true, pongMessageArray, 0, pongMessageArray.length);
                    socketWrapper.flush(true);
                } catch (IOException e) {
                    setErrorState(ErrorState.CLOSE_CONNECTION_NOW, e);
                }
                recycle();
                continue;
            } else if (type != Constants.JK_AJP13_FORWARD_REQUEST) {
                // Unexpected packet type. Unread body packets should have
                // been swallowed in finish().
                if (getLog().isDebugEnabled()) {
                    getLog().debug("Unexpected message: " + type);
                }
                setErrorState(ErrorState.CLOSE_CONNECTION_NOW, null);
                break;
            }
            keptAlive = true;
            request.setStartTime(System.currentTimeMillis());
        } catch (IOException e) {
            setErrorState(ErrorState.CLOSE_CONNECTION_NOW, e);
            break;
        } catch (Throwable t) {
            ExceptionUtils.handleThrowable(t);
            getLog().debug(sm.getString("ajpprocessor.header.error"), t);
            // 400 - Bad Request
            response.setStatus(400);
            setErrorState(ErrorState.CLOSE_CLEAN, t);
            getAdapter().log(request, response, 0);
        }
        if (!getErrorState().isError()) {
            // Set up filters, and parse some request headers
            rp.setStage(org.apache.coyote.Constants.STAGE_PREPARE);
            try {
                prepareRequest();
            } catch (Throwable t) {
                ExceptionUtils.handleThrowable(t);
                getLog().debug(sm.getString("ajpprocessor.request.prepare"), t);
                // 500 - Internal Server Error
                response.setStatus(500);
                setErrorState(ErrorState.CLOSE_CLEAN, t);
                getAdapter().log(request, response, 0);
            }
        }
        if (!getErrorState().isError() && !cping && protocol.isPaused()) {
            // 503 - Service unavailable
            response.setStatus(503);
            setErrorState(ErrorState.CLOSE_CLEAN, null);
            getAdapter().log(request, response, 0);
        }
        cping = false;
        // Process the request in the adapter
        if (!getErrorState().isError()) {
            try {
                rp.setStage(org.apache.coyote.Constants.STAGE_SERVICE);
                getAdapter().service(request, response);
            } catch (InterruptedIOException e) {
                setErrorState(ErrorState.CLOSE_CONNECTION_NOW, e);
            } catch (Throwable t) {
                ExceptionUtils.handleThrowable(t);
                getLog().error(sm.getString("ajpprocessor.request.process"), t);
                // 500 - Internal Server Error
                response.setStatus(500);
                setErrorState(ErrorState.CLOSE_CLEAN, t);
                getAdapter().log(request, response, 0);
            }
        }
        if (isAsync() && !getErrorState().isError()) {
            break;
        }
        // Finish the response if not done yet
        if (!responseFinished && getErrorState().isIoAllowed()) {
            try {
                action(ActionCode.COMMIT, null);
                finishResponse();
            } catch (IOException ioe) {
                setErrorState(ErrorState.CLOSE_CONNECTION_NOW, ioe);
            } catch (Throwable t) {
                ExceptionUtils.handleThrowable(t);
                setErrorState(ErrorState.CLOSE_NOW, t);
            }
        }
        // If there was an error, make sure the request is counted as
        // an error, and update the statistics counter
        if (getErrorState().isError()) {
            response.setStatus(500);
        }
        request.updateCounters();
        rp.setStage(org.apache.coyote.Constants.STAGE_KEEPALIVE);
        // Set keep alive timeout for next request
        socketWrapper.setReadTimeout(protocol.getKeepAliveTimeout());
        recycle();
    }
    rp.setStage(org.apache.coyote.Constants.STAGE_ENDED);
    if (getErrorState().isError() || protocol.isPaused()) {
        return SocketState.CLOSED;
    } else {
        if (isAsync()) {
            return SocketState.LONG;
        } else {
            return SocketState.OPEN;
        }
    }
}
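The dedicated catch (InterruptedIOException e) in the service stage is the point of this example: other Throwables still allow a 500 response over the healthy connection, but an InterruptedIOException means the socket-level I/O itself failed (typically a blocking-write timeout), so the processor jumps straight to CLOSE_CONNECTION_NOW. A minimal, self-contained sketch of that triage follows; the names (ErrorTriageSketch, Disposition, runService) are hypothetical, not Tomcat API:

import java.io.InterruptedIOException;
import java.util.concurrent.Callable;

/** Hypothetical sketch of AjpProcessor's triage of service-stage failures. */
public class ErrorTriageSketch {

    enum Disposition { OK, CLOSE_CLEAN_WITH_500, CLOSE_CONNECTION_NOW }

    static Disposition runService(Callable<Void> adapterService) {
        try {
            adapterService.call();
            return Disposition.OK;
        } catch (InterruptedIOException e) {
            // The socket write timed out or the thread was interrupted mid-I/O:
            // the connection is unusable, so drop it without writing an error page.
            return Disposition.CLOSE_CONNECTION_NOW;
        } catch (Exception t) {
            // Any other failure: the connection is still healthy enough to
            // report a 500 and then close cleanly.
            return Disposition.CLOSE_CLEAN_WITH_500;
        }
    }

    public static void main(String[] args) {
        System.out.println(runService(() -> {
            throw new InterruptedIOException("write timed out");
        }));  // prints CLOSE_CONNECTION_NOW
    }
}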
Use of java.io.InterruptedIOException in project hbase by apache.
The class ConnectionImplementation, method locateRegionInMeta:
/*
* Search the hbase:meta table for the HRegionLocation
* info that contains the table and row we're seeking.
*/
private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException {
  // If we are supposed to be using the cache, look in the cache to see if
  // we already have the region.
  if (useCache) {
    RegionLocations locations = getCachedLocation(tableName, row);
    if (locations != null && locations.getRegionLocation(replicaId) != null) {
      return locations;
    }
  }
  // build the key of the meta region we should be looking for.
  // the extra 9's on the end are necessary to allow "exact" matches
  // without knowing the precise region names.
  byte[] metaKey = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
  Scan s = new Scan();
  s.setReversed(true);
  s.withStartRow(metaKey);
  s.addFamily(HConstants.CATALOG_FAMILY);
  s.setOneRowLimit();
  if (this.useMetaReplicas) {
    s.setConsistency(Consistency.TIMELINE);
  }
  int maxAttempts = (retry ? numTries : 1);
  for (int tries = 0; true; tries++) {
    if (tries >= maxAttempts) {
      throw new NoServerForRegionException("Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName + " after " + tries + " tries.");
    }
    if (useCache) {
      RegionLocations locations = getCachedLocation(tableName, row);
      if (locations != null && locations.getRegionLocation(replicaId) != null) {
        return locations;
      }
    } else {
      // If we are not supposed to be using the cache, delete any existing cached location
      // so it won't interfere.
      metaCache.clearCache(tableName, row);
    }
    // Query the meta region
    long pauseBase = this.pause;
    try {
      Result regionInfoRow = null;
      s.resetMvccReadPoint();
      try (ReversedClientScanner rcs = new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory, rpcControllerFactory, getMetaLookupPool(), 0)) {
        regionInfoRow = rcs.next();
      }
      if (regionInfoRow == null) {
        throw new TableNotFoundException(tableName);
      }
      // convert the row result into the HRegionLocation we need!
      RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow);
      if (locations == null || locations.getRegionLocation(replicaId) == null) {
        throw new IOException("HRegionInfo was null in " + tableName + ", row=" + regionInfoRow);
      }
      HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo();
      if (regionInfo == null) {
        throw new IOException("HRegionInfo was null or empty in " + TableName.META_TABLE_NAME + ", row=" + regionInfoRow);
      }
      // possible we got a region of a different table...
      if (!regionInfo.getTable().equals(tableName)) {
        throw new TableNotFoundException("Table '" + tableName + "' was not found, got: " + regionInfo.getTable() + ".");
      }
      if (regionInfo.isSplit()) {
        throw new RegionOfflineException("the only available region for" + " the required row is a split parent," + " the daughters should be online soon: " + regionInfo.getRegionNameAsString());
      }
      if (regionInfo.isOffline()) {
        throw new RegionOfflineException("the region is offline, could" + " be caused by a disable table call: " + regionInfo.getRegionNameAsString());
      }
      ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
      if (serverName == null) {
        throw new NoServerForRegionException("No server address listed " + "in " + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString() + " containing row " + Bytes.toStringBinary(row));
      }
      if (isDeadServer(serverName)) {
        throw new RegionServerStoppedException("hbase:meta says the region " + regionInfo.getRegionNameAsString() + " is managed by the server " + serverName + ", but it is dead.");
      }
      // Instantiate the location
      cacheLocation(tableName, locations);
      return locations;
    } catch (TableNotFoundException e) {
      // if we got this error, probably means the table just plain doesn't
      // exist. rethrow the error immediately. this should always be coming
      // from the HTable constructor.
      throw e;
    } catch (IOException e) {
      ExceptionUtil.rethrowIfInterrupt(e);
      if (e instanceof RemoteException) {
        e = ((RemoteException) e).unwrapRemoteException();
      }
      if (e instanceof CallQueueTooBigException) {
        // Special-case CallQueueTooBigException, see HBASE-17114
        pauseBase = this.pauseForCQTBE;
      }
      if (tries < maxAttempts - 1) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("locateRegionInMeta parentTable=" + TableName.META_TABLE_NAME + ", attempt=" + tries + " of " + maxAttempts + " failed; retrying after sleep of " + ConnectionUtils.getPauseTime(pauseBase, tries) + " because: " + e.getMessage());
        }
      } else {
        throw e;
      }
      // Only relocate the parent region if necessary
      if (!(e instanceof RegionOfflineException || e instanceof NoServerForRegionException)) {
        relocateRegion(TableName.META_TABLE_NAME, metaKey, replicaId);
      }
    }
    try {
      Thread.sleep(ConnectionUtils.getPauseTime(pauseBase, tries));
    } catch (InterruptedException e) {
      throw new InterruptedIOException("Giving up trying to locate region in " + "meta: thread is interrupted.");
    }
  }
}
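Note the final Thread.sleep: because locateRegionInMeta is declared to throw IOException, an InterruptedException raised during the backoff pause cannot propagate as-is and is rethrown as an InterruptedIOException. A stripped-down sketch of the same retry-with-backoff skeleton follows; the names (RetrySketch, Lookup, withRetries) are hypothetical, and re-asserting the interrupt flag is an addition not present in the original:

import java.io.IOException;
import java.io.InterruptedIOException;

/** Hypothetical sketch of the retry/backoff loop used above. */
public class RetrySketch {

    interface Lookup<T> { T attempt() throws IOException; }

    static <T> T withRetries(Lookup<T> lookup, int maxAttempts, long basePauseMs) throws IOException {
        for (int tries = 0; true; tries++) {
            try {
                return lookup.attempt();
            } catch (IOException e) {
                if (tries >= maxAttempts - 1) {
                    throw e;  // out of attempts: surface the last failure
                }
            }
            try {
                // exponential backoff between attempts
                Thread.sleep(basePauseMs * (1L << tries));
            } catch (InterruptedException e) {
                // The method is declared `throws IOException`, not InterruptedException,
                // so interruption is rethrown as InterruptedIOException -- the same
                // move locateRegionInMeta makes around its Thread.sleep.
                Thread.currentThread().interrupt();  // preserve the interrupt flag
                throw new InterruptedIOException("interrupted while backing off between retries");
            }
        }
    }
}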
Use of java.io.InterruptedIOException in project hbase by apache.
The class AsyncTableResultScanner, method next:
@Override
public synchronized Result next() throws IOException {
  while (queue.isEmpty()) {
    if (closed) {
      return null;
    }
    if (error != null) {
      Throwables.propagateIfPossible(error, IOException.class);
      throw new IOException(error);
    }
    try {
      wait();
    } catch (InterruptedException e) {
      throw new InterruptedIOException();
    }
  }
  Result result = queue.poll();
  cacheSize -= calcEstimatedSize(result);
  if (resumer != null && cacheSize <= maxCacheSize / 2) {
    resumePrefetch();
  }
  return result;
}
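Here the translation is even more direct: the scanner blocks in wait() until the producer fills the queue, and an InterruptedException from that wait is surfaced as a bare InterruptedIOException to satisfy the throws IOException contract of next(). A self-contained sketch of the same blocking-buffer shape, under the hypothetical name BlockingBufferSketch:

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayDeque;
import java.util.Queue;

/** Hypothetical sketch: a blocking poll with the same interrupt translation. */
public class BlockingBufferSketch<T> {

    private final Queue<T> queue = new ArrayDeque<>();
    private boolean closed;

    public synchronized T next() throws IOException {
        while (queue.isEmpty()) {
            if (closed) {
                return null;  // end of stream
            }
            try {
                wait();  // the producer calls notifyAll() after offering
            } catch (InterruptedException e) {
                // next() is declared `throws IOException`, so interruption is
                // surfaced as InterruptedIOException, as in AsyncTableResultScanner.
                throw new InterruptedIOException();
            }
        }
        return queue.poll();
    }

    public synchronized void offer(T item) {
        queue.add(item);
        notifyAll();
    }

    public synchronized void close() {
        closed = true;
        notifyAll();
    }
}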
Use of java.io.InterruptedIOException in project hbase by apache.
The class HFileReplicator, method copyHFilesToStagingDir:
private Map<String, Path> copyHFilesToStagingDir() throws IOException {
  Map<String, Path> mapOfCopiedHFiles = new HashMap<>();
  Pair<byte[], List<String>> familyHFilePathsPair;
  List<String> hfilePaths;
  byte[] family;
  Path familyStagingDir;
  int familyHFilePathsPairsListSize;
  int totalNoOfHFiles;
  List<Pair<byte[], List<String>>> familyHFilePathsPairsList;
  FileSystem sourceFs = null;
  try {
    Path sourceClusterPath = new Path(sourceBaseNamespaceDirPath);
    /*
     * Path#getFileSystem will by default get the FS from cache. If both the source and sink
     * clusters have the same FS name service, it will return the peer cluster FS. To avoid this
     * we explicitly disable loading the FS from cache, so that a new FS is created with the
     * source cluster configuration.
     */
    String sourceScheme = sourceClusterPath.toUri().getScheme();
    String disableCacheName = String.format("fs.%s.impl.disable.cache", new Object[] { sourceScheme });
    sourceClusterConf.setBoolean(disableCacheName, true);
    sourceFs = sourceClusterPath.getFileSystem(sourceClusterConf);
    User user = userProvider.getCurrent();
    // For each table name in the map
    for (Entry<String, List<Pair<byte[], List<String>>>> tableEntry : bulkLoadHFileMap.entrySet()) {
      String tableName = tableEntry.getKey();
      // Create a staging directory for each table
      Path stagingDir = createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName));
      familyHFilePathsPairsList = tableEntry.getValue();
      familyHFilePathsPairsListSize = familyHFilePathsPairsList.size();
      // For each family/hfile-paths pair in the table
      for (int i = 0; i < familyHFilePathsPairsListSize; i++) {
        familyHFilePathsPair = familyHFilePathsPairsList.get(i);
        family = familyHFilePathsPair.getFirst();
        hfilePaths = familyHFilePathsPair.getSecond();
        familyStagingDir = new Path(stagingDir, Bytes.toString(family));
        totalNoOfHFiles = hfilePaths.size();
        // For each list of hfile paths for the family
        List<Future<Void>> futures = new ArrayList<>();
        Callable<Void> c;
        Future<Void> future;
        int currentCopied = 0;
        // Copy the hfiles in parallel
        while (totalNoOfHFiles > currentCopied + this.copiesPerThread) {
          c = new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, currentCopied + this.copiesPerThread));
          future = exec.submit(c);
          futures.add(future);
          currentCopied += this.copiesPerThread;
        }
        int remaining = totalNoOfHFiles - currentCopied;
        if (remaining > 0) {
          c = new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, currentCopied + remaining));
          future = exec.submit(c);
          futures.add(future);
        }
        for (Future<Void> f : futures) {
          try {
            f.get();
          } catch (InterruptedException e) {
            InterruptedIOException iioe = new InterruptedIOException("Failed to copy HFiles to local file system. This will be retried by the source cluster.");
            iioe.initCause(e);
            throw iioe;
          } catch (ExecutionException e) {
            throw new IOException("Failed to copy HFiles to local file system. This will be retried by the source cluster.", e);
          }
        }
      }
      // Add the staging directory to this table. The staging directory contains all the
      // hfiles belonging to this table
      mapOfCopiedHFiles.put(tableName, stagingDir);
    }
    return mapOfCopiedHFiles;
  } finally {
    if (sourceFs != null) {
      sourceFs.close();
    }
    if (exec != null) {
      exec.shutdown();
    }
  }
}
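The Future.get loop shows the two standard translations side by side: InterruptedException becomes an InterruptedIOException with the original attached via initCause(), while ExecutionException is unwrapped into a plain IOException. As a reusable shape, a hedged sketch follows; the helper name joinAll and the message strings are placeholders:

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

/** Hypothetical helper: join a batch of copy futures the way the loop above does. */
public class FutureJoinSketch {

    static void joinAll(List<Future<Void>> futures) throws IOException {
        for (Future<Void> f : futures) {
            try {
                f.get();
            } catch (InterruptedException e) {
                // Waiting was interrupted: wrap in InterruptedIOException and
                // keep the original exception as the cause via initCause().
                InterruptedIOException iioe =
                    new InterruptedIOException("interrupted while waiting for copies");
                iioe.initCause(e);
                throw iioe;
            } catch (ExecutionException e) {
                // The task itself failed: rethrow its cause wrapped in IOException.
                throw new IOException("copy task failed", e.getCause());
            }
        }
    }
}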
Use of java.io.InterruptedIOException in project hbase by apache.
The class WALSplitter, method getReader:
/**
 * Create a new {@link Reader} for reading logs to split.
 *
 * @param file the WAL file to open
 * @param skipErrors whether unreadable files should be skipped as corrupt rather than rethrown
 * @param reporter used to report progress while recovering the file lease
 * @return A new Reader instance; the caller should close it. Null if the file is empty or missing.
 * @throws IOException
 * @throws CorruptedLogFileException
 */
protected Reader getReader(FileStatus file, boolean skipErrors, CancelableProgressable reporter) throws IOException, CorruptedLogFileException {
  Path path = file.getPath();
  long length = file.getLen();
  Reader in;
  // Check for a possibly empty file. With appends, currently Hadoop reports a
  // zero length even if the file has been sync'd. Revisit if HDFS-376 or
  // HDFS-878 is committed.
  if (length <= 0) {
    LOG.warn("File " + path + " might be still open, length is 0");
  }
  try {
    FSUtils.getInstance(fs, conf).recoverFileLease(fs, path, conf, reporter);
    try {
      in = getReader(path, reporter);
    } catch (EOFException e) {
      if (length <= 0) {
        // TODO should we ignore an empty, not-last log file if skip.errors
        // is false? Either way, the caller should decide what to do. E.g.
        // ignore if this is the last log in sequence.
        // TODO is this scenario still possible if the log has been
        // recovered (i.e. closed)
        LOG.warn("Could not open " + path + " for reading. File is empty", e);
        return null;
      } else {
        // EOFException being ignored
        return null;
      }
    }
  } catch (IOException e) {
    if (e instanceof FileNotFoundException) {
      // A wal file may not exist anymore. Nothing can be recovered so move on
      LOG.warn("File " + path + " doesn't exist anymore.", e);
      return null;
    }
    if (!skipErrors || e instanceof InterruptedIOException) {
      // Don't mark the file corrupted if interrupted, or not skipErrors
      throw e;
    }
    CorruptedLogFileException t = new CorruptedLogFileException("skipErrors=true Could not open wal " + path + " ignoring");
    t.initCause(e);
    throw t;
  }
  return in;
}
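The instanceof InterruptedIOException test is the point of this example: when skipErrors is on, most read failures are downgraded to CorruptedLogFileException, but an interruption says nothing about the file's health and must be rethrown untouched. A condensed sketch of that triage, under the hypothetical names CorruptionTriageSketch, CorruptFileException, and classify:

import java.io.IOException;
import java.io.InterruptedIOException;

/** Hypothetical sketch of the corruption-vs-interruption triage above. */
public class CorruptionTriageSketch {

    static class CorruptFileException extends IOException {
        CorruptFileException(String msg, Throwable cause) {
            super(msg);
            initCause(cause);  // mirror CorruptedLogFileException's initCause usage
        }
    }

    static void classify(IOException e, boolean skipErrors) throws IOException {
        // An InterruptedIOException says nothing about the file's health --
        // the reading thread was interrupted -- so it must never be converted
        // into a "corrupt file" verdict, even when skipErrors is enabled.
        if (!skipErrors || e instanceof InterruptedIOException) {
            throw e;
        }
        throw new CorruptFileException("skipping unreadable file", e);
    }
}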