Use of java.io.InterruptedIOException in project hbase by apache.
The class MergeTableRegionsProcedure, method MoveRegionsToSameRS.
/**
* Move all regions to the same region server
* @param env MasterProcedureEnv
* @return whether the target regions are hosted by the same RS
* @throws IOException
*/
private boolean MoveRegionsToSameRS(final MasterProcedureEnv env) throws IOException {
// Make sure the regions are on the same region server before sending the
// merge regions request to that region server.
//
boolean onSameRS = isRegionsOnTheSameServer(env);
if (!onSameRS) {
// Note: the following logic assumes that we only have 2 regions to merge. In the future,
// if we want to extend to more than 2 regions, the code will need to be modified a bit.
//
RegionStates regionStates = getAssignmentManager(env).getRegionStates();
ServerName regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);
RegionLoad loadOfRegionA = getRegionLoad(env, regionLocation, regionsToMerge[0]);
RegionLoad loadOfRegionB = getRegionLoad(env, regionLocation2, regionsToMerge[1]);
if (loadOfRegionA != null && loadOfRegionB != null && loadOfRegionA.getRequestsCount() < loadOfRegionB.getRequestsCount()) {
// switch regionsToMerge[0] and regionsToMerge[1]
HRegionInfo tmpRegion = this.regionsToMerge[0];
this.regionsToMerge[0] = this.regionsToMerge[1];
this.regionsToMerge[1] = tmpRegion;
ServerName tmpLocation = regionLocation;
regionLocation = regionLocation2;
regionLocation2 = tmpLocation;
}
long startTime = EnvironmentEdgeManager.currentTime();
RegionPlan regionPlan = new RegionPlan(regionsToMerge[1], regionLocation2, regionLocation);
LOG.info("Moving regions to same server for merge: " + regionPlan.toString());
getAssignmentManager(env).balance(regionPlan);
do {
try {
Thread.sleep(20);
// Make sure to check RIT first and then get the region location; otherwise
// we could compute a wrong result if the region comes online between getting
// the region location and checking RIT.
boolean isRIT = regionStates.isRegionInTransition(regionsToMerge[1]);
regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);
onSameRS = regionLocation.equals(regionLocation2);
if (onSameRS || !isRIT) {
// Regions are now on the same RS, or the region is no longer in transition.
break;
}
} catch (InterruptedException e) {
InterruptedIOException iioe = new InterruptedIOException();
iioe.initCause(e);
throw iioe;
}
} while ((EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env));
}
return onSameRS;
}
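The catch block above shows the pattern this page collects: an InterruptedException raised during a blocking call (Thread.sleep here) is rethrown as a java.io.InterruptedIOException with the original exception attached as the cause. Below is a minimal standalone sketch of that conversion as a hypothetical helper; the class and method names are illustrative, not HBase API.

import java.io.InterruptedIOException;

// Hypothetical helper for the wrap-and-rethrow pattern above. It attaches the
// InterruptedException as the cause and, unlike the snippet above, also
// restores the thread's interrupt flag so callers further up the stack can
// still observe the interruption.
public final class Interruptions {

  private Interruptions() {
  }

  public static InterruptedIOException asInterruptedIOException(InterruptedException e) {
    // Re-assert the interrupt status that catching the exception cleared.
    Thread.currentThread().interrupt();
    InterruptedIOException iioe = new InterruptedIOException("Interrupted");
    iioe.initCause(e);
    return iioe;
  }
}

With such a helper, the catch block reduces to a single statement: throw Interruptions.asInterruptedIOException(e);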
Use of java.io.InterruptedIOException in project hbase by apache.
The class Compactor, method performCompaction.
/**
* Performs the compaction.
* @param fd FileDetails of cell sink writer
* @param scanner Where to read from.
* @param writer Where to write to.
* @param smallestReadPoint Smallest read point.
* @param cleanSeqId When true, remove the seqId (formerly mvcc) value if it is <=
* smallestReadPoint
* @param throughputController The throughput controller to throttle this compaction
* @param major Is a major compaction.
* @param numofFilesToCompact the number of files to compact
* @return Whether compaction ended; false if it was interrupted for some reason.
*/
protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer, long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, boolean major, int numofFilesToCompact) throws IOException {
assert writer instanceof ShipperListener;
long bytesWrittenProgressForCloseCheck = 0;
long bytesWrittenProgressForLog = 0;
long bytesWrittenProgressForShippedCall = 0;
// Since scanner.next() can return 'false' but still be delivering data,
// we have to use a do/while loop.
List<Cell> cells = new ArrayList<>();
long closeCheckSizeLimit = HStore.getCloseCheckInterval();
long lastMillis = 0;
if (LOG.isDebugEnabled()) {
lastMillis = EnvironmentEdgeManager.currentTime();
}
String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction");
long now = 0;
boolean hasMore;
ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
throughputController.start(compactionName);
KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? (KeyValueScanner) scanner : null;
long shippedCallSizeLimit = (long) numofFilesToCompact * this.store.getFamily().getBlocksize();
try {
do {
hasMore = scanner.next(cells, scannerContext);
if (LOG.isDebugEnabled()) {
now = EnvironmentEdgeManager.currentTime();
}
// output to writer:
Cell lastCleanCell = null;
long lastCleanCellSeqId = 0;
for (Cell c : cells) {
if (cleanSeqId && c.getSequenceId() <= smallestReadPoint) {
lastCleanCell = c;
lastCleanCellSeqId = c.getSequenceId();
CellUtil.setSequenceId(c, 0);
} else {
lastCleanCell = null;
lastCleanCellSeqId = 0;
}
writer.append(c);
int len = KeyValueUtil.length(c);
++progress.currentCompactedKVs;
progress.totalCompactedSize += len;
bytesWrittenProgressForShippedCall += len;
if (LOG.isDebugEnabled()) {
bytesWrittenProgressForLog += len;
}
throughputController.control(compactionName, len);
// check periodically to see if a system stop is requested
if (closeCheckSizeLimit > 0) {
bytesWrittenProgressForCloseCheck += len;
if (bytesWrittenProgressForCloseCheck > closeCheckSizeLimit) {
bytesWrittenProgressForCloseCheck = 0;
if (!store.areWritesEnabled()) {
progress.cancel();
return false;
}
}
}
if (kvs != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) {
if (lastCleanCell != null) {
// HBASE-16931, set back the sequence id to avoid affecting scan order unexpectedly.
// ShipperListener will clone the last cells it refers to, so we need to set the
// sequence id back before ShipperListener.beforeShipped is called.
CellUtil.setSequenceId(lastCleanCell, lastCleanCellSeqId);
}
// Clone the cells that are in the writer so that they are freed of references,
// if they are holding any.
((ShipperListener) writer).beforeShipped();
// The SHARED block references being read for compaction will be kept in the prevBlocks
// list (see HFileScannerImpl#prevBlocks). In the scan flow, after each set of cells is
// returned to the client, shipped() is called, which can clear this list. Here we do a
// similar thing: during the compaction (after every N cells written, with a collective
// size of 'shippedCallSizeLimit') we call shipped(), which may clear the prevBlocks
// list.
kvs.shipped();
bytesWrittenProgressForShippedCall = 0;
}
}
if (lastCleanCell != null) {
// HBASE-16931, set back sequence id to avoid affecting scan order unexpectedly
CellUtil.setSequenceId(lastCleanCell, lastCleanCellSeqId);
}
// logging at DEBUG level
if (LOG.isDebugEnabled()) {
if ((now - lastMillis) >= COMPACTION_PROGRESS_LOG_INTERVAL) {
LOG.debug("Compaction progress: " + compactionName + " " + progress + String.format(", rate=%.2f kB/sec", (bytesWrittenProgressForLog / 1024.0) / ((now - lastMillis) / 1000.0)) + ", throughputController is " + throughputController);
lastMillis = now;
bytesWrittenProgressForLog = 0;
}
}
cells.clear();
} while (hasMore);
} catch (InterruptedException e) {
progress.cancel();
throw (InterruptedIOException) new InterruptedIOException("Interrupted while controlling the throughput of compacting " + compactionName).initCause(e);
} finally {
throughputController.finish(compactionName);
}
progress.complete();
return true;
}
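Unlike the other call sites on this page, this catch clause also cancels the in-progress work before converting the exception. Below is a reduced sketch of that cancel-then-convert shape; Throttle, Progress, and ThrottledCopy are hypothetical stand-ins for ThroughputController, CompactionProgress, and the compaction loop.

import java.io.IOException;
import java.io.InterruptedIOException;

// Reduced sketch of the cancel-then-convert pattern above. Throttle and
// Progress are hypothetical stand-ins, not HBase API.
final class ThrottledCopy {

  interface Throttle {
    // May block to enforce a rate limit; throws if the thread is interrupted
    // while waiting, just like ThroughputController.control.
    void control(String name, long bytes) throws InterruptedException;
  }

  static final class Progress {
    volatile boolean cancelled;

    void cancel() {
      cancelled = true;
    }
  }

  static void run(Throttle throttle, Progress progress, byte[][] chunks) throws IOException {
    try {
      for (byte[] chunk : chunks) {
        // ... write the chunk to its destination here ...
        throttle.control("copy", chunk.length);
      }
    } catch (InterruptedException e) {
      // First mark the work as cancelled, then surface the interruption as an
      // IOException subtype so IO-oriented callers handle it uniformly.
      progress.cancel();
      throw (InterruptedIOException) new InterruptedIOException(
          "Interrupted while throttling copy").initCause(e);
    }
  }
}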
Use of java.io.InterruptedIOException in project hbase by apache.
The class RegionReplicaFlushHandler, method triggerFlushInPrimaryRegion.
void triggerFlushInPrimaryRegion(final HRegion region) throws IOException, RuntimeException {
long pause = connection.getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
int maxAttempts = getRetriesCount(connection.getConfiguration());
RetryCounter counter = new RetryCounterFactory(maxAttempts, (int) pause).create();
if (LOG.isDebugEnabled()) {
LOG.debug("Attempting to do an RPC to the primary region replica " + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + " of region " + region.getRegionInfo().getEncodedName() + " to trigger a flush");
}
while (!region.isClosing() && !region.isClosed() && !server.isAborted() && !server.isStopped()) {
FlushRegionCallable flushCallable = new FlushRegionCallable(connection, rpcControllerFactory, RegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()), true);
// TODO: flushRegion() is a blocking call waiting for the flush to complete. Ideally we
// do not have to wait for the whole flush here, just initiate it.
FlushRegionResponse response = null;
try {
response = rpcRetryingCallerFactory.<FlushRegionResponse>newCaller().callWithRetries(flushCallable, this.operationTimeout);
} catch (IOException ex) {
if (ex instanceof TableNotFoundException || connection.isTableDisabled(region.getRegionInfo().getTable())) {
return;
}
throw ex;
}
if (response.getFlushed()) {
// We have to wait for the flush entry; all reads will be rejected until we see
// a complete flush cycle or replay a region open event.
if (LOG.isDebugEnabled()) {
LOG.debug("Successfully triggered a flush of primary region replica " + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and blocking reads until observing a full flush cycle");
}
break;
} else {
if (response.hasWroteFlushWalMarker()) {
if (response.getWroteFlushWalMarker()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary " + "region replica " + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and " + "blocking reads until observing a flush marker");
}
break;
} else {
// closing or already flushing. Retry flush again after some sleep.
if (!counter.shouldRetry()) {
throw new IOException("Cannot cause primary to flush or drop a wal marker after " + "retries. Failing opening of this region replica " + region.getRegionInfo().getEncodedName());
}
}
} else {
// nothing to do. Are we dealing with an old server?
LOG.warn("Was not able to trigger a flush from primary region due to old server version? " + "Continuing to open the secondary region replica: " + region.getRegionInfo().getEncodedName());
region.setReadsEnabled(true);
break;
}
}
try {
counter.sleepUntilNextRetry();
} catch (InterruptedException e) {
throw new InterruptedIOException(e.getMessage());
}
}
}
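Here the interruptible step is the sleep between retries, and the conversion keeps only the message of the InterruptedException rather than chaining it as the cause. Below is a hedged sketch of the overall retry shape; FlushRetry and attemptFlush() are illustrative stand-ins for the handler and its FlushRegionCallable RPC.

import java.io.IOException;
import java.io.InterruptedIOException;

// Hedged sketch of the retry shape above. attemptFlush() is a hypothetical
// stand-in for the RPC; it returns true once the primary has flushed and we
// can stop retrying.
final class FlushRetry {

  interface Rpc {
    boolean attemptFlush() throws IOException;
  }

  static void retryUntilFlushed(Rpc rpc, int maxAttempts, long pauseMillis) throws IOException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (rpc.attemptFlush()) {
        return;
      }
      try {
        // Back off before the next attempt; convert interruption the same
        // way triggerFlushInPrimaryRegion does.
        Thread.sleep(pauseMillis);
      } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
      }
    }
    throw new IOException("Primary region did not flush after " + maxAttempts + " attempts");
  }
}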
Use of java.io.InterruptedIOException in project hbase by apache.
The class HTable, method existsAll.
/**
* {@inheritDoc}
*/
@Override
public boolean[] existsAll(final List<Get> gets) throws IOException {
if (gets.isEmpty())
return new boolean[] {};
if (gets.size() == 1)
return new boolean[] { exists(gets.get(0)) };
ArrayList<Get> exists = new ArrayList<>(gets.size());
for (Get g : gets) {
Get ge = new Get(g);
ge.setCheckExistenceOnly(true);
exists.add(ge);
}
Object[] r1 = new Object[exists.size()];
try {
batch(exists, r1, readRpcTimeout);
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
}
// translate.
boolean[] results = new boolean[r1.length];
int i = 0;
for (Object o : r1) {
// batch() ensures that if there is a failure we get an exception instead of a null result
results[i++] = ((Result) o).getExists();
}
return results;
}
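A short usage sketch for existsAll follows; the connection setup, the table name "t1", and the row keys are assumptions for illustration.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Usage sketch: check several rows for existence in one batched round of
// RPCs instead of one exists() call per row. Table "t1" and the row keys
// are illustrative assumptions.
public class ExistsAllExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      List<Get> gets = Arrays.asList(
          new Get(Bytes.toBytes("row1")),
          new Get(Bytes.toBytes("row2")));
      boolean[] exists = table.existsAll(gets);
      for (int i = 0; i < exists.length; i++) {
        System.out.println("get " + i + " exists: " + exists[i]);
      }
    }
  }
}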
Use of java.io.InterruptedIOException in project hbase by apache.
The class HBaseAdmin, method execProcedure.
@Override
public void execProcedure(String signature, String instance, Map<String, String> props) throws IOException {
ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
builder.setSignature(signature).setInstance(instance);
for (Entry<String, String> entry : props.entrySet()) {
NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey()).setValue(entry.getValue()).build();
builder.addConfiguration(pair);
}
final ExecProcedureRequest request = ExecProcedureRequest.newBuilder().setProcedure(builder.build()).build();
// run the procedure on the master
ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(getConnection(), getRpcControllerFactory()) {
@Override
protected ExecProcedureResponse rpcCall() throws Exception {
return master.execProcedure(getRpcController(), request);
}
});
long start = EnvironmentEdgeManager.currentTime();
long max = response.getExpectedTimeout();
long maxPauseTime = max / this.numRetries;
int tries = 0;
LOG.debug("Waiting a max of " + max + " ms for procedure '" + signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)");
boolean done = false;
while (tries == 0 || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
try {
// sleep a backoff <= pauseTime amount
long sleep = getPauseTime(tries++);
sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
LOG.debug("(#" + tries + ") Sleeping: " + sleep + "ms while waiting for procedure completion.");
Thread.sleep(sleep);
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
}
LOG.debug("Getting current status of procedure from master...");
done = isProcedureFinished(signature, instance, props);
}
if (!done) {
throw new IOException("Procedure '" + signature + " : " + instance + "' wasn't completed in expectedTime:" + max + " ms");
}
}
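The waiting loop reduces to a bounded poll with a capped, growing backoff. Below is a generic sketch of that shape; BoundedPoll and Check are illustrative, and the 100 ms base delay is an assumption (HBaseAdmin.getPauseTime uses the client's configured pause schedule instead).

import java.io.IOException;
import java.io.InterruptedIOException;

// Generic sketch of the wait loop above: poll until done or until maxMillis
// elapses, sleeping an exponentially growing but capped interval between
// polls. Check is a hypothetical stand-in for isProcedureFinished.
final class BoundedPoll {

  interface Check {
    boolean isDone() throws IOException;
  }

  static boolean waitFor(Check check, long maxMillis, long maxPauseMillis) throws IOException {
    long start = System.currentTimeMillis();
    int tries = 0;
    boolean done = false;
    while (tries == 0 || (System.currentTimeMillis() - start < maxMillis && !done)) {
      // Capped exponential backoff; the 100 ms base is an assumption.
      long sleep = Math.min(100L << Math.min(tries++, 20), maxPauseMillis);
      try {
        Thread.sleep(sleep);
      } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
      }
      done = check.isDone();
    }
    return done;
  }
}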