Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by Apache.
The class RpcRetryingCallerImpl, method translateException.
/**
 * Unwrap the given throwable and return the underlying exception, if any;
 * a DoNotRetryIOException is rethrown rather than returned.
 * @param t the throwable to analyze
 * @return the translated exception, if it is not a DoNotRetryIOException
 * @throws DoNotRetryIOException if one is found, it is thrown instead of being translated
 */
static Throwable translateException(Throwable t) throws DoNotRetryIOException {
  if (t instanceof UndeclaredThrowableException) {
    if (t.getCause() != null) {
      t = t.getCause();
    }
  }
  if (t instanceof RemoteException) {
    t = ((RemoteException) t).unwrapRemoteException();
  }
  if (t instanceof LinkageError) {
    throw new DoNotRetryIOException(t);
  }
  if (t instanceof ServiceException) {
    ServiceException se = (ServiceException) t;
    Throwable cause = se.getCause();
    if (cause instanceof DoNotRetryIOException) {
      throw (DoNotRetryIOException) cause;
    }
    // Don't let ServiceException out; it's RPC-specific.
    t = cause;
    // t could be a RemoteException, so translate again and keep the result.
    t = translateException(t);
  } else if (t instanceof DoNotRetryIOException) {
    throw (DoNotRetryIOException) t;
  }
  return t;
}
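The throw-or-return split above is what a retry loop keys off: DoNotRetryIOException escapes immediately, everything else is translated and retried. A minimal sketch of such a loop, assuming translateException is visible to the caller; the helper name callWithRetries, the attempt limit, and the backoff are illustrative, not HBase API:

// requires java.io.IOException and java.util.concurrent.Callable
static <T> T callWithRetries(Callable<T> callable, int maxAttempts)
    throws IOException, InterruptedException {
  Throwable lastException = null;
  for (int attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      return callable.call();
    } catch (Throwable t) {
      // translateException rethrows DoNotRetryIOException, aborting the loop
      // at once; any other translated cause is recorded and retried.
      lastException = translateException(t);
      Thread.sleep(100L * (attempt + 1)); // naive linear backoff, illustrative only
    }
  }
  throw new IOException("Failed after " + maxAttempts + " attempts", lastException);
}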
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by Apache.
The class TableRecordReaderImpl, method next.
/**
 * @param key ImmutableBytesWritable as input key
 * @param value Result as input value
 * @return true if there was more data
 * @throws IOException if scanning fails and the scanner cannot be restarted
 */
public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
  Result result;
  try {
    try {
      result = this.scanner.next();
      if (logScannerActivity) {
        rowcount++;
        if (rowcount >= logPerRowCount) {
          long now = System.currentTimeMillis();
          LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows");
          timestamp = now;
          rowcount = 0;
        }
      }
    } catch (IOException e) {
      // do not retry if the exception tells us not to do so
      if (e instanceof DoNotRetryIOException) {
        throw e;
      }
      // try to handle all other IOExceptions by restarting the scanner;
      // if the second call fails too, the exception is rethrown
      LOG.debug("recovered from " + StringUtils.stringifyException(e));
      if (lastSuccessfulRow == null) {
        LOG.warn("We are restarting the first next() invocation,"
            + " if your mapper has restarted a few other times like this"
            + " then you should consider killing this job and investigate"
            + " why it's taking so long.");
        restart(startRow);
      } else {
        restart(lastSuccessfulRow);
        // skip the presumed already mapped row
        this.scanner.next();
      }
      result = this.scanner.next();
    }
    if (result != null && result.size() > 0) {
      key.set(result.getRow());
      lastSuccessfulRow = key.get();
      value.copyFrom(result);
      return true;
    }
    return false;
  } catch (IOException ioe) {
    if (logScannerActivity) {
      long now = System.currentTimeMillis();
      LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows");
      LOG.info(ioe);
      String lastRow = lastSuccessfulRow == null ? "null"
          : Bytes.toStringBinary(lastSuccessfulRow);
      LOG.info("lastSuccessfulRow=" + lastRow);
    }
    throw ioe;
  }
}
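For context, a minimal sketch of the read loop a caller runs against this reader; the reader variable and the process handler are illustrative, while createKey()/createValue() are the usual factory methods on the old-API record reader:

ImmutableBytesWritable key = reader.createKey();
Result value = reader.createValue();
while (reader.next(key, value)) {
  // key holds the current row key, value holds that row's cells; on a
  // transient scanner failure next() transparently restarts at lastSuccessfulRow
  process(key, value); // hypothetical downstream handler
}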
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by Apache.
The class RpcServer, method call.
/**
 * This is a server-side method, invoked over RPC. On success the
 * response carries the protobuf payload; on failure, the exception name
 * and stack trace are returned in the protobuf response instead.
 */
@Override
public Pair<Message, CellScanner> call(RpcCall call, MonitoredRPCHandler status)
    throws IOException {
  try {
    MethodDescriptor md = call.getMethod();
    Message param = call.getParam();
    status.setRPC(md.getName(), new Object[] { param }, call.getReceiveTime());
    // TODO: Review after we add in encoded data blocks.
    status.setRPCPacket(param);
    status.resume("Servicing call");
    // get an instance of the method arg type
    HBaseRpcController controller = new HBaseRpcControllerImpl(call.getCellScanner());
    controller.setCallTimeout(call.getTimeout());
    Message result = call.getService().callBlockingMethod(md, controller, param);
    long receiveTime = call.getReceiveTime();
    long startTime = call.getStartTime();
    long endTime = System.currentTimeMillis();
    int processingTime = (int) (endTime - startTime);
    int qTime = (int) (startTime - receiveTime);
    int totalTime = (int) (endTime - receiveTime);
    if (LOG.isTraceEnabled()) {
      LOG.trace(CurCall.get().toString() + ", response " + TextFormat.shortDebugString(result)
          + " queueTime: " + qTime + " processingTime: " + processingTime
          + " totalTime: " + totalTime);
    }
    // Use the raw request call size for now.
    long requestSize = call.getSize();
    long responseSize = result.getSerializedSize();
    if (call.isClientCellBlockSupported()) {
      // Include the payload size in HBaseRpcController
      responseSize += call.getResponseCellSize();
    }
    metrics.dequeuedCall(qTime);
    metrics.processedCall(processingTime);
    metrics.totalCall(totalTime);
    metrics.receivedRequest(requestSize);
    metrics.sentResponse(responseSize);
    // log any RPC responses that are slower than the configured warn
    // response time or larger than the configured warn size
    boolean tooSlow = (processingTime > warnResponseTime && warnResponseTime > -1);
    boolean tooLarge = (responseSize > warnResponseSize && warnResponseSize > -1);
    if (tooSlow || tooLarge) {
      // when tagging, we let TooLarge trump TooSlow to keep the output simple;
      // note that large responses will often also be slow.
      logResponse(param, md.getName(), md.getName() + "(" + param.getClass().getName() + ")",
          (tooLarge ? "TooLarge" : "TooSlow"), status.getClient(), startTime,
          processingTime, qTime, responseSize);
    }
    return new Pair<>(result, controller.cellScanner());
  } catch (Throwable e) {
    // need to pass it over the wire
    if (e instanceof ServiceException) {
      if (e.getCause() == null) {
        LOG.debug("Caught a ServiceException with null cause", e);
      } else {
        e = e.getCause();
      }
    }
    // increment the number of requests that were exceptions
    metrics.exception(e);
    if (e instanceof LinkageError) {
      throw new DoNotRetryIOException(e);
    }
    if (e instanceof IOException) {
      throw (IOException) e;
    }
    LOG.error("Unexpected throwable object ", e);
    throw new IOException(e.getMessage(), e);
  }
}
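The warnResponseTime and warnResponseSize thresholds checked above come from server configuration; a hedged sketch of tuning them via org.apache.hadoop.conf.Configuration (the property names below are the ones HBase conventionally uses for these thresholds; a value of -1 disables the corresponding check):

Configuration conf = HBaseConfiguration.create();
conf.setInt("hbase.ipc.warn.response.time", 10000);             // ms threshold for "TooSlow"
conf.setInt("hbase.ipc.warn.response.size", 100 * 1024 * 1024); // byte threshold for "TooLarge"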
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by Apache.
The class TestFromClientSide, method testScannerFailsAfterRetriesWhenCoprocessorThrowsIOE.
/**
 * Tests the case where a coprocessor throws a regular IOException in the scan. The
 * expectation is that the client keeps retrying, but fails once the retries are
 * exhausted instead of retrying indefinitely.
 */
@Test(timeout = 180000)
public void testScannerFailsAfterRetriesWhenCoprocessorThrowsIOE()
    throws IOException, InterruptedException {
  TEST_UTIL.getConfiguration().setBoolean("hbase.client.log.scanner.activity", true);
  final TableName tableName = TableName.valueOf(name.getMethodName());
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
  HTableDescriptor htd = TEST_UTIL.createTableDescriptor(tableName, FAMILY);
  htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
  TEST_UTIL.getAdmin().createTable(htd);
  ExceptionInReseekRegionObserver.reset();
  // throw an exception on every scan attempt, not just the first one
  ExceptionInReseekRegionObserver.throwOnce.set(false);
  try (Table t = TEST_UTIL.getConnection().getTable(tableName)) {
    TEST_UTIL.loadTable(t, FAMILY, false);
    TEST_UTIL.getAdmin().flush(tableName);
    TEST_UTIL.countRows(t, new Scan().addColumn(FAMILY, FAMILY));
    fail("Should have thrown an exception");
  } catch (DoNotRetryIOException expected) {
    // expected: retries are exhausted and the scanner gives up
    assertTrue(expected instanceof ScannerResetException);
  }
  assertTrue(ExceptionInReseekRegionObserver.reqCount.get() >= 3);
}
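The test depends on a request-counting coprocessor that injects failures. The real ExceptionInReseekRegionObserver lives in HBase's test sources, so the sketch below only illustrates its shape; the class name FailingScanObserver, the injected message, and the use of the pre-2.0 preScannerNext hook are assumptions, and the usual imports are omitted to match the snippets above:

public static class FailingScanObserver extends BaseRegionObserver {
  static final AtomicLong reqCount = new AtomicLong();
  static final AtomicBoolean throwOnce = new AtomicBoolean(true);

  static void reset() {
    reqCount.set(0);
    throwOnce.set(true);
  }

  @Override
  public boolean preScannerNext(ObserverContext<RegionCoprocessorEnvironment> ctx,
      InternalScanner s, List<Result> results, int limit, boolean hasNext) throws IOException {
    long count = reqCount.incrementAndGet();
    // throwOnce == false (as set in the test above) means every attempt fails,
    // so the client exhausts HBASE_CLIENT_RETRIES_NUMBER and gives up
    if (!throwOnce.get() || count == 1) {
      throw new IOException("injected failure on scan request " + count);
    }
    return hasNext;
  }
}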
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by Apache.
The class TestModifyTableProcedure, method testModifyTableDeleteCF.
@Test(timeout = 60000)
public void testModifyTableDeleteCF() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final String cf1 = "cf1";
  final String cf2 = "cf2";
  final String cf3 = "cf3";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, cf1, cf2, cf3);
  HTableDescriptor currentHtd = UTIL.getAdmin().getTableDescriptor(tableName);
  assertEquals(3, currentHtd.getFamiliesKeys().size());
  // Test 1: modify the table descriptor online
  HTableDescriptor htd = new HTableDescriptor(UTIL.getAdmin().getTableDescriptor(tableName));
  htd.removeFamily(cf2.getBytes());
  long procId = ProcedureTestingUtility.submitAndWait(procExec,
      new ModifyTableProcedure(procExec.getEnvironment(), htd));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
  currentHtd = UTIL.getAdmin().getTableDescriptor(tableName);
  assertEquals(2, currentHtd.getFamiliesKeys().size());
  assertFalse(currentHtd.hasFamily(cf2.getBytes()));
  // Test 2: modify the table descriptor offline
  UTIL.getAdmin().disableTable(tableName);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  HTableDescriptor htd2 = new HTableDescriptor(UTIL.getAdmin().getTableDescriptor(tableName));
  htd2.removeFamily(cf3.getBytes());
  // disable sanity checks
  htd2.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
  long procId2 = ProcedureTestingUtility.submitAndWait(procExec,
      new ModifyTableProcedure(procExec.getEnvironment(), htd2));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
  currentHtd = UTIL.getAdmin().getTableDescriptor(tableName);
  assertEquals(1, currentHtd.getFamiliesKeys().size());
  assertFalse(currentHtd.hasFamily(cf3.getBytes()));
  // Test 3: removing the last column family must fail
  HTableDescriptor htd3 = new HTableDescriptor(UTIL.getAdmin().getTableDescriptor(tableName));
  htd3.removeFamily(cf1.getBytes());
  long procId3 = ProcedureTestingUtility.submitAndWait(procExec,
      new ModifyTableProcedure(procExec.getEnvironment(), htd3));
  final ProcedureInfo result = procExec.getResult(procId3);
  assertTrue(result.isFailed());
  Throwable cause = ProcedureTestingUtility.getExceptionCause(result);
  assertTrue("expected DoNotRetryIOException, got " + cause,
      cause instanceof DoNotRetryIOException);
  // re-fetch the descriptor so we verify the failed procedure left the table untouched
  currentHtd = UTIL.getAdmin().getTableDescriptor(tableName);
  assertEquals(1, currentHtd.getFamiliesKeys().size());
  assertTrue(currentHtd.hasFamily(cf1.getBytes()));
}
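The same last-family guard also surfaces through the client Admin API; a hedged sketch of the equivalent check (deleteColumn matches the 1.x-era Admin interface used above; later versions renamed it deleteColumnFamily, and the exact exception type raised here is an assumption):

try {
  UTIL.getAdmin().deleteColumn(tableName, cf1.getBytes());
  fail("Deleting the only remaining column family should be rejected");
} catch (DoNotRetryIOException expected) {
  // the master refuses to remove the last column family of a table
}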