use of org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException in project hbase by apache.
the class MultiThreadedUpdater method mutate.
public void mutate(Table table, Mutation m, long keyBase, byte[] row, byte[] cf, byte[] q, byte[] v) {
  long start = System.currentTimeMillis();
  try {
    m = dataGenerator.beforeMutate(keyBase, m);
    if (m instanceof Increment) {
      table.increment((Increment) m);
    } else if (m instanceof Append) {
      table.append((Append) m);
    } else if (m instanceof Put) {
      table.checkAndPut(row, cf, q, v, (Put) m);
    } else if (m instanceof Delete) {
      table.checkAndDelete(row, cf, q, v, (Delete) m);
    } else {
      throw new IllegalArgumentException("unsupported mutation " + m.getClass().getSimpleName());
    }
    totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
  } catch (IOException e) {
    failedKeySet.add(keyBase);
    String exceptionInfo;
    if (e instanceof RetriesExhaustedWithDetailsException) {
      // The aggregate exception carries per-action details; use its full description.
      RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
      exceptionInfo = aggEx.getExhaustiveDescription();
    } else {
      exceptionInfo = StringUtils.stringifyException(e);
    }
    LOG.error("Failed to mutate: " + keyBase + " after " + (System.currentTimeMillis() - start)
        + "ms; region information: " + getRegionDebugInfoSafe(table, m.getRow())
        + "; errors: " + exceptionInfo);
  }
}
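Beyond getExhaustiveDescription(), the exception exposes each failed action individually through parallel accessors. A minimal sketch of inspecting those per-action details; the helper method itself is ours, not part of MultiThreadedUpdater:

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper: dump each failed action, its target server, and its cause.
static void logPerActionDetails(RetriesExhaustedWithDetailsException aggEx) {
  for (int i = 0; i < aggEx.getNumExceptions(); i++) {
    // Index i pairs a cause with the Row it failed on and the server it targeted.
    System.err.println("row=" + Bytes.toStringBinary(aggEx.getRow(i).getRow())
        + " server=" + aggEx.getHostnamePort(i)
        + " cause=" + aggEx.getCause(i));
  }
}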
use of org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException in project hbase by apache.
the class TestDistributedLogSplitting method testDisallowWritesInRecovering.
@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testDisallowWritesInRecovering() throws Exception {
  LOG.info("testDisallowWritesInRecovering");
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
  conf.setBoolean(HConstants.DISALLOW_WRITES_IN_RECOVERING, true);
  startCluster(NUM_RS);
  final int NUM_REGIONS_TO_CREATE = 40;
  // Turn off load balancing to prevent regions from moving around;
  // otherwise they will consume recovered.edits.
  master.balanceSwitch(false);
  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
  try {
    final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager();
    Set<HRegionInfo> regionSet = new HashSet<>();
    HRegionInfo region = null;
    HRegionServer hrs = null;
    HRegionServer dstRS = null;
    for (int i = 0; i < NUM_RS; i++) {
      hrs = rsts.get(i).getRegionServer();
      List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
      if (regions.isEmpty()) continue;
      region = regions.get(0);
      regionSet.add(region);
      dstRS = rsts.get((i + 1) % NUM_RS).getRegionServer();
      break;
    }
    slm.markRegionsRecovering(hrs.getServerName(), regionSet);
    // Move the region so that it is opened in recovering state.
    final HRegionInfo hri = region;
    final HRegionServer tmpRS = dstRS;
    TEST_UTIL.getAdmin().move(region.getEncodedNameAsBytes(),
        Bytes.toBytes(dstRS.getServerName().getServerName()));
    // Wait for the region move to complete.
    final RegionStates regionStates =
        TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
    TEST_UTIL.waitFor(45000, 200, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        ServerName sn = regionStates.getRegionServerOfRegion(hri);
        return (sn != null && sn.equals(tmpRS.getServerName()));
      }
    });
    try {
      byte[] key = region.getStartKey();
      if (key == null || key.length == 0) {
        key = new byte[] { 0, 0, 0, 0, 1 };
      }
      Put put = new Put(key);
      put.addColumn(Bytes.toBytes("family"), Bytes.toBytes("c1"), new byte[] { 'b' });
      ht.put(put);
    } catch (IOException ioe) {
      Assert.assertTrue(ioe instanceof RetriesExhaustedWithDetailsException);
      RetriesExhaustedWithDetailsException re = (RetriesExhaustedWithDetailsException) ioe;
      boolean foundRegionInRecoveryException = false;
      for (Throwable t : re.getCauses()) {
        if (t instanceof RegionInRecoveryException) {
          foundRegionInRecoveryException = true;
          break;
        }
      }
      Assert.assertTrue("No RegionInRecoveryException. Following exceptions returned="
          + re.getCauses(), foundRegionInRecoveryException);
    }
  } finally {
    if (ht != null) ht.close();
    if (zkw != null) zkw.close();
  }
}
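The cause-scanning loop in the catch block above reappears in other callers of getCauses() (see doPuts further down). A hedged sketch of extracting it into a reusable predicate; the helper name is ours, not HBase's:

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;

// Hypothetical helper (not in HBase): true if any aggregated cause is of the given type.
static boolean hasCauseOfType(RetriesExhaustedWithDetailsException e,
    Class<? extends Throwable> type) {
  for (Throwable t : e.getCauses()) {
    if (type.isInstance(t)) {
      return true;
    }
  }
  return false;
}

With such a helper, the assertion above reduces to Assert.assertTrue(hasCauseOfType(re, RegionInRecoveryException.class)).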
use of org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException in project bagheera by mozilla-metrics.
the class HBaseSinkTest method setup.
@Before
public void setup() throws IOException {
  sinkConfig = new SinkConfiguration();
  sinkConfig.setString("hbasesink.hbase.tablename", "test");
  sinkConfig.setString("hbasesink.hbase.column.family", "data");
  sinkConfig.setString("hbasesink.hbase.column.qualifier", "json");
  sinkConfig.setBoolean("hbasesink.hbase.rowkey.prefixdate", false);
  sinkFactory = KeyValueSinkFactory.getInstance(HBaseSink.class, sinkConfig);
  hbasePool = Mockito.mock(HTablePool.class);
  htable = Mockito.mock(HTable.class);
  Mockito.when(hbasePool.getTable("test".getBytes())).thenReturn(htable);
  Mockito.doAnswer(new Answer<Object>() {
    int count = 0;

    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
      count++;
      // Fail the first call to force the sink to retry once.
      if (count <= 1) {
        throw new RetriesExhaustedWithDetailsException(
            new ArrayList<Throwable>(), new ArrayList<Row>(), new ArrayList<String>());
      }
      return null;
    }
  }).when(htable).put(Mockito.anyListOf(Put.class));
}
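The mock above builds the exception from its public three-list constructor with all lists empty. If a test needs a non-empty instance, the lists must stay parallel: the i-th cause belongs to the i-th action and the i-th server. A minimal sketch under that assumption; all values are illustrative:

import java.util.Arrays;
import java.util.Collections;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Row;

// Sketch: a single failed Put attributed to one server.
Put failedPut = new Put("row-1".getBytes());
RetriesExhaustedWithDetailsException ex = new RetriesExhaustedWithDetailsException(
    Arrays.<Throwable>asList(new java.io.IOException("region moved")),
    Arrays.<Row>asList(failedPut),
    Collections.singletonList("rs1.example.com:16020"));
// getExhaustiveDescription() then summarizes the cause, row, and server together.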
use of org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException in project hbase by apache.
the class ResourceBase method processException.
protected Response processException(Throwable exp) {
  Throwable curr = exp;
  if (accessDeniedClazz != null) {
    // Some access denied exceptions are buried, so walk the whole cause chain.
    while (curr != null) {
      if (accessDeniedClazz.isAssignableFrom(curr.getClass())) {
        throw new WebApplicationException(Response.status(Response.Status.FORBIDDEN)
            .type(MIMETYPE_TEXT)
            .entity("Forbidden" + CRLF + StringUtils.stringifyException(exp) + CRLF)
            .build());
      }
      curr = curr.getCause();
    }
  }
  // TableNotFoundException may also be buried one level deep.
  if (exp instanceof TableNotFoundException || exp.getCause() instanceof TableNotFoundException) {
    throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND)
        .type(MIMETYPE_TEXT)
        .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF)
        .build());
  }
  if (exp instanceof NoSuchColumnFamilyException) {
    throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND)
        .type(MIMETYPE_TEXT)
        .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF)
        .build());
  }
  if (exp instanceof RuntimeException) {
    throw new WebApplicationException(Response.status(Response.Status.BAD_REQUEST)
        .type(MIMETYPE_TEXT)
        .entity("Bad request" + CRLF + StringUtils.stringifyException(exp) + CRLF)
        .build());
  }
  if (exp instanceof RetriesExhaustedWithDetailsException) {
    // Recurse into the first aggregated cause; since processException always
    // throws, this call never falls through to the 503 below.
    RetriesExhaustedWithDetailsException retryException =
        (RetriesExhaustedWithDetailsException) exp;
    processException(retryException.getCause(0));
  }
  throw new WebApplicationException(Response.status(Response.Status.SERVICE_UNAVAILABLE)
      .type(MIMETYPE_TEXT)
      .entity("Unavailable" + CRLF + StringUtils.stringifyException(exp) + CRLF)
      .build());
}
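The recursion on getCause(0) means a RetriesExhaustedWithDetailsException that wraps, say, a buried TableNotFoundException still maps to 404 rather than a generic 503. A hedged sketch of how a REST resource might delegate to it; this catch site is illustrative, not copied from the HBase REST code:

// Illustrative caller shape: map any failure to an HTTP status via processException.
try {
  table.put(put);
  return Response.ok().build();
} catch (Exception e) {
  // processException always throws a WebApplicationException carrying
  // 403, 404, 400, or 503 depending on the cause chain.
  return processException(e);
}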
use of org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException in project hbase by apache.
the class TestQuotaThrottle method doPuts.
private int doPuts(int maxOps, final Table... tables) throws Exception {
  int count = 0;
  try {
    while (count < maxOps) {
      Put put = new Put(Bytes.toBytes("row-" + count));
      put.addColumn(FAMILY, QUALIFIER, Bytes.toBytes("data-" + count));
      for (final Table table : tables) {
        table.put(put);
      }
      count += tables.length;
    }
  } catch (RetriesExhaustedWithDetailsException e) {
    // Swallow the exception only if every aggregated cause is a throttling error.
    for (Throwable t : e.getCauses()) {
      if (!(t instanceof ThrottlingException)) {
        throw e;
      }
    }
    LOG.error("put failed after " + count + " operations", e);
  }
  return count;
}
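doPuts therefore returns how many puts succeeded before the write quota kicked in. A sketch of how a throttle test might use it; the quota values and the admin/userName/table fixtures are assumptions, not copied from TestQuotaThrottle:

// Illustrative usage: limit a user to 6 writes per minute, then expect
// doPuts to be throttled after exactly 6 successful puts.
admin.setQuota(QuotaSettingsFactory.throttleUser(userName,
    ThrottleType.WRITE_NUMBER, 6, TimeUnit.MINUTES));
assertEquals(6, doPuts(100, table));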