Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.
The class TestClientNoCluster, method testConnectionClosedOnRegionLocate.
@Test
public void testConnectionClosedOnRegionLocate() throws IOException {
  Configuration testConf = new Configuration(this.conf);
  testConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  // Go against meta else we will try to find first region for the table on construction which
  // means we'll have to do a bunch more mocking. Tests that go against meta only should be
  // good for a bit of testing.
  Connection connection = ConnectionFactory.createConnection(testConf);
  Table table = connection.getTable(TableName.META_TABLE_NAME);
  connection.close();
  try {
    Get get = new Get(Bytes.toBytes("dummyRow"));
    table.get(get);
    fail("Should have thrown DoNotRetryIOException but no exception thrown");
  } catch (Exception e) {
    if (!(e instanceof DoNotRetryIOException)) {
      String errMsg =
        "Should have thrown DoNotRetryIOException but actually " + e.getClass().getSimpleName();
      LOG.error(errMsg, e);
      fail(errMsg);
    }
  } finally {
    table.close();
  }
}
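The point of the test is that an operation against a closed connection surfaces as DoNotRetryIOException rather than being retried HBASE_CLIENT_RETRIES_NUMBER times. A minimal sketch (not from the HBase source; method and names assumed) of the caller-side pattern this exception type enables:

// Hypothetical retry loop: DoNotRetryIOException short-circuits retries,
// while other IOExceptions are treated as transient and retried.
Result getWithRetries(Table table, Get get, int maxRetries) throws IOException {
  IOException last = null;
  for (int attempt = 0; attempt < maxRetries; attempt++) {
    try {
      return table.get(get);
    } catch (DoNotRetryIOException e) {
      throw e; // permanent failure: retrying cannot help
    } catch (IOException e) {
      last = e; // assumed transient: loop and try again
    }
  }
  throw last;
}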
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.
The class ColumnAggregationEndpointWithErrors, method sum.
@Override
public void sum(RpcController controller, ColumnAggregationWithErrorsSumRequest request,
    RpcCallback<ColumnAggregationWithErrorsSumResponse> done) {
  // aggregate at each region
  Scan scan = new Scan();
  // Family is required in pb. Qualifier is not.
  byte[] family = request.getFamily().toByteArray();
  byte[] qualifier = request.hasQualifier() ? request.getQualifier().toByteArray() : null;
  if (request.hasQualifier()) {
    scan.addColumn(family, qualifier);
  } else {
    scan.addFamily(family);
  }
  int sumResult = 0;
  InternalScanner scanner = null;
  try {
    Region region = this.env.getRegion();
    // throw an exception for requests to the last region in the table, to test error handling
    if (Bytes.equals(region.getRegionInfo().getEndKey(), HConstants.EMPTY_END_ROW)) {
      throw new DoNotRetryIOException("An expected exception");
    }
    scanner = region.getScanner(scan);
    List<Cell> curVals = new ArrayList<>();
    boolean hasMore = false;
    do {
      curVals.clear();
      hasMore = scanner.next(curVals);
      for (Cell kv : curVals) {
        if (CellUtil.matchingQualifier(kv, qualifier)) {
          sumResult += Bytes.toInt(kv.getValueArray(), kv.getValueOffset());
        }
      }
    } while (hasMore);
  } catch (IOException e) {
    CoprocessorRpcUtils.setControllerException(controller, e);
    // Set result to -1 to indicate error.
    sumResult = -1;
    LOG.info("Setting sum result to -1 to indicate error", e);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (IOException e) {
        CoprocessorRpcUtils.setControllerException(controller, e);
        sumResult = -1;
        LOG.info("Setting sum result to -1 to indicate error", e);
      }
    }
  }
  done.run(ColumnAggregationWithErrorsSumResponse.newBuilder().setSum(sumResult).build());
}
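On the client side, an exception set via CoprocessorRpcUtils.setControllerException surfaces when the caller inspects the controller after the call returns. A hedged sketch of that counterpart (stub and request construction assumed, not from the source):

// Assumed client-side check: ServerRpcController records the endpoint's
// exception, and the caller re-throws it after the RPC completes.
ServerRpcController rpcController = new ServerRpcController();
CoprocessorRpcUtils.BlockingRpcCallback<ColumnAggregationWithErrorsSumResponse> callback =
    new CoprocessorRpcUtils.BlockingRpcCallback<>();
stub.sum(rpcController, request, callback);
if (rpcController.failedOnException()) {
  throw rpcController.getFailedOn(); // here: the DoNotRetryIOException thrown above
}
ColumnAggregationWithErrorsSumResponse response = callback.get();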
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.
The class RSGroupInfoManagerImpl, method moveTables.
@Override
public synchronized void moveTables(Set<TableName> tableNames, String groupName)
    throws IOException {
  if (groupName != null && !rsGroupMap.containsKey(groupName)) {
    throw new DoNotRetryIOException("Group " + groupName + " does not exist or is a special group");
  }
  Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
  for (TableName tableName : tableNames) {
    if (tableMap.containsKey(tableName)) {
      RSGroupInfo src = new RSGroupInfo(newGroupMap.get(tableMap.get(tableName)));
      src.removeTable(tableName);
      newGroupMap.put(src.getName(), src);
    }
    if (groupName != null) {
      RSGroupInfo dst = new RSGroupInfo(newGroupMap.get(groupName));
      dst.addTable(tableName);
      newGroupMap.put(dst.getName(), dst);
    }
  }
  flushConfig(newGroupMap);
}
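A short usage sketch (assumed caller, not from the source): because the missing-group check throws DoNotRetryIOException up front, a bad move fails fast instead of being retried by the RPC layer.

// Hypothetical caller of moveTables: the target group must already exist.
Set<TableName> tables = Collections.singleton(TableName.valueOf("t1"));
manager.moveTables(tables, "existing_group");  // succeeds; flushConfig persists the new mapping
manager.moveTables(tables, "no_such_group");   // throws DoNotRetryIOException immediately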
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.
The class RSGroupInfoManagerImpl, method multiMutate.
private void multiMutate(List<Mutation> mutations) throws IOException {
  CoprocessorRpcChannel channel = rsGroupTable.coprocessorService(ROW_KEY);
  MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder =
      MultiRowMutationProtos.MutateRowsRequest.newBuilder();
  for (Mutation mutation : mutations) {
    if (mutation instanceof Put) {
      mmrBuilder.addMutationRequest(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(
          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.PUT,
          mutation));
    } else if (mutation instanceof Delete) {
      mmrBuilder.addMutationRequest(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(
          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.DELETE,
          mutation));
    } else {
      throw new DoNotRetryIOException("multiMutate doesn't support " + mutation.getClass().getName());
    }
  }
  MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
      MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
  try {
    service.mutateRows(null, mmrBuilder.build());
  } catch (ServiceException ex) {
    // Convert and propagate the remote failure rather than swallowing it.
    throw ProtobufUtil.toIOException(ex);
  }
}
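For illustration, a hedged sketch of how this helper might be fed (column family, qualifiers, and serialized value are assumed names, not from the source): only Put and Delete are accepted, so anything else fails fast with DoNotRetryIOException before any RPC is made.

// Hypothetical call site: persist a group atomically under an assumed layout.
List<Mutation> mutations = new ArrayList<>();
mutations.add(new Put(ROW_KEY).addColumn(META_FAMILY_BYTES, groupNameBytes, serializedGroup));
mutations.add(new Delete(ROW_KEY).addColumns(META_FAMILY_BYTES, oldGroupNameBytes));
multiMutate(mutations); // an Increment in this list would throw DoNotRetryIOException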
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.
The class MasterRpcServices, method requestLock.
@Override
public LockResponse requestLock(RpcController controller, final LockRequest request)
    throws ServiceException {
  try {
    if (request.getDescription().isEmpty()) {
      throw new IllegalArgumentException("Empty description");
    }
    NonceProcedureRunnable npr;
    LockProcedure.LockType type = LockProcedure.LockType.valueOf(request.getLockType().name());
    if (request.getRegionInfoCount() > 0) {
      final HRegionInfo[] regionInfos = new HRegionInfo[request.getRegionInfoCount()];
      for (int i = 0; i < request.getRegionInfoCount(); ++i) {
        regionInfos[i] = HRegionInfo.convert(request.getRegionInfo(i));
      }
      npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
        @Override
        protected void run() throws IOException {
          setProcId(master.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
              request.getDescription(), getNonceKey()));
        }

        @Override
        protected String getDescription() {
          return "RequestLock";
        }
      };
    } else if (request.hasTableName()) {
      final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
      npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
        @Override
        protected void run() throws IOException {
          setProcId(master.getLockManager().remoteLocks().requestTableLock(tableName, type,
              request.getDescription(), getNonceKey()));
        }

        @Override
        protected String getDescription() {
          return "RequestLock";
        }
      };
    } else if (request.hasNamespace()) {
      npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
        @Override
        protected void run() throws IOException {
          setProcId(master.getLockManager().remoteLocks().requestNamespaceLock(
              request.getNamespace(), type, request.getDescription(), getNonceKey()));
        }

        @Override
        protected String getDescription() {
          return "RequestLock";
        }
      };
    } else {
      throw new IllegalArgumentException("one of table/namespace/region should be specified");
    }
    long procId = MasterProcedureUtil.submitProcedure(npr);
    return LockResponse.newBuilder().setProcId(procId).build();
  } catch (IllegalArgumentException e) {
    LOG.warn("Exception when queuing lock", e);
    throw new ServiceException(new DoNotRetryIOException(e));
  } catch (IOException e) {
    LOG.warn("Exception when queuing lock", e);
    throw new ServiceException(e);
  }
}
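A hedged sketch of a request that trips the validation path (protobuf builder and field names assumed): the IllegalArgumentException is wrapped as a DoNotRetryIOException inside the ServiceException, telling the client not to retry a request that can never succeed.

// Hypothetical malformed request: the empty description fails validation.
LockRequest request = LockRequest.newBuilder()
    .setLockType(LockServiceProtos.LockType.EXCLUSIVE)
    .setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf("t1")))
    .setDescription("") // triggers IllegalArgumentException -> DoNotRetryIOException
    .build();
masterRpcServices.requestLock(null, request); // throws ServiceException(DoNotRetryIOException)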