Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.
The class RSGroupAdminClient, method moveServers.
/**
* Move given set of servers to the specified target RegionServer group.
*/
public void moveServers(Set<Address> servers, String targetGroup) throws IOException {
  Set<HBaseProtos.ServerName> hostPorts = Sets.newHashSet();
  for (Address el : servers) {
    hostPorts.add(
      HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()).setPort(el.getPort()).build());
  }
  MoveServersRequest request =
    MoveServersRequest.newBuilder().setTargetGroup(targetGroup).addAllServers(hostPorts).build();
  try {
    stub.moveServers(null, request);
  } catch (ServiceException e) {
    throw ProtobufUtil.handleRemoteException(e);
  }
}
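A minimal caller-side sketch: the Connection conn, the group name "appGroup", and the host rs1.example.com are placeholders, and the RSGroupAdminClient(Connection) constructor and Address.fromParts factory are assumed from elsewhere in the hbase codebase, so treat this as illustrative rather than canonical usage.

RSGroupAdminClient rsGroupAdmin = new RSGroupAdminClient(conn);
Set<Address> servers = Sets.newHashSet(Address.fromParts("rs1.example.com", 16020));
// a remote failure surfaces as a plain IOException here; the ServiceException has
// already been unwrapped by ProtobufUtil.handleRemoteException in the method above
rsGroupAdmin.moveServers(servers, "appGroup");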
Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.
The class RSGroupAdminClient, method removeServers.
/**
 * Remove decommissioned servers from the rsgroup.
 * 1. A server may abort due to a hardware failure and need to be taken offline for repair, or
 * servers may need to be moved to join another cluster; such servers should be removed from the
 * rsgroup.
 * 2. Dead, recovering, and live servers will be disallowed.
 * @param servers set of servers to remove
 */
public void removeServers(Set<Address> servers) throws IOException {
  Set<HBaseProtos.ServerName> hostPorts = Sets.newHashSet();
  for (Address el : servers) {
    hostPorts.add(
      HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()).setPort(el.getPort()).build());
  }
  RemoveServersRequest request = RemoveServersRequest.newBuilder().addAllServers(hostPorts).build();
  try {
    stub.removeServers(null, request);
  } catch (ServiceException e) {
    throw ProtobufUtil.handleRemoteException(e);
  }
}
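As with moveServers, a brief hedged sketch of calling this method; conn and the host/port are placeholders, and the servers passed in are assumed to already be decommissioned, since dead, recovering, and live servers are rejected as the javadoc above notes.

RSGroupAdminClient rsGroupAdmin = new RSGroupAdminClient(conn);
Set<Address> decommissioned = Sets.newHashSet(Address.fromParts("rs2.example.com", 16020));
// throws IOException if any server is still dead, recovering, or live
rsGroupAdmin.removeServers(decommissioned);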
Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.
The class TestEntityLocks, method testHeartbeatException.
/**
* Test that abort is called when lockHeartbeat fails with IOException.
*/
@Test
public void testHeartbeatException() throws Exception {
  // in ms
  final long workerSleepTime = 100;
  Abortable abortable = Mockito.mock(Abortable.class);
  EntityLock lock = admin.namespaceLock("namespace", "description", abortable);
  lock.setTestingSleepTime(workerSleepTime);
  when(master.requestLock(any(), any()))
    .thenReturn(LockResponse.newBuilder().setProcId(procId).build());
  when(master.lockHeartbeat(any(), any()))
    .thenReturn(LOCKED_RESPONSE).thenThrow(new ServiceException("Failed heartbeat!"));
  lock.requestLock();
  lock.await();
  assertTrue(waitLockTimeOut(lock, 100 * workerSleepTime));
  while (lock.getWorker().isAlive()) {
    TimeUnit.MILLISECONDS.sleep(100);
  }
  verify(abortable, times(1)).abort(any(), isA(HBaseIOException.class));
  assertFalse(lock.getWorker().isAlive());
}
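The test relies on Mockito's consecutive stubbing: the first lockHeartbeat call returns LOCKED_RESPONSE, the second throws a ServiceException, which the lock worker converts to an HBaseIOException and reports to the Abortable. A self-contained sketch of that stubbing pattern, using a hypothetical Heartbeater interface (assumes Mockito and the hbase-thirdparty protobuf jar on the classpath):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

public class ConsecutiveStubbingSketch {
  // hypothetical interface, standing in for the mocked master stub
  interface Heartbeater {
    String beat() throws ServiceException;
  }

  public static void main(String[] args) throws Exception {
    Heartbeater hb = mock(Heartbeater.class);
    // first call succeeds, second call fails, mirroring the test above
    when(hb.beat()).thenReturn("LOCKED").thenThrow(new ServiceException("Failed heartbeat!"));
    System.out.println(hb.beat()); // prints LOCKED
    try {
      hb.beat(); // second call throws
    } catch (ServiceException expected) {
      System.out.println("caught: " + expected.getMessage());
    }
  }
}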
Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.
The class TestRegionReplicas, method testFlushAndCompactionsInPrimary.
@Test
public void testFlushAndCompactionsInPrimary() throws Exception {
  long runtime = 30 * 1000;
  // enable store file refreshing
  // 100ms refresh is a lot
  final int refreshPeriod = 100;
  HTU.getConfiguration().setInt("hbase.hstore.compactionThreshold", 3);
  HTU.getConfiguration().setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
    refreshPeriod);
  // restart the region server so that it starts the refresher chore
  restartRegionServer();
  final int startKey = 0, endKey = 1000;
  try {
    openRegion(HTU, getRS(), hriSecondary);
    // load some data to primary so that reader won't fail
    HTU.loadNumericRows(table, f, startKey, endKey);
    TestRegionServerNoMaster.flushRegion(HTU, hriPrimary);
    // ensure that chore is run
    Threads.sleep(2 * refreshPeriod);
    final AtomicBoolean running = new AtomicBoolean(true);
    @SuppressWarnings("unchecked")
    final AtomicReference<Exception>[] exceptions = new AtomicReference[3];
    for (int i = 0; i < exceptions.length; i++) {
      exceptions[i] = new AtomicReference<>();
    }
    Runnable writer = new Runnable() {
      int key = startKey;

      @Override
      public void run() {
        try {
          while (running.get()) {
            byte[] data = Bytes.toBytes(String.valueOf(key));
            Put put = new Put(data);
            put.addColumn(f, null, data);
            table.put(put);
            key++;
            if (key == endKey) {
              key = startKey;
            }
          }
        } catch (Exception ex) {
          LOG.warn(ex.toString(), ex);
          exceptions[0].compareAndSet(null, ex);
        }
      }
    };
    Runnable flusherCompactor = new Runnable() {
      Random random = new Random();

      @Override
      public void run() {
        try {
          while (running.get()) {
            // flush or compact
            if (random.nextBoolean()) {
              TestRegionServerNoMaster.flushRegion(HTU, hriPrimary);
            } else {
              HTU.compact(table.getName(), random.nextBoolean());
            }
          }
        } catch (Exception ex) {
          LOG.warn(ex.toString(), ex);
          exceptions[1].compareAndSet(null, ex);
        }
      }
    };
    Runnable reader = new Runnable() {
      Random random = new Random();

      @Override
      public void run() {
        try {
          while (running.get()) {
            // whether to do a close and open
            if (random.nextInt(10) == 0) {
              try {
                closeRegion(HTU, getRS(), hriSecondary);
              } catch (Exception ex) {
                LOG.warn("Failed closing the region " + hriSecondary + " "
                  + StringUtils.stringifyException(ex));
                exceptions[2].compareAndSet(null, ex);
              }
              try {
                openRegion(HTU, getRS(), hriSecondary);
              } catch (Exception ex) {
                LOG.warn("Failed opening the region " + hriSecondary + " "
                  + StringUtils.stringifyException(ex));
                exceptions[2].compareAndSet(null, ex);
              }
            }
            int key = random.nextInt(endKey - startKey) + startKey;
            assertGetRpc(hriSecondary, key, true);
          }
        } catch (Exception ex) {
          LOG.warn("Failed getting the value in the region " + hriSecondary + " "
            + StringUtils.stringifyException(ex));
          exceptions[2].compareAndSet(null, ex);
        }
      }
    };
    LOG.info("Starting writer and reader, secondary={}", hriSecondary.getEncodedName());
    ExecutorService executor = Executors.newFixedThreadPool(3);
    executor.submit(writer);
    executor.submit(flusherCompactor);
    executor.submit(reader);
    // wait for threads
    Threads.sleep(runtime);
    running.set(false);
    executor.shutdown();
    executor.awaitTermination(30, TimeUnit.SECONDS);
    for (AtomicReference<Exception> exRef : exceptions) {
      Assert.assertNull(exRef.get());
    }
  } finally {
    HTU.deleteNumericRows(table, HConstants.CATALOG_FAMILY, startKey, endKey);
    try {
      closeRegion(HTU, getRS(), hriSecondary);
    } catch (ServiceException e) {
      LOG.info("Closing wrong region {}", hriSecondary, e);
    }
  }
}
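The chore period set above is the only switch the test flips to enable secondary replica refresh. A hedged sketch of setting it in a standalone configuration; the key string is what StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD is expected to resolve to, and the 30 second value is illustrative, not a recommendation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RefresherConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // 0 (the default) leaves the chore disabled; a positive value is the refresh period in ms
    conf.setInt("hbase.regionserver.storefile.refresh.period", 30_000); // illustrative
    System.out.println(conf.getInt("hbase.regionserver.storefile.refresh.period", 0));
  }
}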
Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.
The class TestConnection, method testClosedConnection.
@Test(expected = DoNotRetryIOException.class)
public void testClosedConnection() throws ServiceException, Throwable {
  byte[] family = Bytes.toBytes("cf");
  TableName tableName = TableName.valueOf(name.getMethodName());
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName)
    .setCoprocessor(MultiRowMutationEndpoint.class.getName())
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
  TEST_UTIL.getAdmin().createTable(builder.build());
  Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
  // cache the location
  try (Table table = conn.getTable(tableName)) {
    table.get(new Get(Bytes.toBytes(0)));
  } finally {
    conn.close();
  }
  Batch.Call<MultiRowMutationService, MutateRowsResponse> callable = service -> {
    throw new RuntimeException("Should not arrive here");
  };
  conn.getTable(tableName).coprocessorService(MultiRowMutationService.class,
    HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, callable);
}
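The DoNotRetryIOException here is the expected outcome of using the connection after close(). For contrast, a hedged sketch of the non-failing shape, reusing tableName and TEST_UTIL from the test above: keep both the Table and any coprocessor call inside the Connection's lifetime, for example with try-with-resources.

try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
     Table table = conn.getTable(tableName)) {
  table.get(new Get(Bytes.toBytes(0)));
  // a coprocessorService(...) call would go here, before conn is closed
}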