Example 61 with ServiceException

Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.

From the class RSGroupAdminClient, method moveServers.

/**
 * Move given set of servers to the specified target RegionServer group.
 */
public void moveServers(Set<Address> servers, String targetGroup) throws IOException {
    Set<HBaseProtos.ServerName> hostPorts = Sets.newHashSet();
    for (Address el : servers) {
        hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()).setPort(el.getPort()).build());
    }
    MoveServersRequest request = MoveServersRequest.newBuilder().setTargetGroup(targetGroup).addAllServers(hostPorts).build();
    try {
        stub.moveServers(null, request);
    } catch (ServiceException e) {
        throw ProtobufUtil.handleRemoteException(e);
    }
}
Also used : Address(org.apache.hadoop.hbase.net.Address) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) MoveServersRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersRequest)
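
Below is a minimal usage sketch for moveServers, assuming the cluster runs the rsgroup coprocessor endpoint and that RSGroupAdminClient (org.apache.hadoop.hbase.rsgroup) can be constructed from a client Connection; the group name and host/port values are hypothetical placeholders:

import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

public class MoveServersExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
            RSGroupAdminClient admin = new RSGroupAdminClient(conn);
            // Host:port pairs of the region servers to move (hypothetical values).
            Set<Address> servers = Sets.newHashSet(
                Address.fromParts("rs1.example.com", 16020),
                Address.fromParts("rs2.example.com", 16020));
            // Any ServiceException from the RPC stub surfaces as an IOException,
            // unwrapped by ProtobufUtil.handleRemoteException as shown above.
            admin.moveServers(servers, "my_group");
        }
    }
}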

Example 62 with ServiceException

Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.

From the class RSGroupAdminClient, method removeServers.

/**
 * Remove decommissioned servers from their rsgroup.
 * 1. A server may abort because of a hardware failure and need to be taken offline for
 *    repair, or servers may need to be moved to join another cluster; such servers should
 *    be removed from the rsgroup.
 * 2. Dead, recovering, and live servers are disallowed.
 * @param servers set of servers to remove
 */
public void removeServers(Set<Address> servers) throws IOException {
    Set<HBaseProtos.ServerName> hostPorts = Sets.newHashSet();
    for (Address el : servers) {
        hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()).setPort(el.getPort()).build());
    }
    RemoveServersRequest request = RemoveServersRequest.newBuilder().addAllServers(hostPorts).build();
    try {
        stub.removeServers(null, request);
    } catch (ServiceException e) {
        throw ProtobufUtil.handleRemoteException(e);
    }
}
Also used : RemoveServersRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest) Address(org.apache.hadoop.hbase.net.Address) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException)
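
A matching sketch for removeServers, under the same assumptions as the previous example; the host/port pair is a hypothetical placeholder, and the server is presumed already decommissioned, since dead, recovering, and live servers are rejected:

import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

public class RemoveServersExample {
    // Removes one decommissioned server from its rsgroup; the host/port
    // pair below is a hypothetical placeholder.
    static void removeDecommissioned(Connection conn) throws IOException {
        RSGroupAdminClient admin = new RSGroupAdminClient(conn);
        Set<Address> decommissioned =
            Sets.newHashSet(Address.fromParts("rs3.example.com", 16020));
        // Throws IOException (via ProtobufUtil.handleRemoteException) if any
        // server in the set is still dead/recovering/live.
        admin.removeServers(decommissioned);
    }
}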

Example 63 with ServiceException

Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.

From the class TestEntityLocks, method testHeartbeatException.

/**
 * Test that abort is called when lockHeartbeat fails with IOException.
 */
@Test
public void testHeartbeatException() throws Exception {
    final long workerSleepTime = 100; // in ms
    Abortable abortable = Mockito.mock(Abortable.class);
    EntityLock lock = admin.namespaceLock("namespace", "description", abortable);
    lock.setTestingSleepTime(workerSleepTime);
    when(master.requestLock(any(), any())).thenReturn(LockResponse.newBuilder().setProcId(procId).build());
    when(master.lockHeartbeat(any(), any())).thenReturn(LOCKED_RESPONSE).thenThrow(new ServiceException("Failed heartbeat!"));
    lock.requestLock();
    lock.await();
    assertTrue(waitLockTimeOut(lock, 100 * workerSleepTime));
    while (lock.getWorker().isAlive()) {
        TimeUnit.MILLISECONDS.sleep(100);
    }
    verify(abortable, times(1)).abort(any(), isA(HBaseIOException.class));
    assertFalse(lock.getWorker().isAlive());
}
Also used : ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) Abortable(org.apache.hadoop.hbase.Abortable) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) Test(org.junit.Test)
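
The core idiom in this test is Mockito's consecutive stubbing: the first lockHeartbeat call succeeds and the next one throws. A standalone sketch of that pattern, using a hypothetical Heartbeat interface in place of the master RPC stub:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

public class ConsecutiveStubbingSketch {
    // Hypothetical collaborator standing in for the master RPC stub.
    interface Heartbeat {
        String beat() throws ServiceException;
    }

    public static void main(String[] args) throws ServiceException {
        Heartbeat hb = mock(Heartbeat.class);
        // First call returns normally; second and later calls throw,
        // mirroring thenReturn(LOCKED_RESPONSE).thenThrow(...) above.
        when(hb.beat()).thenReturn("LOCKED").thenThrow(new ServiceException("Failed heartbeat!"));
        System.out.println(hb.beat()); // prints LOCKED
        try {
            hb.beat();
        } catch (ServiceException expected) {
            System.out.println("second call threw: " + expected.getMessage());
        }
    }
}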

Example 64 with ServiceException

Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.

From the class TestRegionReplicas, method testFlushAndCompactionsInPrimary.

@Test
public void testFlushAndCompactionsInPrimary() throws Exception {
    long runtime = 30 * 1000;
    // enable store file refreshing; a 100 ms period is a very frequent refresh
    final int refreshPeriod = 100;
    HTU.getConfiguration().setInt("hbase.hstore.compactionThreshold", 3);
    HTU.getConfiguration().setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, refreshPeriod);
    // restart the region server so that it starts the refresher chore
    restartRegionServer();
    final int startKey = 0, endKey = 1000;
    try {
        openRegion(HTU, getRS(), hriSecondary);
        // load some data to primary so that reader won't fail
        HTU.loadNumericRows(table, f, startKey, endKey);
        TestRegionServerNoMaster.flushRegion(HTU, hriPrimary);
        // ensure that chore is run
        Threads.sleep(2 * refreshPeriod);
        final AtomicBoolean running = new AtomicBoolean(true);
        @SuppressWarnings("unchecked") final AtomicReference<Exception>[] exceptions = new AtomicReference[3];
        for (int i = 0; i < exceptions.length; i++) {
            exceptions[i] = new AtomicReference<>();
        }
        Runnable writer = new Runnable() {

            int key = startKey;

            @Override
            public void run() {
                try {
                    while (running.get()) {
                        byte[] data = Bytes.toBytes(String.valueOf(key));
                        Put put = new Put(data);
                        put.addColumn(f, null, data);
                        table.put(put);
                        key++;
                        if (key == endKey) {
                            key = startKey;
                        }
                    }
                } catch (Exception ex) {
                    LOG.warn(ex.toString(), ex);
                    exceptions[0].compareAndSet(null, ex);
                }
            }
        };
        Runnable flusherCompactor = new Runnable() {

            Random random = new Random();

            @Override
            public void run() {
                try {
                    while (running.get()) {
                        // flush or compact
                        if (random.nextBoolean()) {
                            TestRegionServerNoMaster.flushRegion(HTU, hriPrimary);
                        } else {
                            HTU.compact(table.getName(), random.nextBoolean());
                        }
                    }
                } catch (Exception ex) {
                    LOG.warn(ex.toString(), ex);
                    exceptions[1].compareAndSet(null, ex);
                }
            }
        };
        Runnable reader = new Runnable() {

            Random random = new Random();

            @Override
            public void run() {
                try {
                    while (running.get()) {
                        // whether to do a close and open
                        if (random.nextInt(10) == 0) {
                            try {
                                closeRegion(HTU, getRS(), hriSecondary);
                            } catch (Exception ex) {
                                LOG.warn("Failed closing the region " + hriSecondary + " " + StringUtils.stringifyException(ex));
                                exceptions[2].compareAndSet(null, ex);
                            }
                            try {
                                openRegion(HTU, getRS(), hriSecondary);
                            } catch (Exception ex) {
                                LOG.warn("Failed opening the region " + hriSecondary + " " + StringUtils.stringifyException(ex));
                                exceptions[2].compareAndSet(null, ex);
                            }
                        }
                        int key = random.nextInt(endKey - startKey) + startKey;
                        assertGetRpc(hriSecondary, key, true);
                    }
                } catch (Exception ex) {
                    LOG.warn("Failed getting the value in the region " + hriSecondary + " " + StringUtils.stringifyException(ex));
                    exceptions[2].compareAndSet(null, ex);
                }
            }
        };
        LOG.info("Starting writer and reader, secondary={}", hriSecondary.getEncodedName());
        ExecutorService executor = Executors.newFixedThreadPool(3);
        executor.submit(writer);
        executor.submit(flusherCompactor);
        executor.submit(reader);
        // wait for threads
        Threads.sleep(runtime);
        running.set(false);
        executor.shutdown();
        executor.awaitTermination(30, TimeUnit.SECONDS);
        for (AtomicReference<Exception> exRef : exceptions) {
            Assert.assertNull(exRef.get());
        }
    } finally {
        HTU.deleteNumericRows(table, HConstants.CATALOG_FAMILY, startKey, endKey);
        try {
            closeRegion(HTU, getRS(), hriSecondary);
        } catch (ServiceException e) {
            LOG.info("Closing wrong region {}", hriSecondary, e);
        }
    }
}
Also used : AtomicReference(java.util.concurrent.atomic.AtomicReference) Put(org.apache.hadoop.hbase.client.Put) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) IOException(java.io.IOException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Random(java.util.Random) ExecutorService(java.util.concurrent.ExecutorService) Test(org.junit.Test)
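
The error-handling pattern above is worth isolating: each worker records only its first exception with AtomicReference.compareAndSet(null, ex), and the main thread checks that every slot is still null after shutdown. A minimal standalone sketch of that pattern, with the worker bodies stubbed out:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

public class FirstFailureCaptureSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean running = new AtomicBoolean(true);
        // One slot per worker; compareAndSet(null, ex) keeps only the first failure.
        @SuppressWarnings("unchecked")
        final AtomicReference<Exception>[] exceptions = new AtomicReference[2];
        for (int i = 0; i < exceptions.length; i++) {
            exceptions[i] = new AtomicReference<>();
        }
        ExecutorService executor = Executors.newFixedThreadPool(exceptions.length);
        for (int i = 0; i < exceptions.length; i++) {
            final int slot = i;
            executor.submit(() -> {
                try {
                    while (running.get()) {
                        // ... real worker body (writes, flushes, reads) goes here ...
                        TimeUnit.MILLISECONDS.sleep(10);
                    }
                } catch (Exception ex) {
                    exceptions[slot].compareAndSet(null, ex);
                }
            });
        }
        TimeUnit.MILLISECONDS.sleep(200); // stand-in for the 30 s runtime above
        running.set(false);
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
        for (AtomicReference<Exception> exRef : exceptions) {
            if (exRef.get() != null) {
                throw new AssertionError("worker failed", exRef.get());
            }
        }
    }
}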

Example 65 with ServiceException

Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.

From the class TestConnection, method testClosedConnection.

@Test(expected = DoNotRetryIOException.class)
public void testClosedConnection() throws ServiceException, Throwable {
    byte[] family = Bytes.toBytes("cf");
    TableName tableName = TableName.valueOf(name.getMethodName());
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName).setCoprocessor(MultiRowMutationEndpoint.class.getName()).setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
    TEST_UTIL.getAdmin().createTable(builder.build());
    Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
    // cache the location
    try (Table table = conn.getTable(tableName)) {
        table.get(new Get(Bytes.toBytes(0)));
    } finally {
        conn.close();
    }
    Batch.Call<MultiRowMutationService, MutateRowsResponse> callable = service -> {
        throw new RuntimeException("Should not arrive here");
    };
    conn.getTable(tableName).coprocessorService(MultiRowMutationService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, callable);
}
Also used : IntStream(java.util.stream.IntStream) Waiter(org.apache.hadoop.hbase.Waiter) BeforeClass(org.junit.BeforeClass) MultiRowMutationService(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService) LoggerFactory(org.slf4j.LoggerFactory) AtomicReference(java.util.concurrent.atomic.AtomicReference) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) HConstants(org.apache.hadoop.hbase.HConstants) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MutateRowsResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse) TestName(org.junit.rules.TestName) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) Configuration(org.apache.hadoop.conf.Configuration) After(org.junit.After) ClassRule(org.junit.ClassRule) ServerName(org.apache.hadoop.hbase.ServerName) Level(org.apache.hbase.thirdparty.io.netty.util.ResourceLeakDetector.Level) Bytes(org.apache.hadoop.hbase.util.Bytes) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) TableName(org.apache.hadoop.hbase.TableName) ManualEnvironmentEdge(org.apache.hadoop.hbase.util.ManualEnvironmentEdge) AfterClass(org.junit.AfterClass) Logger(org.slf4j.Logger) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Assert.assertTrue(org.junit.Assert.assertTrue) Set(java.util.Set) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) LargeTests(org.apache.hadoop.hbase.testclassification.LargeTests) IOException(java.io.IOException) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) ResourceLeakDetector(org.apache.hbase.thirdparty.io.netty.util.ResourceLeakDetector) List(java.util.List) Rule(org.junit.Rule) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) Assert.assertFalse(org.junit.Assert.assertFalse) RpcClient(org.apache.hadoop.hbase.ipc.RpcClient) EnvironmentEdgeManager(org.apache.hadoop.hbase.util.EnvironmentEdgeManager) Assert(org.junit.Assert) Assert.assertEquals(org.junit.Assert.assertEquals)
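
The test verifies that a Table obtained from an already-closed Connection fails fast with DoNotRetryIOException instead of retrying. A hedged sketch of the safe counterpart, which keeps the Connection open for the lifetime of every Table derived from it; the table name and row key are hypothetical placeholders:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionLifetimeSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // The Connection must outlive every Table created from it; closing the
        // Connection first is exactly what makes the test above throw
        // DoNotRetryIOException.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("my_table"))) {
            table.get(new Get(Bytes.toBytes("row-0")));
        }
    }
}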

Aggregations

ServiceException (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException): 130
IOException (java.io.IOException): 112
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 100
ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString): 39
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 28
UncheckedIOException (java.io.UncheckedIOException): 27
TableName (org.apache.hadoop.hbase.TableName): 22
QosPriority (org.apache.hadoop.hbase.ipc.QosPriority): 22
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 19
UnknownRegionException (org.apache.hadoop.hbase.UnknownRegionException): 16
UnknownProtocolException (org.apache.hadoop.hbase.exceptions.UnknownProtocolException): 16
Test (org.junit.Test): 16
InvocationTargetException (java.lang.reflect.InvocationTargetException): 15
ArrayList (java.util.ArrayList): 15
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 15
ForeignException (org.apache.hadoop.hbase.errorhandling.ForeignException): 15
ServerNotRunningYetException (org.apache.hadoop.hbase.ipc.ServerNotRunningYetException): 15
KeeperException (org.apache.zookeeper.KeeperException): 14
Table (org.apache.hadoop.hbase.client.Table): 13
User (org.apache.hadoop.hbase.security.User): 13