Example 11 with ClusterConnection

use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

the class TestLoadIncrementalHFilesSplitRecovery method getMockedConnection.

@SuppressWarnings("deprecation")
private ClusterConnection getMockedConnection(final Configuration conf) throws IOException, org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
    ClusterConnection c = Mockito.mock(ClusterConnection.class);
    Mockito.when(c.getConfiguration()).thenReturn(conf);
    Mockito.doNothing().when(c).close();
    // Make it so we return a particular location when asked.
    final HRegionLocation loc = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, ServerName.valueOf("example.org", 1234, 0));
    Mockito.when(c.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean())).thenReturn(loc);
    Mockito.when(c.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any())).thenReturn(loc);
    // Mock the client protocol so every bulk load attempt fails with an injected error.
    ClientProtos.ClientService.BlockingInterface hri = Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
    Mockito.when(hri.bulkLoadHFile((RpcController) Mockito.any(), (BulkLoadHFileRequest) Mockito.any())).thenThrow(new ServiceException(new IOException("injecting bulk load error")));
    Mockito.when(c.getClient(Mockito.any(ServerName.class))).thenReturn(hri);
    return c;
}
Also used: ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), HRegionLocation (org.apache.hadoop.hbase.HRegionLocation), ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException), ServerName (org.apache.hadoop.hbase.ServerName), IOException (java.io.IOException)
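The core trick in this example is failure injection through Mockito stubbing: every call to the mocked client's bulkLoadHFile throws, so the bulk loader's retry and recovery path can be exercised without a real cluster. Below is a minimal standalone sketch of the same pattern using plain Mockito against a made-up BulkLoader interface; the interface, class, and path are hypothetical and not part of any HBase API.

import static org.mockito.Mockito.*;

import java.io.IOException;

public class InjectedFailureSketch {

    // Hypothetical collaborator; stands in for ClientService.BlockingInterface.
    interface BulkLoader {
        boolean bulkLoad(String hfilePath) throws IOException;
    }

    public static void main(String[] args) throws Exception {
        BulkLoader loader = mock(BulkLoader.class);
        // Every invocation fails with a known error, mirroring the
        // bulkLoadHFile stub in the example above.
        when(loader.bulkLoad(anyString()))
            .thenThrow(new IOException("injecting bulk load error"));
        try {
            loader.bulkLoad("/tmp/some.hfile");
        } catch (IOException expected) {
            System.out.println("caught injected error: " + expected.getMessage());
        }
        // The code under test would be handed this mock and expected to
        // retry, split, or surface the failure cleanly.
    }
}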

Example 12 with ClusterConnection

use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

the class TestRegionReplicaReplicationEndpointNoMaster method testReplayCallableWithRegionMove.

@Test(timeout = 240000)
public void testReplayCallableWithRegionMove() throws Exception {
    // Tests replaying the edits to a secondary region replica using the Callable directly while
    // the region is moved to another location. It tests handling of RegionMovedException (RME).
    openRegion(HTU, rs0, hriSecondary);
    ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(HTU.getConfiguration());
    // load some data to the primary
    HTU.loadNumericRows(table, f, 0, 1000);
    Assert.assertEquals(1000, entries.size());
    // replay the edits to the secondary using replay callable
    replicateUsingCallable(connection, entries);
    Region region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName());
    HTU.verifyNumericRows(region, f, 0, 1000);
    // load some more data to primary
    HTU.loadNumericRows(table, f, 1000, 2000);
    // move the secondary region from RS0 to RS1
    closeRegion(HTU, rs0, hriSecondary);
    openRegion(HTU, rs1, hriSecondary);
    // replicate the new data
    replicateUsingCallable(connection, entries);
    region = rs1.getFromOnlineRegions(hriSecondary.getEncodedName());
    // verify the new data. old data may or may not be there
    HTU.verifyNumericRows(region, f, 1000, 2000);
    HTU.deleteNumericRows(table, f, 0, 2000);
    closeRegion(HTU, rs1, hriSecondary);
    connection.close();
}
Also used: ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), Region (org.apache.hadoop.hbase.regionserver.Region), TestRegionServerNoMaster.closeRegion (org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster.closeRegion), TestRegionServerNoMaster.openRegion (org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster.openRegion), Test (org.junit.Test)

Example 13 with ClusterConnection

use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

the class TestRegionReplicaReplicationEndpointNoMaster method testReplayedEditsAreSkipped.

@Test(timeout = 240000)
public void testReplayedEditsAreSkipped() throws Exception {
    openRegion(HTU, rs0, hriSecondary);
    ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(HTU.getConfiguration());
    RegionReplicaReplicationEndpoint replicator = new RegionReplicaReplicationEndpoint();
    ReplicationEndpoint.Context context = mock(ReplicationEndpoint.Context.class);
    when(context.getConfiguration()).thenReturn(HTU.getConfiguration());
    when(context.getMetrics()).thenReturn(mock(MetricsSource.class));
    ReplicationPeer mockPeer = mock(ReplicationPeer.class);
    when(mockPeer.getNamespaces()).thenReturn(null);
    when(mockPeer.getTableCFs()).thenReturn(null);
    when(mockPeer.getPeerConfig()).thenReturn(new ReplicationPeerConfig());
    when(context.getReplicationPeer()).thenReturn(mockPeer);
    replicator.init(context);
    replicator.start();
    // test the WAL entry filter of the replication endpoint, not the actual replication
    WALEntryFilter filter = replicator.getWALEntryfilter();
    // load some data to the primary
    HTU.loadNumericRows(table, f, 0, 1000);
    Assert.assertEquals(1000, entries.size());
    for (Entry e : entries) {
        Cell _c = e.getEdit().getCells().get(0);
        if (Integer.parseInt(Bytes.toString(_c.getValueArray(), _c.getValueOffset(), _c.getValueLength())) % 2 == 0) {
            // simulate distributed log replay by setting the original log sequence id
            e.getKey().setOrigLogSeqNum(1);
        }
    }
    long skipped = 0, replayed = 0;
    for (Entry e : entries) {
        if (filter.filter(e) == null) {
            skipped++;
        } else {
            replayed++;
        }
    }
    assertEquals(500, skipped);
    assertEquals(500, replayed);
    HTU.deleteNumericRows(table, f, 0, 1000);
    closeRegion(HTU, rs0, hriSecondary);
    connection.close();
}
Also used: ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), Entry (org.apache.hadoop.hbase.wal.WAL.Entry), ReplicationEndpoint (org.apache.hadoop.hbase.replication.ReplicationEndpoint), ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig), WALEntryFilter (org.apache.hadoop.hbase.replication.WALEntryFilter), ReplicationPeer (org.apache.hadoop.hbase.replication.ReplicationPeer), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
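The filter contract being exercised here is: return null to drop (skip) an entry, return the entry to keep it for replay. A small standalone sketch of that contract and the skip/replay counting loop follows; the Filter interface, class name, and even/odd rule are hypothetical stand-ins for WALEntryFilter and the "already replayed" marking in the test above.

import java.util.ArrayList;
import java.util.List;

public class SkipCountingSketch {

    // Hypothetical stand-in for WALEntryFilter: a null result means "skip".
    interface Filter<T> {
        T filter(T entry);
    }

    public static void main(String[] args) {
        // Skip even-numbered entries, mimicking the "orig seq id set" half of the test.
        Filter<Integer> skipEven = n -> (n % 2 == 0) ? null : n;

        List<Integer> entries = new ArrayList<>();
        for (int i = 0; i < 1000; i++) {
            entries.add(i);
        }

        long skipped = 0, replayed = 0;
        for (Integer e : entries) {
            if (skipEven.filter(e) == null) {
                skipped++;
            } else {
                replayed++;
            }
        }
        // Mirrors the 500/500 split asserted in the test above.
        System.out.println("skipped=" + skipped + " replayed=" + replayed);
    }
}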

Example 14 with ClusterConnection

use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

the class TestMetaTableAccessorNoCluster method testRideOverServerNotRunning.

/**
   * Test that MetaTableAccessor will ride over a server throwing
   * "Server not running" IOEs.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-3446">HBASE-3446</a>
   * @throws IOException
   * @throws InterruptedException
   */
@Test
public void testRideOverServerNotRunning() throws IOException, InterruptedException, ServiceException {
    // Need a zk watcher.
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), this.getClass().getSimpleName(), ABORTABLE, true);
    // This is a servername we use in a few places below.
    ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());
    ClusterConnection connection = null;
    try {
        // Mock a ClientProtocol. Our mock implementation will fail a few
        // times when we go to open a scanner.
        final ClientProtos.ClientService.BlockingInterface implementation = Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
        // When scan is called, throw an IOE 'Server not running' a few times
        // before we return a scanner id.  What's WEIRD is that these
        // exceptions do not show in the log because they are caught and only
        // printed if we FAIL.  We eventually succeed after retry, so they don't
        // show.  We will know whether or not they happened because at the end
        // of this test we ask Mockito to verify that scan was indeed called
        // the wanted number of times.
        List<Cell> kvs = new ArrayList<>();
        final byte[] rowToVerify = Bytes.toBytes("rowToVerify");
        kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, HRegionInfo.FIRST_META_REGIONINFO.toByteArray()));
        kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes(sn.getHostAndPort())));
        kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn.getStartcode())));
        final List<CellScannable> cellScannables = new ArrayList<>(1);
        cellScannables.add(Result.create(kvs));
        final ScanResponse.Builder builder = ScanResponse.newBuilder();
        for (CellScannable result : cellScannables) {
            builder.addCellsPerResult(((Result) result).size());
        }
        Mockito.when(implementation.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any())).thenThrow(new ServiceException("Server not running (1 of 3)")).thenThrow(new ServiceException("Server not running (2 of 3)")).thenThrow(new ServiceException("Server not running (3 of 3)")).thenAnswer(new Answer<ScanResponse>() {

            public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
                ((HBaseRpcController) invocation.getArguments()[0]).setCellScanner(CellUtil.createCellScanner(cellScannables));
                return builder.setScannerId(1234567890L).setMoreResults(false).build();
            }
        });
        // Associate a spied-upon Connection with UTIL.getConfiguration.  Need
        // to shove this in here first so it gets picked up all over; e.g. by
        // HTable.
        connection = HConnectionTestingUtility.getSpiedConnection(UTIL.getConfiguration());
        // Fix the location lookup so it 'works' even though there is no network.  First
        // make an 'any location' object.
        final HRegionLocation anyLocation = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn);
        final RegionLocations rl = new RegionLocations(anyLocation);
        // Return the RegionLocations object when locateRegion is called.
        // The ugly format below comes from the 'Important gotcha on spying real objects!' section of
        // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html
        Mockito.doReturn(rl).when(connection).locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt());
        // Now shove our HRI implementation into the spied-upon connection.
        Mockito.doReturn(implementation).when(connection).getClient(Mockito.any(ServerName.class));
        // Scan meta for user tables and verify we got back expected answer.
        NavigableMap<HRegionInfo, Result> hris = MetaTableAccessor.getServerUserRegions(connection, sn);
        assertEquals(1, hris.size());
        assertTrue(hris.firstEntry().getKey().equals(HRegionInfo.FIRST_META_REGIONINFO));
        assertTrue(Bytes.equals(rowToVerify, hris.firstEntry().getValue().getRow()));
        // Finally verify that scan was called four times -- three times
        // with an exception and then succeeding on the 4th attempt
        Mockito.verify(implementation, Mockito.times(4)).scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any());
    } finally {
        if (connection != null && !connection.isClosed())
            connection.close();
        zkw.close();
    }
}
Also used: ArrayList (java.util.ArrayList), Result (org.apache.hadoop.hbase.client.Result), ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher), ScanResponse (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse), HBaseRpcController (org.apache.hadoop.hbase.ipc.HBaseRpcController), RpcController (org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController), ScanRequest (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest), ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException), InvocationOnMock (org.mockito.invocation.InvocationOnMock), Test (org.junit.Test)
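The retry behaviour in this test is simulated purely with Mockito's consecutive-stubbing API: the first three calls to scan throw, the fourth succeeds, and verify(..., times(4)) proves the caller really retried. A minimal standalone sketch of that chaining follows; the Scanner interface, scanWithRetries helper, and class name are hypothetical and not HBase APIs.

import static org.mockito.Mockito.*;

import java.io.IOException;

public class RetryStubSketch {

    // Hypothetical collaborator standing in for the scan RPC.
    interface Scanner {
        String scan(String table) throws IOException;
    }

    // Ride over failures and retry up to the given number of attempts.
    static String scanWithRetries(Scanner scanner, String table, int attempts) throws IOException {
        IOException last = new IOException("no attempts made");
        for (int i = 0; i < attempts; i++) {
            try {
                return scanner.scan(table);
            } catch (IOException e) {
                last = e; // swallow and retry
            }
        }
        throw last;
    }

    public static void main(String[] args) throws Exception {
        Scanner scanner = mock(Scanner.class);
        // Consecutive stubbing: three failures, then a success.
        when(scanner.scan(anyString()))
            .thenThrow(new IOException("Server not running (1 of 3)"))
            .thenThrow(new IOException("Server not running (2 of 3)"))
            .thenThrow(new IOException("Server not running (3 of 3)"))
            .thenReturn("scanner-1234567890");

        System.out.println(scanWithRetries(scanner, "hbase:meta", 5));
        // Prove the caller retried: the mock was hit exactly four times.
        verify(scanner, times(4)).scan(anyString());
    }
}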

Example 15 with ClusterConnection

use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

the class TestMetaTableLocator method mockConnection.

/**
   * @param admin An {@link AdminProtos.AdminService.BlockingInterface} instance; you'll likely
   * want to pass a mocked HRS; may be null.
   * @param client A mocked ClientProtocol instance; may be null.
   * @return A mocked-up connection that returns a {@link Configuration} when
   * {@link HConnection#getConfiguration()} is called, a 'location' when
   * {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called,
   * the passed {@link AdminProtos.AdminService.BlockingInterface} instance when
   * {@link HConnection#getAdmin(ServerName)} is called, and the passed
   * {@link ClientProtos.ClientService.BlockingInterface} instance when
   * {@link HConnection#getClient(ServerName)} is called.
   * @throws IOException
   */
private ClusterConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin, final ClientProtos.ClientService.BlockingInterface client) throws IOException {
    ClusterConnection connection = HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
    Mockito.doNothing().when(connection).close();
    // Make it so we return any old location when asked.
    final HRegionLocation anyLocation = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, SN);
    Mockito.when(connection.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean())).thenReturn(anyLocation);
    Mockito.when(connection.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any())).thenReturn(anyLocation);
    if (admin != null) {
        // If getAdmin is called, return this implementation.
        Mockito.when(connection.getAdmin(Mockito.any(ServerName.class))).thenReturn(admin);
    }
    if (client != null) {
        // If getClient is called, return this implementation.
        Mockito.when(connection.getClient(Mockito.any(ServerName.class))).thenReturn(client);
    }
    return connection;
}
Also used: ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection)
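A hypothetical call site for this helper might look like the sketch below. It lives inside the same test class (UTIL, SN, and mockConnection come from the example); the test name is made up, and the assertions target only what the stubbing above guarantees.

// Not from the original source: a sketch of how mockConnection might be used.
@Test
public void usesMockedConnection() throws IOException {
    AdminProtos.AdminService.BlockingInterface admin =
        Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
    ClusterConnection connection = mockConnection(admin, null);
    // Any lookup resolves to hbase:meta's first replica, hosted on SN.
    HRegionLocation location =
        connection.getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, false);
    Assert.assertEquals(HRegionInfo.FIRST_META_REGIONINFO, location.getRegionInfo());
    // The passed-in admin mock is handed back for any server name.
    Assert.assertSame(admin, connection.getAdmin(SN));
    connection.close();
}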

Aggregations

ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection): 23
Test (org.junit.Test): 12
ServerName (org.apache.hadoop.hbase.ServerName): 8
IOException (java.io.IOException): 6
Configuration (org.apache.hadoop.conf.Configuration): 4
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 4
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 4
TableName (org.apache.hadoop.hbase.TableName): 4
ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException): 4
ArrayList (java.util.ArrayList): 3
List (java.util.List): 3
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 3
Waiter (org.apache.hadoop.hbase.Waiter): 3
RegionLocator (org.apache.hadoop.hbase.client.RegionLocator): 3
Result (org.apache.hadoop.hbase.client.Result): 3
Table (org.apache.hadoop.hbase.client.Table): 3
LinkedList (java.util.LinkedList): 2
Map (java.util.Map): 2
FileSystem (org.apache.hadoop.fs.FileSystem): 2
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 2