Example 6 with ClusterConnection

use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

the class RegionSplitter method splitScan.

static LinkedList<Pair<byte[], byte[]>> splitScan(LinkedList<Pair<byte[], byte[]>> regionList, final Connection connection, final TableName tableName, SplitAlgorithm splitAlgo) throws IOException, InterruptedException {
    LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
    LinkedList<Pair<byte[], byte[]>> logicalSplitting = Lists.newLinkedList();
    LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();
    // Get table info
    Pair<Path, Path> tableDirAndSplitFile = getTableDirAndSplitFile(connection.getConfiguration(), tableName);
    Path tableDir = tableDirAndSplitFile.getFirst();
    FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
    // Clear the cache to forcibly refresh region information
    ((ClusterConnection) connection).clearRegionCache();
    HTableDescriptor htd = null;
    try (Table table = connection.getTable(tableName)) {
        htd = table.getTableDescriptor();
    }
    try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
        // for every region that hasn't been verified as a finished split
        for (Pair<byte[], byte[]> region : regionList) {
            byte[] start = region.getFirst();
            byte[] split = region.getSecond();
            // see if the new split daughter region has come online
            try {
                HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo();
                if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
                    logicalSplitting.add(region);
                    continue;
                }
            } catch (NoServerForRegionException nsfre) {
                // NSFRE will occur if the old hbase:meta entry has no server assigned
                LOG.info(nsfre);
                logicalSplitting.add(region);
                continue;
            }
            try {
                // when a daughter region is opened, a compaction is triggered
                // wait until compaction completes for both daughter regions
                LinkedList<HRegionInfo> check = Lists.newLinkedList();
                check.add(regionLocator.getRegionLocation(start).getRegionInfo());
                check.add(regionLocator.getRegionLocation(split).getRegionInfo());
                for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) {
                    byte[] sk = hri.getStartKey();
                    if (sk.length == 0)
                        sk = splitAlgo.firstRow();
                    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(connection.getConfiguration(), fs, tableDir, hri, true);
                    // Check every column family for the region and verify that none still has reference files.
                    boolean refFound = false;
                    for (HColumnDescriptor c : htd.getFamilies()) {
                        if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
                            break;
                        }
                    }
                    // compaction is completed when all reference files are gone
                    if (!refFound) {
                        check.remove(hri);
                    }
                }
                if (check.isEmpty()) {
                    finished.add(region);
                } else {
                    physicalSplitting.add(region);
                }
            } catch (NoServerForRegionException nsfre) {
                LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
                physicalSplitting.add(region);
                ((ClusterConnection) connection).clearRegionCache();
            }
        }
        LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size() + " split wait / " + physicalSplitting.size() + " reference wait");
        return finished;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) NoServerForRegionException(org.apache.hadoop.hbase.client.NoServerForRegionException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem)
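
The split scan above relies on ClusterConnection.clearRegionCache() to drop cached region locations so that the daughter regions of a split are re-read from hbase:meta. Below is a minimal sketch of that pattern in isolation, not taken from the HBase source; it assumes a table named 't' exists, and note that ClusterConnection is HBase-internal API, so casting a client-facing Connection to it is only appropriate inside HBase's own tooling.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ClearRegionCacheSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
            // Drop any cached region locations so the next lookup goes back to hbase:meta.
            ((ClusterConnection) connection).clearRegionCache();
            try (RegionLocator locator = connection.getRegionLocator(TableName.valueOf("t"))) {
                // Fresh lookups: newly opened daughter regions become visible here.
                locator.getAllRegionLocations()
                       .forEach(location -> System.out.println(location.getRegionInfo()));
            }
        }
    }
}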

Example 7 with ClusterConnection

use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

the class TestMetaTableLocator method testVerifyMetaRegionLocationFails.

/**
   * Test get of meta region fails properly if nothing to connect to.
   * @throws IOException
   * @throws InterruptedException
   * @throws KeeperException
   * @throws ServiceException
   */
@Test
public void testVerifyMetaRegionLocationFails() throws IOException, InterruptedException, KeeperException, ServiceException {
    ClusterConnection connection = Mockito.mock(ClusterConnection.class);
    ServiceException connectException = new ServiceException(new ConnectException("Connection refused"));
    final AdminProtos.AdminService.BlockingInterface implementation = Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
    Mockito.when(implementation.getRegionInfo((RpcController) Mockito.any(), (GetRegionInfoRequest) Mockito.any())).thenThrow(connectException);
    Mockito.when(connection.getAdmin(Mockito.any(ServerName.class))).thenReturn(implementation);
    RpcControllerFactory controllerFactory = Mockito.mock(RpcControllerFactory.class);
    Mockito.when(controllerFactory.newController()).thenReturn(Mockito.mock(HBaseRpcController.class));
    Mockito.when(connection.getRpcControllerFactory()).thenReturn(controllerFactory);
    ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());
    MetaTableLocator.setMetaLocation(this.watcher, sn, RegionState.State.OPENING);
    assertFalse(new MetaTableLocator().verifyMetaRegionLocation(connection, watcher, 100));
    MetaTableLocator.setMetaLocation(this.watcher, sn, RegionState.State.OPEN);
    assertFalse(new MetaTableLocator().verifyMetaRegionLocation(connection, watcher, 100));
}
Also used : ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) MetaTableLocator(org.apache.hadoop.hbase.zookeeper.MetaTableLocator) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) RpcControllerFactory(org.apache.hadoop.hbase.ipc.RpcControllerFactory) ConnectException(java.net.ConnectException) Test(org.junit.Test)
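
The failure in this test is injected by stubbing the admin stub so that any getRegionInfo call throws a wrapped ConnectException. The same Mockito idiom (an any() matcher plus thenThrow) works against any interface; here is a self-contained sketch using a hypothetical RegionAdmin interface as a stand-in for AdminProtos.AdminService.BlockingInterface (it is not an HBase API).

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.net.ConnectException;

public class ThrowingStubSketch {

    // Hypothetical stand-in for the blocking RPC stub mocked in the test above.
    interface RegionAdmin {
        String getRegionInfo(String region) throws Exception;
    }

    public static void main(String[] args) throws Exception {
        RegionAdmin admin = mock(RegionAdmin.class);
        // Whatever argument the caller passes, the stub fails like an unreachable server.
        when(admin.getRegionInfo(any())).thenThrow(new ConnectException("Connection refused"));
        try {
            admin.getRegionInfo("hbase:meta");
        } catch (ConnectException expected) {
            System.out.println("verification failed as expected: " + expected.getMessage());
        }
    }
}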

Example 8 with ClusterConnection

use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

the class MultiHConnection method processBatchCallback.

/**
   * Randomly pick a connection and process the batch of actions for a given table
   * @param actions the actions
   * @param tableName table name
   * @param results the results array
   * @param callback to run when results are in
   * @throws IOException If IO failure occurs
   */
@SuppressWarnings("deprecation")
public <R> void processBatchCallback(List<? extends Row> actions, TableName tableName, Object[] results, Batch.Callback<R> callback) throws IOException {
    // Currently used by RegionStateStore
    ClusterConnection conn = (ClusterConnection) connections[ThreadLocalRandom.current().nextInt(noOfConnections)];
    HTable.doBatchWithCallback(actions, results, callback, conn, batchPool, tableName);
}
Also used : ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection)
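
A hedged usage sketch of the method above: it assumes the MultiHConnection constructor takes a Configuration plus a pool size, and that a table 't' with column family 'f' already exists, so treat it as an illustration of the calling convention rather than production code.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MultiHConnection;

public class MultiHConnectionSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumption: (configuration, number of pooled connections) constructor.
        MultiHConnection multi = new MultiHConnection(conf, 3);
        try {
            List<Row> actions = new ArrayList<>();
            actions.add(new Put(Bytes.toBytes("row1"))
                    .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
            Object[] results = new Object[actions.size()];
            // Each call is routed through a randomly chosen pooled connection.
            multi.processBatchCallback(actions, TableName.valueOf("t"), results,
                    (Batch.Callback<Object>) (region, row, result) ->
                            System.out.println("updated row " + Bytes.toString(row)));
        } finally {
            multi.close();
        }
    }
}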

Example 9 with ClusterConnection

use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

the class TestRegionStates method testCanMakeProgressThoughMetaIsDown.

@Test(timeout = 10000)
public void testCanMakeProgressThoughMetaIsDown() throws IOException, InterruptedException, BrokenBarrierException {
    MasterServices server = mock(MasterServices.class);
    when(server.getServerName()).thenReturn(ServerName.valueOf("master,1,1"));
    Connection connection = mock(ClusterConnection.class);
    // Set up a table that gets 'stuck' when we try to fetch a row from the meta table.
    // It is stuck on a CyclicBarrier latch. We use CyclicBarrier because it will tell us when
    // thread is waiting on latch.
    Table metaTable = Mockito.mock(Table.class);
    final CyclicBarrier latch = new CyclicBarrier(2);
    when(metaTable.get((Get) Mockito.any())).thenAnswer(new Answer<Result>() {

        @Override
        public Result answer(InvocationOnMock invocation) throws Throwable {
            latch.await();
            throw new java.net.ConnectException("Connection refused");
        }
    });
    when(connection.getTable(TableName.META_TABLE_NAME)).thenReturn(metaTable);
    when(server.getConnection()).thenReturn((ClusterConnection) connection);
    Configuration configuration = mock(Configuration.class);
    when(server.getConfiguration()).thenReturn(configuration);
    TableStateManager tsm = mock(TableStateManager.class);
    ServerManager sm = mock(ServerManager.class);
    when(sm.isServerOnline(isA(ServerName.class))).thenReturn(true);
    RegionStateStore rss = mock(RegionStateStore.class);
    final RegionStates regionStates = new RegionStates(server, tsm, sm, rss);
    final ServerName sn = mockServer("one", 1);
    regionStates.updateRegionState(HRegionInfo.FIRST_META_REGIONINFO, State.SPLITTING_NEW, sn);
    Thread backgroundThread = new Thread("Get stuck setting server offline") {

        @Override
        public void run() {
            regionStates.serverOffline(sn);
        }
    };
    assertTrue(latch.getNumberWaiting() == 0);
    backgroundThread.start();
    while (latch.getNumberWaiting() == 0) ;
    // Verify I can do stuff with synchronized RegionStates methods, that I am not locked out.
    // Below is a call that is synchronized.  Can I do it and not block?
    regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO);
    // Done. Trip the barrier on the background thread.
    latch.await();
}
Also used : Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) CyclicBarrier(java.util.concurrent.CyclicBarrier) Result(org.apache.hadoop.hbase.client.Result) InvocationOnMock(org.mockito.invocation.InvocationOnMock) ServerName(org.apache.hadoop.hbase.ServerName) Test(org.junit.Test)
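
The test hinges on CyclicBarrier.getNumberWaiting() as a cheap way to confirm the background thread has actually parked before the main thread exercises the synchronized methods. Here is that probe-then-release pattern in plain Java, independent of HBase.

import java.util.concurrent.CyclicBarrier;

public class BarrierProbeSketch {
    public static void main(String[] args) throws Exception {
        final CyclicBarrier latch = new CyclicBarrier(2);
        Thread background = new Thread(() -> {
            try {
                // Parks here until the main thread also reaches the barrier.
                latch.await();
            } catch (Exception e) {
                Thread.currentThread().interrupt();
            }
        }, "blocked-worker");
        background.start();
        // getNumberWaiting() flips to 1 once the worker is genuinely parked.
        while (latch.getNumberWaiting() == 0) {
            Thread.sleep(1);
        }
        System.out.println("worker is parked; main thread is still free to make progress");
        // Trip the barrier to release the worker, mirroring the final latch.await() above.
        latch.await();
        background.join();
    }
}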

Example 10 with ClusterConnection

use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

the class TestMasterNoCluster method testFailover.

/**
   * Test master failover.
   * Start up three fake regionservers and a master.
   * @throws IOException
   * @throws KeeperException
   * @throws InterruptedException
   * @throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException 
   */
@Test
public void testFailover() throws Exception {
    final long now = System.currentTimeMillis();
    // Names for our three servers.  Make the port numbers match hostname.
    // Will come in use down in the server when we need to figure how to respond.
    final ServerName sn0 = ServerName.valueOf("0.example.org", 0, now);
    final ServerName sn1 = ServerName.valueOf("1.example.org", 1, now);
    final ServerName sn2 = ServerName.valueOf("2.example.org", 2, now);
    final ServerName[] sns = new ServerName[] { sn0, sn1, sn2 };
    // Put up the mock servers
    final Configuration conf = TESTUTIL.getConfiguration();
    final MockRegionServer rs0 = new MockRegionServer(conf, sn0);
    final MockRegionServer rs1 = new MockRegionServer(conf, sn1);
    final MockRegionServer rs2 = new MockRegionServer(conf, sn2);
    // Put some data into the servers.  Make it look like sn0 has the meta.
    // Put data into sn2 so it looks like it has a few regions for a table named 't'.
    MetaTableLocator.setMetaLocation(rs0.getZooKeeper(), rs0.getServerName(), RegionState.State.OPEN);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    Result[] results = new Result[] {
        MetaMockingUtil.getMetaTableRowResult(
            new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HBaseTestingUtility.KEYS[1]),
            rs2.getServerName()),
        MetaMockingUtil.getMetaTableRowResult(
            new HRegionInfo(tableName, HBaseTestingUtility.KEYS[1], HBaseTestingUtility.KEYS[2]),
            rs2.getServerName()),
        MetaMockingUtil.getMetaTableRowResult(
            new HRegionInfo(tableName, HBaseTestingUtility.KEYS[2], HConstants.EMPTY_END_ROW),
            rs2.getServerName()) };
    rs1.setNextResults(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), results);
    // Create master.  Subclass to override a few methods so we can insert mocks
    // and get notification on transitions.  We need to fake out any rpcs the
    // master does opening/closing regions.  Also need to fake out the address
    // of the 'remote' mocked up regionservers.
    CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(TESTUTIL.getConfiguration());
    // Insert a mock for the connection. Use TESTUTIL.getConfiguration rather than
    // the conf from the master; that conf will already have a ClusterConnection
    // associated with it, so the mocking of a connection below would fail.
    final ClusterConnection mockedConnection = HConnectionTestingUtility.getMockedConnectionAndDecorate(TESTUTIL.getConfiguration(), rs0, rs0, rs0.getServerName(), HRegionInfo.FIRST_META_REGIONINFO);
    HMaster master = new HMaster(conf, cp) {

        InetAddress getRemoteInetAddress(final int port, final long serverStartCode) throws UnknownHostException {
            // Return different address dependent on port passed.
            if (port > sns.length) {
                return super.getRemoteInetAddress(port, serverStartCode);
            }
            ServerName sn = sns[port];
            return InetAddress.getByAddress(sn.getHostname(), new byte[] { 10, 0, 0, (byte) sn.getPort() });
        }

        @Override
        void initClusterSchemaService() throws IOException, InterruptedException {
        }

        @Override
        ServerManager createServerManager(MasterServices master) throws IOException {
            ServerManager sm = super.createServerManager(master);
            // Spy on the created servermanager
            ServerManager spy = Mockito.spy(sm);
            // Fake a successful close.
            Mockito.doReturn(true).when(spy).sendRegionClose((ServerName) Mockito.any(), (HRegionInfo) Mockito.any(), (ServerName) Mockito.any());
            return spy;
        }

        @Override
        public ClusterConnection getConnection() {
            return mockedConnection;
        }

        @Override
        public ClusterConnection getClusterConnection() {
            return mockedConnection;
        }
    };
    master.start();
    try {
        // Wait till master is up ready for RPCs.
        while (!master.serviceStarted) Threads.sleep(10);
        // Fake master that there are regionservers out there.  Report in.
        for (int i = 0; i < sns.length; i++) {
            RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
            ServerName sn = ServerName.parseVersionedServerName(sns[i].getVersionedBytes());
            request.setServer(ProtobufUtil.toServerName(sn));
            request.setLoad(ServerLoad.EMPTY_SERVERLOAD.obtainServerLoadPB());
            master.getMasterRpcServices().regionServerReport(null, request.build());
        }
        // Master should now come up.
        while (!master.isInitialized()) {
            Threads.sleep(100);
        }
        assertTrue(master.isInitialized());
    } finally {
        rs0.stop("Test is done");
        rs1.stop("Test is done");
        rs2.stop("Test is done");
        master.stopMaster();
        master.join();
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) RegionServerReportRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest) CoordinatedStateManager(org.apache.hadoop.hbase.CoordinatedStateManager) Result(org.apache.hadoop.hbase.client.Result) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) ServerName(org.apache.hadoop.hbase.ServerName) Test(org.junit.Test)
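
The essential trick in this test is subclassing the component under test so its connection getters hand back a prepared ClusterConnection instead of dialing a real cluster. Below is a stripped-down sketch of that injection pattern, using a hypothetical Service class as a stand-in for HMaster.

import org.apache.hadoop.hbase.client.ClusterConnection;
import org.mockito.Mockito;

public class InjectedConnectionSketch {

    // Hypothetical service with an overridable connection getter, standing in for HMaster.
    static class Service {
        ClusterConnection getConnection() {
            throw new UnsupportedOperationException("would normally connect to the cluster");
        }

        boolean ping() {
            return getConnection() != null;
        }
    }

    public static void main(String[] args) {
        final ClusterConnection mocked = Mockito.mock(ClusterConnection.class);
        // Anonymous subclass overrides the getter, so no real RPCs are ever attempted.
        Service service = new Service() {
            @Override
            ClusterConnection getConnection() {
                return mocked;
            }
        };
        System.out.println("ping: " + service.ping());
    }
}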

Aggregations

ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection): 23 usages
Test (org.junit.Test): 12 usages
ServerName (org.apache.hadoop.hbase.ServerName): 8 usages
IOException (java.io.IOException): 6 usages
Configuration (org.apache.hadoop.conf.Configuration): 4 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 4 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 4 usages
TableName (org.apache.hadoop.hbase.TableName): 4 usages
ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException): 4 usages
ArrayList (java.util.ArrayList): 3 usages
List (java.util.List): 3 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 3 usages
Waiter (org.apache.hadoop.hbase.Waiter): 3 usages
RegionLocator (org.apache.hadoop.hbase.client.RegionLocator): 3 usages
Result (org.apache.hadoop.hbase.client.Result): 3 usages
Table (org.apache.hadoop.hbase.client.Table): 3 usages
LinkedList (java.util.LinkedList): 2 usages
Map (java.util.Map): 2 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 2 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 2 usages