
Example 56 with HRegionInfo

use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

the class TestDistributedLogSplitting method testLogReplayWithNonMetaRSDown.

@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testLogReplayWithNonMetaRSDown() throws Exception {
    LOG.info("testLogReplayWithNonMetaRSDown");
    // create more than one wal
    conf.setLong("hbase.regionserver.hlog.blocksize", 30 * 1024);
    conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
    startCluster(NUM_RS);
    final int NUM_REGIONS_TO_CREATE = 40;
    final int NUM_LOG_LINES = 1000;
    // turn off load balancing to prevent regions from moving around; otherwise
    // they will consume recovered.edits
    master.balanceSwitch(false);
    final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
    Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
    try {
        HRegionServer hrs = findRSToKill(false, "table");
        List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
        makeWAL(hrs, regions, "table", "family", NUM_LOG_LINES, 100);
        // wait for the abort to complete
        this.abortRSAndVerifyRecovery(hrs, ht, zkw, NUM_REGIONS_TO_CREATE, NUM_LOG_LINES);
    } finally {
        if (ht != null)
            ht.close();
        if (zkw != null)
            zkw.close();
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Table(org.apache.hadoop.hbase.client.Table) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Ignore(org.junit.Ignore) Test(org.junit.Test)
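
The regions handed to makeWAL above come back from ProtobufUtil.getOnlineRegions as a plain List<HRegionInfo>. As a minimal sketch of what that list carries, the hypothetical RegionInfoDump helper below (not part of HBase) prints each region's encoded name and key range; getStartKey() and getEndKey() are assumed HRegionInfo accessors that this example itself does not show.

import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public final class RegionInfoDump {
    private RegionInfoDump() {
    }

    // Print one line per region: the encoded name plus its [start, end) key range.
    // getStartKey()/getEndKey() are assumed HRegionInfo accessors not shown in the
    // example above.
    static void dump(List<HRegionInfo> regions) {
        for (HRegionInfo hri : regions) {
            System.out.println(hri.getEncodedName() + " ["
                + Bytes.toStringBinary(hri.getStartKey()) + ", "
                + Bytes.toStringBinary(hri.getEndKey()) + ")");
        }
    }
}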

Example 57 with HRegionInfo

use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

the class TestMasterNoCluster method testFailover.

/**
   * Test master failover.
   * Start up three fake regionservers and a master.
   * @throws IOException
   * @throws KeeperException
   * @throws InterruptedException
   * @throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException 
   */
@Test
public void testFailover() throws Exception {
    final long now = System.currentTimeMillis();
    // Names for our three servers. Make the port numbers match the hostnames.
    // They come into use later in the mock servers when we need to figure out how to respond.
    final ServerName sn0 = ServerName.valueOf("0.example.org", 0, now);
    final ServerName sn1 = ServerName.valueOf("1.example.org", 1, now);
    final ServerName sn2 = ServerName.valueOf("2.example.org", 2, now);
    final ServerName[] sns = new ServerName[] { sn0, sn1, sn2 };
    // Put up the mock servers
    final Configuration conf = TESTUTIL.getConfiguration();
    final MockRegionServer rs0 = new MockRegionServer(conf, sn0);
    final MockRegionServer rs1 = new MockRegionServer(conf, sn1);
    final MockRegionServer rs2 = new MockRegionServer(conf, sn2);
    // Put some data into the servers.  Make it look like sn0 has the meta.
    // Put data into sn2 so it looks like it has a few regions for a table named 't'.
    MetaTableLocator.setMetaLocation(rs0.getZooKeeper(), rs0.getServerName(), RegionState.State.OPEN);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    Result[] results = new Result[] { MetaMockingUtil.getMetaTableRowResult(new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HBaseTestingUtility.KEYS[1]), rs2.getServerName()), MetaMockingUtil.getMetaTableRowResult(new HRegionInfo(tableName, HBaseTestingUtility.KEYS[1], HBaseTestingUtility.KEYS[2]), rs2.getServerName()), MetaMockingUtil.getMetaTableRowResult(new HRegionInfo(tableName, HBaseTestingUtility.KEYS[2], HConstants.EMPTY_END_ROW), rs2.getServerName()) };
    rs1.setNextResults(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), results);
    // Create master.  Subclass to override a few methods so we can insert mocks
    // and get notification on transitions.  We need to fake out any rpcs the
    // master does opening/closing regions.  Also need to fake out the address
    // of the 'remote' mocked up regionservers.
    CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(TESTUTIL.getConfiguration());
    // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
    // the conf from the master; the conf will already have a ClusterConnection
    // associated, so the mocking of a connection below would fail.
    final ClusterConnection mockedConnection = HConnectionTestingUtility.getMockedConnectionAndDecorate(TESTUTIL.getConfiguration(), rs0, rs0, rs0.getServerName(), HRegionInfo.FIRST_META_REGIONINFO);
    HMaster master = new HMaster(conf, cp) {

        InetAddress getRemoteInetAddress(final int port, final long serverStartCode) throws UnknownHostException {
            // Return a different address depending on the port passed.
            if (port > sns.length) {
                return super.getRemoteInetAddress(port, serverStartCode);
            }
            ServerName sn = sns[port];
            return InetAddress.getByAddress(sn.getHostname(), new byte[] { 10, 0, 0, (byte) sn.getPort() });
        }

        @Override
        void initClusterSchemaService() throws IOException, InterruptedException {
        }

        @Override
        ServerManager createServerManager(MasterServices master) throws IOException {
            ServerManager sm = super.createServerManager(master);
            // Spy on the created ServerManager
            ServerManager spy = Mockito.spy(sm);
            // Fake a successful close.
            Mockito.doReturn(true).when(spy).sendRegionClose((ServerName) Mockito.any(), (HRegionInfo) Mockito.any(), (ServerName) Mockito.any());
            return spy;
        }

        @Override
        public ClusterConnection getConnection() {
            return mockedConnection;
        }

        @Override
        public ClusterConnection getClusterConnection() {
            return mockedConnection;
        }
    };
    master.start();
    try {
        // Wait till master is up ready for RPCs.
        while (!master.serviceStarted) Threads.sleep(10);
        // Fake out the master into thinking there are regionservers out there. Report in.
        for (int i = 0; i < sns.length; i++) {
            RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
            ServerName sn = ServerName.parseVersionedServerName(sns[i].getVersionedBytes());
            request.setServer(ProtobufUtil.toServerName(sn));
            request.setLoad(ServerLoad.EMPTY_SERVERLOAD.obtainServerLoadPB());
            master.getMasterRpcServices().regionServerReport(null, request.build());
        }
        // Master should now come up.
        while (!master.isInitialized()) {
            Threads.sleep(100);
        }
        assertTrue(master.isInitialized());
    } finally {
        rs0.stop("Test is done");
        rs1.stop("Test is done");
        rs2.stop("Test is done");
        master.stopMaster();
        master.join();
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) RegionServerReportRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest) CoordinatedStateManager(org.apache.hadoop.hbase.CoordinatedStateManager) Result(org.apache.hadoop.hbase.client.Result) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) ServerName(org.apache.hadoop.hbase.ServerName) Test(org.junit.Test)
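
The three HRegionInfo instances fed to MetaMockingUtil above partition the table's key space into contiguous ranges: (EMPTY_START_ROW, KEYS[1]), (KEYS[1], KEYS[2]) and (KEYS[2], EMPTY_END_ROW). The sketch below generalizes that pattern for an arbitrary set of split keys; RegionBoundaries is a hypothetical helper, and it uses only the HRegionInfo(TableName, byte[], byte[]) constructor and HConstants markers seen in the example.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;

public final class RegionBoundaries {
    private RegionBoundaries() {
    }

    // Build contiguous regions for the given split keys, mirroring the three
    // meta rows mocked above: (EMPTY_START_ROW, k1), (k1, k2), ..., (kn, EMPTY_END_ROW).
    static List<HRegionInfo> contiguousRegions(TableName table, byte[][] splitKeys) {
        List<HRegionInfo> regions = new ArrayList<>();
        byte[] start = HConstants.EMPTY_START_ROW;
        for (byte[] split : splitKeys) {
            regions.add(new HRegionInfo(table, start, split));
            start = split;
        }
        regions.add(new HRegionInfo(table, start, HConstants.EMPTY_END_ROW));
        return regions;
    }
}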

Example 58 with HRegionInfo

use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

the class TestAssignmentListener method testAssignmentListener.

@Test(timeout = 60000)
public void testAssignmentListener() throws IOException, InterruptedException {
    AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    Admin admin = TEST_UTIL.getAdmin();
    DummyAssignmentListener listener = new DummyAssignmentListener();
    am.registerListener(listener);
    try {
        final TableName tableName = TableName.valueOf(name.getMethodName());
        final byte[] FAMILY = Bytes.toBytes("cf");
        // Create a new table, with a single region
        LOG.info("Create Table");
        TEST_UTIL.createTable(tableName, FAMILY);
        listener.awaitModifications(1);
        assertEquals(1, listener.getLoadCount());
        assertEquals(0, listener.getCloseCount());
        // Add some data
        Table table = TEST_UTIL.getConnection().getTable(tableName);
        try {
            for (int i = 0; i < 10; ++i) {
                byte[] key = Bytes.toBytes("row-" + i);
                Put put = new Put(key);
                put.addColumn(FAMILY, null, key);
                table.put(put);
            }
        } finally {
            table.close();
        }
        // Split the table in two
        LOG.info("Split Table");
        listener.reset();
        admin.split(tableName, Bytes.toBytes("row-3"));
        listener.awaitModifications(3);
        // daughters added
        assertEquals(2, listener.getLoadCount());
        // parent removed
        assertEquals(1, listener.getCloseCount());
        // Wait for the Regions to be mergeable
        MiniHBaseCluster miniCluster = TEST_UTIL.getMiniHBaseCluster();
        int mergeable = 0;
        while (mergeable < 2) {
            Thread.sleep(100);
            admin.majorCompact(tableName);
            mergeable = 0;
            for (JVMClusterUtil.RegionServerThread regionThread : miniCluster.getRegionServerThreads()) {
                for (Region region : regionThread.getRegionServer().getOnlineRegions(tableName)) {
                    mergeable += ((HRegion) region).isMergeable() ? 1 : 0;
                }
            }
        }
        // Merge the two regions
        LOG.info("Merge Regions");
        listener.reset();
        List<HRegionInfo> regions = admin.getTableRegions(tableName);
        assertEquals(2, regions.size());
        boolean sameServer = areAllRegionsLocatedOnSameServer(tableName);
        // If the regions are located on different servers, we need to move them
        // to the same server before merging, so the expected modifications
        // increase to 5 (open + close).
        final int expectedModifications = sameServer ? 3 : 5;
        final int expectedLoadCount = sameServer ? 1 : 2;
        final int expectedCloseCount = sameServer ? 2 : 3;
        admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(), regions.get(1).getEncodedNameAsBytes(), true);
        listener.awaitModifications(expectedModifications);
        assertEquals(1, admin.getTableRegions(tableName).size());
        // new merged region added
        assertEquals(expectedLoadCount, listener.getLoadCount());
        // daughters removed
        assertEquals(expectedCloseCount, listener.getCloseCount());
        // Delete the table
        LOG.info("Drop Table");
        listener.reset();
        TEST_UTIL.deleteTable(tableName);
        listener.awaitModifications(1);
        assertEquals(0, listener.getLoadCount());
        assertEquals(1, listener.getCloseCount());
    } finally {
        am.unregisterListener(listener);
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) JVMClusterUtil(org.apache.hadoop.hbase.util.JVMClusterUtil) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region) Test(org.junit.Test)
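
The merge step above boils down to a single Admin call: mergeRegionsAsync with the encoded names of the two regions and forcible set to true. The hypothetical MergeHelper below isolates just that call, using only the Admin and HRegionInfo methods that appear in the test; it is a sketch, not the test's own utility code.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class MergeHelper {
    private MergeHelper() {
    }

    // Ask the master to merge the first two regions of the table, mirroring the
    // mergeRegionsAsync call in the listener test above. forcible=true lets the
    // merge proceed even for regions that are not strictly adjacent.
    static void mergeFirstTwo(Admin admin, TableName tableName) throws IOException {
        List<HRegionInfo> regions = admin.getTableRegions(tableName);
        if (regions.size() < 2) {
            return; // nothing to merge
        }
        admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(),
            regions.get(1).getEncodedNameAsBytes(), true);
        // The test does not wait on this call's result; it waits on its
        // AssignmentListener notifications instead.
    }
}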

Example 59 with HRegionInfo

use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

the class TestAssignmentManagerOnCluster method testOpenFailedUnrecoverable.

/**
   * Tests a region open failure that is not recoverable.
   */
@Test(timeout = 60000)
public void testOpenFailedUnrecoverable() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        desc.addFamily(new HColumnDescriptor(FAMILY));
        admin.createTable(desc);
        Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
        HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
        MetaTableAccessor.addRegionToMeta(meta, hri);
        FileSystem fs = FileSystem.get(conf);
        Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), tableName);
        Path regionDir = new Path(tableDir, hri.getEncodedName());
        // create a file named the same as the region dir to
        // interfere with region opening
        fs.create(regionDir, true);
        HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
        AssignmentManager am = master.getAssignmentManager();
        assertFalse(TEST_UTIL.assignRegion(hri));
        RegionState state = am.getRegionStates().getRegionState(hri);
        assertEquals(RegionState.State.FAILED_OPEN, state.getState());
        // Failed to open due to file system issue. Region state should
        // carry the opening region server so that we can force close it
        // later on before opening it again. See HBASE-9092.
        assertNotNull(state.getServerName());
        // remove the blocking file, so that region can be opened
        fs.delete(regionDir, true);
        assertTrue(TEST_UTIL.assignRegion(hri));
        ServerName serverName = master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
        TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
    } finally {
        TEST_UTIL.deleteTable(tableName);
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) FileSystem(org.apache.hadoop.fs.FileSystem) ServerName(org.apache.hadoop.hbase.ServerName) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
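
The blocking file is created at the exact path where the region's directory would normally live: the table directory resolved from the HBase root dir, plus the region's encoded name. The hypothetical RegionDirLocator below captures just that path composition, using the FSUtils and HRegionInfo calls shown in the test.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

public final class RegionDirLocator {
    private RegionDirLocator() {
    }

    // Resolve the directory a region's files live under: the table directory
    // below the HBase root dir, plus the region's encoded name. This is the
    // same composition the test uses before creating the blocking file.
    static Path regionDir(Configuration conf, TableName tableName, HRegionInfo hri)
            throws IOException {
        Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), tableName);
        return new Path(tableDir, hri.getEncodedName());
    }
}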

Example 60 with HRegionInfo

use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

the class TestAssignmentManagerOnCluster method testOpenFailed.

/**
   * Tests a recoverable region open failure.
   */
@Test(timeout = 60000)
public void testOpenFailed() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        desc.addFamily(new HColumnDescriptor(FAMILY));
        admin.createTable(desc);
        Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
        HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
        MetaTableAccessor.addRegionToMeta(meta, hri);
        MyLoadBalancer.controledRegion = hri;
        HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
        AssignmentManager am = master.getAssignmentManager();
        assertFalse(TEST_UTIL.assignRegion(hri));
        RegionState state = am.getRegionStates().getRegionState(hri);
        assertEquals(RegionState.State.FAILED_OPEN, state.getState());
        // Failed to open since there was no plan, so the region is on no server
        assertNull(state.getServerName());
        MyLoadBalancer.controledRegion = null;
        assertTrue(TEST_UTIL.assignRegion(hri));
        ServerName serverName = master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
        TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
    } finally {
        MyLoadBalancer.controledRegion = null;
        TEST_UTIL.deleteTable(tableName);
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ServerName(org.apache.hadoop.hbase.ServerName) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
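
Both assignment tests above interrogate the master through the same pair of calls: RegionStates.getRegionState(hri) for the lifecycle state and getRegionServerOfRegion(hri) for the hosting server. The hypothetical RegionStateProbe below wraps those two lookups; it assumes the accessors exactly as the tests use them, and the org.apache.hadoop.hbase.master.RegionState import is assumed since the list above does not show it.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;

public final class RegionStateProbe {
    private RegionStateProbe() {
    }

    // True if the master currently records the region as FAILED_OPEN, the state
    // both tests above assert after a failed assignment.
    static boolean isFailedOpen(HMaster master, HRegionInfo hri) {
        RegionState state = master.getAssignmentManager().getRegionStates().getRegionState(hri);
        return state != null && state.getState() == RegionState.State.FAILED_OPEN;
    }

    // Server the master believes hosts the region, or null when it is unassigned.
    static ServerName hostingServer(HMaster master, HRegionInfo hri) {
        return master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
    }
}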

Aggregations

HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 408
ServerName (org.apache.hadoop.hbase.ServerName) 153
Test (org.junit.Test) 141
TableName (org.apache.hadoop.hbase.TableName) 118
ArrayList (java.util.ArrayList) 86
IOException (java.io.IOException) 83
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 75
Path (org.apache.hadoop.fs.Path) 63
List (java.util.List) 59
HashMap (java.util.HashMap) 57
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 49
Table (org.apache.hadoop.hbase.client.Table) 47
Map (java.util.Map) 43
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer) 41
FileSystem (org.apache.hadoop.fs.FileSystem) 40
Configuration (org.apache.hadoop.conf.Configuration) 38
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation) 35
TreeMap (java.util.TreeMap) 26
HashSet (java.util.HashSet) 23
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException) 22