Example 71 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

From the class TestMasterFailover, method testPendingOpenOrCloseWhenMasterFailover.

/**
   * Tests regions left in PENDING_OPEN/PENDING_CLOSE when the master fails over.
   */
@Test(timeout = 180000)
public void testPendingOpenOrCloseWhenMasterFailover() throws Exception {
    final int NUM_MASTERS = 1;
    final int NUM_RS = 1;
    // Create config to use for this cluster
    Configuration conf = HBaseConfiguration.create();
    // Start the cluster
    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    log("Cluster started");
    // get all the master threads
    List<MasterThread> masterThreads = cluster.getMasterThreads();
    assertEquals(1, masterThreads.size());
    // only one master thread, let's wait for it to be initialized
    assertTrue(cluster.waitForActiveAndReadyMaster());
    HMaster master = masterThreads.get(0).getMaster();
    assertTrue(master.isActiveMaster());
    assertTrue(master.isInitialized());
    // Create a table with a region online
    Table onlineTable = TEST_UTIL.createTable(TableName.valueOf("onlineTable"), "family");
    onlineTable.close();
    // Create a table in META, so it has a region offline
    HTableDescriptor offlineTable = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("offlineTable")));
    offlineTable.addFamily(new HColumnDescriptor(Bytes.toBytes("family")));
    FileSystem filesystem = FileSystem.get(conf);
    Path rootdir = FSUtils.getRootDir(conf);
    FSTableDescriptors fstd = new FSTableDescriptors(conf, filesystem, rootdir);
    fstd.createTableDescriptor(offlineTable);
    HRegionInfo hriOffline = new HRegionInfo(offlineTable.getTableName(), null, null);
    createRegion(hriOffline, rootdir, conf, offlineTable);
    MetaTableAccessor.addRegionToMeta(master.getConnection(), hriOffline);
    log("Regions in hbase:meta and namespace have been created");
    // at this point we only expect 3 regions to be assigned out
    // (catalogs and namespace, + 1 online region)
    assertEquals(3, cluster.countServedRegions());
    HRegionInfo hriOnline = null;
    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(TableName.valueOf("onlineTable"))) {
        hriOnline = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
    }
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    RegionStateStore stateStore = master.getAssignmentManager().getRegionStateStore();
    // Put the online region in pending_close. It is actually already opened.
    // This is to simulate that the region close RPC is not sent out before failover
    RegionState oldState = regionStates.getRegionState(hriOnline);
    RegionState newState = new RegionState(hriOnline, State.PENDING_CLOSE, oldState.getServerName());
    stateStore.updateRegionState(HConstants.NO_SEQNUM, newState, oldState);
    // Put the offline region in pending_open. It is actually not opened yet.
    // This is to simulate that the region open RPC is not sent out before failover
    oldState = new RegionState(hriOffline, State.OFFLINE);
    newState = new RegionState(hriOffline, State.PENDING_OPEN, newState.getServerName());
    stateStore.updateRegionState(HConstants.NO_SEQNUM, newState, oldState);
    HRegionInfo failedClose = new HRegionInfo(offlineTable.getTableName(), null, null);
    createRegion(failedClose, rootdir, conf, offlineTable);
    MetaTableAccessor.addRegionToMeta(master.getConnection(), failedClose);
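    // Simulate a region stuck in FAILED_CLOSE: the close was pending but did not
    // complete before the master failover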
    oldState = new RegionState(failedClose, State.PENDING_CLOSE);
    newState = new RegionState(failedClose, State.FAILED_CLOSE, newState.getServerName());
    stateStore.updateRegionState(HConstants.NO_SEQNUM, newState, oldState);
    HRegionInfo failedOpen = new HRegionInfo(offlineTable.getTableName(), null, null);
    createRegion(failedOpen, rootdir, conf, offlineTable);
    MetaTableAccessor.addRegionToMeta(master.getConnection(), failedOpen);
    // Simulate a region transitioning to failed open when the region server reports the
    // transition as FAILED_OPEN
    oldState = new RegionState(failedOpen, State.PENDING_OPEN);
    newState = new RegionState(failedOpen, State.FAILED_OPEN, newState.getServerName());
    stateStore.updateRegionState(HConstants.NO_SEQNUM, newState, oldState);
    HRegionInfo failedOpenNullServer = new HRegionInfo(offlineTable.getTableName(), null, null);
    LOG.info("Failed open NUll server " + failedOpenNullServer.getEncodedName());
    createRegion(failedOpenNullServer, rootdir, conf, offlineTable);
    MetaTableAccessor.addRegionToMeta(master.getConnection(), failedOpenNullServer);
    // Simulate a region transitioning to failed open when the master couldn't find a plan for
    // the region
    oldState = new RegionState(failedOpenNullServer, State.OFFLINE);
    newState = new RegionState(failedOpenNullServer, State.FAILED_OPEN, null);
    stateStore.updateRegionState(HConstants.NO_SEQNUM, newState, oldState);
    // Stop the master
    log("Aborting master");
    cluster.abortMaster(0);
    cluster.waitOnMaster(0);
    log("Master has aborted");
    // Start up a new master
    log("Starting up a new master");
    master = cluster.startMaster().getMaster();
    log("Waiting for master to be ready");
    cluster.waitForActiveAndReadyMaster();
    log("Master is ready");
    // Wait until there are no more regions in transition
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    // Get new region states since master restarted
    regionStates = master.getAssignmentManager().getRegionStates();
    // All the regions staged above (pending_open/close and failed_open/close) should now be online
    assertTrue(regionStates.isRegionOnline(hriOffline));
    assertTrue(regionStates.isRegionOnline(hriOnline));
    assertTrue(regionStates.isRegionOnline(failedClose));
    assertTrue(regionStates.isRegionOnline(failedOpenNullServer));
    assertTrue(regionStates.isRegionOnline(failedOpen));
    log("Done with verification, shutting down cluster");
    // Done, shutdown the cluster
    TEST_UTIL.shutdownMiniCluster();
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread) FileSystem(org.apache.hadoop.fs.FileSystem) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) Test(org.junit.Test)
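
The core RegionLocator pattern in this test is the try-with-resources lookup of a region's HRegionInfo by row key (the block around locator.getRegionLocation(HConstants.EMPTY_START_ROW)). The sketch below isolates that pattern; it is not part of the test, and the helper name firstRegionOf and the caller-managed Connection are assumptions.

import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class FirstRegionLookup {

    private FirstRegionLookup() {
    }

    /**
     * Returns the HRegionInfo of the region holding the empty start row,
     * i.e. the table's first region, closing the RegionLocator afterwards.
     */
    static HRegionInfo firstRegionOf(Connection connection, TableName tableName) throws IOException {
        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
            return locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
        }
    }
}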

Example 72 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

From the class TestRegionPlacement, method createTable.

/**
   * Create a table with the specified name and number of regions.
   * @param tableName name of the table to create
   * @param regionNum number of regions the table should have
   * @throws IOException
   */
private static void createTable(TableName tableName, int regionNum) throws IOException {
    int expectedRegions = regionNum;
    byte[][] splitKeys = new byte[expectedRegions - 1][];
    for (int i = 1; i < expectedRegions; i++) {
        byte splitKey = (byte) i;
        splitKeys[i - 1] = new byte[] { splitKey, splitKey, splitKey };
    }
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, splitKeys);
    try (RegionLocator r = CONNECTION.getRegionLocator(tableName)) {
        List<HRegionLocation> regions = r.getAllRegionLocations();
        assertEquals("Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), expectedRegions, regions.size());
    }
}
Also used : RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
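
Example 72 verifies that createTable produced the expected number of regions by listing every region location. A minimal standalone sketch of that verification step, assuming a caller-supplied Connection (the countRegions helper is illustrative, not from the test):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class RegionCount {

    /** Counts the regions of a table by asking the RegionLocator for every location. */
    static int countRegions(Connection connection, TableName tableName) throws IOException {
        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
            List<HRegionLocation> locations = locator.getAllRegionLocations();
            return locations.size();
        }
    }
}

The returned count can then be compared against expectedRegions just as the assertEquals in the test does.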

Example 73 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

From the class TestMasterRestartAfterDisablingTable, method testForCheckingIfEnableAndDisableWorksFineAfterSwitch.

@Test
public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() throws Exception {
    final int NUM_MASTERS = 2;
    final int NUM_RS = 1;
    final int NUM_REGIONS_TO_CREATE = 4;
    // Start the cluster
    log("Starting cluster");
    Configuration conf = HBaseConfiguration.create();
    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    log("Waiting for active/ready master");
    cluster.waitForActiveAndReadyMaster();
    // Create a table with regions
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[] family = Bytes.toBytes("family");
    log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
    Table ht = TEST_UTIL.createMultiRegionTable(tableName, family, NUM_REGIONS_TO_CREATE);
    int numRegions = -1;
    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
        numRegions = r.getStartKeys().length;
    }
    // add one for the hbase:meta catalog region
    numRegions += 1;
    log("Waiting for no more RIT\n");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    log("Disabling table\n");
    TEST_UTIL.getAdmin().disableTable(tableName);
    NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    assertEquals("The number of regions for the table tableRestart should be 0 and only" + "the catalog and namespace tables should be present.", 2, regions.size());
    List<MasterThread> masterThreads = cluster.getMasterThreads();
    MasterThread activeMaster = null;
    if (masterThreads.get(0).getMaster().isActiveMaster()) {
        activeMaster = masterThreads.get(0);
    } else {
        activeMaster = masterThreads.get(1);
    }
    activeMaster.getMaster().stop("stopping the active master so that the backup can become active");
    cluster.hbaseCluster.waitOnMaster(activeMaster);
    cluster.waitForActiveAndReadyMaster();
    assertTrue("The table should not be in enabled state", cluster.getMaster().getTableStateManager().isTableState(TableName.valueOf(name.getMethodName()), TableState.State.DISABLED, TableState.State.DISABLING));
    log("Enabling table\n");
    // Need a new Admin, the previous one is on the old master
    Admin admin = TEST_UTIL.getAdmin();
    admin.enableTable(tableName);
    admin.close();
    log("Waiting for no more RIT\n");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    log("Verifying there are " + numRegions + " assigned on cluster\n");
    regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    assertEquals("The assigned regions were not onlined after master" + " switch except for the catalog and namespace tables.", 6, regions.size());
    assertTrue("The table should be in enabled state", cluster.getMaster().getTableStateManager().isTableState(TableName.valueOf(name.getMethodName()), TableState.State.ENABLED));
    ht.close();
    TEST_UTIL.shutdownMiniCluster();
}
Also used : RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Configuration(org.apache.hadoop.conf.Configuration) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) Admin(org.apache.hadoop.hbase.client.Admin) TableName(org.apache.hadoop.hbase.TableName) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread) Test(org.junit.Test)
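
This test counts a table's regions through getStartKeys(), which yields one start key per region, and then adds one for the hbase:meta catalog region. A small sketch of that counting step, assuming an open Connection (the helper name and the includeMeta flag are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class StartKeyRegionCount {

    /**
     * Counts user regions via the start keys (one start key per region) and,
     * when includeMeta is true, adds one for hbase:meta as the test does.
     */
    static int countRegions(Connection connection, TableName tableName, boolean includeMeta)
            throws IOException {
        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
            int regions = locator.getStartKeys().length;
            return includeMeta ? regions + 1 : regions;
        }
    }
}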

Example 74 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

From the class TestNamespaceAuditor, method testCloneSnapshot.

@Test
public void testCloneSnapshot() throws Exception {
    String nsp = prefix + "_testCloneSnapshot";
    NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "20").build();
    ADMIN.createNamespace(nspDesc);
    assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp));
    TableName tableName = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1");
    TableName cloneTableName = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2");
    HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
    HTableDescriptor tableDescOne = new HTableDescriptor(tableName);
    tableDescOne.addFamily(fam1);
    ADMIN.createTable(tableDescOne, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4);
    String snapshot = "snapshot_testCloneSnapshot";
    ADMIN.snapshot(snapshot, tableName);
    ADMIN.cloneSnapshot(snapshot, cloneTableName);
    int tableLength;
    try (RegionLocator locator = ADMIN.getConnection().getRegionLocator(tableName)) {
        tableLength = locator.getStartKeys().length;
    }
    assertEquals(tableName.getNameAsString() + " should have four regions.", 4, tableLength);
    try (RegionLocator locator = ADMIN.getConnection().getRegionLocator(cloneTableName)) {
        tableLength = locator.getStartKeys().length;
    }
    assertEquals(cloneTableName.getNameAsString() + " should have four regions.", 4, tableLength);
    NamespaceTableAndRegionInfo nstate = getNamespaceState(nsp);
    assertEquals("Total tables count should be 2.", 2, nstate.getTables().size());
    assertEquals("Total regions count should be.", 8, nstate.getRegionCount());
    ADMIN.deleteSnapshot(snapshot);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) NamespaceDescriptor(org.apache.hadoop.hbase.NamespaceDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
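
The test verifies that the snapshot clone ends up with the same number of regions as the source table by reading getStartKeys() on both. A hedged sketch of that comparison, assuming an open Connection (assertSameRegionCount is an illustrative helper, not HBase API):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class CloneRegionCheck {

    /** Fails if the clone does not have the same region count as the source table. */
    static void assertSameRegionCount(Connection connection, TableName source, TableName clone)
            throws IOException {
        int sourceRegions = startKeyCount(connection, source);
        int cloneRegions = startKeyCount(connection, clone);
        if (sourceRegions != cloneRegions) {
            throw new IllegalStateException(clone + " has " + cloneRegions
                + " regions but " + source + " has " + sourceRegions);
        }
    }

    private static int startKeyCount(Connection connection, TableName tableName) throws IOException {
        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
            return locator.getStartKeys().length;
        }
    }
}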

Example 75 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

From the class HFileReplicator, method replicate.

public Void replicate() throws IOException {
    // Copy all the hfiles to the local file system
    Map<String, Path> tableStagingDirsMap = copyHFilesToStagingDir();
    int maxRetries = conf.getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
    for (Entry<String, Path> tableStagingDir : tableStagingDirsMap.entrySet()) {
        String tableNameString = tableStagingDir.getKey();
        Path stagingDir = tableStagingDir.getValue();
        LoadIncrementalHFiles loadHFiles = null;
        try {
            loadHFiles = new LoadIncrementalHFiles(conf);
        } catch (Exception e) {
            LOG.error("Failed to initialize LoadIncrementalHFiles for replicating bulk loaded" + " data.", e);
            throw new IOException(e);
        }
        Configuration newConf = HBaseConfiguration.create(conf);
        newConf.set(LoadIncrementalHFiles.CREATE_TABLE_CONF_KEY, "no");
        loadHFiles.setConf(newConf);
        TableName tableName = TableName.valueOf(tableNameString);
        Table table = this.connection.getTable(tableName);
        // Prepare collection of queue of hfiles to be loaded(replicated)
        Deque<LoadQueueItem> queue = new LinkedList<>();
        loadHFiles.prepareHFileQueue(stagingDir, table, queue, false);
        if (queue.isEmpty()) {
            LOG.warn("Replication process did not find any files to replicate in directory " + stagingDir.toUri());
            return null;
        }
        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
            fsDelegationToken.acquireDelegationToken(sinkFs);
            // Set the staging directory which will be used by LoadIncrementalHFiles for loading the
            // data
            loadHFiles.setBulkToken(stagingDir.toString());
            doBulkLoad(loadHFiles, table, queue, locator, maxRetries);
        } finally {
            cleanup(stagingDir.toString(), table);
        }
    }
    return null;
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) ExecutionException(java.util.concurrent.ExecutionException) LinkedList(java.util.LinkedList) TableName(org.apache.hadoop.hbase.TableName) LoadIncrementalHFiles(org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles) LoadQueueItem(org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem)
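
HFileReplicator pairs the RegionLocator with LoadIncrementalHFiles so that staged HFiles can be matched to region boundaries, and it also deals with delegation tokens and retries. The sketch below shows the same RegionLocator-plus-bulk-load pairing in a much simpler, non-secure setting, using the public doBulkLoad(Path, Admin, Table, RegionLocator) entry point instead of the prepareHFileQueue path in the source (bulkLoadDirectory is an illustrative helper, not part of HFileReplicator):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

final class SimpleBulkLoad {

    /** Bulk loads every HFile under hfileDir into tableName on a non-secure cluster. */
    static void bulkLoadDirectory(Connection connection, Configuration conf, TableName tableName,
            Path hfileDir) throws Exception {
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        try (Table table = connection.getTable(tableName);
                Admin admin = connection.getAdmin();
                RegionLocator locator = connection.getRegionLocator(tableName)) {
            // The RegionLocator supplies the region boundaries the loader uses to decide
            // which region each HFile belongs to and whether a file must be split first.
            loader.doBulkLoad(hfileDir, admin, table, locator);
        }
    }
}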

Aggregations

RegionLocator (org.apache.hadoop.hbase.client.RegionLocator): 84
Table (org.apache.hadoop.hbase.client.Table): 59
Test (org.junit.Test): 49
TableName (org.apache.hadoop.hbase.TableName): 39
Admin (org.apache.hadoop.hbase.client.Admin): 33
Path (org.apache.hadoop.fs.Path): 31
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 30
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 29
Connection (org.apache.hadoop.hbase.client.Connection): 25
Configuration (org.apache.hadoop.conf.Configuration): 21
IOException (java.io.IOException): 19
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 15
FileSystem (org.apache.hadoop.fs.FileSystem): 14
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 13
ServerName (org.apache.hadoop.hbase.ServerName): 13
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 12
ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection): 10
Put (org.apache.hadoop.hbase.client.Put): 10
ArrayList (java.util.ArrayList): 9
Result (org.apache.hadoop.hbase.client.Result): 8