Example 71 with HRegionInfo

Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

The class TestLoadIncrementalHFilesSplitRecovery, method testGroupOrSplitWhenRegionHoleExistsInMeta.

@Test(timeout = 120000)
public void testGroupOrSplitWhenRegionHoleExistsInMeta() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000100") };
    // Share the connection. We were failing to find the table with the new reverse scan because
    // it looks for the first region, not any region -- that is how it works now. The code below
    // removes the first region in the test; previously this relied on the Connection cache
    // holding the first region.
    Connection connection = ConnectionFactory.createConnection(util.getConfiguration());
    Table table = connection.getTable(tableName);
    setupTableWithSplitkeys(tableName, 10, SPLIT_KEYS);
    Path dir = buildBulkFiles(tableName, 2);
    final AtomicInteger countedLqis = new AtomicInteger();
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration()) {

        @Override
        protected Pair<List<LoadQueueItem>, String> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item, final Table htable, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
            Pair<List<LoadQueueItem>, String> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
            if (lqis != null && lqis.getFirst() != null) {
                countedLqis.addAndGet(lqis.getFirst().size());
            }
            return lqis;
        }
    };
    // do bulkload when there is no region hole in hbase:meta.
    try (Table t = connection.getTable(tableName);
        RegionLocator locator = connection.getRegionLocator(tableName);
        Admin admin = connection.getAdmin()) {
        loader.doBulkLoad(dir, admin, t, locator);
    } catch (Exception e) {
        LOG.error("exeception=", e);
    }
    // check if all the data are loaded into the table.
    this.assertExpectedTable(tableName, ROWCOUNT, 2);
    dir = buildBulkFiles(tableName, 3);
    // Mess it up by leaving a hole in the hbase:meta
    List<HRegionInfo> regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
    for (HRegionInfo regionInfo : regionInfos) {
        if (Bytes.equals(regionInfo.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
            MetaTableAccessor.deleteRegion(connection, regionInfo);
            break;
        }
    }
    try (Table t = connection.getTable(tableName);
        RegionLocator locator = connection.getRegionLocator(tableName);
        Admin admin = connection.getAdmin()) {
        loader.doBulkLoad(dir, admin, t, locator);
    } catch (Exception e) {
        LOG.error("exception=", e);
        assertTrue("IOException expected", e instanceof IOException);
    }
    table.close();
    // Make sure at least the one region that still exists can be found.
    regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
    assertTrue(regionInfos.size() >= 1);
    this.assertExpectedTable(connection, tableName, ROWCOUNT, 2);
    connection.close();
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) TableExistsException(org.apache.hadoop.hbase.TableExistsException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) Multimap(com.google.common.collect.Multimap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
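The test above fabricates a hole in hbase:meta by deleting the catalog entry for the region whose start key is empty. As a complement, here is a minimal sketch, using only HRegionInfo boundary accessors, of how such a hole or overlap could be detected from a region list sorted by start key; the class and helper names are our own and not part of the test.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionChainCheck {

    // Returns true if the sorted region list does not tile the key space end to end,
    // i.e. there is a hole (or overlap) like the one the test above creates in hbase:meta.
    static boolean hasHoleOrOverlap(List<HRegionInfo> sortedRegions) {
        if (sortedRegions.isEmpty()) {
            return true;
        }
        // The first region must start at the empty byte array.
        if (!Bytes.equals(sortedRegions.get(0).getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
            return true;
        }
        // Each region's end key must equal the next region's start key.
        for (int i = 1; i < sortedRegions.size(); i++) {
            if (!Bytes.equals(sortedRegions.get(i - 1).getEndKey(),
                    sortedRegions.get(i).getStartKey())) {
                return true;
            }
        }
        // The last region must end at the empty byte array.
        return !Bytes.equals(sortedRegions.get(sortedRegions.size() - 1).getEndKey(),
                HConstants.EMPTY_BYTE_ARRAY);
    }

    public static void main(String[] args) {
        TableName tn = TableName.valueOf("exampleTable");
        List<HRegionInfo> regions = Arrays.asList(
                new HRegionInfo(tn, HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("row_00000100")),
                new HRegionInfo(tn, Bytes.toBytes("row_00000100"), HConstants.EMPTY_BYTE_ARRAY));
        // Prints false: the two regions cover the key space with no gap.
        System.out.println(hasHoleOrOverlap(regions));
    }
}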

Example 72 with HRegionInfo

Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

The class TestMasterFailoverWithProcedures, method testCreateWithFailoverAtStep.

private void testCreateWithFailoverAtStep(final int step) throws Exception {
    final TableName tableName = TableName.valueOf("testCreateWithFailoverAtStep" + step);
    // create the table
    ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
    ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
    ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
    // Start the Create procedure && kill the executor
    byte[][] splitKeys = null;
    HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
    HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
    long procId = procExec.submitProcedure(new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
    testRecoveryAndDoubleExecution(UTIL, procId, step, CreateTableState.values());
    MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
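The call to ModifyRegionUtils.createHRegionInfos above turns the table descriptor plus an optional split-key array into the table's initial HRegionInfo set; with splitKeys == null it should yield a single region covering the whole key space. Below is a rough, illustrative sketch of that split-keys-to-boundaries mapping using only public HRegionInfo constructors; the class and helper names are ours, not HBase's.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitKeysToRegions {

    // N split keys produce N + 1 regions: [EMPTY, k1), [k1, k2), ..., [kN, EMPTY).
    static List<HRegionInfo> toRegions(TableName table, byte[][] splitKeys) {
        List<HRegionInfo> regions = new ArrayList<>();
        byte[] start = HConstants.EMPTY_BYTE_ARRAY;
        if (splitKeys != null) {
            for (byte[] split : splitKeys) {
                regions.add(new HRegionInfo(table, start, split));
                start = split;
            }
        }
        // The last (or only) region runs out to the empty end key.
        regions.add(new HRegionInfo(table, start, HConstants.EMPTY_BYTE_ARRAY));
        return regions;
    }

    public static void main(String[] args) {
        byte[][] splits = { Bytes.toBytes("row_00000100") };
        for (HRegionInfo r : toRegions(TableName.valueOf("exampleTable"), splits)) {
            System.out.println(r.getRegionNameAsString());
        }
    }
}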

Example 73 with HRegionInfo

Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

The class TestSplitTableRegionProcedure, method testSplitTableRegionNoStoreFile.

@Test(timeout = 60000)
public void testSplitTableRegionNoStoreFile() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
    HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
    int splitRowNum = startRowNum + rowCount / 2;
    byte[] splitKey = Bytes.toBytes("" + splitRowNum);
    assertTrue("not able to find a splittable region", regions != null);
    assertTrue("not able to find a splittable region", regions.length == 1);
    // Split region of the table
    long procId = procExec.submitProcedure(new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
    // Wait the completion
    ProcedureTestingUtility.waitProcedure(procExec, procId);
    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
    assertTrue(UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 2);
    assertTrue(UTIL.countRows(tableName) == 0);
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) Test(org.junit.Test)
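The assertion above only counts the daughter regions after the split. A natural follow-up check, sketched here with a hypothetical helper rather than code from the test, is that the two daughters' HRegionInfo boundaries meet exactly at the split key and together cover the parent's range.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class DaughterBoundaryCheck {

    // daughterA is assumed to be the lower half, daughterB the upper half of the parent.
    static boolean daughtersCoverParent(HRegionInfo parent, HRegionInfo daughterA,
            HRegionInfo daughterB, byte[] splitKey) {
        return Bytes.equals(daughterA.getStartKey(), parent.getStartKey())
            && Bytes.equals(daughterA.getEndKey(), splitKey)
            && Bytes.equals(daughterB.getStartKey(), splitKey)
            && Bytes.equals(daughterB.getEndKey(), parent.getEndKey());
    }
}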

Example 74 with HRegionInfo

Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

The class TestSplitTableRegionProcedure, method testRecoveryAndDoubleExecution.

@Test(timeout = 60000)
public void testRecoveryAndDoubleExecution() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
    HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
    insertData(tableName);
    int splitRowNum = startRowNum + rowCount / 2;
    byte[] splitKey = Bytes.toBytes("" + splitRowNum);
    assertTrue("not able to find a splittable region", regions != null);
    assertTrue("not able to find a splittable region", regions.length == 1);
    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
    // Split region of the table
    long procId = procExec.submitProcedure(new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
    // Restart the executor and execute the step twice
    int numberOfSteps = SplitTableRegionState.values().length;
    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps);
    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
    verify(tableName, splitRowNum);
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) Test(org.junit.Test)

Example 75 with HRegionInfo

Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

The class TestSplitTableRegionProcedure, method testInvalidSplitKey.

@Test(timeout = 60000)
public void testInvalidSplitKey() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
    HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
    insertData(tableName);
    assertTrue("not able to find a splittable region", regions != null);
    assertTrue("not able to find a splittable region", regions.length == 1);
    // Split region of the table with null split key
    try {
        long procId1 = procExec.submitProcedure(new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], null));
        ProcedureTestingUtility.waitProcedure(procExec, procId1);
        fail("unexpected procedure start with invalid split-key");
    } catch (DoNotRetryIOException e) {
        LOG.debug("Expected Split procedure construction failure: " + e.getMessage());
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Test(org.junit.Test)
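The procedure above is expected to fail fast with a DoNotRetryIOException when handed a null split key. As a rough illustration only, and not the procedure's actual validation logic, a split key can be sanity-checked against a region's boundaries with plain HRegionInfo accessors; the class and method names below are hypothetical.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitKeyCheck {

    // A usable split key must be non-empty, fall inside the region, and differ from
    // the region's start key (otherwise one daughter region would be empty).
    static boolean isValidSplitKey(HRegionInfo region, byte[] splitKey) {
        return splitKey != null
            && splitKey.length > 0
            && region.containsRow(splitKey)
            && !Bytes.equals(region.getStartKey(), splitKey);
    }

    public static void main(String[] args) {
        HRegionInfo region = new HRegionInfo(TableName.valueOf("exampleTable"),
            Bytes.toBytes("row_00000000"), Bytes.toBytes("row_00000200"));
        System.out.println(isValidSplitKey(region, Bytes.toBytes("row_00000100"))); // true
        System.out.println(isValidSplitKey(region, null)); // false
    }
}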

Aggregations

HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 408
ServerName (org.apache.hadoop.hbase.ServerName): 153
Test (org.junit.Test): 141
TableName (org.apache.hadoop.hbase.TableName): 118
ArrayList (java.util.ArrayList): 86
IOException (java.io.IOException): 83
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 75
Path (org.apache.hadoop.fs.Path): 63
List (java.util.List): 59
HashMap (java.util.HashMap): 57
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 49
Table (org.apache.hadoop.hbase.client.Table): 47
Map (java.util.Map): 43
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer): 41
FileSystem (org.apache.hadoop.fs.FileSystem): 40
Configuration (org.apache.hadoop.conf.Configuration): 38
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 35
TreeMap (java.util.TreeMap): 26
HashSet (java.util.HashSet): 23
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 22