Example 71 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestNamespaceAuditor method testRegionMerge.

@Test
public void testRegionMerge() throws Exception {
    String nsp1 = prefix + "_regiontest";
    final int initialRegions = 3;
    NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1)
        .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "" + initialRegions)
        .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2")
        .build();
    ADMIN.createNamespace(nspDesc);
    final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
    byte[] columnFamily = Bytes.toBytes("info");
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableTwo)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnFamily)).build();
    ADMIN.createTable(tableDescriptor, Bytes.toBytes("0"), Bytes.toBytes("9"), initialRegions);
    try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
        Table table = connection.getTable(tableTwo)) {
        UTIL.loadNumericRows(table, columnFamily, 1000, 1999);
    }
    ADMIN.flush(tableTwo);
    List<RegionInfo> hris = ADMIN.getRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris, RegionInfo.COMPARATOR);
    Future<?> f = ADMIN.mergeRegionsAsync(hris.get(0).getEncodedNameAsBytes(), hris.get(1).getEncodedNameAsBytes(), false);
    f.get(10, TimeUnit.SECONDS);
    hris = ADMIN.getRegions(tableTwo);
    assertEquals(initialRegions - 1, hris.size());
    Collections.sort(hris, RegionInfo.COMPARATOR);
    byte[] splitKey = Bytes.toBytes("3");
    HRegion regionToSplit = UTIL.getMiniHBaseCluster().getRegions(tableTwo).stream().filter(r -> r.getRegionInfo().containsRow(splitKey)).findFirst().get();
    regionToSplit.compact(true);
    // Waiting for compaction to finish
    UTIL.waitFor(30000, new Waiter.Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return (CompactionState.NONE == ADMIN.getCompactionStateForRegion(regionToSplit.getRegionInfo().getRegionName()));
        }
    });
    // Cleaning compacted references for split to proceed
    regionToSplit.getStores().forEach(s -> {
        try {
            s.closeAndArchiveCompactedFiles();
        } catch (IOException e1) {
            LOG.error("Error while cleaning compacted file", e1);
        }
    });
    // the above compact may quit immediately if there is a compaction ongoing, so here we need to
    // wait a while to let the ongoing compaction finish.
    UTIL.waitFor(10000, regionToSplit::isSplittable);
    ADMIN.splitRegionAsync(regionToSplit.getRegionInfo().getRegionName(), splitKey).get(10, TimeUnit.SECONDS);
    hris = ADMIN.getRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris, RegionInfo.COMPARATOR);
    // Fail region merge through Coprocessor hook
    SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
    MasterCoprocessorHost cpHost = cluster.getMaster().getMasterCoprocessorHost();
    Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class);
    CPMasterObserver masterObserver = (CPMasterObserver) coprocessor;
    masterObserver.failMerge(true);
    f = ADMIN.mergeRegionsAsync(hris.get(1).getEncodedNameAsBytes(), hris.get(2).getEncodedNameAsBytes(), false);
    try {
        f.get(10, TimeUnit.SECONDS);
        fail("Merge was supposed to fail!");
    } catch (ExecutionException ee) {
    // Expected.
    }
    hris = ADMIN.getRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris, RegionInfo.COMPARATOR);
    // verify that we cannot split
    try {
        ADMIN.split(tableTwo, Bytes.toBytes("6"));
        fail();
    } catch (DoNotRetryRegionException e) {
    // Expected
    }
    // Wait briefly to make sure the rejected split did not change the region count.
    Thread.sleep(2000);
    assertEquals(initialRegions, ADMIN.getRegions(tableTwo).size());
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) Table(org.apache.hadoop.hbase.client.Table) Connection(org.apache.hadoop.hbase.client.Connection) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) IOException(java.io.IOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) RestoreSnapshotException(org.apache.hadoop.hbase.snapshot.RestoreSnapshotException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) DoNotRetryRegionException(org.apache.hadoop.hbase.client.DoNotRetryRegionException) QuotaExceededException(org.apache.hadoop.hbase.quotas.QuotaExceededException) ExecutionException(java.util.concurrent.ExecutionException) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Coprocessor(org.apache.hadoop.hbase.Coprocessor) RegionCoprocessor(org.apache.hadoop.hbase.coprocessor.RegionCoprocessor) MasterCoprocessor(org.apache.hadoop.hbase.coprocessor.MasterCoprocessor) RegionServerCoprocessor(org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor) NamespaceDescriptor(org.apache.hadoop.hbase.NamespaceDescriptor) DoNotRetryRegionException(org.apache.hadoop.hbase.client.DoNotRetryRegionException) Waiter(org.apache.hadoop.hbase.Waiter) ExecutionException(java.util.concurrent.ExecutionException) Test(org.junit.Test)
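
The CPMasterObserver fetched from the master coprocessor host above is a test helper defined elsewhere in TestNamespaceAuditor and not shown here. A minimal sketch of such a merge-failing master observer, using the standard MasterCoprocessor/MasterObserver hooks; the class name and hook body are illustrative, not the actual implementation:

public static class FailingMergeObserver implements MasterCoprocessor, MasterObserver {

    private volatile boolean shouldFailMerge = false;

    public void failMerge(boolean fail) {
        shouldFailMerge = fail;
    }

    @Override
    public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
    }

    @Override
    public void preMergeRegionsAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
        RegionInfo[] regionsToMerge) throws IOException {
        // Throwing from the pre-merge hook aborts the merge procedure, which surfaces
        // to the client as an ExecutionException on the Future returned by
        // Admin.mergeRegionsAsync, exactly what the test above expects.
        if (shouldFailMerge) {
            throw new IOException("fail merge requested by test");
        }
    }
}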

Example 72 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class AbstractTestWALReplay method testReplayEditsAfterRegionMovedWithMultiCF.

/**
 * Tests that WAL edits for a table with multiple column families are replayed
 * correctly after the region is moved between region servers and the hosting
 * server is aborted.
 */
@Test
public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception {
    final TableName tableName = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    byte[] family1 = Bytes.toBytes("cf1");
    byte[] family2 = Bytes.toBytes("cf2");
    byte[] qualifier = Bytes.toBytes("q");
    byte[] value = Bytes.toBytes("testV");
    byte[][] familys = { family1, family2 };
    TEST_UTIL.createTable(tableName, familys);
    Table htable = TEST_UTIL.getConnection().getTable(tableName);
    Put put = new Put(Bytes.toBytes("r1"));
    put.addColumn(family1, qualifier, value);
    htable.put(put);
    ResultScanner resultScanner = htable.getScanner(new Scan());
    int count = 0;
    while (resultScanner.next() != null) {
        count++;
    }
    resultScanner.close();
    assertEquals(1, count);
    SingleProcessHBaseCluster hbaseCluster = TEST_UTIL.getMiniHBaseCluster();
    List<HRegion> regions = hbaseCluster.getRegions(tableName);
    assertEquals(1, regions.size());
    // move region to another regionserver
    Region destRegion = regions.get(0);
    int originServerNum = hbaseCluster.getServerWith(destRegion.getRegionInfo().getRegionName());
    assertTrue("Please start more than 1 regionserver", hbaseCluster.getRegionServerThreads().size() > 1);
    int destServerNum = 0;
    while (destServerNum == originServerNum) {
        destServerNum++;
    }
    HRegionServer originServer = hbaseCluster.getRegionServer(originServerNum);
    HRegionServer destServer = hbaseCluster.getRegionServer(destServerNum);
    // move region to destination regionserver
    TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), destServer.getServerName());
    // delete the row
    Delete del = new Delete(Bytes.toBytes("r1"));
    htable.delete(del);
    resultScanner = htable.getScanner(new Scan());
    count = 0;
    while (resultScanner.next() != null) {
        count++;
    }
    resultScanner.close();
    assertEquals(0, count);
    // flush region and make major compaction
    HRegion region = (HRegion) destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
    region.flush(true);
    // wait to complete major compaction
    for (HStore store : region.getStores()) {
        store.triggerMajorCompaction();
    }
    region.compact(true);
    // move region to origin regionserver
    TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), originServer.getServerName());
    // abort the origin regionserver
    originServer.abort("testing");
    // see what we get
    Result result = htable.get(new Get(Bytes.toBytes("r1")));
    assertTrue("Row is deleted, but we got " + result, result == null || result.isEmpty());
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Put(org.apache.hadoop.hbase.client.Put) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Result(org.apache.hadoop.hbase.client.Result) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Get(org.apache.hadoop.hbase.client.Get) Region(org.apache.hadoop.hbase.regionserver.Region) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Scan(org.apache.hadoop.hbase.client.Scan) HStore(org.apache.hadoop.hbase.regionserver.HStore) Test(org.junit.Test)
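
The manual scanner loop used twice above to count rows is a common pattern in these tests; HBaseTestingUtil also offers a helper that does the same thing, as Example 73 uses. A sketch of the substitution:

    // Equivalent to opening a Scan and counting results by hand:
    int count = TEST_UTIL.countRows(htable);
    assertEquals(1, count);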

Example 73 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestFSErrorsExposed method testFullSystemBubblesFSErrors.

/**
 * Cluster test which starts a region server with a region, then
 * removes the data from HDFS underneath it, and ensures that
 * errors are bubbled to the client.
 */
@Test
public void testFullSystemBubblesFSErrors() throws Exception {
    // With short-circuit reads enabled, losing the datanode does not produce an
    // error; that is a known 'feature', so skip the test in that case.
    Assume.assumeTrue(!util.isReadShortCircuitOn());
    try {
        // Make it fail faster.
        util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
        util.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 90000);
        util.getConfiguration().setInt("hbase.lease.recovery.timeout", 10000);
        util.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
        util.startMiniCluster(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        byte[] fam = Bytes.toBytes("fam");
        Admin admin = util.getAdmin();
        TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam)
                .setMaxVersions(1).setBlockCacheEnabled(false).build())
            .build();
        admin.createTable(tableDescriptor);
        // Make a new Configuration so it makes a new connection that has the
        // above configuration on it; else we use the old one w/ 10 as default.
        try (Table table = util.getConnection().getTable(tableName)) {
            // Load some data
            util.loadTable(table, fam, false);
            util.flush();
            util.countRows(table);
            // Kill the DFS cluster
            util.getDFSCluster().shutdownDataNodes();
            try {
                util.countRows(table);
                fail("Did not fail to count after removing data");
            } catch (Exception e) {
                LOG.info("Got expected error", e);
                assertTrue(e.getMessage().contains("Could not seek"));
            }
        }
        // Restart data nodes so that HBase can shut down cleanly.
        util.getDFSCluster().restartDataNodes();
    } finally {
        SingleProcessHBaseCluster cluster = util.getMiniHBaseCluster();
        if (cluster != null) {
            cluster.killAll();
        }
        util.shutdownMiniCluster();
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) Admin(org.apache.hadoop.hbase.client.Admin) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) IOException(java.io.IOException) Test(org.junit.Test)
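
Example 73 also shows the full SingleProcessHBaseCluster lifecycle that the other examples rely on implicitly. A minimal sketch, assuming an HBaseTestingUtil field named util:

    // Start a mini cluster with one master and one regionserver, backed by a mini DFS.
    util.startMiniCluster(1);
    SingleProcessHBaseCluster cluster = util.getMiniHBaseCluster();
    HMaster master = cluster.getMaster();          // direct handle on the master
    HRegionServer rs = cluster.getRegionServer(0); // and on an individual regionserver
    // ... run assertions against the live cluster ...
    util.shutdownMiniCluster();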

Example 74 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestFIFOCompactionPolicy method getStoreWithName.

private HStore getStoreWithName(TableName tableName) {
    SingleProcessHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
    for (int i = 0; i < rsts.size(); i++) {
        HRegionServer hrs = rsts.get(i).getRegionServer();
        for (HRegion region : hrs.getRegions(tableName)) {
            return region.getStores().iterator().next();
        }
    }
    return null;
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer)

Example 75 with SingleProcessHBaseCluster

use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

the class TestCompactionWithThroughputController method getStoreWithName.

private HStore getStoreWithName(TableName tableName) {
    SingleProcessHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
    for (int i = 0; i < rsts.size(); i++) {
        HRegionServer hrs = rsts.get(i).getRegionServer();
        for (Region region : hrs.getRegions(tableName)) {
            return ((HRegion) region).getStores().iterator().next();
        }
    }
    return null;
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer)
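
Examples 74 and 75 define essentially the same helper, differing only in an unnecessary cast. A stream-based equivalent that both tests could share, behavior unchanged (assuming the same TEST_UTIL field):

private HStore getStoreWithName(TableName tableName) {
    // Walk every live regionserver, find the first open region of the table,
    // and return its first store; null if the table has no open regions.
    return TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().stream()
        .map(JVMClusterUtil.RegionServerThread::getRegionServer)
        .flatMap(rs -> rs.getRegions(tableName).stream())
        .findFirst()
        .map(region -> region.getStores().iterator().next())
        .orElse(null);
}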

Aggregations

SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster) 85
Test (org.junit.Test) 69
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer) 31
TableName (org.apache.hadoop.hbase.TableName) 26
Admin (org.apache.hadoop.hbase.client.Admin) 24
Table (org.apache.hadoop.hbase.client.Table) 22
HRegion (org.apache.hadoop.hbase.regionserver.HRegion) 22
HMaster (org.apache.hadoop.hbase.master.HMaster) 21
ServerName (org.apache.hadoop.hbase.ServerName) 18
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) 18
HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil) 14
MasterCoprocessorHost (org.apache.hadoop.hbase.master.MasterCoprocessorHost) 13
IOException (java.io.IOException) 12
Configuration (org.apache.hadoop.conf.Configuration) 12
Put (org.apache.hadoop.hbase.client.Put) 12
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 12
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder) 10
File (java.io.File) 9
Path (org.apache.hadoop.fs.Path) 9
RegionMoverBuilder (org.apache.hadoop.hbase.util.RegionMover.RegionMoverBuilder) 9