
Example 46 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

From class HBaseTestBase, method forceRegionFlush.

/**
   * Force and block on a flush to occur on all regions of table {@code tableName}.
   * @param tableName The table whose regions should be flushed.
   */
public void forceRegionFlush(byte[] tableName) throws IOException {
    MiniHBaseCluster hbaseCluster = getHBaseCluster();
    if (hbaseCluster != null) {
        TableName qualifiedTableName = TableName.valueOf(tableName);
        for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) {
            List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(qualifiedTableName);
            List<Runnable> flushers = new ArrayList<>();
            for (HRegion region : serverRegions) {
                flushers.add(createFlushRegion(region));
            }
            parallelRun(flushers);
            LOG.info("RegionServer {}: Flushed {} regions for table {}", t.getRegionServer().getServerName().toString(), serverRegions.size(), Bytes.toStringBinary(tableName));
        }
    }
}
Also used: TableName (org.apache.hadoop.hbase.TableName), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), JVMClusterUtil (org.apache.hadoop.hbase.util.JVMClusterUtil), ArrayList (java.util.ArrayList), MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster)
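
The flush tasks built by createFlushRegion are not shown in this snippet. A minimal sketch of what such a task could look like, assuming the HBase 1.x HRegion flush API (the exact flush method name and the error handling are assumptions, not the project's actual code):

private Runnable createFlushRegion(final HRegion region) {
    return new Runnable() {
        @Override
        public void run() {
            try {
                // Force a blocking memstore flush for this region. On the HBase 1.1+ line this is
                // flush(true); older releases expose flushcache() instead, so treat the exact call as an assumption.
                region.flush(true);
            } catch (IOException e) {
                throw new RuntimeException("Flush failed for region " + region.getRegionInfo().getRegionNameAsString(), e);
            }
        }
    };
}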

Example 47 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

From class HBaseTestBase, method forceRegionCompact.

/**
   * Force and block on a compaction on all regions of table {@code tableName}.
   * @param tableName The table whose regions should be compacted.
   * @param majorCompact Whether a major compaction should be requested.
   */
public void forceRegionCompact(byte[] tableName, boolean majorCompact) throws IOException {
    MiniHBaseCluster hbaseCluster = getHBaseCluster();
    if (hbaseCluster != null) {
        TableName qualifiedTableName = TableName.valueOf(tableName);
        for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) {
            List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(qualifiedTableName);
            List<Runnable> compacters = new ArrayList<>();
            for (HRegion region : serverRegions) {
                compacters.add(createCompactRegion(region, majorCompact));
            }
            parallelRun(compacters);
            LOG.info("RegionServer {}: Compacted {} regions for table {}", t.getRegionServer().getServerName().toString(), serverRegions.size(), Bytes.toStringBinary(tableName));
        }
    }
}
Also used: TableName (org.apache.hadoop.hbase.TableName), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), JVMClusterUtil (org.apache.hadoop.hbase.util.JVMClusterUtil), ArrayList (java.util.ArrayList), MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster)
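
An illustrative call site for the two helpers above (testBase and the table name are hypothetical, not taken from the source):

byte[] table = Bytes.toBytes("my_table");   // hypothetical table name
// Flush first so every pending edit is written to HFiles, then force a major compaction so
// that any compaction-time coprocessor logic runs over the complete data set.
testBase.forceRegionFlush(table);
testBase.forceRegionCompact(table, true);   // true requests a major compaction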

Example 48 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

From class IncrementSummingScannerTest, method testWithBatchLimit.

@Test
public void testWithBatchLimit() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testWithBatchLimit");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c2");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        long now = System.currentTimeMillis();
        // put a non-increment column
        Put p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, Bytes.toBytes("c1"), Bytes.toBytes("value1"));
        region.put(p);
        // now put some increment deltas in a column
        p = new Put(Bytes.toBytes("r4"));
        for (int i = 0; i < 3; i++) {
            p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
        }
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        // put some non-increment columns
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, Bytes.toBytes("c3"), Bytes.toBytes("value3"));
        region.put(p);
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, Bytes.toBytes("c4"), Bytes.toBytes("value4"));
        region.put(p);
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, Bytes.toBytes("c5"), Bytes.toBytes("value5"));
        region.put(p);
        // this put will appear as a "total" sum prior to all the delta puts
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, columnBytes, now - 5, Bytes.toBytes(5L));
        region.put(p);
        Scan scan = new Scan(Bytes.toBytes("r4"));
        scan.setMaxVersions();
        RegionScanner scanner = new IncrementSummingScanner(region, 3, region.getScanner(scan), ScanType.USER_SCAN);
        List<Cell> results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(3, results.size());
        Cell cell = results.get(0);
        assertNotNull(cell);
        assertEquals("value1", Bytes.toString(cell.getValue()));
        cell = results.get(1);
        assertNotNull(cell);
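        // expected sum: base total of 5 (written at now - 5) plus three deltas of 1L = 8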
        assertEquals(8L, Bytes.toLong(cell.getValue()));
        cell = results.get(2);
        assertNotNull(cell);
        assertEquals("value3", Bytes.toString(cell.getValue()));
    } finally {
        region.close();
    }
}
Also used: TableId (co.cask.cdap.data2.util.TableId), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), Put (org.apache.hadoop.hbase.client.Put), HBase12CDH570Test (co.cask.cdap.data.hbase.HBase12CDH570Test), Test (org.junit.Test)
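
The asserted 8L is the base total of 5 written at now - 5 plus the three delta cells of 1L, which IncrementSummingScanner coalesces into a single cell. A minimal follow-up check, assuming a batch limit of -1 means "no limit" and reusing the fields from the test above, might look like:

Scan verify = new Scan(Bytes.toBytes("r4"));
verify.addColumn(familyBytes, columnBytes);
verify.setMaxVersions();
// Restrict the scan to the increment column; with no batch limit the summing scanner should
// return exactly one coalesced cell holding the total of 8.
RegionScanner verifyScanner = new IncrementSummingScanner(region, -1, region.getScanner(verify), ScanType.USER_SCAN);
List<Cell> summed = Lists.newArrayList();
verifyScanner.next(summed);
assertEquals(1, summed.size());
assertEquals(8L, Bytes.toLong(summed.get(0).getValue()));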

Example 49 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

From class MessageTableRegionObserver, method postFlush.

@Override
public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
    // Record whether the region is empty after a flush
    HRegion region = e.getEnvironment().getRegion();
    // After a flush, if the memstore size is zero and there are no store files for any stores in the region
    // then the region must be empty
    long numStoreFiles = numStoreFilesForRegion(e);
    long memstoreSize = region.getMemstoreSize().get();
    LOG.debug(String.format("Region %s: memstore size = %s, num store files = %s", region.getRegionInfo().getRegionNameAsString(), memstoreSize, numStoreFiles));
    if (memstoreSize == 0 && numStoreFiles == 0) {
        if (compactionState != null) {
            compactionState.persistRegionEmpty(System.currentTimeMillis());
        }
    }
}
Also used: HRegion (org.apache.hadoop.hbase.regionserver.HRegion)
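
numStoreFilesForRegion is not part of this snippet. A plausible shape for it, assuming the 0.98-era HRegion#getStores() map and the Store#getStorefilesCount() accessor (both assumptions; the exact API differs across HBase versions):

private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> e) {
    long count = 0;
    HRegion region = e.getEnvironment().getRegion();
    // Sum the HFile count across every store (column family) of the region.
    // getStores() returns Map<byte[], Store> on the 0.98 line; later versions return a List<Store>.
    for (Store store : region.getStores().values()) {
        count += store.getStorefilesCount();
    }
    return count;
}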

Example 50 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

From class HBase10CDHTest, method forEachRegion.

@Override
public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) {
    MiniHBaseCluster hbaseCluster = getHBaseCluster();
    Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    // make sure consumer config cache is updated
    for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) {
        List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName));
        for (HRegion region : serverRegions) {
            results.put(region.getRegionName(), function.apply(region));
        }
    }
    return results;
}
Also used: HRegion (org.apache.hadoop.hbase.regionserver.HRegion), JVMClusterUtil (org.apache.hadoop.hbase.util.JVMClusterUtil), MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster), TreeMap (java.util.TreeMap)
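
An illustrative call site, assuming the Function type is Guava's com.google.common.base.Function and that collecting region start keys is just an example payload:

// Hypothetical usage: map every online region of a table to its start key.
Map<byte[], byte[]> startKeys = forEachRegion(Bytes.toBytes("my_table"),
    new Function<HRegion, byte[]>() {
        @Override
        public byte[] apply(HRegion region) {
            return region.getStartKey();
        }
    });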

Aggregations

HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 148
Test (org.junit.Test): 88
Put (org.apache.hadoop.hbase.client.Put): 56
Path (org.apache.hadoop.fs.Path): 40
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 40
Scan (org.apache.hadoop.hbase.client.Scan): 37
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 36
Cell (org.apache.hadoop.hbase.Cell): 35
TableId (co.cask.cdap.data2.util.TableId): 32
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 28
IOException (java.io.IOException): 26
WAL (org.apache.hadoop.hbase.wal.WAL): 25
FileSystem (org.apache.hadoop.fs.FileSystem): 24
ArrayList (java.util.ArrayList): 22
TableName (org.apache.hadoop.hbase.TableName): 22
Configuration (org.apache.hadoop.conf.Configuration): 21
Result (org.apache.hadoop.hbase.client.Result): 21
Region (org.apache.hadoop.hbase.regionserver.Region): 21
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 19
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 19