Example 16 with HRegion

use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

the class TestSimpleRegionNormalizerOnCluster method testRegionNormalizationSplitOnCluster.

void testRegionNormalizationSplitOnCluster(boolean limitedByQuota) throws Exception {
    TableName TABLENAME;
    if (limitedByQuota) {
        String nsp = "np2";
        NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5").addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
        admin.createNamespace(nspDesc);
        TABLENAME = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + name.getMethodName());
    } else {
        TABLENAME = TableName.valueOf(name.getMethodName());
    }
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster m = cluster.getMaster();
    try (Table ht = TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILYNAME, 5)) {
        // Need to get sorted list of regions here
        List<HRegion> generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(TABLENAME);
        Collections.sort(generatedRegions, new Comparator<HRegion>() {

            @Override
            public int compare(HRegion o1, HRegion o2) {
                return o1.getRegionInfo().compareTo(o2.getRegionInfo());
            }
        });
        HRegion region = generatedRegions.get(0);
        generateTestData(region, 1);
        region.flush(true);
        region = generatedRegions.get(1);
        generateTestData(region, 1);
        region.flush(true);
        region = generatedRegions.get(2);
        generateTestData(region, 2);
        region.flush(true);
        region = generatedRegions.get(3);
        generateTestData(region, 2);
        region.flush(true);
        region = generatedRegions.get(4);
        generateTestData(region, 5);
        region.flush(true);
    }
    HTableDescriptor htd = admin.getTableDescriptor(TABLENAME);
    htd.setNormalizationEnabled(true);
    admin.modifyTable(TABLENAME, htd);
    admin.flush(TABLENAME);
    assertEquals(5, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), TABLENAME));
    // Sleep so the region load reports have time to update before we
    // trigger normalization
    Thread.sleep(5000);
    m.normalizeRegions();
    if (limitedByQuota) {
        long skippedSplitcnt = 0;
        do {
            skippedSplitcnt = m.getRegionNormalizer().getSkippedCount(PlanType.SPLIT);
            Thread.sleep(100);
        } while (skippedSplitcnt == 0L);
        assert (skippedSplitcnt > 0);
    } else {
        while (true) {
            List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(TABLENAME);
            int cnt = 0;
            for (HRegion region : regions) {
                String regionName = region.getRegionInfo().getRegionNameAsString();
                if (regionName.startsWith("testRegionNormalizationSplitOnCluster,zzzzz")) {
                    cnt++;
                }
            }
            if (cnt >= 2) {
                break;
            }
            // Avoid a hot spin while waiting for the split daughters to appear
            Thread.sleep(100);
        }
    }
    admin.disableTable(TABLENAME);
    admin.deleteTable(TABLENAME);
}
Also used : Table(org.apache.hadoop.hbase.client.Table) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HMaster(org.apache.hadoop.hbase.master.HMaster) NamespaceDescriptor(org.apache.hadoop.hbase.NamespaceDescriptor)
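
As a side note, the anonymous Comparator above predates Java 8; a minimal sketch of the same sort written with a method reference, relying only on the natural ordering of the region info type (it implements Comparable, comparing table name, start key, and region id):

import java.util.Comparator;
import java.util.List;

import org.apache.hadoop.hbase.regionserver.HRegion;

final class RegionSortSketch {

    // Sorts regions in place by their HRegionInfo, whose natural ordering
    // compares table name, then start key, then region id.
    static void sortByRegionInfo(List<HRegion> regions) {
        regions.sort(Comparator.comparing(HRegion::getRegionInfo));
    }
}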

Example 17 with HRegion

use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

the class TestWarmupRegion method testWarmup.

/**
   * Basic client side validation of HBASE-4536
   */
@Test
public void testWarmup() throws Exception {
    int serverid = 0;
    HRegion region = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLENAME).get(0);
    HRegionInfo info = region.getRegionInfo();
    runwarmup();
    for (int i = 0; i < 10; i++) {
        HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(serverid);
        byte[] destName = Bytes.toBytes(rs.getServerName().toString());
        TEST_UTIL.getMiniHBaseCluster().getMaster().move(info.getEncodedNameAsBytes(), destName);
        serverid = (serverid + 1) % 2;
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HRegion.warmupHRegion(org.apache.hadoop.hbase.regionserver.HRegion.warmupHRegion) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Test(org.junit.Test)
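
For comparison, the same region bouncing can be driven through the client Admin API instead of calling the master directly; a minimal sketch, assuming a two-server cluster and that the caller supplies the Admin, the region info, and the two ServerNames (all placeholders here, not part of the test above):

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

final class RegionMoveSketch {

    // Moves a region back and forth between two servers, exercising the
    // warmup-on-open path that HBASE-4536 added.
    static void bounceRegion(Admin admin, HRegionInfo info,
            ServerName server0, ServerName server1) throws Exception {
        ServerName[] servers = { server0, server1 };
        for (int i = 0; i < 10; i++) {
            byte[] dest = Bytes.toBytes(servers[i % 2].getServerName());
            admin.move(info.getEncodedNameAsBytes(), dest);
        }
    }
}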

Example 18 with HRegion

use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

the class TestDurability method testIncrementWithReturnResultsSetToFalse.

/*
   * Test that when returnResults is set to false on an Increment, the
   * incremented value is not sent back; the call returns an empty Result
   * instead.
   */
@Test
public void testIncrementWithReturnResultsSetToFalse() throws Exception {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] col1 = Bytes.toBytes("col1");
    // Setting up region
    final WALFactory wals = new WALFactory(CONF, null, ServerName.valueOf("testIncrementWithReturnResultsSetToFalse", 16010, System.currentTimeMillis()).toString());
    byte[] tableName = Bytes.toBytes("testIncrementWithReturnResultsSetToFalse");
    final WAL wal = wals.getWAL(tableName, null);
    HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
    Increment inc1 = new Increment(row1);
    inc1.setReturnResults(false);
    inc1.addColumn(FAMILY, col1, 1);
    Result res = region.increment(inc1);
    assertTrue(res.isEmpty());
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion) WAL(org.apache.hadoop.hbase.wal.WAL) Increment(org.apache.hadoop.hbase.client.Increment) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
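
The same flag is available on the client-side Table API; a minimal sketch, assuming an open Connection and an existing table (the connection, table name, and column coordinates below are placeholders):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

final class IncrementSketch {

    // With setReturnResults(false) the server skips sending back the
    // post-increment cell values, so the returned Result is empty.
    static Result incrementQuietly(Connection conn, TableName name,
            byte[] row, byte[] family, byte[] qualifier) throws Exception {
        try (Table table = conn.getTable(name)) {
            Increment inc = new Increment(row);
            inc.setReturnResults(false);
            inc.addColumn(family, qualifier, 1L);
            return table.increment(inc); // isEmpty() should be true
        }
    }
}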

Example 19 with HRegion

use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

the class AbstractTestWALReplay method testRegionMadeOfBulkLoadedFilesOnly.

/**
   * Test case of HRegion that is only made out of bulk loaded files.  Assert
   * that we don't 'crash'.
   * @throws IOException
   * @throws IllegalAccessException
   * @throws NoSuchFieldException
   * @throws IllegalArgumentException
   * @throws SecurityException
   */
@Test
public void testRegionMadeOfBulkLoadedFilesOnly() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString());
    deleteDir(basedir);
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    Region region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region2);
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    Region region = HRegion.openHRegion(hri, htd, wal, this.conf);
    byte[] family = htd.getFamilies().iterator().next().getName();
    Path f = new Path(basedir, "hfile");
    HFileTestUtil.createHFile(this.conf, fs, f, family, family, Bytes.toBytes(""), Bytes.toBytes("z"), 10);
    List<Pair<byte[], String>> hfs = new ArrayList<>(1);
    hfs.add(Pair.newPair(family, f.toString()));
    region.bulkLoadHFiles(hfs, true, null);
    // Add an edit so there is something in the WAL
    byte[] row = tableName.getName();
    region.put((new Put(row)).addColumn(family, family, family));
    wal.sync();
    final int rowsInsertedCount = 11;
    assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));
    // Now 'crash' the region by stealing its wal
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction() {

        @Override
        public Object run() throws Exception {
            runWALSplit(newConf);
            WAL wal2 = createWAL(newConf, hbaseRootDir, logName);
            HRegion region2 = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, htd, wal2);
            long seqid2 = region2.getOpenSeqNum();
            assertTrue(seqid2 > -1);
            assertEquals(rowsInsertedCount, getScannedCount(region2.getScanner(new Scan())));
            // I can't close wal1. It's been appropriated when we split.
            region2.close();
            wal2.close();
            return null;
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) User(org.apache.hadoop.hbase.security.User) Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) Put(org.apache.hadoop.hbase.client.Put) IOException(java.io.IOException) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Scan(org.apache.hadoop.hbase.client.Scan) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
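
Outside of a test, prepared HFiles are usually handed over with the client-side bulk load tool rather than Region#bulkLoadHFiles; a minimal sketch, assuming an HFile directory laid out with one subdirectory per column family (in this era the tool lived under org.apache.hadoop.hbase.mapreduce; it moved in later releases):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

final class BulkLoadSketch {

    // Atomically assigns each prepared HFile to the region server owning
    // its key range; the files are moved into place, not rewritten.
    static void bulkLoad(Configuration conf, Path hfileDir, TableName name) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(conf);
                Admin admin = conn.getAdmin();
                Table table = conn.getTable(name);
                RegionLocator locator = conn.getRegionLocator(name)) {
            new LoadIncrementalHFiles(conf).doBulkLoad(hfileDir, admin, table, locator);
        }
    }
}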

Example 20 with HRegion

use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

the class AbstractTestWALReplay method testReplayEditsWrittenIntoWAL.

/**
   * Create an HRegion with the result of a WAL split and test we only see the
   * good edits
   * @throws Exception
   */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region2);
    final WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    final byte[] rowName = tableName.getName();
    final byte[] regionName = hri.getEncodedNameAsBytes();
    // Add 1k to each family.
    final int countPerFamily = 1000;
    Set<byte[]> familyNames = new HashSet<>();
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, htd, mvcc, scopes);
        familyNames.add(hcd.getName());
    }
    // Add a cache flush, shouldn't have any effect
    wal.startCacheFlush(regionName, familyNames);
    wal.completeCacheFlush(regionName);
    // Add an edit to another family, should be skipped.
    WALEdit edit = new WALEdit();
    long now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit, true);
    // Delete the c family to verify deletes make it over.
    edit = new WALEdit();
    now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit, true);
    // Sync.
    wal.sync();
    // Make a new conf and a new fs for the splitter to run on so we can take
    // over old wal.
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // 100k seems to make for about 4 flushes during HRegion#initialize.
            newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
            // Make a new wal for new region.
            WAL newWal = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger flushcount = new AtomicInteger(0);
            try {
                final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {

                    @Override
                    protected FlushResult internalFlushcache(final WAL wal, final long myseqid, final Collection<Store> storesToFlush, MonitoredTask status, boolean writeFlushWalMarker) throws IOException {
                        LOG.info("InternalFlushCache Invoked");
                        FlushResult fs = super.internalFlushcache(wal, myseqid, storesToFlush, Mockito.mock(MonitoredTask.class), writeFlushWalMarker);
                        flushcount.incrementAndGet();
                        return fs;
                    }
                };
                // The seq id this region has opened up with
                long seqid = region.initialize();
                // The mvcc read point from inserting the data.
                long writePoint = mvcc.getWritePoint();
                // We flushed during init.
                assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
                assertTrue((seqid - 1) == writePoint);
                Get get = new Get(rowName);
                Result result = region.get(get);
                // Make sure we only see the good edits
                assertEquals(countPerFamily * (htd.getFamilies().size() - 1), result.size());
                region.close();
            } finally {
                newWal.close();
            }
            return null;
        }
    });
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) User(org.apache.hadoop.hbase.security.User) Configuration(org.apache.hadoop.conf.Configuration) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) Store(org.apache.hadoop.hbase.regionserver.Store) CompactingMemStore(org.apache.hadoop.hbase.regionserver.CompactingMemStore) HStore(org.apache.hadoop.hbase.regionserver.HStore) Result(org.apache.hadoop.hbase.client.Result) WALKey(org.apache.hadoop.hbase.wal.WALKey) FileSystem(org.apache.hadoop.fs.FileSystem) HashSet(java.util.HashSet) Path(org.apache.hadoop.fs.Path) IOException(java.io.IOException) TreeMap(java.util.TreeMap) IOException(java.io.IOException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Get(org.apache.hadoop.hbase.client.Get) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask) Test(org.junit.Test)
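
The addWALEdits helper is not part of this excerpt; below is a hypothetical reconstruction modeled on the wal.append calls in the test body. The method name, parameters, and qualifier scheme are assumptions, not the project's exact helper, and timestamps come from System.currentTimeMillis() rather than the test's EnvironmentEdge:

import java.util.NavigableMap;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALKey;

final class WalEditSketch {

    // Appends countPerFamily single-cell edits for one family, mirroring
    // the WALKey/WALEdit construction used in the test body above.
    static void addEdits(WAL wal, HRegionInfo hri, TableName tableName, byte[] row,
            byte[] family, int countPerFamily, MultiVersionConcurrencyControl mvcc,
            NavigableMap<byte[], Integer> scopes) throws Exception {
        for (int i = 0; i < countPerFamily; i++) {
            byte[] qualifier = Bytes.toBytes(Integer.toString(i));
            long now = System.currentTimeMillis();
            WALEdit edit = new WALEdit();
            edit.add(new KeyValue(row, family, qualifier, now, row));
            wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes),
                    edit, true);
        }
    }
}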

Aggregations

HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 148
Test (org.junit.Test): 88
Put (org.apache.hadoop.hbase.client.Put): 56
Path (org.apache.hadoop.fs.Path): 40
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 40
Scan (org.apache.hadoop.hbase.client.Scan): 37
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 36
Cell (org.apache.hadoop.hbase.Cell): 35
TableId (co.cask.cdap.data2.util.TableId): 32
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 28
IOException (java.io.IOException): 26
WAL (org.apache.hadoop.hbase.wal.WAL): 25
FileSystem (org.apache.hadoop.fs.FileSystem): 24
ArrayList (java.util.ArrayList): 22
TableName (org.apache.hadoop.hbase.TableName): 22
Configuration (org.apache.hadoop.conf.Configuration): 21
Result (org.apache.hadoop.hbase.client.Result): 21
Region (org.apache.hadoop.hbase.regionserver.Region): 21
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 19
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 19