Example 36 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From class AbstractTestWALReplay, method testReplayEditsAfterAbortingFlush.

/**
   * Test that we can recover the data correctly after aborting a flush. In the
   * test, we first abort a flush after writing some data, then write more data
   * and flush again, and finally verify the data.
   * @throws IOException
   */
@Test
public void testReplayEditsAfterAbortingFlush() throws IOException {
    final TableName tableName = TableName.valueOf("testReplayEditsAfterAbortingFlush");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region3);
    // Write countPerFamily edits into the three families. Do a flush on one
    // of the families during the load of edits so its seqid is not the same as
    // the others, to verify we do the right thing when seqids differ.
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    RegionServerServices rsServices = Mockito.mock(RegionServerServices.class);
    Mockito.doReturn(false).when(rsServices).isAborted();
    when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo", 10, 10));
    Configuration customConf = new Configuration(this.conf);
    customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, CustomStoreFlusher.class.getName());
    HRegion region = HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal, customConf, rsServices, null);
    int writtenRowCount = 10;
    List<HColumnDescriptor> families = new ArrayList<>(htd.getFamilies());
    for (int i = 0; i < writtenRowCount; i++) {
        Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
        put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"), Bytes.toBytes("val"));
        region.put(put);
    }
    // Now assert edits made it in.
    RegionScanner scanner = region.getScanner(new Scan());
    assertEquals(writtenRowCount, getScannedCount(scanner));
    // Let us flush the region
    CustomStoreFlusher.throwExceptionWhenFlushing.set(true);
    try {
        region.flush(true);
        fail("Injected exception hasn't been thrown");
    } catch (Throwable t) {
        LOG.info("Expected simulated exception when flushing region," + t.getMessage());
        // simulated to abort server
        Mockito.doReturn(true).when(rsServices).isAborted();
        // region normally does not accept writes after
        region.setClosing(false);
    // DroppedSnapshotException. We mock around it for this test.
    }
    // write more data
    int moreRow = 10;
    for (int i = writtenRowCount; i < writtenRowCount + moreRow; i++) {
        Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
        put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"), Bytes.toBytes("val"));
        region.put(put);
    }
    writtenRowCount += moreRow;
    // call flush again
    CustomStoreFlusher.throwExceptionWhenFlushing.set(false);
    try {
        region.flush(true);
    } catch (IOException t) {
        LOG.info("Expected exception when flushing region because server is stopped," + t.getMessage());
    }
    region.close(true);
    wal.shutdown();
    // Let us try to split and recover
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    Mockito.doReturn(false).when(rsServices).isAborted();
    HRegion region2 = HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal2, this.conf, rsServices, null);
    scanner = region2.getScanner(new Scan());
    assertEquals(writtenRowCount, getScannedCount(scanner));
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) RegionServerServices(org.apache.hadoop.hbase.regionserver.RegionServerServices) Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) IOException(java.io.IOException) Put(org.apache.hadoop.hbase.client.Put) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test)
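The test above relies on a getScannedCount(RegionScanner) helper that this listing does not show. A minimal sketch of such a row-counting helper, assuming only the standard RegionScanner.next(List&lt;Cell&gt;) contract (the real helper in AbstractTestWALReplay may differ in shape), could look like this:

private int getScannedCount(RegionScanner scanner) throws IOException {
    // Sketch only: RegionScanner.next(List<Cell>) fills the list with the cells of the
    // next row and returns true while more rows remain, so we count non-empty batches.
    int scannedCount = 0;
    List<Cell> results = new ArrayList<>();
    while (true) {
        boolean existMore = scanner.next(results);
        if (!results.isEmpty()) {
            scannedCount++;
        }
        if (!existMore) {
            break;
        }
        results.clear();
    }
    return scannedCount;
}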

Example 37 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From class TestReplicationEndpoint, method testInterClusterReplication.

@Test(timeout = 120000)
public void testInterClusterReplication() throws Exception {
    final String id = "testInterClusterReplication";
    List<HRegion> regions = utility1.getHBaseCluster().getRegions(tableName);
    int totEdits = 0;
    // Write edits into each region before adding the peer and shipping them.
    for (HRegion region : regions) {
        HRegionInfo hri = region.getRegionInfo();
        byte[] row = hri.getStartKey();
        for (int i = 0; i < 100; i++) {
            if (row.length > 0) {
                Put put = new Put(row);
                put.addColumn(famName, row, row);
                region.put(put);
                totEdits++;
            }
        }
    }
    admin.addPeer(id, new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf2)).setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()), null);
    final int numEdits = totEdits;
    Waiter.waitFor(conf1, 30000, new Waiter.ExplainingPredicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return InterClusterReplicationEndpointForTest.replicateCount.get() == numEdits;
        }

        @Override
        public String explainFailure() throws Exception {
            String failure = "Failed to replicate all edits, expected = " + numEdits + " replicated = " + InterClusterReplicationEndpointForTest.replicateCount.get();
            return failure;
        }
    });
    admin.removePeer(id);
    utility1.deleteTableData(tableName);
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Waiter(org.apache.hadoop.hbase.Waiter) Put(org.apache.hadoop.hbase.client.Put) IOException(java.io.IOException) Test(org.junit.Test)
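Note that the write loop above skips the region whose start key is empty (the first region of the table) because of the row.length > 0 check, so totEdits only counts puts against regions with a non-empty start key. A small illustration of the expected count, assuming the table were pre-split into four regions (the actual split count is not shown in this listing):

// Illustration only: with 4 regions, the first has an empty start key and is skipped,
// so 3 regions * 100 puts = 300 edits would be expected to replicate.
int regionsWithNonEmptyStartKey = 3;
int expectedEdits = regionsWithNonEmptyStartKey * 100;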

Example 38 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From class TestMasterReplication, method rollWALAndWait.

private void rollWALAndWait(final HBaseTestingUtility utility, final TableName table, final byte[] row) throws IOException {
    final Admin admin = utility.getAdmin();
    final MiniHBaseCluster cluster = utility.getMiniHBaseCluster();
    // find the region that corresponds to the given row.
    HRegion region = null;
    for (HRegion candidate : cluster.getRegions(table)) {
        if (HRegion.rowIsInRange(candidate.getRegionInfo(), row)) {
            region = candidate;
            break;
        }
    }
    assertNotNull("Couldn't find the region for row '" + Arrays.toString(row) + "'", region);
    final CountDownLatch latch = new CountDownLatch(1);
    // listen for successful log rolls
    final WALActionsListener listener = new WALActionsListener.Base() {

        @Override
        public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
            latch.countDown();
        }
    };
    region.getWAL().registerWALActionsListener(listener);
    // request a roll
    admin.rollWALWriter(cluster.getServerHoldingRegion(region.getTableDesc().getTableName(), region.getRegionInfo().getRegionName()));
    // wait
    try {
        latch.await();
    } catch (InterruptedException exception) {
        LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. If later " + "replication tests fail, it's probably because we should still be waiting.");
        Thread.currentThread().interrupt();
    }
    region.getWAL().unregisterWALActionsListener(listener);
}
Also used : Path(org.apache.hadoop.fs.Path) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) WALActionsListener(org.apache.hadoop.hbase.regionserver.wal.WALActionsListener) Admin(org.apache.hadoop.hbase.client.Admin) CountDownLatch(java.util.concurrent.CountDownLatch)
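The helper blocks on the CountDownLatch until the WALActionsListener's postLogRoll callback fires, which guarantees the roll has completed before the test continues. A hypothetical call site (the utility, table name, and row are assumptions for illustration, not taken from the test) might look like:

// Force the WAL serving this row to roll, and wait for the roll to finish before
// making any replication assertions that depend on the new WAL file.
rollWALAndWait(utility1, tableName, Bytes.toBytes("row-0"));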

Example 39 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From class TestMultiSlaveReplication, method rollWALAndWait.

private void rollWALAndWait(final HBaseTestingUtility utility, final TableName table, final byte[] row) throws IOException {
    final Admin admin = utility.getAdmin();
    final MiniHBaseCluster cluster = utility.getMiniHBaseCluster();
    // find the region that corresponds to the given row.
    HRegion region = null;
    for (HRegion candidate : cluster.getRegions(table)) {
        if (HRegion.rowIsInRange(candidate.getRegionInfo(), row)) {
            region = candidate;
            break;
        }
    }
    assertNotNull("Couldn't find the region for row '" + Arrays.toString(row) + "'", region);
    final CountDownLatch latch = new CountDownLatch(1);
    // listen for successful log rolls
    final WALActionsListener listener = new WALActionsListener.Base() {

        @Override
        public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
            latch.countDown();
        }
    };
    region.getWAL().registerWALActionsListener(listener);
    // request a roll
    admin.rollWALWriter(cluster.getServerHoldingRegion(region.getTableDesc().getTableName(), region.getRegionInfo().getRegionName()));
    // wait
    try {
        latch.await();
    } catch (InterruptedException exception) {
        LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. If later " + "replication tests fail, it's probably because we should still be waiting.");
        Thread.currentThread().interrupt();
    }
    region.getWAL().unregisterWALActionsListener(listener);
}
Also used : Path(org.apache.hadoop.fs.Path) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) WALActionsListener(org.apache.hadoop.hbase.regionserver.wal.WALActionsListener) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) Admin(org.apache.hadoop.hbase.client.Admin) CountDownLatch(java.util.concurrent.CountDownLatch)

Example 40 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.

From class TestDurability, method testIncrement.

@Test
public void testIncrement() throws Exception {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] col1 = Bytes.toBytes("col1");
    byte[] col2 = Bytes.toBytes("col2");
    byte[] col3 = Bytes.toBytes("col3");
    // Setting up region
    final WALFactory wals = new WALFactory(CONF, null, ServerName.valueOf("TestIncrement", 16010, System.currentTimeMillis()).toString());
    byte[] tableName = Bytes.toBytes("TestIncrement");
    final WAL wal = wals.getWAL(tableName, null);
    HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
    // col1: amount = 0, 1 write back to WAL
    Increment inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 0);
    Result res = region.increment(inc1);
    assertEquals(1, res.size());
    assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col1)));
    verifyWALCount(wals, wal, 1);
    // col1: amount = 1, 1 write back to WAL
    inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 1);
    res = region.increment(inc1);
    assertEquals(1, res.size());
    assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
    verifyWALCount(wals, wal, 2);
    // col1: amount = 0, 0 write back to WAL
    inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 0);
    res = region.increment(inc1);
    assertEquals(1, res.size());
    assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
    verifyWALCount(wals, wal, 2);
    // col1: amount = 0, col2: amount = 0, col3: amount = 0
    // 1 write back to WAL
    inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 0);
    inc1.addColumn(FAMILY, col2, 0);
    inc1.addColumn(FAMILY, col3, 0);
    res = region.increment(inc1);
    assertEquals(3, res.size());
    assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
    assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col2)));
    assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col3)));
    verifyWALCount(wals, wal, 3);
    // col1: amount = 5, col2: amount = 4, col3: amount = 3
    // 1 write back to WAL
    inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 5);
    inc1.addColumn(FAMILY, col2, 4);
    inc1.addColumn(FAMILY, col3, 3);
    res = region.increment(inc1);
    assertEquals(3, res.size());
    assertEquals(6, Bytes.toLong(res.getValue(FAMILY, col1)));
    assertEquals(4, Bytes.toLong(res.getValue(FAMILY, col2)));
    assertEquals(3, Bytes.toLong(res.getValue(FAMILY, col3)));
    verifyWALCount(wals, wal, 4);
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion) WAL(org.apache.hadoop.hbase.wal.WAL) Increment(org.apache.hadoop.hbase.client.Increment) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
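The verifyWALCount helper referenced throughout the test is not shown in this listing. A minimal sketch of such a check, assuming the test has a FS FileSystem field and that the active WAL file can be located via AbstractFSWALProvider.getCurrentFileName (the exact WAL reader API varies across HBase versions), could look like this:

private void verifyWALCount(WALFactory wals, WAL log, int expected) throws Exception {
    // Sketch only: open a reader on the file currently backing the WAL and count its
    // entries, then compare with the number of appends the test expects.
    Path walPath = AbstractFSWALProvider.getCurrentFileName(log);
    WAL.Reader reader = wals.createReader(FS, walPath);
    int count = 0;
    WAL.Entry entry = new WAL.Entry();
    while (reader.next(entry) != null) {
        count++;
    }
    reader.close();
    assertEquals(expected, count);
}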

Aggregations

HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 148 usages
Test (org.junit.Test): 88 usages
Put (org.apache.hadoop.hbase.client.Put): 56 usages
Path (org.apache.hadoop.fs.Path): 40 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 40 usages
Scan (org.apache.hadoop.hbase.client.Scan): 37 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 36 usages
Cell (org.apache.hadoop.hbase.Cell): 35 usages
TableId (co.cask.cdap.data2.util.TableId): 32 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 28 usages
IOException (java.io.IOException): 26 usages
WAL (org.apache.hadoop.hbase.wal.WAL): 25 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 24 usages
ArrayList (java.util.ArrayList): 22 usages
TableName (org.apache.hadoop.hbase.TableName): 22 usages
Configuration (org.apache.hadoop.conf.Configuration): 21 usages
Result (org.apache.hadoop.hbase.client.Result): 21 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 21 usages
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 19 usages
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 19 usages