Example 26 with RegionServerServices

Use of org.apache.hadoop.hbase.regionserver.RegionServerServices in project hbase by apache.

From the class TestWALSplitToHFile, method testAfterAbortingFlush:

/**
 * Test that we can recover the data correctly after aborting a flush. In the
 * test, we first abort a flush after writing some data, then write more data
 * and flush again, and finally verify the data.
 */
@Test
public void testAfterAbortingFlush() throws IOException {
    Pair<TableDescriptor, RegionInfo> pair = setupTableAndRegion();
    TableDescriptor td = pair.getFirst();
    RegionInfo ri = pair.getSecond();
    // Write countPerFamily edits into the three families. Do a flush on one
    // of the families during the load of edits so its seqid is not the same
    // as the others, to verify we do the right thing when seqids differ.
    WAL wal = createWAL(this.conf, rootDir, logName);
    RegionServerServices rsServices = Mockito.mock(RegionServerServices.class);
    Mockito.doReturn(false).when(rsServices).isAborted();
    when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo", 10, 10));
    when(rsServices.getConfiguration()).thenReturn(conf);
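    // Install a store flusher that can be told to throw, so the test can abort a flush on demand.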
    Configuration customConf = new Configuration(this.conf);
    customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, AbstractTestWALReplay.CustomStoreFlusher.class.getName());
    HRegion region = HRegion.openHRegion(this.rootDir, ri, td, wal, customConf, rsServices, null);
    int writtenRowCount = 10;
    List<ColumnFamilyDescriptor> families = Arrays.asList(td.getColumnFamilies());
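    // Write the rows round-robin across the column families.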
    for (int i = 0; i < writtenRowCount; i++) {
        Put put = new Put(Bytes.toBytes(td.getTableName() + Integer.toString(i)));
        put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"), Bytes.toBytes("val"));
        region.put(put);
    }
    // Now assert edits made it in.
    RegionScanner scanner = region.getScanner(new Scan());
    assertEquals(writtenRowCount, getScannedCount(scanner));
    // Let us flush the region
    AbstractTestWALReplay.CustomStoreFlusher.throwExceptionWhenFlushing.set(true);
    try {
        region.flush(true);
        fail("Injected exception hasn't been thrown");
    } catch (IOException e) {
        LOG.info("Expected simulated exception when flushing region, {}", e.getMessage());
        // Simulate the server aborting.
        Mockito.doReturn(true).when(rsServices).isAborted();
        // The region normally does not accept writes after a
        // DroppedSnapshotException; we mock around that for this test.
        region.setClosing(false);
    }
    // Write more data.
    int moreRow = 10;
    for (int i = writtenRowCount; i < writtenRowCount + moreRow; i++) {
        Put put = new Put(Bytes.toBytes(td.getTableName() + Integer.toString(i)));
        put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"), Bytes.toBytes("val"));
        region.put(put);
    }
    writtenRowCount += moreRow;
    // Disable the injected exception and flush again.
    AbstractTestWALReplay.CustomStoreFlusher.throwExceptionWhenFlushing.set(false);
    try {
        region.flush(true);
    } catch (IOException t) {
        LOG.info("Expected exception when flushing region because server is stopped," + t.getMessage());
    }
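    // Close without flushing and shut down the WAL, as an aborting server would.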
    region.close(true);
    wal.shutdown();
    // Let us try to split and recover
    WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals);
    WAL wal2 = createWAL(this.conf, rootDir, logName);
    Mockito.doReturn(false).when(rsServices).isAborted();
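    // Re-opening the region replays the recovered edits produced by the WAL split.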
    HRegion region2 = HRegion.openHRegion(this.rootDir, ri, td, wal2, this.conf, rsServices, null);
    scanner = region2.getScanner(new Scan());
    assertEquals(writtenRowCount, getScannedCount(scanner));
}
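
The injected failure above comes from AbstractTestWALReplay.CustomStoreFlusher, whose source is not shown on this page. Below is a minimal sketch of what such a flusher looks like: it extends DefaultStoreFlusher and throws when a static flag is set. The exact flushSnapshot signature varies between HBase releases (newer versions add a writer-creation tracker parameter), so treat this as an illustration rather than the exact upstream class.

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;

// Sketch of an exception-injecting store flusher, modeled on
// AbstractTestWALReplay.CustomStoreFlusher; the flushSnapshot signature
// shown here follows HBase 2.x and differs between releases.
public class CustomStoreFlusher extends DefaultStoreFlusher {

    // Flipped by the test to make the next flush fail.
    public static final AtomicBoolean throwExceptionWhenFlushing = new AtomicBoolean(false);

    public CustomStoreFlusher(Configuration conf, HStore store) {
        super(conf, store);
    }

    @Override
    public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushSeqNum,
            MonitoredTask status, ThroughputController throughputController,
            FlushLifeCycleTracker tracker) throws IOException {
        if (throwExceptionWhenFlushing.get()) {
            // Simulate a flush failure; the region server reacts with a
            // DroppedSnapshotException and normally aborts.
            throw new IOException("Simulated exception by tests");
        }
        return super.flushSnapshot(snapshot, cacheFlushSeqNum, status, throughputController, tracker);
    }
}

Because the flusher class is resolved reflectively from the DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY configuration property, it only needs the (Configuration, HStore) constructor; no other registration is required.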
Also used:
RegionServerServices (org.apache.hadoop.hbase.regionserver.RegionServerServices)
Configuration (org.apache.hadoop.conf.Configuration)
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)
IOException (java.io.IOException)
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)
Put (org.apache.hadoop.hbase.client.Put)
HRegion (org.apache.hadoop.hbase.regionserver.HRegion)
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner)
Scan (org.apache.hadoop.hbase.client.Scan)
Test (org.junit.Test)

Aggregations

RegionServerServices (org.apache.hadoop.hbase.regionserver.RegionServerServices): 26
Configuration (org.apache.hadoop.conf.Configuration): 18
Test (org.junit.Test): 17
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 15
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 7
IOException (java.io.IOException): 5
Path (org.apache.hadoop.fs.Path): 5
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 5
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 5
RegionCoprocessorHost (org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost): 5
AtomicLong (java.util.concurrent.atomic.AtomicLong): 4
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 4
ReplicationPeer (org.apache.hadoop.hbase.replication.ReplicationPeer): 4
ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig): 4
TableName (org.apache.hadoop.hbase.TableName): 3
HasRegionServerServices (org.apache.hadoop.hbase.coprocessor.HasRegionServerServices): 3
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 2
Put (org.apache.hadoop.hbase.client.Put): 2
Scan (org.apache.hadoop.hbase.client.Scan): 2
SpaceQuotaStatus (org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus): 2