Search in sources:

Example 6 with RegionServerServices

use of org.apache.hadoop.hbase.regionserver.RegionServerServices in project hbase by apache.

From the class ReplicationObserver — the method preCommitStoreFile:

@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "NPE should never happen; if it does it is a bigger issue")
public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx, final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
    RegionCoprocessorEnvironment env = ctx.getEnvironment();
    Configuration conf = env.getConfiguration();
    // Nothing to record when no files were bulk loaded, or when bulk-load
    // replication is disabled in the configuration.
    boolean noFiles = pairs == null || pairs.isEmpty();
    if (noFiles || !conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) {
        LOG.debug("Skipping recording bulk load entries in preCommitStoreFile for bulkloaded " + "data replication.");
        return;
    }
    // Reaching through the environment to an HRegionServer is a private-API hack;
    // regions should not assume they are hosted inside a RegionServer. TODO: fix.
    RegionServerServices services = ((HasRegionServerServices) env).getRegionServerServices();
    Replication replication = (Replication) ((HRegionServer) services).getReplicationSourceService();
    replication.addHFileRefsToQueue(env.getRegionInfo().getTable(), family, pairs);
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) HasRegionServerServices(org.apache.hadoop.hbase.coprocessor.HasRegionServerServices) RegionServerServices(org.apache.hadoop.hbase.regionserver.RegionServerServices) Configuration(org.apache.hadoop.conf.Configuration) HasRegionServerServices(org.apache.hadoop.hbase.coprocessor.HasRegionServerServices)

Example 7 with RegionServerServices

use of org.apache.hadoop.hbase.regionserver.RegionServerServices in project hbase by apache.

From the class AbstractTestWALReplay — the method testReplayEditsAfterAbortingFlush:

/**
 * Test that we can recover the data correctly after aborting a flush. First we abort a
 * flush after writing some data, then we write more data and flush again; finally we
 * split the WAL, reopen the region, and verify all rows are present.
 */
@Test
public void testReplayEditsAfterAbortingFlush() throws IOException {
    final TableName tableName = TableName.valueOf("testReplayEditsAfterAbortingFlush");
    final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = CommonFSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final TableDescriptor htd = createBasic3FamilyHTD(tableName);
    // Create the region (and its WAL) once so the on-disk layout exists, then close it;
    // the region is reopened below with a custom configuration.
    HRegion region3 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtil.closeRegionAndWAL(region3);
    // Spread edits round-robin across the three families (see the put loops below).
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    // Mock region server services: initially not aborted; flipped to aborted after the
    // injected flush failure further down.
    RegionServerServices rsServices = Mockito.mock(RegionServerServices.class);
    Mockito.doReturn(false).when(rsServices).isAborted();
    when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo", 10, 10));
    when(rsServices.getConfiguration()).thenReturn(conf);
    // CustomStoreFlusher lets the test inject a flush failure on demand via the
    // throwExceptionWhenFlushing flag.
    Configuration customConf = new Configuration(this.conf);
    customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, CustomStoreFlusher.class.getName());
    HRegion region = HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal, customConf, rsServices, null);
    int writtenRowCount = 10;
    List<ColumnFamilyDescriptor> families = Arrays.asList((htd.getColumnFamilies()));
    for (int i = 0; i < writtenRowCount; i++) {
        Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
        put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"), Bytes.toBytes("val"));
        region.put(put);
    }
    // Now assert edits made it in.
    // NOTE(review): this scanner is never closed; harmless in a test but close() would be tidier.
    RegionScanner scanner = region.getScanner(new Scan());
    assertEquals(writtenRowCount, getScannedCount(scanner));
    // Arm the injected failure and attempt a flush; it must throw.
    CustomStoreFlusher.throwExceptionWhenFlushing.set(true);
    try {
        region.flush(true);
        fail("Injected exception hasn't been thrown");
    } catch (IOException e) {
        LOG.info("Expected simulated exception when flushing region, {}", e.getMessage());
        // Simulate the server aborting in response to the failed flush.
        Mockito.doReturn(true).when(rsServices).isAborted();
        // A region normally refuses writes after a DroppedSnapshotException; clear the
        // closing flag so this test can keep writing to it. We mock around it here.
        region.setClosing(false);
    }
    // Write more data after the aborted flush; these edits live only in the WAL until
    // the successful flush below.
    int moreRow = 10;
    for (int i = writtenRowCount; i < writtenRowCount + moreRow; i++) {
        Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
        put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"), Bytes.toBytes("val"));
        region.put(put);
    }
    writtenRowCount += moreRow;
    // Disarm the injected failure and flush again; may still throw because the mocked
    // server reports itself aborted.
    CustomStoreFlusher.throwExceptionWhenFlushing.set(false);
    try {
        region.flush(true);
    } catch (IOException t) {
        LOG.info("Expected exception when flushing region because server is stopped," + t.getMessage());
    }
    region.close(true);
    wal.shutdown();
    // Split the WAL and replay the recovered edits into a freshly opened region.
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    Mockito.doReturn(false).when(rsServices).isAborted();
    HRegion region2 = HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal2, this.conf, rsServices, null);
    // All rows written before and after the aborted flush must be recovered.
    scanner = region2.getScanner(new Scan());
    assertEquals(writtenRowCount, getScannedCount(scanner));
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) RegionServerServices(org.apache.hadoop.hbase.regionserver.RegionServerServices) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) IOException(java.io.IOException) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test)

Example 8 with RegionServerServices

use of org.apache.hadoop.hbase.regionserver.RegionServerServices in project hbase by apache.

From the class AccessController — the method start:

/* ---- MasterObserver implementation ---- */
/**
 * Initializes this coprocessor from the hosting environment: reads authorization and
 * cell-ACL related configuration, then wires up the shared {@code ZKPermissionWatcher}
 * and {@code AccessChecker} from whichever services (master, region server, or region)
 * the environment exposes.
 *
 * @param env the coprocessor environment this instance was loaded into
 * @throws IOException if the configured HFile format version cannot be determined
 */
@Override
public void start(CoprocessorEnvironment env) throws IOException {
    CompoundConfiguration conf = new CompoundConfiguration();
    conf.add(env.getConfiguration());
    authorizationEnabled = AccessChecker.isAuthorizationSupported(conf);
    if (!authorizationEnabled) {
        LOG.warn("AccessController has been loaded with authorization checks DISABLED!");
    }
    shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
    // Cell-level ACLs are persisted as tags, which need at least this HFile version.
    cellFeaturesEnabled = (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS);
    if (!cellFeaturesEnabled) {
        LOG.info("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS + " is required to persist cell ACLs. Consider setting " + HFile.FORMAT_VERSION_KEY + " accordingly.");
    }
    if (env instanceof MasterCoprocessorEnvironment) {
        // Running on the HMaster.
        MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment) env;
        if (mEnv instanceof HasMasterServices) {
            MasterServices masterServices = ((HasMasterServices) mEnv).getMasterServices();
            zkPermissionWatcher = masterServices.getZKPermissionWatcher();
            accessChecker = masterServices.getAccessChecker();
        }
    } else if (env instanceof RegionServerCoprocessorEnvironment) {
        // Running on a region server.
        RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
        if (rsEnv instanceof HasRegionServerServices) {
            initFromRegionServerServices(((HasRegionServerServices) rsEnv).getRegionServerServices());
        }
    } else if (env instanceof RegionCoprocessorEnvironment) {
        // Running at region scope: also fold table-descriptor values into the config.
        regionEnv = (RegionCoprocessorEnvironment) env;
        conf.addBytesMap(regionEnv.getRegion().getTableDescriptor().getValues());
        compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
        if (regionEnv instanceof HasRegionServerServices) {
            initFromRegionServerServices(((HasRegionServerServices) regionEnv).getRegionServerServices());
        }
    }
    Preconditions.checkState(zkPermissionWatcher != null, "ZKPermissionWatcher is null");
    Preconditions.checkState(accessChecker != null, "AccessChecker is null");
    // set the user-provider.
    this.userProvider = UserProvider.instantiate(env.getConfiguration());
    tableAcls = new MapMaker().weakValues().makeMap();
}

/**
 * Pulls the shared {@code ZKPermissionWatcher} and {@code AccessChecker} out of the
 * given region server services. Used by both the region-server and region start paths.
 */
private void initFromRegionServerServices(RegionServerServices rsServices) {
    zkPermissionWatcher = rsServices.getZKPermissionWatcher();
    accessChecker = rsServices.getAccessChecker();
}
Also used : HasMasterServices(org.apache.hadoop.hbase.coprocessor.HasMasterServices) RegionServerCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) HasRegionServerServices(org.apache.hadoop.hbase.coprocessor.HasRegionServerServices) RegionServerServices(org.apache.hadoop.hbase.regionserver.RegionServerServices) HasRegionServerServices(org.apache.hadoop.hbase.coprocessor.HasRegionServerServices) MapMaker(org.apache.hbase.thirdparty.com.google.common.collect.MapMaker) CompoundConfiguration(org.apache.hadoop.hbase.CompoundConfiguration) MasterCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment) MasterServices(org.apache.hadoop.hbase.master.MasterServices) HasMasterServices(org.apache.hadoop.hbase.coprocessor.HasMasterServices)

Example 9 with RegionServerServices

use of org.apache.hadoop.hbase.regionserver.RegionServerServices in project hbase by apache.

From the class TestRegionObserverPreFlushAndPreCompact — the method getRegionCoprocessorHost:

private RegionCoprocessorHost getRegionCoprocessorHost() {
    // Build a RegionCoprocessorHost around a mocked HRegion. The mock poses as the
    // first hbase:meta region and reports an hbase:meta-named TableDescriptor when
    // the host asks for schema; RegionServerServices is mocked as well.
    final RegionInfo metaRegionInfo = RegionInfoBuilder.FIRST_META_REGIONINFO;
    final TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(metaRegionInfo.getTable()).build();
    final HRegion region = Mockito.mock(HRegion.class);
    Mockito.when(region.getRegionInfo()).thenReturn(metaRegionInfo);
    Mockito.when(region.getTableDescriptor()).thenReturn(tableDescriptor);
    final RegionServerServices services = Mockito.mock(RegionServerServices.class);
    final Configuration conf = HBaseConfiguration.create();
    // Register the test coprocessor defined above so the host loads it.
    conf.set(REGION_COPROCESSOR_CONF_KEY, TestRegionObserver.class.getName());
    return new RegionCoprocessorHost(region, services, conf);
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RegionServerServices(org.apache.hadoop.hbase.regionserver.RegionServerServices) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) RegionCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)

Example 10 with RegionServerServices

use of org.apache.hadoop.hbase.regionserver.RegionServerServices in project hbase by apache.

From the class TestCoprocessorConfiguration — the method testRegionServerCoprocessorHostDefaults:

@Test
public void testRegionServerCoprocessorHostDefaults() throws Exception {
    // Use a fresh copy of the shared configuration so this test cannot leak
    // settings into other cases.
    final Configuration localConf = new Configuration(CONF);
    final RegionServerServices services = mock(RegionServerServices.class);
    // Reset the flag, then constructing the host should (or should not) load the
    // system coprocessors according to the compiled-in default.
    systemCoprocessorLoaded.set(false);
    new RegionServerCoprocessorHost(services, localConf);
    assertEquals("System coprocessors loading default was not honored", CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get());
}
Also used : RegionServerServices(org.apache.hadoop.hbase.regionserver.RegionServerServices) RegionServerCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Test(org.junit.Test)

Aggregations

RegionServerServices (org.apache.hadoop.hbase.regionserver.RegionServerServices)26 Configuration (org.apache.hadoop.conf.Configuration)18 Test (org.junit.Test)17 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)15 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)7 IOException (java.io.IOException)5 Path (org.apache.hadoop.fs.Path)5 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)5 RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment)5 RegionCoprocessorHost (org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost)5 AtomicLong (java.util.concurrent.atomic.AtomicLong)4 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)4 ReplicationPeer (org.apache.hadoop.hbase.replication.ReplicationPeer)4 ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig)4 TableName (org.apache.hadoop.hbase.TableName)3 HasRegionServerServices (org.apache.hadoop.hbase.coprocessor.HasRegionServerServices)3 ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)2 Put (org.apache.hadoop.hbase.client.Put)2 Scan (org.apache.hadoop.hbase.client.Scan)2 SpaceQuotaStatus (org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus)2