Use of org.apache.hadoop.hbase.regionserver.regionreplication.RegionReplicationBufferManager in project hbase by apache.
From the class HRegionServer, method preRegistrationInitialization.
/**
 * All initialization needed before we go register with Master.<br>
 * Do bare minimum. Do bulk of initializations AFTER we've connected to the Master.<br>
 * In here we just put up the RpcServer, setup Connection, and ZooKeeper.
 */
private void preRegistrationInitialization() {
  try {
    initializeZooKeeper();
    setupClusterConnection();
    bootstrapNodeManager = new BootstrapNodeManager(asyncClusterConnection, masterAddressTracker);
    regionReplicationBufferManager = new RegionReplicationBufferManager(this);
    // Setup RPC client for master communication
    this.rpcClient = asyncClusterConnection.getRpcClient();
  } catch (Throwable t) {
    // Call stop if error or process will stick around for ever since server
    // puts up non-daemon threads.
    this.rpcServices.stop();
    abort("Initialization of RS failed. Hence aborting RS.", t);
  }
}
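For context, the buffer manager constructed above also has a shutdown counterpart when the region server stops. Below is a minimal sketch of that step, assuming RegionReplicationBufferManager exposes a stop() method that shuts down its internal executor; the wrapping method name and exact placement in HRegionServer are assumptions for illustration, not the verbatim source.

// Hedged sketch: shutdown counterpart to the construction above (illustrative only).
private void stopRegionReplicationBufferManager() {
  // Guard against a failed or partial startup, as other HRegionServer shutdown hooks do.
  if (this.regionReplicationBufferManager != null) {
    this.regionReplicationBufferManager.stop();
  }
}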
Use of org.apache.hadoop.hbase.regionserver.regionreplication.RegionReplicationBufferManager in project hbase by apache.
From the class TestReplicateToReplica, method setUp.
@Before
public void setUp() throws IOException {
  TO_ADD_AFTER_PREPARE_FLUSH = new ArrayList<>();
  tableName = name.getTableName();
  testDir = UTIL.getDataTestDir(tableName.getNameAsString());
  Configuration conf = UTIL.getConfiguration();
  conf.set(HConstants.HBASE_DIR, testDir.toString());
  td = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
    .setRegionReplication(2)
    .setRegionMemStoreReplication(true)
    .build();
  reqAndResps = new ArrayDeque<>();
  queueReqAndResps = true;
  conn = mock(AsyncClusterConnection.class);
  when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())).thenAnswer(i -> {
    if (queueReqAndResps) {
      @SuppressWarnings("unchecked")
      List<WAL.Entry> entries = i.getArgument(1, List.class);
      CompletableFuture<Void> future = new CompletableFuture<>();
      reqAndResps.add(Pair.newPair(entries, future));
      return future;
    } else {
      return CompletableFuture.completedFuture(null);
    }
  });
  flushRequester = mock(FlushRequester.class);
  rss = mock(RegionServerServices.class);
  when(rss.getServerName()).thenReturn(ServerName.valueOf("foo", 1, 1));
  when(rss.getConfiguration()).thenReturn(conf);
  when(rss.getRegionServerAccounting()).thenReturn(new RegionServerAccounting(conf));
  when(rss.getExecutorService()).thenReturn(EXEC);
  when(rss.getAsyncClusterConnection()).thenReturn(conn);
  when(rss.getFlushRequester()).thenReturn(flushRequester);
  manager = new RegionReplicationBufferManager(rss);
  when(rss.getRegionReplicationBufferManager()).thenReturn(manager);
  RegionInfo primaryHri = RegionInfoBuilder.newBuilder(td.getTableName()).build();
  RegionInfo secondaryHri = RegionReplicaUtil.getRegionInfoForReplica(primaryHri, 1);
  walFactory = new WALFactory(conf, UUID.randomUUID().toString());
  WAL wal = walFactory.getWAL(primaryHri);
  primary = HRegion.createHRegion(primaryHri, testDir, conf, td, wal);
  primary.close();
  primary = HRegion.openHRegion(testDir, primaryHri, td, wal, conf, rss, null);
  secondary = HRegion.openHRegion(secondaryHri, td, null, conf, rss, null);
  when(rss.getRegions()).then(i -> {
    return Arrays.asList(primary, secondary);
  });
  // process the open events
  replicateAll();
}
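The setUp above finishes by calling replicateAll(), a test helper that is not part of this excerpt. Below is a minimal sketch of what such a helper might look like, assuming it drains the replicate() requests queued in reqAndResps and completes their futures so the buffer manager sees each replication as finished; the actual helper in TestReplicateToReplica also replays the WAL entries on the secondary region, which is only indicated by a comment here.

// Hedged sketch of the replicateAll() helper used above (illustrative only).
private void replicateAll() throws IOException {
  for (Pair<List<WAL.Entry>, CompletableFuture<Void>> pair;;) {
    pair = reqAndResps.poll();
    if (pair == null) {
      break;
    }
    // ... replay pair.getFirst() on the secondary region (details omitted) ...
    // Complete the pending future so the buffered replication is marked done.
    pair.getSecond().complete(null);
  }
}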