use of org.apache.hadoop.hbase.regionserver.RegionServerServices in project hbase by apache.
the class TestReplicationSource method testReplicationSourceInitializingMetric.
@Test
public void testReplicationSourceInitializingMetric() throws IOException {
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  conf.setBoolean("replication.source.regionserver.abort", false);
  ReplicationSource rs = new ReplicationSource();
  RegionServerServices rss = setupForAbortTests(rs, conf, BadReplicationEndpoint.class.getName());
  try {
    rs.startup();
    assertTrue(rs.isSourceActive());
    Waiter.waitFor(conf, 1000, () -> rs.getSourceMetrics().getSourceInitializing() == 1);
    BadReplicationEndpoint.failing = false;
    Waiter.waitFor(conf, 1000, () -> rs.getSourceMetrics().getSourceInitializing() == 0);
  } finally {
    rs.terminate("Done");
    rss.stop("Done");
  }
}
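The test above keeps the "source initializing" metric at 1 by pointing the source at BadReplicationEndpoint, a helper defined elsewhere in this test class together with setupForAbortTests. A minimal sketch of what such an endpoint could look like, assuming it extends the class's DoNothingReplicationEndpoint and keeps failing until the static failing flag is cleared; this is an illustration, not necessarily the exact HBase implementation:

public static class BadReplicationEndpoint extends DoNothingReplicationEndpoint {
  // The test flips this flag to let initialization complete.
  static boolean failing = true;

  @Override
  public synchronized UUID getPeerUUID() {
    if (failing) {
      // Throwing here keeps ReplicationSource in its init retry loop,
      // so getSourceInitializing() stays at 1 until the flag is cleared.
      throw new RuntimeException("Intentional failure while initializing");
    }
    return super.getPeerUUID();
  }
}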
use of org.apache.hadoop.hbase.regionserver.RegionServerServices in project hbase by apache.
the class TestReplicationSource method testDefaultSkipsMetaWAL.
/**
* Test that the default ReplicationSource skips queuing hbase:meta WAL files.
*/
@Test
public void testDefaultSkipsMetaWAL() throws IOException {
  ReplicationSource rs = new ReplicationSource();
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  conf.setInt("replication.source.maxretriesmultiplier", 1);
  ReplicationPeer mockPeer = Mockito.mock(ReplicationPeer.class);
  Mockito.when(mockPeer.getConfiguration()).thenReturn(conf);
  Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L);
  ReplicationPeerConfig peerConfig = Mockito.mock(ReplicationPeerConfig.class);
  Mockito.when(peerConfig.getReplicationEndpointImpl())
    .thenReturn(DoNothingReplicationEndpoint.class.getName());
  Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig);
  ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class);
  Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong());
  Mockito.when(manager.getGlobalMetrics())
    .thenReturn(mock(MetricsReplicationGlobalSourceSource.class));
  String queueId = "qid";
  RegionServerServices rss =
    TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
  rs.init(conf, null, manager, null, mockPeer, rss, queueId, null, p -> OptionalLong.empty(),
    new MetricsSource(queueId));
  try {
    rs.startup();
    assertTrue(rs.isSourceActive());
    assertEquals(0, rs.getSourceMetrics().getSizeOfLogQueue());
    rs.enqueueLog(new Path("a.1" + META_WAL_PROVIDER_ID));
    assertEquals(0, rs.getSourceMetrics().getSizeOfLogQueue());
    rs.enqueueLog(new Path("a.1"));
    assertEquals(1, rs.getSourceMetrics().getSizeOfLogQueue());
  } finally {
    rs.terminate("Done");
    rss.stop("Done");
  }
}
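The two enqueueLog calls hinge on the WAL file name suffix: META_WAL_PROVIDER_ID is the ".meta" suffix used for hbase:meta WAL files, and the default ReplicationSource drops such files instead of queuing them, which is why the log queue size only grows for the second path. A rough sketch of that check, as an illustration rather than the exact ReplicationSource code:

// Illustrative only; the actual check lives inside ReplicationSource.enqueueLog().
boolean isMetaWal = walPath.getName().endsWith(META_WAL_PROVIDER_ID); // suffix is ".meta"
if (isMetaWal) {
  return; // hbase:meta WALs are never queued for replication by the default source
}
// ...otherwise the WAL is added to the log queue and getSizeOfLogQueue() grows...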
use of org.apache.hadoop.hbase.regionserver.RegionServerServices in project hbase by apache.
the class TestRegionServerSpaceQuotaManager method testExceptionOnPolicyEnforcementEnable.
@Test
public void testExceptionOnPolicyEnforcementEnable() throws Exception {
  final TableName tableName = TableName.valueOf("foo");
  final SpaceQuotaSnapshot snapshot =
    new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 2048L);
  RegionServerServices rss = mock(RegionServerServices.class);
  SpaceViolationPolicyEnforcementFactory factory = mock(SpaceViolationPolicyEnforcementFactory.class);
  SpaceViolationPolicyEnforcement enforcement = mock(SpaceViolationPolicyEnforcement.class);
  RegionServerSpaceQuotaManager realManager = new RegionServerSpaceQuotaManager(rss, factory);
  when(factory.create(rss, tableName, snapshot)).thenReturn(enforcement);
  doThrow(new IOException("Failed for test!")).when(enforcement).enable();
  realManager.enforceViolationPolicy(tableName, snapshot);
  Map<TableName, SpaceViolationPolicyEnforcement> enforcements = realManager.copyActiveEnforcements();
  assertTrue("Expected active enforcements to be empty, but were " + enforcements, enforcements.isEmpty());
}
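For contrast, a hypothetical companion assertion using the same mocks could check that a policy whose enable() call succeeds does end up in the active set. This is a sketch under that assumption, not part of the original test:

// Sketch only: reuses tableName, snapshot, enforcement and realManager from above.
Mockito.reset(enforcement); // drop the doThrow stubbing so enable() is a no-op again
realManager.enforceViolationPolicy(tableName, snapshot);
assertTrue("Policy should be tracked once enable() succeeds",
  realManager.copyActiveEnforcements().containsKey(tableName));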
use of org.apache.hadoop.hbase.regionserver.RegionServerServices in project hbase by apache.
the class TestAsyncFSWAL method testBrokenWriter.
@Test
public void testBrokenWriter() throws Exception {
  RegionServerServices services = mock(RegionServerServices.class);
  when(services.getConfiguration()).thenReturn(CONF);
  TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("table"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build();
  RegionInfo ri = RegionInfoBuilder.newBuilder(td.getTableName()).build();
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : td.getColumnFamilyNames()) {
    scopes.put(fam, 0);
  }
  long timestamp = EnvironmentEdgeManager.currentTime();
  String testName = currentTest.getMethodName();
  AtomicInteger failedCount = new AtomicInteger(0);
  try (LogRoller roller = new LogRoller(services);
    AsyncFSWAL wal = new AsyncFSWAL(FS, CommonFSUtils.getWALRootDir(CONF), DIR.toString(),
      testName, CONF, null, true, null, null, GROUP, CHANNEL_CLASS) {

      @Override
      protected AsyncWriter createWriterInstance(Path path) throws IOException {
        AsyncWriter writer = super.createWriterInstance(path);
        return new AsyncWriter() {

          @Override
          public void close() throws IOException {
            writer.close();
          }

          @Override
          public long getLength() {
            return writer.getLength();
          }

          @Override
          public long getSyncedLength() {
            return writer.getSyncedLength();
          }

          @Override
          public CompletableFuture<Long> sync(boolean forceSync) {
            CompletableFuture<Long> result = writer.sync(forceSync);
            if (failedCount.incrementAndGet() < 1000) {
              CompletableFuture<Long> future = new CompletableFuture<>();
              FutureUtils.addListener(result,
                (r, e) -> future.completeExceptionally(new IOException("Inject Error")));
              return future;
            } else {
              return result;
            }
          }

          @Override
          public void append(Entry entry) {
            writer.append(entry);
          }
        };
      }
    }) {
    wal.init();
    roller.addWAL(wal);
    roller.start();
    int numThreads = 10;
    AtomicReference<Exception> error = new AtomicReference<>();
    Thread[] threads = new Thread[numThreads];
    for (int i = 0; i < 10; i++) {
      final int index = i;
      threads[index] = new Thread("Write-Thread-" + index) {

        @Override
        public void run() {
          byte[] row = Bytes.toBytes("row" + index);
          WALEdit cols = new WALEdit();
          cols.add(new KeyValue(row, row, row, timestamp + index, row));
          WALKeyImpl key = new WALKeyImpl(ri.getEncodedNameAsBytes(), td.getTableName(),
            SequenceId.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE,
            HConstants.NO_NONCE, mvcc, scopes);
          try {
            wal.append(ri, key, cols, true);
          } catch (IOException e) {
            // should not happen
            throw new UncheckedIOException(e);
          }
          try {
            wal.sync();
          } catch (IOException e) {
            error.set(e);
          }
        }
      };
    }
    for (Thread t : threads) {
      t.start();
    }
    for (Thread t : threads) {
      t.join();
    }
    assertNull(error.get());
  }
}
use of org.apache.hadoop.hbase.regionserver.RegionServerServices in project hbase by apache.
the class AbstractTestFSWAL method createHoldingHRegion.
private HRegion createHoldingHRegion(Configuration conf, TableDescriptor htd, WAL wal)
  throws IOException {
  RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
  ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
    MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
  TEST_UTIL.createLocalHRegion(hri, CONF, htd, wal).close();
  RegionServerServices rsServices = mock(RegionServerServices.class);
  when(rsServices.getServerName()).thenReturn(ServerName.valueOf("localhost:12345", 123456));
  when(rsServices.getConfiguration()).thenReturn(conf);
  return HRegion.openHRegion(TEST_UTIL.getDataTestDir(), hri, htd, wal, conf, rsServices, null);
}
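A hedged usage sketch for the helper above, assuming the caller has already built a TableDescriptor and a WAL the way other tests in this class do; the variable names are illustrative:

// Illustrative caller; htd and wal are assumed to exist in the surrounding test.
HRegion region = createHoldingHRegion(CONF, htd, wal);
try {
  // exercise the region while it holds on to the supplied WAL
} finally {
  region.close();
}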