Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class TestRecoveredEditsReplayAndAbort, method test:
@Test
public void test() throws Exception {
  // set flush size to 10MB
  CONF.setInt("hbase.hregion.memstore.flush.size", 1024 * 1024 * 10);
  // set the report interval to a very small value
  CONF.setInt("hbase.hstore.report.interval.edits", 1);
  CONF.setInt("hbase.hstore.report.period", 0);
  // mock a RegionServerServices
  final RegionServerAccounting rsAccounting = new RegionServerAccounting(CONF);
  RegionServerServices rs = Mockito.mock(RegionServerServices.class);
  ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
    MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
  Mockito.when(rs.getRegionServerAccounting()).thenReturn(rsAccounting);
  Mockito.when(rs.isAborted()).thenReturn(false);
  Mockito.when(rs.getNonceManager()).thenReturn(null);
  Mockito.when(rs.getServerName()).thenReturn(ServerName.valueOf("test", 0, 111));
  Mockito.when(rs.getConfiguration()).thenReturn(CONF);
  // create a region
  TableName testTable = TableName.valueOf("testRecoveredEidtsReplayAndAbort");
  TableDescriptor htd = TableDescriptorBuilder.newBuilder(testTable)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).build()).build();
  RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
  Path logDir = TEST_UTIL.getDataTestDirOnTestFS("TestRecoveredEidtsReplayAndAbort.log");
  final WAL wal = HBaseTestingUtil.createWal(CONF, logDir, info);
  Path rootDir = TEST_UTIL.getDataTestDir();
  Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable());
  HRegionFileSystem.createRegionOnFileSystem(CONF, TEST_UTIL.getTestFileSystem(), tableDir, info);
  region = HRegion.newHRegion(tableDir, wal, TEST_UTIL.getTestFileSystem(), CONF, info, htd, rs);
  // create some recovered.edits
  final WALFactory wals = new WALFactory(CONF, method);
  try {
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
    Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
    long maxSeqId = 1200;
    long minSeqId = 1000;
    long totalEdits = maxSeqId - minSeqId;
    for (long i = minSeqId; i <= maxSeqId; i += 100) {
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      LOG.info("Begin to write recovered.edits : " + recoveredEdits);
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
      for (long j = i; j < i + 100; j++) {
        long time = System.nanoTime();
        WALEdit edit = new WALEdit();
        // 200KB kv
        byte[] value = new byte[200 * 1024];
        random.nextBytes(value);
        edit.add(new KeyValue(row, fam1, Bytes.toBytes(j), time, KeyValue.Type.Put, value));
        writer.append(new WAL.Entry(
          new WALKeyImpl(regionName, tableName, j, time, HConstants.DEFAULT_CLUSTER_ID), edit));
      }
      writer.close();
    }
    TaskMonitor.get().createStatus(method);
    // try to replay the edits
    try {
      region.initialize(new CancelableProgressable() {
        private long replayedEdits = 0;

        @Override
        public boolean progress() {
          replayedEdits++;
          // during replay, rsAccounting should align with the global memstore, because
          // there is only one memstore here
          Assert.assertEquals(rsAccounting.getGlobalMemStoreDataSize(), region.getMemStoreDataSize());
          Assert.assertEquals(rsAccounting.getGlobalMemStoreHeapSize(), region.getMemStoreHeapSize());
          Assert.assertEquals(rsAccounting.getGlobalMemStoreOffHeapSize(),
            region.getMemStoreOffHeapSize());
          // abort the replay before it finishes, leaving some edits in memory
          return replayedEdits < totalEdits - 10;
        }
      });
      Assert.fail("Should not reach here");
    } catch (IOException t) {
      LOG.info("Current memstore: " + region.getMemStoreDataSize() + ", "
        + region.getMemStoreHeapSize() + ", " + region.getMemStoreOffHeapSize());
    }
    // After aborting the replay, there should be no data left in memory
    Assert.assertEquals(0, rsAccounting.getGlobalMemStoreDataSize());
    Assert.assertEquals(0, region.getMemStoreDataSize());
    // All the chunks in the MSLAB should be recycled; otherwise there might be
    // a memory leak.
    Assert.assertEquals(0, ChunkCreator.getInstance().numberOfMappedChunks());
  } finally {
    HBaseTestingUtil.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
  }
}
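The WALKeyImpl construction buried in the inner loop above is the five-argument form. Pulled out as a hypothetical helper (the method name and parameters are illustrative, not part of the test), the key/edit pairing looks like this:

// Hypothetical helper sketch: append one Put cell as a recovered-edits entry,
// mirroring the writer.append(...) call in the test above.
static void appendRecoveredEdit(WALProvider.Writer writer, byte[] encodedRegionName,
    TableName tableName, byte[] row, byte[] family, long seqId, byte[] value)
    throws IOException {
  long time = System.nanoTime();
  WALEdit edit = new WALEdit();
  edit.add(new KeyValue(row, family, Bytes.toBytes(seqId), time, KeyValue.Type.Put, value));
  // WALKeyImpl(encoded region name, table, sequence id, write time, cluster id)
  WALKeyImpl key = new WALKeyImpl(encodedRegionName, tableName, seqId, time,
    HConstants.DEFAULT_CLUSTER_ID);
  writer.append(new WAL.Entry(key, edit));
}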
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class SyncReplicationTestBase, method verifyReplicationRequestRejection:
protected final void verifyReplicationRequestRejection(HBaseTestingUtil utility,
    boolean expectedRejection) throws Exception {
  HRegionServer regionServer = utility.getRSForFirstRegionInTable(TABLE_NAME);
  AsyncClusterConnection connection = regionServer.getAsyncClusterConnection();
  Entry[] entries = new Entry[10];
  for (int i = 0; i < entries.length; i++) {
    entries[i] = new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0),
      new WALEdit());
  }
  if (!expectedRejection) {
    ReplicationProtobufUtil.replicateWALEntry(
      connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null,
      HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
  } else {
    try {
      ReplicationProtobufUtil.replicateWALEntry(
        connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null,
        HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
      fail("Should throw IOException when sync-replication state is in A or DA");
    } catch (RemoteException e) {
      assertRejection(e.unwrapRemoteException());
    } catch (DoNotRetryIOException e) {
      assertRejection(e);
    }
  }
}
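This probe only needs structurally valid entries, so the three-argument WALKeyImpl constructor (encoded region name, table, sequence id) paired with an empty WALEdit is enough. The minimal pattern, assuming TABLE_NAME is in scope as in the test base class:

// Minimal valid WAL entries: each key carries just region name bytes, table, and sequence id;
// the payload is an empty WALEdit, since only the request's acceptance is being tested.
Entry[] probes = new Entry[10];
for (int i = 0; i < probes.length; i++) {
  probes[i] = new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0),
    new WALEdit());
}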
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class TestReplicationSmallTests, method testReplicationInReplay:
/**
 * Test for HBASE-15259: WALEdits under replay will also be replicated.
 */
@Test
public void testReplicationInReplay() throws Exception {
  final TableName tableName = htable1.getName();
  HRegion region = UTIL1.getMiniHBaseCluster().getRegions(tableName).get(0);
  RegionInfo hri = region.getRegionInfo();
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htable1.getDescriptor().getColumnFamilyNames()) {
    scopes.put(fam, 1);
  }
  final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  int index = UTIL1.getMiniHBaseCluster().getServerWith(hri.getRegionName());
  WAL wal = UTIL1.getMiniHBaseCluster().getRegionServer(index).getWAL(region.getRegionInfo());
  final byte[] rowName = Bytes.toBytes("testReplicationInReplay");
  final byte[] qualifier = Bytes.toBytes("q");
  final byte[] value = Bytes.toBytes("v");
  WALEdit edit = new WALEdit(true); // true => mark this edit as a replay edit
  long now = EnvironmentEdgeManager.currentTime();
  edit.add(new KeyValue(rowName, famName, qualifier, now, value));
  WALKeyImpl walKey = new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes);
  wal.appendData(hri, walKey, edit);
  wal.sync();
  Get get = new Get(rowName);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      break;
    }
    Result res = htable2.get(get);
    if (res.size() >= 1) {
      fail("Not supposed to be replicated for " + Bytes.toString(res.getRow()));
    } else {
      LOG.info("Row not replicated, let's wait a bit more...");
      Thread.sleep(SLEEP_TIME);
    }
  }
}
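The constructor exercised here is the variant taking an mvcc and a per-family replication-scope map; the literal 1 in the loop above is HConstants.REPLICATION_SCOPE_GLOBAL. Condensed to just the key construction (famName and hri as in the test):

// Scope map: column family -> replication scope (1 == HConstants.REPLICATION_SCOPE_GLOBAL).
NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
scopes.put(famName, HConstants.REPLICATION_SCOPE_GLOBAL);
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
// WALKeyImpl(encoded region name, table, write time, mvcc, replication scopes)
WALKeyImpl walKey = new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName,
  EnvironmentEdgeManager.currentTime(), mvcc, scopes);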
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class HRegion, method doWALAppend:
/**
 * @return writeEntry associated with this append
 */
private WriteEntry doWALAppend(WALEdit walEdit, Durability durability, List<UUID> clusterIds,
    long now, long nonceGroup, long nonce, long origLogSeqNum) throws IOException {
  Preconditions.checkArgument(walEdit != null && !walEdit.isEmpty(), "WALEdit is null or empty!");
  Preconditions.checkArgument(!walEdit.isReplay() || origLogSeqNum != SequenceId.NO_SEQUENCE_ID,
    "Invalid replay sequence Id for replay WALEdit!");
  // Using the default cluster id, as this can only happen in the originating cluster.
  // A slave cluster receives the final value (not the delta) as a Put. Note that the
  // replay branch builds its key without the region's replication scope.
  WALKeyImpl walKey = walEdit.isReplay()
    ? new WALKeyImpl(this.getRegionInfo().getEncodedNameAsBytes(),
      this.htableDescriptor.getTableName(), SequenceId.NO_SEQUENCE_ID, now, clusterIds,
      nonceGroup, nonce, mvcc)
    : new WALKeyImpl(this.getRegionInfo().getEncodedNameAsBytes(),
      this.htableDescriptor.getTableName(), SequenceId.NO_SEQUENCE_ID, now, clusterIds,
      nonceGroup, nonce, mvcc, this.getReplicationScope());
  if (walEdit.isReplay()) {
    walKey.setOrigLogSeqNum(origLogSeqNum);
  }
  // don't call the coprocessor hook for WAL writes caused by
  // system lifecycle events like flushes or compactions
  if (this.coprocessorHost != null && !walEdit.isMetaEdit()) {
    this.coprocessorHost.preWALAppend(walKey, walEdit);
  }
  ServerCall<?> rpcCall = RpcServer.getCurrentServerCallWithCellScanner().orElse(null);
  try {
    long txid = this.wal.appendData(this.getRegionInfo(), walKey, walEdit);
    WriteEntry writeEntry = walKey.getWriteEntry();
    regionReplicationSink.ifPresent(sink -> writeEntry.attachCompletionAction(() -> {
      sink.add(walKey, walEdit, rpcCall);
    }));
    // Call sync on our edit.
    if (txid != 0) {
      sync(txid, durability);
    }
    return writeEntry;
  } catch (IOException ioe) {
    if (walKey.getWriteEntry() != null) {
      mvcc.complete(walKey.getWriteEntry());
    }
    throw ioe;
  }
}
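The error path above completes the mvcc WriteEntry itself, which suggests the success path leaves completion to the caller once the memstore write is done. A hypothetical caller sketch, not actual HRegion code (walEdit, durability, clusterIds, nonceGroup, and nonce are assumed to be in scope):

// Hypothetical caller: append to the WAL, then complete the mvcc write so it becomes visible.
WriteEntry writeEntry = doWALAppend(walEdit, durability, clusterIds,
  EnvironmentEdgeManager.currentTime(), nonceGroup, nonce, SequenceId.NO_SEQUENCE_ID);
try {
  // ... apply the same cells to the memstore ...
} finally {
  mvcc.completeAndWait(writeEntry); // advance the read point past this write
}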
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class TestRegionObserverInterface, method testPreWALAppend:
@Test
public void testPreWALAppend() throws Exception {
  SimpleRegionObserver sro = new SimpleRegionObserver();
  ObserverContext ctx = Mockito.mock(ObserverContext.class);
  WALKey key = new WALKeyImpl(Bytes.toBytes("region"), TEST_TABLE,
    EnvironmentEdgeManager.currentTime());
  WALEdit edit = new WALEdit();
  sro.preWALAppend(ctx, key, edit);
  Assert.assertEquals(1, key.getExtendedAttributes().size());
  Assert.assertArrayEquals(SimpleRegionObserver.WAL_EXTENDED_ATTRIBUTE_BYTES,
    key.getExtendedAttribute(Integer.toString(sro.getCtPreWALAppend())));
}
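The extended-attribute mechanism the assertions rely on is general: WALKeyImpl lets a preWALAppend observer attach arbitrary byte[] metadata to a key and read it back later. A standalone sketch (the attribute name and value are made up for illustration):

// Attach and read back an extended attribute; "traceId"/"abc123" are illustrative only.
WALKeyImpl key = new WALKeyImpl(Bytes.toBytes("region"), TEST_TABLE,
  EnvironmentEdgeManager.currentTime());
key.addExtendedAttribute("traceId", Bytes.toBytes("abc123"));
byte[] stored = key.getExtendedAttribute("traceId"); // same bytes as Bytes.toBytes("abc123")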