Use of org.apache.hadoop.hbase.wal.WALEdit in the Apache HBase project.
Example: class TestRegionReplicationSink, method testNormal.
@Test
public void testNormal() {
  // Each replicate() invocation on the mocked connection hands back the next
  // future from this fixed list, so the test controls completion order.
  MutableInt callIndex = new MutableInt(0);
  List<CompletableFuture<Void>> replicateFutures =
    Arrays.asList(new CompletableFuture<>(), new CompletableFuture<>());
  when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong()))
    .then(invocation -> replicateFutures.get(callIndex.getAndIncrement()));

  ServerCall<?> serverCall = mock(ServerCall.class);
  WALKeyImpl walKey = mock(WALKeyImpl.class);
  when(walKey.estimatedSerializedSizeOf()).thenReturn(100L);
  WALEdit walEdit = mock(WALEdit.class);
  when(walEdit.estimatedSerializedSizeOf()).thenReturn(1000L);
  when(manager.increase(anyLong())).thenReturn(true);

  // Adding the entry should reserve quota and pin the rpc call.
  sink.add(walKey, walEdit, serverCall);
  verify(manager, times(1)).increase(anyLong());
  verify(serverCall, times(1)).retainByWAL();
  // Pending size is key size (100) + edit size (1000).
  assertEquals(1100, sink.pendingSize());

  // Completing only the first replicate call must not release anything yet.
  replicateFutures.get(0).complete(null);
  verify(manager, never()).decrease(anyLong());
  verify(serverCall, never()).releaseByWAL();
  assertEquals(1100, sink.pendingSize());

  // Once the second call also completes, quota is returned, the rpc call is
  // released and nothing remains pending.
  replicateFutures.get(1).complete(null);
  verify(manager, times(1)).decrease(anyLong());
  verify(serverCall, times(1)).releaseByWAL();
  assertEquals(0, sink.pendingSize());
}
Use of org.apache.hadoop.hbase.wal.WALEdit in the Apache HBase project.
Example: class AbstractTestWALReplay, method testReplayEditsWrittenIntoWAL.
/**
 * Create an HRegion with the result of a WAL split and test we only see the good edits.
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
final Path basedir = CommonFSUtils.getTableDir(hbaseRootDir, tableName);
// Start from a clean table directory so stale files cannot affect the replay.
deleteDir(basedir);
final TableDescriptor htd = createBasic3FamilyHTD(tableName);
// Create the region (and its on-disk layout) once, then close it; this test
// replays into a freshly opened region below.
HRegion region2 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
HBaseTestingUtil.closeRegionAndWAL(region2);
final WAL wal = createWAL(this.conf, hbaseRootDir, logName);
final byte[] rowName = tableName.getName();
final byte[] regionName = hri.getEncodedNameAsBytes();
// Add 1k to each family.
final int countPerFamily = 1000;
Set<byte[]> familyNames = new HashSet<>();
// Replication scope 0 (local) for every family.
NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (byte[] fam : htd.getColumnFamilyNames()) {
scopes.put(fam, 0);
}
// Write countPerFamily edits per declared family; these are the "good" edits
// the replay must surface.
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, mvcc, scopes);
familyNames.add(hcd.getName());
}
// Add a cache flush, shouldn't have any effect
wal.startCacheFlush(regionName, familyNames);
wal.completeCacheFlush(regionName, HConstants.NO_SEQNUM);
// Add an edit to another family, should be skipped.
WALEdit edit = new WALEdit();
long now = ee.currentTime();
edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit);
// Delete the c family to verify deletes make it over.
edit = new WALEdit();
now = ee.currentTime();
edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit);
// Sync.
wal.sync();
// Make a new conf and a new fs for the splitter to run on so we can take
// over old wal.
final Configuration newConf = HBaseConfiguration.create(this.conf);
User user = HBaseTestingUtil.getDifferentUser(newConf, ".replay.wal.secondtime");
user.runAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
runWALSplit(newConf);
FileSystem newFS = FileSystem.get(newConf);
// 100k seems to make for about 4 flushes during HRegion#initialize.
newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
// Make a new wal for new region.
WAL newWal = createWAL(newConf, hbaseRootDir, logName);
final AtomicInteger flushcount = new AtomicInteger(0);
try {
// Subclass HRegion only to count flushes triggered while replaying the
// split edits during initialize().
final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
@Override
protected FlushResultImpl internalFlushcache(final WAL wal, final long myseqid, final Collection<HStore> storesToFlush, MonitoredTask status, boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) throws IOException {
LOG.info("InternalFlushCache Invoked");
FlushResultImpl fs = super.internalFlushcache(wal, myseqid, storesToFlush, Mockito.mock(MonitoredTask.class), writeFlushWalMarker, tracker);
flushcount.incrementAndGet();
return fs;
}
};
// The seq id this region has opened up with
long seqid = region.initialize();
// The mvcc readpoint of from inserting data.
long writePoint = mvcc.getWritePoint();
// We flushed during init.
assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
// Region's opening sequence id should be one past the last replayed write.
assertTrue((seqid - 1) == writePoint);
Get get = new Get(rowName);
Result result = region.get(get);
// Make sure we only see the good edits: one family was fully deleted and
// the "another family" edit must have been skipped.
assertEquals(countPerFamily * (htd.getColumnFamilies().length - 1), result.size());
region.close();
} finally {
newWal.close();
}
return null;
}
});
}
Use of org.apache.hadoop.hbase.wal.WALEdit in the Apache HBase project.
Example: class ProtobufLogTestHelper, method generateEdit.
/**
 * Builds a WAL entry whose key uses {@code i} as the sequence id and whose
 * edit holds {@code columnCount} cells for the given row, in column order.
 */
private static WAL.Entry generateEdit(int i, RegionInfo hri, TableName tableName, byte[] row,
    int columnCount, long timestamp, MultiVersionConcurrencyControl mvcc) {
  WALKeyImpl walKey = new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, i, timestamp,
      HConstants.DEFAULT_CLUSTER_ID, mvcc);
  WALEdit walEdit = new WALEdit();
  // Plain indexed loop in place of an IntStream pipeline; cell order is the same.
  for (int col = 0; col < columnCount; col++) {
    walEdit.add(new KeyValue(row, row, row, timestamp, toValue(i, col)));
  }
  return new WAL.Entry(walKey, walEdit);
}
Use of org.apache.hadoop.hbase.wal.WALEdit in the Apache HBase project.
Example: class TestWALActionsListener, method testActionListener.
/**
 * Add a bunch of dummy data and roll the logs every two inserts. We should end
 * up with 10 rolled files (plus the roll performed in the constructor, 11 in
 * total). Also tests adding a listener while the WAL is in use.
 */
@Test
public void testActionListener() throws Exception {
  DummyWALActionsListener firstObserver = new DummyWALActionsListener();
  final WALFactory walFactory = new WALFactory(conf, "testActionListener");
  walFactory.getWALProvider().addWALActionsListener(firstObserver);
  DummyWALActionsListener lateObserver = new DummyWALActionsListener();
  RegionInfo regionInfo = RegionInfoBuilder.newBuilder(TableName.valueOf(SOME_BYTES))
      .setStartKey(SOME_BYTES).setEndKey(SOME_BYTES).build();
  final WAL wal = walFactory.getWAL(regionInfo);
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  for (int i = 0; i < 20; i++) {
    byte[] value = Bytes.toBytes(i + "");
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(value, value, value));
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    scopes.put(value, 0);
    long txid = wal.appendData(regionInfo,
        new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), TableName.valueOf(value), 0, mvcc, scopes),
        edit);
    wal.sync(txid);
    // Register the second listener part-way through so it only observes the
    // rolls that happen from here on.
    if (i == 10) {
      wal.registerWALActionsListener(lateObserver);
    }
    // Roll on every even iteration.
    if (i % 2 == 0) {
      wal.rollWriter();
    }
  }
  wal.close();
  // 10 explicit rolls plus the one from construction.
  assertEquals(11, firstObserver.preLogRollCounter);
  assertEquals(11, firstObserver.postLogRollCounter);
  assertEquals(5, lateObserver.preLogRollCounter);
  assertEquals(5, lateObserver.postLogRollCounter);
  assertEquals(1, firstObserver.closedCount);
}
Use of org.apache.hadoop.hbase.wal.WALEdit in the Apache HBase project.
Example: class VisibilityReplicationEndpoint, method replicate.
/**
 * Replicates the given entries, first converting visibility tags to their
 * string-based form when replicating to another cluster. Intra-cluster
 * replication (e.g. region replicas) passes the entries through untouched.
 */
@Override
public boolean replicate(ReplicateContext replicateContext) {
  if (delegator.canReplicateToSameCluster()) {
    // Intra-cluster replication keeps the original (ordinal) tag format.
    return delegator.replicate(replicateContext);
  }
  // Only for inter-cluster replication do we need to convert the visibility
  // tags to string-based tags.
  List<Entry> originalEntries = replicateContext.getEntries();
  List<Entry> rewrittenEntries = new ArrayList<>(originalEntries.size());
  // Scratch lists, cleared and reused for every tagged cell.
  List<Tag> visTags = new ArrayList<>();
  List<Tag> nonVisTags = new ArrayList<>();
  for (Entry entry : originalEntries) {
    WALEdit rewrittenEdit = new WALEdit();
    for (Cell cell : entry.getEdit().getCells()) {
      rewrittenEdit.add(rewriteVisibilityTags(cell, visTags, nonVisTags));
    }
    rewrittenEntries.add(new Entry(entry.getKey(), rewrittenEdit));
  }
  replicateContext.setEntries(rewrittenEntries);
  return delegator.replicate(replicateContext);
}

/**
 * Returns a copy of {@code cell} with its visibility tags re-encoded as
 * string-type tags, or the original cell unchanged when it carries no
 * visibility tags or when re-encoding fails (best-effort: the old format is
 * replicated instead).
 */
private Cell rewriteVisibilityTags(Cell cell, List<Tag> visTags, List<Tag> nonVisTags) {
  if (cell.getTagsLength() <= 0) {
    return cell;
  }
  visTags.clear();
  nonVisTags.clear();
  Byte serializationFormat = VisibilityUtils.extractAndPartitionTags(cell, visTags, nonVisTags);
  if (visTags.isEmpty()) {
    return cell;
  }
  try {
    byte[] modifiedVisExpression =
        visibilityLabelsService.encodeVisibilityForReplication(visTags, serializationFormat);
    if (modifiedVisExpression != null) {
      nonVisTags.add(new ArrayBackedTag(TagType.STRING_VIS_TAG_TYPE, modifiedVisExpression));
    }
  } catch (Exception ioe) {
    LOG.error("Exception while reading the visibility labels from the cell. The replication "
        + "would happen as per the existing format and not as " + "string type for the cell "
        + cell + ".", ioe);
    // Just return the old cell as it is, without applying the string type change.
    return cell;
  }
  // Recreate the cell with the new tags and the existing non-visibility tags.
  return PrivateCellUtil.createCell(cell, nonVisTags);
}
Aggregations