Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class ClusterMarkingEntryFilter, method filter().
@Override
public Entry filter(Entry entry) {
  // don't replicate if the log entries have already been consumed by the cluster
  if (replicationEndpoint.canReplicateToSameCluster()
    || !entry.getKey().getClusterIds().contains(peerClusterId)) {
    WALEdit edit = entry.getEdit();
    WALKeyImpl logKey = (WALKeyImpl) entry.getKey();
    if (edit != null && !edit.isEmpty()) {
      // Mark that the current cluster has the change
      logKey.addClusterId(clusterId);
      return entry;
    }
  }
  return null;
}
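For context, here is a minimal, hypothetical driver loop showing how an entry filter like this is typically applied; the entries batch and the filter instance are assumed names, not part of the HBase snippet above. A filter returns the (possibly mutated) entry to keep it, or null to drop it.

// Sketch only: keep the entries the filter accepts, drop the rest.
List<Entry> toShip = new ArrayList<>();
for (Entry entry : entries) { // 'entries' is an assumed input batch of WAL entries
  Entry accepted = filter.filter(entry); // the ClusterMarkingEntryFilter above
  if (accepted != null) {
    toShip.add(accepted); // only non-empty edits, now marked with clusterId, are shipped
  }
}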
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class TestBulkLoad, method shouldBulkLoadManyFamilyHLog().
@Test
public void shouldBulkLoadManyFamilyHLog() throws IOException {
  when(log.appendMarker(any(), any(), argThat(bulkLogWalEditType(WALEdit.BULK_LOAD))))
    .thenAnswer(new Answer<Object>() {
      @Override
      public Object answer(InvocationOnMock invocation) {
        WALKeyImpl walKey = invocation.getArgument(1);
        MultiVersionConcurrencyControl mvcc = walKey.getMvcc();
        if (mvcc != null) {
          MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin();
          walKey.setWriteEntry(we);
        }
        // return a fake transaction id for the stubbed append
        return 1L;
      }
    });
  testRegionWithFamilies(family1, family2).bulkLoadHFiles(withFamilyPathsFor(family1, family2), false, null);
  verify(log).sync(anyLong());
}
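The stubbed Answer mirrors what a real WAL append does with the key's MVCC handle: begin a write to obtain a write number and attach it to the key, so the bulk-load path can later wait on and complete it. A minimal sketch of that begin/complete pairing, independent of the test above:

// Sketch of the MVCC write lifecycle the stub imitates (not the test's own code).
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin(); // assign a write number
try {
  // ... apply the write guarded by this entry ...
} finally {
  mvcc.complete(we); // advance the read point so readers can see the write
}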
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class TestLogRollAbort, method testLogRollAfterSplitStart().
/**
 * Tests the case where a RegionServer enters a GC pause, then comes back online after the
 * master has declared it dead and started to split its logs. Log rolling after such a master
 * split should fail. See HBASE-2312.
 */
@Test
public void testLogRollAfterSplitStart() throws IOException {
  LOG.info("Verify wal roll after split starts will fail.");
  String logName = ServerName.valueOf("testLogRollAfterSplitStart", 16010,
    EnvironmentEdgeManager.currentTime()).toString();
  Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName));
  final WALFactory wals = new WALFactory(conf, logName);
  try {
    // put some entries in a WAL
    TableName tableName = TableName.valueOf(this.getClass().getName());
    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
    WAL log = wals.getWAL(regionInfo);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
    int total = 20;
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      scopes.put(Bytes.toBytes("column"), 0);
      log.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
        EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs);
    }
    // Send the data to HDFS datanodes and close the HDFS writer
    log.sync();
    ((AbstractFSWAL<?>) log).replaceWriter(((FSHLog) log).getOldPath(), null, null);
    // code taken from MasterFileSystem.getLogDirs(), which is called from
    // MasterFileSystem.splitLog() to handle RS shutdowns (as observed by the splitting process):
    // rename the directory so a rogue RS doesn't create more WALs
    Path rsSplitDir = thisTestsDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
    if (!fs.rename(thisTestsDir, rsSplitDir)) {
      throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
    }
    LOG.debug("Renamed region directory: " + rsSplitDir);
    LOG.debug("Processing the old log files.");
    WALSplitter.split(HBASELOGDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);
    LOG.debug("Trying to roll the WAL.");
    try {
      log.rollWriter();
      Assert.fail("rollWriter() did not throw any exception.");
    } catch (IOException ioe) {
      if (ioe.getCause() instanceof FileNotFoundException) {
        LOG.info("Got the expected exception: ", ioe.getCause());
      } else {
        Assert.fail("Unexpected exception: " + ioe);
      }
    }
  } finally {
    wals.close();
    if (fs.exists(thisTestsDir)) {
      fs.delete(thisTestsDir, true);
    }
  }
}
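The try/fail/catch pattern above can be written more compactly with assertThrows; a sketch, assuming JUnit 4.13+ is on the classpath:

// Equivalent expected-failure check: rolling must fail because the WAL
// directory was renamed to *-splitting underneath the live writer.
IOException ioe = Assert.assertThrows(IOException.class, () -> log.rollWriter());
Assert.assertTrue("expected FileNotFoundException as the cause",
  ioe.getCause() instanceof FileNotFoundException);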
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class TestMetricsWAL, method testWalWrittenInBytes().
@Test
public void testWalWrittenInBytes() throws Exception {
  MetricsWALSource source = mock(MetricsWALSourceImpl.class);
  MetricsWAL metricsWAL = new MetricsWAL(source);
  TableName tableName = TableName.valueOf("foo");
  WALKey walKey = new WALKeyImpl(null, tableName, -1);
  metricsWAL.postAppend(100, 900, walKey, null);
  metricsWAL.postAppend(200, 2000, walKey, null);
  verify(source, times(1)).incrementWrittenBytes(100);
  verify(source, times(1)).incrementWrittenBytes(200);
}
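An alternative assertion style, sketched here with Mockito's ArgumentCaptor (imports assumed), verifies the accumulated byte count rather than the two individual calls:

// Sketch: capture every incrementWrittenBytes(long) call and assert the total.
ArgumentCaptor<Long> written = ArgumentCaptor.forClass(Long.class);
verify(source, times(2)).incrementWrittenBytes(written.capture());
assertEquals(300L, written.getAllValues().stream().mapToLong(Long::longValue).sum());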
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class TestRegionReplicationSink, method testSizeCapacity().
@Test
public void testSizeCapacity() {
  MutableInt next = new MutableInt(0);
  List<CompletableFuture<Void>> futures =
    Stream.generate(() -> new CompletableFuture<Void>()).limit(6).collect(Collectors.toList());
  when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong()))
    .then(i -> futures.get(next.getAndIncrement()));
  for (int i = 0; i < 3; i++) {
    ServerCall<?> rpcCall = mock(ServerCall.class);
    WALKeyImpl key = mock(WALKeyImpl.class);
    when(key.estimatedSerializedSizeOf()).thenReturn(100L);
    when(key.getSequenceId()).thenReturn(i + 1L);
    WALEdit edit = mock(WALEdit.class);
    when(edit.estimatedSerializedSizeOf()).thenReturn((i + 1) * 600L * 1024);
    when(manager.increase(anyLong())).thenReturn(true);
    sink.add(key, edit, rpcCall);
  }
  // the first entry will be sent out immediately
  verify(conn, times(2)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
  // complete the first send
  futures.get(0).complete(null);
  futures.get(1).complete(null);
  // we should have another batch
  verify(conn, times(4)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
  // complete the second send
  futures.get(2).complete(null);
  futures.get(3).complete(null);
  // the size of the second entry is greater than 1024 * 1024, so we will have another batch
  verify(conn, times(6)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
  // complete the third send
  futures.get(4).complete(null);
  futures.get(5).complete(null);
  // everything should have been sent out, so there are no pending entries
  assertEquals(0, sink.pendingSize());
}
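The expected batch boundaries follow from simple arithmetic against the sink's size capacity, assumed here to be 1 MiB to match the test's own comment:

// Sketch of the sizes the mocks report for entry i in 0..2:
// key: 100 bytes; edit: (i + 1) * 600 KiB.
long capacity = 1024L * 1024;        // assumed batch-size limit
long entry1 = 100 + 1 * 600L * 1024; // ~600 KiB: flushed immediately as the first batch
long entry2 = 100 + 2 * 600L * 1024; // ~1200 KiB > capacity: a batch of its own
long entry3 = 100 + 3 * 600L * 1024; // ~1800 KiB > capacity: a batch of its own
// Three batches, each replicated to two replicas: six conn.replicate(...) calls in total.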