Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
The class TestHRegion, method testSkipRecoveredEditsReplay.
@Test
public void testSkipRecoveredEditsReplay() throws Exception {
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, CONF, family);
  final WALFactory wals = new WALFactory(CONF, method);
  try {
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
    Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
    long maxSeqId = 1050;
    long minSeqId = 1000;
    // Write one recovered-edits file per sequence id; each file is named with
    // the zero-padded sequence id, which is how replay orders the files.
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
      long time = System.nanoTime();
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
      writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
      writer.close();
    }
    MonitoredTask status = TaskMonitor.get().createStatus(method);
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    // Every store reports a flushed sequence id below minSeqId, so no file may be skipped.
    for (HStore store : region.getStores()) {
      maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), minSeqId - 1);
    }
    long seqId = region.replayRecoveredEditsIfAny(maxSeqIdInStores, null, status);
    assertEquals(maxSeqId, seqId);
    region.getMVCC().advanceTo(seqId);
    Get get = new Get(row);
    Result result = region.get(get);
    // Every replayed edit must be visible once the MVCC has caught up.
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
      assertEquals(1, kvs.size());
      assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
    }
  } finally {
    HBaseTestingUtil.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
  }
}
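The skip decision this test exercises hinges on comparing the sequence id encoded in a recovered-edits file name against what each store has already flushed. A minimal sketch of that comparison, reusing the test's locals above (illustrative only, not the HRegion source):

  // Hedged sketch: a recovered-edits file is skippable only if every store
  // has already flushed past the sequence id encoded in its name.
  long fileSeqId = Long.parseLong(recoveredEdits.getName());
  boolean skippable = maxSeqIdInStores.values().stream().allMatch(flushed -> fileSeqId <= flushed);
  // Here maxSeqIdInStores holds minSeqId - 1 for every store, so skippable is
  // false for every file and replayRecoveredEditsIfAny replays them all.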
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
The class TestReplicationSource, method testLogMoving.
/**
 * Sanity check that we can move logs around while we are reading
 * from them. Should this test fail, ReplicationSource would have a hard
 * time reading logs that are being archived.
 */
// This test doesn't belong in here... it is not about ReplicationSource.
@Test
public void testLogMoving() throws Exception {
  Path logPath = new Path(logDir, "log");
  if (!FS.exists(logDir)) {
    FS.mkdirs(logDir);
  }
  if (!FS.exists(oldLogDir)) {
    FS.mkdirs(oldLogDir);
  }
  WALProvider.Writer writer = WALFactory.createWALWriter(FS, logPath, TEST_UTIL.getConfiguration());
  for (int i = 0; i < 3; i++) {
    byte[] b = Bytes.toBytes(Integer.toString(i));
    KeyValue kv = new KeyValue(b, b, b);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    WALKeyImpl key = new WALKeyImpl(b, TableName.valueOf(b), 0, 0, HConstants.DEFAULT_CLUSTER_ID);
    writer.append(new WAL.Entry(key, edit));
    writer.sync(false);
  }
  writer.close();
  WAL.Reader reader = WALFactory.createReader(FS, logPath, TEST_UTIL.getConfiguration());
  WAL.Entry entry = reader.next();
  assertNotNull(entry);
  // Archive the log while the reader still holds it open.
  Path oldLogPath = new Path(oldLogDir, "log");
  FS.rename(logPath, oldLogPath);
  // The open reader must still see the two remaining entries, then hit EOF.
  entry = reader.next();
  assertNotNull(entry);
  reader.next();
  entry = reader.next();
  assertNull(entry);
  reader.close();
}
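The test leans on HDFS keeping an already-open stream valid across a rename. A reader that opens logs by path still needs a fallback for the window where a log was archived before it could be opened. A minimal sketch of that fallback, assuming the same FS, logDir, and oldLogDir fields as the test (the helper name is illustrative, not HBase API):

  // Hedged sketch: try the live WAL path first; if the file has already been
  // moved to the archive dir, reopen it from there instead.
  private WAL.Reader openLogOrArchived(String name) throws IOException {
    try {
      return WALFactory.createReader(FS, new Path(logDir, name), TEST_UTIL.getConfiguration());
    } catch (FileNotFoundException e) {
      return WALFactory.createReader(FS, new Path(oldLogDir, name), TEST_UTIL.getConfiguration());
    }
  }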
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
The class TestReplicationSource, method testTerminateClearsBuffer.
@Test
public void testTerminateClearsBuffer() throws Exception {
  ReplicationSource source = new ReplicationSource();
  ReplicationSourceManager mockManager = mock(ReplicationSourceManager.class);
  MetricsReplicationGlobalSourceSource mockMetrics = mock(MetricsReplicationGlobalSourceSource.class);
  AtomicLong buffer = new AtomicLong();
  Mockito.when(mockManager.getTotalBufferUsed()).thenReturn(buffer);
  Mockito.when(mockManager.getGlobalMetrics()).thenReturn(mockMetrics);
  ReplicationPeer mockPeer = mock(ReplicationPeer.class);
  Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L);
  Configuration testConf = HBaseConfiguration.create();
  source.init(testConf, null, mockManager, null, mockPeer, null, "testPeer", null,
    p -> OptionalLong.empty(), mock(MetricsSource.class));
  ReplicationSourceWALReader reader =
    new ReplicationSourceWALReader(null, conf, null, 0, null, source, null);
  ReplicationSourceShipper shipper = new ReplicationSourceShipper(conf, null, null, source);
  shipper.entryReader = reader;
  source.workerThreads.put("testPeer", shipper);
  WALEntryBatch batch = new WALEntryBatch(10, logDir);
  WAL.Entry mockEntry = mock(WAL.Entry.class);
  WALEdit mockEdit = mock(WALEdit.class);
  WALKeyImpl mockKey = mock(WALKeyImpl.class);
  when(mockEntry.getEdit()).thenReturn(mockEdit);
  when(mockEdit.isEmpty()).thenReturn(false);
  when(mockEntry.getKey()).thenReturn(mockKey);
  when(mockKey.estimatedSerializedSizeOf()).thenReturn(1000L);
  when(mockEdit.heapSize()).thenReturn(10000L);
  when(mockEdit.size()).thenReturn(0);
  ArrayList<Cell> cells = new ArrayList<>();
  KeyValue kv = new KeyValue(Bytes.toBytes("0001"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("v1"));
  cells.add(kv);
  when(mockEdit.getCells()).thenReturn(cells);
  // Buffering the entry charges its size to the shared buffer...
  reader.addEntryToBatch(batch, mockEntry);
  reader.entryBatchQueue.put(batch);
  // ...and terminate must drain the queue and give the space back.
  source.terminate("test");
  assertEquals(0, source.getSourceManager().getTotalBufferUsed().get());
}
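The final assertion checks simple accounting: the reader charges each buffered entry's estimated size to the shared AtomicLong, and terminate credits it back when the undelivered batches are dropped. A hedged sketch of that bookkeeping using the sizes the mocks above report (illustrative; the real logic lives in ReplicationSourceWALReader and ReplicationSource):

  // Size of one entry as the mocks report it: key size plus edit heap size.
  long entrySize = mockKey.estimatedSerializedSizeOf() + mockEdit.heapSize(); // 1000 + 10000
  buffer.addAndGet(entrySize);   // charged when addEntryToBatch buffers the entry
  buffer.addAndGet(-entrySize);  // credited when terminate drops the queued batch
  // buffer.get() == 0, which is exactly what the test asserts.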
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
The class TestBasicWALEntryStream, method appendEntries.
private void appendEntries(WALProvider.Writer writer, int numEntries) throws IOException {
  for (int i = 0; i < numEntries; i++) {
    byte[] b = Bytes.toBytes(Integer.toString(i));
    KeyValue kv = new KeyValue(b, b, b);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    WALKeyImpl key = new WALKeyImpl(b, TableName.valueOf(b), 0, 0, HConstants.DEFAULT_CLUSTER_ID);
    // Note: scopes is built but never attached to the key, so the entries are
    // written without replication scope information.
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    scopes.put(b, HConstants.REPLICATION_SCOPE_GLOBAL);
    writer.append(new WAL.Entry(key, edit));
    writer.sync(false);
  }
  writer.close();
}
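A typical call pairs this helper with a writer created the same way testLogMoving does. A minimal usage sketch, assuming the same FS, logDir, and TEST_UTIL fields (the file name is illustrative):

  // Write three entries to a fresh WAL file, then hand the path to the code under test.
  Path walPath = new Path(logDir, "wal-under-test");
  WALProvider.Writer writer = WALFactory.createWALWriter(FS, walPath, TEST_UTIL.getConfiguration());
  appendEntries(writer, 3); // appendEntries closes the writer itself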
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
The class TestHBaseInterClusterReplicationEndpointFilterEdits, method testFilterNotExistTableEdits.
@Test
public void testFilterNotExistTableEdits() {
  List<List<Entry>> entryList = new ArrayList<>();
  // should be filtered
  Cell c1 = new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(), Type.Put, VALUE);
  Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE2, EnvironmentEdgeManager.currentTime()),
    new WALEdit().add(c1));
  entryList.add(Lists.newArrayList(e1));
  // should be kept
  Cell c2 = new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(), Type.Put, VALUE);
  Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1, EnvironmentEdgeManager.currentTime()),
    new WALEdit().add(c2));
  entryList.add(Lists.newArrayList(e2));
  List<List<Entry>> filtered = endpoint.filterNotExistTableEdits(entryList);
  assertEquals(1, filtered.size());
  Entry entry = filtered.get(0).get(0);
  assertEquals(1, entry.getEdit().getCells().size());
  assertEquals(TABLE1, entry.getKey().getTableName());
}
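The shape of the filtering this test verifies is a per-batch table-existence check. A hedged sketch of the idea, where the existing-tables set stands in for the sink-cluster lookup the real HBaseInterClusterReplicationEndpoint performs (names here are illustrative, not the HBase method):

  // Keep only entries whose table exists at the sink; drop batches that end up empty.
  List<List<Entry>> filterByExistingTables(List<List<Entry>> batches, Set<TableName> existing) {
    return batches.stream()
      .map(batch -> batch.stream()
        .filter(e -> existing.contains(e.getKey().getTableName()))
        .collect(Collectors.toList()))
      .filter(batch -> !batch.isEmpty())
      .collect(Collectors.toList());
  }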