Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
From the class TestRegionObserverInterface, the method testPreWALAppend:
@Test
public void testPreWALAppend() throws Exception {
  SimpleRegionObserver sro = new SimpleRegionObserver();
  ObserverContext ctx = Mockito.mock(ObserverContext.class);
  WALKey key =
    new WALKeyImpl(Bytes.toBytes("region"), TEST_TABLE, EnvironmentEdgeManager.currentTime());
  WALEdit edit = new WALEdit();
  sro.preWALAppend(ctx, key, edit);
  Assert.assertEquals(1, key.getExtendedAttributes().size());
  Assert.assertArrayEquals(SimpleRegionObserver.WAL_EXTENDED_ATTRIBUTE_BYTES,
    key.getExtendedAttribute(Integer.toString(sro.getCtPreWALAppend())));
}
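The hook exercised here is RegionObserver.preWALAppend. For context, a minimal sketch of an observer in the same spirit (this is not the actual SimpleRegionObserver source: the attribute bytes and counter field are made up, and it assumes the WALKey handed in, a WALKeyImpl in this test, supports addExtendedAttribute):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;

public class AttributeTaggingObserver implements RegionCoprocessor, RegionObserver {
  // Illustrative stand-ins for SimpleRegionObserver's attribute bytes and counter.
  public static final byte[] WAL_EXTENDED_ATTRIBUTE_BYTES = Bytes.toBytes("observed");
  private int ctPreWALAppend = 0;

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preWALAppend(ObserverContext<RegionCoprocessorEnvironment> ctx, WALKey key,
      WALEdit edit) throws IOException {
    // Tag the key with an attribute named after the invocation count, which is
    // the shape the assertions above check for.
    ctPreWALAppend++;
    key.addExtendedAttribute(Integer.toString(ctPreWALAppend), WAL_EXTENDED_ATTRIBUTE_BYTES);
  }

  public int getCtPreWALAppend() {
    return ctPreWALAppend;
  }
}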
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
From the class TestWALObserver, the method testWALCoprocessorReplay:
/**
* Test WAL replay behavior with WALObserver.
*/
@Test
public void testWALCoprocessorReplay() throws Exception {
  // WAL replay is handled at HRegion::replayRecoveredEdits(), which is
  // ultimately called by HRegion::initialize().
  TableName tableName = TableName.valueOf(currentTest.getMethodName());
  TableDescriptor htd = getBasic3FamilyHTableDescriptor(tableName);
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();
  final Path basedir = CommonFSUtils.getTableDir(this.hbaseRootDir, tableName);
  deleteDir(basedir);
  fs.mkdirs(new Path(basedir, hri.getEncodedName()));
  final Configuration newConf = HBaseConfiguration.create(this.conf);
  WAL wal = wals.getWAL(null);
  WALEdit edit = new WALEdit();
  long now = EnvironmentEdgeManager.currentTime();
  final int countPerFamily = 1000;
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getColumnFamilyNames()) {
    scopes.put(fam, 0);
  }
  for (byte[] fam : htd.getColumnFamilyNames()) {
    addWALEdits(tableName, hri, TEST_ROW, fam, countPerFamily,
      EnvironmentEdgeManager.getDelegate(), wal, scopes, mvcc);
  }
  wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes),
    edit);
  // Sync to the filesystem.
  wal.sync();
  User user = HBaseTestingUtil.getDifferentUser(newConf, ".replay.wal.secondtime");
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      Path p = runWALSplit(newConf);
      LOG.info("WALSplit path == " + p);
      // Make a new WAL for the new region open.
      final WALFactory wals2 = new WALFactory(conf,
        ServerName.valueOf(currentTest.getMethodName() + "2", 16010,
          EnvironmentEdgeManager.currentTime()).toString());
      WAL wal2 = wals2.getWAL(null);
      HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri,
        htd, wal2, TEST_UTIL.getHBaseCluster().getRegionServer(0), null);
      SampleRegionWALCoprocessor cp2 =
        region.getCoprocessorHost().findCoprocessor(SampleRegionWALCoprocessor.class);
      // TODO: asserting here is problematic.
      assertNotNull(cp2);
      assertTrue(cp2.isPreWALRestoreCalled());
      assertTrue(cp2.isPostWALRestoreCalled());
      region.close();
      wals2.close();
      return null;
    }
  });
}
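The addWALEdits helper invoked above is not part of this snippet. A plausible sketch of such a helper for this test class, assuming it appends count single-cell edits per family and syncs once on the last transaction id (the qualifiers and values here are illustrative):

private void addWALEdits(final TableName tableName, final RegionInfo hri, final byte[] rowName,
    final byte[] family, final int count, final EnvironmentEdge ee, final WAL wal,
    final NavigableMap<byte[], Integer> scopes, final MultiVersionConcurrencyControl mvcc)
    throws IOException {
  long txid = -1;
  for (int j = 0; j < count; j++) {
    byte[] qualifierBytes = Bytes.toBytes(Integer.toString(j));
    WALEdit edit = new WALEdit();
    // One cell per edit; the qualifier doubles as the value for simplicity.
    edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), qualifierBytes));
    txid = wal.appendData(hri,
      new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, ee.currentTime(), mvcc, scopes),
      edit);
  }
  if (txid != -1) {
    wal.sync(txid); // one sync after the batch, rather than per append
  }
}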
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
From the class ReplicationProtobufUtil, the method buildReplicateWALEntryRequest:
/**
* Create a new ReplicateWALEntryRequest from a list of WAL entries
* @param entries the WAL entries to be replicated
* @param encodedRegionName alternative region name to use if not null
* @param replicationClusterId Id which will uniquely identify source cluster FS client
* configurations in the replication configuration directory
* @param sourceBaseNamespaceDir Path to source cluster base namespace directory
* @param sourceHFileArchiveDir Path to the source cluster hfile archive directory
* @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found.
*/
public static Pair<ReplicateWALEntryRequest, CellScanner> buildReplicateWALEntryRequest(
    final Entry[] entries, byte[] encodedRegionName, String replicationClusterId,
    Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir) {
  // Accumulate all the Cells seen in here.
  List<List<? extends Cell>> allCells = new ArrayList<>(entries.length);
  int size = 0;
  WALEntry.Builder entryBuilder = WALEntry.newBuilder();
  ReplicateWALEntryRequest.Builder builder = ReplicateWALEntryRequest.newBuilder();
  for (Entry entry : entries) {
    entryBuilder.clear();
    WALProtos.WALKey.Builder keyBuilder;
    try {
      keyBuilder = entry.getKey().getBuilder(WALCellCodec.getNoneCompressor());
    } catch (IOException e) {
      throw new AssertionError(
        "This should never throw since the NoneCompressor does not throw exceptions", e);
    }
    if (encodedRegionName != null) {
      keyBuilder.setEncodedRegionName(UnsafeByteOperations.unsafeWrap(encodedRegionName));
    }
    entryBuilder.setKey(keyBuilder.build());
    WALEdit edit = entry.getEdit();
    List<Cell> cells = edit.getCells();
    // Add up the size. It is used later when serializing out the kvs.
    for (Cell cell : cells) {
      size += PrivateCellUtil.estimatedSerializedSizeOf(cell);
    }
    // Collect up the cells.
    allCells.add(cells);
    // Write out how many cells are associated with this entry.
    entryBuilder.setAssociatedCellCount(cells.size());
    builder.addEntry(entryBuilder.build());
  }
  if (replicationClusterId != null) {
    builder.setReplicationClusterId(replicationClusterId);
  }
  if (sourceBaseNamespaceDir != null) {
    builder.setSourceBaseNamespaceDirPath(sourceBaseNamespaceDir.toString());
  }
  if (sourceHFileArchiveDir != null) {
    builder.setSourceHFileArchiveDirPath(sourceHFileArchiveDir.toString());
  }
  return new Pair<>(builder.build(), getCellScanner(allCells, size));
}
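A sketch of how a replication endpoint might call this builder; entries, replicationClusterId, and the two directory paths stand in for values the caller already holds:

Pair<ReplicateWALEntryRequest, CellScanner> pair =
  ReplicationProtobufUtil.buildReplicateWALEntryRequest(
    entries,               // WAL entries batched for this shipment
    null,                  // null keeps each entry's original encoded region name
    replicationClusterId,  // identifies source-cluster FS client configuration
    sourceBaseNamespaceDir,
    sourceHFileArchiveDir);
ReplicateWALEntryRequest request = pair.getFirst();
CellScanner cells = pair.getSecond(); // the cell payload travels beside the protobuf request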
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
From the class TestWALPlayer, the method testWALKeyValueMapper:
private void testWALKeyValueMapper(final String tableConfigKey) throws Exception {
  Configuration configuration = new Configuration();
  configuration.set(tableConfigKey, "table");
  WALKeyValueMapper mapper = new WALKeyValueMapper();
  WALKey key = mock(WALKey.class);
  when(key.getTableName()).thenReturn(TableName.valueOf("table"));
  @SuppressWarnings("unchecked")
  Mapper<WALKey, WALEdit, ImmutableBytesWritable, Cell>.Context context = mock(Context.class);
  when(context.getConfiguration()).thenReturn(configuration);
  WALEdit value = mock(WALEdit.class);
  ArrayList<Cell> values = new ArrayList<>();
  KeyValue kv1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), null);
  values.add(kv1);
  when(value.getCells()).thenReturn(values);
  mapper.setup(context);
  doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      ImmutableBytesWritable writer = (ImmutableBytesWritable) invocation.getArgument(0);
      MapReduceExtendedCell key = (MapReduceExtendedCell) invocation.getArgument(1);
      assertEquals("row", Bytes.toString(writer.get()));
      assertEquals("row", Bytes.toString(CellUtil.cloneRow(key)));
      return null;
    }
  }).when(context).write(any(), any());
  mapper.map(key, value, context);
}
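The test drives the mapper entirely through mocks. For context, a simplified sketch of the mapper shape it exercises, modeled on WALPlayer's WALKeyValueMapper (the table-filtering logic and the "wal.input.tables" config key are assumptions for illustration):

import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.mapreduce.Mapper;

public class SketchWALKeyValueMapper
    extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, Cell> {
  private final Set<String> tableSet = new HashSet<>();

  @Override
  public void setup(Context context) {
    // Read the configured table list; the key name is assumed here.
    String[] tables = context.getConfiguration().getStrings("wal.input.tables");
    if (tables != null) {
      Collections.addAll(tableSet, tables);
    }
  }

  @Override
  public void map(WALKey key, WALEdit value, Context context)
      throws IOException, InterruptedException {
    // Emit one (row, cell) pair per cell, only for the configured tables,
    // matching the (row, MapReduceExtendedCell) pair the mock verifies above.
    if (tableSet.contains(key.getTableName().getNameAsString())) {
      for (Cell cell : value.getCells()) {
        context.write(new ImmutableBytesWritable(CellUtil.cloneRow(cell)),
          new MapReduceExtendedCell(cell));
      }
    }
  }
}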
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
From the class TestWALRecordReader, the method testWALRecordReaderActiveArchiveTolerance:
/**
 * Test WALRecordReader tolerance to the WAL being moved from the active
 * directory to the archive directory.
 */
@Test
public void testWALRecordReaderActiveArchiveTolerance() throws Exception {
  final WALFactory walfactory = new WALFactory(conf, getName());
  WAL log = walfactory.getWAL(info);
  byte[] value = Bytes.toBytes("value");
  WALEdit edit = new WALEdit();
  edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), EnvironmentEdgeManager.currentTime(),
    value));
  long txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes),
    edit);
  log.sync(txid);
  // Make sure the 2nd edit gets a later timestamp.
  Thread.sleep(10);
  edit = new WALEdit();
  edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), EnvironmentEdgeManager.currentTime(),
    value));
  txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
  log.sync(txid);
  log.shutdown();
  // There should be 2 log entries now.
  WALInputFormat input = new WALInputFormat();
  Configuration jobConf = new Configuration(conf);
  jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());
  // Make sure the log is found.
  List<InputSplit> splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
  assertEquals(1, splits.size());
  WALInputFormat.WALSplit split = (WALInputFormat.WALSplit) splits.get(0);
  LOG.debug("log=" + logDir + " file=" + split.getLogFileName());
  testSplitWithMovingWAL(splits.get(0), Bytes.toBytes("1"), Bytes.toBytes("2"));
}
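The getWalKeyImpl helper is not shown in this snippet. A plausible sketch, assuming the test class holds info, tableName, and mvcc fields matching the WAL it writes to:

private WALKeyImpl getWalKeyImpl(final long time, NavigableMap<byte[], Integer> scopes) {
  // Key against the test region and table; mvcc should be the same instance the
  // WAL write path uses so sequence ids advance consistently.
  return new WALKeyImpl(info.getEncodedNameAsBytes(), tableName, time, mvcc, scopes);
}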