Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by Apache.
The class TestWALObserver, method addWALEdits.
private void addWALEdits(final TableName tableName, final RegionInfo hri, final byte[] rowName,
    final byte[] family, final int count, EnvironmentEdge ee, final WAL wal,
    final NavigableMap<byte[], Integer> scopes, final MultiVersionConcurrencyControl mvcc)
    throws IOException {
  String familyStr = Bytes.toString(family);
  long txid = -1;
  for (int j = 0; j < count; j++) {
    byte[] qualifierBytes = Bytes.toBytes(Integer.toString(j));
    byte[] columnBytes = Bytes.toBytes(familyStr + ":" + Integer.toString(j));
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes));
    // uses WALKeyImpl instead of HLogKey on purpose. will only work for tests where we don't care
    // about legacy coprocessors
    txid = wal.appendData(hri,
      new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, ee.currentTime(), mvcc), edit);
  }
  if (-1 != txid) {
    wal.sync(txid);
  }
}
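The core pattern in this helper — build a WALEdit, wrap it in a WALKeyImpl, append it, then sync on the returned transaction id — can be distilled into a single append-and-sync step. Below is a minimal sketch using only the calls that appear in the test above; the helper name appendSingleEdit and its parameter list are illustrative, not part of HBase:

// Sketch only: assumes the same imports as TestWALObserver (WAL, WALEdit, WALKeyImpl,
// RegionInfo, TableName, KeyValue, MultiVersionConcurrencyControl, EnvironmentEdgeManager).
private static long appendSingleEdit(WAL wal, RegionInfo hri, TableName tableName, byte[] row,
    byte[] family, byte[] qualifier, byte[] value, MultiVersionConcurrencyControl mvcc)
    throws IOException {
  long now = EnvironmentEdgeManager.currentTime();
  WALEdit edit = new WALEdit();
  edit.add(new KeyValue(row, family, qualifier, now, value));
  // appendData returns a transaction id that can later be handed to sync().
  long txid = wal.appendData(hri,
    new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc), edit);
  wal.sync(txid);
  return txid;
}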
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by Apache.
The class TestWALObserver, method verifyWritesSeen.
private void verifyWritesSeen(final WAL log, final SampleRegionWALCoprocessor cp,
    final boolean seesLegacy) throws Exception {
  RegionInfo hri = createBasicHRegionInfo(Bytes.toString(TEST_TABLE));
  TableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE));
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getColumnFamilyNames()) {
    scopes.put(fam, 0);
  }
  Path basedir = new Path(this.hbaseRootDir, Bytes.toString(TEST_TABLE));
  deleteDir(basedir);
  fs.mkdirs(new Path(basedir, hri.getEncodedName()));
  // TEST_FAMILY[0] shall be removed from WALEdit.
  // TEST_FAMILY[1] value shall be changed.
  // TEST_FAMILY[2] shall be added to WALEdit, although it's not in the put.
  cp.setTestValues(TEST_TABLE, TEST_ROW, TEST_FAMILY[0], TEST_QUALIFIER[0], TEST_FAMILY[1],
    TEST_QUALIFIER[1], TEST_FAMILY[2], TEST_QUALIFIER[2]);
  assertFalse(cp.isPreWALWriteCalled());
  assertFalse(cp.isPostWALWriteCalled());
  // TEST_FAMILY[2] is not in the put, however it shall be added by the tested
  // coprocessor.
  // Use a Put to create familyMap.
  Put p = creatPutWith2Families(TEST_ROW);
  Map<byte[], List<Cell>> familyMap = p.getFamilyCellMap();
  WALEdit edit = new WALEdit();
  edit.add(familyMap);
  boolean foundFamily0 = false;
  boolean foundFamily2 = false;
  boolean modifiedFamily1 = false;
  List<Cell> cells = edit.getCells();
  for (Cell cell : cells) {
    if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[0])) {
      foundFamily0 = true;
    }
    if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[2])) {
      foundFamily2 = true;
    }
    if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[1])) {
      if (!Arrays.equals(CellUtil.cloneValue(cell), TEST_VALUE[1])) {
        modifiedFamily1 = true;
      }
    }
  }
  assertTrue(foundFamily0);
  assertFalse(foundFamily2);
  assertFalse(modifiedFamily1);
  // This is where the WAL-write coprocessor hooks should fire.
  long now = EnvironmentEdgeManager.currentTime();
  // Appends with WALKeyImpl; legacy HLogKey-based coprocessors are not exercised by this path.
  long txid = log.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), hri.getTable(), now,
    new MultiVersionConcurrencyControl(), scopes), edit);
  log.sync(txid);
  // The edit shall have been changed by the coprocessor by now.
  foundFamily0 = false;
  foundFamily2 = false;
  modifiedFamily1 = false;
  for (Cell cell : cells) {
    if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[0])) {
      foundFamily0 = true;
    }
    if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[2])) {
      foundFamily2 = true;
    }
    if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[1])) {
      if (!Arrays.equals(CellUtil.cloneValue(cell), TEST_VALUE[1])) {
        modifiedFamily1 = true;
      }
    }
  }
  assertFalse(foundFamily0);
  assertTrue(foundFamily2);
  assertTrue(modifiedFamily1);
  assertTrue(cp.isPreWALWriteCalled());
  assertTrue(cp.isPostWALWriteCalled());
}
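The scan over edit.getCells() that sets foundFamily0, foundFamily2, and modifiedFamily1 appears twice in this method, once before and once after the append. A hypothetical pair of helpers along these lines (not part of the upstream test) could express the same checks once:

// Sketch only: true if any cell in the list belongs to the given family.
private static boolean containsFamily(List<Cell> cells, byte[] family) {
  for (Cell cell : cells) {
    if (Arrays.equals(CellUtil.cloneFamily(cell), family)) {
      return true;
    }
  }
  return false;
}

// Sketch only: true if some cell of the given family carries a value other than expectedValue.
private static boolean familyValueDiffers(List<Cell> cells, byte[] family, byte[] expectedValue) {
  for (Cell cell : cells) {
    if (Arrays.equals(CellUtil.cloneFamily(cell), family)
      && !Arrays.equals(CellUtil.cloneValue(cell), expectedValue)) {
      return true;
    }
  }
  return false;
}

With these, the pre-append assertions reduce to assertTrue(containsFamily(cells, TEST_FAMILY[0])) and so on.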
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by Apache.
The class TestWALObserver, method testEmptyWALEditAreNotSeen.
/**
 * Coprocessors shouldn't be notified of empty WALEdits.
 */
@Test
public void testEmptyWALEditAreNotSeen() throws Exception {
  RegionInfo hri = createBasicHRegionInfo(Bytes.toString(TEST_TABLE));
  TableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE));
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getColumnFamilyNames()) {
    scopes.put(fam, 0);
  }
  WAL log = wals.getWAL(null);
  try {
    SampleRegionWALCoprocessor cp = getCoprocessor(log, SampleRegionWALCoprocessor.class);
    cp.setTestValues(TEST_TABLE, null, null, null, null, null, null, null);
    assertFalse(cp.isPreWALWriteCalled());
    assertFalse(cp.isPostWALWriteCalled());
    final long now = EnvironmentEdgeManager.currentTime();
    long txid = log.appendData(hri,
      new WALKeyImpl(hri.getEncodedNameAsBytes(), hri.getTable(), now, mvcc, scopes),
      new WALEdit());
    log.sync(txid);
    assertFalse("Empty WALEdit should skip coprocessor evaluation.", cp.isPreWALWriteCalled());
    assertFalse("Empty WALEdit should skip coprocessor evaluation.", cp.isPostWALWriteCalled());
  } finally {
    log.close();
  }
}
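The test appends a freshly constructed, cell-less WALEdit on purpose and asserts that neither hook fires. A caller that wants to avoid issuing such appends at all can guard on WALEdit.isEmpty(); this is a minimal sketch, and the helper name appendIfNonEmpty is illustrative:

// Sketch only: append the edit only if it actually carries cells.
private static long appendIfNonEmpty(WAL wal, RegionInfo hri, WALKeyImpl key, WALEdit edit)
    throws IOException {
  if (edit.isEmpty()) {
    return -1; // nothing to write; mirrors the "txid = -1" convention used in addWALEdits above
  }
  long txid = wal.appendData(hri, key, edit);
  wal.sync(txid);
  return txid;
}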
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by Apache.
The class TestHRegion, method testArchiveRecoveredEditsReplay.
@Test
public void testArchiveRecoveredEditsReplay() throws Exception {
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, CONF, family);
  final WALFactory wals = new WALFactory(CONF, method);
  try {
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
    Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
    long maxSeqId = 1050;
    long minSeqId = 1000;
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
      long time = System.nanoTime();
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
      writer.append(new WAL.Entry(
        new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
      writer.close();
    }
    MonitoredTask status = TaskMonitor.get().createStatus(method);
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (HStore store : region.getStores()) {
      maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), minSeqId - 1);
    }
    CONF.set("hbase.region.archive.recovered.edits", "true");
    CONF.set(CommonFSUtils.HBASE_WAL_DIR, "/custom_wal_dir");
    long seqId = region.replayRecoveredEditsIfAny(maxSeqIdInStores, null, status);
    assertEquals(maxSeqId, seqId);
    region.getMVCC().advanceTo(seqId);
    String fakeFamilyName = recoveredEditsDir.getName();
    Path rootDir = new Path(CONF.get(HConstants.HBASE_DIR));
    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePathForRootDir(rootDir,
      region.getRegionInfo(), Bytes.toBytes(fakeFamilyName));
    FileStatus[] list = TEST_UTIL.getTestFileSystem().listStatus(storeArchiveDir);
    assertEquals(6, list.length);
  } finally {
    CONF.set("hbase.region.archive.recovered.edits", "false");
    CONF.set(CommonFSUtils.HBASE_WAL_DIR, "");
    HBaseTestingUtil.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
  }
}
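A note on the assertions: the loop writes recovered-edits files named with String.format("%019d", i), i.e. the sequence id zero-padded to 19 digits, for sequence ids 1000, 1010, 1020, 1030, 1040, and 1050. The replay is expected to return the highest of these, 1050 (assertEquals(maxSeqId, seqId)), and with hbase.region.archive.recovered.edits enabled those same six files end up under the store archive path, which is why the final listing is expected to contain exactly 6 entries.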
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by Apache.
The class TestHRegion, method testRecoveredEditsReplayCompaction.
public void testRecoveredEditsReplayCompaction(boolean mismatchedRegionName) throws Exception {
  CONF.setClass(HConstants.REGION_IMPL, HRegionForTesting.class, Region.class);
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, CONF, family);
  final WALFactory wals = new WALFactory(CONF, method);
  try {
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
    long maxSeqId = 3;
    long minSeqId = 0;
    for (long i = minSeqId; i < maxSeqId; i++) {
      Put put = new Put(Bytes.toBytes(i));
      put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
      region.put(put);
      region.flush(true);
    }
    // this will create a region with 3 files
    assertEquals(3, region.getStore(family).getStorefilesCount());
    List<Path> storeFiles = new ArrayList<>(3);
    for (HStoreFile sf : region.getStore(family).getStorefiles()) {
      storeFiles.add(sf.getPath());
    }
    // disable compaction completion
    CONF.setBoolean("hbase.hstore.compaction.complete", false);
    region.compactStores();
    // ensure that nothing changed
    assertEquals(3, region.getStore(family).getStorefilesCount());
    // now find the compacted file, and manually add it to the recovered edits
    Path tmpDir = new Path(region.getRegionFileSystem().getTempDir(), Bytes.toString(family));
    FileStatus[] files = CommonFSUtils.listStatus(fs, tmpDir);
    String errorMsg = "Expected to find 1 file in the region temp directory "
      + "from the compaction, could not find any";
    assertNotNull(errorMsg, files);
    assertEquals(errorMsg, 1, files.length);
    // move the file inside region dir
    Path newFile =
      region.getRegionFileSystem().commitStoreFile(Bytes.toString(family), files[0].getPath());
    byte[] encodedNameAsBytes = this.region.getRegionInfo().getEncodedNameAsBytes();
    byte[] fakeEncodedNameAsBytes = new byte[encodedNameAsBytes.length];
    for (int i = 0; i < encodedNameAsBytes.length; i++) {
      // Mix the byte array to have a new encodedName
      fakeEncodedNameAsBytes[i] = (byte) (encodedNameAsBytes[i] + 1);
    }
    CompactionDescriptor compactionDescriptor =
      ProtobufUtil.toCompactionDescriptor(this.region.getRegionInfo(),
        mismatchedRegionName ? fakeEncodedNameAsBytes : null, family, storeFiles,
        Lists.newArrayList(newFile),
        region.getRegionFileSystem().getStoreDir(Bytes.toString(family)));
    WALUtil.writeCompactionMarker(region.getWAL(), this.region.getReplicationScope(),
      this.region.getRegionInfo(), compactionDescriptor, region.getMVCC(), null);
    Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
    Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
    fs.create(recoveredEdits);
    WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
    long time = System.nanoTime();
    writer.append(new WAL.Entry(
      new WALKeyImpl(regionName, tableName, 10, time, HConstants.DEFAULT_CLUSTER_ID),
      WALEdit.createCompaction(region.getRegionInfo(), compactionDescriptor)));
    writer.close();
    // close the region now, and reopen again
    region.getTableDescriptor();
    region.getRegionInfo();
    HBaseTestingUtil.closeRegionAndWAL(this.region);
    try {
      region = HRegion.openHRegion(region, null);
    } catch (WrongRegionException wre) {
      fail("Matching encoded region name should not have produced WrongRegionException");
    }
    // now check whether we have only one store file, the compacted one
    Collection<HStoreFile> sfs = region.getStore(family).getStorefiles();
    for (HStoreFile sf : sfs) {
      LOG.info(Objects.toString(sf.getPath()));
    }
    if (!mismatchedRegionName) {
      assertEquals(1, region.getStore(family).getStorefilesCount());
    }
    files = CommonFSUtils.listStatus(fs, tmpDir);
    assertTrue("Expected to find 0 files inside " + tmpDir, files == null || files.length == 0);
    for (long i = minSeqId; i < maxSeqId; i++) {
      Get get = new Get(Bytes.toBytes(i));
      Result result = region.get(get);
      byte[] value = result.getValue(family, Bytes.toBytes(i));
      assertArrayEquals(Bytes.toBytes(i), value);
    }
  } finally {
    HBaseTestingUtil.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
    CONF.setClass(HConstants.REGION_IMPL, HRegion.class, Region.class);
  }
}
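This method is a parameterized helper rather than a @Test itself: mismatchedRegionName controls whether the compaction marker written to the recovered edits carries a deliberately wrong encoded region name. In the upstream TestHRegion it is presumably driven by small no-arg test methods along these lines (a sketch, not verified against the exact source):

// Sketch only: hypothetical no-arg drivers for the parameterized helper above.
@Test
public void testRecoveredEditsReplayCompaction() throws Exception {
  testRecoveredEditsReplayCompaction(false);
}

@Test
public void testRecoveredEditsReplayCompactionWithRegionNameMismatch() throws Exception {
  testRecoveredEditsReplayCompaction(true);
}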