Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class TestAtomicOperation, method initHRegion.
private void initHRegion(byte[] tableName, String callingMethod, int[] maxVersions,
    byte[]... families) throws IOException {
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
  int i = 0;
  for (byte[] family : families) {
    ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(family)
        .setMaxVersions(maxVersions != null ? maxVersions[i++] : 1).build();
    builder.setColumnFamily(familyDescriptor);
  }
  TableDescriptor tableDescriptor = builder.build();
  RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
  region = TEST_UTIL.createLocalHRegion(info, tableDescriptor);
}
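The snippet above wires the descriptors into a test-local region. For reference, a minimal standalone sketch of the same builder pattern, assuming the standard HBase 2.x+ client API; the table name "example" and family name "info" are made-up values, not from the test:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnFamilyDescriptorSketch {
  public static void main(String[] args) {
    // Build an immutable ColumnFamilyDescriptor; the builder is the supported way to
    // construct one now that HColumnDescriptor is deprecated. "info" is a hypothetical family.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .build();
    // Attach the family to a TableDescriptor for a hypothetical table "example".
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(cf)
        .build();
    // Read the settings back from the immutable descriptors.
    System.out.println(cf.getNameAsString() + " keeps " + cf.getMaxVersions() + " versions");
    System.out.println(td.getTableName() + " has " + td.getColumnFamilies().length + " family");
  }
}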
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class AbstractTestFSWAL, method testFindMemStoresEligibleForFlush.
/**
 * On rolling a wal after reaching the threshold, {@link WAL#rollWriter()} returns the list of
 * regions which should be flushed in order to archive the oldest wal file.
 * <p>
 * This method tests this behavior by inserting edits and rolling the wal enough times to reach
 * the max number of logs threshold. It checks whether we get the "right regions and stores" for
 * flush on rolling the wal.
 * @throws Exception
 */
@Test
public void testFindMemStoresEligibleForFlush() throws Exception {
  LOG.debug("testFindMemStoresEligibleForFlush");
  Configuration conf1 = HBaseConfiguration.create(CONF);
  conf1.setInt("hbase.regionserver.maxlogs", 1);
  AbstractFSWAL<?> wal = newWAL(FS, CommonFSUtils.getWALRootDir(conf1), DIR.toString(),
      HConstants.HREGION_OLDLOGDIR_NAME, conf1, null, true, null, null);
  String cf1 = "cf1";
  String cf2 = "cf2";
  String cf3 = "cf3";
  TableDescriptor t1 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)).build();
  TableDescriptor t2 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t2"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)).build();
  RegionInfo hri1 = RegionInfoBuilder.newBuilder(t1.getTableName()).build();
  RegionInfo hri2 = RegionInfoBuilder.newBuilder(t2.getTableName()).build();
  List<ColumnFamilyDescriptor> cfs = new ArrayList<>();
  cfs.add(ColumnFamilyDescriptorBuilder.of(cf1));
  cfs.add(ColumnFamilyDescriptorBuilder.of(cf2));
  TableDescriptor t3 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t3"))
      .setColumnFamilies(cfs).build();
  RegionInfo hri3 = RegionInfoBuilder.newBuilder(t3.getTableName()).build();
  // add edits and roll the wal
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  NavigableMap<byte[], Integer> scopes1 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : t1.getColumnFamilyNames()) {
    scopes1.put(fam, 0);
  }
  NavigableMap<byte[], Integer> scopes2 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : t2.getColumnFamilyNames()) {
    scopes2.put(fam, 0);
  }
  NavigableMap<byte[], Integer> scopes3 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : t3.getColumnFamilyNames()) {
    scopes3.put(fam, 0);
  }
  try {
    addEdits(wal, hri1, t1, 2, mvcc, scopes1, cf1);
    wal.rollWriter();
    // add some more edits and roll the wal. This would reach the log number threshold.
    addEdits(wal, hri1, t1, 2, mvcc, scopes1, cf1);
    wal.rollWriter();
    // with the above rollWriter call, the max logs limit is reached.
    assertTrue(wal.getNumRolledLogFiles() == 2);
    // get the regions to flush; since there is only one region in the oldest wal, it should
    // return only one region.
    Map<byte[], List<byte[]>> regionsToFlush = wal.findRegionsToForceFlush();
    assertEquals(1, regionsToFlush.size());
    assertEquals(hri1.getEncodedNameAsBytes(), (byte[]) regionsToFlush.keySet().toArray()[0]);
    // insert edits in the second region
    addEdits(wal, hri2, t2, 2, mvcc, scopes2, cf1);
    // get the regions to flush; it should still return region1.
    regionsToFlush = wal.findRegionsToForceFlush();
    assertEquals(1, regionsToFlush.size());
    assertEquals(hri1.getEncodedNameAsBytes(), (byte[]) regionsToFlush.keySet().toArray()[0]);
    // flush region 1, and roll the wal file. Only the last wal, which has entries for region1,
    // should remain.
    flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames());
    wal.rollWriter();
    // only one wal should remain now (the one for the second region).
    assertEquals(1, wal.getNumRolledLogFiles());
    // flush the second region
    flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getColumnFamilyNames());
    wal.rollWriter(true);
    // no wal should remain now.
    assertEquals(0, wal.getNumRolledLogFiles());
    // add edits to both region 1 and region 2, and roll.
    addEdits(wal, hri1, t1, 2, mvcc, scopes1, cf1);
    addEdits(wal, hri2, t2, 2, mvcc, scopes2, cf1);
    wal.rollWriter();
    // add edits and roll the writer to reach the max logs limit.
    assertEquals(1, wal.getNumRolledLogFiles());
    addEdits(wal, hri1, t1, 2, mvcc, scopes1, cf1);
    wal.rollWriter();
    // it should return two regions to flush, as the oldest wal file has entries
    // for both regions.
    regionsToFlush = wal.findRegionsToForceFlush();
    assertEquals(2, regionsToFlush.size());
    // flush both regions
    flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames());
    flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getColumnFamilyNames());
    wal.rollWriter(true);
    assertEquals(0, wal.getNumRolledLogFiles());
    // Add an edit to region1, and roll the wal.
    addEdits(wal, hri1, t1, 2, mvcc, scopes1, cf1);
    // test partial flush: roll on a partial flush, and ensure that the wal is not archived.
    wal.startCacheFlush(hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames());
    wal.rollWriter();
    wal.completeCacheFlush(hri1.getEncodedNameAsBytes(), HConstants.NO_SEQNUM);
    assertEquals(1, wal.getNumRolledLogFiles());
    // clear test data
    flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames());
    wal.rollWriter(true);
    // add edits for three families
    addEdits(wal, hri3, t3, 2, mvcc, scopes3, cf1);
    addEdits(wal, hri3, t3, 2, mvcc, scopes3, cf2);
    addEdits(wal, hri3, t3, 2, mvcc, scopes3, cf3);
    wal.rollWriter();
    addEdits(wal, hri3, t3, 2, mvcc, scopes3, cf1);
    wal.rollWriter();
    assertEquals(2, wal.getNumRolledLogFiles());
    // flush one family before archiving the oldest wal
    Set<byte[]> flushedFamilyNames = new HashSet<>();
    flushedFamilyNames.add(Bytes.toBytes(cf1));
    flushRegion(wal, hri3.getEncodedNameAsBytes(), flushedFamilyNames);
    regionsToFlush = wal.findRegionsToForceFlush();
    // then only the two remaining families need to be flushed when archiving the oldest wal
    assertEquals(1, regionsToFlush.size());
    assertEquals(hri3.getEncodedNameAsBytes(), (byte[]) regionsToFlush.keySet().toArray()[0]);
    assertEquals(2, regionsToFlush.get(hri3.getEncodedNameAsBytes()).size());
  } finally {
    if (wal != null) {
      wal.close();
    }
  }
}
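The three scopes maps above are built by hand with a replication scope of 0 for every family. As a small standalone sketch, such a family-to-scope map could instead be derived from the descriptors themselves, assuming the standard getScope()/setScope() accessors on ColumnFamilyDescriptor; the table and family names below are illustrative only:

import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplicationScopesSketch {
  /** Builds a family -> replication scope map from a table's column family descriptors. */
  static NavigableMap<byte[], Integer> scopesOf(TableDescriptor td) {
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      scopes.put(cfd.getName(), cfd.getScope());
    }
    return scopes;
  }

  public static void main(String[] args) {
    // Hypothetical table with one locally-scoped and one globally replicated family.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf2"))
            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .build();
    scopesOf(td).forEach((fam, scope) -> System.out.println(Bytes.toString(fam) + " -> " + scope));
  }
}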
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class AbstractTestWALReplay, method testReplayEditsAfterPartialFlush.
/**
 * Test that we recover correctly when there is a failure in between the flushes, i.e. some stores
 * got flushed but others did not.
 * <p/>
 * Unfortunately, there is no easy hook to flush at a store level. The way we get around this is
 * by flushing at the region level, and then deleting the recently flushed store file for one of
 * the Stores. This puts us back in the situation where all but that store got flushed and the
 * region died.
 * <p/>
 * We restart the region again, and verify that the edits were replayed.
 */
@Test
public void testReplayEditsAfterPartialFlush() throws IOException, SecurityException,
    IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException {
  final TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion");
  final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  final Path basedir = CommonFSUtils.getTableDir(this.hbaseRootDir, tableName);
  deleteDir(basedir);
  final byte[] rowName = tableName.getName();
  final int countPerFamily = 10;
  final TableDescriptor htd = createBasic3FamilyHTD(tableName);
  HRegion region3 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
  HBaseTestingUtil.closeRegionAndWAL(region3);
  // Write countPerFamily edits into the three families. Do a flush on one
  // of the families during the load of edits so its seqid is not the same as
  // the others, to test that we do the right thing when seqids differ.
  WAL wal = createWAL(this.conf, hbaseRootDir, logName);
  HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
  long seqid = region.getOpenSeqNum();
  for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
    addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
  }
  // Now assert edits made it in.
  final Get g = new Get(rowName);
  Result result = region.get(g);
  assertEquals(countPerFamily * htd.getColumnFamilies().length, result.size());
  // Let us flush the region
  region.flush(true);
  region.close(true);
  wal.shutdown();
  // delete the store files in the second column family to simulate a failure
  // in between the flushcache();
  // we have 3 families; killing the middle one ensures that taking the maximum
  // will make us fail.
  int cf_count = 0;
  for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
    cf_count++;
    if (cf_count == 2) {
      region.getRegionFileSystem().deleteFamily(hcd.getNameAsString());
    }
  }
  // Let us try to split and recover
  runWALSplit(this.conf);
  WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
  HRegion region2 = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal2);
  long seqid2 = region2.getOpenSeqNum();
  assertTrue(seqid + result.size() < seqid2);
  final Result result1b = region2.get(g);
  assertEquals(result.size(), result1b.size());
}
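The loop above locates the second family by counting through getColumnFamilies(). For reference, a TableDescriptor can also be queried for a single family directly; a minimal sketch under that assumption, with made-up table and family names rather than the test's helpers:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyLookupSketch {
  public static void main(String[] args) {
    // Hypothetical three-family table, mirroring the "basic 3 family" layout used in the test.
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("t"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("a"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("b"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("c"))
        .build();
    byte[] target = Bytes.toBytes("b");
    if (htd.hasColumnFamily(target)) {
      // getColumnFamily returns the descriptor for exactly one family, so there is no need
      // to count through getColumnFamilies() as the test does.
      ColumnFamilyDescriptor cfd = htd.getColumnFamily(target);
      System.out.println("would delete store files for " + cfd.getNameAsString());
    }
  }
}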
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class AbstractTestWALReplay, method testReplayEditsAfterAbortingFlush.
/**
 * Test that we can recover the data correctly after aborting a flush. In the test we first abort
 * a flush after writing some data, then write more data and flush again, and finally verify the
 * data.
 */
@Test
public void testReplayEditsAfterAbortingFlush() throws IOException {
  final TableName tableName = TableName.valueOf("testReplayEditsAfterAbortingFlush");
  final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  final Path basedir = CommonFSUtils.getTableDir(this.hbaseRootDir, tableName);
  deleteDir(basedir);
  final TableDescriptor htd = createBasic3FamilyHTD(tableName);
  HRegion region3 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
  HBaseTestingUtil.closeRegionAndWAL(region3);
  // Write countPerFamily edits into the three families. Do a flush on one
  // of the families during the load of edits so its seqid is not the same as
  // the others, to test that we do the right thing when seqids differ.
  WAL wal = createWAL(this.conf, hbaseRootDir, logName);
  RegionServerServices rsServices = Mockito.mock(RegionServerServices.class);
  Mockito.doReturn(false).when(rsServices).isAborted();
  when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo", 10, 10));
  when(rsServices.getConfiguration()).thenReturn(conf);
  Configuration customConf = new Configuration(this.conf);
  customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
      CustomStoreFlusher.class.getName());
  HRegion region = HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal, customConf, rsServices, null);
  int writtenRowCount = 10;
  List<ColumnFamilyDescriptor> families = Arrays.asList(htd.getColumnFamilies());
  for (int i = 0; i < writtenRowCount; i++) {
    Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
    put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"),
        Bytes.toBytes("val"));
    region.put(put);
  }
  // Now assert edits made it in.
  RegionScanner scanner = region.getScanner(new Scan());
  assertEquals(writtenRowCount, getScannedCount(scanner));
  // Let us flush the region
  CustomStoreFlusher.throwExceptionWhenFlushing.set(true);
  try {
    region.flush(true);
    fail("Injected exception hasn't been thrown");
  } catch (IOException e) {
    LOG.info("Expected simulated exception when flushing region, {}", e.getMessage());
    // simulated to abort server
    Mockito.doReturn(true).when(rsServices).isAborted();
    // the region normally does not accept writes after a DroppedSnapshotException;
    // we mock around it for this test.
    region.setClosing(false);
  }
  // write more data
  int moreRow = 10;
  for (int i = writtenRowCount; i < writtenRowCount + moreRow; i++) {
    Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
    put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"),
        Bytes.toBytes("val"));
    region.put(put);
  }
  writtenRowCount += moreRow;
  // call flush again
  CustomStoreFlusher.throwExceptionWhenFlushing.set(false);
  try {
    region.flush(true);
  } catch (IOException t) {
    LOG.info("Expected exception when flushing region because server is stopped: "
        + t.getMessage());
  }
  region.close(true);
  wal.shutdown();
  // Let us try to split and recover
  runWALSplit(this.conf);
  WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
  Mockito.doReturn(false).when(rsServices).isAborted();
  HRegion region2 = HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal2, this.conf, rsServices, null);
  scanner = region2.getScanner(new Scan());
  assertEquals(writtenRowCount, getScannedCount(scanner));
}
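The two put loops above spread rows round-robin across the table's column families. The same pattern in isolation, as a sketch; the table, row, and qualifier names are placeholders, and no cluster is needed because Put objects are built entirely on the client side:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RoundRobinPutSketch {
  public static void main(String[] args) {
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("t"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("a"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("b"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("c"))
        .build();
    List<ColumnFamilyDescriptor> families = Arrays.asList(htd.getColumnFamilies());
    for (int i = 0; i < 10; i++) {
      // Each row goes to one family, cycling through them in order, as the test does.
      byte[] family = families.get(i % families.size()).getName();
      Put put = new Put(Bytes.toBytes("row" + i));
      put.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("val"));
      System.out.println(Bytes.toString(put.getRow()) + " -> " + Bytes.toString(family));
    }
  }
}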
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class AbstractTestWALReplay, method test2727.
/**
 * Tests for hbase-2727.
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-2727">HBASE-2727</a>
 */
@Test
public void test2727() throws Exception {
  // Test being able to have > 1 set of edits in the recovered.edits directory.
  // Ensure edits are replayed properly.
  final TableName tableName = TableName.valueOf("test2727");
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  Path basedir = CommonFSUtils.getTableDir(hbaseRootDir, tableName);
  deleteDir(basedir);
  TableDescriptor tableDescriptor = createBasic3FamilyHTD(tableName);
  Region region2 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, tableDescriptor);
  HBaseTestingUtil.closeRegionAndWAL(region2);
  final byte[] rowName = tableName.getName();
  WAL wal1 = createWAL(this.conf, hbaseRootDir, logName);
  // Add 1k to each family.
  final int countPerFamily = 1000;
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : tableDescriptor.getColumnFamilyNames()) {
    scopes.put(fam, 0);
  }
  for (ColumnFamilyDescriptor familyDescriptor : tableDescriptor.getColumnFamilies()) {
    addWALEdits(tableName, hri, rowName, familyDescriptor.getName(), countPerFamily, ee, wal1,
        mvcc, scopes);
  }
  wal1.shutdown();
  runWALSplit(this.conf);
  WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
  // Add 1k to each family.
  for (ColumnFamilyDescriptor familyDescriptor : tableDescriptor.getColumnFamilies()) {
    addWALEdits(tableName, hri, rowName, familyDescriptor.getName(), countPerFamily, ee, wal2,
        mvcc, scopes);
  }
  wal2.shutdown();
  runWALSplit(this.conf);
  WAL wal3 = createWAL(this.conf, hbaseRootDir, logName);
  try {
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, tableDescriptor, wal3);
    long seqid = region.getOpenSeqNum();
    // The region opens with sequenceId 1. With 6k edits, its sequence number reaches 6k + 1.
    // When opened, this region would apply the 6k edits and increment the sequenceId by 1.
    assertTrue(seqid > mvcc.getWritePoint());
    assertEquals(seqid - 1, mvcc.getWritePoint());
    LOG.debug("region.getOpenSeqNum(): " + region.getOpenSeqNum() + ", wal3.id: "
        + mvcc.getReadPoint());
    // TODO: Scan all.
    region.close();
  } finally {
    wal3.close();
  }
}
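All of the snippets above build descriptors for regions and WALs managed directly by the test harness. Against a live cluster the same TableDescriptor/ColumnFamilyDescriptor objects are handed to Admin instead; a minimal sketch assuming a reachable cluster configured on the classpath, with a hypothetical table name:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Same builder pattern as the tests above, but handed to Admin to create a real table.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example2727"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("a"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("b"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("c"))
        .build();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      if (!admin.tableExists(td.getTableName())) {
        admin.createTable(td);
      }
    }
  }
}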