Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by Apache.
From the class TestWALSplitToHFile, method testWrittenViaHRegion.
/**
* Test writing edits into an HRegion, closing it, splitting logs, opening
* Region again. Verify seqids.
*/
@Test
public void testWrittenViaHRegion()
    throws IOException, SecurityException, IllegalArgumentException, InterruptedException {
  Pair<TableDescriptor, RegionInfo> pair = setupTableAndRegion();
  TableDescriptor td = pair.getFirst();
  RegionInfo ri = pair.getSecond();
  // Write countPerFamily edits into the three families. Flush one of the
  // families during the load of edits so its seqid is not the same as the
  // others, to test that we do the right thing when seqids differ.
  WAL wal = createWAL(this.conf, rootDir, logName);
  HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal);
  long seqid = region.getOpenSeqNum();
  boolean first = true;
  for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
    addRegionEdits(ROW, cfd.getName(), countPerFamily, this.ee, region, "x");
    if (first) {
      // Flush after the first family so at least one family has a seqid
      // different from the rest.
      region.flush(true);
      first = false;
    }
  }
  // Now assert the edits made it in.
  final Get g = new Get(ROW);
  Result result = region.get(g);
  assertEquals(countPerFamily * td.getColumnFamilies().length, result.size());
  // Now close the region (without flush), split the log, reopen the region,
  // and assert that replay of the log has the correct effect: our seqids are
  // calculated correctly so all edits in the logs are seen as 'stale'/old.
  region.close(true);
  wal.shutdown();
  try {
    WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals);
  } catch (Exception e) {
    LOG.debug("Got exception", e);
  }
  WAL wal2 = createWAL(this.conf, rootDir, logName);
  HRegion region2 = HRegion.openHRegion(conf, this.fs, rootDir, ri, td, wal2);
  long seqid2 = region2.getOpenSeqNum();
  assertTrue(seqid + result.size() < seqid2);
  final Result result1b = region2.get(g);
  assertEquals(result.size(), result1b.size());
  // Next, add more edits, then 'crash' this region by stealing its wal out
  // from under it, and assert that replay of the log adds the edits back
  // correctly when the region is opened again.
  for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
    addRegionEdits(ROW, hcd.getName(), countPerFamily, this.ee, region2, "y");
  }
  // Get count of edits.
  final Result result2 = region2.get(g);
  assertEquals(2 * result.size(), result2.size());
  wal2.sync();
  final Configuration newConf = HBaseConfiguration.create(this.conf);
  User user = HBaseTestingUtil.getDifferentUser(newConf, td.getTableName().getNameAsString());
  user.runAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(conf), conf, wals);
      FileSystem newFS = FileSystem.get(newConf);
      // Make a new wal for the new region open.
      WAL wal3 = createWAL(newConf, rootDir, logName);
      Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName());
      HRegion region3 = new HRegion(tableDir, wal3, newFS, newConf, ri, td, null);
      long seqid3 = region3.initialize();
      Result result3 = region3.get(g);
      // Assert that the count of cells is the same as before the crash.
      assertEquals(result2.size(), result3.size());
      // We can't close wal2; it was appropriated when we split.
      region3.close();
      wal3.close();
      return null;
    }
  });
}
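
The tests above all rely on a setupTableAndRegion() helper that is not shown on this page. A minimal sketch of what such a helper might produce, assuming a three-family table spanning the whole key space; the table and family names are illustrative assumptions, not the actual values from TestWALSplitToHFile:

// Hypothetical sketch: build a three-family TableDescriptor plus a RegionInfo
// covering the full key range, roughly what setupTableAndRegion() returns.
private Pair<TableDescriptor, RegionInfo> setupTableAndRegionSketch() {
  TableName tableName = TableName.valueOf("testTable"); // assumed name
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
  for (String family : new String[] { "a", "b", "c" }) { // assumed families
    builder.setColumnFamily(
      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build());
  }
  TableDescriptor td = builder.build();
  RegionInfo ri = RegionInfoBuilder.newBuilder(tableName).build();
  return new Pair<>(td, ri);
}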
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by Apache.
From the class TestWALSplitToHFile, method testRecoverSequenceId.
@Test
public void testRecoverSequenceId() throws Exception {
  Pair<TableDescriptor, RegionInfo> pair = setupTableAndRegion();
  TableDescriptor td = pair.getFirst();
  RegionInfo ri = pair.getSecond();
  WAL wal = createWAL(this.conf, rootDir, logName);
  HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal);
  Map<Integer, Map<String, Long>> seqIdMap = new HashMap<>();
  // Write data and do not flush
  for (int i = 0; i < countPerFamily; i++) {
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      region.put(new Put(Bytes.toBytes(i)).addColumn(cfd.getName(), QUALIFIER, VALUE1));
      Result result = region.get(new Get(Bytes.toBytes(i)).addFamily(cfd.getName()));
      assertTrue(Bytes.equals(VALUE1, result.getValue(cfd.getName(), QUALIFIER)));
      List<Cell> cells = result.listCells();
      assertEquals(1, cells.size());
      seqIdMap.computeIfAbsent(i, r -> new HashMap<>())
        .put(cfd.getNameAsString(), cells.get(0).getSequenceId());
    }
  }
  // Now close the region without flush
  region.close(true);
  wal.shutdown();
  // Split the log
  WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals);
  // Reopen the region
  WAL wal2 = createWAL(this.conf, rootDir, logName);
  HRegion region2 = HRegion.openHRegion(conf, this.fs, rootDir, ri, td, wal2);
  // Assert the seqid was recovered
  for (int i = 0; i < countPerFamily; i++) {
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      Result result = region2.get(new Get(Bytes.toBytes(i)).addFamily(cfd.getName()));
      assertTrue(Bytes.equals(VALUE1, result.getValue(cfd.getName(), QUALIFIER)));
      List<Cell> cells = result.listCells();
      assertEquals(1, cells.size());
      assertEquals((long) seqIdMap.get(i).get(cfd.getNameAsString()),
        cells.get(0).getSequenceId());
    }
  }
}
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by Apache.
From the class HBaseTestingUtility, method generateColumnDescriptors.
/**
 * Create a set of column descriptors covering every combination of the
 * available compression algorithms, data block encodings, and bloom filter
 * types.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<ColumnFamilyDescriptor> generateColumnDescriptors(final String prefix) {
  List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
  long familyId = 0;
  for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
      for (BloomType bloomType : BloomType.values()) {
        String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
        ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
        columnFamilyDescriptorBuilder.setCompressionType(compressionType);
        columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
        columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
        columnFamilyDescriptors.add(columnFamilyDescriptorBuilder.build());
        familyId++;
      }
    }
  }
  return columnFamilyDescriptors;
}
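
A hedged usage sketch: feeding the generated descriptors into a TableDescriptorBuilder to create a table with one family per codec combination. The table name and the in-scope admin variable are assumptions for illustration:

// Sketch: create a table exercising every compression/encoding/bloom combo.
TableDescriptorBuilder builder =
  TableDescriptorBuilder.newBuilder(TableName.valueOf("codecTest")); // assumed name
for (ColumnFamilyDescriptor cfd : generateColumnDescriptors("test")) {
  builder.setColumnFamily(cfd);
}
admin.createTable(builder.build()); // 'admin' assumed to be an open Admin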
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by Apache.
From the class TestWALSplitToHFile, method testPutWithSameTimestamp.
@Test
public void testPutWithSameTimestamp() throws Exception {
  Pair<TableDescriptor, RegionInfo> pair = setupTableAndRegion();
  TableDescriptor td = pair.getFirst();
  RegionInfo ri = pair.getSecond();
  WAL wal = createWAL(this.conf, rootDir, logName);
  HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal);
  final long timestamp = this.ee.currentTime();
  // Write data and flush
  for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
    region.put(new Put(ROW).addColumn(cfd.getName(), QUALIFIER, timestamp, VALUE1));
  }
  region.flush(true);
  // Write data with the same timestamp and do not flush
  for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
    region.put(new Put(ROW).addColumn(cfd.getName(), QUALIFIER, timestamp, VALUE2));
  }
  // Now close the region without flush
  region.close(true);
  wal.shutdown();
  // Split the log
  WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals);
  // Reopen the region. With identical timestamps, the cell with the higher
  // sequence id wins, so the replayed (unflushed) VALUE2 edits must mask the
  // flushed VALUE1 edits.
  WAL wal2 = createWAL(this.conf, rootDir, logName);
  HRegion region2 = HRegion.openHRegion(conf, this.fs, rootDir, ri, td, wal2);
  Result result2 = region2.get(new Get(ROW));
  assertEquals(td.getColumnFamilies().length, result2.size());
  for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
    assertTrue(Bytes.equals(VALUE2, result2.getValue(cfd.getName(), QUALIFIER)));
  }
}
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project Gora by Apache.
From the class TestHBaseStore, method testNewVersionBehavior.
@Test
public void testNewVersionBehavior() throws IOException {
  // The following test fails in HBase 2.0.5 when NEW_VERSION_BEHAVIOR == true:
  // when persisting with qualifier == null, deleting the row does not delete
  // the column family. Once these issues are fixed, we could remove the
  // workarounds we have added in the HBaseDataStore#put method.
  Connection conn = ConnectionFactory.createConnection(conf);
  TableName test = TableName.valueOf("Test");
  TableDescriptorBuilder tableDescBuilder = TableDescriptorBuilder.newBuilder(test);
  ColumnFamilyDescriptorBuilder columnDescBuilder =
    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test-family"));
  columnDescBuilder.setNewVersionBehavior(true);
  ColumnFamilyDescriptor columnDescriptor = columnDescBuilder.build();
  tableDescBuilder.setColumnFamily(columnDescriptor);
  TableDescriptor tableDescriptor = tableDescBuilder.build();
  conn.getAdmin().createTable(tableDescriptor);
  Table table = conn.getTable(test);
  Put put = new Put(Bytes.toBytes("com.example/http"));
  put.addColumn(Bytes.toBytes("test-family"), null, Bytes.toBytes("test-value"));
  table.put(put);
  Delete del = new Delete(Bytes.toBytes("com.example/http"));
  table.delete(del);
  Get get = new Get(Bytes.toBytes("com.example/http"));
  // get.addColumn(Bytes.toBytes("test-family"), null);
  Result result = table.get(get);
  byte[] value = result.getValue(Bytes.toBytes("test-family"), null);
  // After deleting the row, the value should be gone; a non-null value here
  // means the delete did not remove the column family (the HBase 2.0.5 bug
  // described above).
  assertNull(value);
}
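
One caveat with the snippet above: the Connection, Admin, and Table are never closed. A minimal sketch of the same descriptor setup with resources managed by try-with-resources, using the fluent builder chain (equivalent to the step-by-step builder calls above; a Configuration named conf is assumed to be in scope):

// Sketch: same table/family setup as above, with reliable cleanup.
try (Connection conn = ConnectionFactory.createConnection(conf);
     Admin admin = conn.getAdmin()) {
  TableDescriptor tableDescriptor =
    TableDescriptorBuilder.newBuilder(TableName.valueOf("Test"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test-family"))
        .setNewVersionBehavior(true)
        .build())
      .build();
  admin.createTable(tableDescriptor);
}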