Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
In class TestStoreFile, method testCacheOnWriteEvictOnClose.
@Test
public void testCacheOnWriteEvictOnClose() throws Exception {
  Configuration conf = this.conf;
  // Find a home for our files (regiondir ("7e0102") and familyname).
  Path baseDir = new Path(new Path(testDir, "7e0102"), "twoCOWEOC");
  // Grab the block cache and get the initial hit/miss counts
  BlockCache bc = new CacheConfig(conf).getBlockCache();
  assertNotNull(bc);
  CacheStats cs = bc.getStats();
  long startHit = cs.getHitCount();
  long startMiss = cs.getMissCount();
  long startEvicted = cs.getEvictedCount();
  // Let's write a StoreFile with three blocks, with cache on write off
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
  CacheConfig cacheConf = new CacheConfig(conf);
  Path pathCowOff = new Path(baseDir, "123456789");
  StoreFileWriter writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
  StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE);
  LOG.debug(hsf.getPath().toString());
  // Read this file, we should see 3 misses
  StoreFileReader reader = hsf.createReader();
  reader.loadFileInfo();
  StoreFileScanner scanner = getStoreFileScanner(reader, true, true);
  scanner.seek(KeyValue.LOWESTKEY);
  while (scanner.next() != null) ;
  assertEquals(startHit, cs.getHitCount());
  assertEquals(startMiss + 3, cs.getMissCount());
  assertEquals(startEvicted, cs.getEvictedCount());
  startMiss += 3;
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
  // Now write a StoreFile with three blocks, with cache on write on
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
  cacheConf = new CacheConfig(conf);
  Path pathCowOn = new Path(baseDir, "123456788");
  writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
  hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE);
  // Read this file, we should see 3 hits
  reader = hsf.createReader();
  scanner = getStoreFileScanner(reader, true, true);
  scanner.seek(KeyValue.LOWESTKEY);
  while (scanner.next() != null) ;
  assertEquals(startHit + 3, cs.getHitCount());
  assertEquals(startMiss, cs.getMissCount());
  assertEquals(startEvicted, cs.getEvictedCount());
  startHit += 3;
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
  // Let's read back the two files to ensure the blocks exactly match
  hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE);
  StoreFileReader readerOne = hsf.createReader();
  readerOne.loadFileInfo();
  StoreFileScanner scannerOne = getStoreFileScanner(readerOne, true, true);
  scannerOne.seek(KeyValue.LOWESTKEY);
  hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE);
  StoreFileReader readerTwo = hsf.createReader();
  readerTwo.loadFileInfo();
  StoreFileScanner scannerTwo = getStoreFileScanner(readerTwo, true, true);
  scannerTwo.seek(KeyValue.LOWESTKEY);
  Cell kv1 = null;
  Cell kv2 = null;
  while ((kv1 = scannerOne.next()) != null) {
    kv2 = scannerTwo.next();
    assertTrue(kv1.equals(kv2));
    KeyValue keyv1 = KeyValueUtil.ensureKeyValue(kv1);
    KeyValue keyv2 = KeyValueUtil.ensureKeyValue(kv2);
    assertTrue(Bytes.compareTo(keyv1.getBuffer(), keyv1.getKeyOffset(), keyv1.getKeyLength(),
        keyv2.getBuffer(), keyv2.getKeyOffset(), keyv2.getKeyLength()) == 0);
    assertTrue(Bytes.compareTo(kv1.getValueArray(), kv1.getValueOffset(), kv1.getValueLength(),
        kv2.getValueArray(), kv2.getValueOffset(), kv2.getValueLength()) == 0);
  }
  assertNull(scannerTwo.next());
  assertEquals(startHit + 6, cs.getHitCount());
  assertEquals(startMiss, cs.getMissCount());
  assertEquals(startEvicted, cs.getEvictedCount());
  startHit += 6;
  scannerOne.close();
  readerOne.close(cacheConf.shouldEvictOnClose());
  scannerTwo.close();
  readerTwo.close(cacheConf.shouldEvictOnClose());
  // Let's close the first file with evict on close turned on
  conf.setBoolean("hbase.rs.evictblocksonclose", true);
  cacheConf = new CacheConfig(conf);
  hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE);
  reader = hsf.createReader();
  reader.close(cacheConf.shouldEvictOnClose());
  // We should have 3 new evictions but the evict count stat should not change. Eviction because
  // of HFile invalidation is not counted along with normal evictions
  assertEquals(startHit, cs.getHitCount());
  assertEquals(startMiss, cs.getMissCount());
  assertEquals(startEvicted, cs.getEvictedCount());
  // Let's close the second file with evict on close turned off
  conf.setBoolean("hbase.rs.evictblocksonclose", false);
  cacheConf = new CacheConfig(conf);
  hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE);
  reader = hsf.createReader();
  reader.close(cacheConf.shouldEvictOnClose());
  // We expect no changes
  assertEquals(startHit, cs.getHitCount());
  assertEquals(startMiss, cs.getMissCount());
  assertEquals(startEvicted, cs.getEvictedCount());
}
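The cell-by-cell comparison in this test relies on the raw array/offset/length accessors of the Cell interface together with Bytes.compareTo. A minimal standalone sketch of the same pattern, not taken from the HBase test (the class and method names here are made up for illustration):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public final class CellCompareSketch {
  // Hypothetical helper: true when both cells address the same row/family/qualifier
  // and carry byte-identical values, mirroring the Bytes.compareTo checks in the test.
  public static boolean sameCoordinatesAndValue(Cell a, Cell b) {
    if (a == null || b == null) {
      return a == b;
    }
    boolean sameCoordinates = CellUtil.matchingRow(a, b)
        && CellUtil.matchingFamily(a, b)
        && CellUtil.matchingQualifier(a, b);
    boolean sameValue = Bytes.compareTo(
        a.getValueArray(), a.getValueOffset(), a.getValueLength(),
        b.getValueArray(), b.getValueOffset(), b.getValueLength()) == 0;
    return sameCoordinates && sameValue;
  }
}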
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
In class TestStoreFile, method testReference.
/**
 * Test that our mechanism of writing store files in one region to reference
 * store files in other regions works.
 * @throws IOException
 */
@Test
public void testReference() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
      new Path(testDir, hri.getTable().getNameAsString()), hri);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
  writeStoreFile(writer);
  Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE);
  StoreFileReader reader = hsf.createReader();
  // Split on a row, not in middle of row. Midkey returned by reader
  // may be in middle of row. Create new one with empty column and
  // timestamp.
  Cell kv = reader.midkey();
  byte[] midRow = CellUtil.cloneRow(kv);
  kv = reader.getLastKey();
  byte[] finalRow = CellUtil.cloneRow(kv);
  hsf.closeReader(true);
  // Make a reference
  HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
  Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
  StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf, BloomType.NONE);
  // Now confirm that I can read from the reference and that it only gets
  // keys from top half of the file.
  HFileScanner s = refHsf.createReader().getScanner(false, false);
  for (boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next(); ) {
    ByteBuffer bb = ByteBuffer.wrap(((KeyValue) s.getKey()).getKey());
    kv = KeyValueUtil.createKeyValueFromKey(bb);
    if (first) {
      assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
          midRow, 0, midRow.length));
      first = false;
    }
  }
  assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
      finalRow, 0, finalRow.length));
}
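Here reader.midkey() and reader.getLastKey() hand back Cell instances, and CellUtil.cloneRow copies the row bytes out of them. A minimal standalone sketch of the CellUtil cloning accessors, not part of the test above (helper name is hypothetical):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public final class CellCloneSketch {
  // Hypothetical helper: copies the row, family, qualifier and value of a cell
  // into fresh byte arrays and renders them as a readable string.
  public static String describe(Cell cell) {
    byte[] row = CellUtil.cloneRow(cell);
    byte[] family = CellUtil.cloneFamily(cell);
    byte[] qualifier = CellUtil.cloneQualifier(cell);
    byte[] value = CellUtil.cloneValue(cell);
    return Bytes.toStringBinary(row) + "/" + Bytes.toStringBinary(family)
        + ":" + Bytes.toStringBinary(qualifier) + "=" + Bytes.toStringBinary(value);
  }
}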
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
In class TestStoreFileRefresherChore, method verifyData.
private void verifyData(Region newReg, int startRow, int numRows, byte[] qf, byte[]... families)
    throws IOException {
  for (int i = startRow; i < startRow + numRows; i++) {
    byte[] row = Bytes.toBytes("" + i);
    Get get = new Get(row);
    for (byte[] family : families) {
      get.addColumn(family, qf);
    }
    Result result = newReg.get(get);
    Cell[] raw = result.rawCells();
    assertEquals(families.length, result.size());
    for (int j = 0; j < families.length; j++) {
      assertTrue(CellUtil.matchingRow(raw[j], row));
      assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
      assertTrue(CellUtil.matchingQualifier(raw[j], qf));
    }
  }
}
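The same verification pattern works against a client-side Table as it does against a server-side Region: both hand back a Result whose rawCells() can be checked with the CellUtil matchers. A minimal client-side sketch, assuming an already open Table and the same cell ordering assumption as verifyData (class and helper names are placeholders, not HBase API):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

public final class VerifyRowSketch {
  // Hypothetical helper: returns true if the row holds the expected qualifier
  // in every listed family, checked via CellUtil matchers as in verifyData.
  public static boolean rowHasColumns(Table table, byte[] row, byte[] qf, byte[]... families)
      throws IOException {
    Get get = new Get(row);
    for (byte[] family : families) {
      get.addColumn(family, qf);
    }
    Result result = table.get(get);
    Cell[] raw = result.rawCells();
    if (raw.length != families.length) {
      return false;
    }
    for (int j = 0; j < families.length; j++) {
      if (!CellUtil.matchingRow(raw[j], row)
          || !CellUtil.matchingFamily(raw[j], families[j])
          || !CellUtil.matchingQualifier(raw[j], qf)) {
        return false;
      }
    }
    return true;
  }
}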
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
In class TestTags, method testFlushAndCompactionwithCombinations.
@Test
public void testFlushAndCompactionwithCombinations() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  byte[] fam = Bytes.toBytes("info");
  byte[] row = Bytes.toBytes("rowa");
  // column names
  byte[] qual = Bytes.toBytes("qual");
  byte[] row1 = Bytes.toBytes("rowb");
  byte[] row2 = Bytes.toBytes("rowc");
  byte[] rowd = Bytes.toBytes("rowd");
  byte[] rowe = Bytes.toBytes("rowe");
  Table table = null;
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor colDesc = new HColumnDescriptor(fam);
    colDesc.setBlockCacheEnabled(true);
    colDesc.setDataBlockEncoding(encoding);
    desc.addFamily(colDesc);
    Admin admin = TEST_UTIL.getAdmin();
    admin.createTable(desc);
    try {
      table = TEST_UTIL.getConnection().getTable(tableName);
      Put put = new Put(row);
      byte[] value = Bytes.toBytes("value");
      put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
      int bigTagLen = Short.MAX_VALUE - 5;
      put.setAttribute("visibility", new byte[bigTagLen]);
      table.put(put);
      Put put1 = new Put(row1);
      byte[] value1 = Bytes.toBytes("1000dfsdf");
      put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
      table.put(put1);
      admin.flush(tableName);
      // We are lacking an API for confirming flush request compaction.
      // Just sleep for a short time. We won't be able to confirm flush
      // completion but the test won't hang now or in the future if
      // default compaction policy causes compaction between flush and
      // when we go to confirm it.
      Thread.sleep(1000);
      put1 = new Put(row2);
      value1 = Bytes.toBytes("1000dfsdf");
      put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
      table.put(put1);
      admin.flush(tableName);
      Thread.sleep(1000);
      Put put2 = new Put(rowd);
      byte[] value2 = Bytes.toBytes("1000dfsdf");
      put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
      table.put(put2);
      put2 = new Put(rowe);
      value2 = Bytes.toBytes("1000dfsddfdf");
      put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
      put.setAttribute("visibility", Bytes.toBytes("ram"));
      table.put(put2);
      admin.flush(tableName);
      Thread.sleep(1000);
      TestCoprocessorForTags.checkTagPresence = true;
      Scan s = new Scan(row);
      s.setCaching(1);
      ResultScanner scanner = table.getScanner(s);
      try {
        Result next = null;
        while ((next = scanner.next()) != null) {
          CellScanner cellScanner = next.cellScanner();
          cellScanner.advance();
          Cell current = cellScanner.current();
          if (CellUtil.matchingRow(current, row)) {
            assertEquals(1, TestCoprocessorForTags.tags.size());
            Tag tag = TestCoprocessorForTags.tags.get(0);
            assertEquals(bigTagLen, tag.getValueLength());
          } else {
            assertEquals(0, TestCoprocessorForTags.tags.size());
          }
        }
      } finally {
        if (scanner != null) {
          scanner.close();
        }
        TestCoprocessorForTags.checkTagPresence = false;
      }
      while (admin.getCompactionState(tableName) != CompactionState.NONE) {
        Thread.sleep(10);
      }
      TestCoprocessorForTags.checkTagPresence = true;
      scanner = table.getScanner(s);
      try {
        Result next = null;
        while ((next = scanner.next()) != null) {
          CellScanner cellScanner = next.cellScanner();
          cellScanner.advance();
          Cell current = cellScanner.current();
          if (CellUtil.matchingRow(current, row)) {
            assertEquals(1, TestCoprocessorForTags.tags.size());
            Tag tag = TestCoprocessorForTags.tags.get(0);
            assertEquals(bigTagLen, tag.getValueLength());
          } else {
            assertEquals(0, TestCoprocessorForTags.tags.size());
          }
        }
      } finally {
        if (scanner != null) {
          scanner.close();
        }
        TestCoprocessorForTags.checkTagPresence = false;
      }
    } finally {
      if (table != null) {
        table.close();
      }
      // delete the table
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }
  }
}
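Each Result returned by the scan above is also a CellScannable: advance() and current() walk its cells one at a time. A minimal standalone sketch of that iteration pattern (the class and helper names are made up for illustration):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;

public final class ResultCellWalkSketch {
  // Hypothetical helper: counts how many cells of the Result belong to the given row,
  // using the same CellScanner advance()/current() pattern as the test.
  public static int countCellsForRow(Result result, byte[] row) throws IOException {
    int count = 0;
    CellScanner cellScanner = result.cellScanner();
    while (cellScanner.advance()) {
      Cell current = cellScanner.current();
      if (CellUtil.matchingRow(current, row)) {
        count++;
      }
    }
    return count;
  }
}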
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
In class TestTags, method testFlushAndCompactionWithoutTags.
@Test
public void testFlushAndCompactionWithoutTags() throws Exception {
  Table table = null;
  try {
    TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    byte[] fam = Bytes.toBytes("info");
    byte[] row = Bytes.toBytes("rowa");
    // column names
    byte[] qual = Bytes.toBytes("qual");
    byte[] row1 = Bytes.toBytes("rowb");
    byte[] row2 = Bytes.toBytes("rowc");
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor colDesc = new HColumnDescriptor(fam);
    colDesc.setBlockCacheEnabled(true);
    // colDesc.setDataBlockEncoding(DataBlockEncoding.NONE);
    colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
    desc.addFamily(colDesc);
    Admin admin = TEST_UTIL.getAdmin();
    admin.createTable(desc);
    table = TEST_UTIL.getConnection().getTable(tableName);
    Put put = new Put(row);
    byte[] value = Bytes.toBytes("value");
    put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
    table.put(put);
    admin.flush(tableName);
    // We are lacking an API for confirming flush request compaction.
    // Just sleep for a short time. We won't be able to confirm flush
    // completion but the test won't hang now or in the future if
    // default compaction policy causes compaction between flush and
    // when we go to confirm it.
    Thread.sleep(1000);
    Put put1 = new Put(row1);
    byte[] value1 = Bytes.toBytes("1000dfsdf");
    put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
    table.put(put1);
    admin.flush(tableName);
    Thread.sleep(1000);
    Put put2 = new Put(row2);
    byte[] value2 = Bytes.toBytes("1000dfsdf");
    put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
    table.put(put2);
    admin.flush(tableName);
    Thread.sleep(1000);
    Scan s = new Scan(row);
    ResultScanner scanner = table.getScanner(s);
    try {
      Result[] next = scanner.next(3);
      for (Result result : next) {
        CellScanner cellScanner = result.cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertEquals(0, current.getTagsLength());
      }
    } finally {
      if (scanner != null) {
        scanner.close();
      }
    }
    admin.compact(tableName);
    while (admin.getCompactionState(tableName) != CompactionState.NONE) {
      Thread.sleep(10);
    }
    s = new Scan(row);
    scanner = table.getScanner(s);
    try {
      Result[] next = scanner.next(3);
      for (Result result : next) {
        CellScanner cellScanner = result.cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertEquals(0, current.getTagsLength());
      }
    } finally {
      if (scanner != null) {
        scanner.close();
      }
    }
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
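getTagsLength() is the cheap way to see whether a Cell carries any tags at all; the raw tag bytes live at getTagsArray()/getTagsOffset(). A minimal standalone sketch, not taken from the test above (class and helper names are hypothetical), that checks a whole Result for tagged cells:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;

public final class TagPresenceSketch {
  // Hypothetical helper: true if any cell in the Result has a non-empty tags block.
  public static boolean hasTags(Result result) {
    for (Cell cell : result.rawCells()) {
      if (cell.getTagsLength() > 0) {
        return true;
      }
    }
    return false;
  }
}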