Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
The class CompactedHFilesDischarger, method chore().
@Override
public void chore() {
  // When the test case is not spinning up a cluster, there is nothing to do.
  if (regionServerServices == null) {
    return;
  }
  List<Region> onlineRegions = regionServerServices.getOnlineRegions();
  if (onlineRegions != null) {
    for (Region region : onlineRegions) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("Started the compacted hfiles cleaner for the region " + region.getRegionInfo());
      }
      for (Store store : region.getStores()) {
        try {
          if (useExecutor && regionServerServices != null) {
            CompactedHFilesDischargeHandler handler = new CompactedHFilesDischargeHandler(
                (Server) regionServerServices, EventType.RS_COMPACTED_FILES_DISCHARGER, (HStore) store);
            regionServerServices.getExecutorService().submit(handler);
          } else {
            // Call synchronously if the RegionServerServices are not available.
            store.closeAndArchiveCompactedFiles();
          }
          if (LOG.isTraceEnabled()) {
            LOG.trace("Completed archiving the compacted files for the region "
                + region.getRegionInfo() + " under the store " + store.getColumnFamilyName());
          }
        } catch (Exception e) {
          LOG.error("Exception while trying to close and archive the compacted store files of the store "
              + store.getColumnFamilyName() + " in the region " + region.getRegionInfo(), e);
        }
      }
      if (LOG.isTraceEnabled()) {
        LOG.trace("Completed the compacted hfiles cleaner for the region " + region.getRegionInfo());
      }
    }
  }
}
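For context, a minimal sketch of how this chore might be wired up so that chore() runs periodically. The sixty-second period is an arbitrary illustrative choice, and the (period, stopper, regionServerServices) constructor is an assumption to verify against your HBase version:

void scheduleDischarger(Stoppable stopper, RegionServerServices rss) {
  // Minimal scheduling sketch (assumptions noted above): run chore() periodically
  // via HBase's ChoreService.
  CompactedHFilesDischarger discharger =
      new CompactedHFilesDischarger(60 * 1000, stopper, rss);  // period is illustrative
  ChoreService choreService = new ChoreService("COMPACTED_FILES_DISCHARGER");
  choreService.scheduleChore(discharger);
}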
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
The class SnapshotManifest, method addRegion().
/**
 * Creates a 'manifest' for the specified region, by reading directly from the HRegion object.
 * This is used by the "online snapshot" when the table is enabled.
 */
public void addRegion(final HRegion region) throws IOException {
  // 0. Get the ManifestBuilder/RegionVisitor.
  RegionVisitor visitor = createRegionVisitor(desc);
  // 1. Dump the region meta info into the snapshot directory.
  LOG.debug("Storing '" + region + "' region-info for snapshot.");
  Object regionData = visitor.regionOpen(region.getRegionInfo());
  monitor.rethrowException();
  // 2. Iterate through all the stores in the region.
  LOG.debug("Creating references for hfiles");
  for (Store store : region.getStores()) {
    // 2.1. Build the snapshot reference for the store.
    Object familyData = visitor.familyOpen(regionData, store.getFamily().getName());
    monitor.rethrowException();
    List<StoreFile> storeFiles = new ArrayList<>(store.getStorefiles());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
    }
    // 2.2. Iterate through all the store's files and create "references".
    for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
      StoreFile storeFile = storeFiles.get(i);
      monitor.rethrowException();
      // Create a "reference" to this store file.
      LOG.debug("Adding reference for file (" + (i + 1) + "/" + sz + "): " + storeFile.getPath());
      visitor.storeFile(regionData, familyData, storeFile.getFileInfo());
    }
    visitor.familyClose(regionData, familyData);
  }
  visitor.regionClose(regionData);
}
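A hedged sketch of the calling side during an online snapshot. SnapshotManifest.create(...) and consolidate() reflect the API as I understand it for this HBase version, so treat the exact signatures (and the SnapshotDescription type's package) as assumptions to check:

// Hypothetical driver: build a manifest for one online region and persist it.
void snapshotOnlineRegion(Configuration conf, FileSystem fs, Path workingDir,
    SnapshotDescription desc, ForeignExceptionSnare monitor, HRegion region) throws IOException {
  SnapshotManifest manifest = SnapshotManifest.create(conf, fs, workingDir, desc, monitor);
  manifest.addRegion(region);  // the method shown above
  manifest.consolidate();      // merge per-region data into the final manifest file
}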
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
The class HFileArchiveTestingUtil, method getStoreArchivePath().
public static Path getStoreArchivePath(HBaseTestingUtility util, String tableName, byte[] storeName)
    throws IOException {
  byte[] table = Bytes.toBytes(tableName);
  // Get the region serving our table (the first one is enough here).
  List<HRegion> servingRegions = util.getHBaseCluster().getRegions(table);
  HRegion region = servingRegions.get(0);
  // Look up the store whose archive directory we want to resolve.
  Store store = region.getStore(storeName);
  return HFileArchiveTestingUtil.getStoreArchivePath(util.getConfiguration(), region, store);
}
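A short hypothetical usage from a test; TEST_UTIL, tableName, and FAMILY are assumed fixtures like those in the other snippets on this page:

// Hypothetical test usage: locate the archive directory for a column family's
// store and check that archived HFiles actually landed there.
Path archiveDir = HFileArchiveTestingUtil.getStoreArchivePath(TEST_UTIL, tableName, FAMILY);
FileSystem fs = TEST_UTIL.getTestFileSystem();
assertTrue("Expected archived store files under " + archiveDir, fs.exists(archiveDir));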
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
The class TestFromClientSide, method testCacheOnWriteEvictOnClose().
/**
 * Tests that cache-on-write works all the way up from the client side.
 * Performs inserts, flushes, and compactions, verifying changes in the block
 * cache along the way.
 */
@Test
public void testCacheOnWriteEvictOnClose() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[] data = Bytes.toBytes("data");
  Table table = TEST_UTIL.createTable(tableName, FAMILY);
  try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    // Get the block cache and region.
    String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
    Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
    Store store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    BlockCache cache = cacheConf.getBlockCache();
    // Establish baseline stats.
    long startBlockCount = cache.getBlockCount();
    long startBlockHits = cache.getStats().getHitCount();
    long startBlockMiss = cache.getStats().getMissCount();
    // Wait until the baseline is stable (minimum 500 ms): restart the countdown
    // whenever any of the three counters moves.
    for (int i = 0; i < 5; i++) {
      Thread.sleep(100);
      if (startBlockCount != cache.getBlockCount()
          || startBlockHits != cache.getStats().getHitCount()
          || startBlockMiss != cache.getStats().getMissCount()) {
        startBlockCount = cache.getBlockCount();
        startBlockHits = cache.getStats().getHitCount();
        startBlockMiss = cache.getStats().getMissCount();
        i = -1;
      }
    }
    // Insert data.
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
    // The data was in the memstore, so don't expect any cache changes.
    assertEquals(startBlockCount, cache.getBlockCount());
    assertEquals(startBlockHits, cache.getStats().getHitCount());
    assertEquals(startBlockMiss, cache.getStats().getMissCount());
    // Flush the data.
    System.out.println("Flushing cache");
    region.flush(true);
    // Expect one more block in the cache, no change in hits/misses.
    long expectedBlockCount = startBlockCount + 1;
    long expectedBlockHits = startBlockHits;
    long expectedBlockMiss = startBlockMiss;
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // Read the data back and expect the same blocks, one new hit, no misses.
    assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // Insert a second column and read the row: no new blocks, one new hit.
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    byte[] data2 = Bytes.add(data, data);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    Result r = table.get(new Get(ROW));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // Flush again: one new block.
    System.out.println("Flushing cache");
    region.flush(true);
    assertEquals(++expectedBlockCount, cache.getBlockCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // Compact: net minus two blocks, two new hits, no misses.
    System.out.println("Compacting");
    assertEquals(2, store.getStorefilesCount());
    store.triggerMajorCompaction();
    region.compact(true);
    // Wait at most 10 seconds for the store file count to drop.
    waitForStoreFileCount(store, 1, 10000);
    assertEquals(1, store.getStorefilesCount());
    // Two blocks were evicted, none cached.
    expectedBlockCount -= 2;
    assertEquals(expectedBlockCount, cache.getBlockCount());
    expectedBlockHits += 2;
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    // Read the row again; this should be a cache miss because data blocks
    // are not cached on compaction.
    r = table.get(new Get(ROW));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
    // One data block was cached by the read.
    expectedBlockCount += 1;
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(++expectedBlockMiss, cache.getStats().getMissCount());
  }
}
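The test calls a waitForStoreFileCount helper that isn't shown in this excerpt. A plausible reconstruction (the name and parameters come from the call above; the body is an assumption) is a simple polling loop:

// Hypothetical reconstruction of the helper used above: poll until the store
// reports the expected number of store files, or the timeout elapses.
private static void waitForStoreFileCount(Store store, int count, int timeout)
    throws InterruptedException {
  long start = System.currentTimeMillis();
  while (start + timeout > System.currentTimeMillis() && store.getStorefilesCount() != count) {
    Thread.sleep(100);
  }
  assertEquals("Timed out waiting for store file count", count, store.getStorefilesCount());
}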
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
The class TestBlockEvictionFromClient, method setCacheProperties().
private BlockCache setCacheProperties(Region region) {
  Iterator<Store> strItr = region.getStores().iterator();
  BlockCache cache = null;
  while (strItr.hasNext()) {
    Store store = strItr.next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    // Every store shares the region's block cache; keep the last one seen.
    cache = cacheConf.getBlockCache();
  }
  return cache;
}
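A brief hypothetical usage of this helper inside a test, assuming TEST_UTIL, tableName, and regionName fixtures like those in the TestFromClientSide snippet above:

// Hypothetical test usage: enable cache-on-write/evict-on-close for every store
// in the region, then take a baseline reading against the shared block cache.
Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
BlockCache cache = setCacheProperties(region);
long blocksBefore = cache.getBlockCount();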