Use of org.apache.hadoop.hbase.regionserver.Store in project phoenix by apache.
In class DefaultStatisticsCollector, method writeStatistics:
private void writeStatistics(final Region region, boolean delete, List<Mutation> mutations, long currentTime, Scan scan) throws IOException {
    try {
        Set<ImmutableBytesPtr> fams = guidePostsInfoWriterMap.keySet();
        // Delete statistics for a region if no guide posts were collected for it during
        // UPDATE STATISTICS. This does not affect a single-column-family stats collection during
        // compaction, as guidePostsInfoWriterMap cannot be empty in this case.
        if (cachedGuidePosts == null) {
            // We're either collecting stats for the data table or the local index table, but not both.
            // We can determine this based on the column families in the scan being prefixed with the
            // local index column family prefix. We always explicitly specify the local index column
            // families when we're collecting stats for a local index.
            boolean collectingForLocalIndex = scan != null && !scan.getFamilyMap().isEmpty()
                && MetaDataUtil.isLocalIndexFamily(scan.getFamilyMap().keySet().iterator().next());
            for (Store store : region.getStores()) {
                ImmutableBytesPtr cfKey = new ImmutableBytesPtr(store.getFamily().getName());
                boolean isLocalIndexStore = MetaDataUtil.isLocalIndexFamily(cfKey);
                if (isLocalIndexStore != collectingForLocalIndex) {
                    continue;
                }
                if (!guidePostsInfoWriterMap.containsKey(cfKey)) {
                    Pair<Long, GuidePostsInfoBuilder> emptyGps =
                        new Pair<Long, GuidePostsInfoBuilder>(0L, new GuidePostsInfoBuilder());
                    guidePostsInfoWriterMap.put(cfKey, emptyGps);
                }
            }
        }
        for (ImmutableBytesPtr fam : fams) {
            if (delete) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Deleting the stats for the region " + region.getRegionInfo());
                }
                statsWriter.deleteStatsForRegion(region, this, fam, mutations);
            }
            if (logger.isDebugEnabled()) {
                logger.debug("Adding new stats for the region " + region.getRegionInfo());
            }
            // If we've disabled stats, don't write any, just delete them
            if (this.guidePostDepth > 0) {
                statsWriter.addStats(this, fam, mutations);
            }
        }
    } catch (IOException e) {
        logger.error("Failed to update statistics table!", e);
        throw e;
    }
}
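The heart of writeStatistics is the per-family loop at the end: stale statistics are deleted first, and fresh guide posts are written only while statistics collection is enabled (guidePostDepth > 0). The following is a minimal sketch of that pattern against a hypothetical SimpleStatsWriter interface; it is a stand-in for Phoenix's StatisticsWriter, not its real API.

import java.io.IOException;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.hbase.client.Mutation;

public class StatsRewriteSketch {

    /** Hypothetical stand-in for Phoenix's StatisticsWriter; method names are illustrative only. */
    interface SimpleStatsWriter {
        void deleteStatsForRegion(byte[] family, List<Mutation> mutations) throws IOException;
        void addStats(byte[] family, List<Mutation> mutations) throws IOException;
    }

    /** Delete-then-rewrite per column family, mirroring the loop in writeStatistics above. */
    static void rewriteStats(SimpleStatsWriter writer, Set<byte[]> families, long guidePostDepth,
            boolean delete, List<Mutation> mutations) throws IOException {
        for (byte[] family : families) {
            if (delete) {
                writer.deleteStatsForRegion(family, mutations); // queue deletes for this family's old stats
            }
            if (guidePostDepth > 0) { // a depth of 0 means stats are disabled: delete only, write nothing
                writer.addStats(family, mutations); // queue puts carrying the fresh guide posts
            }
        }
    }
}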
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
In class RefreshHFilesEndpoint, method refreshHFiles:
@Override
public void refreshHFiles(RpcController controller, RefreshHFilesProtos.RefreshHFilesRequest request,
        RpcCallback<RefreshHFilesProtos.RefreshHFilesResponse> done) {
    try {
        for (Store store : env.getRegion().getStores()) {
            LOG.debug("Refreshing HFiles for region: " + store.getRegionInfo().getRegionNameAsString()
                + " and store: " + store.getColumnFamilyName() + ", class: " + store.getClass());
            store.refreshStoreFiles();
        }
    } catch (IOException ioe) {
        LOG.error("Exception while trying to refresh store files: ", ioe);
        CoprocessorRpcUtils.setControllerException(controller, ioe);
    }
    done.run(RefreshHFilesProtos.RefreshHFilesResponse.getDefaultInstance());
}
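The endpoint itself is only a remote trigger: the real work is Store.refreshStoreFiles(), which re-lists the store's directory so HFiles that appeared there out of band (for example via bulk-load replication) become readable without reopening the region. Below is a minimal, hypothetical helper that isolates that per-store refresh; it is a sketch, not part of HBase.

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Store;

public final class StoreRefreshSketch {

    private StoreRefreshSketch() {
    }

    /** Refresh every store of the given region and return how many stores were touched. */
    static int refreshAllStores(Region region) throws IOException {
        int refreshed = 0;
        for (Store store : region.getStores()) {
            store.refreshStoreFiles(); // re-scan the store directory and pick up new HFiles
            refreshed++;
        }
        return refreshed;
    }
}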
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
In class TestCloseChecker, method testIsClosed:
@Test
public void testIsClosed() {
    Store enableWrite = mock(Store.class);
    when(enableWrite.areWritesEnabled()).thenReturn(true);
    Store disableWrite = mock(Store.class);
    when(disableWrite.areWritesEnabled()).thenReturn(false);
    Configuration conf = new Configuration();
    long currentTime = EnvironmentEdgeManager.currentTime();
    conf.setInt(SIZE_LIMIT_KEY, 10);
    conf.setLong(TIME_LIMIT_KEY, 10);
    CloseChecker closeChecker = new CloseChecker(conf, currentTime);
    assertFalse(closeChecker.isTimeLimit(enableWrite, currentTime));
    assertFalse(closeChecker.isSizeLimit(enableWrite, 10L));
    closeChecker = new CloseChecker(conf, currentTime);
    assertFalse(closeChecker.isTimeLimit(enableWrite, currentTime + 11));
    assertFalse(closeChecker.isSizeLimit(enableWrite, 11L));
    closeChecker = new CloseChecker(conf, currentTime);
    assertTrue(closeChecker.isTimeLimit(disableWrite, currentTime + 11));
    assertTrue(closeChecker.isSizeLimit(disableWrite, 11L));
    for (int i = 0; i < 10; i++) {
        int plusTime = 5 * i;
        assertFalse(closeChecker.isTimeLimit(enableWrite, currentTime + plusTime));
        assertFalse(closeChecker.isSizeLimit(enableWrite, 5L));
    }
    closeChecker = new CloseChecker(conf, currentTime);
    assertFalse(closeChecker.isTimeLimit(disableWrite, currentTime + 6));
    assertFalse(closeChecker.isSizeLimit(disableWrite, 6));
    assertTrue(closeChecker.isTimeLimit(disableWrite, currentTime + 12));
    assertTrue(closeChecker.isSizeLimit(disableWrite, 6));
}
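The test pins down the contract: both checks report a hit only when writes are disabled on the store (i.e. the region is being closed) and either the configured time has elapsed or the configured number of bytes has accumulated since the last check. A rough sketch of how a compaction-style loop might consult the checker follows; the chunk-writing details are hypothetical, and CloseChecker is assumed to be accessible from org.apache.hadoop.hbase.regionserver.compactions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.compactions.CloseChecker;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public final class CloseCheckerUsageSketch {

    private CloseCheckerUsageSketch() {
    }

    /**
     * Write the given chunks of work, but stop early if the store is being closed and a
     * time or size limit is reached. Returns true only if all chunks were written.
     */
    static boolean writeAllChunks(Configuration conf, Store store, long[] chunkBytes) {
        CloseChecker closeChecker = new CloseChecker(conf, EnvironmentEdgeManager.currentTime());
        for (long bytes : chunkBytes) {
            // ... emit `bytes` worth of cells here (omitted) ...
            long now = EnvironmentEdgeManager.currentTime();
            if (closeChecker.isTimeLimit(store, now) || closeChecker.isSizeLimit(store, bytes)) {
                return false; // the store is closing; abandon the rest of the work
            }
        }
        return true;
    }
}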
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
In class TestFileSystemUtilizationChore, method mockRegionWithSize:
/**
* Creates a region with a number of Stores equal to the length of {@code storeSizes}. Each
* {@link Store} will have a reported size corresponding to the element in {@code storeSizes}.
*
* @param storeSizes A list of sizes for each Store.
* @return A mocked Region.
*/
private Region mockRegionWithSize(Collection<Long> storeSizes) {
    final Region r = mock(Region.class);
    final RegionInfo info = mock(RegionInfo.class);
    when(r.getRegionInfo()).thenReturn(info);
    List<Store> stores = new ArrayList<>();
    when(r.getStores()).thenReturn((List) stores);
    for (Long storeSize : storeSizes) {
        final Store s = mock(Store.class);
        stores.add(s);
        when(s.getHFilesSize()).thenReturn(storeSize);
    }
    return r;
}
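With this mock in place, the expected region size is simply the sum of the stores' HFile sizes, which is what the utilization chore is exercised against. A small hypothetical helper makes the arithmetic explicit (it is a test-side sketch, not HBase API):

import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Store;

final class RegionSizeSketch {

    private RegionSizeSketch() {
    }

    /** Sum of Store.getHFilesSize() across the region's stores. */
    static long hfilesSize(Region region) {
        long sum = 0;
        for (Store store : region.getStores()) {
            sum += store.getHFilesSize(); // each mocked store returns its configured size
        }
        return sum;
    }
}

For example, hfilesSize(mockRegionWithSize(Arrays.asList(1024L, 2048L))) would come out to 3072.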
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
In class TestFileSystemUtilizationChore, method mockRegionWithHFileLinks:
private Region mockRegionWithHFileLinks(Collection<Long> storeSizes, Collection<Long> hfileSizes) {
    final Region r = mock(Region.class);
    final RegionInfo info = mock(RegionInfo.class);
    when(r.getRegionInfo()).thenReturn(info);
    List<Store> stores = new ArrayList<>();
    when(r.getStores()).thenReturn((List) stores);
    assertEquals("Logic error, storeSizes and hfileSizes must be equal in size", storeSizes.size(), hfileSizes.size());
    Iterator<Long> storeSizeIter = storeSizes.iterator();
    Iterator<Long> hfileSizeIter = hfileSizes.iterator();
    while (storeSizeIter.hasNext() && hfileSizeIter.hasNext()) {
        final long storeSize = storeSizeIter.next();
        final long hfileSize = hfileSizeIter.next();
        final Store s = mock(Store.class);
        stores.add(s);
        when(s.getStorefilesSize()).thenReturn(storeSize);
        when(s.getHFilesSize()).thenReturn(hfileSize);
    }
    return r;
}
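This variant stubs two size views per store: getStorefilesSize() (every store file, HFile links included) and getHFilesSize() (only plain HFiles, so files reached through links are not counted again). Summing each view separately, as in the hypothetical helper below, makes the gap between the two visible when a mocked region is backed by HFile links:

import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Store;

final class HFileLinkSizeSketch {

    private HFileLinkSizeSketch() {
    }

    /** Total of Store.getStorefilesSize(): every store file, links included. */
    static long storefilesSize(Region region) {
        long sum = 0;
        for (Store store : region.getStores()) {
            sum += store.getStorefilesSize();
        }
        return sum;
    }

    /** Total of Store.getHFilesSize(): only plain HFiles, links excluded. */
    static long hfilesSize(Region region) {
        long sum = 0;
        for (Store store : region.getStores()) {
            sum += store.getHFilesSize();
        }
        return sum;
    }
}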