Use of org.apache.hadoop.hbase.regionserver.Store in project Gaffer by gchq: class GafferCoprocessorTest, method shouldDelegatePreFlushToStoreScanner.
@Test
public void shouldDelegatePreFlushToStoreScanner() throws IOException {
    // Given
    final ObserverContext<RegionCoprocessorEnvironment> e = mock(ObserverContext.class);
    final Store store = mock(Store.class);
    final InternalScanner scanner = mock(InternalScanner.class);

    // When
    // 'coprocessor' is the GafferCoprocessor under test, created in the test class set-up.
    final StoreScanner storeScanner = (StoreScanner) coprocessor.preFlush(e, store, scanner);

    // Then
    assertNotNull(storeScanner);
}
Use of org.apache.hadoop.hbase.regionserver.Store in project Gaffer by gchq: class GafferCoprocessorTest, method shouldDelegatePreCompactToStoreScanner.
@Test
public void shouldDelegatePreCompactToStoreScanner() throws IOException {
    // Given
    final ObserverContext<RegionCoprocessorEnvironment> e = mock(ObserverContext.class);
    final Store store = mock(Store.class);
    final InternalScanner scanner = mock(InternalScanner.class);

    // When
    final StoreScanner storeScanner = (StoreScanner) coprocessor.preCompact(e, store, scanner, ScanType.COMPACT_DROP_DELETES);

    // Then
    assertNotNull(storeScanner);
}
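The tests above only check that the coprocessor hands back a wrapping StoreScanner. For the preFlush/preCompact hooks to fire outside a test, the observer has to be registered on the table. A minimal registration sketch, assuming the fully qualified GafferCoprocessor class name shown (verify it against your Gaffer version) and illustrative table/family names:

import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class RegisterGafferCoprocessor {
    public static HTableDescriptor describeTable() throws IOException {
        final HTableDescriptor table = new HTableDescriptor(TableName.valueOf("elements"));
        table.addFamily(new HColumnDescriptor("e"));
        // Once registered, the region server invokes the observer's preFlush and
        // preCompact hooks, which is what the tests above exercise with mocks.
        // The class name below is an assumption; check it for your Gaffer release.
        table.addCoprocessor("uk.gov.gchq.gaffer.hbasestore.coprocessor.GafferCoprocessor");
        return table;
    }
}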
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache: class TestDateTieredCompactor, method createCompactor.
private DateTieredCompactor createCompactor(StoreFileWritersCapture writers, final KeyValue[] input,
        List<StoreFile> storefiles) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
    final Scanner scanner = new Scanner(input);
    // Create store mock that is satisfactory for compactor.
    HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
    ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparator.COMPARATOR);
    final Store store = mock(Store.class);
    when(store.getStorefiles()).thenReturn(storefiles);
    when(store.getFamily()).thenReturn(col);
    when(store.getScanInfo()).thenReturn(si);
    when(store.areWritesEnabled()).thenReturn(true);
    when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
    when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(), anyBoolean(),
            anyBoolean(), anyBoolean())).thenAnswer(writers);
    when(store.getComparator()).thenReturn(CellComparator.COMPARATOR);
    long maxSequenceId = StoreFile.getMaxSequenceIdInList(storefiles);
    when(store.getMaxSequenceId()).thenReturn(maxSequenceId);
    return new DateTieredCompactor(conf, store) {

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners,
                long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow,
                byte[] dropDeletesToRow) throws IOException {
            return scanner;
        }

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners,
                ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
            return scanner;
        }
    };
}
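The writers object stubbed in via thenAnswer above is the test's StoreFileWritersCapture, a Mockito Answer that records every writer the compactor requests. A minimal sketch of that general capture pattern (not the HBase class itself; RecordingAnswer is a hypothetical name):

import java.util.ArrayList;
import java.util.List;

import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class RecordingAnswer<T> implements Answer<T> {
    private final Class<T> type;
    private final List<T> created = new ArrayList<>();

    public RecordingAnswer(final Class<T> type) {
        this.type = type;
    }

    @Override
    public T answer(final InvocationOnMock invocation) {
        // Hand back a fresh mock for each stubbed call and remember it so the
        // test can later assert on what the code under test produced.
        final T instance = Mockito.mock(type);
        created.add(instance);
        return instance;
    }

    public List<T> getCreated() {
        return created;
    }
}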
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache: class TestZooKeeperTableArchiveClient, method loadFlushAndCompact.
private void loadFlushAndCompact(Region region, byte[] family) throws IOException {
    // create two hfiles in the region
    createHFileInRegion(region, family);
    createHFileInRegion(region, family);

    Store s = region.getStore(family);
    int count = s.getStorefilesCount();
    assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + count, count >= 2);

    // compact the two files into one file to get files in the archive
    LOG.debug("Compacting stores");
    region.compact(true);
}
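createHFileInRegion is a private helper of the same test class and is not shown here. A minimal sketch of what such a helper typically does, assuming illustrative row, qualifier, and value bytes (the real implementation may differ):

import java.io.IOException;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;

private void createHFileInRegion(final Region region, final byte[] family) throws IOException {
    // Write one cell, then flush the memstore so a new store file reaches disk.
    final Put put = new Put(Bytes.toBytes("row"));
    put.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v"));
    region.put(put);
    region.flush(true);
}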
Use of org.apache.hadoop.hbase.regionserver.Store in project phoenix by apache: class IndexRegionSplitPolicy, method getSplitPoint.
@Override
protected byte[] getSplitPoint() {
    byte[] oldSplitPoint = super.getSplitPoint();
    if (oldSplitPoint == null) {
        return null;
    }
    List<Store> stores = region.getStores();
    byte[] splitPointFromLargestStore = null;
    long largestStoreSize = 0;
    boolean isLocalIndexKey = false;
    // First pass: check whether the default split point comes from a local index
    // column family; if so, it should not be used as-is.
    for (Store s : stores) {
        if (s.getFamily().getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
            byte[] splitPoint = s.getSplitPoint();
            if (oldSplitPoint != null && splitPoint != null && Bytes.compareTo(oldSplitPoint, splitPoint) == 0) {
                isLocalIndexKey = true;
            }
        }
    }
    if (!isLocalIndexKey) {
        return oldSplitPoint;
    }
    // Second pass: fall back to the split point of the largest non-local-index
    // store, so the region splits on a data-table key instead.
    for (Store s : stores) {
        if (!s.getFamily().getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
            byte[] splitPoint = s.getSplitPoint();
            long storeSize = s.getSize();
            if (splitPoint != null && largestStoreSize < storeSize) {
                splitPointFromLargestStore = splitPoint;
                largestStoreSize = storeSize;
            }
        }
    }
    return splitPointFromLargestStore;
}
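A split policy only takes effect once it is set on the table descriptor. A minimal wiring sketch, assuming the fully qualified IndexRegionSplitPolicy class name shown (confirm the package for your Phoenix version) and illustrative table/family names:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class SplitPolicyWiring {
    public static HTableDescriptor describeTable() {
        final HTableDescriptor table = new HTableDescriptor(TableName.valueOf("MY_TABLE"));
        table.addFamily(new HColumnDescriptor("0"));
        table.addFamily(new HColumnDescriptor("L#0"));
        // Have HBase consult the policy when choosing split points, so data and
        // local-index column families keep splitting on the same row keys.
        // The class name below is an assumption; verify it for your Phoenix release.
        table.setRegionSplitPolicyClassName(
                "org.apache.phoenix.hbase.index.IndexRegionSplitPolicy");
        return table;
    }
}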