Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.
In class LocalTableStateTest, method testCorrectRollback:
/**
 * Test that we correctly roll back the state of a KeyValue.
 * @throws Exception
 */
@Test
@SuppressWarnings("unchecked")
public void testCorrectRollback() throws Exception {
    Put m = new Put(row);
    m.add(fam, qual, ts, val);
    // setup mocks
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Region region = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(region);
    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
    final byte[] stored = Bytes.toBytes("stored-value");
    final KeyValue storedKv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
    storedKv.setSequenceId(2);
    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
            list.add(storedKv);
            return false;
        }
    });
    LocalHBaseState state = new LocalTable(env);
    LocalTableState table = new LocalTableState(state, m);
    // add the kvs from the mutation
    KeyValue kv = KeyValueUtil.ensureKeyValue(m.get(fam, qual).get(0));
    kv.setSequenceId(0);
    table.addPendingUpdates(kv);
    // setup the lookup
    ColumnReference col = new ColumnReference(fam, qual);
    table.setCurrentTimestamp(ts);
    // check that the value is there
    Pair<CoveredDeleteScanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    Scanner s = p.getFirst();
    assertEquals("Didn't get the pending mutation's value first", kv, s.next());
    // rollback that value
    table.rollback(Arrays.asList(kv));
    p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    s = p.getFirst();
    assertEquals("Didn't correctly rollback the row - still found it!", null, s.next());
    Mockito.verify(env, Mockito.times(1)).getRegion();
    Mockito.verify(region, Mockito.times(1)).getScanner(Mockito.any(Scan.class));
}
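The Answer-based stub is the key move in this test: RegionScanner.next(List) returns its results through the caller's list argument, so a plain thenReturn cannot fake it. The pattern generalizes; below is a minimal sketch of a reusable helper (the ScannerMocks class and scannerReturning method are our own illustration, not Phoenix code):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class ScannerMocks {
    // Hypothetical helper: builds a RegionScanner mock whose next(List) call
    // copies the supplied cells into the caller's list and returns false,
    // meaning "this was the last batch".
    @SuppressWarnings("unchecked")
    public static RegionScanner scannerReturning(final List<Cell> cells) throws IOException {
        RegionScanner scanner = Mockito.mock(RegionScanner.class);
        Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
            @Override
            public Boolean answer(InvocationOnMock invocation) throws Throwable {
                List<Cell> out = (List<Cell>) invocation.getArguments()[0];
                out.addAll(cells);
                return false;
            }
        });
        return scanner;
    }
}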
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.
In class LocalTableStateTest, method testCorrectOrderingWithLazyLoadingColumns:
@SuppressWarnings("unchecked")
@Test
public void testCorrectOrderingWithLazyLoadingColumns() throws Exception {
    Put m = new Put(row);
    m.add(fam, qual, ts, val);
    // setup mocks
    Configuration conf = new Configuration(false);
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Mockito.when(env.getConfiguration()).thenReturn(conf);
    Region region = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(region);
    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
    final byte[] stored = Bytes.toBytes("stored-value");
    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
            KeyValue kv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
            kv.setSequenceId(0);
            list.add(kv);
            return false;
        }
    });
    LocalHBaseState state = new LocalTable(env);
    LocalTableState table = new LocalTableState(state, m);
    // add the kvs from the mutation
    table.addPendingUpdates(KeyValueUtil.ensureKeyValues(m.get(fam, qual)));
    // setup the lookup
    ColumnReference col = new ColumnReference(fam, qual);
    table.setCurrentTimestamp(ts);
    // check that our value still shows up first on scan, even though this is a lazy load
    Pair<CoveredDeleteScanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    Scanner s = p.getFirst();
    assertEquals("Didn't get the pending mutation's value first", m.get(fam, qual).get(0), s.next());
}
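A verification that could be added here (our own suggestion, not part of the Phoenix test): since the pending update already covers the requested column, the lazy load should consult the backing region at most once for this single-column lookup, which Mockito can assert directly:

// Hypothetical follow-up check: lazy loading means at most one region scan
// should have been opened to answer the lookup above.
Mockito.verify(region, Mockito.atMost(1)).getScanner(Mockito.any(Scan.class));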
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.
In class NonTxIndexBuilderTest, method setup:
/**
* Test setup so that {@link NonTxIndexBuilder#getIndexUpdate(Mutation, IndexMetaData)} can be
* called, where any read requests to
* {@link LocalTable#getCurrentRowState(Mutation, Collection, boolean)} are read from our test
* field 'currentRowCells'
*/
@Before
public void setup() throws Exception {
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration(false);
    conf.set(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName());
    Mockito.when(env.getConfiguration()).thenReturn(conf);
    // the following is used by LocalTable#getCurrentRowState()
    Region mockRegion = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(mockRegion);
    Mockito.when(mockRegion.getScanner(Mockito.any(Scan.class))).thenAnswer(new Answer<RegionScanner>() {
        @Override
        public RegionScanner answer(InvocationOnMock invocation) throws Throwable {
            Scan sArg = (Scan) invocation.getArguments()[0];
            TimeRange timeRange = sArg.getTimeRange();
            return getMockTimeRangeRegionScanner(timeRange);
        }
    });
    // the following is called by PhoenixIndexCodec#getIndexUpserts() and #getIndexDeletes()
    HRegionInfo mockRegionInfo = Mockito.mock(HRegionInfo.class);
    Mockito.when(env.getRegionInfo()).thenReturn(mockRegionInfo);
    Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
    Mockito.when(mockRegionInfo.getStartKey()).thenReturn(Bytes.toBytes("a"));
    Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z"));
    Mockito.when(mockRegionInfo.getTable()).thenReturn(TableName.valueOf(TEST_TABLE_STRING));
    mockIndexMetaData = Mockito.mock(PhoenixIndexMetaData.class);
    Mockito.when(mockIndexMetaData.requiresPriorRowState((Mutation) Mockito.any())).thenReturn(true);
    Mockito.when(mockIndexMetaData.getIndexMaintainers()).thenReturn(Collections.singletonList(getTestIndexMaintainer()));
    indexBuilder = new NonTxIndexBuilder();
    indexBuilder.setup(env);
}
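The getMockTimeRangeRegionScanner helper invoked above is not shown on this page. A plausible reconstruction (our sketch, not the actual Phoenix test code, assuming the test field currentRowCells mentioned in the Javadoc holds the row's cells) filters those cells by the scan's TimeRange and serves them through a mocked RegionScanner:

// Sketch of the helper referenced above -- a reconstruction, not the actual
// Phoenix test code. Returns a mock scanner that serves only the cells from
// 'currentRowCells' whose timestamps fall inside the requested TimeRange.
private RegionScanner getMockTimeRangeRegionScanner(final TimeRange timeRange) throws IOException {
    RegionScanner mockScanner = Mockito.mock(RegionScanner.class);
    Mockito.when(mockScanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            @SuppressWarnings("unchecked")
            List<Cell> results = (List<Cell>) invocation.getArguments()[0];
            for (Cell cell : currentRowCells) {
                if (timeRange.withinTimeRange(cell.getTimestamp())) {
                    results.add(cell);
                }
            }
            return false; // a single row, so no further batches
        }
    });
    return mockScanner;
}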
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.
In class UngroupedAggregateRegionObserver, method rebuildIndices:
private RegionScanner rebuildIndices(final RegionScanner innerScanner, final Region region, final Scan scan, Configuration config) throws IOException {
    byte[] indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    boolean useProto = true;
    // for backward compatibility, fall back to looking it up by the old attribute
    if (indexMetaData == null) {
        useProto = false;
        indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
    }
    byte[] clientVersionBytes = scan.getAttribute(PhoenixIndexCodec.CLIENT_VERSION);
    boolean hasMore;
    int rowCount = 0;
    try {
        int maxBatchSize = config.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
        long maxBatchSizeBytes = config.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
        MutationList mutations = new MutationList(maxBatchSize);
        region.startRegionOperation();
        byte[] uuidValue = ServerCacheClient.generateId();
        synchronized (innerScanner) {
            do {
                List<Cell> results = new ArrayList<Cell>();
                hasMore = innerScanner.nextRaw(results);
                if (!results.isEmpty()) {
                    Put put = null;
                    Delete del = null;
                    for (Cell cell : results) {
                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                                put.setAttribute(useProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, indexMetaData);
                                put.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                put.setAttribute(REPLAY_WRITES, REPLAY_ONLY_INDEX_WRITES);
                                put.setAttribute(PhoenixIndexCodec.CLIENT_VERSION, clientVersionBytes);
                                mutations.add(put);
                                // Since we're replaying existing mutations, it makes no sense to write them to the WAL
                                put.setDurability(Durability.SKIP_WAL);
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                                del.setAttribute(useProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, indexMetaData);
                                del.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                del.setAttribute(REPLAY_WRITES, REPLAY_ONLY_INDEX_WRITES);
                                del.setAttribute(PhoenixIndexCodec.CLIENT_VERSION, clientVersionBytes);
                                mutations.add(del);
                                // Since we're replaying existing mutations, it makes no sense to write them to the WAL
                                del.setDurability(Durability.SKIP_WAL);
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (ServerUtil.readyToCommit(mutations.size(), mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
                        region.batchMutate(mutations.toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
                        uuidValue = ServerCacheClient.generateId();
                        mutations.clear();
                    }
                    rowCount++;
                }
            } while (hasMore);
            if (!mutations.isEmpty()) {
                region.batchMutate(mutations.toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
            }
        }
    } catch (IOException e) {
        logger.error("IOException during rebuilding: " + Throwables.getStackTraceAsString(e));
        throw e;
    } finally {
        region.closeRegionOperation();
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final KeyValue aggKeyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {
        @Override
        public HRegionInfo getRegionInfo() {
            return region.getRegionInfo();
        }

        @Override
        public boolean isFilterDone() {
            return true;
        }

        @Override
        public void close() throws IOException {
            innerScanner.close();
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
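The flush decision is delegated to ServerUtil.readyToCommit, called with the batch's row count, its byte size, and the two configured ceilings. A minimal sketch of the kind of threshold check this implies (our assumption, not the actual Phoenix implementation):

// Assumed shape of the commit-threshold check: flush once either the
// row-count budget or the byte-size budget is exhausted.
static boolean readyToCommit(int rowCount, long byteSize, int maxBatchSize, long maxBatchSizeBytes) {
    return (maxBatchSize > 0 && rowCount >= maxBatchSize)
            || (maxBatchSizeBytes > 0 && byteSize >= maxBatchSizeBytes);
}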
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.
In class UngroupedAggregateRegionObserver, method collectStats:
private RegionScanner collectStats(final RegionScanner innerScanner, StatisticsCollector stats, final Region region, final Scan scan, Configuration config) throws IOException {
    StatsCollectionCallable callable = new StatsCollectionCallable(stats, region, innerScanner, config, scan);
    byte[] asyncBytes = scan.getAttribute(BaseScannerRegionObserver.RUN_UPDATE_STATS_ASYNC_ATTRIB);
    boolean async = false;
    if (asyncBytes != null) {
        async = Bytes.toBoolean(asyncBytes);
    }
    // in case of async, we report 0 as number of rows updated
    long rowCount = 0;
    StatisticsCollectionRunTracker statsRunTracker = StatisticsCollectionRunTracker.getInstance(config);
    boolean runUpdateStats = statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo());
    if (runUpdateStats) {
        if (!async) {
            rowCount = callable.call();
        } else {
            statsRunTracker.runTask(callable);
        }
    } else {
        rowCount = CONCURRENT_UPDATE_STATS_ROW_COUNT;
        logger.info("UPDATE STATISTICS didn't run because another UPDATE STATISTICS command was already running on the region " + region.getRegionInfo().getRegionNameAsString());
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final KeyValue aggKeyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {
        @Override
        public HRegionInfo getRegionInfo() {
            return region.getRegionInfo();
        }

        @Override
        public boolean isFilterDone() {
            return true;
        }

        @Override
        public void close() throws IOException {
            // No-op because we want to manage closing of the inner scanner ourselves.
            // This happens inside StatsCollectionCallable.
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
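Both rebuildIndices and collectStats report their result the same way: the returned scanner yields a single synthetic KeyValue whose value is a PLong-encoded row count. A caller-side sketch of decoding that cell (using the standard Phoenix PDataType API; the surrounding variables are our own illustration):

// Drain the aggregate scanner (one batch; next() returns false) and decode
// the PLong row count from the single cell it produces.
List<Cell> results = new ArrayList<Cell>();
scanner.next(results);
Cell aggCell = results.get(0);
long rowCount = (Long) PLong.INSTANCE.toObject(
        aggCell.getValueArray(), aggCell.getValueOffset(), aggCell.getValueLength());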