Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.
From the class TestMultipleColumnPrefixFilter, method testMultipleColumnPrefixFilterWithColumnPrefixFilter.
@Test
public void testMultipleColumnPrefixFilterWithColumnPrefixFilter() throws IOException {
  String family = "Family";
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  htd.addFamily(new HColumnDescriptor(family));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
    TEST_UTIL.getConfiguration(), htd);
  List<String> rows = generateRandomWords(100, "row");
  List<String> columns = generateRandomWords(10000, "column");
  long maxTimestamp = 2;
  String valueString = "ValueString";
  for (String row : rows) {
    Put p = new Put(Bytes.toBytes(row));
    p.setDurability(Durability.SKIP_WAL);
    for (String column : columns) {
      for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
        KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, valueString);
        p.add(kv);
      }
    }
    region.put(p);
  }
  // Scan once with a MultipleColumnPrefixFilter holding the single prefix 'p'.
  MultipleColumnPrefixFilter multiplePrefixFilter;
  Scan scan1 = new Scan();
  scan1.setMaxVersions();
  byte[][] filter_prefix = new byte[1][];
  filter_prefix[0] = new byte[] { 'p' };
  multiplePrefixFilter = new MultipleColumnPrefixFilter(filter_prefix);
  scan1.setFilter(multiplePrefixFilter);
  List<Cell> results1 = new ArrayList<>();
  InternalScanner scanner1 = region.getScanner(scan1);
  while (scanner1.next(results1)) ; // drain every row into results1
  // Scan again with the equivalent single ColumnPrefixFilter; both scans must
  // return the same cells.
  ColumnPrefixFilter singlePrefixFilter;
  Scan scan2 = new Scan();
  scan2.setMaxVersions();
  singlePrefixFilter = new ColumnPrefixFilter(Bytes.toBytes("p"));
  scan2.setFilter(singlePrefixFilter);
  List<Cell> results2 = new ArrayList<>();
  InternalScanner scanner2 = region.getScanner(scan2);
  while (scanner2.next(results2)) ; // drain every row into results2
  assertEquals(results1.size(), results2.size());
  HBaseTestingUtility.closeRegionAndWAL(region);
}
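The test above runs both filters against a region through the internal scanner API. The same equivalence can be expressed against the public client API; the following is a minimal sketch, in which the Connection argument, the table name "mytable", and the class and method names are assumptions for illustration, not part of the test above.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiPrefixScanExample {
  // Counts the cells whose qualifier starts with any of the given prefixes.
  static long countWithPrefixes(Connection conn, byte[][] prefixes) throws IOException {
    Scan scan = new Scan();
    scan.setFilter(new MultipleColumnPrefixFilter(prefixes));
    long cells = 0;
    try (Table table = conn.getTable(TableName.valueOf("mytable")); // hypothetical table
         ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        cells += r.rawCells().length;
      }
    }
    return cells;
  }
}

With a single-element prefix array { Bytes.toBytes("p") }, this should count the same cells as a ColumnPrefixFilter built from Bytes.toBytes("p"), which is exactly what the test asserts.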
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.
From the class TestMultipleColumnPrefixFilter, method testMultipleColumnPrefixFilterWithManyFamilies.
@Test
public void testMultipleColumnPrefixFilterWithManyFamilies() throws IOException {
  String family1 = "Family1";
  String family2 = "Family2";
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  HColumnDescriptor hcd1 = new HColumnDescriptor(family1);
  hcd1.setMaxVersions(3);
  htd.addFamily(hcd1);
  HColumnDescriptor hcd2 = new HColumnDescriptor(family2);
  hcd2.setMaxVersions(3);
  htd.addFamily(hcd2);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
    TEST_UTIL.getConfiguration(), htd);
  List<String> rows = generateRandomWords(100, "row");
  List<String> columns = generateRandomWords(10000, "column");
  long maxTimestamp = 3;
  List<Cell> kvList = new ArrayList<>();
  Map<String, List<Cell>> prefixMap = new HashMap<>();
  prefixMap.put("p", new ArrayList<>());
  prefixMap.put("q", new ArrayList<>());
  prefixMap.put("s", new ArrayList<>());
  String valueString = "ValueString";
  for (String row : rows) {
    Put p = new Put(Bytes.toBytes(row));
    p.setDurability(Durability.SKIP_WAL);
    for (String column : columns) {
      for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
        // Spread the cells randomly across the two families and record the
        // expected matches for each tracked prefix.
        double rand = Math.random();
        Cell kv;
        if (rand < 0.5) {
          kv = KeyValueTestUtil.create(row, family1, column, timestamp, valueString);
        } else {
          kv = KeyValueTestUtil.create(row, family2, column, timestamp, valueString);
        }
        p.add(kv);
        kvList.add(kv);
        for (String s : prefixMap.keySet()) {
          if (column.startsWith(s)) {
            prefixMap.get(s).add(kv);
          }
        }
      }
    }
    region.put(p);
  }
  MultipleColumnPrefixFilter filter;
  Scan scan = new Scan();
  scan.setMaxVersions();
  byte[][] filter_prefix = new byte[2][];
  filter_prefix[0] = new byte[] { 'p' };
  filter_prefix[1] = new byte[] { 'q' };
  filter = new MultipleColumnPrefixFilter(filter_prefix);
  scan.setFilter(filter);
  List<Cell> results = new ArrayList<>();
  InternalScanner scanner = region.getScanner(scan);
  while (scanner.next(results)) ; // drain every row into results
  // The filter must return exactly the cells whose qualifier starts with 'p' or 'q'.
  assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size());
  HBaseTestingUtility.closeRegionAndWAL(region);
}
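MultipleColumnPrefixFilter matches on qualifier prefixes only, independent of column family, which is why a single prefix set works across Family1 and Family2 above. A family restriction on the Scan simply intersects with the filter. The following is a minimal client-side sketch; the Connection argument, the table name "mytable", and the class and method names are illustrative assumptions.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefixPlusFamilyScanExample {
  // Counts cells in Family1 only whose qualifier starts with 'p' or 'q':
  // the family restriction ANDs with the qualifier-prefix filter.
  static long countFamily1PrefixCells(Connection conn) throws IOException {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("Family1"));
    scan.setFilter(new MultipleColumnPrefixFilter(
      new byte[][] { Bytes.toBytes("p"), Bytes.toBytes("q") }));
    long cells = 0;
    try (Table table = conn.getTable(TableName.valueOf("mytable")); // hypothetical table
         ResultScanner rs = table.getScanner(scan)) {
      for (Result r : rs) {
        cells += r.rawCells().length;
      }
    }
    return cells;
  }
}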
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.
From the class TestInvocationRecordFilter, method tearDown.
@After
public void tearDown() throws Exception {
  WAL wal = ((HRegion) region).getWAL();
  ((HRegion) region).close();
  wal.close();
}
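Note the order: the region is closed before its WAL, since closing the region can still flush edits through the log. A region torn down this way is typically created in a matching setUp; the following is a hypothetical sketch, assuming the test class holds a region field and an HBaseTestingUtility named TEST_UTIL, with the table and family names invented for illustration.

// Hypothetical setUp pairing with the tearDown above: create a single test
// region (and its WAL) for the test methods to scan against.
@Before
public void setUp() throws Exception {
  HTableDescriptor htd =
    new HTableDescriptor(TableName.valueOf("testtable"));   // table name assumed
  htd.addFamily(new HColumnDescriptor(Bytes.toBytes("f"))); // family name assumed
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
    TEST_UTIL.getConfiguration(), htd);
}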
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.
From the class TestHBaseFsckOneRS, method testCleanUpDaughtersNotInMetaAfterFailedSplit.
@Test(timeout = 180000)
public void testCleanUpDaughtersNotInMetaAfterFailedSplit() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  try {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
    createTable(TEST_UTIL, desc, null);
    tbl = connection.getTable(desc.getTableName());
    for (int i = 0; i < 5; i++) {
      Put p1 = new Put(("r" + i).getBytes());
      p1.addColumn(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
      tbl.put(p1);
    }
    admin.flush(desc.getTableName());
    List<HRegion> regions = cluster.getRegions(desc.getTableName());
    int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
    HRegionServer regionServer = cluster.getRegionServer(serverWith);
    byte[] parentRegionName = regions.get(0).getRegionInfo().getRegionName();
    cluster.getServerWith(parentRegionName);
    // Create daughters without adding to META table
    MasterProcedureEnv env = cluster.getMaster().getMasterProcedureExecutor().getEnvironment();
    SplitTableRegionProcedure splitR =
      new SplitTableRegionProcedure(env, regions.get(0).getRegionInfo(), Bytes.toBytes("r3"));
    splitR.prepareSplitRegion(env);
    splitR.setRegionStateToSplitting(env);
    splitR.closeParentRegionForSplit(env);
    splitR.createDaughterRegions(env);
    AssignmentManager am = cluster.getMaster().getAssignmentManager();
    for (RegionState state : am.getRegionStates().getRegionsInTransition()) {
      am.regionOffline(state.getRegion());
    }
    Map<HRegionInfo, ServerName> regionsMap = new HashMap<>();
    regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName());
    am.assign(regionsMap);
    am.waitForAssignment(regions.get(0).getRegionInfo());
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
      HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
    // holes are separate from overlap groups
    assertEquals(0, hbck.getOverlapGroups(tableName).size());
    // fix hole
    assertErrors(
      doFsck(conf, false, true, false, false, false, false, false, false, false, false, false,
        false, null),
      new HBaseFsck.ErrorReporter.ERROR_CODE[] {
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
    // check that hole fixed
    assertNoErrors(doFsck(conf, false));
    assertEquals(5, countRows());
  } finally {
    if (tbl != null) {
      tbl.close();
      tbl = null;
    }
    cleanupTable(tableName);
  }
}
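The doFsck calls above come from the test helper HbckTestingUtil; the first run is check-only, the repair run is expected to still report the two pre-fix errors, and the final check-only run verifies the hole is gone. Roughly the same repair can be driven through HBaseFsck directly. The following is a sketch only: the class and method names are invented, and the flag combination is an illustrative choice for a NOT_IN_META_OR_DEPLOYED hole, not the exact flags the helper call selects.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.HBaseFsck;

public class HbckRepairSketch {
  // Sketch: check-and-repair against a running cluster described by conf.
  static void fixHoles(Configuration conf) throws Exception {
    HBaseFsck fsck = new HBaseFsck(conf);
    try {
      fsck.connect();
      fsck.setFixMeta(true);        // re-insert regions missing from hbase:meta
      fsck.setFixAssignments(true); // deploy regions left offline by the repair
      fsck.onlineHbck();
    } finally {
      fsck.close();
    }
  }
}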
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache.
From the class TestIntraRowPagination, method testScanLimitAndOffset.
/**
 * Test from the client side for a scan with maxResultsPerColumnFamily set.
 */
@Test
public void testScanLimitAndOffset() throws Exception {
  //byte [] TABLE = HTestConst.DEFAULT_TABLE_BYTES;
  byte[][] ROWS = HTestConst.makeNAscii(HTestConst.DEFAULT_ROW_BYTES, 2);
  byte[][] FAMILIES = HTestConst.makeNAscii(HTestConst.DEFAULT_CF_BYTES, 3);
  byte[][] QUALIFIERS = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 10);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(HTestConst.DEFAULT_TABLE_BYTES));
  HRegionInfo info = new HRegionInfo(HTestConst.DEFAULT_TABLE, null, null, false);
  for (byte[] family : FAMILIES) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    htd.addFamily(hcd);
  }
  HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
    TEST_UTIL.getConfiguration(), htd);
  try {
    Put put;
    Scan scan;
    Result result;
    boolean toLog = true;
    List<Cell> kvListExp = new ArrayList<>();
    int storeOffset = 1;
    int storeLimit = 3;
    for (int r = 0; r < ROWS.length; r++) {
      put = new Put(ROWS[r]);
      for (int c = 0; c < FAMILIES.length; c++) {
        for (int q = 0; q < QUALIFIERS.length; q++) {
          KeyValue kv =
            new KeyValue(ROWS[r], FAMILIES[c], QUALIFIERS[q], 1, HTestConst.DEFAULT_VALUE_BYTES);
          put.add(kv);
          // Only qualifiers inside the [offset, offset + limit) window are expected back.
          if (storeOffset <= q && q < storeOffset + storeLimit) {
            kvListExp.add(kv);
          }
        }
      }
      region.put(put);
    }
    scan = new Scan();
    scan.setRowOffsetPerColumnFamily(storeOffset);
    scan.setMaxResultsPerColumnFamily(storeLimit);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> kvListScan = new ArrayList<>();
    List<Cell> results = new ArrayList<>();
    while (scanner.next(results) || !results.isEmpty()) {
      kvListScan.addAll(results);
      results.clear();
    }
    result = Result.create(kvListScan);
    TestScannersFromClientSide.verifyResult(result, kvListExp, toLog,
      "Testing scan with storeOffset and storeLimit");
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}
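The same intra-row pagination knobs exist on a plain client Scan: setRowOffsetPerColumnFamily skips the first N cells of each column family within every row, and setMaxResultsPerColumnFamily caps how many cells per family come back after the offset. A minimal sketch follows; the Connection argument, the table name "mytable", and the class and method names are assumptions for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class IntraRowPaginationExample {
  static void scanPaged(Connection conn) throws IOException {
    Scan scan = new Scan();
    scan.setRowOffsetPerColumnFamily(1);  // skip the first cell of each family
    scan.setMaxResultsPerColumnFamily(3); // then return at most three per family
    try (Table table = conn.getTable(TableName.valueOf("mytable")); // hypothetical table
         ResultScanner rs = table.getScanner(scan)) {
      for (Result result : rs) {
        // each row's Result now holds at most 3 cells per column family
      }
    }
  }
}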