Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
From the class TestFlushWithThroughputController, method setUp:
@Before
public void setUp() {
  hbtu = new HBaseTestingUtil();
  tableName = TableName.valueOf("Table-" + testName.getMethodName());
  hbtu.getConfiguration().set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,
    PressureAwareFlushThroughputController.class.getName());
}
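This setUp only seeds the configuration with the throughput controller; the mini cluster still has to be started by the individual tests. A minimal, hypothetical sketch of how a test method might use this fixture (the test name, column family, and write are assumptions, not part of the original class):

@Test
public void testFlushWithController() throws Exception {
  hbtu.startMiniCluster(1);
  try {
    // the table picks up the PressureAwareFlushThroughputController from hbtu's configuration
    Table table = hbtu.createTable(tableName, Bytes.toBytes("cf"));
    table.put(new Put(Bytes.toBytes("row"))
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    // flushing the table goes through the configured flush throughput controller
    hbtu.getAdmin().flush(tableName);
  } finally {
    hbtu.shutdownMiniCluster();
  }
}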
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
From the class TestInitializeStoreFileTracker, method setUp:
@Before
public void setUp() throws Exception {
  conf = HBaseConfiguration.create();
  // Speed up the launch of RollingUpgradeChore
  conf.setInt(RollingUpgradeChore.ROLLING_UPGRADE_CHORE_PERIOD_SECONDS_KEY, 1);
  conf.setLong(RollingUpgradeChore.ROLLING_UPGRADE_CHORE_DELAY_SECONDS_KEY, 1);
  // Set the default implementation to file instead of default, to confirm we will not set SFT to
  // file
  conf.set(StoreFileTrackerFactory.TRACKER_IMPL, StoreFileTrackerFactory.Trackers.FILE.name());
  HTU = new HBaseTestingUtil(conf);
  HTU.startMiniCluster();
}
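Since this setUp starts a mini cluster, the fixture needs a matching shutdown. A minimal sketch of what the paired tearDown typically looks like (the method name is assumed):

@After
public void tearDown() throws Exception {
  HTU.shutdownMiniCluster();
}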
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
From the class TestSCVFWithMiniCluster, method setUp:
@BeforeClass
public static void setUp() throws Exception {
  HBaseTestingUtil util = new HBaseTestingUtil();
  util.startMiniCluster(1);
  Admin admin = util.getAdmin();
  destroy(admin, HBASE_TABLE_NAME);
  create(admin, HBASE_TABLE_NAME, FAMILY_A, FAMILY_B);
  admin.close();
  htable = util.getConnection().getTable(HBASE_TABLE_NAME);
  /* Add some values */
  List<Put> puts = new ArrayList<>();
  /* Add a row with 'a:foo' = false */
  Put put = new Put(Bytes.toBytes("1"));
  put.setDurability(Durability.SKIP_WAL);
  put.addColumn(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("false"));
  put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  puts.add(put);
  /* Add a row with 'a:foo' = true */
  put = new Put(Bytes.toBytes("2"));
  put.setDurability(Durability.SKIP_WAL);
  put.addColumn(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("true"));
  put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  puts.add(put);
  /* Add a row with 'a:foo' qualifier not set */
  put = new Put(Bytes.toBytes("3"));
  put.setDurability(Durability.SKIP_WAL);
  put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  puts.add(put);
  htable.put(puts);
  /*
   * We want to filter out from the scan all rows that do not have the column 'a:foo' with value
   * 'false'. Only row with key '1' should be returned in the scan.
   */
  scanFilter = new SingleColumnValueFilter(FAMILY_A, QUALIFIER_FOO, CompareOperator.EQUAL,
    new BinaryComparator(Bytes.toBytes("false")));
  ((SingleColumnValueFilter) scanFilter).setFilterIfMissing(true);
}
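The filter built here is applied to a Scan in the individual tests. A minimal, hypothetical sketch of that verification (the counting loop and the expected count are assumptions based on the comment above):

Scan scan = new Scan();
scan.setFilter(scanFilter);
int rows = 0;
try (ResultScanner scanner = htable.getScanner(scan)) {
  for (Result result : scanner) {
    rows++;
  }
}
// only the row with key '1' has a:foo = "false", so exactly one row should come back
assertEquals(1, rows);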
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
From the class TestResettingCounters, method testResettingCounters:
@Test
public void testResettingCounters() throws Exception {
  HBaseTestingUtil htu = new HBaseTestingUtil();
  Configuration conf = htu.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  byte[] table = Bytes.toBytes(name.getMethodName());
  byte[][] families = new byte[][] { Bytes.toBytes("family1"), Bytes.toBytes("family2"),
    Bytes.toBytes("family3") };
  int numQualifiers = 10;
  byte[][] qualifiers = new byte[numQualifiers][];
  for (int i = 0; i < numQualifiers; i++) qualifiers[i] = Bytes.toBytes("qf" + i);
  int numRows = 10;
  byte[][] rows = new byte[numRows][];
  for (int i = 0; i < numRows; i++) rows[i] = Bytes.toBytes("r" + i);
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(table));
  for (byte[] family : families) {
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
  }
  TableDescriptor tableDescriptor = builder.build();
  RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
  String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
  Path path = new Path(testDir);
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  HRegion region = HBaseTestingUtil.createRegionAndWAL(hri, path, conf, tableDescriptor);
  try {
    Increment odd = new Increment(rows[0]);
    odd.setDurability(Durability.SKIP_WAL);
    Increment even = new Increment(rows[0]);
    even.setDurability(Durability.SKIP_WAL);
    Increment all = new Increment(rows[0]);
    all.setDurability(Durability.SKIP_WAL);
    for (int i = 0; i < numQualifiers; i++) {
      if (i % 2 == 0)
        even.addColumn(families[0], qualifiers[i], 1);
      else
        odd.addColumn(families[0], qualifiers[i], 1);
      all.addColumn(families[0], qualifiers[i], 1);
    }
    // increment odd qualifiers 5 times and flush
    for (int i = 0; i < 5; i++) region.increment(odd, HConstants.NO_NONCE, HConstants.NO_NONCE);
    region.flush(true);
    // increment even qualifiers 5 times
    for (int i = 0; i < 5; i++) region.increment(even, HConstants.NO_NONCE, HConstants.NO_NONCE);
    // increment all qualifiers, should have value=6 for all
    Result result = region.increment(all, HConstants.NO_NONCE, HConstants.NO_NONCE);
    assertEquals(numQualifiers, result.size());
    Cell[] kvs = result.rawCells();
    for (int i = 0; i < kvs.length; i++) {
      System.out.println(kvs[i].toString());
      assertTrue(CellUtil.matchingQualifier(kvs[i], qualifiers[i]));
      assertEquals(6, Bytes.toLong(CellUtil.cloneValue(kvs[i])));
    }
  } finally {
    HBaseTestingUtil.closeRegionAndWAL(region);
  }
}
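createRegionAndWAL writes its files under the supplied path, so once the region and WAL have been closed in the finally block the test directory can be removed. A minimal sketch, assuming the cleanup mirrors the pre-test delete above and uses the data-test-dir helper inherited from the common testing utility:

// drop the directory this test created under the data test dir
if (fs.exists(path)) {
  fs.delete(path, true);
}
htu.cleanupTestDir();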
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
From the class TestSecureBulkloadListener, method setUp:
@Before
public void setUp() throws Exception {
  random.nextBytes(randomBytes);
  htu = new HBaseTestingUtil();
  // For the test with multiple blocks
  htu.getConfiguration().setInt("dfs.blocksize", 1024);
  htu.getConfiguration().setInt("dfs.replication", 3);
  htu.startMiniDFSCluster(3, new String[] { "/r1", "/r2", "/r3" },
    new String[] { host1, host2, host3 });
  conf = htu.getConfiguration();
  cluster = htu.getDFSCluster();
  dfs = (DistributedFileSystem) FileSystem.get(conf);
}
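The mini DFS cluster started here needs an explicit shutdown so tests do not leak processes between runs. A minimal sketch of the paired tearDown (the method name is assumed):

@After
public void tearDown() throws Exception {
  htu.shutdownMiniDFSCluster();
}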