Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
Class TestRegionObserverScannerOpenHook, method testRegionObserverScanTimeStacking.
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
byte[] ROW = Bytes.toBytes("testRow");
byte[] TABLE = Bytes.toBytes(getClass().getName());
byte[] A = Bytes.toBytes("A");
byte[][] FAMILIES = new byte[][] { A };
// Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
Configuration conf = new HBaseTestingUtil().getConfiguration();
HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
RegionCoprocessorHost h = region.getCoprocessorHost();
h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
Put put = new Put(ROW);
put.addColumn(A, A, A);
region.put(put);
Get get = new Get(ROW);
Result r = region.get(get);
assertNull("Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: " + r, r.listCells());
HBaseTestingUtil.closeRegionAndWAL(region);
}
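NoDataFromScan and EmptyRegionObsever are classes defined inside the test; their bodies are not shown here. For orientation, a do-nothing region observer in the HBase 2+/3 coprocessor API, which is the shape EmptyRegionObsever follows, can be as small as the sketch below (the class name is ours, purely illustrative).
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
// Minimal no-op region observer sketch: it registers itself as the RegionObserver
// but overrides no hooks, so all reads and writes pass through unchanged.
public class NoopRegionObserver implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }
}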
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
Class TestServerRemoteProcedure, method setUp.
@Before
public void setUp() throws Exception {
util = new HBaseTestingUtil();
this.executor = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setUncaughtExceptionHandler((t, e) -> LOG.warn("Uncaught: ", e)).build());
master = new MockMasterServices(util.getConfiguration(), this.regionsToRegionServers);
rsDispatcher = new MockRSProcedureDispatcher(master);
rsDispatcher.setMockRsExecutor(new NoopRSExecutor());
master.start(2, rsDispatcher);
am = master.getAssignmentManager();
master.getServerManager().getOnlineServersList().stream().forEach(serverName -> am.getRegionStates().getOrCreateServer(serverName));
}
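The snippet stops at setUp; the matching tearDown is not shown. A plausible counterpart, assuming MockMasterServices exposes the standard Stoppable#stop inherited through the MasterServices hierarchy and that the executor only needs to be shut down, would be:
// Hypothetical tearDown for the setUp above: stop the mock master and the executor
// so each test starts from a clean slate. Not copied from the HBase source.
@After
public void tearDown() throws Exception {
  master.stop("tearDown");      // Stoppable#stop(String)
  this.executor.shutdownNow();  // java.util.concurrent.ExecutorService#shutdownNow
}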
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
Class TestHRegion, method testgetHDFSBlocksDistribution.
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
HBaseTestingUtil htu = new HBaseTestingUtil();
// Why do we set the block size in this test? If we set it smaller than the kvs, then we'll
// break up the file into more pieces that can be distributed across the three nodes and we
// won't be able to have the condition this test asserts: that at least one node has
// a copy of all replicas -- with a small block size, blocks are spread evenly across
// the three nodes. hfilev3 with tags seems to put us over the block size. St.Ack.
// final int DEFAULT_BLOCK_SIZE = 1024;
// htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
htu.getConfiguration().setInt("dfs.replication", 2);
// set up a cluster with 3 nodes
SingleProcessHBaseCluster cluster = null;
String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
int regionServersCount = 3;
try {
StartTestingClusterOption option = StartTestingClusterOption.builder().numRegionServers(regionServersCount).dataNodeHosts(dataNodeHosts).build();
cluster = htu.startMiniCluster(option);
byte[][] families = { fam1, fam2 };
Table ht = htu.createTable(tableName, families);
// Setting up region
byte[] row = Bytes.toBytes("row1");
byte[] col = Bytes.toBytes("col1");
Put put = new Put(row);
put.addColumn(fam1, col, 1, Bytes.toBytes("test1"));
put.addColumn(fam2, col, 1, Bytes.toBytes("test2"));
ht.put(put);
HRegion firstRegion = htu.getHBaseCluster().getRegions(tableName).get(0);
firstRegion.flush(true);
HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();
// Given that the replication factor is set to 2 and we have 2 HFiles,
// we will have a total of 4 block replicas on 3 datanodes; thus there
// must be at least one host that holds replicas of both HFiles. That host's
// weight will be equal to the unique blocks total weight.
long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
StringBuilder sb = new StringBuilder();
for (String host : blocksDistribution1.getTopHosts()) {
if (sb.length() > 0)
sb.append(", ");
sb.append(host);
sb.append("=");
sb.append(blocksDistribution1.getWeight(host));
}
String topHost = blocksDistribution1.getTopHosts().get(0);
long topHostWeight = blocksDistribution1.getWeight(topHost);
String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" + topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
LOG.info(msg);
assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);
// Use the static method to compute the value; it should be the same.
// The static method is used by the load balancer and other components.
HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(htu.getConfiguration(), firstRegion.getTableDescriptor(), firstRegion.getRegionInfo());
long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();
assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);
ht.close();
} finally {
if (cluster != null) {
htu.shutdownMiniCluster();
}
}
}
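The same HDFSBlocksDistribution accessors back the data-locality metrics that region servers report. A small hedged helper showing that relationship (the helper name is ours; getBlockLocalityIndex is the existing HDFSBlocksDistribution accessor):
// Sketch: fraction of a region's HFile bytes stored on the given host, computed
// from the same HDFSBlocksDistribution used in the test above.
static float localityOf(HRegion region, String hostName) {
  HDFSBlocksDistribution dist = region.getHDFSBlocksDistribution();
  // hostWeight / uniqueBlocksTotalWeight, in the range [0, 1]
  return dist.getBlockLocalityIndex(hostName);
}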
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
Class TestHRegionFileSystem, method testBlockStoragePolicy.
@Test
public void testBlockStoragePolicy() throws Exception {
TEST_UTIL = new HBaseTestingUtil();
Configuration conf = TEST_UTIL.getConfiguration();
TEST_UTIL.startMiniCluster();
Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
assertEquals("Should start with empty table", 0, TEST_UTIL.countRows(table));
HRegionFileSystem regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
// the default block storage policy is HOT
String spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
String spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
LOG.debug("Storage policy of cf 0: [" + spA + "].");
LOG.debug("Storage policy of cf 1: [" + spB + "].");
assertEquals("HOT", spA);
assertEquals("HOT", spB);
// Recreate the table and make sure the storage policy can be set through configuration
TEST_UTIL.shutdownMiniCluster();
TEST_UTIL.getConfiguration().set(HStore.BLOCK_STORAGE_POLICY_KEY, "WARM");
TEST_UTIL.startMiniCluster();
table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
LOG.debug("Storage policy of cf 0: [" + spA + "].");
LOG.debug("Storage policy of cf 1: [" + spB + "].");
assertEquals("WARM", spA);
assertEquals("WARM", spB);
// alter the table cf schema to change storage policies
// and make sure it can override the setting in conf
ColumnFamilyDescriptorBuilder cfdA = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]);
// alter by setting HStore#BLOCK_STORAGE_POLICY_KEY as a value on the ColumnFamilyDescriptor
cfdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD");
admin.modifyColumnFamily(TABLE_NAME, cfdA.build());
while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().hasRegionsInTransition()) {
Thread.sleep(200);
LOG.debug("Waiting on table to finish schema altering");
}
// alter through ColumnFamilyDescriptorBuilder#setStoragePolicy
ColumnFamilyDescriptorBuilder cfdB = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[1]);
cfdB.setStoragePolicy("ALL_SSD");
admin.modifyColumnFamily(TABLE_NAME, cfdB.build());
while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().hasRegionsInTransition()) {
Thread.sleep(200);
LOG.debug("Waiting on table to finish schema altering");
}
spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
LOG.debug("Storage policy of cf 0: [" + spA + "].");
LOG.debug("Storage policy of cf 1: [" + spB + "].");
assertNotNull(spA);
assertEquals("ONE_SSD", spA);
assertNotNull(spB);
assertEquals("ALL_SSD", spB);
// flush the memstore three times, creating 3 store files
for (long i = 0; i < 3; i++) {
Put put = new Put(Bytes.toBytes(i));
put.addColumn(FAMILIES[0], Bytes.toBytes(i), Bytes.toBytes(i));
table.put(put);
admin.flush(TABLE_NAME);
}
// there should be 3 files in store dir
FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
Path storePath = regionFs.getStoreDir(Bytes.toString(FAMILIES[0]));
FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, storePath);
assertNotNull(storeFiles);
assertEquals(3, storeFiles.length);
// the store temp dir still exists but is empty
Path storeTempDir = new Path(regionFs.getTempDir(), Bytes.toString(FAMILIES[0]));
assertTrue(fs.exists(storeTempDir));
FileStatus[] tempFiles = CommonFSUtils.listStatus(fs, storeTempDir);
assertNull(tempFiles);
// storage policy of cf temp dir and 3 store files should be ONE_SSD
assertEquals("ONE_SSD", ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(storeTempDir));
for (FileStatus status : storeFiles) {
assertEquals("ONE_SSD", ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(status.getPath()));
}
// change storage policies by calling the HRegionFileSystem API directly
regionFs.setStoragePolicy(Bytes.toString(FAMILIES[0]), "ALL_SSD");
regionFs.setStoragePolicy(Bytes.toString(FAMILIES[1]), "ONE_SSD");
spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
LOG.debug("Storage policy of cf 0: [" + spA + "].");
LOG.debug("Storage policy of cf 1: [" + spB + "].");
assertNotNull(spA);
assertEquals("ALL_SSD", spA);
assertNotNull(spB);
assertEquals("ONE_SSD", spB);
} finally {
table.close();
TEST_UTIL.deleteTable(TABLE_NAME);
TEST_UTIL.shutdownMiniCluster();
}
}
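The test changes storage policies after the table exists; the same policies can also be declared at creation time. A minimal sketch using the ColumnFamilyDescriptorBuilder and Admin APIs already seen above (the table and family names here are made up for illustration):
// Sketch: set per-family block storage policies up front instead of calling
// modifyColumnFamily afterwards. Assumes an existing TEST_UTIL mini cluster.
TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("policy_demo"))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("ssd_cf"))
    .setStoragePolicy("ONE_SSD").build())
  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("all_ssd_cf"))
    .setStoragePolicy("ALL_SSD").build())
  .build();
try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
  admin.createTable(td);
}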
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
Class TestInputStreamBlockDistribution, method setUp.
@Before
public void setUp() throws Exception {
HBaseTestingUtil testUtil = new HBaseTestingUtil();
conf = testUtil.getConfiguration();
conf.setInt("dfs.blocksize", 1024 * 1024);
conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
testUtil.startMiniDFSCluster(1);
MiniDFSCluster cluster = testUtil.getDFSCluster();
fs = cluster.getFileSystem();
testPath = new Path(testUtil.getDefaultRootDirPath(), "test.file");
writeSomeData(fs, testPath, 256 << 20, (byte) 2);
}
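writeSomeData is a private helper of the test class and its body is not shown; a hedged sketch of what such a helper might look like, with the signature inferred from the call above:
// Hypothetical writeSomeData: fill `path` on `fs` with `len` bytes of value `b`.
// The real helper lives in TestInputStreamBlockDistribution; this is only a sketch.
private static void writeSomeData(FileSystem fs, Path path, long len, byte b) throws IOException {
  byte[] buf = new byte[8 * 1024];
  java.util.Arrays.fill(buf, b);
  try (FSDataOutputStream out = fs.create(path)) {
    for (long written = 0; written < len; written += buf.length) {
      int chunk = (int) Math.min(buf.length, len - written);
      out.write(buf, 0, chunk);
    }
  }
}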