Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
From class TestCompactionArchiveIOException, method setup:
@Before
public void setup() throws Exception {
  testUtil = new HBaseTestingUtil();
  testUtil.startMiniDFSCluster(1);
  testDir = testUtil.getDataTestDirOnTestFS();
  CommonFSUtils.setRootDir(testUtil.getConfiguration(), testDir);
}
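The matching teardown is not part of this snippet. A minimal sketch of what one might look like, assuming the testUtil and testDir fields above (the method name and cleanup steps are illustrative, not the test's actual teardown):

@After
public void tearDown() throws Exception {
  // Remove the test directory, then stop the mini DFS cluster started in setup().
  testUtil.getTestFileSystem().delete(testDir, true);
  testUtil.shutdownMiniDFSCluster();
}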
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
From class TestGlobalReplicationThrottler, method setUpBeforeClass:
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1 = HBaseConfiguration.create();
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setLong("replication.source.sleepforretries", 100);
  // Each WAL is about 120 bytes
  conf1.setInt(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY, REPLICATION_SOURCE_QUOTA);
  conf1.setLong("replication.source.per.peer.node.bandwidth", 100L);

  utility1 = new HBaseTestingUtil(conf1);
  utility1.startMiniZKCluster();
  // Both test clusters share one mini ZooKeeper, under different znode parents.
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  new ZKWatcher(conf1, "cluster1", null, true);

  conf2 = new Configuration(conf1);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  utility2 = new HBaseTestingUtil(conf2);
  utility2.setZkCluster(miniZK);
  new ZKWatcher(conf2, "cluster2", null, true);

  ReplicationPeerConfig rpc =
    ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()).build();

  utility1.startMiniCluster();
  utility2.startMiniCluster();

  try (Connection connection = ConnectionFactory.createConnection(utility1.getConfiguration());
    Admin admin1 = connection.getAdmin()) {
    admin1.addReplicationPeer("peer1", rpc);
    admin1.addReplicationPeer("peer2", rpc);
    admin1.addReplicationPeer("peer3", rpc);
    numOfPeer = admin1.listReplicationPeers().size();
  }
}
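The corresponding @AfterClass is not shown. A minimal sketch, assuming the utility1 and utility2 fields above (the method name is illustrative):

@AfterClass
public static void tearDownAfterClass() throws Exception {
  // Shut the clusters down in reverse start order; utility1 also owns the shared mini ZK.
  utility2.shutdownMiniCluster();
  utility1.shutdownMiniCluster();
}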
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
From class WALEntryStreamTestBase, method startCluster:
protected static void startCluster() throws Exception {
  TEST_UTIL = new HBaseTestingUtil();
  CONF = TEST_UTIL.getConfiguration();
  CONF.setLong("replication.source.sleepforretries", 10);
  TEST_UTIL.startMiniDFSCluster(3);
  cluster = TEST_UTIL.getDFSCluster();
  fs = cluster.getFileSystem();
}
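Since this is a shared base class, concrete test classes trigger the startup themselves; a hypothetical sketch of a subclass doing so, with a matching shutdown (both method bodies are assumptions, not code from the HBase repository):

@BeforeClass
public static void setUpBeforeClass() throws Exception {
  startCluster();
}

@AfterClass
public static void tearDownAfterClass() throws Exception {
  TEST_UTIL.shutdownMiniDFSCluster();
}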
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
From class TestBlockReorderBlockLocation, method setUp:
@Before
public void setUp() throws Exception {
  htu = new HBaseTestingUtil();
  // Use a tiny block size so that test files span multiple blocks.
  htu.getConfiguration().setInt("dfs.blocksize", 1024);
  htu.getConfiguration().setInt("dfs.replication", 3);
  // Three datanodes, each on its own rack and host.
  htu.startMiniDFSCluster(3, new String[] { "/r1", "/r2", "/r3" },
    new String[] { host1, host2, host3 });
  conf = htu.getConfiguration();
  cluster = htu.getDFSCluster();
  dfs = (DistributedFileSystem) FileSystem.get(conf);
}
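With a 1 KB block size and three racks, a test can write a small multi-block file and inspect where each block was placed; a minimal sketch using standard HDFS APIs (the path and file size are illustrative, not the test's actual assertions):

// Write a 4 KB file (four 1 KB blocks) and list the hosts holding each block.
Path p = new Path("/testBlockLocation");
try (FSDataOutputStream out = dfs.create(p)) {
  out.write(new byte[4096]);
}
FileStatus status = dfs.getFileStatus(p);
BlockLocation[] locations = dfs.getFileBlockLocations(status, 0, status.getLen());
for (BlockLocation location : locations) {
  // With dfs.replication=3, each block should be present on all three hosts.
  System.out.println(java.util.Arrays.toString(location.getHosts()));
}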
Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
From class TestFileLink, method testGetUnderlyingFSDataInputStream:
/**
 * Test that the stream returned by {@link FileLink#open(FileSystem)} can be unwrapped to an
 * {@link HdfsDataInputStream} by {@link FileLink#getUnderlyingFileLinkInputStream(FSDataInputStream)}.
 */
@Test
public void testGetUnderlyingFSDataInputStream() throws Exception {
  HBaseTestingUtil testUtil = new HBaseTestingUtil();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
  testUtil.startMiniDFSCluster(1);
  try {
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    Path originalPath = new Path(testUtil.getDefaultRootDirPath(), "test.file");
    writeSomeData(fs, originalPath, 256 << 20, (byte) 2);
    List<Path> files = new ArrayList<Path>();
    files.add(originalPath);
    FileLink link = new FileLink(files);
    FSDataInputStream stream = link.open(fs);
    FSDataInputStream underlying = FileLink.getUnderlyingFileLinkInputStream(stream);
    assertTrue(underlying instanceof HdfsDataInputStream);
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
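The writeSomeData helper above is defined elsewhere in TestFileLink and not shown here. A hypothetical sketch of such a helper, filling a file with a repeated byte value (the chunked-write implementation is an assumption, not the test's actual code):

private static void writeSomeData(FileSystem fs, Path path, long size, byte v) throws IOException {
  // Fill the file in 4 KB chunks until `size` bytes have been written.
  byte[] chunk = new byte[4096];
  java.util.Arrays.fill(chunk, v);
  try (FSDataOutputStream out = fs.create(path)) {
    for (long written = 0; written < size; written += chunk.length) {
      out.write(chunk, 0, (int) Math.min(chunk.length, size - written));
    }
  }
}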