Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hadoop by apache.
The class TestHBaseStorageFlowRunCompaction, method setupBeforeClass:
@BeforeClass
public static void setupBeforeClass() throws Exception {
  util = new HBaseTestingUtility();
  Configuration conf = util.getConfiguration();
  conf.setInt("hfile.format.version", 3); // cell tags require HFile format v3
  util.startMiniCluster();
  createSchema();
}
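
A matching teardown is typically paired with this setup to stop the mini cluster; a minimal sketch, assuming the same static util field (the upstream teardown may differ):

@AfterClass
public static void tearDownAfterClass() throws Exception {
  util.shutdownMiniCluster(); // stops HBase and the backing mini DFS/ZK clusters
}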
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestClientOperationInterrupt, method setUpBeforeClass:
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = HBaseConfiguration.create();
  // Load TestCoprocessor on every user region in the mini cluster.
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();

  // Drop any leftover table from a previous run before recreating it.
  Admin admin = util.getAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  Table ht = util.createTable(tableName, new byte[][] { dummy, test });
  Put p = new Put(row1);
  p.addColumn(dummy, dummy, dummy);
  ht.put(p);
}
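
The TestCoprocessor registered above is defined inside the same test class upstream; what follows is only a minimal sketch of such a region observer on the HBase 2.x coprocessor API, not the actual implementation (the sleep duration and exception handling are illustrative):

public static class TestCoprocessor implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
      List<Cell> results) throws IOException {
    // Block the Get long enough for the client thread to be interrupted.
    try {
      Thread.sleep(2500);
    } catch (InterruptedException ie) {
      throw (IOException) new InterruptedIOException().initCause(ie);
    }
  }
}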
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestCacheConfig, method testFileBucketCacheConfig:
@Test
public void testFileBucketCacheConfig() throws IOException {
  HBaseTestingUtility htu = new HBaseTestingUtility(this.conf);
  try {
    Path p = new Path(htu.getDataTestDir(), "bc.txt");
    FileSystem fs = FileSystem.get(this.conf);
    fs.create(p).close();
    // Point the bucket cache at a file-backed IOEngine.
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:" + p);
    doBucketCacheConfigTest();
  } finally {
    htu.cleanupTestDir();
  }
}
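
doBucketCacheConfigTest is a private helper defined elsewhere in TestCacheConfig; the configuration keys it exercises can also be set directly. A minimal standalone sketch, assuming a file-backed IOEngine with an illustrative path and size:

Configuration conf = HBaseConfiguration.create();
// "file:<path>" selects the file-backed bucket cache IOEngine.
conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:/tmp/bucketcache.data");
// Values >= 1 are read as a capacity in megabytes (< 1 means a fraction of heap).
conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 100f);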
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestFileLink, method testHDFSLinkReadDuringDelete:
/**
 * Test that the link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked':
 * the inode, which contains the file's data, is not removed until all
 * processes are finished with it.
 * On HDFS, when a read goes beyond the cached block locations, the namenode
 * is queried by filename, and the deleted file no longer exists
 * (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte) i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Read file 0, delete it, then drain the buffer: the link switches to file 1.
      n = in.read(data);
      dataVerify(data, n, (byte) 0);
      fs.delete(files.get(0), true);
      skipBuffer(in, (byte) 0);

      // Same again: after the delete and skip, the link switches to file 2.
      n = in.read(data);
      dataVerify(data, n, (byte) 1);
      fs.delete(files.get(1), true);
      skipBuffer(in, (byte) 1);

      // And once more for the last file.
      n = in.read(data);
      dataVerify(data, n, (byte) 2);
      fs.delete(files.get(2), true);
      skipBuffer(in, (byte) 2);

      // No more files available: expect EOF or FileNotFoundException.
      try {
        n = in.read(data);
        assert (n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true); // expected: every location is gone
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.
The class TestFileLink, method testHDFSLinkReadDuringRename:
/**
* Test, on HDFS, that the FileLink is still readable
* even when the current file gets renamed.
*/
@Test
public void testHDFSLinkReadDuringRename() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
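
testLinkReadDuringRename is a shared helper defined elsewhere in TestFileLink; a condensed sketch of the pattern it exercises, reusing the writeSomeData and dataVerify helpers from the delete test above (the file names and sizes are illustrative, not the exact upstream code):

private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws Exception {
  Path originalPath = new Path(rootDir, "test.file");
  Path archivedPath = new Path(rootDir, "archived.file");
  writeSomeData(fs, originalPath, 1 << 20, (byte) 2);

  // The link knows both locations and follows the file wherever it lives.
  FileLink link = new FileLink(originalPath, archivedPath);
  FSDataInputStream in = link.open(fs);
  try {
    byte[] data = new byte[8192];
    int n = in.read(data);                  // served from the original location
    dataVerify(data, n, (byte) 2);

    fs.rename(originalPath, archivedPath);  // move the file out from under the reader

    n = in.read(data);                      // the link fails over to the new location
    dataVerify(data, n, (byte) 2);
  } finally {
    in.close();
  }
}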