Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
From class TestVisibilityLabelsWithDeletes, method addLabels:
public static void addLabels() throws Exception {
  PrivilegedExceptionAction<VisibilityLabelsResponse> action =
      new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
        @Override
        public VisibilityLabelsResponse run() throws Exception {
          String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE };
          // Each privileged action opens its own short-lived Connection and
          // closes it via try-with-resources.
          try (Connection conn = ConnectionFactory.createConnection(conf)) {
            VisibilityClient.addLabels(conn, labels);
          } catch (Throwable t) {
            throw new IOException(t);
          }
          return null;
        }
      };
  SUPERUSER.runAs(action);
}
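The VisibilityLabelsResponse returned by VisibilityClient.addLabels is discarded here. If a test needed to confirm that every label was accepted, the response could be inspected through its protobuf-generated accessors. A minimal sketch, assuming run() is changed to return the response and that the generated getResultList()/hasException() methods are available (this is our addition, not part of the original test):

// Sketch (assumed protobuf accessors): surface per-label failures instead
// of discarding the response. Requires run() to return the response.
VisibilityLabelsResponse response = SUPERUSER.runAs(action);
for (RegionActionResult result : response.getResultList()) {
  if (result.hasException()) {
    throw new IOException("addLabels failed: " + result.getException());
  }
}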
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
From class TestVisibilityLabelsWithDeletes, method testVisibilityLabelsWithDeleteColumnWithSpecificVersionWithPutsReAppearing:
@Test
public void testVisibilityLabelsWithDeleteColumnWithSpecificVersionWithPutsReAppearing() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  Admin hBaseAdmin = TEST_UTIL.getAdmin();
  HColumnDescriptor colDesc = new HColumnDescriptor(fam);
  colDesc.setMaxVersions(5);
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(colDesc);
  hBaseAdmin.createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    // Two puts at the same timestamp (123L), one per visibility label.
    Put put1 = new Put(Bytes.toBytes("row1"));
    put1.addColumn(fam, qual, 123L, value);
    put1.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    Put put2 = new Put(Bytes.toBytes("row1"));
    put2.addColumn(fam, qual, 123L, value1);
    put2.setCellVisibility(new CellVisibility(SECRET));
    table.put(createList(put1, put2));
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(CONFIDENTIAL, SECRET));
    ResultScanner scanner = table.getScanner(s);
    assertEquals(1, scanner.next(3).length);
    scanner.close();
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // Delete the CONFIDENTIAL version of the cell at timestamp 123L ...
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
          d.addColumn(fam, qual, 123L);
          table.delete(d);
        }
        // ... then the SECRET version; each delete only masks cells whose
        // visibility expression matches its own.
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(SECRET));
          d.addColumn(fam, qual, 123L);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(CONFIDENTIAL));
    scanner = table.getScanner(s);
    assertEquals(0, scanner.next(3).length);
    scanner.close();
  }
}
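The test issues two separate deletes because a visibility-scoped Delete only masks cells whose visibility expression matches its own CellVisibility; a single delete cannot cover both labels. The final verification above checks only CONFIDENTIAL; a short sketch of checking both labels independently, reusing the fields from the test (our addition):

// Sketch: the deleted cell should be invisible under either label.
for (String label : new String[] { CONFIDENTIAL, SECRET }) {
  Scan scan = new Scan();
  scan.setMaxVersions(5);
  scan.setAuthorizations(new Authorizations(label));
  try (ResultScanner rs = table.getScanner(scan)) {
    assertEquals(0, rs.next(3).length);
  }
}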
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
From class TestHFileOutputFormat2, method testExcludeAllFromMinorCompaction:
/**
 * Exercises the scenario reported in HBASE-6901: all files are bulk loaded
 * and excluded from minor compaction. Without the HBASE-6901 fix, an
 * ArrayIndexOutOfBoundsException is thrown.
 */
@Ignore("Flakey: See HBASE-9051")
@Test
public void testExcludeAllFromMinorCompaction() throws Exception {
Configuration conf = util.getConfiguration();
conf.setInt("hbase.hstore.compaction.min", 2);
generateRandomStartKeys(5);
util.startMiniCluster();
try (Connection conn = ConnectionFactory.createConnection();
Admin admin = conn.getAdmin();
Table table = util.createTable(TABLE_NAME, FAMILIES);
RegionLocator locator = conn.getRegionLocator(TABLE_NAME)) {
final FileSystem fs = util.getDFSCluster().getFileSystem();
assertEquals("Should start with empty table", 0, util.countRows(table));
// deep inspection: get the StoreFile dir
final Path storePath = new Path(FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME), new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(), Bytes.toString(FAMILIES[0])));
assertEquals(0, fs.listStatus(storePath).length);
// Generate two bulk load files
conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);
for (int i = 0; i < 2; i++) {
Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
runIncrementalPELoad(conf, table.getTableDescriptor(), conn.getRegionLocator(TABLE_NAME), testDir, false);
// Perform the actual load
new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator);
}
// Ensure data shows up
int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, util.countRows(table));
// should have a second StoreFile now
assertEquals(2, fs.listStatus(storePath).length);
// minor compactions shouldn't get rid of the file
admin.compact(TABLE_NAME);
try {
quickPoll(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
List<HRegion> regions = util.getMiniHBaseCluster().getRegions(TABLE_NAME);
for (HRegion region : regions) {
for (Store store : region.getStores()) {
store.closeAndArchiveCompactedFiles();
}
}
return fs.listStatus(storePath).length == 1;
}
}, 5000);
throw new IOException("SF# = " + fs.listStatus(storePath).length);
} catch (AssertionError ae) {
// this is expected behavior
}
// a major compaction should work though
admin.majorCompact(TABLE_NAME);
quickPoll(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
List<HRegion> regions = util.getMiniHBaseCluster().getRegions(TABLE_NAME);
for (HRegion region : regions) {
for (Store store : region.getStores()) {
store.closeAndArchiveCompactedFiles();
}
}
return fs.listStatus(storePath).length == 1;
}
}, 5000);
} finally {
util.shutdownMiniCluster();
}
}
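The catch (AssertionError ae) block only works if quickPoll fails with an AssertionError when the condition never becomes true. The helper is private to TestHFileOutputFormat2; a sketch of the shape it presumably has, with the timings and structure as assumptions:

// Presumed shape of the private quickPoll helper: retry until the condition
// holds or the time budget is spent, then fail() (which throws AssertionError,
// the exception the tests above deliberately catch).
private void quickPoll(Callable<Boolean> c, int waitMs) throws Exception {
  int sleepMs = 10;
  int retries = waitMs / sleepMs;
  while (retries-- > 0) {
    if (c.call().booleanValue()) {
      return;
    }
    Thread.sleep(sleepMs);
  }
  fail();
}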
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
From class TestHFileOutputFormat2, method testExcludeMinorCompaction:
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
public void testExcludeMinorCompaction() throws Exception {
Configuration conf = util.getConfiguration();
conf.setInt("hbase.hstore.compaction.min", 2);
generateRandomStartKeys(5);
util.startMiniCluster();
try (Connection conn = ConnectionFactory.createConnection(conf);
Admin admin = conn.getAdmin()) {
Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
final FileSystem fs = util.getDFSCluster().getFileSystem();
Table table = util.createTable(TABLE_NAME, FAMILIES);
assertEquals("Should start with empty table", 0, util.countRows(table));
// deep inspection: get the StoreFile dir
final Path storePath = new Path(FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME), new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(), Bytes.toString(FAMILIES[0])));
assertEquals(0, fs.listStatus(storePath).length);
// put some data in it and flush to create a storefile
Put p = new Put(Bytes.toBytes("test"));
p.addColumn(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
table.put(p);
admin.flush(TABLE_NAME);
assertEquals(1, util.countRows(table));
quickPoll(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return fs.listStatus(storePath).length == 1;
}
}, 5000);
// Generate a bulk load file with more rows
conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);
RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAME);
runIncrementalPELoad(conf, table.getTableDescriptor(), regionLocator, testDir, false);
// Perform the actual load
new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator);
// Ensure data shows up
int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows + 1, util.countRows(table));
// should have a second StoreFile now
assertEquals(2, fs.listStatus(storePath).length);
// minor compactions shouldn't get rid of the file
admin.compact(TABLE_NAME);
try {
quickPoll(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return fs.listStatus(storePath).length == 1;
}
}, 5000);
throw new IOException("SF# = " + fs.listStatus(storePath).length);
} catch (AssertionError ae) {
// this is expected behavior
}
// a major compaction should work though
admin.majorCompact(TABLE_NAME);
quickPoll(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return fs.listStatus(storePath).length == 1;
}
}, 5000);
} finally {
util.shutdownMiniCluster();
}
}
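Both compaction tests repeat the same StoreFile-count polling. A small helper could factor it out; a sketch under the same assumptions (fs, storePath, and quickPoll as above; the helper name is ours):

// Hypothetical helper: poll until the store directory holds exactly the
// expected number of StoreFiles, otherwise quickPoll fails the test.
private void waitForStoreFileCount(final FileSystem fs, final Path storePath,
    final int expected) throws Exception {
  quickPoll(new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      return fs.listStatus(storePath).length == expected;
    }
  }, 5000);
}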
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
From class TestLoadIncrementalHFilesSplitRecovery, method testSplitTmpFileCleanUp:
/**
 * This test creates a table with many small regions. The bulk load files
 * must be split multiple times before all of them can be loaded successfully.
 */
@Test(timeout = 120000)
public void testSplitTmpFileCleanUp() throws Exception {
  final TableName table = TableName.valueOf(name.getMethodName());
  byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000010"),
      Bytes.toBytes("row_00000020"), Bytes.toBytes("row_00000030"),
      Bytes.toBytes("row_00000040"), Bytes.toBytes("row_00000050") };
  try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
    setupTableWithSplitkeys(table, 10, SPLIT_KEYS);
    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration());
    // create HFiles
    Path bulk = buildBulkFiles(table, 2);
    try (Table t = connection.getTable(table);
        RegionLocator locator = connection.getRegionLocator(table);
        Admin admin = connection.getAdmin()) {
      lih.doBulkLoad(bulk, admin, t, locator);
    }
    // family path
    Path tmpPath = new Path(bulk, family(0));
    // TMP_DIR under family path
    tmpPath = new Path(tmpPath, LoadIncrementalHFiles.TMP_DIR);
    FileSystem fs = bulk.getFileSystem(util.getConfiguration());
    // HFiles have been split, so TMP_DIR exists
    assertTrue(fs.exists(tmpPath));
    // TMP_DIR should have been cleaned up
    assertNull(LoadIncrementalHFiles.TMP_DIR + " should be empty.",
        FSUtils.listStatus(fs, tmpPath));
    assertExpectedTable(connection, table, ROWCOUNT, 2);
  }
}
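The assertNull check relies on HBase's FSUtils.listStatus returning null, rather than an empty array, for a directory with no entries. An equivalent check that states the expectation more explicitly (our addition; a FileStatus import is assumed):

// Sketch: make the "TMP_DIR is empty" expectation explicit.
FileStatus[] leftovers = FSUtils.listStatus(fs, tmpPath);
assertTrue(LoadIncrementalHFiles.TMP_DIR + " should be empty.",
    leftovers == null || leftovers.length == 0);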