Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
From the class TestLoadIncrementalHFilesSplitRecovery, method testGroupOrSplitWhenRegionHoleExistsInMeta:
@Test(timeout = 120000)
public void testGroupOrSplitWhenRegionHoleExistsInMeta() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000100") };
  // Share connection. We were failing to find the table with our new reverse scan because it
  // looks for first region, not any region -- that is how it works now. The below removes first
  // region in test. Was reliant on the Connection caching having first region.
  Connection connection = ConnectionFactory.createConnection(util.getConfiguration());
  Table table = connection.getTable(tableName);
  setupTableWithSplitkeys(tableName, 10, SPLIT_KEYS);
  Path dir = buildBulkFiles(tableName, 2);
  final AtomicInteger countedLqis = new AtomicInteger();
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration()) {
    @Override
    protected Pair<List<LoadQueueItem>, String> groupOrSplit(
        Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item,
        final Table htable, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
      Pair<List<LoadQueueItem>, String> lqis =
          super.groupOrSplit(regionGroups, item, htable, startEndKeys);
      if (lqis != null && lqis.getFirst() != null) {
        countedLqis.addAndGet(lqis.getFirst().size());
      }
      return lqis;
    }
  };
  // Do the bulk load while there is no region hole in hbase:meta.
  try (Table t = connection.getTable(tableName);
      RegionLocator locator = connection.getRegionLocator(tableName);
      Admin admin = connection.getAdmin()) {
    loader.doBulkLoad(dir, admin, t, locator);
  } catch (Exception e) {
    LOG.error("exception=", e);
  }
  // Check that all the data was loaded into the table.
  this.assertExpectedTable(tableName, ROWCOUNT, 2);
  dir = buildBulkFiles(tableName, 3);
  // Mess it up by leaving a hole in hbase:meta.
  List<HRegionInfo> regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
  for (HRegionInfo regionInfo : regionInfos) {
    if (Bytes.equals(regionInfo.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
      MetaTableAccessor.deleteRegion(connection, regionInfo);
      break;
    }
  }
  try (Table t = connection.getTable(tableName);
      RegionLocator locator = connection.getRegionLocator(tableName);
      Admin admin = connection.getAdmin()) {
    loader.doBulkLoad(dir, admin, t, locator);
  } catch (Exception e) {
    LOG.error("exception=", e);
    assertTrue("IOException expected", e instanceof IOException);
  }
  table.close();
  // Make sure at least the one region that still exists can be found.
  regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
  assertTrue(regionInfos.size() >= 1);
  this.assertExpectedTable(connection, tableName, ROWCOUNT, 2);
  connection.close();
}
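The test above deliberately keeps one Connection open for the whole method and hands out short-lived Table, RegionLocator, and Admin views from it. Below is a standalone sketch of that lifecycle, not part of the test; the class name, main-method wrapper, and table name are assumptions made for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;

public class SharedConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // One heavyweight Connection per application; Table, RegionLocator and Admin are
    // lightweight handles obtained from it and closed independently.
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      TableName tableName = TableName.valueOf("example_table"); // assumed table name
      try (Table table = connection.getTable(tableName);
          RegionLocator locator = connection.getRegionLocator(tableName);
          Admin admin = connection.getAdmin()) {
        System.out.println("Table exists: " + admin.tableExists(tableName));
        System.out.println("Regions: " + locator.getAllRegionLocations().size());
      }
    } // closing the Connection releases the underlying shared resources
  }
}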
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
From the class TestImportTsv, method validateTable:
/**
 * Confirm ImportTsv by checking the data loaded into the online table.
 */
private static void validateTable(Configuration conf, TableName tableName, String family,
    int valueMultiplier, boolean isDryRun) throws IOException {
  LOG.debug("Validating table.");
  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(tableName);
  boolean verified = false;
  long pause = conf.getLong("hbase.client.pause", 5 * 1000);
  int numRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
  for (int i = 0; i < numRetries; i++) {
    try {
      Scan scan = new Scan();
      // Scan the entire family.
      scan.addFamily(Bytes.toBytes(family));
      ResultScanner resScanner = table.getScanner(scan);
      int numRows = 0;
      for (Result res : resScanner) {
        numRows++;
        assertEquals(2, res.size());
        List<Cell> kvs = res.listCells();
        assertTrue(CellUtil.matchingRow(kvs.get(0), Bytes.toBytes("KEY")));
        assertTrue(CellUtil.matchingRow(kvs.get(1), Bytes.toBytes("KEY")));
        assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier)));
        assertTrue(CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier)));
        // Only one result set is expected, so let it loop.
      }
      if (isDryRun) {
        assertEquals(0, numRows);
      } else {
        assertEquals(1, numRows);
      }
      verified = true;
      break;
    } catch (NullPointerException e) {
      // If here, a cell was empty. Presume it's because updates came in
      // after the scanner had been opened. Wait a while and retry.
    }
    try {
      Thread.sleep(pause);
    } catch (InterruptedException e) {
      // continue
    }
  }
  table.close();
  connection.close();
  assertTrue(verified);
}
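validateTable opens the Connection and Table at the top and closes them by hand at the end; the same family scan can also be written with try-with-resources so that the scanner, table, and connection are released even if an assertion throws mid-loop. A minimal sketch of that variant follows; the class name, method name, and the idea of merely counting rows are illustrative assumptions, not taken from TestImportTsv.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyScanSketch {
  // Counts the rows that have at least one cell in the given family.
  static long countRows(Configuration conf, TableName tableName, String family) throws IOException {
    long rows = 0;
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes(family));
    // try-with-resources closes scanner, table and connection in reverse order,
    // even when an exception is thrown inside the loop.
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(tableName);
        ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        rows++;
      }
    }
    return rows;
  }
}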
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
From the class TestVisibilityLabels, method testClearUserAuths:
@Test
public void testClearUserAuths() throws Throwable {
  PrivilegedExceptionAction<Void> action = new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      String[] auths = { SECRET, CONFIDENTIAL, PRIVATE };
      String user = "testUser";
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        VisibilityClient.setAuths(conn, auths, user);
      } catch (Throwable e) {
        fail("Should not have failed");
      }
      // Remove the auths for SECRET and CONFIDENTIAL for the user,
      // also passing a non-existing auth (PUBLIC).
      auths = new String[] { SECRET, PUBLIC, CONFIDENTIAL };
      VisibilityLabelsResponse response = null;
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        response = VisibilityClient.clearAuths(conn, auths, user);
      } catch (Throwable e) {
        fail("Should not have failed");
      }
      List<RegionActionResult> resultList = response.getResultList();
      assertEquals(3, resultList.size());
      assertTrue(resultList.get(0).getException().getValue().isEmpty());
      assertEquals("org.apache.hadoop.hbase.DoNotRetryIOException",
          resultList.get(1).getException().getName());
      assertTrue(Bytes.toString(resultList.get(1).getException().getValue().toByteArray())
          .contains("org.apache.hadoop.hbase.security.visibility.InvalidLabelException: "
              + "Label 'public' is not set for the user testUser"));
      assertTrue(resultList.get(2).getException().getValue().isEmpty());
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table ht = connection.getTable(LABELS_TABLE_NAME)) {
        ResultScanner scanner = ht.getScanner(new Scan());
        Result result = null;
        List<Result> results = new ArrayList<>();
        while ((result = scanner.next()) != null) {
          results.add(result);
        }
        List<String> curAuths = extractAuths(user, results);
        assertTrue(curAuths.contains(PRIVATE));
        assertEquals(1, curAuths.size());
      }
      GetAuthsResponse authsResponse = null;
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        authsResponse = VisibilityClient.getAuths(conn, user);
      } catch (Throwable e) {
        fail("Should not have failed");
      }
      List<String> authsList = new ArrayList<>(authsResponse.getAuthList().size());
      for (ByteString authBS : authsResponse.getAuthList()) {
        authsList.add(Bytes.toString(authBS.toByteArray()));
      }
      assertEquals(1, authsList.size());
      assertTrue(authsList.contains(PRIVATE));
      return null;
    }
  };
  SUPERUSER.runAs(action);
}
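testClearUserAuths opens a fresh Connection for every VisibilityClient call, which keeps each step of the test independent; outside a test, the three calls can share a single Connection. A minimal sketch under that assumption follows; the class name, label strings, and the GetAuthsResponse import (shown at its pre-2.0 package location, which may differ in other HBase versions) are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse;
import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

public class AuthManagementSketch {
  // setAuths, clearAuths and getAuths all accept the same shared Connection.
  static void manageAuths(Configuration conf, String user) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      VisibilityClient.setAuths(conn, new String[] { "secret", "confidential" }, user);
      VisibilityClient.clearAuths(conn, new String[] { "secret" }, user);
      GetAuthsResponse remaining = VisibilityClient.getAuths(conn, user);
      System.out.println("Remaining auths for " + user + ": " + remaining.getAuthList().size());
    }
  }
}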
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
From the class TestVisibilityLabelsReplication, method verifyGet:
protected void verifyGet(final byte[] row, final String visString, final int expected,
    final boolean nullExpected, final String... auths) throws IOException, InterruptedException {
  PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf1);
          Table table2 = connection.getTable(TABLE_NAME)) {
        CellScanner cellScanner;
        Cell current;
        Get get = new Get(row);
        get.setAuthorizations(new Authorizations(auths));
        Result result = table2.get(get);
        cellScanner = result.cellScanner();
        boolean advance = cellScanner.advance();
        if (nullExpected) {
          assertTrue(!advance);
          return null;
        }
        current = cellScanner.current();
        assertArrayEquals(CellUtil.cloneRow(current), row);
        for (Tag tag : TestCoprocessorForTagsAtSink.tags) {
          LOG.info("The tag type is " + tag.getType());
        }
        assertEquals(expected, TestCoprocessorForTagsAtSink.tags.size());
        Tag tag = TestCoprocessorForTagsAtSink.tags.get(1);
        if (tag.getType() != NON_VIS_TAG_TYPE) {
          assertEquals(TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE, tag.getType());
        }
        tag = TestCoprocessorForTagsAtSink.tags.get(0);
        boolean foundNonVisTag = false;
        for (Tag t : TestCoprocessorForTagsAtSink.tags) {
          if (t.getType() == NON_VIS_TAG_TYPE) {
            assertEquals(TEMP, TagUtil.getValueAsString(t));
            foundNonVisTag = true;
            break;
          }
        }
        doAssert(row, visString);
        assertTrue(foundNonVisTag);
        return null;
      }
    }
  };
  USER1.runAs(scanAction);
}
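The heart of verifyGet is a Get issued with explicit Authorizations, so only cells whose visibility expressions are satisfied by those auths come back. Below is a stripped-down sketch of just that pattern, without the replication tag assertions; the class name and table name are assumptions for illustration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.visibility.Authorizations;

public class AuthorizedGetSketch {
  static Result authorizedGet(Configuration conf, byte[] row, String... auths) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf("example_table"))) { // assumed table
      Get get = new Get(row);
      // Only cells whose visibility labels are covered by these authorizations are returned.
      get.setAuthorizations(new Authorizations(auths));
      return table.get(get);
    }
  }
}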
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
From the class TestVisibilityLabelsReplication, method setAuths:
public static void setAuths(final Configuration conf) throws Exception {
  PrivilegedExceptionAction<VisibilityLabelsResponse> action =
      new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
        @Override
        public VisibilityLabelsResponse run() throws Exception {
          try (Connection conn = ConnectionFactory.createConnection(conf)) {
            return VisibilityClient.setAuths(conn,
                new String[] { SECRET, CONFIDENTIAL, PRIVATE, TOPSECRET, UNICODE_VIS_TAG }, "user1");
          } catch (Throwable e) {
            throw new Exception(e);
          }
        }
      };
  VisibilityLabelsResponse response = SUPERUSER.runAs(action);
}
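setAuths above assigns the VisibilityLabelsResponse to a local variable and never inspects it. A caller that wants to confirm every label was applied can check the per-label RegionActionResult entries the same way testClearUserAuths does; a minimal sketch follows, assuming the pre-2.0 package locations for the generated protobuf classes and a hypothetical helper class.

import java.util.List;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;

public class AuthResponseCheckSketch {
  // An empty exception value for an entry means the corresponding label was set without error.
  static void assertAllApplied(VisibilityLabelsResponse response) {
    List<RegionActionResult> results = response.getResultList();
    for (RegionActionResult result : results) {
      if (!result.getException().getValue().isEmpty()) {
        throw new IllegalStateException(
            "Label was not applied: " + result.getException().getName());
      }
    }
  }
}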