Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
Class TestAccessControlFilter, method doQualifierAccess.
private void doQualifierAccess(final Table table) throws Exception {
  // set permissions: READER may read the whole table, LIMITED only the public column
  SecureTestUtil.grantOnTable(TEST_UTIL, READER.getShortName(), TABLE, null, null,
    Permission.Action.READ);
  SecureTestUtil.grantOnTable(TEST_UTIL, LIMITED.getShortName(), TABLE, FAMILY, PUBLIC_COL,
    Permission.Action.READ);
  // put some test data
  List<Put> puts = new ArrayList<>(100);
  for (int i = 0; i < 100; i++) {
    Put p = new Put(Bytes.toBytes(i));
    p.addColumn(FAMILY, PRIVATE_COL, Bytes.toBytes("secret " + i));
    p.addColumn(FAMILY, PUBLIC_COL, Bytes.toBytes("info " + i));
    puts.add(p);
  }
  table.put(puts);
  // test read: READER sees both columns in all 100 rows
  READER.runAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
      // force a new RS connection
      conf.set("testkey", UUID.randomUUID().toString());
      Connection connection = ConnectionFactory.createConnection(conf);
      Table t = connection.getTable(TABLE);
      try {
        ResultScanner rs = t.getScanner(new Scan());
        int rowcnt = 0;
        for (Result r : rs) {
          rowcnt++;
          int rownum = Bytes.toInt(r.getRow());
          assertTrue(r.containsColumn(FAMILY, PRIVATE_COL));
          assertEquals("secret " + rownum, Bytes.toString(r.getValue(FAMILY, PRIVATE_COL)));
          assertTrue(r.containsColumn(FAMILY, PUBLIC_COL));
          assertEquals("info " + rownum, Bytes.toString(r.getValue(FAMILY, PUBLIC_COL)));
        }
        assertEquals("Expected 100 rows returned", 100, rowcnt);
        return null;
      } finally {
        t.close();
        connection.close();
      }
    }
  });
  // test read with qualifier filter: LIMITED sees only the public column
  LIMITED.runAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
      // force a new RS connection
      conf.set("testkey", UUID.randomUUID().toString());
      Connection connection = ConnectionFactory.createConnection(conf);
      Table t = connection.getTable(TABLE);
      try {
        ResultScanner rs = t.getScanner(new Scan());
        int rowcnt = 0;
        for (Result r : rs) {
          rowcnt++;
          int rownum = Bytes.toInt(r.getRow());
          assertFalse(r.containsColumn(FAMILY, PRIVATE_COL));
          assertTrue(r.containsColumn(FAMILY, PUBLIC_COL));
          assertEquals("info " + rownum, Bytes.toString(r.getValue(FAMILY, PUBLIC_COL)));
        }
        assertEquals("Expected 100 rows returned", 100, rowcnt);
        return null;
      } finally {
        t.close();
        connection.close();
      }
    }
  });
  // test as user with no permission: the scan is expected to return no rows,
  // so the assertions inside the loop never run
  DENIED.runAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
      // force a new RS connection
      conf.set("testkey", UUID.randomUUID().toString());
      Connection connection = ConnectionFactory.createConnection(conf);
      Table t = connection.getTable(TABLE);
      try {
        ResultScanner rs = t.getScanner(new Scan());
        int rowcnt = 0;
        for (Result r : rs) {
          rowcnt++;
          int rownum = Bytes.toInt(r.getRow());
          assertFalse(r.containsColumn(FAMILY, PRIVATE_COL));
          assertTrue(r.containsColumn(FAMILY, PUBLIC_COL));
          assertEquals("info " + rownum, Bytes.toString(r.getValue(FAMILY, PUBLIC_COL)));
        }
        assertEquals("Expected 0 rows returned", 0, rowcnt);
        return null;
      } finally {
        t.close();
        connection.close();
      }
    }
  });
}
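The test above closes the Table and Connection in a finally block but never closes the ResultScanner explicitly; its resources are released along with the connection. In current client code the usual idiom is try-with-resources, since Connection, Table, and ResultScanner all implement Closeable. A minimal sketch, assuming a Configuration conf and the TABLE name from the test are already in scope:

try (Connection connection = ConnectionFactory.createConnection(conf);
    Table t = connection.getTable(TABLE);
    ResultScanner rs = t.getScanner(new Scan())) {
  for (Result r : rs) {
    // each Result is one row; iteration ends when the scan is exhausted
  }
} // scanner, table, and connection close here, in reverse order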
Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
Class TestReplicationSink, method testBatchSink.
/**
* Insert a whole batch of entries
* @throws Exception
*/
@Test
public void testBatchSink() throws Exception {
  List<WALEntry> entries = new ArrayList<>(BATCH_SIZE);
  List<Cell> cells = new ArrayList<>();
  for (int i = 0; i < BATCH_SIZE; i++) {
    entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
  }
  SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
    replicationClusterId, baseNamespaceDir, hfileArchiveDir);
  Scan scan = new Scan();
  ResultScanner scanRes = table1.getScanner(scan);
  assertEquals(BATCH_SIZE, scanRes.next(BATCH_SIZE).length);
}
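The final assertion relies on ResultScanner.next(int nbRows), which returns an array of at most nbRows Results, and fewer (eventually an empty array) once the scan is exhausted, so requesting BATCH_SIZE rows and checking the array length counts the rows actually present. A small sketch of the same idiom as an explicit paging loop over the test's table1:

Result[] page;
int total = 0;
try (ResultScanner scanner = table1.getScanner(new Scan())) {
  while ((page = scanner.next(BATCH_SIZE)).length > 0) { // up to BATCH_SIZE rows per call
    total += page.length;
  }
}
assertEquals(BATCH_SIZE, total);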
Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
Class TestReplicationSink, method testMixedPutDelete.
/**
* Insert a mix of puts and deletes
* @throws Exception
*/
@Test
public void testMixedPutDelete() throws Exception {
  List<WALEntry> entries = new ArrayList<>(BATCH_SIZE / 2);
  List<Cell> cells = new ArrayList<>();
  for (int i = 0; i < BATCH_SIZE / 2; i++) {
    entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
  }
  SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId,
    baseNamespaceDir, hfileArchiveDir);
  entries = new ArrayList<>(BATCH_SIZE);
  cells = new ArrayList<>();
  for (int i = 0; i < BATCH_SIZE; i++) {
    // put the odd rows, delete the even ones
    entries.add(createEntry(TABLE_NAME1, i,
      i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn, cells));
  }
  SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
    replicationClusterId, baseNamespaceDir, hfileArchiveDir);
  Scan scan = new Scan();
  ResultScanner scanRes = table1.getScanner(scan);
  assertEquals(BATCH_SIZE / 2, scanRes.next(BATCH_SIZE).length);
}
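Because the second batch deletes the even-numbered rows and re-puts the odd ones, only BATCH_SIZE / 2 rows survive. A hedged sketch of a stricter check that also verifies which rows remain, assuming createEntry writes row keys as Bytes.toBytes(i):

try (ResultScanner rs = table1.getScanner(new Scan())) {
  for (Result r : rs) {
    int row = Bytes.toInt(r.getRow()); // assumes int row keys, as noted above
    assertTrue("only odd rows should survive the DeleteColumn batch", row % 2 != 0);
  }
}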
Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
Class TestReplicationSink, method testReplicateEntriesForHFiles.
/**
* Test replicateEntries with a bulk load entry for 25 HFiles
*/
@Test
public void testReplicateEntriesForHFiles() throws Exception {
  Path dir = TEST_UTIL.getDataTestDirOnTestFS("testReplicateEntries");
  Path familyDir = new Path(dir, Bytes.toString(FAM_NAME1));
  int numRows = 10;
  List<Path> p = new ArrayList<>(1);
  // 1. Generate 25 hfile ranges (50 distinct, sorted boundary keys)
  Random rng = new SecureRandom();
  Set<Integer> numbers = new HashSet<>();
  while (numbers.size() < 50) {
    numbers.add(rng.nextInt(1000));
  }
  List<Integer> numberList = new ArrayList<>(numbers);
  Collections.sort(numberList);
  Map<String, Long> storeFilesSize = new HashMap<>(1);
  // 2. Create 25 hfiles
  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = dir.getFileSystem(conf);
  Iterator<Integer> numbersItr = numberList.iterator();
  for (int i = 0; i < 25; i++) {
    Path hfilePath = new Path(familyDir, "hfile_" + i);
    HFileTestUtil.createHFile(conf, fs, hfilePath, FAM_NAME1, FAM_NAME1,
      Bytes.toBytes(numbersItr.next()), Bytes.toBytes(numbersItr.next()), numRows);
    p.add(hfilePath);
    storeFilesSize.put(hfilePath.getName(), fs.getFileStatus(hfilePath).getLen());
  }
  // 3. Create a BulkLoadDescriptor and a WALEdit
  Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
  storeFiles.put(FAM_NAME1, p);
  WALEdit edit = null;
  WALProtos.BulkLoadDescriptor loadDescriptor = null;
  try (Connection c = ConnectionFactory.createConnection(conf);
      RegionLocator l = c.getRegionLocator(TABLE_NAME1)) {
    HRegionInfo regionInfo = l.getAllRegionLocations().get(0).getRegionInfo();
    loadDescriptor = ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1,
      UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()),
      storeFiles, storeFilesSize, 1);
    edit = WALEdit.createBulkLoadEvent(regionInfo, loadDescriptor);
  }
  List<WALEntry> entries = new ArrayList<>(1);
  // 4. Create a WALEntryBuilder
  WALEntry.Builder builder = createWALEntryBuilder(TABLE_NAME1);
  // 5. Copy the hfiles to the paths where the sink expects to find them
  for (int i = 0; i < 25; i++) {
    String pathToHfileFromNS = new StringBuilder(100)
      .append(TABLE_NAME1.getNamespaceAsString()).append(Path.SEPARATOR)
      .append(Bytes.toString(TABLE_NAME1.getName())).append(Path.SEPARATOR)
      .append(Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray()))
      .append(Path.SEPARATOR).append(Bytes.toString(FAM_NAME1)).append(Path.SEPARATOR)
      .append("hfile_" + i).toString();
    String dst = baseNamespaceDir + Path.SEPARATOR + pathToHfileFromNS;
    FileUtil.copy(fs, p.get(0), fs, new Path(dst), false, conf);
  }
  entries.add(builder.build());
  try (ResultScanner scanner = table1.getScanner(new Scan())) {
    // 6. Assert no existing data in table
    assertEquals(0, scanner.next(numRows).length);
  }
  // 7. Replicate the bulk loaded entry
  SINK.replicateEntries(entries, CellUtil.createCellScanner(edit.getCells().iterator()),
    replicationClusterId, baseNamespaceDir, hfileArchiveDir);
  try (ResultScanner scanner = table1.getScanner(new Scan())) {
    // 8. Assert data is replicated
    assertEquals(numRows, scanner.next(numRows).length);
  }
}
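The path built in step 5 mirrors the layout the test stages bulk-load files under: <baseNamespaceDir>/<namespace>/<table>/<encoded region name>/<family>/<hfile>. A sketch of the same construction for a given file index i, using String.join and Path composition instead of a StringBuilder (same variables as the test above):

Path relative = new Path(String.join(Path.SEPARATOR,
  TABLE_NAME1.getNamespaceAsString(),
  Bytes.toString(TABLE_NAME1.getName()),
  Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray()),
  Bytes.toString(FAM_NAME1),
  "hfile_" + i));
Path dst = new Path(baseNamespaceDir, relative);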
Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
Class TestSerialReplication, method testRegionMerge.
@Test
public void testRegionMerge() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  HTableDescriptor table = new HTableDescriptor(tableName);
  HColumnDescriptor fam = new HColumnDescriptor(famName);
  fam.setScope(HConstants.REPLICATION_SCOPE_SERIAL);
  table.addFamily(fam);
  utility1.getAdmin().createTable(table);
  utility2.getAdmin().createTable(table);
  Threads.sleep(5000);
  utility1.getAdmin().split(tableName, ROWS[50]);
  waitTableHasRightNumberOfRegions(tableName, 2);
  try (Table t1 = utility1.getConnection().getTable(tableName);
      Table t2 = utility2.getConnection().getTable(tableName)) {
    // write rows 10, 20, ..., 90 before the merge
    for (int i = 10; i < 100; i += 10) {
      Put put = new Put(ROWS[i]);
      put.addColumn(famName, VALUE, VALUE);
      t1.put(put);
    }
    List<Pair<HRegionInfo, ServerName>> regions =
      MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), tableName);
    utility1.getAdmin().mergeRegionsAsync(regions.get(0).getFirst().getRegionName(),
      regions.get(1).getFirst().getRegionName(), true);
    waitTableHasRightNumberOfRegions(tableName, 1);
    // write rows 11, 21, ..., 91 after the merge
    for (int i = 11; i < 100; i += 10) {
      Put put = new Put(ROWS[i]);
      put.addColumn(famName, VALUE, VALUE);
      t1.put(put);
    }
    long start = EnvironmentEdgeManager.currentTime();
    while (EnvironmentEdgeManager.currentTime() - start < 180000) {
      Scan scan = new Scan();
      scan.setCaching(100);
      List<Cell> list = new ArrayList<>();
      try (ResultScanner results = t2.getScanner(scan)) {
        for (Result result : results) {
          assertEquals(1, result.rawCells().length);
          list.add(result.rawCells()[0]);
        }
      }
      List<Integer> listOfNumbers = getRowNumbers(list);
      List<Integer> list0 = new ArrayList<>();
      List<Integer> list1 = new ArrayList<>();
      for (int num : listOfNumbers) {
        if (num % 10 == 0) {
          list0.add(num);
        } else {
          list1.add(num);
        }
      }
      LOG.info(Arrays.toString(list0.toArray()));
      LOG.info(Arrays.toString(list1.toArray()));
      // serial replication: rows written after the merge (11, 21, ...) may only
      // appear once all nine pre-merge rows (10, 20, ...) have arrived
      assertIntegerList(list1, 11, 10);
      if (!list1.isEmpty()) {
        assertEquals(9, list0.size());
      }
      if (list.size() == 18) {
        return;
      }
      LOG.info("Waiting all logs pushed to slave. Expected 18 , actual " + list.size());
      Thread.sleep(200);
    }
  }
}
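The loop above polls the slave cluster with a fresh scan every 200 ms; scan.setCaching(100) sizes each scanner RPC at up to 100 rows, so the whole (at most 18-row) table comes back in a single round trip. A minimal sketch of the counting part of that pattern as a hypothetical helper (countRows is not part of the test):

private static int countRows(Table table) throws IOException {
  Scan scan = new Scan();
  scan.setCaching(100); // fetch up to 100 rows per scanner RPC
  int count = 0;
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      count++;
    }
  }
  return count;
}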