Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
From class TestVisibilityLabelsWithDeletes, method doPuts.
/**
 * Creates {@code tableName} with a single column family (keeping up to 5 versions) and loads
 * five timestamped versions of "row1" plus one version of "row2", each cell tagged with a
 * visibility expression.
 *
 * @param tableName table to create and populate
 * @return an open {@link Table} handle on the new table; the caller is responsible for closing it
 */
private Table doPuts(TableName tableName) throws IOException, InterruptedIOException, RetriesExhaustedWithDetailsException, InterruptedException {
  Admin hBaseAdmin = TEST_UTIL.getAdmin();
  HColumnDescriptor colDesc = new HColumnDescriptor(fam);
  // Retain several versions so per-version visibility/delete behavior can be observed.
  colDesc.setMaxVersions(5);
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(colDesc);
  hBaseAdmin.createTable(desc);
  // Shared expression for cells visible under either label conjunction.
  final String confPrivOrTopSecret =
      "(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")";
  // Six puts are accumulated below (five for row1, one for row2).
  List<Put> puts = new ArrayList<>(6);
  // Use uppercase 'L' for long literals: lowercase 'l' is easily misread as '1'.
  Put put = new Put(Bytes.toBytes("row1"));
  put.addColumn(fam, qual, 123L, value);
  put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
  puts.add(put);
  put = new Put(Bytes.toBytes("row1"));
  put.addColumn(fam, qual, 124L, value);
  put.setCellVisibility(new CellVisibility(confPrivOrTopSecret));
  puts.add(put);
  put = new Put(Bytes.toBytes("row1"));
  put.addColumn(fam, qual, 125L, value);
  put.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET));
  puts.add(put);
  put = new Put(Bytes.toBytes("row1"));
  put.addColumn(fam, qual, 126L, value);
  put.setCellVisibility(new CellVisibility(confPrivOrTopSecret));
  puts.add(put);
  put = new Put(Bytes.toBytes("row1"));
  put.addColumn(fam, qual, 127L, value);
  put.setCellVisibility(new CellVisibility(confPrivOrTopSecret));
  puts.add(put);
  // NOTE(review): this flush runs before any of the accumulated puts are written
  // (table.put is below), so it flushes an empty table — presumably intentional; confirm.
  TEST_UTIL.getAdmin().flush(tableName);
  put = new Put(Bytes.toBytes("row2"));
  put.addColumn(fam, qual, 127L, value);
  put.setCellVisibility(new CellVisibility(confPrivOrTopSecret));
  puts.add(put);
  Table table = TEST_UTIL.getConnection().getTable(tableName);
  table.put(puts);
  return table;
}
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
From class TestVisibilityLabelsWithDeletes, method testDeletesWithoutAndWithVisibilityLabels.
/**
 * Verifies that a Delete with no cell visibility does not remove a cell written under a
 * visibility label, while a Delete carrying the matching label does.
 */
@Test
public void testDeletesWithoutAndWithVisibilityLabels() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  Admin hBaseAdmin = TEST_UTIL.getAdmin();
  HColumnDescriptor colDesc = new HColumnDescriptor(fam);
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(colDesc);
  hBaseAdmin.createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Put put = new Put(row1);
    put.addColumn(fam, qual, value);
    put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    table.put(put);
    Delete d = new Delete(row1);
    // Delete without any visibility expression — should NOT match the CONFIDENTIAL cell.
    d.addColumn(fam, qual);
    table.delete(d);
    PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Scan s = new Scan();
          ResultScanner scanner = table.getScanner(s);
          // The delete cannot apply because of the visibility mismatch, so the row survives.
          Result[] next = scanner.next(3);
          // JUnit convention: expected value first, actual second.
          assertEquals(1, next.length);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(scanAction);
    d = new Delete(row1);
    // Delete carrying the matching visibility — this one should remove the cell.
    d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    d.addColumn(fam, qual);
    table.delete(d);
    scanAction = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Scan s = new Scan();
          ResultScanner scanner = table.getScanner(s);
          Result[] next = scanner.next(3);
          // The labeled delete matched the cell, so nothing is left.
          assertEquals(0, next.length);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(scanAction);
  }
}
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
From class TestVisibilityLabelsWithDeletes, method testVisibilityLabelsWithDeleteColumnWithSpecificVersionWithPutsReAppearing.
/**
 * Two cells are written at the same timestamp under different labels (only one is visible
 * at a time — the later put masks the earlier). Deleting the version under each label in
 * turn must leave nothing visible under CONFIDENTIAL.
 */
@Test
public void testVisibilityLabelsWithDeleteColumnWithSpecificVersionWithPutsReAppearing() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  Admin hBaseAdmin = TEST_UTIL.getAdmin();
  HColumnDescriptor colDesc = new HColumnDescriptor(fam);
  colDesc.setMaxVersions(5);
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(colDesc);
  hBaseAdmin.createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    // Same row/qualifier/timestamp, different visibility labels and values.
    // Use uppercase 'L' for long literals: lowercase 'l' is easily misread as '1'.
    Put put1 = new Put(Bytes.toBytes("row1"));
    put1.addColumn(fam, qual, 123L, value);
    put1.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    Put put2 = new Put(Bytes.toBytes("row1"));
    put2.addColumn(fam, qual, 123L, value1);
    put2.setCellVisibility(new CellVisibility(SECRET));
    table.put(createList(put1, put2));
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(CONFIDENTIAL, SECRET));
    ResultScanner scanner = table.getScanner(s);
    // Identical timestamps collapse to one visible cell even with both authorizations.
    // JUnit convention: expected value first, actual second.
    assertEquals(1, scanner.next(3).length);
    scanner.close();
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // Delete the specific version under each label in turn; both try-blocks wrap
        // failures uniformly as IOException for the PrivilegedExceptionAction contract.
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
          d.addColumn(fam, qual, 123L);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(SECRET));
          d.addColumn(fam, qual, 123L);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(CONFIDENTIAL));
    scanner = table.getScanner(s);
    // Both versions at ts=123 are gone; nothing remains visible under CONFIDENTIAL.
    assertEquals(0, scanner.next(3).length);
    scanner.close();
  }
}
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
From class TestReplicaWithCluster, method testChangeTable.
@Test(timeout = 120000)
public void testChangeTable() throws Exception {
// Creates a replicated table, verifies a basic put/get, then adds a column family via
// disable/modify/enable and verifies both primary and stale (TIMELINE) reads still work.
HTableDescriptor hdt = HTU.createTableDescriptor("testChangeTable");
hdt.setRegionReplication(NB_SERVERS);
// SlowMeCopro delays the primary so TIMELINE reads fall back to a replica (stale result).
hdt.addCoprocessor(SlowMeCopro.class.getName());
Table table = HTU.createTable(hdt, new byte[][] { f }, null);
// basic test: it should work.
Put p = new Put(row);
p.addColumn(f, row, row);
table.put(p);
Get g = new Get(row);
Result r = table.get(g);
Assert.assertFalse(r.isStale());
// Add a CF, it should work.
// Snapshot the descriptor BEFORE modification so the family count can be compared after.
HTableDescriptor bHdt = HTU.getAdmin().getTableDescriptor(hdt.getTableName());
// The new family is named after the 'row' bytes — an arbitrary but distinct name.
HColumnDescriptor hcd = new HColumnDescriptor(row);
hdt.addFamily(hcd);
// Table must be disabled for modifyTable on this API version; keep this order.
HTU.getAdmin().disableTable(hdt.getTableName());
HTU.getAdmin().modifyTable(hdt.getTableName(), hdt);
HTU.getAdmin().enableTable(hdt.getTableName());
HTableDescriptor nHdt = HTU.getAdmin().getTableDescriptor(hdt.getTableName());
Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()), bHdt.getColumnFamilyCount() + 1, nHdt.getColumnFamilyCount());
// Write into the newly added family and read it back from the primary.
p = new Put(row);
p.addColumn(row, row, row);
table.put(p);
g = new Get(row);
r = table.get(g);
Assert.assertFalse(r.isStale());
try {
// Arm the coprocessor latch: the primary blocks, forcing the TIMELINE get to a replica.
SlowMeCopro.cdl.set(new CountDownLatch(1));
g = new Get(row);
g.setConsistency(Consistency.TIMELINE);
r = table.get(g);
Assert.assertTrue(r.isStale());
} finally {
// Always release the latch and reset the delay so later tests are unaffected.
SlowMeCopro.cdl.get().countDown();
SlowMeCopro.sleepTime.set(0);
}
Admin admin = HTU.getAdmin();
// Re-check the family count post-replica-read, then clean up the table.
nHdt = admin.getTableDescriptor(hdt.getTableName());
Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()), bHdt.getColumnFamilyCount() + 1, nHdt.getColumnFamilyCount());
admin.disableTable(hdt.getTableName());
admin.deleteTable(hdt.getTableName());
// NOTE(review): HTU.getAdmin() typically returns a shared, cached Admin; closing it here
// may break later tests in the same JVM — confirm against HBaseTestingUtility semantics.
admin.close();
}
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
From class TestReplicaWithCluster, method testBulkLoad.
/**
 * Bulk-loads HFiles into a replicated table via the secure bulk-load path, then verifies the
 * rows are readable from the primary (non-stale) and from a replica (stale, TIMELINE reads).
 */
@Test(timeout = 30000)
public void testBulkLoad() throws IOException {
  // Create table then get the single region for our new table.
  LOG.debug("Creating test table");
  HTableDescriptor hdt = HTU.createTableDescriptor("testBulkLoad");
  hdt.setRegionReplication(NB_SERVERS);
  // SlowMeCopro delays the primary so TIMELINE reads fall back to a replica (stale result).
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  Table table = HTU.createTable(hdt, new byte[][] { f }, null);
  // create hfiles to load.
  LOG.debug("Creating test data");
  Path dir = HTU.getDataTestDirOnTestFS("testBulkLoad");
  final int numRows = 10;
  final byte[] qual = Bytes.toBytes("qual");
  final byte[] val = Bytes.toBytes("val");
  // One HFile per column family, each holding numRows rows.
  final List<Pair<byte[], String>> famPaths = new ArrayList<>();
  for (HColumnDescriptor col : hdt.getColumnFamilies()) {
    Path hfile = new Path(dir, col.getNameAsString());
    TestHRegionServerBulkLoad.createHFile(HTU.getTestFileSystem(), hfile, col.getName(), qual, val, numRows);
    famPaths.add(new Pair<>(col.getName(), hfile.toString()));
  }
  // bulk load HFiles
  LOG.debug("Loading test data");
  final ClusterConnection conn = (ClusterConnection) HTU.getAdmin().getConnection();
  // NOTE(review): the Table from createTable above is discarded without close() here —
  // a handle leak tolerated in this test; confirm whether it should be closed first.
  table = conn.getTable(hdt.getTableName());
  final String bulkToken = new SecureBulkLoadClient(HTU.getConfiguration(), table).prepareBulkLoad(conn);
  ClientServiceCallable<Void> callable = new ClientServiceCallable<Void>(conn, hdt.getTableName(), TestHRegionServerBulkLoad.rowkey(0), new RpcControllerFactory(HTU.getConfiguration()).newController()) {
    @Override
    protected Void rpcCall() throws Exception {
      LOG.debug("Going to connect to server " + getLocation() + " for row " + Bytes.toStringBinary(getRow()));
      SecureBulkLoadClient secureClient = null;
      byte[] regionName = getLocation().getRegionInfo().getRegionName();
      try (Table table = conn.getTable(getTableName())) {
        secureClient = new SecureBulkLoadClient(HTU.getConfiguration(), table);
        secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName, true, null, bulkToken);
      }
      return null;
    }
  };
  RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(HTU.getConfiguration());
  RpcRetryingCaller<Void> caller = factory.newCaller();
  caller.callWithRetries(callable, 10000);
  // verify we can read them from the primary
  LOG.debug("Verifying data load");
  for (int i = 0; i < numRows; i++) {
    byte[] row = TestHRegionServerBulkLoad.rowkey(i);
    Get g = new Get(row);
    Result r = table.get(g);
    Assert.assertFalse(r.isStale());
  }
  // verify we can read them from the replica
  LOG.debug("Verifying replica queries");
  try {
    // Arm the coprocessor latch: the primary blocks, forcing TIMELINE gets to a replica.
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    for (int i = 0; i < numRows; i++) {
      byte[] row = TestHRegionServerBulkLoad.rowkey(i);
      Get g = new Get(row);
      g.setConsistency(Consistency.TIMELINE);
      Result r = table.get(g);
      Assert.assertTrue(r.isStale());
    }
  } finally {
    // Single release point: the finally block always fires, so the extra countDown the
    // original had at the end of the try block was redundant (double countDown is a no-op).
    SlowMeCopro.cdl.get().countDown();
    SlowMeCopro.sleepTime.set(0);
  }
  HTU.getAdmin().disableTable(hdt.getTableName());
  HTU.deleteTable(hdt.getTableName());
}
Aggregations of HColumnDescriptor usages end here.