Use of org.apache.hadoop.hbase.client.Put in project hive by apache.
Class HBaseReadWrite, method removeRoleGrants.
/**
 * Remove all of the grants for a role. This is not cheap.
 * @param roleName Role to remove from all other roles and grants
 * @throws IOException
 */
void removeRoleGrants(String roleName) throws IOException {
  buildRoleCache();
  List<Put> puts = new ArrayList<>();
  // First, walk the role table and remove any references to this role
  for (Map.Entry<String, HbaseMetastoreProto.RoleGrantInfoList> e : roleCache.entrySet()) {
    boolean madeAChange = false;
    List<HbaseMetastoreProto.RoleGrantInfo> rgil = new ArrayList<>();
    rgil.addAll(e.getValue().getGrantInfoList());
    for (int i = 0; i < rgil.size(); i++) {
      if (HBaseUtils.convertPrincipalTypes(rgil.get(i).getPrincipalType()) == PrincipalType.ROLE &&
          rgil.get(i).getPrincipalName().equals(roleName)) {
        rgil.remove(i);
        madeAChange = true;
        break;
      }
    }
    if (madeAChange) {
      Put put = new Put(HBaseUtils.buildKey(e.getKey()));
      HbaseMetastoreProto.RoleGrantInfoList proto =
          HbaseMetastoreProto.RoleGrantInfoList.newBuilder().addAllGrantInfo(rgil).build();
      put.add(CATALOG_CF, ROLES_COL, proto.toByteArray());
      puts.add(put);
      roleCache.put(e.getKey(), proto);
    }
  }
  if (puts.size() > 0) {
    HTableInterface htab = conn.getHBaseTable(ROLE_TABLE);
    htab.put(puts);
    conn.flush(htab);
  }
  // Remove any global privileges held by this role
  PrincipalPrivilegeSet global = getGlobalPrivs();
  if (global != null && global.getRolePrivileges() != null &&
      global.getRolePrivileges().remove(roleName) != null) {
    putGlobalPrivs(global);
  }
  // Now, walk the db table
  puts.clear();
  List<Database> dbs = scanDatabases(null);
  // rare, but can happen
  if (dbs == null) dbs = new ArrayList<>();
  for (Database db : dbs) {
    if (db.getPrivileges() != null && db.getPrivileges().getRolePrivileges() != null &&
        db.getPrivileges().getRolePrivileges().remove(roleName) != null) {
      byte[][] serialized = HBaseUtils.serializeDatabase(db);
      Put put = new Put(serialized[0]);
      put.add(CATALOG_CF, CATALOG_COL, serialized[1]);
      puts.add(put);
    }
  }
  if (puts.size() > 0) {
    HTableInterface htab = conn.getHBaseTable(DB_TABLE);
    htab.put(puts);
    conn.flush(htab);
  }
  // Finally, walk the table table
  puts.clear();
  for (Database db : dbs) {
    List<Table> tables = scanTables(db.getName(), null);
    if (tables != null) {
      for (Table table : tables) {
        if (table.getPrivileges() != null && table.getPrivileges().getRolePrivileges() != null &&
            table.getPrivileges().getRolePrivileges().remove(roleName) != null) {
          byte[][] serialized = HBaseUtils.serializeTable(table,
              HBaseUtils.hashStorageDescriptor(table.getSd(), md));
          Put put = new Put(serialized[0]);
          put.add(CATALOG_CF, CATALOG_COL, serialized[1]);
          puts.add(put);
        }
      }
    }
  }
  if (puts.size() > 0) {
    HTableInterface htab = conn.getHBaseTable(TABLE_TABLE);
    htab.put(puts);
    conn.flush(htab);
  }
}
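
For reference, a minimal standalone sketch of the same batched-write pattern, written against the current Connection/Table client API rather than the older HTableInterface used above. The table name, column family, and qualifier below are placeholders, not names from the Hive metastore code.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchedPutSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("roles"))) {  // placeholder table name
      List<Put> puts = new ArrayList<>();
      for (int i = 0; i < 3; i++) {
        // One Put per row key, carrying the serialized value for that row.
        Put put = new Put(Bytes.toBytes("role-" + i));
        put.addColumn(Bytes.toBytes("c"), Bytes.toBytes("grants"), Bytes.toBytes("bytes-" + i));
        puts.add(put);
      }
      // Send the whole batch in one call, as htab.put(puts) does above.
      if (!puts.isEmpty()) {
        table.put(puts);
      }
    }
  }
}

The put.add(family, qualifier, value) form in the Hive snippet is the older client call that addColumn later replaced; both attach a single cell to the Put.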
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
Class TestCacheOnWrite, method testNotCachingDataBlocksDuringCompactionInternals.
private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags)
    throws IOException, InterruptedException {
  // TODO: need to change this test if we add a cache size threshold for
  // compactions, or if we implement some other kind of intelligent logic for
  // deciding what blocks to cache-on-write on compaction.
  final String table = "CompactionCacheOnWrite";
  final String cf = "myCF";
  final byte[] cfBytes = Bytes.toBytes(cf);
  final int maxVersions = 3;
  Region region = TEST_UTIL.createTestRegion(table,
      new HColumnDescriptor(cf)
          .setCompressionType(compress)
          .setBloomFilterType(BLOOM_TYPE)
          .setMaxVersions(maxVersions)
          .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()));
  int rowIdx = 0;
  long ts = EnvironmentEdgeManager.currentTime();
  for (int iFile = 0; iFile < 5; ++iFile) {
    for (int iRow = 0; iRow < 500; ++iRow) {
      String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + iRow;
      Put p = new Put(Bytes.toBytes(rowStr));
      ++rowIdx;
      for (int iCol = 0; iCol < 10; ++iCol) {
        String qualStr = "col" + iCol;
        String valueStr = "value_" + rowStr + "_" + qualStr;
        for (int iTS = 0; iTS < 5; ++iTS) {
          if (useTags) {
            Tag t = new ArrayBackedTag((byte) 1, "visibility");
            Tag[] tags = new Tag[1];
            tags[0] = t;
            KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),
                HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags);
            p.add(kv);
          } else {
            p.addColumn(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr));
          }
        }
      }
      p.setDurability(Durability.ASYNC_WAL);
      region.put(p);
    }
    region.flush(true);
  }
  clearBlockCache(blockCache);
  assertEquals(0, blockCache.getBlockCount());
  region.compact(false);
  LOG.debug("compactStores() returned");
  for (CachedBlock block : blockCache) {
    assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
    assertNotEquals(BlockType.DATA, block.getBlockType());
  }
  ((HRegion) region).close();
}
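
The two branches inside the inner loop show the two ways this test populates a Put: either by constructing a tagged KeyValue and handing it to Put.add(Cell), or by calling addColumn with an explicit timestamp. A compact, self-contained sketch of just that choice; row, family, and qualifier values here are illustrative only.

import java.io.IOException;

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class TaggedPutSketch {
  static Put buildPut(boolean useTags, long ts) throws IOException {
    byte[] row = Bytes.toBytes("row-0");
    byte[] cf = Bytes.toBytes("myCF");
    byte[] qual = Bytes.toBytes("col0");
    byte[] value = Bytes.toBytes("value_row-0_col0");

    Put p = new Put(row);
    if (useTags) {
      // Wrap the value in a KeyValue that carries a (type, value) tag.
      Tag[] tags = new Tag[] { new ArrayBackedTag((byte) 1, "visibility") };
      KeyValue kv = new KeyValue(row, cf, qual, HConstants.LATEST_TIMESTAMP, value, tags);
      p.add(kv);
    } else {
      // Plain cell with a caller-supplied timestamp.
      p.addColumn(cf, qual, ts, value);
    }
    return p;
  }
}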
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
Class TestEncodedSeekers, method doPuts.
private void doPuts(Region region) throws IOException {
  LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
  for (int i = 0; i < NUM_ROWS; ++i) {
    byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      Put put = new Put(key);
      put.setDurability(Durability.ASYNC_WAL);
      byte[] col = Bytes.toBytes(String.valueOf(j));
      byte[] value = dataGenerator.generateRandomSizeValue(key, col);
      if (includeTags) {
        Tag[] tag = new Tag[1];
        tag[0] = new ArrayBackedTag((byte) 1, "Visibility");
        KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
        put.add(kv);
      } else {
        put.addColumn(CF_BYTES, col, value);
      }
      if (VERBOSE) {
        KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value);
        System.err.println(Strings.padFront(i + "", ' ', 4) + " " + kvPut);
      }
      region.put(put);
    }
    if (i % NUM_ROWS_PER_FLUSH == 0) {
      region.flush(true);
    }
  }
}
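
A hedged sketch of the Put built inside doPuts(), reduced to a small helper: one column per Put, with durability relaxed to ASYNC_WAL so the WAL sync is not waited on per mutation. The column family constant here is a stand-in for the test's CF_BYTES, not part of the original class.

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class SingleColumnPutSketch {
  private static final byte[] CF = Bytes.toBytes("cf");  // stand-in for CF_BYTES

  static Put buildPut(byte[] key, int col, byte[] value) {
    Put put = new Put(key);
    // Relax durability: the WAL edit is synced asynchronously rather than per mutation.
    put.setDurability(Durability.ASYNC_WAL);
    put.addColumn(CF, Bytes.toBytes(String.valueOf(col)), value);
    return put;
  }
}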
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
Class TestPrefixTree, method testHBASE12817.
@Test
public void testHBASE12817() throws IOException {
  for (int i = 0; i < 100; i++) {
    region.put(new Put(Bytes.toBytes("obj" + (2900 + i))).addColumn(fam, qual1, Bytes.toBytes(i)));
  }
  region.put(new Put(Bytes.toBytes("obj299")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj29")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj2")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj3")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.flush(true);
  Scan scan = new Scan(Bytes.toBytes("obj29995"));
  RegionScanner scanner = region.getScanner(scan);
  List<Cell> cells = new ArrayList<>();
  assertFalse(scanner.next(cells));
  assertArrayEquals(Bytes.toBytes("obj3"), Result.create(cells).getRow());
}
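
The final assertion relies on rows being ordered by raw byte comparison: a scan starting at "obj29995" lands on "obj3", the next key in byte order. Below is a minimal client-side sketch of the same fluent Put chaining and seek, assuming a client recent enough to have Scan.withStartRow; the table and column names are placeholders, and Table/ResultScanner stand in for the region-level calls used in the test.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SeekBetweenRowsSketch {
  public static void main(String[] args) throws IOException {
    byte[] fam = Bytes.toBytes("f");
    byte[] qual = Bytes.toBytes("q");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("prefix_test"))) {  // placeholder table
      // Fluent form: construct the Put and add the column in one expression.
      table.put(new Put(Bytes.toBytes("obj2999")).addColumn(fam, qual, Bytes.toBytes("v")));
      table.put(new Put(Bytes.toBytes("obj3")).addColumn(fam, qual, Bytes.toBytes("v")));
      // Start the scan between the two keys; rows sort by raw bytes, so "obj3" follows "obj29995".
      Scan scan = new Scan().withStartRow(Bytes.toBytes("obj29995"));
      try (ResultScanner scanner = table.getScanner(scan)) {
        Result first = scanner.next();
        if (first != null) {
          System.out.println(Bytes.toString(first.getRow()));  // expected: obj3
        }
      }
    }
  }
}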
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
Class TestPrefixTree, method testHBASE11728.
@Test
public void testHBASE11728() throws Exception {
  Put put = new Put(Bytes.toBytes("a-b-0-0"));
  put.addColumn(fam, qual1, Bytes.toBytes("c1-value"));
  region.put(put);
  put = new Put(row1_bytes);
  put.addColumn(fam, qual1, Bytes.toBytes("c1-value"));
  region.put(put);
  put = new Put(row2_bytes);
  put.addColumn(fam, qual2, Bytes.toBytes("c2-value"));
  region.put(put);
  put = new Put(row3_bytes);
  put.addColumn(fam, qual2, Bytes.toBytes("c2-value-2"));
  region.put(put);
  put = new Put(row4_bytes);
  put.addColumn(fam, qual2, Bytes.toBytes("c2-value-3"));
  region.put(put);
  region.flush(true);
  String[] rows = new String[3];
  rows[0] = row1;
  rows[1] = row2;
  rows[2] = row3;
  byte[][] val = new byte[3][];
  val[0] = Bytes.toBytes("c1-value");
  val[1] = Bytes.toBytes("c2-value");
  val[2] = Bytes.toBytes("c2-value-2");
  Scan scan = new Scan();
  scan.setStartRow(row1_bytes);
  scan.setStopRow(Bytes.toBytes("a-b-A-1:"));
  RegionScanner scanner = region.getScanner(scan);
  List<Cell> cells = new ArrayList<>();
  for (int i = 0; i < 3; i++) {
    assertEquals(i < 2, scanner.next(cells));
    CellScanner cellScanner = Result.create(cells).cellScanner();
    while (cellScanner.advance()) {
      assertEquals(rows[i], Bytes.toString(cellScanner.current().getRowArray(),
          cellScanner.current().getRowOffset(), cellScanner.current().getRowLength()));
      assertEquals(Bytes.toString(val[i]), Bytes.toString(cellScanner.current().getValueArray(),
          cellScanner.current().getValueOffset(), cellScanner.current().getValueLength()));
    }
    cells.clear();
  }
  scanner.close();
  // Add column
  scan = new Scan();
  scan.addColumn(fam, qual2);
  scan.setStartRow(row1_bytes);
  scan.setStopRow(Bytes.toBytes("a-b-A-1:"));
  scanner = region.getScanner(scan);
  for (int i = 1; i < 3; i++) {
    assertEquals(i < 2, scanner.next(cells));
    CellScanner cellScanner = Result.create(cells).cellScanner();
    while (cellScanner.advance()) {
      assertEquals(rows[i], Bytes.toString(cellScanner.current().getRowArray(),
          cellScanner.current().getRowOffset(), cellScanner.current().getRowLength()));
    }
    cells.clear();
  }
  scanner.close();
  scan = new Scan();
  scan.addColumn(fam, qual2);
  scan.setStartRow(Bytes.toBytes("a-b-A-1-"));
  scan.setStopRow(Bytes.toBytes("a-b-A-1:"));
  scanner = region.getScanner(scan);
  for (int i = 1; i < 3; i++) {
    assertEquals(i < 2, scanner.next(cells));
    CellScanner cellScanner = Result.create(cells).cellScanner();
    while (cellScanner.advance()) {
      assertEquals(rows[i], Bytes.toString(cellScanner.current().getRowArray(),
          cellScanner.current().getRowOffset(), cellScanner.current().getRowLength()));
    }
    cells.clear();
  }
  scanner.close();
  scan = new Scan();
  scan.addColumn(fam, qual2);
  scan.setStartRow(Bytes.toBytes("a-b-A-1-140239"));
  scan.setStopRow(Bytes.toBytes("a-b-A-1:"));
  scanner = region.getScanner(scan);
  assertFalse(scanner.next(cells));
  assertFalse(cells.isEmpty());
  scanner.close();
}
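
A client-side sketch of the bounded, column-restricted scans this test exercises: restrict the scan to one column, bound the row range, and walk each Result's cells. It assumes Scan.withStartRow/withStopRow are available in the client; the table, family, qualifier, and row bounds below are placeholders standing in for the test's fields.

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BoundedColumnScanSketch {
  public static void main(String[] args) throws IOException {
    byte[] fam = Bytes.toBytes("f");
    byte[] qual2 = Bytes.toBytes("q2");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("prefix_test"))) {  // placeholder table
      Scan scan = new Scan()
          .addColumn(fam, qual2)                    // only this column's cells are returned
          .withStartRow(Bytes.toBytes("a-b-A-1-"))  // inclusive lower bound
          .withStopRow(Bytes.toBytes("a-b-A-1:"));  // exclusive upper bound
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          for (Cell cell : result.rawCells()) {
            System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) + " = "
                + Bytes.toString(CellUtil.cloneValue(cell)));
          }
        }
      }
    }
  }
}

The stop row ending in ':' serves as a prefix cap: ':' sorts after both '-' and the digits in ASCII, so every row beginning with "a-b-A-1-" falls below it.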