Example 46 with Put

Use of org.apache.hadoop.hbase.client.Put in project hive by apache.

From the class HBaseReadWrite, method removeRoleGrants.

/**
   * Remove all of the grants for a role.  This is not cheap.
   * @param roleName Role to remove from all other roles and grants
   * @throws IOException if any of the underlying HBase operations fail
   */
void removeRoleGrants(String roleName) throws IOException {
    buildRoleCache();
    List<Put> puts = new ArrayList<>();
    // First, walk the role table and remove any references to this role
    for (Map.Entry<String, HbaseMetastoreProto.RoleGrantInfoList> e : roleCache.entrySet()) {
        boolean madeAChange = false;
        List<HbaseMetastoreProto.RoleGrantInfo> rgil = new ArrayList<>();
        rgil.addAll(e.getValue().getGrantInfoList());
        for (int i = 0; i < rgil.size(); i++) {
            if (HBaseUtils.convertPrincipalTypes(rgil.get(i).getPrincipalType()) == PrincipalType.ROLE && rgil.get(i).getPrincipalName().equals(roleName)) {
                rgil.remove(i);
                madeAChange = true;
                break;
            }
        }
        if (madeAChange) {
            Put put = new Put(HBaseUtils.buildKey(e.getKey()));
            HbaseMetastoreProto.RoleGrantInfoList proto = HbaseMetastoreProto.RoleGrantInfoList.newBuilder().addAllGrantInfo(rgil).build();
            put.add(CATALOG_CF, ROLES_COL, proto.toByteArray());
            puts.add(put);
            roleCache.put(e.getKey(), proto);
        }
    }
    if (puts.size() > 0) {
        HTableInterface htab = conn.getHBaseTable(ROLE_TABLE);
        htab.put(puts);
        conn.flush(htab);
    }
    // Remove any global privileges held by this role
    PrincipalPrivilegeSet global = getGlobalPrivs();
    if (global != null && global.getRolePrivileges() != null && global.getRolePrivileges().remove(roleName) != null) {
        putGlobalPrivs(global);
    }
    // Now, walk the db table
    puts.clear();
    List<Database> dbs = scanDatabases(null);
    // rare, but can happen
    if (dbs == null)
        dbs = new ArrayList<>();
    for (Database db : dbs) {
        if (db.getPrivileges() != null && db.getPrivileges().getRolePrivileges() != null && db.getPrivileges().getRolePrivileges().remove(roleName) != null) {
            byte[][] serialized = HBaseUtils.serializeDatabase(db);
            Put put = new Put(serialized[0]);
            put.add(CATALOG_CF, CATALOG_COL, serialized[1]);
            puts.add(put);
        }
    }
    if (puts.size() > 0) {
        HTableInterface htab = conn.getHBaseTable(DB_TABLE);
        htab.put(puts);
        conn.flush(htab);
    }
    // Finally, walk the table table
    puts.clear();
    for (Database db : dbs) {
        List<Table> tables = scanTables(db.getName(), null);
        if (tables != null) {
            for (Table table : tables) {
                if (table.getPrivileges() != null && table.getPrivileges().getRolePrivileges() != null && table.getPrivileges().getRolePrivileges().remove(roleName) != null) {
                    byte[][] serialized = HBaseUtils.serializeTable(table, HBaseUtils.hashStorageDescriptor(table.getSd(), md));
                    Put put = new Put(serialized[0]);
                    put.add(CATALOG_CF, CATALOG_COL, serialized[1]);
                    puts.add(put);
                }
            }
        }
    }
    if (puts.size() > 0) {
        HTableInterface htab = conn.getHBaseTable(TABLE_TABLE);
        htab.put(puts);
        conn.flush(htab);
    }
}
Also used : Table(org.apache.hadoop.hive.metastore.api.Table) PrincipalPrivilegeSet(org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet) ArrayList(java.util.ArrayList) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Put(org.apache.hadoop.hbase.client.Put) Database(org.apache.hadoop.hive.metastore.api.Database) Map(java.util.Map) NavigableMap(java.util.NavigableMap) HashMap(java.util.HashMap)
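
The pattern above, accumulating Puts in a list and writing them as one batch, carries over to the modern HBase client API. A minimal sketch, assuming a hypothetical table "role_table" with column family "c", and using the current Connection/Table interfaces in place of the older HTableInterface:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchedPutSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("role_table"))) {
            List<Put> puts = new ArrayList<>();
            for (int i = 0; i < 100; i++) {
                Put put = new Put(Bytes.toBytes("row-" + i));
                put.addColumn(Bytes.toBytes("c"), Bytes.toBytes("col"), Bytes.toBytes("value-" + i));
                puts.add(put);
            }
            // One batched RPC for all mutations instead of a round trip per Put.
            table.put(puts);
        }
    }
}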

Example 47 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

From the class TestCacheOnWrite, method testNotCachingDataBlocksDuringCompactionInternals.

private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags) throws IOException, InterruptedException {
    // TODO: need to change this test if we add a cache size threshold for
    // compactions, or if we implement some other kind of intelligent logic for
    // deciding what blocks to cache-on-write on compaction.
    final String table = "CompactionCacheOnWrite";
    final String cf = "myCF";
    final byte[] cfBytes = Bytes.toBytes(cf);
    final int maxVersions = 3;
    Region region = TEST_UTIL.createTestRegion(table, new HColumnDescriptor(cf).setCompressionType(compress).setBloomFilterType(BLOOM_TYPE).setMaxVersions(maxVersions).setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()));
    int rowIdx = 0;
    long ts = EnvironmentEdgeManager.currentTime();
    for (int iFile = 0; iFile < 5; ++iFile) {
        for (int iRow = 0; iRow < 500; ++iRow) {
            String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + iRow;
            Put p = new Put(Bytes.toBytes(rowStr));
            ++rowIdx;
            for (int iCol = 0; iCol < 10; ++iCol) {
                String qualStr = "col" + iCol;
                String valueStr = "value_" + rowStr + "_" + qualStr;
                for (int iTS = 0; iTS < 5; ++iTS) {
                    if (useTags) {
                        Tag t = new ArrayBackedTag((byte) 1, "visibility");
                        Tag[] tags = new Tag[1];
                        tags[0] = t;
                        KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr), HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags);
                        p.add(kv);
                    } else {
                        p.addColumn(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr));
                    }
                }
            }
            p.setDurability(Durability.ASYNC_WAL);
            region.put(p);
        }
        region.flush(true);
    }
    clearBlockCache(blockCache);
    assertEquals(0, blockCache.getBlockCount());
    region.compact(false);
    LOG.debug("compactStores() returned");
    for (CachedBlock block : blockCache) {
        assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
        assertNotEquals(BlockType.DATA, block.getBlockType());
    }
    ((HRegion) region).close();
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayBackedTag(org.apache.hadoop.hbase.ArrayBackedTag) Tag(org.apache.hadoop.hbase.Tag) Put(org.apache.hadoop.hbase.client.Put) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region)
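
The useTags branch illustrates the two ways a cell can enter a Put: addColumn for a plain cell, or a hand-built KeyValue when the cell must carry tags. A minimal sketch of just that choice; the row, family, and qualifier names are hypothetical:

import java.io.IOException;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class TaggedPutSketch {
    public static Put buildPut(boolean useTags) throws IOException {
        byte[] row = Bytes.toBytes("row1");
        byte[] cf = Bytes.toBytes("myCF");
        byte[] qual = Bytes.toBytes("col0");
        byte[] value = Bytes.toBytes("value");
        Put p = new Put(row);
        if (useTags) {
            // Tags travel inside the cell, so the KeyValue is built by hand.
            Tag[] tags = new Tag[] { new ArrayBackedTag((byte) 1, "visibility") };
            p.add(new KeyValue(row, cf, qual, HConstants.LATEST_TIMESTAMP, value, tags));
        } else {
            // Without tags, addColumn assembles the cell internally.
            p.addColumn(cf, qual, value);
        }
        return p;
    }
}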

Example 48 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

From the class TestEncodedSeekers, method doPuts.

private void doPuts(Region region) throws IOException {
    LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
    for (int i = 0; i < NUM_ROWS; ++i) {
        byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
        for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
            Put put = new Put(key);
            put.setDurability(Durability.ASYNC_WAL);
            byte[] col = Bytes.toBytes(String.valueOf(j));
            byte[] value = dataGenerator.generateRandomSizeValue(key, col);
            if (includeTags) {
                Tag[] tag = new Tag[1];
                tag[0] = new ArrayBackedTag((byte) 1, "Visibility");
                KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
                put.add(kv);
            } else {
                put.addColumn(CF_BYTES, col, value);
            }
            if (VERBOSE) {
                KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value);
                System.err.println(Strings.padFront(i + "", ' ', 4) + " " + kvPut);
            }
            region.put(put);
        }
        if (i % NUM_ROWS_PER_FLUSH == 0) {
            region.flush(true);
        }
    }
}
Also used : LoadTestKVGenerator(org.apache.hadoop.hbase.util.test.LoadTestKVGenerator) KeyValue(org.apache.hadoop.hbase.KeyValue) ArrayBackedTag(org.apache.hadoop.hbase.ArrayBackedTag) Tag(org.apache.hadoop.hbase.Tag) Put(org.apache.hadoop.hbase.client.Put)
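
Like the previous example, doPuts marks each Put with Durability.ASYNC_WAL, deferring the WAL sync to trade a small durability window for write throughput. A minimal sketch of that knob, with hypothetical names:

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class DurabilitySketch {
    public static Put asyncWalPut() {
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col"), Bytes.toBytes("value"));
        // ASYNC_WAL: the edit goes to the WAL, but the sync to storage happens
        // in the background, so a crash may lose the most recent edits.
        put.setDurability(Durability.ASYNC_WAL);
        return put;
    }
}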

Example 49 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

From the class TestPrefixTree, method testHBASE12817.

@Test
public void testHBASE12817() throws IOException {
    for (int i = 0; i < 100; i++) {
        region.put(new Put(Bytes.toBytes("obj" + (2900 + i))).addColumn(fam, qual1, Bytes.toBytes(i)));
    }
    region.put(new Put(Bytes.toBytes("obj299")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
    region.put(new Put(Bytes.toBytes("obj29")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
    region.put(new Put(Bytes.toBytes("obj2")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
    region.put(new Put(Bytes.toBytes("obj3")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
    region.flush(true);
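    // Rows written above: obj2900..obj2999 plus obj299, obj29, obj2 and obj3.
    // Seeking to "obj29995" must land on "obj3", the only row sorting after it.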
    Scan scan = new Scan(Bytes.toBytes("obj29995"));
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> cells = new ArrayList<>();
    assertFalse(scanner.next(cells));
    assertArrayEquals(Bytes.toBytes("obj3"), Result.create(cells).getRow());
}
Also used : RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)
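
The same first-row-at-or-after lookup can be expressed against the client API. A minimal sketch with a hypothetical helper name; the Table handle is assumed to be open already:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class SeekSketch {
    // Returns the row key of the first row at or after startRow, or null if none.
    public static byte[] firstRowAtOrAfter(Table table, byte[] startRow) throws IOException {
        Scan scan = new Scan(startRow);
        try (ResultScanner scanner = table.getScanner(scan)) {
            Result first = scanner.next();
            return first == null ? null : first.getRow();
        }
    }
}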

Example 50 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

From the class TestPrefixTree, method testHBASE11728.

@Test
public void testHBASE11728() throws Exception {
    Put put = new Put(Bytes.toBytes("a-b-0-0"));
    put.addColumn(fam, qual1, Bytes.toBytes("c1-value"));
    region.put(put);
    put = new Put(row1_bytes);
    put.addColumn(fam, qual1, Bytes.toBytes("c1-value"));
    region.put(put);
    put = new Put(row2_bytes);
    put.addColumn(fam, qual2, Bytes.toBytes("c2-value"));
    region.put(put);
    put = new Put(row3_bytes);
    put.addColumn(fam, qual2, Bytes.toBytes("c2-value-2"));
    region.put(put);
    put = new Put(row4_bytes);
    put.addColumn(fam, qual2, Bytes.toBytes("c2-value-3"));
    region.put(put);
    region.flush(true);
    String[] rows = new String[3];
    rows[0] = row1;
    rows[1] = row2;
    rows[2] = row3;
    byte[][] val = new byte[3][];
    val[0] = Bytes.toBytes("c1-value");
    val[1] = Bytes.toBytes("c2-value");
    val[2] = Bytes.toBytes("c2-value-2");
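    // The stop row is exclusive, and ':' (0x3A) sorts immediately after '9',
    // so "a-b-A-1:" works as an upper bound past every "a-b-A-1-..." row.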
    Scan scan = new Scan();
    scan.setStartRow(row1_bytes);
    scan.setStopRow(Bytes.toBytes("a-b-A-1:"));
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        assertEquals(i < 2, scanner.next(cells));
        CellScanner cellScanner = Result.create(cells).cellScanner();
        while (cellScanner.advance()) {
            assertEquals(rows[i], Bytes.toString(cellScanner.current().getRowArray(), cellScanner.current().getRowOffset(), cellScanner.current().getRowLength()));
            assertEquals(Bytes.toString(val[i]), Bytes.toString(cellScanner.current().getValueArray(), cellScanner.current().getValueOffset(), cellScanner.current().getValueLength()));
        }
        cells.clear();
    }
    scanner.close();
    // Add column
    scan = new Scan();
    scan.addColumn(fam, qual2);
    scan.setStartRow(row1_bytes);
    scan.setStopRow(Bytes.toBytes("a-b-A-1:"));
    scanner = region.getScanner(scan);
    for (int i = 1; i < 3; i++) {
        assertEquals(i < 2, scanner.next(cells));
        CellScanner cellScanner = Result.create(cells).cellScanner();
        while (cellScanner.advance()) {
            assertEquals(rows[i], Bytes.toString(cellScanner.current().getRowArray(), cellScanner.current().getRowOffset(), cellScanner.current().getRowLength()));
        }
        cells.clear();
    }
    scanner.close();
    scan = new Scan();
    scan.addColumn(fam, qual2);
    scan.setStartRow(Bytes.toBytes("a-b-A-1-"));
    scan.setStopRow(Bytes.toBytes("a-b-A-1:"));
    scanner = region.getScanner(scan);
    for (int i = 1; i < 3; i++) {
        assertEquals(i < 2, scanner.next(cells));
        CellScanner cellScanner = Result.create(cells).cellScanner();
        while (cellScanner.advance()) {
            assertEquals(rows[i], Bytes.toString(cellScanner.current().getRowArray(), cellScanner.current().getRowOffset(), cellScanner.current().getRowLength()));
        }
        cells.clear();
    }
    scanner.close();
    scan = new Scan();
    scan.addColumn(fam, qual2);
    scan.setStartRow(Bytes.toBytes("a-b-A-1-140239"));
    scan.setStopRow(Bytes.toBytes("a-b-A-1:"));
    scanner = region.getScanner(scan);
    assertFalse(scanner.next(cells));
    assertFalse(cells.isEmpty());
    scanner.close();
}
Also used : RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) CellScanner(org.apache.hadoop.hbase.CellScanner) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)
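
A client-side version of the same scan shape, restricting the result to a single column over a bounded row range. The table handle and the "fam"/"qual2" names mirror the test but are hypothetical here:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RangeScanSketch {
    public static void printRange(Table table) throws IOException {
        Scan scan = new Scan();
        // Return only cells from family "fam", qualifier "qual2".
        scan.addColumn(Bytes.toBytes("fam"), Bytes.toBytes("qual2"));
        scan.setStartRow(Bytes.toBytes("a-b-A-1-"));
        scan.setStopRow(Bytes.toBytes("a-b-A-1:"));
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result r : scanner) {
                System.out.println(Bytes.toString(r.getRow()));
            }
        }
    }
}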

Aggregations

Put (org.apache.hadoop.hbase.client.Put) 1416
Test (org.junit.Test) 672
Table (org.apache.hadoop.hbase.client.Table) 489
ArrayList (java.util.ArrayList) 317
Result (org.apache.hadoop.hbase.client.Result) 279
TableName (org.apache.hadoop.hbase.TableName) 257
IOException (java.io.IOException) 241
Delete (org.apache.hadoop.hbase.client.Delete) 225
Scan (org.apache.hadoop.hbase.client.Scan) 222
Cell (org.apache.hadoop.hbase.Cell) 200
Get (org.apache.hadoop.hbase.client.Get) 196
Configuration (org.apache.hadoop.conf.Configuration) 148
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) 139
Connection (org.apache.hadoop.hbase.client.Connection) 122
KeyValue (org.apache.hadoop.hbase.KeyValue) 112
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner) 110
Admin (org.apache.hadoop.hbase.client.Admin) 89
List (java.util.List) 83
Mutation (org.apache.hadoop.hbase.client.Mutation) 82
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 80