Example 16 with Delete

Use of org.apache.hadoop.hbase.client.Delete in project hbase by apache.

From the class TestRemoteTable, method testDelete.

@Test
public void testDelete() throws IOException {
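    // Write one cell to each of two column families and verify both come back.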
    Put put = new Put(ROW_3);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
    remoteTable.put(put);
    Get get = new Get(ROW_3);
    get.addFamily(COLUMN_1);
    get.addFamily(COLUMN_2);
    Result result = remoteTable.get(get);
    byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value1);
    assertTrue(Bytes.equals(VALUE_1, value1));
    assertNotNull(value2);
    assertTrue(Bytes.equals(VALUE_2, value2));
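    // Delete the single cell in COLUMN_2; the COLUMN_1 cell must survive.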
    Delete delete = new Delete(ROW_3);
    delete.addColumn(COLUMN_2, QUALIFIER_2);
    remoteTable.delete(delete);
    get = new Get(ROW_3);
    get.addFamily(COLUMN_1);
    get.addFamily(COLUMN_2);
    result = remoteTable.get(get);
    value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value1);
    assertTrue(Bytes.equals(VALUE_1, value1));
    assertNull(value2);
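    // A row delete capped at timestamp 1 removes only cells with ts <= 1; the cells here were written with current timestamps, so nothing changes.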
    delete = new Delete(ROW_3);
    delete.setTimestamp(1L);
    remoteTable.delete(delete);
    get = new Get(ROW_3);
    get.addFamily(COLUMN_1);
    get.addFamily(COLUMN_2);
    result = remoteTable.get(get);
    value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value1);
    assertTrue(Bytes.equals(VALUE_1, value1));
    assertNull(value2);
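    // An unrestricted row delete removes the remaining cell.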
    delete = new Delete(ROW_3);
    remoteTable.delete(delete);
    get = new Get(ROW_3);
    get.addFamily(COLUMN_1);
    get.addFamily(COLUMN_2);
    result = remoteTable.get(get);
    value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNull(value1);
    assertNull(value2);
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), Get (org.apache.hadoop.hbase.client.Get), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), Test (org.junit.Test)
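
The test above talks to HBase through the REST gateway, so remoteTable is a RemoteHTable rather than a regular Table. A minimal setup sketch, assuming a REST server on localhost:8080 and an existing table named "test" (both placeholders; TestRemoteTable gets these from its test harness):

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;

// Point the REST client at a gateway; host and port are assumptions.
Cluster cluster = new Cluster();
cluster.add("localhost", 8080);
Client client = new Client(cluster);
// RemoteHTable implements the Table API over the REST protocol.
RemoteHTable remoteTable = new RemoteHTable(client, "test");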

Example 17 with Delete

Use of org.apache.hadoop.hbase.client.Delete in project hbase by apache.

From the class ReplicationSink, method replicateEntries.

/**
   * Replicate this array of entries directly into the local cluster using the native client. Only
   * operates against raw protobuf types, saving a conversion from pb to pojo.
   * @param entries the WAL entries to replicate
   * @param cells a CellScanner over the cells backing the entries
   * @param replicationClusterId Id which will uniquely identify source cluster FS client
   *          configurations in the replication configuration directory
   * @param sourceBaseNamespaceDirPath Path that points to the source cluster base namespace
   *          directory
   * @param sourceHFileArchiveDirPath Path that points to the source cluster hfile archive directory
   * @throws IOException if it fails to replicate the data
   */
public void replicateEntries(List<WALEntry> entries, final CellScanner cells, String replicationClusterId, String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath) throws IOException {
    if (entries.isEmpty())
        return;
    if (cells == null)
        throw new NullPointerException("TODO: Add handling of null CellScanner");
    // Very simple optimization where we batch sequences of rows going
    // to the same table.
    try {
        long totalReplicated = 0;
        // Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per
        // invocation of this method per table and cluster id.
        Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<>();
        // Map of table name Vs list of pair of family and list of hfile paths from its namespace
        Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap = null;
        for (WALEntry entry : entries) {
            TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray());
            Cell previousCell = null;
            Mutation m = null;
            int count = entry.getAssociatedCellCount();
            for (int i = 0; i < count; i++) {
                // Throw index out of bounds if our cell count is off
                if (!cells.advance()) {
                    throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
                }
                Cell cell = cells.current();
                // Handle bulk load hfiles replication
                if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
                    if (bulkLoadHFileMap == null) {
                        bulkLoadHFileMap = new HashMap<>();
                    }
                    buildBulkLoadHFileMap(bulkLoadHFileMap, table, cell);
                } else {
                    // Handle wal replication
                    if (isNewRowOrType(previousCell, cell)) {
                        // Create new mutation
                        m = CellUtil.isDelete(cell) ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                        List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
                        for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
                            clusterIds.add(toUUID(clusterId));
                        }
                        m.setClusterIds(clusterIds);
                        addToHashMultiMap(rowMap, table, clusterIds, m);
                    }
                    if (CellUtil.isDelete(cell)) {
                        ((Delete) m).addDeleteMarker(cell);
                    } else {
                        ((Put) m).add(cell);
                    }
                    previousCell = cell;
                }
            }
            totalReplicated++;
        }
        // TODO Replicating mutations and bulk loaded data can be made parallel
        if (!rowMap.isEmpty()) {
            LOG.debug("Started replicating mutations.");
            for (Entry<TableName, Map<List<UUID>, List<Row>>> entry : rowMap.entrySet()) {
                batch(entry.getKey(), entry.getValue().values());
            }
            LOG.debug("Finished replicating mutations.");
        }
        if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) {
            LOG.debug("Started replicating bulk loaded data.");
            HFileReplicator hFileReplicator = new HFileReplicator(this.provider.getConf(this.conf, replicationClusterId), sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf, getConnection());
            hFileReplicator.replicate();
            LOG.debug("Finished replicating bulk loaded data.");
        }
        int size = entries.size();
        this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime());
        this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated);
        this.totalReplicatedEdits.addAndGet(totalReplicated);
    } catch (IOException ex) {
        LOG.error("Unable to accept edit because:", ex);
        throw ex;
    }
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), ArrayList (java.util.ArrayList), HBaseProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos), List (java.util.List), UUID (java.util.UUID), Cell (org.apache.hadoop.hbase.Cell), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), TreeMap (java.util.TreeMap), Put (org.apache.hadoop.hbase.client.Put), TableName (org.apache.hadoop.hbase.TableName), Row (org.apache.hadoop.hbase.client.Row), WALEntry (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry), Mutation (org.apache.hadoop.hbase.client.Mutation), HashMap (java.util.HashMap), Map (java.util.Map)
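
Grouping in the loop above hinges on isNewRowOrType: a new Mutation is started whenever the row key or the KeyValue type changes between consecutive cells, so runs of cells for the same row and type share one Put or Delete. The private helper is not shown in this excerpt; a plausible sketch of the check it performs:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;

// True when the current cell cannot be folded into the previous mutation:
// no previous cell yet, the type byte changed (e.g. Put vs. Delete marker),
// or the row key differs.
private boolean isNewRowOrType(final Cell previousCell, final Cell cell) {
    return previousCell == null
        || previousCell.getTypeByte() != cell.getTypeByte()
        || !CellUtil.matchingRows(previousCell, cell);
}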

Example 18 with Delete

Use of org.apache.hadoop.hbase.client.Delete in project hbase by apache.

From the class AccessControlLists, method removeUserPermission.

/**
   * Removes a previously granted permission from the stored access control
   * lists.  The {@link TablePermission} being removed must exactly match what
   * is stored -- no wildcard matching is attempted. That is, if user "bob" has
   * been granted "READ" access to the "data" table, but only to column family
   * plus qualifier "info:colA", then trying to call this method with only
   * user "bob" and the table name "data" (but without specifying the
   * column qualifier "info:colA") will have no effect.
   *
   * @param conf the configuration
   * @param userPerm the details of the permission to be revoked
   * @param t acl table
   * @throws IOException if there is an error accessing the metadata table
   */
static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t) throws IOException {
    Delete d = new Delete(userPermissionRowKey(userPerm));
    byte[] key = userPermissionKey(userPerm);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Removing permission " + userPerm.toString());
    }
    d.addColumns(ACL_LIST_FAMILY, key);
    try {
        t.delete(d);
    } finally {
        t.close();
    }
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete)
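
Because the match is exact, a revoke has to repeat every detail of the original grant. A hedged usage sketch, callable only from within the same package since the method is package-private (the UserPermission constructor shape and the conf/connection variables in scope are assumptions):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.util.Bytes;

// Revoke exactly what was granted: user "bob", table "data",
// family "info", qualifier "colA", action READ. Omitting the
// qualifier would silently match nothing, per the javadoc above.
UserPermission perm = new UserPermission(Bytes.toBytes("bob"),
    TableName.valueOf("data"), Bytes.toBytes("info"),
    Bytes.toBytes("colA"), Permission.Action.READ);
// Note: removeUserPermission closes the table itself, so the
// caller must not reuse the handle afterwards.
Table acl = connection.getTable(AccessControlLists.ACL_TABLE_NAME);
AccessControlLists.removeUserPermission(conf, perm, acl);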

Example 19 with Delete

Use of org.apache.hadoop.hbase.client.Delete in project hbase by apache.

From the class HBaseTestingUtility, method deleteTableData.

/**
   * Provide an existing table name to truncate.
   * Scans the table and issues a delete for each row read.
   * @param tableName existing table
   * @return the Table for the given name; the caller is responsible for closing it
   * @throws IOException if the scan or a delete fails
   */
public Table deleteTableData(TableName tableName) throws IOException {
    Table table = getConnection().getTable(tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
        Delete del = new Delete(res.getRow());
        table.delete(del);
    }
    resScan.close();
    return table;
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), HTable (org.apache.hadoop.hbase.client.HTable), Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), Scan (org.apache.hadoop.hbase.client.Scan), Result (org.apache.hadoop.hbase.client.Result)
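
Note that the method hands the Table back to the caller instead of closing it. A typical call site in a test (TEST_UTIL and the table name are placeholders):

// Truncate the table between test cases without touching its schema.
Table table = TEST_UTIL.deleteTableData(TableName.valueOf("testTable"));
table.close();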

Example 20 with Delete

Use of org.apache.hadoop.hbase.client.Delete in project hbase by apache.

From the class HBaseTestingUtility, method deleteNumericRows.

public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow) throws IOException {
    for (int i = startRow; i < endRow; i++) {
        byte[] data = Bytes.toBytes(String.valueOf(i));
        Delete delete = new Delete(data);
        delete.addFamily(f);
        t.delete(delete);
    }
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete)
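
This is the counterpart of HBaseTestingUtility.loadNumericRows, which writes one row per integer key over the same half-open [startRow, endRow) range. A short usage sketch (table and family names are placeholders):

byte[] family = Bytes.toBytes("f");
Table table = TEST_UTIL.createTable(TableName.valueOf("numbers"), family);
// Write rows "0" .. "99", then delete the family from each of them again.
TEST_UTIL.loadNumericRows(table, family, 0, 100);
TEST_UTIL.deleteNumericRows(table, family, 0, 100);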

Aggregations

Delete (org.apache.hadoop.hbase.client.Delete): 306 usages
Test (org.junit.Test): 150 usages
Put (org.apache.hadoop.hbase.client.Put): 149 usages
Result (org.apache.hadoop.hbase.client.Result): 111 usages
Table (org.apache.hadoop.hbase.client.Table): 101 usages
Scan (org.apache.hadoop.hbase.client.Scan): 95 usages
IOException (java.io.IOException): 86 usages
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 79 usages
Cell (org.apache.hadoop.hbase.Cell): 75 usages
TableName (org.apache.hadoop.hbase.TableName): 66 usages
ArrayList (java.util.ArrayList): 64 usages
Connection (org.apache.hadoop.hbase.client.Connection): 55 usages
InterruptedIOException (java.io.InterruptedIOException): 45 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 44 usages
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 42 usages
Get (org.apache.hadoop.hbase.client.Get): 41 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 33 usages
CellScanner (org.apache.hadoop.hbase.CellScanner): 32 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 27 usages
Admin (org.apache.hadoop.hbase.client.Admin): 20 usages