
Example 21 with Put

use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

the class TestRemoteTable method testIteratorScaner.

/**
   * Test the RemoteHTable.Scanner.iterator() method.
   */
@Test
public void testIteratorScaner() throws IOException {
    List<Put> puts = new ArrayList<>(4);
    Put put = new Put(ROW_1);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    puts.add(put);
    put = new Put(ROW_2);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    puts.add(put);
    put = new Put(ROW_3);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    puts.add(put);
    put = new Put(ROW_4);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    puts.add(put);
    remoteTable.put(puts);
    ResultScanner scanner = remoteTable.getScanner(new Scan());
    Iterator<Result> iterator = scanner.iterator();
    assertTrue(iterator.hasNext());
    int counter = 0;
    while (iterator.hasNext()) {
        iterator.next();
        counter++;
    }
    assertEquals(4, counter);
}
Also used : ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
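
The test drives the iterator by hand to count rows. In application code the same scan is usually written with try-with-resources and a for-each loop, since ResultScanner is both Iterable and Closeable. A minimal sketch against an ordinary Table; the table name and column family are placeholders rather than values from the test:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical table and column family, for illustration only.
        TableName tableName = TableName.valueOf("example_table");
        byte[] family = Bytes.toBytes("cf");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(tableName);
             // ResultScanner is Iterable<Result>, so a for-each loop replaces
             // the explicit iterator()/hasNext()/next() calls in the test.
             ResultScanner scanner = table.getScanner(new Scan().addFamily(family))) {
            int rows = 0;
            for (Result result : scanner) {
                rows++;
            }
            System.out.println("Scanned " + rows + " rows");
        }
    }
}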

Example 22 with Put

use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

the class TestRemoteTable method testPut.

@Test
public void testPut() throws IOException {
    Put put = new Put(ROW_3);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    remoteTable.put(put);
    Get get = new Get(ROW_3);
    get.addFamily(COLUMN_1);
    Result result = remoteTable.get(get);
    byte[] value = result.getValue(COLUMN_1, QUALIFIER_1);
    assertNotNull(value);
    assertTrue(Bytes.equals(VALUE_1, value));
    // multiput
    List<Put> puts = new ArrayList<>(3);
    put = new Put(ROW_3);
    put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
    puts.add(put);
    put = new Put(ROW_4);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    puts.add(put);
    put = new Put(ROW_4);
    put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
    puts.add(put);
    remoteTable.put(puts);
    get = new Get(ROW_3);
    get.addFamily(COLUMN_2);
    result = remoteTable.get(get);
    value = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value);
    assertTrue(Bytes.equals(VALUE_2, value));
    get = new Get(ROW_4);
    result = remoteTable.get(get);
    value = result.getValue(COLUMN_1, QUALIFIER_1);
    assertNotNull(value);
    assertTrue(Bytes.equals(VALUE_1, value));
    value = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value);
    assertTrue(Bytes.equals(VALUE_2, value));
    assertTrue(Bytes.equals(Bytes.toBytes("TestRemoteTable" + VALID_TABLE_NAME_CHARS), remoteTable.getTableName()));
}
Also used : Get(org.apache.hadoop.hbase.client.Get) ArrayList(java.util.ArrayList) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
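
The multiput above collects Puts into a List and sends them with a single Table.put(List) call. For larger write volumes against a regular Table, the same batching is often expressed with a BufferedMutator, which buffers Puts client-side and flushes them in chunks. A minimal sketch; the table and column names are placeholders:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedPutExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        byte[] family = Bytes.toBytes("cf");    // placeholder column family
        byte[] qualifier = Bytes.toBytes("q");  // placeholder qualifier
        try (Connection connection = ConnectionFactory.createConnection(conf);
             BufferedMutator mutator =
                 connection.getBufferedMutator(TableName.valueOf("example_table"))) {
            for (int i = 0; i < 100; i++) {
                Put put = new Put(Bytes.toBytes("row-" + i));
                put.addColumn(family, qualifier, Bytes.toBytes("value-" + i));
                // mutate() only buffers the Put; the client sends batches as the buffer fills.
                mutator.mutate(put);
            }
            // Push any remaining buffered puts to the server before closing.
            mutator.flush();
        }
    }
}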

Example 23 with Put

use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

the class AccessControlLists method addUserPermission.

/**
   * Stores a new user permission grant in the access control lists table.
   * @param conf the configuration
   * @param userPerm the details of the permission to be granted
   * @param t acl table instance. It is closed upon method return.
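   * @param mergeExistingPermissions if true, merge the requested actions with any actions
   *          already granted to the same user for the same scope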
   * @throws IOException in the case of an error accessing the metadata table
   */
static void addUserPermission(Configuration conf, UserPermission userPerm, Table t, boolean mergeExistingPermissions) throws IOException {
    Permission.Action[] actions = userPerm.getActions();
    byte[] rowKey = userPermissionRowKey(userPerm);
    Put p = new Put(rowKey);
    byte[] key = userPermissionKey(userPerm);
    if ((actions == null) || (actions.length == 0)) {
        String msg = "No actions associated with user '" + Bytes.toString(userPerm.getUser()) + "'";
        LOG.warn(msg);
        throw new IOException(msg);
    }
    Set<Permission.Action> actionSet = new TreeSet<Permission.Action>();
    if (mergeExistingPermissions) {
        List<UserPermission> perms = getUserPermissions(conf, rowKey);
        UserPermission currentPerm = null;
        for (UserPermission perm : perms) {
            if (Bytes.equals(perm.getUser(), userPerm.getUser()) && ((userPerm.isGlobal() && ACL_TABLE_NAME.equals(perm.getTableName())) || perm.tableFieldsEqual(userPerm))) {
                currentPerm = perm;
                break;
            }
        }
        if (currentPerm != null && currentPerm.getActions() != null) {
            actionSet.addAll(Arrays.asList(currentPerm.getActions()));
        }
    }
    // merge current action with new action.
    actionSet.addAll(Arrays.asList(actions));
    // serialize to byte array.
    byte[] value = new byte[actionSet.size()];
    int index = 0;
    for (Permission.Action action : actionSet) {
        value[index++] = action.code();
    }
    p.addImmutable(ACL_LIST_FAMILY, key, value);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Writing permission with rowKey " + Bytes.toString(rowKey) + " " + Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
    }
    try {
        t.put(p);
    } finally {
        t.close();
    }
}
Also used : IOException(java.io.IOException) Put(org.apache.hadoop.hbase.client.Put) TreeSet(java.util.TreeSet)
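
The value stored by the Put above is simply one byte (the action code) per granted action. A small round-trip sketch of that encoding, assuming the standard Permission.Action constants such as READ and WRITE; the decode direction is only an illustration, not a helper HBase itself provides:

import java.util.EnumSet;
import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.hbase.security.access.Permission;

public class ActionCodec {

    // Encode a set of actions the same way addUserPermission does:
    // one byte (the action code) per granted action.
    static byte[] encode(Set<Permission.Action> actions) {
        byte[] value = new byte[actions.size()];
        int index = 0;
        for (Permission.Action action : actions) {
            value[index++] = action.code();
        }
        return value;
    }

    // Illustrative decode: match each stored byte back to its Action constant.
    static Set<Permission.Action> decode(byte[] value) {
        Set<Permission.Action> actions = EnumSet.noneOf(Permission.Action.class);
        for (byte b : value) {
            for (Permission.Action action : Permission.Action.values()) {
                if (action.code() == b) {
                    actions.add(action);
                }
            }
        }
        return actions;
    }

    public static void main(String[] args) {
        Set<Permission.Action> granted = new TreeSet<>();
        granted.add(Permission.Action.READ);
        granted.add(Permission.Action.WRITE);
        byte[] value = encode(granted);
        System.out.println(decode(value)); // [READ, WRITE]
    }
}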

Example 24 with Put

use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

the class ReplicationSink method replicateEntries.

/**
   * Replicate this list of entries directly into the local cluster using the native client.
   * Operates only on the raw protobuf types, saving a conversion from pb to POJO.
   * @param entries the WAL entries to replicate
   * @param cells scanner over the cells referenced by the entries
   * @param replicationClusterId Id which uniquely identifies the source cluster FS client
   *          configurations in the replication configuration directory
   * @param sourceBaseNamespaceDirPath Path that points to the source cluster base namespace
   *          directory
   * @param sourceHFileArchiveDirPath Path that points to the source cluster hfile archive directory
   * @throws IOException If the data could not be replicated
   */
public void replicateEntries(List<WALEntry> entries, final CellScanner cells, String replicationClusterId, String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath) throws IOException {
    if (entries.isEmpty())
        return;
    if (cells == null)
        throw new NullPointerException("TODO: Add handling of null CellScanner");
    // Very simple optimization: batch sequences of rows going to the same table.
    try {
        long totalReplicated = 0;
        // Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per
        // invocation of this method per table and cluster id.
        Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<>();
        // Map of table name Vs list of pair of family and list of hfile paths from its namespace
        Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap = null;
        for (WALEntry entry : entries) {
            TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray());
            Cell previousCell = null;
            Mutation m = null;
            int count = entry.getAssociatedCellCount();
            for (int i = 0; i < count; i++) {
                // Throw index out of bounds if our cell count is off
                if (!cells.advance()) {
                    throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
                }
                Cell cell = cells.current();
                // Handle bulk load hfiles replication
                if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
                    if (bulkLoadHFileMap == null) {
                        bulkLoadHFileMap = new HashMap<>();
                    }
                    buildBulkLoadHFileMap(bulkLoadHFileMap, table, cell);
                } else {
                    // Handle wal replication
                    if (isNewRowOrType(previousCell, cell)) {
                        // Create new mutation
                        m = CellUtil.isDelete(cell) ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                        List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
                        for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
                            clusterIds.add(toUUID(clusterId));
                        }
                        m.setClusterIds(clusterIds);
                        addToHashMultiMap(rowMap, table, clusterIds, m);
                    }
                    if (CellUtil.isDelete(cell)) {
                        ((Delete) m).addDeleteMarker(cell);
                    } else {
                        ((Put) m).add(cell);
                    }
                    previousCell = cell;
                }
            }
            totalReplicated++;
        }
        // TODO Replicating mutations and bulk loaded data can be made parallel
        if (!rowMap.isEmpty()) {
            LOG.debug("Started replicating mutations.");
            for (Entry<TableName, Map<List<UUID>, List<Row>>> entry : rowMap.entrySet()) {
                batch(entry.getKey(), entry.getValue().values());
            }
            LOG.debug("Finished replicating mutations.");
        }
        if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) {
            LOG.debug("Started replicating bulk loaded data.");
            HFileReplicator hFileReplicator = new HFileReplicator(this.provider.getConf(this.conf, replicationClusterId), sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf, getConnection());
            hFileReplicator.replicate();
            LOG.debug("Finished replicating bulk loaded data.");
        }
        int size = entries.size();
        this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime());
        this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated);
        this.totalReplicatedEdits.addAndGet(totalReplicated);
    } catch (IOException ex) {
        LOG.error("Unable to accept edit because:", ex);
        throw ex;
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ArrayList(java.util.ArrayList) HBaseProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos) ArrayList(java.util.ArrayList) List(java.util.List) UUID(java.util.UUID) Cell(org.apache.hadoop.hbase.Cell) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) TreeMap(java.util.TreeMap) Put(org.apache.hadoop.hbase.client.Put) TableName(org.apache.hadoop.hbase.TableName) Row(org.apache.hadoop.hbase.client.Row) WALEntry(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry) Mutation(org.apache.hadoop.hbase.client.Mutation) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap)
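
Note the setClusterIds call on each new mutation: it records which cluster the edit came from, so the sink's own replication source can skip those edits and avoid a replication loop. A minimal sketch of applying a Put tagged with its source cluster id; the table, column names, and the way the UUID is obtained are placeholders, not the ReplicationSink internals:

import java.io.IOException;
import java.util.Collections;
import java.util.UUID;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TaggedPutExample {

    // Apply an edit while recording which cluster it originated from,
    // mirroring the setClusterIds() call in replicateEntries().
    static void applyReplicatedPut(Connection connection, UUID sourceClusterId) throws IOException {
        try (Table table = connection.getTable(TableName.valueOf("example_table"))) {
            Put put = new Put(Bytes.toBytes("row-1"));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
            // Tag the mutation with its source cluster so downstream replication
            // can recognize and skip it instead of shipping it back.
            put.setClusterIds(Collections.singletonList(sourceClusterId));
            table.put(put);
        }
    }
}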

Example 25 with Put

use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

the class WALSplitter method getMutationsFromWALEntry.

/**
   * Constructs mutations from a WALEntry. It also reconstructs the WALKey and WALEdit
   * from the passed in WALEntry.
   * @param entry the WALEntry to convert
   * @param cells scanner over the cells referenced by the entry
   * @param logEntry pair that receives the WALKey and WALEdit instances extracted from the
   *          passed in WALEntry
   * @return list of MutationReplay (mutation, type, and nonces) to be replayed
   * @throws IOException if a mutation could not be built from the cells
   */
public static List<MutationReplay> getMutationsFromWALEntry(WALEntry entry, CellScanner cells, Pair<WALKey, WALEdit> logEntry, Durability durability) throws IOException {
    if (entry == null) {
        // return an empty array
        return new ArrayList<>();
    }
    long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ? entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
    int count = entry.getAssociatedCellCount();
    List<MutationReplay> mutations = new ArrayList<>();
    Cell previousCell = null;
    Mutation m = null;
    WALKey key = null;
    WALEdit val = null;
    if (logEntry != null)
        val = new WALEdit();
    for (int i = 0; i < count; i++) {
        // Throw index out of bounds if our cell count is off
        if (!cells.advance()) {
            throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
        }
        Cell cell = cells.current();
        if (val != null)
            val.add(cell);
        boolean isNewRowOrType = previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() || !CellUtil.matchingRow(previousCell, cell);
        if (isNewRowOrType) {
            // Create new mutation
            if (CellUtil.isDelete(cell)) {
                m = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                // Deletes don't have nonces.
                mutations.add(new MutationReplay(MutationType.DELETE, m, HConstants.NO_NONCE, HConstants.NO_NONCE));
            } else {
                m = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                // Puts might come from increment or append, thus we need nonces.
                long nonceGroup = entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
                long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
                mutations.add(new MutationReplay(MutationType.PUT, m, nonceGroup, nonce));
            }
        }
        if (CellUtil.isDelete(cell)) {
            ((Delete) m).addDeleteMarker(cell);
        } else {
            ((Put) m).add(cell);
        }
        m.setDurability(durability);
        previousCell = cell;
    }
    // reconstruct WALKey
    if (logEntry != null) {
        org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey walKeyProto = entry.getKey();
        List<UUID> clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount());
        for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) {
            clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
        }
        key = new WALKey(walKeyProto.getEncodedRegionName().toByteArray(), TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId, walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(), walKeyProto.getNonce(), null);
        logEntry.setFirst(key);
        logEntry.setSecond(val);
    }
    return mutations;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ArrayList(java.util.ArrayList) Put(org.apache.hadoop.hbase.client.Put) HBaseProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) Mutation(org.apache.hadoop.hbase.client.Mutation) UUID(java.util.UUID) Cell(org.apache.hadoop.hbase.Cell)
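
The core of the loop is the "new row or new type" test, which decides when to start a fresh Put or Delete before appending the current cell to it. A self-contained sketch of that grouping rule, using the same CellUtil checks as the method above (an illustration only, not the WALSplitter API itself):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CellGrouper {

    // Group a run of cells into Put/Delete mutations using the same
    // "new row or new type" boundary test as getMutationsFromWALEntry.
    static List<Mutation> group(List<Cell> cells) throws IOException {
        List<Mutation> mutations = new ArrayList<>();
        Cell previousCell = null;
        Mutation m = null;
        for (Cell cell : cells) {
            boolean isNewRowOrType = previousCell == null
                || previousCell.getTypeByte() != cell.getTypeByte()
                || !CellUtil.matchingRow(previousCell, cell);
            if (isNewRowOrType) {
                // Start a new mutation whenever the row or the cell type changes.
                m = CellUtil.isDelete(cell)
                    ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
                    : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                mutations.add(m);
            }
            if (CellUtil.isDelete(cell)) {
                ((Delete) m).addDeleteMarker(cell);
            } else {
                ((Put) m).add(cell);
            }
            previousCell = cell;
        }
        return mutations;
    }

    public static void main(String[] args) throws IOException {
        byte[] cf = Bytes.toBytes("cf");
        List<Cell> cells = new ArrayList<>();
        cells.add(new KeyValue(Bytes.toBytes("row-1"), cf, Bytes.toBytes("q1"), Bytes.toBytes("a")));
        cells.add(new KeyValue(Bytes.toBytes("row-1"), cf, Bytes.toBytes("q2"), Bytes.toBytes("b")));
        cells.add(new KeyValue(Bytes.toBytes("row-2"), cf, Bytes.toBytes("q1"), Bytes.toBytes("c")));
        System.out.println(group(cells).size()); // 2 mutations: one Put per row
    }
}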

Aggregations

Put (org.apache.hadoop.hbase.client.Put) 849
Test (org.junit.Test) 414
Table (org.apache.hadoop.hbase.client.Table) 237
ArrayList (java.util.ArrayList) 216
Result (org.apache.hadoop.hbase.client.Result) 183
Scan (org.apache.hadoop.hbase.client.Scan) 164
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 149
Delete (org.apache.hadoop.hbase.client.Delete) 146
Cell (org.apache.hadoop.hbase.Cell) 141
IOException (java.io.IOException) 134
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 134
TableName (org.apache.hadoop.hbase.TableName) 118
Get (org.apache.hadoop.hbase.client.Get) 114
KeyValue (org.apache.hadoop.hbase.KeyValue) 98
Configuration (org.apache.hadoop.conf.Configuration) 79
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner) 75
Connection (org.apache.hadoop.hbase.client.Connection) 68
HRegion (org.apache.hadoop.hbase.regionserver.HRegion) 60
Admin (org.apache.hadoop.hbase.client.Admin) 54
Mutation (org.apache.hadoop.hbase.client.Mutation) 53