Example 56 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache: the class ProtobufUtil, method toPut.

/**
   * Convert a protocol buffer MutationProto to a client Put.
   *
   * @param proto The protocol buffer MutationProto to convert
   * @param cellScanner If non-null, the Cell data that goes with this proto.
   * @return A client Put.
   * @throws IOException if the proto is malformed or the Cell data it references is missing
   */
public static Put toPut(final MutationProto proto, final CellScanner cellScanner) throws IOException {
    // TODO: Server-side at least why do we convert back to the Client types?  Why not just pb it?
    MutationType type = proto.getMutateType();
    assert type == MutationType.PUT : type.name();
    long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP;
    Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null;
    int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
    if (cellCount > 0) {
        // The proto has metadata only and the data is separate to be found in the cellScanner.
        if (cellScanner == null) {
            throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto));
        }
        for (int i = 0; i < cellCount; i++) {
            if (!cellScanner.advance()) {
                throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + toShortString(proto));
            }
            Cell cell = cellScanner.current();
            if (put == null) {
                put = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp);
            }
            put.add(cell);
        }
    } else {
        if (put == null) {
            throw new IllegalArgumentException("row cannot be null");
        }
        // The proto has the metadata and the data itself
        for (ColumnValue column : proto.getColumnValueList()) {
            byte[] family = column.getFamily().toByteArray();
            for (QualifierValue qv : column.getQualifierValueList()) {
                if (!qv.hasValue()) {
                    throw new DoNotRetryIOException("Missing required field: qualifier value");
                }
                ByteBuffer qualifier = qv.hasQualifier() ? qv.getQualifier().asReadOnlyByteBuffer() : null;
                ByteBuffer value = qv.hasValue() ? qv.getValue().asReadOnlyByteBuffer() : null;
                long ts = timestamp;
                if (qv.hasTimestamp()) {
                    ts = qv.getTimestamp();
                }
                byte[] allTagsBytes;
                if (qv.hasTags()) {
                    allTagsBytes = qv.getTags().toByteArray();
                    if (qv.hasDeleteType()) {
                        byte[] qual = qv.hasQualifier() ? qv.getQualifier().toByteArray() : null;
                        put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, ts, fromDeleteType(qv.getDeleteType()), null, allTagsBytes));
                    } else {
                        List<Tag> tags = TagUtil.asList(allTagsBytes, 0, (short) allTagsBytes.length);
                        Tag[] tagsArray = new Tag[tags.size()];
                        put.addImmutable(family, qualifier, ts, value, tags.toArray(tagsArray));
                    }
                } else {
                    if (qv.hasDeleteType()) {
                        byte[] qual = qv.hasQualifier() ? qv.getQualifier().toByteArray() : null;
                        put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, ts, fromDeleteType(qv.getDeleteType())));
                    } else {
                        put.addImmutable(family, qualifier, ts, value);
                    }
                }
            }
        }
    }
    put.setDurability(toDurability(proto.getDurability()));
    for (NameBytesPair attribute : proto.getAttributeList()) {
        put.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
    }
    return put;
}
Also used: MutationType (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType), KeyValue (org.apache.hadoop.hbase.KeyValue), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), QualifierValue (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue), ByteBuffer (java.nio.ByteBuffer), Put (org.apache.hadoop.hbase.client.Put), NameBytesPair (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair), ColumnValue (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue), Tag (org.apache.hadoop.hbase.Tag), Cell (org.apache.hadoop.hbase.Cell)
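
For orientation, toPut can be exercised by round-tripping a client Put through its protobuf form. A minimal sketch, assuming the era's non-shaded ProtobufUtil.toMutation(MutationType, Mutation) for the forward conversion (class name and values are illustrative); since toMutation embeds the column data in the proto itself, toPut takes the metadata-plus-data branch and a null CellScanner is acceptable:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.Bytes;

public class PutRoundTripSketch {
    public static void main(String[] args) throws IOException {
        // Build a client-side Put with a single column.
        Put original = new Put(Bytes.toBytes("row1"));
        original.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        // Convert to the wire form and back. The column data travels inline
        // in the proto, so no separate CellScanner is needed.
        MutationProto proto = ProtobufUtil.toMutation(MutationType.PUT, original);
        Put restored = ProtobufUtil.toPut(proto, null);
        System.out.println(Bytes.toString(restored.getRow()));  // prints: row1
    }
}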

Example 57 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache: the class TableBasedReplicationQueuesImpl, method addLog.

@Override
public void addLog(String queueId, String filename) throws ReplicationException {
    try (Table replicationTable = getOrBlockOnReplicationTable()) {
        if (!checkQueueExists(queueId)) {
            // Each queue will have an Owner, OwnerHistory, and a collection of [WAL:offset] key values
            Put putNewQueue = new Put(Bytes.toBytes(buildQueueRowKey(queueId)));
            putNewQueue.addColumn(CF_QUEUE, COL_QUEUE_OWNER, serverNameBytes);
            putNewQueue.addColumn(CF_QUEUE, COL_QUEUE_OWNER_HISTORY, EMPTY_STRING_BYTES);
            putNewQueue.addColumn(CF_QUEUE, Bytes.toBytes(filename), INITIAL_OFFSET_BYTES);
            replicationTable.put(putNewQueue);
        } else {
            // Otherwise simply add the new log and offset as a new column
            Put putNewLog = new Put(queueIdToRowKey(queueId));
            putNewLog.addColumn(CF_QUEUE, Bytes.toBytes(filename), INITIAL_OFFSET_BYTES);
            safeQueueUpdate(putNewLog);
        }
    } catch (IOException | ReplicationException e) {
        String errMsg = "Failed adding log queueId=" + queueId + " filename=" + filename;
        abortable.abort(errMsg, e);
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), IOException (java.io.IOException), Put (org.apache.hadoop.hbase.client.Put)
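
The queue row that addLog creates has a fixed shape: one owner column, one owner-history column, and one column per WAL whose cell value is the replication offset. A self-contained sketch of that layout, with hypothetical stand-ins for the class's private constants (CF_QUEUE, COL_QUEUE_OWNER, and friends):

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class QueueRowSketch {
    // Hypothetical stand-ins for the private constants used above.
    private static final byte[] CF_QUEUE = Bytes.toBytes("q");
    private static final byte[] COL_QUEUE_OWNER = Bytes.toBytes("owner");
    private static final byte[] COL_QUEUE_OWNER_HISTORY = Bytes.toBytes("history");
    private static final byte[] INITIAL_OFFSET = Bytes.toBytes("0");

    public static Put newQueueRow(String queueRowKey, String serverName, String walName) {
        Put put = new Put(Bytes.toBytes(queueRowKey));
        // Which region server currently owns this queue.
        put.addColumn(CF_QUEUE, COL_QUEUE_OWNER, Bytes.toBytes(serverName));
        // Past owners; empty for a brand-new queue.
        put.addColumn(CF_QUEUE, COL_QUEUE_OWNER_HISTORY, Bytes.toBytes(""));
        // One column per WAL; the cell value is the replication offset.
        put.addColumn(CF_QUEUE, Bytes.toBytes(walName), INITIAL_OFFSET);
        return put;
    }
}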

Example 58 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache: the class TableBasedReplicationQueuesImpl, method setLogPosition.

@Override
public void setLogPosition(String queueId, String filename, long position) {
    try (Table replicationTable = getOrBlockOnReplicationTable()) {
        byte[] rowKey = queueIdToRowKey(queueId);
        // Check that the log exists. addLog() must have been called before setLogPosition().
        Get checkLogExists = new Get(rowKey);
        checkLogExists.addColumn(CF_QUEUE, Bytes.toBytes(filename));
        if (!replicationTable.exists(checkLogExists)) {
            String errMsg = "Could not set position of non-existent log from queueId=" + queueId + ", filename=" + filename;
            abortable.abort(errMsg, new ReplicationException(errMsg));
            return;
        }
        // Update the log offset if it exists
        Put walAndOffset = new Put(rowKey);
        walAndOffset.addColumn(CF_QUEUE, Bytes.toBytes(filename), Bytes.toBytes(position));
        safeQueueUpdate(walAndOffset);
    } catch (IOException | ReplicationException e) {
        String errMsg = "Failed writing log position queueId=" + queueId + "filename=" + filename + " position=" + position;
        abortable.abort(errMsg, e);
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), Get (org.apache.hadoop.hbase.client.Get), IOException (java.io.IOException), Put (org.apache.hadoop.hbase.client.Put)
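
setLogPosition follows a common check-then-write pattern: probe for the column with Table.exists(Get) before overwriting it with a Put. A minimal sketch of that pattern against an ordinary table (the family and method names here are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckThenUpdateSketch {
    private static final byte[] CF = Bytes.toBytes("cf");

    // Overwrite column 'col' of 'rowKey' only if it already exists.
    public static boolean updateIfPresent(Table table, byte[] rowKey, byte[] col, long value) throws IOException {
        Get probe = new Get(rowKey);
        probe.addColumn(CF, col);
        if (!table.exists(probe)) {
            return false;  // nothing to update; the caller decides how to react
        }
        Put update = new Put(rowKey);
        update.addColumn(CF, col, Bytes.toBytes(value));
        table.put(update);
        return true;
    }
}

Note that the probe and the write are separate RPCs, so this pattern is not atomic by itself; the original routes the write through safeQueueUpdate rather than a bare Table.put for that reason.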

Example 59 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache: the class BackupSystemTable, method createPutsForAddWALFiles.

/**
   * Creates a list of puts for the given WAL files
   * @param files list of WAL file paths
   * @param backupId backup id
   * @param backupRoot root directory path to backup
   * @return put list
   * @throws IOException if a row key cannot be derived from a WAL file name
   */
private List<Put> createPutsForAddWALFiles(List<String> files, String backupId, String backupRoot) throws IOException {
    List<Put> puts = new ArrayList<Put>();
    for (String file : files) {
        Put put = new Put(rowkey(WALS_PREFIX, BackupUtils.getUniqueWALFileNamePart(file)));
        put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("backupId"), Bytes.toBytes(backupId));
        put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("file"), Bytes.toBytes(file));
        put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("root"), Bytes.toBytes(backupRoot));
        puts.add(put);
    }
    return puts;
}
Also used: ArrayList (java.util.ArrayList), Put (org.apache.hadoop.hbase.client.Put)
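
Returning a List<Put> lets the caller write all rows in one batched call instead of one RPC per WAL file. A hedged usage sketch (the row-key scheme and column names here are illustrative, not the class's real constants):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchPutSketch {
    // Write one row per WAL path in a single batched call.
    public static void writeWalRows(Table table, List<String> walPaths, String backupId) throws IOException {
        List<Put> puts = new ArrayList<>();
        for (String path : walPaths) {
            Put put = new Put(Bytes.toBytes("wals:" + path));  // illustrative row key
            put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("backupId"), Bytes.toBytes(backupId));
            puts.add(put);
        }
        // Table.put(List<Put>) sends the whole batch, grouped by region server.
        table.put(puts);
    }
}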

Example 60 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache: the class BackupSystemTable, method writeRegionServerLogTimestamp.

/**
   * Write the current timestamps for each regionserver to backup system table after a successful
   * full or incremental backup. The saved timestamp is of the last log file that was backed up
   * already.
   * @param tables tables that were backed up
   * @param newTimestamps map from region server to the timestamp of its last backed-up WAL
   * @param backupRoot root directory path to backup
   * @throws IOException exception
   */
public void writeRegionServerLogTimestamp(Set<TableName> tables, HashMap<String, Long> newTimestamps, String backupRoot) throws IOException {
    if (LOG.isTraceEnabled()) {
        LOG.trace("write RS log time stamps to backup system table for tables [" + StringUtils.join(tables, ",") + "]");
    }
    List<Put> puts = new ArrayList<Put>();
    for (TableName table : tables) {
        byte[] smapData = toTableServerTimestampProto(table, newTimestamps).toByteArray();
        Put put = createPutForWriteRegionServerLogTimestamp(table, smapData, backupRoot);
        puts.add(put);
    }
    try (Table table = connection.getTable(tableName)) {
        table.put(puts);
    }
}
Also used: TableName (org.apache.hadoop.hbase.TableName), Table (org.apache.hadoop.hbase.client.Table), ArrayList (java.util.ArrayList), Put (org.apache.hadoop.hbase.client.Put)
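
From the caller's side, the method takes the set of backed-up tables plus a map from region server to the timestamp of its last backed-up WAL. A hedged invocation sketch (the package of BackupSystemTable varies by branch, and the server name, timestamp, and root path are made up):

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;

public class TimestampWriteSketch {
    // Record, per region server, the timestamp of the last WAL already backed up.
    public static void recordBackupProgress(BackupSystemTable systemTable) throws IOException {
        Set<TableName> tables = new HashSet<>();
        tables.add(TableName.valueOf("ns:orders"));  // hypothetical table
        HashMap<String, Long> newTimestamps = new HashMap<>();
        // key: region server identifier; value: timestamp of its last backed-up WAL
        newTimestamps.put("rs1.example.com,16020,1500000000000", 1500003600000L);
        systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, "hdfs://backup/root");
    }
}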

Aggregations

Put (org.apache.hadoop.hbase.client.Put): 1416
Test (org.junit.Test): 672
Table (org.apache.hadoop.hbase.client.Table): 489
ArrayList (java.util.ArrayList): 317
Result (org.apache.hadoop.hbase.client.Result): 279
TableName (org.apache.hadoop.hbase.TableName): 257
IOException (java.io.IOException): 241
Delete (org.apache.hadoop.hbase.client.Delete): 225
Scan (org.apache.hadoop.hbase.client.Scan): 222
Cell (org.apache.hadoop.hbase.Cell): 200
Get (org.apache.hadoop.hbase.client.Get): 196
Configuration (org.apache.hadoop.conf.Configuration): 148
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 139
Connection (org.apache.hadoop.hbase.client.Connection): 122
KeyValue (org.apache.hadoop.hbase.KeyValue): 112
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 110
Admin (org.apache.hadoop.hbase.client.Admin): 89
List (java.util.List): 83
Mutation (org.apache.hadoop.hbase.client.Mutation): 82
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 80