
Example 26 with Cell

use of org.apache.hadoop.hbase.Cell in project hbase by apache.

the class WALPrettyPrinter method processFile.

/**
   * reads a log file and outputs its contents, one transaction at a time, as
   * specified by the currently configured options
   *
   * @param conf
   *          the HBase configuration relevant to this log file
   * @param p
   *          the path of the log file to be read
   * @throws IOException
   *           may be unable to access the configured filesystem or requested
   *           file.
   */
public void processFile(final Configuration conf, final Path p) throws IOException {
    FileSystem fs = p.getFileSystem(conf);
    if (!fs.exists(p)) {
        throw new FileNotFoundException(p.toString());
    }
    if (!fs.isFile(p)) {
        throw new IOException(p + " is not a file");
    }
    WAL.Reader log = WALFactory.createReader(fs, p, conf);
    if (log instanceof ProtobufLogReader) {
        List<String> writerClsNames = ((ProtobufLogReader) log).getWriterClsNames();
        if (writerClsNames != null && writerClsNames.size() > 0) {
            out.print("Writer Classes: ");
            for (int i = 0; i < writerClsNames.size(); i++) {
                out.print(writerClsNames.get(i));
                if (i != writerClsNames.size() - 1) {
                    out.print(" ");
                }
            }
            out.println();
        }
        String cellCodecClsName = ((ProtobufLogReader) log).getCodecClsName();
        if (cellCodecClsName != null) {
            out.println("Cell Codec Class: " + cellCodecClsName);
        }
    }
    if (outputJSON && !persistentOutput) {
        out.print("[");
        firstTxn = true;
    }
    try {
        WAL.Entry entry;
        while ((entry = log.next()) != null) {
            WALKey key = entry.getKey();
            WALEdit edit = entry.getEdit();
            // begin building a transaction structure
            Map<String, Object> txn = key.toStringMap();
            long writeTime = key.getWriteTime();
            // check output filters
            if (sequence >= 0 && ((Long) txn.get("sequence")) != sequence)
                continue;
            if (region != null && !((String) txn.get("region")).equals(region))
                continue;
            // initialize list into which we will store atomic actions
            List<Map> actions = new ArrayList<>();
            for (Cell cell : edit.getCells()) {
                // add atomic operation to txn
                Map<String, Object> op = new HashMap<>(toStringMap(cell));
                if (outputValues)
                    op.put("value", Bytes.toStringBinary(CellUtil.cloneValue(cell)));
                // check row output filter
                if (row == null || ((String) op.get("row")).equals(row)) {
                    actions.add(op);
                }
            }
            if (actions.isEmpty())
                continue;
            txn.put("actions", actions);
            if (outputJSON) {
                // JSON output is a straightforward "toString" on the txn object
                if (firstTxn)
                    firstTxn = false;
                else
                    out.print(",");
                // encode and print JSON
                out.print(MAPPER.writeValueAsString(txn));
            } else {
                // Pretty output, complete with indentation by atomic action
                out.println("Sequence=" + txn.get("sequence") + " " + ", region=" + txn.get("region") + " at write timestamp=" + new Date(writeTime));
                for (int i = 0; i < actions.size(); i++) {
                    Map op = actions.get(i);
                    out.println("row=" + op.get("row") + ", column=" + op.get("family") + ":" + op.get("qualifier"));
                    if (op.get("tag") != null) {
                        out.println("    tag: " + op.get("tag"));
                    }
                    if (outputValues)
                        out.println("    value: " + op.get("value"));
                }
            }
        }
    } finally {
        log.close();
    }
    if (outputJSON && !persistentOutput) {
        out.print("]");
    }
}
Also used : HashMap(java.util.HashMap) FileNotFoundException(java.io.FileNotFoundException) ArrayList(java.util.ArrayList) IOException(java.io.IOException) Date(java.util.Date) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) FileSystem(org.apache.hadoop.fs.FileSystem) ProtobufLogReader(org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader) HashMap(java.util.HashMap) Map(java.util.Map) Cell(org.apache.hadoop.hbase.Cell)
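A minimal driver sketch for the method above, assuming WALPrettyPrinter's no-argument constructor and its default (pretty, non-JSON) output options; the WAL path is hypothetical:

// Sketch only: the WAL path is hypothetical, and the no-arg constructor with
// default output options is assumed from the fields used in processFile.
Configuration conf = HBaseConfiguration.create();
Path walPath = new Path("/hbase/WALs/host.example.com,16020,1/wal.1");  // hypothetical path
WALPrettyPrinter printer = new WALPrettyPrinter();
printer.processFile(conf, walPath);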

Example 27 with Cell

use of org.apache.hadoop.hbase.Cell in project hbase by apache.

the class WALSplitter method getMutationsFromWALEntry.

/**
   * Constructs mutations from a WALEntry, and optionally reconstructs the
   * WALKey & WALEdit from the passed-in WALEntry.
   * @param entry the WALEntry to convert
   * @param cells scanner over the Cells associated with the entry
   * @param logEntry pair that receives the WALKey and WALEdit instances
   *          extracted from the passed-in WALEntry; may be null if the caller
   *          does not need them
   * @return list of MutationReplay, each pairing a MutationType with its
   *          Mutation, to be replayed
   * @throws IOException
   */
public static List<MutationReplay> getMutationsFromWALEntry(WALEntry entry, CellScanner cells, Pair<WALKey, WALEdit> logEntry, Durability durability) throws IOException {
    if (entry == null) {
        // return an empty list
        return new ArrayList<>();
    }
    long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ? entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
    int count = entry.getAssociatedCellCount();
    List<MutationReplay> mutations = new ArrayList<>();
    Cell previousCell = null;
    Mutation m = null;
    WALKey key = null;
    WALEdit val = null;
    if (logEntry != null)
        val = new WALEdit();
    for (int i = 0; i < count; i++) {
        // Throw index out of bounds if our cell count is off
        if (!cells.advance()) {
            throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
        }
        Cell cell = cells.current();
        if (val != null)
            val.add(cell);
        boolean isNewRowOrType = previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() || !CellUtil.matchingRow(previousCell, cell);
        if (isNewRowOrType) {
            // Create new mutation
            if (CellUtil.isDelete(cell)) {
                m = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                // Deletes don't have nonces.
                mutations.add(new MutationReplay(MutationType.DELETE, m, HConstants.NO_NONCE, HConstants.NO_NONCE));
            } else {
                m = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                // Puts might come from increment or append, thus we need nonces.
                long nonceGroup = entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
                long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
                mutations.add(new MutationReplay(MutationType.PUT, m, nonceGroup, nonce));
            }
        }
        if (CellUtil.isDelete(cell)) {
            ((Delete) m).addDeleteMarker(cell);
        } else {
            ((Put) m).add(cell);
        }
        m.setDurability(durability);
        previousCell = cell;
    }
    // reconstruct WALKey
    if (logEntry != null) {
        org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey walKeyProto = entry.getKey();
        List<UUID> clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount());
        for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) {
            clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
        }
        key = new WALKey(walKeyProto.getEncodedRegionName().toByteArray(), TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId, walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(), walKeyProto.getNonce(), null);
        logEntry.setFirst(key);
        logEntry.setSecond(val);
    }
    return mutations;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ArrayList(java.util.ArrayList) Put(org.apache.hadoop.hbase.client.Put) HBaseProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) Mutation(org.apache.hadoop.hbase.client.Mutation) UUID(java.util.UUID) Cell(org.apache.hadoop.hbase.Cell)
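A hedged replay sketch for the method above; entry (WALEntry) and cells (CellScanner) stand for values received by a replay RPC, and MutationReplay's public type and mutation fields are assumed from this class:

// Sketch: `entry` and `cells` are assumed to arrive with a replay RPC.
Pair<WALKey, WALEdit> walPair = new Pair<>();
List<MutationReplay> replays =
    WALSplitter.getMutationsFromWALEntry(entry, cells, walPair, Durability.USE_DEFAULT);
for (MutationReplay replay : replays) {
    // replay.type is PUT or DELETE; replay.mutation is the Put/Delete to re-apply
    System.out.println(replay.type + " on row "
        + Bytes.toStringBinary(replay.mutation.getRow()));
}
WALKey reconstructedKey = walPair.getFirst();    // filled in as a side effect
WALEdit reconstructedEdit = walPair.getSecond();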

Example 28 with Cell

use of org.apache.hadoop.hbase.Cell in project hbase by apache.

the class ProtobufUtil method toIncrement.

/**
   * Convert a protocol buffer Mutate to an Increment
   *
   * @param proto the protocol buffer Mutate to convert
   * @param cellScanner scanner over any Cells associated with the proto; may
   *          be null when the cell data is inlined in the proto
   * @return the converted client Increment
   * @throws IOException
   */
public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner) throws IOException {
    MutationType type = proto.getMutateType();
    assert type == MutationType.INCREMENT : type.name();
    byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
    Increment increment = row != null ? new Increment(row) : null;
    int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
    if (cellCount > 0) {
        // The proto has metadata only and the data is separate to be found in the cellScanner.
        if (cellScanner == null) {
            throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + TextFormat.shortDebugString(proto));
        }
        for (int i = 0; i < cellCount; i++) {
            if (!cellScanner.advance()) {
                throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + TextFormat.shortDebugString(proto));
            }
            Cell cell = cellScanner.current();
            if (increment == null) {
                increment = new Increment(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
            }
            increment.add(cell);
        }
    } else {
        if (increment == null) {
            throw new IllegalArgumentException("row cannot be null");
        }
        for (ColumnValue column : proto.getColumnValueList()) {
            byte[] family = column.getFamily().toByteArray();
            for (QualifierValue qv : column.getQualifierValueList()) {
                byte[] qualifier = qv.getQualifier().toByteArray();
                if (!qv.hasValue()) {
                    throw new DoNotRetryIOException("Missing required field: qualifier value");
                }
                byte[] value = qv.getValue().toByteArray();
                byte[] tags = null;
                if (qv.hasTags()) {
                    tags = qv.getTags().toByteArray();
                }
                increment.add(CellUtil.createCell(row, family, qualifier, qv.getTimestamp(), KeyValue.Type.Put, value, tags));
            }
        }
    }
    if (proto.hasTimeRange()) {
        TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
        increment.setTimeRange(timeRange.getMin(), timeRange.getMax());
    }
    increment.setDurability(toDurability(proto.getDurability()));
    for (NameBytesPair attribute : proto.getAttributeList()) {
        increment.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
    }
    return increment;
}
Also used : TimeRange(org.apache.hadoop.hbase.io.TimeRange) MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) NameBytesPair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Increment(org.apache.hadoop.hbase.client.Increment) QualifierValue(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue) ColumnValue(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue) Cell(org.apache.hadoop.hbase.Cell) ByteBufferCell(org.apache.hadoop.hbase.ByteBufferCell)
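A round-trip sketch pairing toIncrement with an encoder; ProtobufUtil.toMutation(MutationType, Mutation) is assumed here to be the matching serializer, and the null cellScanner exercises the inline ColumnValue branch above:

// Sketch: encode a client Increment to a MutationProto, then decode it back.
// toMutation(...) is assumed here as the inverse of toIncrement(...).
Increment original = new Increment(Bytes.toBytes("row1"));
original.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
MutationProto proto = ProtobufUtil.toMutation(MutationType.INCREMENT, original);
Increment decoded = ProtobufUtil.toIncrement(proto, null);  // null scanner: data is inline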

Example 29 with Cell

use of org.apache.hadoop.hbase.Cell in project hbase by apache.

the class ResponseConverter method getResults.

/**
   * Create Results from the cells, using the cell metadata in the response.
   * @param cellScanner scanner over the cellblock Cells; may be null when the
   *          Results are embedded in the response itself
   * @param response the ScanResponse carrying per-Result cell counts
   * @return results
   */
public static Result[] getResults(CellScanner cellScanner, ScanResponse response) throws IOException {
    if (response == null)
        return null;
    // If there is a cellScanner, the number of Results to return is the count of elements in
    // the cellsPerResult list. Otherwise, it is how many results are embedded in the response.
    int noOfResults = cellScanner != null ? response.getCellsPerResultCount() : response.getResultsCount();
    Result[] results = new Result[noOfResults];
    for (int i = 0; i < noOfResults; i++) {
        if (cellScanner != null) {
            // Cells are out in cellblocks.  Group them up again as Results.  How many to read at a
            // time will be found in getCellsLength -- length here is how many Cells in the i'th Result
            int noOfCells = response.getCellsPerResult(i);
            boolean isPartial = response.getPartialFlagPerResultCount() > i ? response.getPartialFlagPerResult(i) : false;
            List<Cell> cells = new ArrayList<>(noOfCells);
            for (int j = 0; j < noOfCells; j++) {
                try {
                    if (!cellScanner.advance()) {
                        // We could not retrieve the number of cells that ResultCellMeta told
                        // us to expect, so the client must scan for the same results again.
                        // Throw DNRIOE, as a client retry on the same scanner will result in
                        // OutOfOrderScannerNextException.
                        String msg = "Results sent from server=" + noOfResults + ". But only got " + i + " results completely at client. Resetting the scanner to scan again.";
                        LOG.error(msg);
                        throw new DoNotRetryIOException(msg);
                    }
                } catch (IOException ioe) {
                    // We hit an IOE while retrieving the cells for the Results, so the client
                    // must scan for the same results again. Throw DNRIOE, as a client retry on
                    // the same scanner will result in OutOfOrderScannerNextException.
                    LOG.error("Exception while reading cells from result. "
                        + "Resetting the scanner to scan again.", ioe);
                    throw new DoNotRetryIOException("Resetting the scanner.", ioe);
                }
                cells.add(cellScanner.current());
            }
            results[i] = Result.create(cells, null, response.getStale(), isPartial);
        } else {
            // Result is pure pb.
            results[i] = ProtobufUtil.toResult(response.getResults(i));
        }
    }
    return results;
}
Also used : DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ArrayList(java.util.ArrayList) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) Cell(org.apache.hadoop.hbase.Cell) Result(org.apache.hadoop.hbase.client.Result) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult)
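A client-side consumption sketch for the method above; scanResponse and cellScanner stand for a scan RPC's response and its cellblock scanner:

// Sketch: `scanResponse` (ScanResponse) and `cellScanner` are assumed to come
// back from a scan RPC; with a cellblock, the Cells live in the scanner.
Result[] results = ResponseConverter.getResults(cellScanner, scanResponse);
for (Result result : results) {
    for (Cell cell : result.rawCells()) {
        System.out.println(Bytes.toStringBinary(CellUtil.cloneRow(cell)) + " -> "
            + Bytes.toStringBinary(CellUtil.cloneValue(cell)));
    }
}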

Example 30 with Cell

use of org.apache.hadoop.hbase.Cell in project hbase by apache.

the class ProtobufUtil method toAppend.

/**
   * Convert a protocol buffer Mutate to an Append
   * @param proto the protocol buffer Mutate to convert
   * @param cellScanner scanner over any Cells associated with the proto; may
   *          be null when the cell data is inlined in the proto
   * @return the converted client Append
   * @throws IOException
   */
public static Append toAppend(final MutationProto proto, final CellScanner cellScanner) throws IOException {
    MutationType type = proto.getMutateType();
    assert type == MutationType.APPEND : type.name();
    byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
    Append append = row != null ? new Append(row) : null;
    int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
    if (cellCount > 0) {
        // The proto has metadata only and the data is separate to be found in the cellScanner.
        if (cellScanner == null) {
            throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto));
        }
        for (int i = 0; i < cellCount; i++) {
            if (!cellScanner.advance()) {
                throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + toShortString(proto));
            }
            Cell cell = cellScanner.current();
            if (append == null) {
                append = new Append(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
            }
            append.add(cell);
        }
    } else {
        if (append == null) {
            throw new IllegalArgumentException("row cannot be null");
        }
        for (ColumnValue column : proto.getColumnValueList()) {
            byte[] family = column.getFamily().toByteArray();
            for (QualifierValue qv : column.getQualifierValueList()) {
                byte[] qualifier = qv.getQualifier().toByteArray();
                if (!qv.hasValue()) {
                    throw new DoNotRetryIOException("Missing required field: qualifier value");
                }
                byte[] value = qv.getValue().toByteArray();
                byte[] tags = null;
                if (qv.hasTags()) {
                    tags = qv.getTags().toByteArray();
                }
                append.add(CellUtil.createCell(row, family, qualifier, qv.getTimestamp(), KeyValue.Type.Put, value, tags));
            }
        }
    }
    append.setDurability(toDurability(proto.getDurability()));
    for (NameBytesPair attribute : proto.getAttributeList()) {
        append.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
    }
    return append;
}
Also used : MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) Append(org.apache.hadoop.hbase.client.Append) NameBytesPair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) QualifierValue(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue) ColumnValue(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue) Cell(org.apache.hadoop.hbase.Cell) ByteBufferCell(org.apache.hadoop.hbase.ByteBufferCell)
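The same round-trip sketch works for Append; as with toIncrement, the toMutation(...) encoder is assumed, and the add(...) method name varies across HBase versions (addColumn in newer APIs):

// Sketch: encode a client Append and decode it back through the inline path.
Append original = new Append(Bytes.toBytes("row1"));
original.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));  // addColumn in newer APIs
MutationProto proto = ProtobufUtil.toMutation(MutationType.APPEND, original);
Append decoded = ProtobufUtil.toAppend(proto, null);  // null scanner: data is inline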

Aggregations

Cell (org.apache.hadoop.hbase.Cell): 862
Test (org.junit.Test): 326
ArrayList (java.util.ArrayList): 323
Scan (org.apache.hadoop.hbase.client.Scan): 258
KeyValue (org.apache.hadoop.hbase.KeyValue): 220
Result (org.apache.hadoop.hbase.client.Result): 203
Put (org.apache.hadoop.hbase.client.Put): 159
IOException (java.io.IOException): 123
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 106
Get (org.apache.hadoop.hbase.client.Get): 85
Table (org.apache.hadoop.hbase.client.Table): 85
List (java.util.List): 80
TableName (org.apache.hadoop.hbase.TableName): 77
Delete (org.apache.hadoop.hbase.client.Delete): 75
CellScanner (org.apache.hadoop.hbase.CellScanner): 69
Configuration (org.apache.hadoop.conf.Configuration): 62
InterruptedIOException (java.io.InterruptedIOException): 48
Map (java.util.Map): 45
Path (org.apache.hadoop.fs.Path): 45
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 45