Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
The class WALPrettyPrinter, method processFile.
/**
 * Reads a log file and outputs its contents, one transaction at a time, as
 * specified by the currently configured options.
 *
 * @param conf the HBase configuration relevant to this log file
 * @param p the path of the log file to be read
 * @throws IOException may be unable to access the configured filesystem or
 *           requested file
 */
public void processFile(final Configuration conf, final Path p) throws IOException {
  FileSystem fs = p.getFileSystem(conf);
  if (!fs.exists(p)) {
    throw new FileNotFoundException(p.toString());
  }
  if (!fs.isFile(p)) {
    throw new IOException(p + " is not a file");
  }
  WAL.Reader log = WALFactory.createReader(fs, p, conf);
  if (log instanceof ProtobufLogReader) {
    List<String> writerClsNames = ((ProtobufLogReader) log).getWriterClsNames();
    if (writerClsNames != null && writerClsNames.size() > 0) {
      out.print("Writer Classes: ");
      for (int i = 0; i < writerClsNames.size(); i++) {
        out.print(writerClsNames.get(i));
        if (i != writerClsNames.size() - 1) {
          out.print(" ");
        }
      }
      out.println();
    }
    String cellCodecClsName = ((ProtobufLogReader) log).getCodecClsName();
    if (cellCodecClsName != null) {
      out.println("Cell Codec Class: " + cellCodecClsName);
    }
  }
  if (outputJSON && !persistentOutput) {
    out.print("[");
    firstTxn = true;
  }
  try {
    WAL.Entry entry;
    while ((entry = log.next()) != null) {
      WALKey key = entry.getKey();
      WALEdit edit = entry.getEdit();
      // begin building a transaction structure
      Map<String, Object> txn = key.toStringMap();
      long writeTime = key.getWriteTime();
      // check output filters
      if (sequence >= 0 && ((Long) txn.get("sequence")) != sequence)
        continue;
      if (region != null && !((String) txn.get("region")).equals(region))
        continue;
      // initialize list into which we will store atomic actions
      List<Map> actions = new ArrayList<>();
      for (Cell cell : edit.getCells()) {
        // add atomic operation to txn
        Map<String, Object> op = new HashMap<>(toStringMap(cell));
        if (outputValues)
          op.put("value", Bytes.toStringBinary(CellUtil.cloneValue(cell)));
        // check row output filter
        if (row == null || ((String) op.get("row")).equals(row)) {
          actions.add(op);
        }
      }
      if (actions.isEmpty())
        continue;
      txn.put("actions", actions);
      if (outputJSON) {
        // JSON output is a straightforward "toString" on the txn object
        if (firstTxn)
          firstTxn = false;
        else
          out.print(",");
        // encode and print JSON
        out.print(MAPPER.writeValueAsString(txn));
      } else {
        // Pretty output, complete with indentation by atomic action
        out.println("Sequence=" + txn.get("sequence") + ", region=" + txn.get("region")
            + " at write timestamp=" + new Date(writeTime));
        for (int i = 0; i < actions.size(); i++) {
          Map op = actions.get(i);
          out.println("row=" + op.get("row") + ", column=" + op.get("family") + ":" + op.get("qualifier"));
          if (op.get("tag") != null) {
            out.println("    tag: " + op.get("tag"));
          }
          if (outputValues)
            out.println("    value: " + op.get("value"));
        }
      }
    }
  } finally {
    log.close();
  }
  if (outputJSON && !persistentOutput) {
    out.print("]");
  }
}
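A minimal usage sketch, assuming the configuration methods WALPrettyPrinter exposes in this version (enableValues, beginPersistentOutput, endPersistentOutput); the WAL path below is purely illustrative:

Configuration conf = HBaseConfiguration.create();
WALPrettyPrinter printer = new WALPrettyPrinter();
// Print cell values along with row/column metadata.
printer.enableValues();
// Suppress the per-file "[" / "]" JSON wrapping seen above when printing several files.
printer.beginPersistentOutput();
// Hypothetical path; point this at a real file under the cluster's WALs directory.
printer.processFile(conf, new Path("/hbase/WALs/example-host/example.wal"));
printer.endPersistentOutput();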
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
The class WALSplitter, method getMutationsFromWALEntry.
/**
 * Constructs mutations from a WALEntry. It also reconstructs the WALKey and
 * WALEdit from the passed-in WALEntry.
 * @param entry the WALEntry to convert
 * @param cells the CellScanner supplying the Cells referenced by the entry
 * @param logEntry pair of WALKey and WALEdit instance stores WALKey and WALEdit instances
 *          extracted from the passed in WALEntry.
 * @param durability the durability to set on each reconstructed Mutation
 * @return list of MutationReplay (mutation type plus Mutation) to be replayed
 * @throws IOException
 */
public static List<MutationReplay> getMutationsFromWALEntry(WALEntry entry, CellScanner cells,
    Pair<WALKey, WALEdit> logEntry, Durability durability) throws IOException {
  if (entry == null) {
    // return an empty list
    return new ArrayList<>();
  }
  long replaySeqId = (entry.getKey().hasOrigSequenceNumber())
      ? entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
  int count = entry.getAssociatedCellCount();
  List<MutationReplay> mutations = new ArrayList<>();
  Cell previousCell = null;
  Mutation m = null;
  WALKey key = null;
  WALEdit val = null;
  if (logEntry != null)
    val = new WALEdit();
  for (int i = 0; i < count; i++) {
    // Throw index out of bounds if our cell count is off
    if (!cells.advance()) {
      throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
    }
    Cell cell = cells.current();
    if (val != null)
      val.add(cell);
    boolean isNewRowOrType = previousCell == null
        || previousCell.getTypeByte() != cell.getTypeByte()
        || !CellUtil.matchingRow(previousCell, cell);
    if (isNewRowOrType) {
      // Create new mutation
      if (CellUtil.isDelete(cell)) {
        m = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
        // Deletes don't have nonces.
        mutations.add(new MutationReplay(MutationType.DELETE, m, HConstants.NO_NONCE, HConstants.NO_NONCE));
      } else {
        m = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
        // Puts might come from increment or append, thus we need nonces.
        long nonceGroup = entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
        long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
        mutations.add(new MutationReplay(MutationType.PUT, m, nonceGroup, nonce));
      }
    }
    if (CellUtil.isDelete(cell)) {
      ((Delete) m).addDeleteMarker(cell);
    } else {
      ((Put) m).add(cell);
    }
    m.setDurability(durability);
    previousCell = cell;
  }
  // reconstruct WALKey
  if (logEntry != null) {
    org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey walKeyProto = entry.getKey();
    List<UUID> clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount());
    for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) {
      clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
    }
    key = new WALKey(walKeyProto.getEncodedRegionName().toByteArray(),
        TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId,
        walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(),
        walKeyProto.getNonce(), null);
    logEntry.setFirst(key);
    logEntry.setSecond(val);
  }
  return mutations;
}
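A hedged sketch of a replay-side caller, for example a region server applying replicated edits; walEntry and cellScanner stand in for the entry and cellblock received over the RPC, and the public fields on MutationReplay (type, mutation) are as defined in WALSplitter:

Pair<WALKey, WALEdit> walEntryPair = new Pair<>();
List<MutationReplay> mutations = WALSplitter.getMutationsFromWALEntry(
    walEntry, cellScanner, walEntryPair, Durability.USE_DEFAULT);
for (MutationReplay replay : mutations) {
  // replay.type is MutationType.PUT or MutationType.DELETE;
  // replay.mutation carries the grouped cells for one row.
  apply(replay);  // hypothetical helper that applies the mutation to the region
}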
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
The class ProtobufUtil, method toIncrement.
/**
 * Convert a protocol buffer Mutate to an Increment.
 *
 * @param proto the protocol buffer Mutate to convert
 * @param cellScanner the CellScanner carrying the Cells when the proto holds metadata only
 * @return the converted client Increment
 * @throws IOException
 */
public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner)
    throws IOException {
  MutationType type = proto.getMutateType();
  assert type == MutationType.INCREMENT : type.name();
  byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
  Increment increment = row != null ? new Increment(row) : null;
  int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
  if (cellCount > 0) {
    // The proto has metadata only and the data is separate to be found in the cellScanner.
    if (cellScanner == null) {
      throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: "
          + TextFormat.shortDebugString(proto));
    }
    for (int i = 0; i < cellCount; i++) {
      if (!cellScanner.advance()) {
        throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i
            + " no cell returned: " + TextFormat.shortDebugString(proto));
      }
      Cell cell = cellScanner.current();
      if (increment == null) {
        increment = new Increment(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
      }
      increment.add(cell);
    }
  } else {
    if (increment == null) {
      throw new IllegalArgumentException("row cannot be null");
    }
    for (ColumnValue column : proto.getColumnValueList()) {
      byte[] family = column.getFamily().toByteArray();
      for (QualifierValue qv : column.getQualifierValueList()) {
        byte[] qualifier = qv.getQualifier().toByteArray();
        if (!qv.hasValue()) {
          throw new DoNotRetryIOException("Missing required field: qualifier value");
        }
        byte[] value = qv.getValue().toByteArray();
        byte[] tags = null;
        if (qv.hasTags()) {
          tags = qv.getTags().toByteArray();
        }
        increment.add(CellUtil.createCell(row, family, qualifier, qv.getTimestamp(),
            KeyValue.Type.Put, value, tags));
      }
    }
  }
  if (proto.hasTimeRange()) {
    TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
    increment.setTimeRange(timeRange.getMin(), timeRange.getMax());
  }
  increment.setDurability(toDurability(proto.getDurability()));
  for (NameBytesPair attribute : proto.getAttributeList()) {
    increment.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
  }
  return increment;
}
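A round-trip sketch: building an Increment, serializing it with ProtobufUtil.toMutation (which embeds the column values in the proto itself, so the cellCount == 0 branch above is taken), and converting back; the row, family, and qualifier bytes are illustrative:

Increment original = new Increment(Bytes.toBytes("row1"));
original.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
MutationProto proto = ProtobufUtil.toMutation(MutationType.INCREMENT, original);
// No cellblock in this sketch, so the CellScanner may be null.
Increment roundTripped = ProtobufUtil.toIncrement(proto, null);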
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
The class ResponseConverter, method getResults.
/**
 * Create Results from the cells using the cells meta data.
 * @param cellScanner the CellScanner carrying the cellblock data, if any
 * @param response the ScanResponse whose per-result cell counts describe the cellblock
 * @return results, or null if the response is null
 * @throws IOException
 */
public static Result[] getResults(CellScanner cellScanner, ScanResponse response)
    throws IOException {
  if (response == null)
    return null;
  // If there is a cellScanner, then the number of Results to return is the count of elements
  // in the cellsPerResult list. Otherwise, it is how many results are embedded inside the response.
  int noOfResults = cellScanner != null ? response.getCellsPerResultCount() : response.getResultsCount();
  Result[] results = new Result[noOfResults];
  for (int i = 0; i < noOfResults; i++) {
    if (cellScanner != null) {
      // Cells are out in cellblocks. Group them up again as Results. How many to read at a
      // time is in getCellsPerResult -- the i'th entry is how many Cells are in the i'th Result.
      int noOfCells = response.getCellsPerResult(i);
      boolean isPartial = response.getPartialFlagPerResultCount() > i
          ? response.getPartialFlagPerResult(i) : false;
      List<Cell> cells = new ArrayList<>(noOfCells);
      for (int j = 0; j < noOfCells; j++) {
        try {
          if (!cellScanner.advance()) {
            // We could not retrieve the exact number of cells that the result metadata promised.
            // We have to scan for the same results again. Throwing DNRIOE because a client retry
            // on the same scanner would result in OutOfOrderScannerNextException.
            String msg = "Results sent from server=" + noOfResults + ". But only got " + i
                + " results completely at client. Resetting the scanner to scan again.";
            LOG.error(msg);
            throw new DoNotRetryIOException(msg);
          }
        } catch (IOException ioe) {
          // We got an IOException while retrieving the cells for Results.
          // We have to scan for the same results again. Throwing DNRIOE because a client retry
          // on the same scanner would result in OutOfOrderScannerNextException.
          LOG.error("Exception while reading cells from result. Resetting the scanner to scan again.", ioe);
          throw new DoNotRetryIOException("Resetting the scanner.", ioe);
        }
        cells.add(cellScanner.current());
      }
      results[i] = Result.create(cells, null, response.getStale(), isPartial);
    } else {
      // Result is pure pb.
      results[i] = ProtobufUtil.toResult(response.getResults(i));
    }
  }
  return results;
}
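A hedged client-side sketch: after a scan RPC, the cellblock (if the server sent one) is exposed by the RPC controller, and getResults stitches it back into Result objects; controller and request below are illustrative stand-ins for an HBaseRpcController and a ScanRequest:

ScanResponse response = stub.scan(controller, request);
// Null when the server embedded the results in the pb message itself.
CellScanner cellScanner = controller.cellScanner();
Result[] results = ResponseConverter.getResults(cellScanner, response);
for (Result result : results) {
  // Each Result groups the cells of one row, carrying the stale/partial flags set above.
  process(result);  // hypothetical consumer
}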
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
The class ProtobufUtil, method toAppend.
/**
 * Convert a protocol buffer Mutate to an Append.
 * @param proto the protocol buffer Mutate to convert
 * @param cellScanner the CellScanner carrying the Cells when the proto holds metadata only
 * @return the converted client Append
 * @throws IOException
 */
public static Append toAppend(final MutationProto proto, final CellScanner cellScanner)
    throws IOException {
  MutationType type = proto.getMutateType();
  assert type == MutationType.APPEND : type.name();
  byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
  Append append = row != null ? new Append(row) : null;
  int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
  if (cellCount > 0) {
    // The proto has metadata only and the data is separate to be found in the cellScanner.
    if (cellScanner == null) {
      throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: "
          + toShortString(proto));
    }
    for (int i = 0; i < cellCount; i++) {
      if (!cellScanner.advance()) {
        throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i
            + " no cell returned: " + toShortString(proto));
      }
      Cell cell = cellScanner.current();
      if (append == null) {
        append = new Append(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
      }
      append.add(cell);
    }
  } else {
    if (append == null) {
      throw new IllegalArgumentException("row cannot be null");
    }
    for (ColumnValue column : proto.getColumnValueList()) {
      byte[] family = column.getFamily().toByteArray();
      for (QualifierValue qv : column.getQualifierValueList()) {
        byte[] qualifier = qv.getQualifier().toByteArray();
        if (!qv.hasValue()) {
          throw new DoNotRetryIOException("Missing required field: qualifier value");
        }
        byte[] value = qv.getValue().toByteArray();
        byte[] tags = null;
        if (qv.hasTags()) {
          tags = qv.getTags().toByteArray();
        }
        append.add(CellUtil.createCell(row, family, qualifier, qv.getTimestamp(),
            KeyValue.Type.Put, value, tags));
      }
    }
  }
  append.setDurability(toDurability(proto.getDurability()));
  for (NameBytesPair attribute : proto.getAttributeList()) {
    append.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
  }
  return append;
}
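The same round-trip works for Append, again taking the cellCount == 0 branch because ProtobufUtil.toMutation embeds the values in the proto; the bytes below are illustrative:

Append original = new Append(Bytes.toBytes("row1"));
original.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
MutationProto proto = ProtobufUtil.toMutation(MutationType.APPEND, original);
// No cellblock in this sketch, so the CellScanner may be null.
Append roundTripped = ProtobufUtil.toAppend(proto, null);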