Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType in project hbase by apache.
The class ProtobufUtil, method toPut.
/**
* Convert a protocol buffer Mutate to a Put.
*
* @param proto The protocol buffer MutationProto to convert
* @param cellScanner If non-null, the Cell data that goes with this proto.
* @return A client Put.
 * @throws IOException if the cell data backing the proto cannot be read
*/
public static Put toPut(final MutationProto proto, final CellScanner cellScanner) throws IOException {
// TODO: Server-side at least why do we convert back to the Client types? Why not just pb it?
MutationType type = proto.getMutateType();
assert type == MutationType.PUT : type.name();
long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP;
Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null;
int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
if (cellCount > 0) {
// The proto has metadata only and the data is separate to be found in the cellScanner.
if (cellScanner == null) {
throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto));
}
for (int i = 0; i < cellCount; i++) {
if (!cellScanner.advance()) {
throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + toShortString(proto));
}
Cell cell = cellScanner.current();
if (put == null) {
put = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp);
}
put.add(cell);
}
} else {
if (put == null) {
throw new IllegalArgumentException("row cannot be null");
}
// The proto has the metadata and the data itself
ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY);
for (ColumnValue column : proto.getColumnValueList()) {
byte[] family = column.getFamily().toByteArray();
for (QualifierValue qv : column.getQualifierValueList()) {
if (!qv.hasValue()) {
throw new DoNotRetryIOException("Missing required field: qualifier value");
}
long ts = timestamp;
if (qv.hasTimestamp()) {
ts = qv.getTimestamp();
}
byte[] allTagsBytes;
if (qv.hasTags()) {
  allTagsBytes = qv.getTags().toByteArray();
  if (qv.hasDeleteType()) {
    put.add(cellBuilder.clear()
      .setRow(put.getRow())
      .setFamily(family)
      .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null)
      .setTimestamp(ts)
      .setType(fromDeleteType(qv.getDeleteType()).getCode())
      .setTags(allTagsBytes)
      .build());
  } else {
    put.add(cellBuilder.clear()
      .setRow(put.getRow())
      .setFamily(family)
      .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null)
      .setTimestamp(ts)
      .setType(Cell.Type.Put)
      .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null)
      .setTags(allTagsBytes)
      .build());
  }
} else {
  if (qv.hasDeleteType()) {
    put.add(cellBuilder.clear()
      .setRow(put.getRow())
      .setFamily(family)
      .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null)
      .setTimestamp(ts)
      .setType(fromDeleteType(qv.getDeleteType()).getCode())
      .build());
  } else {
    put.add(cellBuilder.clear()
      .setRow(put.getRow())
      .setFamily(family)
      .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null)
      .setTimestamp(ts)
      .setType(Cell.Type.Put)
      .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null)
      .build());
  }
}
}
}
}
put.setDurability(toDurability(proto.getDurability()));
for (NameBytesPair attribute : proto.getAttributeList()) {
put.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
}
return put;
}
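As a usage sketch, the following round-trips a client Put through the shaded protobuf form and back via toPut. The table coordinates are illustrative; since the column data is inlined in the proto (no associated cell count), a null CellScanner is acceptable:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.util.Bytes;

public class ToPutRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Put original = new Put(Bytes.toBytes("row-1"));
    original.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    // Convert to the shaded protobuf form; qualifier values are carried in the proto itself.
    MutationProto proto = ProtobufUtil.toMutation(MutationProto.MutationType.PUT, original);
    // No associated cell count was set, so passing a null CellScanner is fine here.
    Put roundTripped = ProtobufUtil.toPut(proto, null);
    System.out.println(Bytes.toString(roundTripped.getRow())); // prints: row-1
  }
}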
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType in project hbase by apache.
The class ProtobufUtil, method toCheckAndMutate.
public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, MutationProto mutation, CellScanner cellScanner) throws IOException {
byte[] row = condition.getRow().toByteArray();
CheckAndMutate.Builder builder = CheckAndMutate.newBuilder(row);
Filter filter = condition.hasFilter() ? ProtobufUtil.toFilter(condition.getFilter()) : null;
if (filter != null) {
builder.ifMatches(filter);
} else {
builder.ifMatches(condition.getFamily().toByteArray(),
  condition.getQualifier().toByteArray(),
  CompareOperator.valueOf(condition.getCompareType().name()),
  ProtobufUtil.toComparator(condition.getComparator()).getValue());
}
TimeRange timeRange = condition.hasTimeRange() ? ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime();
builder.timeRange(timeRange);
try {
MutationType type = mutation.getMutateType();
switch (type) {
case PUT:
return builder.build(ProtobufUtil.toPut(mutation, cellScanner));
case DELETE:
return builder.build(ProtobufUtil.toDelete(mutation, cellScanner));
case INCREMENT:
return builder.build(ProtobufUtil.toIncrement(mutation, cellScanner));
case APPEND:
return builder.build(ProtobufUtil.toAppend(mutation, cellScanner));
default:
throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
}
} catch (IllegalArgumentException e) {
throw new DoNotRetryIOException(e.getMessage());
}
}
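For context, this is the client-side CheckAndMutate shape that the conversion above reconstructs. A minimal sketch with illustrative values; the filter-based branch would use ifMatches(filter) instead of the family/qualifier check:

import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row-1");
    Put put = new Put(row);
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("new-value"));
    // Family/qualifier/value check, mirroring the non-filter branch of toCheckAndMutate.
    CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
      .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("expected"))
      .timeRange(TimeRange.allTime())
      .build(put);
    System.out.println(checkAndMutate.getRow().length);
  }
}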
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType in project hbase by apache.
The class ProtobufUtil, method toDelete.
/**
* Convert a protocol buffer Mutate to a Delete
*
* @param proto the protocol buffer Mutate to convert
* @param cellScanner if non-null, the data that goes with this delete.
* @return the converted client Delete
 * @throws IOException if the cell data backing the proto cannot be read
*/
public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner) throws IOException {
MutationType type = proto.getMutateType();
assert type == MutationType.DELETE : type.name();
long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP;
Delete delete = proto.hasRow() ? new Delete(proto.getRow().toByteArray(), timestamp) : null;
int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
if (cellCount > 0) {
// The proto has metadata only and the data is separate to be found in the cellScanner.
if (cellScanner == null) {
// TextFormat should be fine for a Delete since it carries no data, just coordinates.
throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + TextFormat.shortDebugString(proto));
}
for (int i = 0; i < cellCount; i++) {
if (!cellScanner.advance()) {
// TextFormat should be fine for a Delete since it carries no data, just coordinates.
throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + TextFormat.shortDebugString(proto));
}
Cell cell = cellScanner.current();
if (delete == null) {
delete = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp);
}
delete.add(cell);
}
} else {
if (delete == null) {
throw new IllegalArgumentException("row cannot be null");
}
for (ColumnValue column : proto.getColumnValueList()) {
byte[] family = column.getFamily().toByteArray();
for (QualifierValue qv : column.getQualifierValueList()) {
DeleteType deleteType = qv.getDeleteType();
byte[] qualifier = null;
if (qv.hasQualifier()) {
qualifier = qv.getQualifier().toByteArray();
}
long ts = cellTimestampOrLatest(qv);
if (deleteType == DeleteType.DELETE_ONE_VERSION) {
delete.addColumn(family, qualifier, ts);
} else if (deleteType == DeleteType.DELETE_MULTIPLE_VERSIONS) {
delete.addColumns(family, qualifier, ts);
} else if (deleteType == DeleteType.DELETE_FAMILY_VERSION) {
delete.addFamilyVersion(family, ts);
} else {
delete.addFamily(family, ts);
}
}
}
}
delete.setDurability(toDurability(proto.getDurability()));
for (NameBytesPair attribute : proto.getAttributeList()) {
delete.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
}
return delete;
}
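The DeleteType branches above map one-to-one onto the client Delete API. A hedged round-trip sketch (coordinates are illustrative):

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.util.Bytes;

public class ToDeleteRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Delete original = new Delete(Bytes.toBytes("row-1"));
    original.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"));  // latest version -> DELETE_ONE_VERSION
    original.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q2")); // all versions  -> DELETE_MULTIPLE_VERSIONS
    original.addFamily(Bytes.toBytes("cf2"));                      // whole family  -> DELETE_FAMILY
    MutationProto proto = ProtobufUtil.toMutation(MutationProto.MutationType.DELETE, original);
    Delete roundTripped = ProtobufUtil.toDelete(proto, null);
    System.out.println(roundTripped.getFamilyCellMap().size()); // prints: 2
  }
}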
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType in project hbase by apache.
The class MultiThreadedAction, method parseMutateInfo.
// Parse mutate info into a map of <column name> => <update action>
private Map<String, MutationType> parseMutateInfo(byte[] mutateInfo) {
Map<String, MutationType> mi = new HashMap<>();
if (mutateInfo != null) {
String mutateInfoStr = Bytes.toString(mutateInfo);
String[] mutations = mutateInfoStr.split("#");
for (String mutation : mutations) {
if (mutation.isEmpty()) {
  continue;
}
Preconditions.checkArgument(mutation.contains(":"), "Invalid mutation info " + mutation);
int p = mutation.indexOf(":");
String column = mutation.substring(0, p);
MutationType type = MutationType.valueOf(Integer.parseInt(mutation.substring(p + 1)));
mi.put(column, type);
}
}
return mi;
}
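The expected encoding is '#'-separated <column>:<type-number> pairs, where the number is the MutationType protobuf value (APPEND = 0, INCREMENT = 1, PUT = 2, DELETE = 3, assuming the stock ClientProtos numbering). Since parseMutateInfo is private, a data-only sketch of the format:

// Illustrative mutate-info payload for two columns:
byte[] mutateInfo = Bytes.toBytes("col-a:3#col-b:0");
// parseMutateInfo(mutateInfo) would return:
//   "col-a" -> MutationType.DELETE
//   "col-b" -> MutationType.APPEND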
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType in project hbase by apache.
The class MultiThreadedAction, method verifyResultAgainstDataGenerator.
/**
* Verifies the result from get or scan using the dataGenerator (that was presumably
* also used to generate said result).
 * @param verifyValues verify that the values in the result make sense for the
 *                     row/cf/column combination
 * @param verifyCfAndColumnIntegrity verify that the cf/column set in the result is
 *                     complete. Note that this requires writes to go through multi-put,
 *                     or verification has to happen after the writes finish; otherwise
 *                     there can be races.
 * @return true if the values in the result make sense for the row/cf/column combination
 *         and the cf/column set in the result is complete; false otherwise.
*/
public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyValues, boolean verifyCfAndColumnIntegrity) {
String rowKeyStr = Bytes.toString(result.getRow());
// See if we have any data at all.
if (result.isEmpty()) {
LOG.error("Error checking data for key [" + rowKeyStr + "], no data returned");
printLocations(result);
return false;
}
if (!verifyValues && !verifyCfAndColumnIntegrity) {
// as long as we have something, we are good.
return true;
}
// See if we have all the CFs.
byte[][] expectedCfs = dataGenerator.getColumnFamilies();
if (verifyCfAndColumnIntegrity && (expectedCfs.length != result.getMap().size())) {
LOG.error("Error checking data for key [" + rowKeyStr + "], bad family count: " + result.getMap().size());
printLocations(result);
return false;
}
// Verify each column family from get in the result.
for (byte[] cf : result.getMap().keySet()) {
String cfStr = Bytes.toString(cf);
Map<byte[], byte[]> columnValues = result.getFamilyMap(cf);
if (columnValues == null) {
LOG.error("Error checking data for key [" + rowKeyStr + "], no data for family [" + cfStr + "]]");
printLocations(result);
return false;
}
Map<String, MutationType> mutateInfo = null;
if (verifyCfAndColumnIntegrity || verifyValues) {
if (!columnValues.containsKey(MUTATE_INFO)) {
LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [" + Bytes.toString(MUTATE_INFO) + "]; value is not found");
printLocations(result);
return false;
}
long cfHash = Arrays.hashCode(cf);
// Verify deleted columns, and make up column counts if deleted
byte[] mutateInfoValue = columnValues.remove(MUTATE_INFO);
mutateInfo = parseMutateInfo(mutateInfoValue);
for (Map.Entry<String, MutationType> mutate : mutateInfo.entrySet()) {
if (mutate.getValue() == MutationType.DELETE) {
byte[] column = Bytes.toBytes(mutate.getKey());
long columnHash = Arrays.hashCode(column);
long hashCode = cfHash + columnHash;
if (hashCode % 2 == 0) {
if (columnValues.containsKey(column)) {
LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [" + mutate.getKey() + "]; should be deleted");
printLocations(result);
return false;
}
byte[] hashCodeBytes = Bytes.toBytes(hashCode);
columnValues.put(column, hashCodeBytes);
}
}
}
// Verify increment
if (!columnValues.containsKey(INCREMENT)) {
LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [" + Bytes.toString(INCREMENT) + "]; value is not found");
printLocations(result);
return false;
}
long currentValue = Bytes.toLong(columnValues.remove(INCREMENT));
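// The verifier's model: the counter starts at Arrays.hashCode(row) and each
// increment adds cfHash, so the observed difference from the base value must
// be a multiple of cfHash (or zero if no increments ran).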
if (verifyValues) {
long amount = mutateInfo.isEmpty() ? 0 : cfHash;
long originalValue = Arrays.hashCode(result.getRow());
long extra = currentValue - originalValue;
if (extra != 0 && (amount == 0 || extra % amount != 0)) {
LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [increment], extra [" + extra + "], amount [" + amount + "]");
printLocations(result);
return false;
}
if (amount != 0 && extra != amount) {
LOG.warn("Warning checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [increment], incremented [" + (extra / amount) + "] times");
}
}
// See if we have correct columns.
if (verifyCfAndColumnIntegrity && !dataGenerator.verify(result.getRow(), cf, columnValues.keySet())) {
String colsStr = "";
for (byte[] col : columnValues.keySet()) {
if (colsStr.length() > 0) {
colsStr += ", ";
}
colsStr += "[" + Bytes.toString(col) + "]";
}
LOG.error("Error checking data for key [" + rowKeyStr + "], bad columns for family [" + cfStr + "]: " + colsStr);
printLocations(result);
return false;
}
// See if values check out.
if (verifyValues) {
for (Map.Entry<byte[], byte[]> kv : columnValues.entrySet()) {
String column = Bytes.toString(kv.getKey());
MutationType mutation = mutateInfo.get(column);
boolean verificationNeeded = true;
byte[] bytes = kv.getValue();
if (mutation != null) {
boolean mutationVerified = true;
long columnHash = Arrays.hashCode(kv.getKey());
long hashCode = cfHash + columnHash;
byte[] hashCodeBytes = Bytes.toBytes(hashCode);
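// APPEND concatenates hashCodeBytes onto the value once per append; strip
// trailing copies to recover the base value for the generator check below.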
if (mutation == MutationType.APPEND) {
int offset = bytes.length - hashCodeBytes.length;
mutationVerified = offset > 0 && Bytes.equals(hashCodeBytes, 0, hashCodeBytes.length, bytes, offset, hashCodeBytes.length);
if (mutationVerified) {
int n = 1;
while (true) {
int newOffset = offset - hashCodeBytes.length;
if (newOffset < 0 || !Bytes.equals(hashCodeBytes, 0, hashCodeBytes.length, bytes, newOffset, hashCodeBytes.length)) {
break;
}
offset = newOffset;
n++;
}
if (n > 1) {
LOG.warn("Warning checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [" + column + "], appended [" + n + "] times");
}
byte[] dest = new byte[offset];
System.arraycopy(bytes, 0, dest, 0, offset);
bytes = dest;
}
} else if (hashCode % 2 == 0) {
// Even hash codes were written via checkAndPut with hashCodeBytes as the value,
// so compare directly and skip the generator-based check below.
mutationVerified = Bytes.equals(bytes, hashCodeBytes);
verificationNeeded = false;
}
if (!mutationVerified) {
LOG.error("Error checking data for key [" + rowKeyStr + "], mutation checking failed for column family [" + cfStr + "], column [" + column + "]; mutation [" + mutation + "], hashCode [" + hashCode + "], verificationNeeded [" + verificationNeeded + "]");
printLocations(result);
return false;
}
}
// end of mutation checking
if (verificationNeeded && !dataGenerator.verify(result.getRow(), cf, kv.getKey(), bytes)) {
LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [" + column + "], mutation [" + mutation + "]; value of length " + bytes.length);
printLocations(result);
return false;
}
}
}
}
}
return true;
}
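A minimal sketch of invoking this check from a reader, written as if inside a MultiThreadedAction subclass; the table, row key, and failure counter wiring are assumed and illustrative:

// Inside a reader thread (hypothetical wiring):
Result result = table.get(new Get(rowKey));
if (!verifyResultAgainstDataGenerator(result, true, true)) {
  failureCounter.incrementAndGet(); // hypothetical failure counter
}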