Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.
In class ProtobufUtil, method toAppend.
/**
 * Convert a protocol buffer Mutate to an Append.
 * @param proto the protocol buffer Mutate to convert
 * @param cellScanner scanner over the cell data when the proto carries only metadata
 * @return the converted client Append
 * @throws IOException if the proto is malformed or the cells cannot be read
 */
public static Append toAppend(final MutationProto proto, final CellScanner cellScanner) throws IOException {
  MutationType type = proto.getMutateType();
  assert type == MutationType.APPEND : type.name();
  byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
  Append append = row != null ? new Append(row) : null;
  int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
  if (cellCount > 0) {
    // The proto has metadata only and the data is separate to be found in the cellScanner.
    if (cellScanner == null) {
      throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto));
    }
    for (int i = 0; i < cellCount; i++) {
      if (!cellScanner.advance()) {
        throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + toShortString(proto));
      }
      Cell cell = cellScanner.current();
      if (append == null) {
        append = new Append(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
      }
      append.add(cell);
    }
  } else {
    if (append == null) {
      throw new IllegalArgumentException("row cannot be null");
    }
    for (ColumnValue column : proto.getColumnValueList()) {
      byte[] family = column.getFamily().toByteArray();
      for (QualifierValue qv : column.getQualifierValueList()) {
        byte[] qualifier = qv.getQualifier().toByteArray();
        if (!qv.hasValue()) {
          throw new DoNotRetryIOException("Missing required field: qualifier value");
        }
        byte[] value = qv.getValue().toByteArray();
        byte[] tags = null;
        if (qv.hasTags()) {
          tags = qv.getTags().toByteArray();
        }
        append.add(CellUtil.createCell(row, family, qualifier, qv.getTimestamp(), KeyValue.Type.Put, value, tags));
      }
    }
  }
  append.setDurability(toDurability(proto.getDurability()));
  for (NameBytesPair attribute : proto.getAttributeList()) {
    append.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
  }
  return append;
}
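For context, a hedged round-trip sketch (not from the HBase source; the class name and values are ours, and the import paths assume the shaded-protobuf module of this branch): when the cell data travels inline in the proto rather than in an associated CellScanner, passing a null scanner is fine and none of the DoNotRetryIOException paths above fire.

import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Append original = new Append(Bytes.toBytes("row-1"));
    original.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
    // Data goes inline as ColumnValues, so associated_cell_count stays unset
    // and toAppend takes the no-scanner branch.
    MutationProto proto = ProtobufUtil.toMutation(MutationType.APPEND, original);
    Append roundTripped = ProtobufUtil.toAppend(proto, null);
  }
}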
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.
In class ProtobufUtil, method toGet.
/**
 * Convert a protocol buffer Mutate to a Get.
 * @param proto the protocol buffer Mutate to convert
 * @param cellScanner scanner over the cell data when the proto carries only metadata
 * @return the converted client Get
 * @throws IOException if the proto is malformed or the cells cannot be read
 */
public static Get toGet(final MutationProto proto, final CellScanner cellScanner) throws IOException {
  MutationType type = proto.getMutateType();
  assert type == MutationType.INCREMENT || type == MutationType.APPEND : type.name();
  byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
  Get get = null;
  int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
  if (cellCount > 0) {
    // The proto has metadata only and the data is separate to be found in the cellScanner.
    if (cellScanner == null) {
      throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + TextFormat.shortDebugString(proto));
    }
    for (int i = 0; i < cellCount; i++) {
      if (!cellScanner.advance()) {
        throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + TextFormat.shortDebugString(proto));
      }
      Cell cell = cellScanner.current();
      if (get == null) {
        get = new Get(CellUtil.cloneRow(cell));
      }
      get.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell));
    }
  } else {
    get = new Get(row);
    for (ColumnValue column : proto.getColumnValueList()) {
      byte[] family = column.getFamily().toByteArray();
      for (QualifierValue qv : column.getQualifierValueList()) {
        byte[] qualifier = qv.getQualifier().toByteArray();
        if (!qv.hasValue()) {
          throw new DoNotRetryIOException("Missing required field: qualifier value");
        }
        get.addColumn(family, qualifier);
      }
    }
  }
  if (proto.hasTimeRange()) {
    TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
    get.setTimeRange(timeRange);
  }
  for (NameBytesPair attribute : proto.getAttributeList()) {
    get.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
  }
  return get;
}
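For context, the server uses this conversion to read back exactly the columns an Increment or Append touches. A hedged round-trip sketch (the class name and values are ours; the toMutation overload with a nonce is the one invoked in buildRegionAction below, and import paths assume the shaded-protobuf module of this branch):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementToGetSketch {
  public static void main(String[] args) throws Exception {
    Increment increment = new Increment(Bytes.toBytes("row-1"));
    increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
    // Serialize with the data inline, then derive the Get that reads the
    // cf:counter column; no CellScanner is needed on this path.
    MutationProto proto =
        ProtobufUtil.toMutation(increment, MutationProto.newBuilder(), HConstants.NO_NONCE);
    Get get = ProtobufUtil.toGet(proto, null);
  }
}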
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.
In class RequestConverter, method buildRegionAction.
/**
 * Create a protocol buffer multi request for a list of actions.
 * Propagates each Action's original index.
 *
 * @param regionName the region the actions are destined for
 * @param actions the client-side actions to convert
 * @param regionActionBuilder the builder the converted actions are added to
 * @param actionBuilder a reusable builder for individual actions
 * @param mutationBuilder a reusable builder for mutation protos
 * @return the region action builder with all actions added
 * @throws IOException if an action cannot be converted
 */
public static RegionAction.Builder buildRegionAction(final byte[] regionName,
    final List<Action> actions, final RegionAction.Builder regionActionBuilder,
    final ClientProtos.Action.Builder actionBuilder,
    final MutationProto.Builder mutationBuilder) throws IOException {
  ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
  for (Action action : actions) {
    Row row = action.getAction();
    actionBuilder.clear();
    actionBuilder.setIndex(action.getOriginalIndex());
    mutationBuilder.clear();
    if (row instanceof Get) {
      Get g = (Get) row;
      regionActionBuilder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
    } else if (row instanceof Put) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
          ProtobufUtil.toMutation(MutationType.PUT, (Put) row, mutationBuilder)));
    } else if (row instanceof Delete) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
          ProtobufUtil.toMutation(MutationType.DELETE, (Delete) row, mutationBuilder)));
    } else if (row instanceof Append) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
          ProtobufUtil.toMutation(MutationType.APPEND, (Append) row, mutationBuilder, action.getNonce())));
    } else if (row instanceof Increment) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
          ProtobufUtil.toMutation((Increment) row, mutationBuilder, action.getNonce())));
    } else if (row instanceof RegionCoprocessorServiceExec) {
      RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
      // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
      org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value =
          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations.unsafeWrap(
              exec.getRequest().toByteArray());
      if (cpBuilder == null) {
        cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
      } else {
        cpBuilder.clear();
      }
      regionActionBuilder.addAction(actionBuilder.setServiceCall(cpBuilder
          .setRow(UnsafeByteOperations.unsafeWrap(exec.getRow()))
          .setServiceName(exec.getMethod().getService().getFullName())
          .setMethodName(exec.getMethod().getName())
          .setRequest(value)));
    } else if (row instanceof RowMutations) {
      throw new UnsupportedOperationException("No RowMutations in multi calls; use mutateRow");
    } else {
      throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
    }
  }
  return regionActionBuilder;
}
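As a rough usage sketch (not from the HBase source; class and method names below are ours, and import paths assume the shaded-protobuf module of this branch), a caller might convert a small mixed batch for one region like this. In the real client the RegionAction.Builder passed in would already carry the region, set via getRegionActionBuilderWithRegion.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.client.Action;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiRequestSketch {
  static RegionAction.Builder regionActionFor(byte[] regionName) throws IOException {
    Put put = new Put(Bytes.toBytes("row-2"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    // The original indexes (0 and 1 here) let the server map results back
    // to the caller's batch positions.
    List<Action> actions = Arrays.asList(
        new Action(new Get(Bytes.toBytes("row-1")), 0),
        new Action(put, 1));
    return RequestConverter.buildRegionAction(regionName, actions,
        RegionAction.newBuilder(), ClientProtos.Action.newBuilder(),
        MutationProto.newBuilder());
  }
}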
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.
In class RequestConverter, method buildMutateRequest.
/**
 * Create a protocol buffer MultiRequest for conditioned row mutations.
 *
 * @param regionName the region the mutations are destined for
 * @param row the row the condition checks and the mutations modify
 * @param family the column family of the checked cell
 * @param qualifier the column qualifier of the checked cell
 * @param comparator the comparator applied to the checked cell's value
 * @param compareType the comparison operation
 * @param rowMutations the mutations to apply if the condition holds
 * @return a multi request wrapping the condition and mutations
 * @throws IOException if a mutation cannot be converted
 */
public static ClientProtos.MultiRequest buildMutateRequest(final byte[] regionName,
    final byte[] row, final byte[] family, final byte[] qualifier,
    final ByteArrayComparable comparator, final CompareType compareType,
    final RowMutations rowMutations) throws IOException {
  RegionAction.Builder builder =
      getRegionActionBuilderWithRegion(RegionAction.newBuilder(), regionName);
  builder.setAtomic(true);
  ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
  MutationProto.Builder mutationBuilder = MutationProto.newBuilder();
  Condition condition = buildCondition(row, family, qualifier, comparator, compareType);
  for (Mutation mutation : rowMutations.getMutations()) {
    MutationType mutateType = null;
    if (mutation instanceof Put) {
      mutateType = MutationType.PUT;
    } else if (mutation instanceof Delete) {
      mutateType = MutationType.DELETE;
    } else {
      throw new DoNotRetryIOException("RowMutations supports only put and delete, not " +
          mutation.getClass().getName());
    }
    mutationBuilder.clear();
    MutationProto mp = ProtobufUtil.toMutation(mutateType, mutation, mutationBuilder);
    actionBuilder.clear();
    actionBuilder.setMutation(mp);
    builder.addAction(actionBuilder.build());
  }
  ClientProtos.MultiRequest request = ClientProtos.MultiRequest.newBuilder()
      .addRegionAction(builder.build()).setCondition(condition).build();
  return request;
}
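A hedged sketch of how this supports checkAndMutate-style calls: apply a RowMutations to a row only if cf:q currently equals an expected value. Class and method names below are ours, and BinaryComparator with CompareType.EQUAL stands in for whatever condition a caller needs.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {
  static ClientProtos.MultiRequest requestFor(byte[] regionName) throws IOException {
    byte[] row = Bytes.toBytes("row-1");
    RowMutations mutations = new RowMutations(row);
    Put put = new Put(row);
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("new-value"));
    mutations.add(put);  // only Put and Delete are accepted, per the check above
    return RequestConverter.buildMutateRequest(regionName, row,
        Bytes.toBytes("cf"), Bytes.toBytes("q"),
        new BinaryComparator(Bytes.toBytes("expected-value")),
        CompareType.EQUAL, mutations);
  }
}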
Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.
In class ReplicationManager, method checkConfiguredWALEntryFilters.
private void checkConfiguredWALEntryFilters(ReplicationPeerConfig peerConfig) throws IOException {
  String filterCSV = peerConfig.getConfiguration()
      .get(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY);
  if (filterCSV != null && !filterCSV.isEmpty()) {
    String[] filters = filterCSV.split(",");
    for (String filter : filters) {
      try {
        // Verify each configured filter class can be loaded and instantiated.
        Class<?> clazz = Class.forName(filter);
        clazz.newInstance();
      } catch (Exception e) {
        throw new DoNotRetryIOException("Configured WALEntryFilter " + filter +
            " could not be created. Failing add/update peer operation.", e);
      }
    }
  }
}
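For context, a hedged sketch of the configuration this check validates (the filter class name and cluster key below are hypothetical):

import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class PeerFilterConfigSketch {
  public static void main(String[] args) {
    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
    peerConfig.setClusterKey("zk1:2181:/hbase");
    // A comma-separated list of custom WALEntryFilter class names.
    peerConfig.getConfiguration().put(
        BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
        "org.example.CustomWALEntryFilter");
    // checkConfiguredWALEntryFilters(peerConfig) would now try to load and
    // instantiate each listed class, and fail the add/update peer operation
    // with a DoNotRetryIOException if any cannot be created.
  }
}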