
Example 21 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class CompressionTest, method testCompression.

public static void testCompression(Compression.Algorithm algo) throws IOException {
    if (compressionTestResults[algo.ordinal()] != null) {
        if (compressionTestResults[algo.ordinal()]) {
            // Already passed the test; don't run it again.
            return;
        } else {
            // failed.
            throw new DoNotRetryIOException("Compression algorithm '" + algo.getName() + "'" + " previously failed test.");
        }
    }
    try {
        Compressor c = algo.getCompressor();
        algo.returnCompressor(c);
        // passes
        compressionTestResults[algo.ordinal()] = true;
    } catch (Throwable t) {
        // failure
        compressionTestResults[algo.ordinal()] = false;
        throw new DoNotRetryIOException(t);
    }
}
Also used: DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Compressor(org.apache.hadoop.io.compress.Compressor)
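
A quick usage sketch (the wrapper class and codec choice below are illustrative, not part of the HBase source): calling testCompression twice shows the fail-fast caching, since the second call either returns immediately or rethrows DoNotRetryIOException without exercising the codec again.

import java.io.IOException;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.util.CompressionTest;

public class CompressionCheck {
    public static void main(String[] args) {
        try {
            // First call exercises the codec; the result is cached in
            // compressionTestResults[algo.ordinal()].
            CompressionTest.testCompression(Compression.Algorithm.SNAPPY);
            // Second call returns immediately from the cached result.
            CompressionTest.testCompression(Compression.Algorithm.SNAPPY);
            System.out.println("SNAPPY codec is usable");
        } catch (IOException e) {
            // A DoNotRetryIOException here means the codec failed, now or on
            // an earlier call; retrying cannot succeed.
            System.err.println("SNAPPY codec unavailable: " + e);
        }
    }
}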

Example 22 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class ProtobufUtil, method toDelete.

/**
   * Convert a protocol buffer Mutate to a Delete
   *
   * @param proto the protocol buffer Mutate to convert
   * @param cellScanner if non-null, the data that goes with this delete.
   * @return the converted client Delete
   * @throws IOException
   */
public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner) throws IOException {
    MutationType type = proto.getMutateType();
    assert type == MutationType.DELETE : type.name();
    long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP;
    Delete delete = proto.hasRow() ? new Delete(proto.getRow().toByteArray(), timestamp) : null;
    int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
    if (cellCount > 0) {
        // The proto has metadata only and the data is separate to be found in the cellScanner.
        if (cellScanner == null) {
            // TextFormat should be fine for a Delete since it carries no data, just coordinates.
            throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + TextFormat.shortDebugString(proto));
        }
        for (int i = 0; i < cellCount; i++) {
            if (!cellScanner.advance()) {
                // TextFormat should be fine for a Delete since it carries no data, just coordinates.
                throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + TextFormat.shortDebugString(proto));
            }
            Cell cell = cellScanner.current();
            if (delete == null) {
                delete = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp);
            }
            delete.addDeleteMarker(cell);
        }
    } else {
        if (delete == null) {
            throw new IllegalArgumentException("row cannot be null");
        }
        for (ColumnValue column : proto.getColumnValueList()) {
            byte[] family = column.getFamily().toByteArray();
            for (QualifierValue qv : column.getQualifierValueList()) {
                DeleteType deleteType = qv.getDeleteType();
                byte[] qualifier = null;
                if (qv.hasQualifier()) {
                    qualifier = qv.getQualifier().toByteArray();
                }
                long ts = HConstants.LATEST_TIMESTAMP;
                if (qv.hasTimestamp()) {
                    ts = qv.getTimestamp();
                }
                if (deleteType == DeleteType.DELETE_ONE_VERSION) {
                    delete.addColumn(family, qualifier, ts);
                } else if (deleteType == DeleteType.DELETE_MULTIPLE_VERSIONS) {
                    delete.addColumns(family, qualifier, ts);
                } else if (deleteType == DeleteType.DELETE_FAMILY_VERSION) {
                    delete.addFamilyVersion(family, ts);
                } else {
                    delete.addFamily(family, ts);
                }
            }
        }
    }
    delete.setDurability(toDurability(proto.getDurability()));
    for (NameBytesPair attribute : proto.getAttributeList()) {
        delete.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
    }
    return delete;
}
Also used: Delete(org.apache.hadoop.hbase.client.Delete) MutationType(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType) NameBytesPair(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) QualifierValue(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue) ColumnValue(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue) DeleteType(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType) Cell(org.apache.hadoop.hbase.Cell)
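
A minimal round-trip sketch, assuming the companion serializer ProtobufUtil.toMutation(MutationType, Mutation) from the same class; the wrapper class is hypothetical. Because no associated cell count is set, toDelete takes the column-value branch, so a null CellScanner is acceptable.

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteRoundTrip {
    public static void main(String[] args) throws Exception {
        Delete original = new Delete(Bytes.toBytes("row-1"));
        original.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        // Serialize to the wire format, then convert back. The delete
        // markers travel inside the proto's column-value list, not in a
        // separate cell block.
        MutationProto proto = ProtobufUtil.toMutation(MutationType.DELETE, original);
        Delete restored = ProtobufUtil.toDelete(proto, null);
        System.out.println(Bytes.toString(restored.getRow()));  // prints: row-1
    }
}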

Example 23 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class ProtobufUtil, method toPut.

/**
   * Convert a protocol buffer Mutate to a Put.
   *
   * @param proto The protocol buffer MutationProto to convert
   * @param cellScanner If non-null, the Cell data that goes with this proto.
   * @return A client Put.
   * @throws IOException
   */
public static Put toPut(final MutationProto proto, final CellScanner cellScanner) throws IOException {
    // TODO: Server-side at least why do we convert back to the Client types?  Why not just pb it?
    MutationType type = proto.getMutateType();
    assert type == MutationType.PUT : type.name();
    long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP;
    Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null;
    int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
    if (cellCount > 0) {
        // The proto has metadata only and the data is separate to be found in the cellScanner.
        if (cellScanner == null) {
            throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto));
        }
        for (int i = 0; i < cellCount; i++) {
            if (!cellScanner.advance()) {
                throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + toShortString(proto));
            }
            Cell cell = cellScanner.current();
            if (put == null) {
                put = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp);
            }
            put.add(cell);
        }
    } else {
        if (put == null) {
            throw new IllegalArgumentException("row cannot be null");
        }
        // The proto has the metadata and the data itself
        for (ColumnValue column : proto.getColumnValueList()) {
            byte[] family = column.getFamily().toByteArray();
            for (QualifierValue qv : column.getQualifierValueList()) {
                if (!qv.hasValue()) {
                    throw new DoNotRetryIOException("Missing required field: qualifier value");
                }
                ByteBuffer qualifier = qv.hasQualifier() ? qv.getQualifier().asReadOnlyByteBuffer() : null;
                ByteBuffer value = qv.hasValue() ? qv.getValue().asReadOnlyByteBuffer() : null;
                long ts = timestamp;
                if (qv.hasTimestamp()) {
                    ts = qv.getTimestamp();
                }
                byte[] allTagsBytes;
                if (qv.hasTags()) {
                    allTagsBytes = qv.getTags().toByteArray();
                    if (qv.hasDeleteType()) {
                        byte[] qual = qv.hasQualifier() ? qv.getQualifier().toByteArray() : null;
                        put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, ts, fromDeleteType(qv.getDeleteType()), null, allTagsBytes));
                    } else {
                        List<Tag> tags = TagUtil.asList(allTagsBytes, 0, (short) allTagsBytes.length);
                        Tag[] tagsArray = new Tag[tags.size()];
                        put.addImmutable(family, qualifier, ts, value, tags.toArray(tagsArray));
                    }
                } else {
                    if (qv.hasDeleteType()) {
                        byte[] qual = qv.hasQualifier() ? qv.getQualifier().toByteArray() : null;
                        put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, ts, fromDeleteType(qv.getDeleteType())));
                    } else {
                        put.addImmutable(family, qualifier, ts, value);
                    }
                }
            }
        }
    }
    put.setDurability(toDurability(proto.getDurability()));
    for (NameBytesPair attribute : proto.getAttributeList()) {
        put.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
    }
    return put;
}
Also used: MutationType(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType) KeyValue(org.apache.hadoop.hbase.KeyValue) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) QualifierValue(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue) ByteBuffer(java.nio.ByteBuffer) Put(org.apache.hadoop.hbase.client.Put) NameBytesPair(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) ColumnValue(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue) Tag(org.apache.hadoop.hbase.Tag) Cell(org.apache.hadoop.hbase.Cell)
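
The same round-trip works for Put, again assuming ProtobufUtil.toMutation as the serializer and with an illustrative wrapper class. The qualifier/value pairs are embedded in the proto rather than shipped as a separate cell block, so no CellScanner is needed on the way back.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.Bytes;

public class PutRoundTrip {
    public static void main(String[] args) throws Exception {
        Put original = new Put(Bytes.toBytes("row-1"));
        original.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        // Full serialization: the column values ride inside the proto, so
        // toPut's cellCount is 0 and the column-value branch is taken.
        MutationProto proto = ProtobufUtil.toMutation(MutationType.PUT, original);
        Put restored = ProtobufUtil.toPut(proto, null);
        System.out.println(restored.has(Bytes.toBytes("cf"), Bytes.toBytes("q")));  // true
    }
}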

Example 24 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class ProtobufUtil, method toFilter.

/**
   * Convert a protocol buffer Filter to a client Filter
   *
   * @param proto the protocol buffer Filter to convert
   * @return the converted Filter
   * @throws IOException if the filter class cannot be loaded or its parseFrom invocation fails
   */
@SuppressWarnings("unchecked")
public static Filter toFilter(FilterProtos.Filter proto) throws IOException {
    String type = proto.getName();
    final byte[] value = proto.getSerializedFilter().toByteArray();
    String funcName = "parseFrom";
    try {
        Class<? extends Filter> c = (Class<? extends Filter>) Class.forName(type, true, CLASS_LOADER);
        Method parseFrom = c.getMethod(funcName, byte[].class);
        if (parseFrom == null) {
            throw new IOException("Unable to locate function: " + funcName + " in type: " + type);
        }
        return (Filter) parseFrom.invoke(c, value);
    } catch (Exception e) {
        // In either case, let's not retry.
        throw new DoNotRetryIOException(e);
    }
}
Also used: Filter(org.apache.hadoop.hbase.filter.Filter) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ByteString(com.google.protobuf.ByteString) Method(java.lang.reflect.Method) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) IOException(java.io.IOException) ServiceException(com.google.protobuf.ServiceException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) RemoteException(org.apache.hadoop.ipc.RemoteException)
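
A round-trip sketch, assuming the reverse converter ProtobufUtil.toFilter(Filter) in the same class; the wrapper class is illustrative. Serializing a concrete filter and converting back exercises the reflective parseFrom path shown above; any failure along that path surfaces as DoNotRetryIOException.

import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterRoundTrip {
    public static void main(String[] args) throws Exception {
        Filter original = new PrefixFilter(Bytes.toBytes("user-"));
        // toFilter(Filter) records the filter's class name plus its own
        // serialized bytes; the reverse path loads that class and invokes
        // its static parseFrom(byte[]) reflectively.
        FilterProtos.Filter proto = ProtobufUtil.toFilter(original);
        Filter restored = ProtobufUtil.toFilter(proto);
        System.out.println(restored.getClass().getSimpleName());  // PrefixFilter
    }
}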

Example 25 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class CellBlockBuilder, method encodeCellsTo.

private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec, CompressionCodec compressor) throws IOException {
    Compressor poolCompressor = null;
    try {
        if (compressor != null) {
            if (compressor instanceof Configurable) {
                ((Configurable) compressor).setConf(this.conf);
            }
            poolCompressor = CodecPool.getCompressor(compressor);
            os = compressor.createOutputStream(os, poolCompressor);
        }
        Codec.Encoder encoder = codec.getEncoder(os);
        while (cellScanner.advance()) {
            encoder.write(cellScanner.current());
        }
        encoder.flush();
    } catch (BufferOverflowException | IndexOutOfBoundsException e) {
        throw new DoNotRetryIOException(e);
    } finally {
        os.close();
        if (poolCompressor != null) {
            CodecPool.returnCompressor(poolCompressor);
        }
    }
}
Also used: Codec(org.apache.hadoop.hbase.codec.Codec) CompressionCodec(org.apache.hadoop.io.compress.CompressionCodec) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Compressor(org.apache.hadoop.io.compress.Compressor) Configurable(org.apache.hadoop.conf.Configurable) BufferOverflowException(java.nio.BufferOverflowException)
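
encodeCellsTo is private; the public entry point is buildCellBlock. A sketch of that path, assuming HBase 2.x, where CellBlockBuilder is constructible from a Configuration and this buildCellBlock overload exists; the wrapper class is illustrative. Passing a null CompressionCodec skips the compressor-pool branch above.

import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.KeyValueCodec;
import org.apache.hadoop.hbase.ipc.CellBlockBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CellBlockSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        CellBlockBuilder builder = new CellBlockBuilder(conf);
        Cell cell = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
            Bytes.toBytes("q"), Bytes.toBytes("v"));
        // buildCellBlock funnels into encodeCellsTo; BufferOverflowException
        // or IndexOutOfBoundsException during encoding would surface as a
        // DoNotRetryIOException, since re-sending the same cells cannot help.
        ByteBuffer block = builder.buildCellBlock(new KeyValueCodec(), null,
            CellUtil.createCellScanner(new Cell[] { cell }));
        System.out.println("cell block bytes: " + block.remaining());
    }
}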

Aggregations

DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 77 usages
IOException (java.io.IOException): 28 usages
Cell (org.apache.hadoop.hbase.Cell): 18 usages
ArrayList (java.util.ArrayList): 12 usages
ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException): 12 usages
MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType): 12 usages
TableName (org.apache.hadoop.hbase.TableName): 11 usages
InterruptedIOException (java.io.InterruptedIOException): 10 usages
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 10 usages
Delete (org.apache.hadoop.hbase.client.Delete): 10 usages
Put (org.apache.hadoop.hbase.client.Put): 10 usages
Test (org.junit.Test): 10 usages
AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException): 9 usages
User (org.apache.hadoop.hbase.security.User): 8 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 7 usages
ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString): 7 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 6 usages
NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair): 6 usages
ByteBufferCell (org.apache.hadoop.hbase.ByteBufferCell): 5 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 5 usages