
Example 11 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by apache.

The class TestNonTxIndexBuilder, method setup().

/**
     * Test setup so that {@link NonTxIndexBuilder#getIndexUpdate(Mutation, IndexMetaData)} can be
     * called, where any read requests to
     * {@link LocalTable#getCurrentRowState(Mutation, Collection, boolean)} are read from our test
     * field 'currentRowCells'
     */
@Before
public void setup() throws Exception {
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration(false);
    conf.set(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName());
    Mockito.when(env.getConfiguration()).thenReturn(conf);
    // the following is used by LocalTable#getCurrentRowState()
    Region mockRegion = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(mockRegion);
    Mockito.when(mockRegion.getScanner(Mockito.any(Scan.class))).thenAnswer(new Answer<RegionScanner>() {

        @Override
        public RegionScanner answer(InvocationOnMock invocation) throws Throwable {
            Scan sArg = (Scan) invocation.getArguments()[0];
            TimeRange timeRange = sArg.getTimeRange();
            return getMockTimeRangeRegionScanner(timeRange);
        }
    });
    // the following is called by PhoenixIndexCodec#getIndexUpserts() and getIndexDeletes()
    HRegionInfo mockRegionInfo = Mockito.mock(HRegionInfo.class);
    Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
    Mockito.when(mockRegionInfo.getStartKey()).thenReturn(Bytes.toBytes("a"));
    Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z"));
    mockIndexMetaData = Mockito.mock(PhoenixIndexMetaData.class);
    Mockito.when(mockIndexMetaData.isImmutableRows()).thenReturn(false);
    Mockito.when(mockIndexMetaData.getIndexMaintainers()).thenReturn(Collections.singletonList(getTestIndexMaintainer()));
    indexBuilder = new NonTxIndexBuilder();
    indexBuilder.setup(env);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), TimeRange (org.apache.hadoop.hbase.io.TimeRange), RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), PhoenixIndexMetaData (org.apache.phoenix.index.PhoenixIndexMetaData), BaseRegionScanner (org.apache.phoenix.coprocessor.BaseRegionScanner), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), PhoenixIndexCodec (org.apache.phoenix.index.PhoenixIndexCodec), InvocationOnMock (org.mockito.invocation.InvocationOnMock), Region (org.apache.hadoop.hbase.regionserver.Region), Scan (org.apache.hadoop.hbase.client.Scan), Before (org.junit.Before)
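The helper getMockTimeRangeRegionScanner(timeRange) called above is not reproduced on this page. Below is a minimal sketch of what it might look like, assuming (per the Javadoc) that the test keeps the row under test in a List<Cell> field named currentRowCells and simply filters those cells by the scan's TimeRange; it also assumes java.io.IOException and java.util.List in addition to the imports listed above. This is an illustration, not the actual Phoenix test code.

private RegionScanner getMockTimeRangeRegionScanner(final TimeRange timeRange) throws IOException {
    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(scanner.next(Mockito.anyList())).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            @SuppressWarnings("unchecked")
            List<Cell> results = (List<Cell>) invocation.getArguments()[0];
            for (Cell cell : currentRowCells) {
                // TimeRange is half-open: [min, max)
                if (timeRange.withinTimeRange(cell.getTimestamp())) {
                    results.add(cell);
                }
            }
            // only a single row is served, so signal that no more rows follow
            return false;
        }
    });
    return scanner;
}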

Example 12 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by apache.

The class NonTxIndexBuilderTest, method setup().

/**
 * Test setup so that {@link NonTxIndexBuilder#getIndexUpdate(Mutation, IndexMetaData)} can be
 * called, where any read requests to
 * {@link LocalTable#getCurrentRowState(Mutation, Collection, boolean)} are read from our test
 * field 'currentRowCells'
 */
@Before
public void setup() throws Exception {
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration(false);
    conf.set(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName());
    Mockito.when(env.getConfiguration()).thenReturn(conf);
    // the following is used by LocalTable#getCurrentRowState()
    Region mockRegion = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(mockRegion);
    Mockito.when(mockRegion.getScanner(Mockito.any(Scan.class))).thenAnswer(new Answer<RegionScanner>() {

        @Override
        public RegionScanner answer(InvocationOnMock invocation) throws Throwable {
            Scan sArg = (Scan) invocation.getArguments()[0];
            TimeRange timeRange = sArg.getTimeRange();
            return getMockTimeRangeRegionScanner(timeRange);
        }
    });
    // the following is called by PhoenixIndexCodec#getIndexUpserts() and getIndexDeletes()
    HRegionInfo mockRegionInfo = Mockito.mock(HRegionInfo.class);
    Mockito.when(env.getRegionInfo()).thenReturn(mockRegionInfo);
    Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
    Mockito.when(mockRegionInfo.getStartKey()).thenReturn(Bytes.toBytes("a"));
    Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z"));
    Mockito.when(mockRegionInfo.getTable()).thenReturn(TableName.valueOf(TEST_TABLE_STRING));
    mockIndexMetaData = Mockito.mock(PhoenixIndexMetaData.class);
    Mockito.when(mockIndexMetaData.requiresPriorRowState((Mutation) Mockito.any())).thenReturn(true);
    Mockito.when(mockIndexMetaData.getIndexMaintainers()).thenReturn(Collections.singletonList(getTestIndexMaintainer()));
    indexBuilder = new NonTxIndexBuilder();
    indexBuilder.setup(env);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), TimeRange (org.apache.hadoop.hbase.io.TimeRange), RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), PhoenixIndexMetaData (org.apache.phoenix.index.PhoenixIndexMetaData), BaseRegionScanner (org.apache.phoenix.coprocessor.BaseRegionScanner), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), PhoenixIndexCodec (org.apache.phoenix.index.PhoenixIndexCodec), InvocationOnMock (org.mockito.invocation.InvocationOnMock), Region (org.apache.hadoop.hbase.regionserver.Region), Scan (org.apache.hadoop.hbase.client.Scan), Before (org.junit.Before)

Example 13 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by apache.

The class SequenceRegionObserver, method preIncrement().

/**
 * Use the preIncrement hook of BaseRegionObserver to overcome deficiencies in the Increment
 * implementation (HBASE-10254):
 * 1) Lack of recognition and identification of when the key value to increment doesn't exist
 * 2) Lack of the ability to set the timestamp of the updated key value.
 * Works the same as the existing region.increment(), except it assumes there is a single column
 * to increment and uses Phoenix LONG encoding.
 *
 * @since 3.0.0
 */
@Override
public Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> e, final Increment increment) throws IOException {
    RegionCoprocessorEnvironment env = e.getEnvironment();
    // We need to set this to prevent region.increment from being called
    e.bypass();
    e.complete();
    Region region = env.getRegion();
    byte[] row = increment.getRow();
    List<RowLock> locks = Lists.newArrayList();
    TimeRange tr = increment.getTimeRange();
    region.startRegionOperation();
    try {
        acquireLock(region, row, locks);
        try {
            long maxTimestamp = tr.getMax();
            boolean validateOnly = true;
            Get get = new Get(row);
            get.setTimeRange(tr.getMin(), tr.getMax());
            for (Map.Entry<byte[], List<Cell>> entry : increment.getFamilyCellMap().entrySet()) {
                byte[] cf = entry.getKey();
                for (Cell cq : entry.getValue()) {
                    long value = Bytes.toLong(cq.getValueArray(), cq.getValueOffset());
                    get.addColumn(cf, CellUtil.cloneQualifier(cq));
                    long cellTimestamp = cq.getTimestamp();
                    // If an explicit timestamp was set on the Increment's TimeRange or any of its Cells,
                    // use the lowest such timestamp as the upper bound of the Get.
                    if (cellTimestamp > 0 && cellTimestamp < maxTimestamp) {
                        maxTimestamp = cellTimestamp;
                        get.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, maxTimestamp);
                    }
                    validateOnly &= (Sequence.ValueOp.VALIDATE_SEQUENCE.ordinal() == value);
                }
            }
            Result result = region.get(get);
            if (result.isEmpty()) {
                return getErrorResult(row, maxTimestamp, SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode());
            }
            KeyValue currentValueKV = Sequence.getCurrentValueKV(result);
            KeyValue incrementByKV = Sequence.getIncrementByKV(result);
            KeyValue cacheSizeKV = Sequence.getCacheSizeKV(result);
            long currentValue = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), currentValueKV.getValueOffset(), SortOrder.getDefault());
            long incrementBy = PLong.INSTANCE.getCodec().decodeLong(incrementByKV.getValueArray(), incrementByKV.getValueOffset(), SortOrder.getDefault());
            long cacheSize = PLong.INSTANCE.getCodec().decodeLong(cacheSizeKV.getValueArray(), cacheSizeKV.getValueOffset(), SortOrder.getDefault());
            // Hold timestamp constant for sequences, so that clients always only see the latest
            // value regardless of when they connect.
            long timestamp = currentValueKV.getTimestamp();
            Put put = new Put(row, timestamp);
            int numIncrementKVs = increment.getFamilyCellMap().get(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES).size();
            // creates the list of KeyValues used for the Result that will be returned
            List<Cell> cells = Sequence.getCells(result, numIncrementKVs);
            // if client is 3.0/4.0 preserve the old behavior (older clients won't have newer columns present in the increment)
            if (numIncrementKVs != Sequence.NUM_SEQUENCE_KEY_VALUES) {
                currentValue += incrementBy * cacheSize;
                // Hold timestamp constant for sequences, so that clients always only see the latest value
                // regardless of when they connect.
                KeyValue newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp);
                put.add(newCurrentValueKV);
                Sequence.replaceCurrentValueKV(cells, newCurrentValueKV);
            } else {
                KeyValue cycleKV = Sequence.getCycleKV(result);
                KeyValue limitReachedKV = Sequence.getLimitReachedKV(result);
                KeyValue minValueKV = Sequence.getMinValueKV(result);
                KeyValue maxValueKV = Sequence.getMaxValueKV(result);
                boolean increasingSeq = incrementBy > 0;
                // If minValue, maxValue, cycle and limitReached are null, this sequence was upgraded from
                // a lower version. Default them to Long.MIN_VALUE, Long.MAX_VALUE, false and false
                // respectively, to maintain existing behavior and also update the KeyValues on the server.
                boolean limitReached;
                if (limitReachedKV == null) {
                    limitReached = false;
                    KeyValue newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp);
                    put.add(newLimitReachedKV);
                    Sequence.replaceLimitReachedKV(cells, newLimitReachedKV);
                } else {
                    limitReached = (Boolean) PBoolean.INSTANCE.toObject(limitReachedKV.getValueArray(), limitReachedKV.getValueOffset(), limitReachedKV.getValueLength());
                }
                long minValue;
                if (minValueKV == null) {
                    minValue = Long.MIN_VALUE;
                    KeyValue newMinValueKV = createKeyValue(row, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, minValue, timestamp);
                    put.add(newMinValueKV);
                    Sequence.replaceMinValueKV(cells, newMinValueKV);
                } else {
                    minValue = PLong.INSTANCE.getCodec().decodeLong(minValueKV.getValueArray(), minValueKV.getValueOffset(), SortOrder.getDefault());
                }
                long maxValue;
                if (maxValueKV == null) {
                    maxValue = Long.MAX_VALUE;
                    KeyValue newMaxValueKV = createKeyValue(row, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, maxValue, timestamp);
                    put.add(newMaxValueKV);
                    Sequence.replaceMaxValueKV(cells, newMaxValueKV);
                } else {
                    maxValue = PLong.INSTANCE.getCodec().decodeLong(maxValueKV.getValueArray(), maxValueKV.getValueOffset(), SortOrder.getDefault());
                }
                boolean cycle;
                if (cycleKV == null) {
                    cycle = false;
                    KeyValue newCycleKV = createKeyValue(row, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, cycle, timestamp);
                    put.add(newCycleKV);
                    Sequence.replaceCycleValueKV(cells, newCycleKV);
                } else {
                    cycle = (Boolean) PBoolean.INSTANCE.toObject(cycleKV.getValueArray(), cycleKV.getValueOffset(), cycleKV.getValueLength());
                }
                long numSlotsToAllocate = calculateNumSlotsToAllocate(increment);
                // We don't support Bulk Allocations on sequences that have the CYCLE flag set to true
                if (cycle && !SequenceUtil.isCycleAllowed(numSlotsToAllocate)) {
                    return getErrorResult(row, maxTimestamp, SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED.getErrorCode());
                }
                // Bulk Allocations are expressed by NEXT <n> VALUES FOR
                if (SequenceUtil.isBulkAllocation(numSlotsToAllocate)) {
                    if (SequenceUtil.checkIfLimitReached(currentValue, minValue, maxValue, incrementBy, cacheSize, numSlotsToAllocate)) {
                        // Return an error if the limit would be reached before allocating
                        // all the slots requested.
                        return getErrorResult(row, maxTimestamp, SequenceUtil.getLimitReachedErrorCode(increasingSeq).getErrorCode());
                    }
                }
                if (validateOnly) {
                    return result;
                }
                // return if we have run out of sequence values
                if (limitReached) {
                    if (cycle) {
                        // reset currentValue of the Sequence row to minValue/maxValue
                        currentValue = increasingSeq ? minValue : maxValue;
                    } else {
                        return getErrorResult(row, maxTimestamp, SequenceUtil.getLimitReachedErrorCode(increasingSeq).getErrorCode());
                    }
                }
                // check if the limit was reached
                limitReached = SequenceUtil.checkIfLimitReached(currentValue, minValue, maxValue, incrementBy, cacheSize, numSlotsToAllocate);
                // update currentValue
                currentValue += incrementBy * (SequenceUtil.isBulkAllocation(numSlotsToAllocate) ? numSlotsToAllocate : cacheSize);
                // update the currentValue of the Result row
                KeyValue newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp);
                Sequence.replaceCurrentValueKV(cells, newCurrentValueKV);
                put.add(newCurrentValueKV);
                // persist the recomputed LIMIT_REACHED flag so that no further values are handed out once the limit is hit
                KeyValue newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp);
                put.add(newLimitReachedKV);
            }
            // update the KeyValues on the server
            Mutation[] mutations = new Mutation[] { put };
            region.batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
            // return a Result with the updated KeyValues
            return Result.create(cells);
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        ServerUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t);
        // Impossible
        return null;
    } finally {
        region.closeRegionOperation();
    }
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), TimeRange (org.apache.hadoop.hbase.io.TimeRange), RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), Get (org.apache.hadoop.hbase.client.Get), Region (org.apache.hadoop.hbase.regionserver.Region), List (java.util.List), Mutation (org.apache.hadoop.hbase.client.Mutation), Map (java.util.Map), Cell (org.apache.hadoop.hbase.Cell), RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock)
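For orientation, here is a rough, hypothetical sketch of the kind of Increment this hook consumes: each cell's 8-byte value encodes a Sequence.ValueOp ordinal (which is why the code above reads it with Bytes.toLong and compares it to VALIDATE_SEQUENCE), and the Increment's TimeRange pins the timestamp the server reads at. The real client-side builder lives in Phoenix's Sequence class and adds more columns than shown; the column qualifier below is illustrative only.

// Hypothetical client-side sketch, not the Phoenix implementation.
Increment buildSequenceIncrement(byte[] sequenceRowKey, Sequence.ValueOp op, long timestamp) throws IOException {
    Increment inc = new Increment(sequenceRowKey);
    // preIncrement() uses tr.getMax() as its read timestamp, so pin it here
    inc.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp);
    // the server decodes the cell value with Bytes.toLong(...), so the "amount"
    // carries the op ordinal rather than a real increment amount
    inc.addColumn(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, op.ordinal());
    return inc;
}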

Example 14 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by apache.

The class ScanRanges, method create().

public static ScanRanges create(RowKeySchema schema, List<List<KeyRange>> ranges, int[] slotSpan, KeyRange minMaxRange, Integer nBuckets, boolean useSkipScan, int rowTimestampColIndex) {
    int offset = nBuckets == null ? 0 : SaltingUtil.NUM_SALTING_BYTES;
    int nSlots = ranges.size();
    if (nSlots == offset && minMaxRange == KeyRange.EVERYTHING_RANGE) {
        return EVERYTHING;
    } else if (minMaxRange == KeyRange.EMPTY_RANGE || (nSlots == 1 + offset && ranges.get(offset).size() == 1 && ranges.get(offset).get(0) == KeyRange.EMPTY_RANGE)) {
        return NOTHING;
    }
    TimeRange rowTimestampRange = getRowTimestampColumnRange(ranges, schema, rowTimestampColIndex);
    boolean isPointLookup = isPointLookup(schema, ranges, slotSpan, useSkipScan);
    if (isPointLookup) {
        // TODO: consider keeping original to use for serialization as it would be smaller?
        List<byte[]> keys = ScanRanges.getPointKeys(ranges, slotSpan, schema, nBuckets);
        List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
        KeyRange unsaltedMinMaxRange = minMaxRange;
        if (nBuckets != null && minMaxRange != KeyRange.EVERYTHING_RANGE) {
            unsaltedMinMaxRange = KeyRange.getKeyRange(stripPrefix(minMaxRange.getLowerRange(), offset), minMaxRange.lowerUnbound(), stripPrefix(minMaxRange.getUpperRange(), offset), minMaxRange.upperUnbound());
        }
        // We have full keys here, so use field from our varbinary schema
        BytesComparator comparator = ScanUtil.getComparator(SchemaUtil.VAR_BINARY_SCHEMA.getField(0));
        for (byte[] key : keys) {
            // Filter now based on unsalted minMaxRange and ignore the point key salt byte
            if (unsaltedMinMaxRange.compareLowerToUpperBound(key, offset, key.length - offset, true, comparator) <= 0 && unsaltedMinMaxRange.compareUpperToLowerBound(key, offset, key.length - offset, true, comparator) >= 0) {
                keyRanges.add(KeyRange.getKeyRange(key));
            }
        }
        ranges = Collections.singletonList(keyRanges);
        useSkipScan = keyRanges.size() > 1;
        // Fall back to the generic variable-length binary schema when there are multiple point keys,
        // or when the last key slot uses the DESC separator byte, which is not part of the value.
        if (keys.size() > 1 || SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), false, schema.getField(schema.getFieldCount() - 1)) == QueryConstants.DESC_SEPARATOR_BYTE) {
            schema = SchemaUtil.VAR_BINARY_SCHEMA;
            slotSpan = ScanUtil.SINGLE_COLUMN_SLOT_SPAN;
        } else {
            // Keep original schema and don't use skip scan as it's not necessary
            // when there's a single key.
            slotSpan = new int[] { schema.getMaxFields() - 1 };
        }
    }
    List<List<KeyRange>> sortedRanges = Lists.newArrayListWithExpectedSize(ranges.size());
    for (int i = 0; i < ranges.size(); i++) {
        List<KeyRange> sorted = Lists.newArrayList(ranges.get(i));
        Collections.sort(sorted, KeyRange.COMPARATOR);
        sortedRanges.add(ImmutableList.copyOf(sorted));
    }
    // Don't set minMaxRange for point lookup because it causes issues during intersect
    // by going across region boundaries
    KeyRange scanRange = KeyRange.EVERYTHING_RANGE;
    // if (nBuckets == null || (nBuckets != null && (!isPointLookup || !useSkipScanFilter))) {
    if (nBuckets == null || !isPointLookup || !useSkipScan) {
        byte[] minKey = ScanUtil.getMinKey(schema, sortedRanges, slotSpan);
        byte[] maxKey = ScanUtil.getMaxKey(schema, sortedRanges, slotSpan);
        // If the max key crosses the salt-prefix boundary of the min key, we don't
        // have anything to filter at the upper end of the range.
        if (ScanUtil.crossesPrefixBoundary(maxKey, ScanUtil.getPrefix(minKey, offset), offset)) {
            maxKey = KeyRange.UNBOUND;
        }
        // We won't filter anything at the low end of the range if we just have the salt byte
        if (minKey.length <= offset) {
            minKey = KeyRange.UNBOUND;
        }
        scanRange = KeyRange.getKeyRange(minKey, maxKey);
    }
    if (minMaxRange != KeyRange.EVERYTHING_RANGE) {
        // Intersect using modified min/max range, but keep original range to ensure it
        // can still be decomposed into its parts
        KeyRange inclusiveExclusiveMinMaxRange = ScanUtil.convertToInclusiveExclusiveRange(minMaxRange, schema, new ImmutableBytesWritable());
        scanRange = scanRange.intersect(inclusiveExclusiveMinMaxRange);
    }
    if (scanRange == KeyRange.EMPTY_RANGE) {
        return NOTHING;
    }
    return new ScanRanges(schema, slotSpan, sortedRanges, scanRange, minMaxRange, useSkipScan, isPointLookup, nBuckets, rowTimestampRange);
}
Also used: TimeRange (org.apache.hadoop.hbase.io.TimeRange), BytesComparator (org.apache.phoenix.util.ScanUtil.BytesComparator), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), KeyRange (org.apache.phoenix.query.KeyRange), FilterList (org.apache.hadoop.hbase.filter.FilterList), ArrayList (java.util.ArrayList), ImmutableList (com.google.common.collect.ImmutableList), List (java.util.List)
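The stripPrefix(byte[], int) helper used above for salted tables is not shown on this page. A plausible minimal version is sketched below, assuming its only job is to drop the leading salt bytes so that a point key can be compared against the unsalted min/max range; treat it as an illustration rather than the exact Phoenix source.

// Illustrative sketch of the salt-stripping helper referenced in create().
private static byte[] stripPrefix(byte[] key, int keyOffset) {
    // an empty key stands for an unbound range endpoint; leave it untouched
    if (key.length == 0) {
        return key;
    }
    byte[] stripped = new byte[key.length - keyOffset];
    System.arraycopy(key, keyOffset, stripped, 0, stripped.length);
    return stripped;
}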

Example 15 with TimeRange

Use of org.apache.hadoop.hbase.io.TimeRange in project phoenix by apache.

The class ScanRanges, method getAscTimeRange().

private static TimeRange getAscTimeRange(KeyRange lowestRange, KeyRange highestRange, Field f) throws IOException {
    long low;
    long high;
    PDataCodec codec = PLong.INSTANCE.getCodec();
    if (lowestRange.lowerUnbound()) {
        low = 0;
    } else {
        long lowerRange = codec.decodeLong(lowestRange.getLowerRange(), 0, SortOrder.ASC);
        low = lowestRange.isLowerInclusive() ? lowerRange : safelyIncrement(lowerRange);
    }
    if (highestRange.upperUnbound()) {
        high = HConstants.LATEST_TIMESTAMP;
    } else {
        long upperRange = codec.decodeLong(highestRange.getUpperRange(), 0, SortOrder.ASC);
        if (highestRange.isUpperInclusive()) {
            high = safelyIncrement(upperRange);
        } else {
            high = upperRange;
        }
    }
    return new TimeRange(low, high);
}
Also used: TimeRange (org.apache.hadoop.hbase.io.TimeRange), PDataCodec (org.apache.phoenix.schema.types.PDataType.PDataCodec)
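As a quick illustration of why getAscTimeRange adjusts the bounds this way: HBase's TimeRange is half-open, [min, max), so an exclusive lower bound becomes min = lower + 1 and an inclusive upper bound becomes max = upper + 1 (safelyIncrement presumably guards the Long.MAX_VALUE edge case). The snippet below is illustrative only and is not part of the Phoenix sources.

// Illustrative only: mapping KeyRange bound types onto the half-open TimeRange [min, max).
static TimeRange ascTimeRangeExample() throws IOException {
    long lower = 1000L;   // lower bound, exclusive in the KeyRange
    long upper = 2000L;   // upper bound, inclusive in the KeyRange
    TimeRange tr = new TimeRange(lower + 1, upper + 1);   // covers timestamps 1001..2000
    assert !tr.withinTimeRange(1000L);   // excluded, as the KeyRange intended
    assert tr.withinTimeRange(2000L);    // included, because max is exclusive
    return tr;
}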

Aggregations

TimeRange (org.apache.hadoop.hbase.io.TimeRange): 45
Test (org.junit.Test): 11
Map (java.util.Map): 10
Get (org.apache.hadoop.hbase.client.Get): 10
Scan (org.apache.hadoop.hbase.client.Scan): 10
Cell (org.apache.hadoop.hbase.Cell): 8
NavigableSet (java.util.NavigableSet): 7
NameBytesPair (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair): 7
HashMap (java.util.HashMap): 6
Filter (org.apache.hadoop.hbase.filter.Filter): 6
NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair): 6
ByteString (com.google.protobuf.ByteString): 5
ArrayList (java.util.ArrayList): 5
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 5
Put (org.apache.hadoop.hbase.client.Put): 5
List (java.util.List): 4
Increment (org.apache.hadoop.hbase.client.Increment): 4
Result (org.apache.hadoop.hbase.client.Result): 4
Column (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column): 4
Column (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Column): 4