Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
In class TestClientScanner, method testSizeLimit.
@Test
@SuppressWarnings("unchecked")
public void testSizeLimit() throws IOException {
  final Result[] results = new Result[1];
  KeyValue kv1 = new KeyValue("row".getBytes(), "cf".getBytes(), "cq".getBytes(), 1,
      Type.Maximum);
  results[0] = Result.create(new Cell[] { kv1 });
  RpcRetryingCaller<Result[]> caller = Mockito.mock(RpcRetryingCaller.class);
  Mockito.when(rpcFactory.<Result[]>newCaller()).thenReturn(caller);
  Mockito.when(caller.callWithoutRetries(Mockito.any(RetryingCallable.class),
      Mockito.anyInt())).thenAnswer(new Answer<Result[]>() {
    private int count = 0;

    @Override
    public Result[] answer(InvocationOnMock invocation) throws Throwable {
      ScannerCallableWithReplicas callable =
          invocation.getArgumentAt(0, ScannerCallableWithReplicas.class);
      switch (count) {
        case 0: // initialize
          count++;
          // If we set NO here, the implementation will trigger a close.
          callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.YES);
          return results;
        case 1: // close
          count++;
          return null;
        default:
          throw new RuntimeException("Expected only 2 invocations");
      }
    }
  });
  Mockito.when(rpcFactory.<Result[]>newCaller()).thenReturn(caller);
  // Set a caching count much larger than the single result that will be returned.
  scan.setCaching(100);
  // The single key-value already exceeds the max result size, so loadCache() exits
  // after one RPC.
  scan.setMaxResultSize(1);
  try (MockClientScanner scanner = new MockClientScanner(conf, scan,
      TableName.valueOf(name.getMethodName()), clusterConn, rpcFactory,
      controllerFactory, pool, Integer.MAX_VALUE)) {
    InOrder inOrder = Mockito.inOrder(caller);
    scanner.loadCache();
    inOrder.verify(caller, Mockito.times(1)).callWithoutRetries(
        Mockito.any(RetryingCallable.class), Mockito.anyInt());
    assertEquals(1, scanner.cache.size());
    Result r = scanner.cache.poll();
    assertNotNull(r);
    CellScanner cs = r.cellScanner();
    assertTrue(cs.advance());
    assertEquals(kv1, cs.current());
    assertFalse(cs.advance());
  }
}
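The same size-limit behavior applies outside the mock: when the max result size is smaller than a single Cell, each scanner RPC returns as soon as that one result is buffered, regardless of the caching count. A minimal client-side sketch, assuming an already open Connection and an illustrative table name "t" (both are assumptions, not part of the test above):

// Hedged sketch: the table name "t" and the Connection handling are illustrative.
void scanWithSizeLimit(Connection conn) throws IOException {
  Scan scan = new Scan();
  scan.setCaching(100);      // allow up to 100 rows per RPC by count...
  scan.setMaxResultSize(1);  // ...but cap each RPC batch at 1 byte of result data
  try (Table table = conn.getTable(TableName.valueOf("t"));
       ResultScanner rs = table.getScanner(scan)) {
    for (Result result : rs) {
      // each underlying RPC returns as soon as the size limit is reached
    }
  }
}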
Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
In class TestPBCell, method testRoundTrip.
/**
 * Basic test to verify that the utility methods in {@link PBType} and the delegation to
 * protobuf work.
 */
@Test
public void testRoundTrip() {
  final Cell cell = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"),
      Bytes.toBytes("qual"), Bytes.toBytes("val"));
  CellProtos.Cell c = ProtobufUtil.toCell(cell), decoded;
  PositionedByteRange pbr = new SimplePositionedByteRange(c.getSerializedSize());
  pbr.setPosition(0);
  int encodedLength = CODEC.encode(pbr, c);
  pbr.setPosition(0);
  decoded = CODEC.decode(pbr);
  assertEquals(encodedLength, pbr.getPosition());
  assertTrue(CellUtil.equals(cell, ProtobufUtil.toCell(decoded)));
}
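The decode half of the codec can also run over bytes received from elsewhere. A minimal sketch, assuming CODEC is a PBCell instance as in the test class and that the encoded bytes were produced by an earlier CODEC.encode call (both are assumptions, not shown above):

// Hedged sketch: decode a cell from bytes written earlier by CODEC.encode.
Cell decodeFromBytes(byte[] encoded) {
  PositionedByteRange src = new SimplePositionedByteRange(encoded);
  CellProtos.Cell decodedProto = CODEC.decode(src);
  // src.getPosition() now reflects how many bytes the codec consumed.
  return ProtobufUtil.toCell(decodedProto);
}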
Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
In class ProtobufUtil, method toPut.
/**
 * Convert a protocol buffer Mutate to a Put.
 *
 * @param proto The protocol buffer MutationProto to convert
 * @param cellScanner If non-null, the Cell data that goes with this proto.
 * @return A client Put.
 * @throws IOException
 */
public static Put toPut(final MutationProto proto, final CellScanner cellScanner)
    throws IOException {
  // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it?
  MutationType type = proto.getMutateType();
  assert type == MutationType.PUT : type.name();
  long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP;
  Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null;
  int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
  if (cellCount > 0) {
    // The proto has metadata only and the data is separate to be found in the cellScanner.
    if (cellScanner == null) {
      throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: "
          + toShortString(proto));
    }
    for (int i = 0; i < cellCount; i++) {
      if (!cellScanner.advance()) {
        throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i
            + " no cell returned: " + toShortString(proto));
      }
      Cell cell = cellScanner.current();
      if (put == null) {
        put = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp);
      }
      put.add(cell);
    }
  } else {
    if (put == null) {
      throw new IllegalArgumentException("row cannot be null");
    }
    // The proto has the metadata and the data itself.
    for (ColumnValue column : proto.getColumnValueList()) {
      byte[] family = column.getFamily().toByteArray();
      for (QualifierValue qv : column.getQualifierValueList()) {
        if (!qv.hasValue()) {
          throw new DoNotRetryIOException("Missing required field: qualifier value");
        }
        ByteBuffer qualifier = qv.hasQualifier() ? qv.getQualifier().asReadOnlyByteBuffer() : null;
        ByteBuffer value = qv.hasValue() ? qv.getValue().asReadOnlyByteBuffer() : null;
        long ts = timestamp;
        if (qv.hasTimestamp()) {
          ts = qv.getTimestamp();
        }
        byte[] allTagsBytes;
        if (qv.hasTags()) {
          allTagsBytes = qv.getTags().toByteArray();
          if (qv.hasDeleteType()) {
            byte[] qual = qv.hasQualifier() ? qv.getQualifier().toByteArray() : null;
            put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, ts,
                fromDeleteType(qv.getDeleteType()), null, allTagsBytes));
          } else {
            List<Tag> tags = TagUtil.asList(allTagsBytes, 0, (short) allTagsBytes.length);
            Tag[] tagsArray = new Tag[tags.size()];
            put.addImmutable(family, qualifier, ts, value, tags.toArray(tagsArray));
          }
        } else {
          if (qv.hasDeleteType()) {
            byte[] qual = qv.hasQualifier() ? qv.getQualifier().toByteArray() : null;
            put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, ts,
                fromDeleteType(qv.getDeleteType())));
          } else {
            put.addImmutable(family, qualifier, ts, value);
          }
        }
      }
    }
  }
  put.setDurability(toDurability(proto.getDurability()));
  for (NameBytesPair attribute : proto.getAttributeList()) {
    put.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
  }
  return put;
}
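For orientation, the MutationProto normally comes from the client-side conversion in the same class. A minimal round-trip sketch, assuming the companion ProtobufUtil.toMutation(MutationType, Mutation) overload for the encoding side and illustrative row/column names:

// Hedged round-trip sketch; row/family/qualifier values are illustrative.
static Put roundTripPut() throws IOException {
  Put original = new Put(Bytes.toBytes("row1"));
  original.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  // Client side: wrap the Put in a MutationProto of type PUT (assumes the
  // ProtobufUtil.toMutation(MutationType, Mutation) overload).
  MutationProto proto = ProtobufUtil.toMutation(MutationType.PUT, original);
  // Server side: convert back. No associated cell count is set, so the data is
  // read from the proto's ColumnValue list and cellScanner may be null.
  return ProtobufUtil.toPut(proto, null);
}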
Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
In class TestTagCompressionContext, method testCompressUncompressTags2.
@Test
public void testCompressUncompressTags2() throws Exception {
  // Assumes HBase's org.apache.hadoop.hbase.io.ByteArrayOutputStream, which exposes
  // getBuffer() (java.io's version does not).
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  TagCompressionContext context = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE);
  KeyValue kv1 = createKVWithTags(1);
  int tagsLength1 = kv1.getTagsLength();
  context.compressTags(baos, kv1.getTagsArray(), kv1.getTagsOffset(), tagsLength1);
  KeyValue kv2 = createKVWithTags(3);
  int tagsLength2 = kv2.getTagsLength();
  context.compressTags(baos, kv2.getTagsArray(), kv2.getTagsOffset(), tagsLength2);
  context.clear();
  ByteArrayInputStream bais = new ByteArrayInputStream(baos.getBuffer());
  byte[] dest = new byte[tagsLength1];
  context.uncompressTags(bais, dest, 0, tagsLength1);
  assertTrue(Bytes.equals(kv1.getTagsArray(), kv1.getTagsOffset(), tagsLength1, dest, 0,
      tagsLength1));
  dest = new byte[tagsLength2];
  context.uncompressTags(bais, dest, 0, tagsLength2);
  assertTrue(Bytes.equals(kv2.getTagsArray(), kv2.getTagsOffset(), tagsLength2, dest, 0,
      tagsLength2));
}
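Once uncompressed, the raw tag bytes can be materialized into Tag objects with the same helper the toPut conversion above uses. A short hedged sketch, operating on the dest buffer from the test:

// Hedged sketch: turn the uncompressed tag bytes back into Tag objects, mirroring
// the TagUtil.asList call used in ProtobufUtil.toPut above.
List<Tag> restored = TagUtil.asList(dest, 0, (short) dest.length);
for (Tag t : restored) {
  // t.getType() identifies each tag; value accessors differ slightly across HBase versions.
}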
Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
In class TestTagCompressionContext, method createKVWithTags.
private KeyValue createKVWithTags(int noOfTags) {
  List<Tag> tags = new ArrayList<>();
  for (int i = 0; i < noOfTags; i++) {
    tags.add(new ArrayBackedTag((byte) i, "tagValue" + i));
  }
  KeyValue kv = new KeyValue(ROW, CF, Q, 1234L, V, tags);
  return kv;
}