Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
The class GetByteFunction, method init().
private void init() {
    Expression offsetExpr = children.get(1);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    // Pre-compute the offset once if the expression is a constant:
    // stateless, always deterministic, and evaluable without a row.
    if (offsetExpr.isStateless() && offsetExpr.getDeterminism() == Determinism.ALWAYS
            && offsetExpr.evaluate(null, ptr)) {
        offsetPreCompute = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder());
    } else {
        offsetPreCompute = null;
    }
}
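This pre-computation works because ImmutableBytesWritable is not an immutable value type but a resettable pointer (array, offset, length) that evaluate(null, ptr) fills in place. A minimal sketch of those pointer semantics, using only the HBase API (the class name PointerDemo and the sample bytes are ours):

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class PointerDemo {
    public static void main(String[] args) {
        // ImmutableBytesWritable points into an existing buffer rather than
        // copying it; Phoenix expressions reuse one instance across rows.
        byte[] row = Bytes.toBytes("ignored-prefix|42");
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        ptr.set(row, 15, 2); // aim the pointer at the "42" suffix, no copy
        System.out.println(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength())); // prints 42
        byte[] slice = ptr.copyBytes(); // materialize just the referenced slice
        System.out.println(slice.length); // prints 2
    }
}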
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
The class PDataTypeTest, method testUnsignedFloat().
@Test
public void testUnsignedFloat() {
    // round-trip: toBytes followed by toObject returns the original value
    Float na = 0.005f;
    byte[] b = PUnsignedFloat.INSTANCE.toBytes(na);
    Float nb = (Float) PUnsignedFloat.INSTANCE.toObject(b);
    assertEquals(na, nb);

    // a value encoded with descending sort order must decode back through the codec
    na = 10.0f;
    b = PUnsignedFloat.INSTANCE.toBytes(na, SortOrder.DESC);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ptr.set(b);
    nb = PUnsignedFloat.INSTANCE.getCodec().decodeFloat(ptr, SortOrder.DESC);
    assertEquals(na, nb);

    // byte-wise comparison must agree with numeric order:
    // 0 < Float.MIN_VALUE < Float.MAX_VALUE < +Infinity < NaN
    na = 2.0f;
    nb = 1.0f;
    byte[] ba = PUnsignedFloat.INSTANCE.toBytes(na);
    byte[] bb = PUnsignedFloat.INSTANCE.toBytes(nb);
    assertTrue(Bytes.compareTo(ba, bb) > 0);
    na = 0.0f;
    nb = Float.MIN_VALUE;
    ba = PUnsignedFloat.INSTANCE.toBytes(na);
    bb = PUnsignedFloat.INSTANCE.toBytes(nb);
    assertTrue(Bytes.compareTo(ba, bb) < 0);
    na = Float.MIN_VALUE;
    nb = Float.MAX_VALUE;
    ba = PUnsignedFloat.INSTANCE.toBytes(na);
    bb = PUnsignedFloat.INSTANCE.toBytes(nb);
    assertTrue(Bytes.compareTo(ba, bb) < 0);
    na = Float.MAX_VALUE;
    nb = Float.POSITIVE_INFINITY;
    ba = PUnsignedFloat.INSTANCE.toBytes(na);
    bb = PUnsignedFloat.INSTANCE.toBytes(nb);
    assertTrue(Bytes.compareTo(ba, bb) < 0);
    na = Float.POSITIVE_INFINITY;
    nb = Float.NaN;
    ba = PUnsignedFloat.INSTANCE.toBytes(na);
    bb = PUnsignedFloat.INSTANCE.toBytes(nb);
    assertTrue(Bytes.compareTo(ba, bb) < 0);

    // coercion from another PDataType
    Integer value = 100;
    Object obj = PUnsignedFloat.INSTANCE.toObject(value, PInteger.INSTANCE);
    assertTrue(obj instanceof Float);
}
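The byte-order assertions above rest on a property of IEEE-754: for non-negative floats the raw bit pattern increases monotonically with the value, and the canonical NaN bits sort above +Infinity. Phoenix's codec may add its own handling for descending sort order, but the ascending case can be verified with plain Java (class name ours):

public class UnsignedFloatOrder {
    public static void main(String[] args) {
        // the same ordering chain the test asserts via Bytes.compareTo
        float[] ascending = { 0.0f, Float.MIN_VALUE, 1.0f, 2.0f, Float.MAX_VALUE,
                Float.POSITIVE_INFINITY, Float.NaN };
        for (int i = 1; i < ascending.length; i++) {
            // comparing the bit patterns as ints mirrors comparing the
            // big-endian encoded bytes lexicographically
            if (Float.floatToIntBits(ascending[i - 1]) >= Float.floatToIntBits(ascending[i])) {
                throw new AssertionError("order broken at index " + i);
            }
        }
        System.out.println("bit-pattern order matches numeric order");
    }
}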
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
The class LazyValueGetter, method getLatestValue().
@Override
public ImmutableBytesWritable getLatestValue(ColumnReference ref) throws IOException {
    // lazily create the backing map; re-check under the lock so a racing
    // thread cannot replace a map that already holds cached values
    if (values == null) {
        synchronized (this) {
            if (values == null) {
                values = Collections.synchronizedMap(new HashMap<ColumnReference, ImmutableBytesWritable>());
            }
        }
    }
    // check the value in the map, fetching and caching it on a miss
    ImmutableBytesWritable value = values.get(ref);
    if (value == null) {
        value = get(ref);
        values.put(ref, value);
    }
    return value;
}
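On current JDKs the same memoization can be written without hand-rolled locking. A hypothetical alternative using ConcurrentHashMap.computeIfAbsent (the generic names and the fetch method are placeholders, not Phoenix API); note one behavioral difference: the original caches null lookup results, which ConcurrentHashMap cannot store.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class MemoizingGetter<Ref, Value> {
    private final ConcurrentMap<Ref, Value> values = new ConcurrentHashMap<>();

    public Value getLatestValue(Ref ref) throws IOException {
        try {
            // computeIfAbsent runs the loader at most once per key, so two
            // threads cannot both fetch the same reference
            return values.computeIfAbsent(ref, r -> {
                try {
                    return fetch(r);
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        } catch (UncheckedIOException e) {
            throw e.getCause();
        }
    }

    private Value fetch(Ref ref) throws IOException {
        throw new UnsupportedOperationException("placeholder for the real lookup");
    }
}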
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
The class HashCacheClient, method addHashCache().
/**
 * Send the results of scanning through the iterator to all
 * region servers hosting regions of the table that will use the cache
 * and that intersect with the given key ranges.
 * @param iterator iterator over the table or intermediate results being cached
 * @return client-side {@link ServerCache} representing the added hash cache
 * @throws SQLException
 * @throws MaxServerCacheSizeExceededException if the size of the hash cache
 *         exceeds the maximum allowed size
 */
public ServerCache addHashCache(ScanRanges keyRanges, ResultIterator iterator, long estimatedSize,
        List<Expression> onExpressions, boolean singleValueOnly, TableRef cacheUsingTableRef,
        Expression keyRangeRhsExpression, List<Expression> keyRangeRhsValues) throws SQLException {
    // serialize and compress the hash cache table
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    serialize(ptr, iterator, estimatedSize, onExpressions, singleValueOnly, keyRangeRhsExpression, keyRangeRhsValues);
    return serverCache.addServerCache(keyRanges, ptr, ByteUtil.EMPTY_BYTE_ARRAY, new HashCacheFactory(), cacheUsingTableRef);
}
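serialize(...) leaves ptr pointing at the finished payload, which addServerCache then ships to the region servers. A stripped-down sketch of that build-then-point pattern (the class name and payload contents are invented for illustration):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

public class SerializeIntoPtr {
    public static void main(String[] args) throws IOException {
        // build the payload in a growable buffer
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(baos);
        out.writeInt(2);        // e.g. number of cached rows
        out.writeUTF("row-1");  // placeholder payload
        out.writeUTF("row-2");
        out.flush();

        // then aim the pointer at the finished bytes
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        ptr.set(baos.toByteArray());
        System.out.println("serialized " + ptr.getLength() + " bytes");
    }
}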
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
The class FormatToBytesWritableMapper, method writeAggregatedRow().
/**
 * Collects all column values for the same row. The row key may differ when
 * indexes are involved, so a separate record is written for each unique row key.
 *
 * @param context current mapper context
 * @param tableName table index in the tableNames list
 * @param lkv list of KeyValues to be combined into a single ImmutableBytesWritable
 * @throws IOException
 * @throws InterruptedException
 */
private void writeAggregatedRow(Context context, String tableName, List<KeyValue> lkv)
        throws IOException, InterruptedException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
    DataOutputStream outputStream = new DataOutputStream(bos);
    ImmutableBytesWritable outputKey = null;
    if (!lkv.isEmpty()) {
        for (KeyValue cell : lkv) {
            if (outputKey == null || Bytes.compareTo(outputKey.get(), outputKey.getOffset(),
                    outputKey.getLength(), cell.getRowArray(), cell.getRowOffset(),
                    cell.getRowLength()) != 0) {
                // this is the first row key, or one different from the previous cell's
                if (outputKey != null) {
                    // a different row key, so flush what has accumulated so far
                    ImmutableBytesWritable aggregatedArray = new ImmutableBytesWritable(bos.toByteArray());
                    outputStream.close();
                    context.write(new TableRowkeyPair(tableName, outputKey), aggregatedArray);
                }
                outputKey = new ImmutableBytesWritable(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                bos = new ByteArrayOutputStream(1024);
                outputStream = new DataOutputStream(bos);
            }
            /*
             * The order of aggregation: type, index of column, length of value, value itself
             */
            int i = findIndex(cell);
            if (i == -1) {
                // skip KVs that do not belong to the local index
                continue;
            }
            outputStream.writeByte(cell.getTypeByte());
            WritableUtils.writeVInt(outputStream, i);
            WritableUtils.writeVInt(outputStream, cell.getValueLength());
            outputStream.write(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
        }
        // flush the final row
        ImmutableBytesWritable aggregatedArray = new ImmutableBytesWritable(bos.toByteArray());
        outputStream.close();
        context.write(new TableRowkeyPair(tableName, outputKey), aggregatedArray);
    }
}
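Each record in the aggregated value is thus a type byte, a vint column index, a vint value length, and the raw value bytes. A hedged reader sketch that inverts exactly those four writes (the class and method names are ours, not Phoenix API):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

public class AggregatedRowReader {
    public static void readRecords(byte[] aggregated) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(aggregated));
        while (in.available() > 0) {
            byte type = in.readByte();                    // KeyValue type byte
            int columnIndex = WritableUtils.readVInt(in); // index into the column list
            int valueLength = WritableUtils.readVInt(in); // length of the value that follows
            byte[] value = new byte[valueLength];
            in.readFully(value);
            System.out.printf("type=%d column=%d valueLength=%d%n",
                    type, columnIndex, valueLength);
        }
    }
}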