Use of org.apache.hadoop.hive.accumulo.AccumuloHiveRow.ColumnTuple in project hive by apache.
The parse method of LazyAccumuloMap iterates over the ColumnTuples of the source row, keeps only those whose column family matches the column mapping and whose qualifier carries the configured prefix, and materializes each surviving tuple as a lazy key/value entry in the cached map.
protected void parse() {
  if (null == this.cachedMap) {
    this.cachedMap = new LinkedHashMap<Object, Object>();
  } else {
    this.cachedMap.clear();
  }

  LazyMapObjectInspector lazyMoi = getInspector();
  Text cf = new Text(columnMapping.getColumnFamily());

  for (ColumnTuple tuple : sourceRow.getTuples()) {
    String cq = tuple.getCq().toString();

    if (!cf.equals(tuple.getCf()) || !cq.startsWith(columnMapping.getColumnQualifierPrefix())) {
      // A column family or qualifier we don't want to include in the map
      continue;
    }

    // Because we append the cq prefix when serializing the column,
    // we should also remove it when pulling it from Accumulo
    cq = cq.substring(columnMapping.getColumnQualifierPrefix().length());

    // Keys are always primitive; respect the binary encoding
    LazyPrimitive<? extends ObjectInspector, ? extends Writable> key =
        LazyFactory.createLazyPrimitiveClass(
            (PrimitiveObjectInspector) lazyMoi.getMapKeyObjectInspector(),
            ColumnEncoding.BINARY == columnMapping.getKeyEncoding());

    ByteArrayRef keyRef = new ByteArrayRef();
    keyRef.setData(cq.getBytes(Charsets.UTF_8));
    key.init(keyRef, 0, keyRef.getData().length);

    // The value can be any type; use the map's value object inspector and respect the binary encoding
    LazyObject<?> value = LazyFactory.createLazyObject(
        lazyMoi.getMapValueObjectInspector(),
        ColumnEncoding.BINARY == columnMapping.getValueEncoding());

    byte[] bytes = tuple.getValue();
    if (bytes == null || isNull(oi.getNullSequence(), bytes, 0, bytes.length)) {
      value.setNull();
    } else {
      ByteArrayRef valueRef = new ByteArrayRef();
      valueRef.setData(bytes);
      value.init(valueRef, 0, valueRef.getData().length);
    }

    cachedMap.put(key, value);
  }

  this.setParsed(true);
}
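The family match and prefix strip are the heart of the filtering loop above. The following is a minimal, self-contained sketch of that step in isolation; the ColumnTupleFilterSketch class, the toMapKey method, and the sample "map_" prefix are hypothetical illustrations, not part of the Hive source.

  import java.nio.charset.StandardCharsets;
  import java.util.Arrays;

  // Hypothetical sketch of the filter-and-strip step performed in parse():
  // keep a tuple only if its family matches and its qualifier carries the
  // prefix, then drop the prefix before using the remainder as the map key.
  public class ColumnTupleFilterSketch {
    static String toMapKey(byte[] cf, byte[] cq, byte[] mappedFamily, String qualifierPrefix) {
      if (!Arrays.equals(cf, mappedFamily)) {
        return null; // different column family: not part of this map column
      }
      String qualifier = new String(cq, StandardCharsets.UTF_8);
      if (!qualifier.startsWith(qualifierPrefix)) {
        return null; // prefix mismatch: belongs to some other mapped column
      }
      // The prefix was appended at serialization time, so remove it on the way out
      return qualifier.substring(qualifierPrefix.length());
    }

    public static void main(String[] args) {
      byte[] family = "cf".getBytes(StandardCharsets.UTF_8);
      System.out.println(toMapKey(family, "map_age".getBytes(StandardCharsets.UTF_8), family, "map_")); // age
      System.out.println(toMapKey(family, "other".getBytes(StandardCharsets.UTF_8), family, "map_"));   // null
    }
  }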
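The isNull(oi.getNullSequence(), ...) call decides whether a cell's bytes represent SQL NULL by comparing them against the SerDe's configured null sequence (commonly "\N" in Hive's lazy SerDes). Here is a minimal sketch of that comparison under those assumptions; NullSequenceSketch and isNullSequence are hypothetical names, not the actual Hive helper.

  import java.nio.charset.StandardCharsets;

  // Hypothetical sketch of the null-sequence test parse() applies to each value:
  // a cell is treated as NULL when its bytes exactly match the null sequence.
  public class NullSequenceSketch {
    static boolean isNullSequence(byte[] nullSequence, byte[] bytes, int start, int length) {
      if (length != nullSequence.length) {
        return false;
      }
      for (int i = 0; i < length; i++) {
        if (bytes[start + i] != nullSequence[i]) {
          return false;
        }
      }
      return true;
    }

    public static void main(String[] args) {
      byte[] nullSeq = "\\N".getBytes(StandardCharsets.UTF_8); // backslash + N
      System.out.println(isNullSequence(nullSeq, "\\N".getBytes(StandardCharsets.UTF_8), 0, 2)); // true
      System.out.println(isNullSequence(nullSeq, "42".getBytes(StandardCharsets.UTF_8), 0, 2));  // false
    }
  }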