Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.
Class InListExpression, method toString().
@Override
public String toString() {
    int maxToStringLen = 200;
    Expression firstChild = children.get(0);
    PDataType type = firstChild.getDataType();
    StringBuilder buf = new StringBuilder(firstChild + " IN (");
    for (ImmutableBytesPtr value : values) {
        if (firstChild.getSortOrder() != null) {
            // Coerce the stored bytes from the child's sort order to the
            // default (ascending) order before rendering the literal.
            type.coerceBytes(value, type, firstChild.getSortOrder(), SortOrder.getDefault());
        }
        buf.append(type.toStringLiteral(value, null));
        buf.append(',');
        if (buf.length() >= maxToStringLen) {
            buf.append("... ");
            break;
        }
    }
    // Overwrite the trailing comma (or the space after "... ") with ')'.
    buf.setCharAt(buf.length() - 1, ')');
    return buf.toString();
}
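A minimal, self-contained sketch of the same truncation pattern (the class, method, and inputs are illustrative, not Phoenix code): once the buffer crosses the cap, "... " is appended and the loop exits; the final setCharAt then overwrites either the trailing comma or the trailing space with the closing parenthesis.

import java.util.Arrays;
import java.util.List;

public class InClauseRenderDemo {
    // Mirrors the truncation pattern in toString() above.
    static String render(String lhs, List<String> literals, int maxLen) {
        StringBuilder buf = new StringBuilder(lhs + " IN (");
        for (String literal : literals) {
            buf.append(literal);
            buf.append(',');
            if (buf.length() >= maxLen) {
                buf.append("... "); // trailing space left for setCharAt to overwrite
                break;
            }
        }
        buf.setCharAt(buf.length() - 1, ')');
        return buf.toString();
    }

    public static void main(String[] args) {
        System.out.println(render("K", Arrays.asList("1", "2", "3"), 200)); // K IN (1,2,3)
        System.out.println(render("K", Arrays.asList("1", "2", "3"), 10));  // K IN (1,2,...)
    }
}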
Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.
Class InListExpression, method readFields().
@Override
public void readFields(DataInput input) throws IOException {
    super.readFields(input);
    // Unused, but left for b/w compat. TODO: remove in next major release
    input.readBoolean();
    fixedWidth = WritableUtils.readVInt(input);
    byte[] valuesBytes = Bytes.readByteArray(input);
    valuesByteLength = valuesBytes.length;
    int len = fixedWidth == -1 ? WritableUtils.readVInt(input) : valuesByteLength / fixedWidth;
    // TODO: consider using a regular HashSet as we never serialize from the server-side
    values = Sets.newLinkedHashSetWithExpectedSize(len);
    int offset = 0;
    int i = 0;
    if (i < len) {
        // The first value read doubles as minValue.
        offset = readValue(input, valuesBytes, offset, minValue = new ImmutableBytesPtr());
        while (++i < len - 1) {
            offset = readValue(input, valuesBytes, offset, new ImmutableBytesPtr());
        }
        if (i < len) {
            // The last value read doubles as maxValue.
            offset = readValue(input, valuesBytes, offset, maxValue = new ImmutableBytesPtr());
        } else {
            // Single-value list: min and max coincide.
            maxValue = minValue;
        }
    } else {
        minValue = maxValue = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY);
    }
}
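For reference, the stream layout consumed here is: the superclass fields, the ignored legacy boolean, a vint fixedWidth (-1 for variable-width types), the concatenated value bytes written as a vint length plus payload (the Bytes.readByteArray format), and, for variable-width values only, a vint count followed by one vint length per value, which readValue (next) picks off one at a time.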
Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.
Class InListExpression, method readValue().
private int readValue(DataInput input, byte[] valuesBytes, int offset, ImmutableBytesPtr ptr) throws IOException {
    int valueLen = fixedWidth == -1 ? WritableUtils.readVInt(input) : fixedWidth;
    // Point the caller-supplied ptr at this value's slice of the shared
    // array (no copy), so readFields can capture minValue/maxValue, then
    // add that same ptr to the value set.
    ptr.set(valuesBytes, offset, valueLen);
    values.add(ptr);
    return offset + valueLen;
}
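To make the layout concrete, here is a minimal, self-contained round-trip of the variable-width value block that readFields() and readValue() consume. Everything here is illustrative except WritableUtils; the writer half is an assumption mirroring what the reader expects, not Phoenix's actual write() method.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class ValueBlockDemo {
    public static void main(String[] args) throws IOException {
        byte[][] values = { { 1 }, { 2, 3 }, { 4, 5, 6 } };

        // Write: concatenated payload (vint length + bytes), then a count,
        // then one vint length per value.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        ByteArrayOutputStream payload = new ByteArrayOutputStream();
        for (byte[] v : values) {
            payload.write(v);
        }
        byte[] valuesBytes = payload.toByteArray();
        WritableUtils.writeVInt(out, valuesBytes.length); // as Bytes.writeByteArray would
        out.write(valuesBytes);
        WritableUtils.writeVInt(out, values.length);      // count (variable-width case)
        for (byte[] v : values) {
            WritableUtils.writeVInt(out, v.length);
        }

        // Read back, mirroring readFields()/readValue(): each vint length
        // advances an offset into the shared valuesBytes array.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        byte[] read = new byte[WritableUtils.readVInt(in)];
        in.readFully(read);
        int len = WritableUtils.readVInt(in);
        int offset = 0;
        for (int i = 0; i < len; i++) {
            int valueLen = WritableUtils.readVInt(in);
            System.out.println("value " + i + ": " + valueLen + " bytes at offset " + offset);
            offset += valueLen;
        }
    }
}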
Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.
Class DistinctValueWithCountServerAggregator, method evaluate().
@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
    // This serializes the Map. The format is as follows:
    // Map size (VInt, i.e. 1 to 5 bytes) +
    // ( key length [VInt, i.e. 1 to 5 bytes] + key bytes + value [VInt, i.e. 1 to 5 bytes] )*
    int serializationSize = countMapSerializationSize();
    buffer = new byte[serializationSize];
    // Byte 0 is reserved as a marker distinguishing raw payloads from
    // Snappy-compressed ones (overwritten with COMPRESS_MARKER below).
    int offset = 1;
    offset += ByteUtil.vintToBytes(buffer, offset, this.valueVsCount.size());
    for (Entry<ImmutableBytesPtr, Integer> entry : this.valueVsCount.entrySet()) {
        ImmutableBytesPtr key = entry.getKey();
        offset += ByteUtil.vintToBytes(buffer, offset, key.getLength());
        System.arraycopy(key.get(), key.getOffset(), buffer, offset, key.getLength());
        offset += key.getLength();
        offset += ByteUtil.vintToBytes(buffer, offset, entry.getValue().intValue());
    }
    if (serializationSize > compressThreshold) {
        // The size of the map serialization is above the threshold, so do Snappy compression here.
        byte[] compressed = new byte[COMPRESS_MARKER.length + Snappy.maxCompressedLength(buffer.length)];
        System.arraycopy(COMPRESS_MARKER, 0, compressed, 0, COMPRESS_MARKER.length);
        int compressedLen = Snappy.compress(buffer, 1, buffer.length - 1, compressed, COMPRESS_MARKER.length);
        ptr.set(compressed, 0, compressedLen + 1);
        return true;
    }
    ptr.set(buffer, 0, offset);
    return true;
}
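For the uncompressed case, the payload can be read back by skipping the marker byte and walking the vints. A hedged sketch, assuming ByteUtil.vintToBytes emits Hadoop-style vints (so WritableUtils.readVInt can consume them); a real client would first compare the leading byte against COMPRESS_MARKER and Snappy-uncompress when it matches, as the client-side counterpart presumably does.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.io.WritableUtils;

public class CountMapDecodeDemo {
    // Decodes the uncompressed layout built in evaluate():
    // marker byte, map size vint, then (key length vint, key bytes, count vint)*.
    static Map<String, Integer> decode(byte[] buffer) throws IOException {
        DataInputStream in = new DataInputStream(
                new ByteArrayInputStream(buffer, 1, buffer.length - 1)); // skip marker byte
        int size = WritableUtils.readVInt(in);
        Map<String, Integer> counts = new LinkedHashMap<>();
        for (int i = 0; i < size; i++) {
            byte[] key = new byte[WritableUtils.readVInt(in)];
            in.readFully(key);
            counts.put(new String(key), WritableUtils.readVInt(in));
        }
        return counts;
    }
}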
Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.
Class IndexMaintainer, method fromProto().
public static IndexMaintainer fromProto(ServerCachingProtos.IndexMaintainer proto, RowKeySchema dataTableRowKeySchema, boolean isDataTableSalted) throws IOException {
    IndexMaintainer maintainer = new IndexMaintainer(dataTableRowKeySchema, isDataTableSalted);
    maintainer.nIndexSaltBuckets = proto.getSaltBuckets();
    maintainer.isMultiTenant = proto.getIsMultiTenant();
    maintainer.viewIndexId = proto.hasViewIndexId() ? proto.getViewIndexId().toByteArray() : null;
    List<ServerCachingProtos.ColumnReference> indexedColumnsList = proto.getIndexedColumnsList();
    maintainer.indexedColumns = new HashSet<ColumnReference>(indexedColumnsList.size());
    for (ServerCachingProtos.ColumnReference colRefFromProto : indexedColumnsList) {
        maintainer.indexedColumns.add(new ColumnReference(colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier().toByteArray()));
    }
    List<Integer> indexedColumnTypes = proto.getIndexedColumnTypeOrdinalList();
    maintainer.indexedColumnTypes = new ArrayList<PDataType>(indexedColumnTypes.size());
    for (Integer typeOrdinal : indexedColumnTypes) {
        maintainer.indexedColumnTypes.add(PDataType.values()[typeOrdinal]);
    }
    maintainer.indexTableName = proto.getIndexTableName().toByteArray();
    maintainer.rowKeyOrderOptimizable = proto.getRowKeyOrderOptimizable();
    maintainer.dataEmptyKeyValueCF = proto.getDataTableEmptyKeyValueColFamily().toByteArray();
    ServerCachingProtos.ImmutableBytesWritable emptyKeyValueColFamily = proto.getEmptyKeyValueColFamily();
    maintainer.emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueColFamily.getByteArray().toByteArray(), emptyKeyValueColFamily.getOffset(), emptyKeyValueColFamily.getLength());
    maintainer.indexedExpressions = new ArrayList<>();
    try (ByteArrayInputStream stream = new ByteArrayInputStream(proto.getIndexedExpressions().toByteArray())) {
        DataInput input = new DataInputStream(stream);
        while (stream.available() > 0) {
            int expressionOrdinal = WritableUtils.readVInt(input);
            Expression expression = ExpressionType.values()[expressionOrdinal].newInstance();
            expression.readFields(input);
            maintainer.indexedExpressions.add(expression);
        }
    }
    maintainer.rowKeyMetaData = newRowKeyMetaData(maintainer, dataTableRowKeySchema, maintainer.indexedExpressions.size(), isDataTableSalted, maintainer.isMultiTenant);
    try (ByteArrayInputStream stream = new ByteArrayInputStream(proto.getRowKeyMetadata().toByteArray())) {
        DataInput input = new DataInputStream(stream);
        maintainer.rowKeyMetaData.readFields(input);
    }
    maintainer.nDataCFs = proto.getNumDataTableColFamilies();
    maintainer.indexWALDisabled = proto.getIndexWalDisabled();
    maintainer.estimatedIndexRowKeyBytes = proto.getIndexRowKeyByteSize();
    maintainer.immutableRows = proto.getImmutable();
    List<ColumnInfo> indexedColumnInfoList = proto.getIndexedColumnInfoList();
    maintainer.indexedColumnsInfo = Sets.newHashSet();
    for (ColumnInfo info : indexedColumnInfoList) {
        maintainer.indexedColumnsInfo.add(new Pair<>(info.getFamilyName(), info.getColumnName()));
    }
    // proto doesn't support single byte so need an explicit cast here
    maintainer.encodingScheme = PTable.QualifierEncodingScheme.fromSerializedValue((byte) proto.getEncodingScheme());
    maintainer.immutableStorageScheme = PTable.ImmutableStorageScheme.fromSerializedValue((byte) proto.getImmutableStorageScheme());
    maintainer.isLocalIndex = proto.getIsLocalIndex();
    List<ServerCachingProtos.ColumnReference> dataTableColRefsForCoveredColumnsList = proto.getDataTableColRefForCoveredColumnsList();
    List<ServerCachingProtos.ColumnReference> indexTableColRefsForCoveredColumnsList = proto.getIndexTableColRefForCoveredColumnsList();
    maintainer.coveredColumnsMap = Maps.newHashMapWithExpectedSize(dataTableColRefsForCoveredColumnsList.size());
    boolean encodedColumnNames = maintainer.encodingScheme != NON_ENCODED_QUALIFIERS;
    Iterator<ServerCachingProtos.ColumnReference> indexTableColRefItr = indexTableColRefsForCoveredColumnsList.iterator();
    for (ServerCachingProtos.ColumnReference colRefFromProto : dataTableColRefsForCoveredColumnsList) {
        ColumnReference dataTableColRef = new ColumnReference(colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier().toByteArray());
        ColumnReference indexTableColRef;
        if (encodedColumnNames) {
            ServerCachingProtos.ColumnReference fromProto = indexTableColRefItr.next();
            indexTableColRef = new ColumnReference(fromProto.getFamily().toByteArray(), fromProto.getQualifier().toByteArray());
        } else {
            byte[] cq = IndexUtil.getIndexColumnName(dataTableColRef.getFamily(), dataTableColRef.getQualifier());
            byte[] cf = maintainer.isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(dataTableColRef.getFamily()) : dataTableColRef.getFamily();
            indexTableColRef = new ColumnReference(cf, cq);
        }
        maintainer.coveredColumnsMap.put(dataTableColRef, indexTableColRef);
    }
    maintainer.initCachedState();
    return maintainer;
}
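A hedged usage sketch: the maintainer typically arrives as serialized proto bytes, which the caller parses and hands to fromProto(). The helper method, its arguments, and their origins are illustrative; only parseFrom() (standard on protobuf-generated messages) and the fromProto() signature above are real.

// Parse the shipped bytes into the generated proto, then rebuild the
// maintainer; fromProto() wires up everything shown above and finishes
// with initCachedState().
static IndexMaintainer load(byte[] protoBytes, RowKeySchema schema, boolean salted) throws IOException {
    ServerCachingProtos.IndexMaintainer proto = ServerCachingProtos.IndexMaintainer.parseFrom(protoBytes);
    return IndexMaintainer.fromProto(proto, schema, salted);
}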