Use of org.apache.phoenix.schema.KeyValueSchema in project phoenix by apache.
The class PhoenixRuntime, method decodeColumnValues.
/**
*
* @param conn connection that was used for reading/generating value.
* @param fullTableName fully qualified table name
* @param value byte value of the columns concatenated as a single byte array; see {@link #encodeColumnValues(Connection, String, Object[], List)}
* @param columns list of the columns whose values are present in the byte array. The columns must be
* in the same order as their values appear in the byte array. Each column is identified by its
* family name, if present, and its column name.
* @return decoded values for each column
* @throws SQLException
*
*/
public static Object[] decodeColumnValues(Connection conn, String fullTableName, byte[] value, List<Pair<String, String>> columns) throws SQLException {
    PTable table = getTable(conn, fullTableName);
    KeyValueSchema kvSchema = buildKeyValueSchema(getColumns(table, columns));
    ImmutableBytesWritable ptr = new ImmutableBytesWritable(value);
    ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema);
    valueSet.clear();
    valueSet.or(ptr);
    int maxOffset = ptr.getOffset() + ptr.getLength();
    Boolean hasValue;
    kvSchema.iterator(ptr);
    int i = 0;
    List<Object> values = new ArrayList<Object>();
    // Note the parentheses: we assign the (nullable) Boolean first, then test it against null.
    while ((hasValue = kvSchema.next(ptr, i, maxOffset, valueSet)) != null) {
        if (hasValue) {
            values.add(kvSchema.getField(i).getDataType().toObject(ptr));
        }
        i++;
    }
    return values.toArray();
}
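A minimal round-trip sketch of the encode/decode pair, assuming a local Phoenix connection and a hypothetical table T created as CREATE TABLE T (PK VARCHAR PRIMARY KEY, A.V1 VARCHAR, A.V2 INTEGER); the JDBC URL, table, columns, and values are illustrative assumptions, not taken from the source above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.util.PhoenixRuntime;

public class EncodeDecodeRoundTrip {
    public static void main(String[] args) throws Exception {
        // Assumed table: CREATE TABLE T (PK VARCHAR PRIMARY KEY, A.V1 VARCHAR, A.V2 INTEGER)
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Family first, column name second; order must match the values array.
            List<Pair<String, String>> columns = Arrays.asList(
                    new Pair<String, String>("A", "V1"),
                    new Pair<String, String>("A", "V2"));
            Object[] in = new Object[] { "hello", 42 };
            // Encode the values into one byte array using the table's KeyValueSchema...
            byte[] encoded = PhoenixRuntime.encodeColumnValues(conn, "T", in, columns);
            // ...then decode them back in the same column order.
            Object[] out = PhoenixRuntime.decodeColumnValues(conn, "T", encoded, columns);
            System.out.println(Arrays.toString(out)); // [hello, 42]
        }
    }
}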
Use of org.apache.phoenix.schema.KeyValueSchema in project phoenix by apache.
The class BaseQueryPlan, method iterator.
public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = context.getScan();
    }
    /*
     * For aggregate queries, we still need to let the AggregationPlan
     * proceed so that we can return proper aggregates even if there are
     * no rows to be scanned.
     */
    if (context.getScanRanges() == ScanRanges.NOTHING && !getStatement().isAggregate()) {
        return ResultIterator.EMPTY_ITERATOR;
    }
    if (tableRef == TableRef.EMPTY_TABLE_REF) {
        return newIterator(scanGrouper, scan);
    }
    // Set miscellaneous scan attributes. This is the last chance to set them before we
    // clone the scan for each parallelized chunk.
    TableRef tableRef = context.getCurrentTable();
    PTable table = tableRef.getTable();
    if (dynamicFilter != null) {
        WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);
    }
    if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
        ScanUtil.setReversed(scan);
        // Hack for working around PHOENIX-3121 and HBASE-16296.
        // TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
        int scannerCacheSize = context.getStatement().getFetchSize();
        if (limit != null && limit % scannerCacheSize == 0) {
            scan.setCaching(scannerCacheSize + 1);
        }
    }
    if (statement.getHint().hasHint(Hint.SMALL)) {
        scan.setSmall(true);
    }
    PhoenixConnection connection = context.getConnection();
    // Set read consistency.
    if (table.getType() != PTableType.SYSTEM) {
        scan.setConsistency(connection.getConsistency());
    }
    // TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
    if (!table.isTransactional()) {
        // Get the time range of the row_timestamp column.
        TimeRange rowTimestampRange = context.getScanRanges().getRowTimestampRange();
        // Get the time range already set on the scan.
        TimeRange scanTimeRange = scan.getTimeRange();
        Long scn = connection.getSCN();
        if (scn == null) {
            // If we haven't resolved the time at the beginning of compilation, don't
            // force the lookup on the server, but use HConstants.LATEST_TIMESTAMP instead.
            scn = tableRef.getTimeStamp();
            if (scn == QueryConstants.UNSET_TIMESTAMP) {
                scn = HConstants.LATEST_TIMESTAMP;
            }
        }
        try {
            TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
            if (timeRangeToUse == null) {
                return ResultIterator.EMPTY_ITERATOR;
            }
            scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    byte[] tenantIdBytes;
    if (table.isMultiTenant()) {
        tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
    } else {
        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
    }
    ScanUtil.setTenantId(scan, tenantIdBytes);
    String customAnnotations = LogUtil.customAnnotationsToString(connection);
    ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
    // Set local index related scan attributes.
    if (table.getIndexType() == IndexType.LOCAL) {
        ScanUtil.setLocalIndex(scan);
        Set<PColumn> dataColumns = context.getDataColumns();
        // If the query projects data columns that are not present in the
        // index, we need to join back to the data table.
        if (!dataColumns.isEmpty()) {
            PTable parentTable = context.getCurrentTable().getTable();
            String parentSchemaName = parentTable.getParentSchemaName().getString();
            String parentTableName = parentTable.getParentTableName().getString();
            final ParseNodeFactory FACTORY = new ParseNodeFactory();
            // TODO: is it necessary to re-resolve the table?
            TableRef dataTableRef = FromCompiler.getResolver(FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), context.getConnection()).resolveTable(parentSchemaName, parentTableName);
            PTable dataTable = dataTableRef.getTable();
            // Set data columns to be joined back from the data table.
            serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
            KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
            // Set the key value schema of the data columns.
            serializeSchemaIntoScan(scan, schema);
            // Set the index maintainer of the local index.
            serializeIndexMaintainerIntoScan(scan, dataTable);
            // Set view constants if they exist.
            serializeViewConstantsIntoScan(scan, dataTable);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
    }
    ResultIterator iterator = newIterator(scanGrouper, scan);
    iterator = dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) {
        @Override
        public void close() throws SQLException {
            try {
                super.close();
            } finally {
                SQLCloseables.closeAll(dependencies);
            }
        }
    };
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
    }
    // Wrap the iterator so we start/end tracing as we expect.
    TraceScope scope = Tracing.startNewSpan(context.getConnection(), "Creating basic query for " + getPlanSteps(iterator));
    return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
}
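ScanUtil.intersectTimeRange is Phoenix-internal, so as a standalone illustration of the step above, here is a sketch of the intersection logic under the assumption of half-open [min, max) ranges as in HBase's TimeRange: intersect the ROW_TIMESTAMP range with the scan's existing range, cap the upper bound at the SCN, and treat an empty result as "nothing to scan" (the EMPTY_ITERATOR case). This is an illustration, not the actual Phoenix implementation.

public class TimeRangeIntersectionSketch {
    // Returns the intersection of two half-open [min, max) ranges, capped at
    // scn, or null when the intersection is empty (nothing can match).
    static long[] intersect(long[] rowTsRange, long[] scanRange, long scn) {
        long min = Math.max(rowTsRange[0], scanRange[0]);
        long max = Math.min(Math.min(rowTsRange[1], scanRange[1]), scn);
        return min < max ? new long[] { min, max } : null;
    }

    public static void main(String[] args) {
        // A ROW_TIMESTAMP range of [100, 200) intersected with a scan range of
        // [0, 150) under SCN 180 yields [100, 150).
        long[] r = intersect(new long[] { 100, 200 }, new long[] { 0, 150 }, 180);
        System.out.println(r[0] + ".." + r[1]); // 100..150
    }
}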
Use of org.apache.phoenix.schema.KeyValueSchema in project phoenix by apache.
The class HashJoinRegionScanner, method processResults.
private void processResults(List<Cell> result, boolean hasBatchLimit) throws IOException {
    if (result.isEmpty())
        return;
    Tuple tuple = useQualifierAsListIndex ? new PositionBasedResultTuple(result) : new ResultTuple(Result.create(result));
    // For backward compatibility: in newer versions forceProjection()
    // always returns true.
    if (joinInfo.forceProjection()) {
        tuple = projector.projectResults(tuple, useNewValueColumnQualifier);
    }
    // TODO: fix the Scanner.next() and Scanner.nextRaw() methods below as well.
    if (hasBatchLimit)
        throw new UnsupportedOperationException("Cannot support join operations in scans with limit");
    int count = joinInfo.getJoinIds().length;
    boolean cont = true;
    for (int i = 0; i < count; i++) {
        if (!(joinInfo.earlyEvaluation()[i]) || hashCaches[i] == null)
            continue;
        ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(tuple, joinInfo.getJoinExpressions()[i]);
        tempTuples[i] = hashCaches[i].get(key);
        JoinType type = joinInfo.getJoinTypes()[i];
        if (((type == JoinType.Inner || type == JoinType.Semi) && tempTuples[i] == null) || (type == JoinType.Anti && tempTuples[i] != null)) {
            cont = false;
            break;
        }
    }
    if (cont) {
        if (projector == null) {
            int dup = 1;
            for (int i = 0; i < count; i++) {
                dup *= (tempTuples[i] == null ? 1 : tempTuples[i].size());
            }
            for (int i = 0; i < dup; i++) {
                resultQueue.offer(tuple);
            }
        } else {
            KeyValueSchema schema = joinInfo.getJoinedSchema();
            if (!joinInfo.forceProjection()) {
                // Backward compatibility.
                tuple = projector.projectResults(tuple, useNewValueColumnQualifier);
            }
            resultQueue.offer(tuple);
            for (int i = 0; i < count; i++) {
                boolean earlyEvaluation = joinInfo.earlyEvaluation()[i];
                JoinType type = joinInfo.getJoinTypes()[i];
                if (earlyEvaluation && (type == JoinType.Semi || type == JoinType.Anti))
                    continue;
                int j = resultQueue.size();
                while (j-- > 0) {
                    Tuple lhs = resultQueue.poll();
                    if (!earlyEvaluation) {
                        ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(lhs, joinInfo.getJoinExpressions()[i]);
                        tempTuples[i] = hashCaches[i].get(key);
                        if (tempTuples[i] == null) {
                            if (type == JoinType.Inner || type == JoinType.Semi) {
                                continue;
                            } else if (type == JoinType.Anti) {
                                resultQueue.offer(lhs);
                                continue;
                            }
                        }
                    }
                    if (tempTuples[i] == null) {
                        Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ? lhs : TupleProjector.mergeProjectedValue((ProjectedValueTuple) lhs, schema, tempDestBitSet, null, joinInfo.getSchemas()[i], tempSrcBitSet[i], joinInfo.getFieldPositions()[i], useNewValueColumnQualifier);
                        resultQueue.offer(joined);
                        continue;
                    }
                    for (Tuple t : tempTuples[i]) {
                        Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ? lhs : TupleProjector.mergeProjectedValue((ProjectedValueTuple) lhs, schema, tempDestBitSet, t, joinInfo.getSchemas()[i], tempSrcBitSet[i], joinInfo.getFieldPositions()[i], useNewValueColumnQualifier);
                        resultQueue.offer(joined);
                    }
                }
            }
        }
        // Apply the post-join filter.
        Expression postFilter = joinInfo.getPostJoinFilterExpression();
        if (postFilter != null) {
            for (Iterator<Tuple> iter = resultQueue.iterator(); iter.hasNext(); ) {
                Tuple t = iter.next();
                postFilter.reset();
                ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
                try {
                    if (!postFilter.evaluate(t, tempPtr)) {
                        iter.remove();
                        continue;
                    }
                } catch (IllegalDataException e) {
                    iter.remove();
                    continue;
                }
                Boolean b = (Boolean) postFilter.getDataType().toObject(tempPtr);
                if (!b.booleanValue()) {
                    iter.remove();
                }
            }
        }
    }
}
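The queue-draining pattern above (int j = resultQueue.size(); while (j-- > 0) ...) is what expands each queued tuple into one output per matching row, producing the cross product across the joined tables. A self-contained sketch of just that pattern, with strings standing in for Phoenix tuples and plain lists standing in for the hash caches:

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.List;

public class JoinExpansionSketch {
    public static void main(String[] args) {
        Deque<String> resultQueue = new ArrayDeque<String>();
        resultQueue.offer("lhs");
        // Two joined tables: the first matches two cached rows, the second one.
        List<List<String>> hashCacheMatches = Arrays.asList(
                Arrays.asList("r1", "r2"),
                Arrays.asList("s1"));
        for (List<String> matches : hashCacheMatches) {
            int j = resultQueue.size();
            while (j-- > 0) { // drain only the tuples queued before this table
                String lhs = resultQueue.poll();
                for (String t : matches) { // one joined tuple per matching row
                    resultQueue.offer(lhs + "+" + t);
                }
            }
        }
        System.out.println(resultQueue); // [lhs+r1+s1, lhs+r2+s1]
    }
}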
Use of org.apache.phoenix.schema.KeyValueSchema in project phoenix by apache.
The class PhoenixRuntime, method encodeValues.
/**
*
* @param conn connection that was used for reading/generating value.
* @param fullTableName fully qualified table name
* @param values values of the columns
* @param columns list of pairs of column family and column name, where the family is the first part
* and the column name is the second part of each pair. The column family is optional and hence nullable.
* Columns in the list have to be in the same order as the order of occurrence of their values in the object array.
* @return values encoded in a byte array
* @throws SQLException
* @see #decodeValues(Connection, String, byte[], List)
*/
@Deprecated
public static byte[] encodeValues(Connection conn, String fullTableName, Object[] values, List<Pair<String, String>> columns) throws SQLException {
    PTable table = getTable(conn, fullTableName);
    List<PColumn> pColumns = getPColumns(table, columns);
    List<Expression> expressions = new ArrayList<Expression>(pColumns.size());
    int i = 0;
    for (PColumn col : pColumns) {
        Object value = values[i];
        // For purposes of encoding, the sort order of the columns doesn't matter.
        Expression expr = LiteralExpression.newConstant(value, col.getDataType(), col.getMaxLength(), col.getScale());
        expressions.add(expr);
        i++;
    }
    KeyValueSchema kvSchema = buildKeyValueSchema(pColumns);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema);
    return kvSchema.toBytes(expressions.toArray(new Expression[0]), valueSet, ptr);
}
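A sketch of how the columns argument is built for this (deprecated) method, showing the nullable column family half of each pair; the table T and columns V1 and A.V2 are hypothetical:

import java.sql.Connection;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.util.PhoenixRuntime;

public class EncodeValuesSketch {
    @SuppressWarnings("deprecation")
    static byte[] encode(Connection conn) throws SQLException {
        // The family half of each pair is nullable: pass null for a column in
        // the default column family. List order must match the values array.
        List<Pair<String, String>> columns = Arrays.asList(
                new Pair<String, String>(null, "V1"), // default column family
                new Pair<String, String>("A", "V2")); // explicit family A
        Object[] values = new Object[] { "hello", 42 };
        return PhoenixRuntime.encodeValues(conn, "T", values, columns);
    }
}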
Use of org.apache.phoenix.schema.KeyValueSchema in project phoenix by apache.
The class PhoenixRuntime, method decodeValues.
/**
*
* @param conn connection that was used for reading/generating value.
* @param fullTableName fully qualified table name
* @param value byte value of the columns concatenated as a single byte array; see {@link #encodeColumnValues(Connection, String, Object[], List)}
* @param columns list of the columns whose values are present in the byte array. The columns must be
* in the same order as their values appear in the byte array. Each column is identified by its
* family name, if present, and its column name.
* @return decoded values for each column
* @throws SQLException
*
*/
@Deprecated
public static Object[] decodeValues(Connection conn, String fullTableName, byte[] value, List<Pair<String, String>> columns) throws SQLException {
    PTable table = getTable(conn, fullTableName);
    KeyValueSchema kvSchema = buildKeyValueSchema(getPColumns(table, columns));
    ImmutableBytesWritable ptr = new ImmutableBytesWritable(value);
    ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema);
    valueSet.clear();
    valueSet.or(ptr);
    int maxOffset = ptr.getOffset() + ptr.getLength();
    Boolean hasValue;
    kvSchema.iterator(ptr);
    int i = 0;
    List<Object> values = new ArrayList<Object>();
    // Note the parentheses: we assign the (nullable) Boolean first, then test it against null.
    while ((hasValue = kvSchema.next(ptr, i, maxOffset, valueSet)) != null) {
        if (hasValue) {
            values.add(kvSchema.getField(i).getDataType().toObject(ptr));
        }
        i++;
    }
    return values.toArray();
}