use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
the class IndexMaintainer method hasIndexedColumnChanged.
private boolean hasIndexedColumnChanged(ValueGetter oldState, Collection<KeyValue> pendingUpdates) throws IOException {
    if (pendingUpdates.isEmpty()) {
        return false;
    }
    Map<ColumnReference, Cell> newState = Maps.newHashMapWithExpectedSize(pendingUpdates.size());
    for (Cell kv : pendingUpdates) {
        newState.put(new ColumnReference(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv)), kv);
    }
    for (ColumnReference ref : indexedColumns) {
        Cell newValue = newState.get(ref);
        if (newValue != null) {
            // Indexed column has potentially changed
            ImmutableBytesWritable oldValue = oldState.getLatestValue(ref);
            boolean newValueSetAsNull = (newValue.getTypeByte() == Type.DeleteColumn.getCode() || newValue.getTypeByte() == Type.Delete.getCode() || CellUtil.matchingValue(newValue, HConstants.EMPTY_BYTE_ARRAY));
            // If the new value is being set to null and the old value is already null,
            // then just skip to the next indexed column.
            if (newValueSetAsNull && oldValue == null) {
                continue;
            }
            if ((oldValue == null && !newValueSetAsNull) || (oldValue != null && newValueSetAsNull)) {
                return true;
            }
            // If the old value is different from the new value, the index row needs to be deleted
            if (Bytes.compareTo(oldValue.get(), oldValue.getOffset(), oldValue.getLength(), newValue.getValueArray(), newValue.getValueOffset(), newValue.getValueLength()) != 0) {
                return true;
            }
        }
    }
    return false;
}
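The subtle part of this check is what counts as setting an indexed column to null: a DeleteColumn marker, a Delete marker, and an empty value are all treated the same way. A minimal standalone sketch of that classification, assuming plain HBase client types; the row, family, and qualifier bytes here are invented purely for illustration:
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class NullUpdateCheckSketch {
    // Mirrors the newValueSetAsNull test above: a DeleteColumn/Delete marker or an
    // empty value is treated the same as setting the indexed column to null.
    static boolean isSetAsNull(Cell newValue) {
        return newValue.getTypeByte() == KeyValue.Type.DeleteColumn.getCode()
                || newValue.getTypeByte() == KeyValue.Type.Delete.getCode()
                || CellUtil.matchingValue(newValue, HConstants.EMPTY_BYTE_ARRAY);
    }

    public static void main(String[] args) {
        byte[] row = Bytes.toBytes("r1");       // hypothetical row key
        byte[] fam = Bytes.toBytes("0");        // hypothetical column family
        byte[] qual = Bytes.toBytes("V");       // hypothetical qualifier
        Cell put = new KeyValue(row, fam, qual, 1L, Bytes.toBytes("x"));               // real value
        Cell emptyPut = new KeyValue(row, fam, qual, 2L, HConstants.EMPTY_BYTE_ARRAY); // empty value
        Cell delete = new KeyValue(row, fam, qual, 3L, KeyValue.Type.DeleteColumn);    // delete marker
        System.out.println(isSetAsNull(put));      // false
        System.out.println(isSetAsNull(emptyPut)); // true
        System.out.println(isSetAsNull(delete));   // true
    }
}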
use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
the class IndexManagementUtil method newLocalStateScan.
public static Scan newLocalStateScan(Scan scan, List<? extends Iterable<? extends ColumnReference>> refsArray) {
    Scan s = scan;
    if (scan == null) {
        s = new Scan();
    }
    // raw scan, so delete markers and not-yet-collected deleted cells are returned too
    s.setRaw(true);
    // add the necessary columns to the scan
    for (Iterable<? extends ColumnReference> refs : refsArray) {
        for (ColumnReference ref : refs) {
            s.addFamily(ref.getFamily());
        }
    }
    // read all versions, not just the latest
    s.setMaxVersions();
    return s;
}
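A hypothetical caller might use this to read back the current state of one data row before applying an index update. In the sketch below the family/qualifier names, the stop-row construction, and the IndexManagementUtil import path are assumptions for illustration, not taken from the snippet above:
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
import org.apache.phoenix.hbase.index.util.IndexManagementUtil;

public class LocalStateScanSketch {
    // Builds a raw, all-versions scan over the single row being updated,
    // restricted to the column families the index actually references.
    static Scan scanForRow(byte[] dataRowKey) {
        List<ColumnReference> indexed = Arrays.asList(
                new ColumnReference(Bytes.toBytes("0"), Bytes.toBytes("NAME")));   // placeholder columns
        List<ColumnReference> covered = Arrays.asList(
                new ColumnReference(Bytes.toBytes("0"), Bytes.toBytes("CITY")));
        Scan s = IndexManagementUtil.newLocalStateScan(null, Arrays.asList(indexed, covered));
        s.setStartRow(dataRowKey);
        s.setStopRow(Bytes.add(dataRowKey, new byte[] { 0 })); // stop row is exclusive
        return s;
    }
}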
use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
the class IndexMaintainer method initCachedState.
/**
* Init calculated state reading/creating
*/
private void initCachedState() {
    byte[] emptyKvQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(encodingScheme).getFirst();
    dataEmptyKeyValueRef = new ColumnReference(emptyKeyValueCFPtr.copyBytesIfNecessary(), emptyKvQualifier);
    this.allColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size() + coveredColumnsMap.size());
    // columns that are required to evaluate all expressions in indexedExpressions (not including columns in data row key)
    this.indexedColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size());
    for (Expression expression : indexedExpressions) {
        KeyValueExpressionVisitor visitor = new KeyValueExpressionVisitor() {
            @Override
            public Void visit(KeyValueColumnExpression expression) {
                if (indexedColumns.add(new ColumnReference(expression.getColumnFamily(), expression.getColumnQualifier()))) {
                    indexedColumnTypes.add(expression.getDataType());
                }
                return null;
            }
        };
        expression.accept(visitor);
    }
    allColumns.addAll(indexedColumns);
    for (ColumnReference colRef : coveredColumnsMap.keySet()) {
        if (immutableStorageScheme == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
            allColumns.add(colRef);
        } else {
            allColumns.add(new ColumnReference(colRef.getFamily(), QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES));
        }
    }
    int dataPkOffset = (isDataTableSalted ? 1 : 0) + (isMultiTenant ? 1 : 0);
    int nIndexPkColumns = getIndexPkColumnCount();
    dataPkPosition = new int[nIndexPkColumns];
    Arrays.fill(dataPkPosition, EXPRESSION_NOT_PRESENT);
    int numViewConstantColumns = 0;
    BitSet viewConstantColumnBitSet = rowKeyMetaData.getViewConstantColumnBitSet();
    for (int i = dataPkOffset; i < dataRowKeySchema.getFieldCount(); i++) {
        if (!viewConstantColumnBitSet.get(i)) {
            int indexPkPosition = rowKeyMetaData.getIndexPkPosition(i - dataPkOffset);
            this.dataPkPosition[indexPkPosition] = i;
        } else {
            numViewConstantColumns++;
        }
    }
    // Calculate the max number of trailing nulls that we should get rid of after building the index row key.
    // We only get rid of nulls for variable length types, so we have to be careful to consider the type of the
    // index table, not the data type of the data table
    int expressionsPos = indexedExpressions.size();
    int indexPkPos = nIndexPkColumns - numViewConstantColumns - 1;
    while (indexPkPos >= 0) {
        int dataPkPos = dataPkPosition[indexPkPos];
        boolean isDataNullable;
        PDataType dataType;
        if (dataPkPos == EXPRESSION_NOT_PRESENT) {
            isDataNullable = true;
            dataType = indexedExpressions.get(--expressionsPos).getDataType();
        } else {
            Field dataField = dataRowKeySchema.getField(dataPkPos);
            dataType = dataField.getDataType();
            isDataNullable = dataField.isNullable();
        }
        PDataType indexDataType = IndexUtil.getIndexColumnDataType(isDataNullable, dataType);
        if (indexDataType.isFixedWidth()) {
            break;
        }
        indexPkPos--;
    }
    maxTrailingNulls = nIndexPkColumns - indexPkPos - 1;
}
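To make the trailing-null calculation at the end concrete, here is a deliberately simplified standalone sketch: it ignores view-constant columns and the data-to-index type mapping, and the column layout is invented for illustration. It only shows the backwards walk that stops at the first fixed-width index PK column.
public class TrailingNullsSketch {
    public static void main(String[] args) {
        // Hypothetical index PK of four columns whose index-side types are, first
        // to last: CHAR(3) (fixed width) followed by three VARCHARs (variable width).
        boolean[] fixedWidth = { true, false, false, false };
        // Walk backwards from the last index PK column and stop at the first
        // fixed-width type, mirroring the while loop above.
        int pos = fixedWidth.length - 1;
        while (pos >= 0 && !fixedWidth[pos]) {
            pos--;
        }
        int maxTrailingNulls = fixedWidth.length - pos - 1;
        System.out.println(maxTrailingNulls); // 3: that many trailing nulls can be trimmed from the index row key
    }
}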
use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
the class IndexUtil method generateIndexData.
public static List<Mutation> generateIndexData(final PTable table, PTable index, final Map<ImmutableBytesPtr, RowMutationState> valuesMap, List<Mutation> dataMutations, final KeyValueBuilder kvBuilder, PhoenixConnection connection) throws SQLException {
    try {
        final ImmutableBytesPtr ptr = new ImmutableBytesPtr();
        IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
        List<Mutation> indexMutations = Lists.newArrayListWithExpectedSize(dataMutations.size());
        for (final Mutation dataMutation : dataMutations) {
            long ts = MetaDataUtil.getClientTimeStamp(dataMutation);
            ptr.set(dataMutation.getRow());
            /*
             * We only need to generate the additional mutations for a Put for immutable indexes.
             * Deletes of rows are handled by running a re-written query against the index table,
             * and Deletes of column values should never be necessary, as you should never be
             * updating an existing row.
             */
            if (dataMutation instanceof Put) {
                ValueGetter valueGetter = new ValueGetter() {

                    @Override
                    public byte[] getRowKey() {
                        return dataMutation.getRow();
                    }

                    @Override
                    public ImmutableBytesWritable getLatestValue(ColumnReference ref) {
                        // Always return null for the empty key value, which causes the index
                        // maintainer to always treat this Put as a new row.
                        if (isEmptyKeyValue(table, ref)) {
                            return null;
                        }
                        byte[] family = ref.getFamily();
                        byte[] qualifier = ref.getQualifier();
                        Map<byte[], List<Cell>> familyMap = dataMutation.getFamilyCellMap();
                        List<Cell> kvs = familyMap.get(family);
                        if (kvs == null) {
                            return null;
                        }
                        for (Cell kv : kvs) {
                            if (Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), family, 0, family.length) == 0 && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0) {
                                ImmutableBytesPtr ptr = new ImmutableBytesPtr();
                                kvBuilder.getValueAsPtr(kv, ptr);
                                return ptr;
                            }
                        }
                        return null;
                    }
                };
                byte[] regionStartKey = null;
                byte[] regionEndkey = null;
                if (maintainer.isLocalIndex()) {
                    HRegionLocation tableRegionLocation = connection.getQueryServices().getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow());
                    regionStartKey = tableRegionLocation.getRegionInfo().getStartKey();
                    regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
                }
                indexMutations.add(maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, ts, regionStartKey, regionEndkey));
            }
        }
        return indexMutations;
    } catch (IOException e) {
        throw new SQLException(e);
    }
}
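A possible caller shape for this method is sketched below. The choice of the table's first index, the GenericKeyValueBuilder instance, and the import locations for RowMutationState and the other Phoenix classes are assumptions for illustration, not something shown in the snippet above:
import java.sql.SQLException;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.phoenix.execute.MutationState.RowMutationState;
import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.util.IndexUtil;

public class ImmutableIndexDataSketch {
    // For an immutable data table, derive the index-table mutations that go with
    // a batch of data-table Puts, here using the table's first index only.
    static List<Mutation> indexMutationsFor(PTable dataTable,
            Map<ImmutableBytesPtr, RowMutationState> valuesMap,
            List<Mutation> dataMutations, PhoenixConnection conn) throws SQLException {
        PTable index = dataTable.getIndexes().get(0); // assumes exactly one index exists
        return IndexUtil.generateIndexData(dataTable, index, valuesMap, dataMutations,
                GenericKeyValueBuilder.INSTANCE, conn);
    }
}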
use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
the class IndexUtil method deserializeDataTableColumnsToJoin.
public static ColumnReference[] deserializeDataTableColumnsToJoin(Scan scan) {
    byte[] columnsBytes = scan.getAttribute(BaseScannerRegionObserver.DATA_TABLE_COLUMNS_TO_JOIN);
    if (columnsBytes == null)
        return null;
    // TODO: size?
    ByteArrayInputStream stream = new ByteArrayInputStream(columnsBytes);
    try {
        DataInputStream input = new DataInputStream(stream);
        int numColumns = WritableUtils.readVInt(input);
        ColumnReference[] dataColumns = new ColumnReference[numColumns];
        for (int i = 0; i < numColumns; i++) {
            dataColumns[i] = new ColumnReference(Bytes.readByteArray(input), Bytes.readByteArray(input));
        }
        return dataColumns;
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
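The read side above implies the wire layout: a vint column count followed by (family, qualifier) pairs written as length-prefixed byte arrays. A hedged sketch of a matching writer follows; it is only what the deserializer implies, not necessarily the project's actual serialization code:
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;
import org.apache.phoenix.hbase.index.covered.update.ColumnReference;

public class ColumnsToJoinSerializationSketch {
    // Writes column references in the layout the deserializer above expects:
    // a vint count, then each (family, qualifier) as length-prefixed byte arrays.
    static byte[] serialize(ColumnReference[] dataColumns) {
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        try (DataOutputStream output = new DataOutputStream(stream)) {
            WritableUtils.writeVInt(output, dataColumns.length);
            for (ColumnReference ref : dataColumns) {
                Bytes.writeByteArray(output, ref.getFamily());
                Bytes.writeByteArray(output, ref.getQualifier());
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        return stream.toByteArray();
    }
}
The resulting bytes would then be attached to the scan with scan.setAttribute(BaseScannerRegionObserver.DATA_TABLE_COLUMNS_TO_JOIN, serialized) so that the code above can read them back on the server side.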