Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
The class EndToEndCoveredColumnsIndexBuilderIT, method testExpectedResultsInTableStateForSinglePut.
/**
 * Test that we see the expected values in a {@link TableState} when doing single puts against a
 * region.
 * @throws Exception on failure
 */
@Test
public void testExpectedResultsInTableStateForSinglePut() throws Exception {
    // just do a simple Put to start with
    long ts = state.ts;
    Put p = new Put(row, ts);
    p.add(family, qual, Bytes.toBytes("v1"));
    // get all the underlying kvs for the put
    final List<Cell> expectedKvs = new ArrayList<Cell>();
    final List<Cell> allKvs = new ArrayList<Cell>();
    allKvs.addAll(p.getFamilyMap().get(family));
    // setup the verifier for the data we expect to write
    // first call shouldn't have anything in the table
    final ColumnReference familyRef =
            new ColumnReference(EndToEndCoveredColumnsIndexBuilderIT.family, ColumnReference.ALL_QUALIFIERS);
    VerifyingIndexCodec codec = state.codec;
    codec.verifiers.add(new ListMatchingVerifier("cleanup state 1", expectedKvs, familyRef));
    codec.verifiers.add(new ListMatchingVerifier("put state 1", allKvs, familyRef));
    // do the actual put (no indexing will actually be done)
    HTable primary = state.table;
    primary.put(p);
    primary.flushCommits();
    // now we do another put to the same row. We should see just the old row state, followed by the
    // new + old
    p = new Put(row, ts + 1);
    p.add(family, qual, Bytes.toBytes("v2"));
    expectedKvs.addAll(allKvs);
    // add them first b/c the ts is newer
    allKvs.addAll(0, p.get(family, qual));
    codec.verifiers.add(new ListMatchingVerifier("cleanup state 2", expectedKvs, familyRef));
    codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));
    // do the actual put
    primary.put(p);
    primary.flushCommits();
    // cleanup after ourselves
    cleanup(state);
}
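The Phoenix-specific piece above is the ColumnReference built with ColumnReference.ALL_QUALIFIERS, which acts as a family-wide wildcard for the verifiers. Below is a minimal sketch (the FAM/COL1 names are invented for illustration) of how ColumnReference behaves as a plain value object, which is what lets the indexing code key sets and maps by it:

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.covered.update.ColumnReference;

public class ColumnReferenceSketch {
    public static void main(String[] args) {
        byte[] fam = Bytes.toBytes("FAM");   // hypothetical family
        byte[] qual = Bytes.toBytes("COL1"); // hypothetical qualifier
        // A reference to a single column: family plus qualifier
        ColumnReference ref = new ColumnReference(fam, qual);
        // ALL_QUALIFIERS is the wildcard qualifier the test above passes to
        // its verifiers to match every column in the family
        ColumnReference wholeFamily = new ColumnReference(fam, ColumnReference.ALL_QUALIFIERS);
        // Value-based equality makes ColumnReference usable as a set/map key,
        // as in the Set<ColumnReference> parameters in the snippets below
        Set<ColumnReference> tracked = new HashSet<ColumnReference>();
        tracked.add(ref);
        System.out.println(tracked.contains(new ColumnReference(fam, qual))); // true
        System.out.println(Bytes.toString(wholeFamily.getFamily()));          // FAM
    }
}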
Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
The class PhoenixTransactionalIndexer, method processRollback.
private void processRollback(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData,
        byte[] txRollbackAttribute, ResultScanner scanner, Transaction tx, Set<ColumnReference> mutableColumns,
        Collection<Pair<Mutation, byte[]>> indexUpdates, Map<ImmutableBytesPtr, MultiMutation> mutations)
        throws IOException {
    if (scanner != null) {
        Result result;
        // Loop through last committed row state plus all new rows associated with current transaction
        // to generate point delete markers for all index rows that were added. We don't have Tephra
        // manage index rows in change sets because we don't want to pay the additional
        // memory overhead and do not need to do conflict detection on index rows.
        ColumnReference emptyColRef = new ColumnReference(
                indexMetaData.getIndexMaintainers().get(0).getDataEmptyKeyValueCF(),
                indexMetaData.getIndexMaintainers().get(0).getEmptyKeyValueQualifier());
        while ((result = scanner.next()) != null) {
            Mutation m = mutations.remove(new ImmutableBytesPtr(result.getRow()));
            // Sort by timestamp, type, cf, cq so we can process in time batches from oldest to newest
            // (as if we're "replaying" them in time order).
            List<Cell> cells = result.listCells();
            Collections.sort(cells, new Comparator<Cell>() {
                @Override
                public int compare(Cell o1, Cell o2) {
                    int c = Longs.compare(o1.getTimestamp(), o2.getTimestamp());
                    if (c != 0)
                        return c;
                    c = o1.getTypeByte() - o2.getTypeByte();
                    if (c != 0)
                        return c;
                    c = Bytes.compareTo(o1.getFamilyArray(), o1.getFamilyOffset(), o1.getFamilyLength(),
                            o2.getFamilyArray(), o2.getFamilyOffset(), o2.getFamilyLength());
                    if (c != 0)
                        return c;
                    return Bytes.compareTo(o1.getQualifierArray(), o1.getQualifierOffset(), o1.getQualifierLength(),
                            o2.getQualifierArray(), o2.getQualifierOffset(), o2.getQualifierLength());
                }
            });
            int i = 0;
            int nCells = cells.size();
            Result oldResult = null, newResult;
            long readPtr = tx.getReadPointer();
            do {
                boolean hasPuts = false;
                LinkedList<Cell> singleTimeCells = Lists.newLinkedList();
                long writePtr;
                Cell cell = cells.get(i);
                do {
                    hasPuts |= cell.getTypeByte() == KeyValue.Type.Put.getCode();
                    writePtr = cell.getTimestamp();
                    ListIterator<Cell> it = singleTimeCells.listIterator();
                    do {
                        // Add at the beginning of the list to match the expected HBase
                        // newest to oldest sort order (which TxTableState relies on
                        // with the Result.getLatestColumnValue() calls). However, we
                        // still want to add Cells in the expected order for each time
                        // bound as otherwise we won't find it in our old state.
                        it.add(cell);
                    } while (++i < nCells && (cell = cells.get(i)).getTimestamp() == writePtr);
                } while (i < nCells && cell.getTimestamp() <= readPtr);
                // If we have a prior row state, generate point deletes for the index rows it
                // produced (we never want to delete the current row).
                if (oldResult != null) {
                    TxTableState state = new TxTableState(env, mutableColumns, indexMetaData.getAttributes(), writePtr, m, emptyColRef, oldResult);
                    generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state);
                }
                // If this time batch contains puts, its row state may also produce index rows to
                // delete.
                if (hasPuts) {
                    newResult = Result.create(singleTimeCells);
                    // First row may represent the current state which we don't want to delete
                    if (writePtr > readPtr) {
                        TxTableState state = new TxTableState(env, mutableColumns, indexMetaData.getAttributes(), writePtr, m, emptyColRef, newResult);
                        generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state);
                    }
                    oldResult = newResult;
                } else {
                    oldResult = null;
                }
            } while (i < nCells);
        }
    }
}
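The nested do/while loops are the subtle part: cells arrive sorted oldest to newest, everything at or below the transaction's read pointer is merged into a single last-committed batch, and each timestamp above the read pointer (written by the transaction being rolled back) becomes its own batch. A simplified, self-contained sketch of just that grouping, using bare timestamps instead of Cells and ignoring the newest-first ordering that singleTimeCells maintains internally:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class TimeBatchingSketch {
    // Mirrors the batching in processRollback; 'ts' must be sorted ascending.
    static List<List<Long>> batch(List<Long> ts, long readPtr) {
        List<List<Long>> batches = new ArrayList<List<Long>>();
        int i = 0;
        int n = ts.size();
        while (i < n) {
            List<Long> batch = new ArrayList<Long>();
            long writePtr;
            do {
                writePtr = ts.get(i);
                do {
                    // consume the run of cells sharing this write pointer
                    batch.add(ts.get(i++));
                } while (i < n && ts.get(i) == writePtr);
                // keep merging runs into one batch while still at or below the read pointer
            } while (i < n && ts.get(i) <= readPtr);
            batches.add(batch);
        }
        return batches;
    }

    public static void main(String[] args) {
        // Read pointer 10: timestamps 5 and 10 collapse into one committed batch,
        // while 12 and 15 (uncommitted writes) each get their own batch.
        List<Long> ts = Arrays.asList(5L, 5L, 10L, 12L, 12L, 15L);
        System.out.println(batch(ts, 10L)); // [[5, 5, 10], [12, 12], [15]]
    }
}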
Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
The class PhoenixTransactionalIndexer, method processMutation.
private void processMutation(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData,
        byte[] txRollbackAttribute, ResultScanner scanner, Transaction tx, Set<ColumnReference> upsertColumns,
        Collection<Pair<Mutation, byte[]>> indexUpdates, Map<ImmutableBytesPtr, MultiMutation> mutations,
        Map<ImmutableBytesPtr, MultiMutation> mutationsToFindPreviousValue) throws IOException {
    if (scanner != null) {
        Result result;
        ColumnReference emptyColRef = new ColumnReference(
                indexMetaData.getIndexMaintainers().get(0).getDataEmptyKeyValueCF(),
                indexMetaData.getIndexMaintainers().get(0).getEmptyKeyValueQualifier());
        // Process existing data table rows by removing the old index row and adding the new index row
        while ((result = scanner.next()) != null) {
            Mutation m = mutationsToFindPreviousValue.remove(new ImmutableBytesPtr(result.getRow()));
            TxTableState state = new TxTableState(env, upsertColumns, indexMetaData.getAttributes(),
                    tx.getWritePointer(), m, emptyColRef, result);
            generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state);
            generatePuts(indexMetaData, indexUpdates, state);
        }
    }
    // Process new data table rows by adding new index rows
    for (Mutation m : mutations.values()) {
        TxTableState state = new TxTableState(env, upsertColumns, indexMetaData.getAttributes(),
                tx.getWritePointer(), m);
        generatePuts(indexMetaData, indexUpdates, state);
    }
}
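Stripped of the transaction plumbing, processMutation implements a simple rule: a data row that already exists must have its stale index row retracted before the replacement is written, while a brand-new row needs only the put. A toy model of that rule (SimpleIndexSketch and its method names are invented for illustration, not Phoenix API):

import java.util.HashMap;
import java.util.Map;

public class SimpleIndexSketch {
    // "Index table": indexed column value -> data row key
    private final Map<String, String> indexRows = new HashMap<String, String>();

    // Existing data row: drop the index row built from the old value
    // (the generateDeletes step), then add one for the new value
    // (the generatePuts step).
    void onExistingRow(String oldValue, String newValue, String dataRowKey) {
        indexRows.remove(oldValue);
        indexRows.put(newValue, dataRowKey);
    }

    // Brand-new data row: nothing stale to delete, just add (generatePuts only).
    void onNewRow(String newValue, String dataRowKey) {
        indexRows.put(newValue, dataRowKey);
    }

    public static void main(String[] args) {
        SimpleIndexSketch idx = new SimpleIndexSketch();
        idx.onNewRow("v1", "row1");            // new row: put only
        idx.onExistingRow("v1", "v2", "row1"); // update: delete old, put new
        System.out.println(idx.indexRows);     // {v2=row1}
    }
}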
Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
The class IndexMaintainer, method getEstimatedByteSize.
public int getEstimatedByteSize() {
    int size = WritableUtils.getVIntSize(nIndexSaltBuckets);
    size += WritableUtils.getVIntSize(estimatedIndexRowKeyBytes);
    size += WritableUtils.getVIntSize(indexedColumns.size());
    size += viewIndexId == null ? 0 : viewIndexId.length;
    for (ColumnReference ref : indexedColumns) {
        size += WritableUtils.getVIntSize(ref.getFamily().length);
        size += ref.getFamily().length;
        size += WritableUtils.getVIntSize(ref.getQualifier().length);
        size += ref.getQualifier().length;
    }
    for (int i = 0; i < indexedColumnTypes.size(); i++) {
        PDataType type = indexedColumnTypes.get(i);
        size += WritableUtils.getVIntSize(type.ordinal());
    }
    Set<ColumnReference> dataTableColRefs = coveredColumnsMap.keySet();
    size += WritableUtils.getVIntSize(dataTableColRefs.size());
    for (ColumnReference ref : dataTableColRefs) {
        size += WritableUtils.getVIntSize(ref.getFamilyWritable().getSize());
        size += ref.getFamily().length;
        size += WritableUtils.getVIntSize(ref.getQualifierWritable().getSize());
        size += ref.getQualifier().length;
    }
    size += indexTableName.length + WritableUtils.getVIntSize(indexTableName.length);
    size += rowKeyMetaData.getByteSize();
    size += dataEmptyKeyValueCF.length + WritableUtils.getVIntSize(dataEmptyKeyValueCF.length);
    size += emptyKeyValueCFPtr.getLength() + WritableUtils.getVIntSize(emptyKeyValueCFPtr.getLength());
    size += WritableUtils.getVIntSize(nDataCFs + 1);
    size += WritableUtils.getVIntSize(indexedExpressions.size());
    for (Expression expression : indexedExpressions) {
        size += WritableUtils.getVIntSize(ExpressionType.valueOf(expression).ordinal());
    }
    size += estimatedExpressionSize;
    return size;
}
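The repeated WritableUtils.getVIntSize calls are what make this estimate track the serialized form: Hadoop's variable-length integer encoding stores small values in a single byte and grows with the magnitude, so each length prefix's size depends on its value. For example:

import org.apache.hadoop.io.WritableUtils;

public class VIntSizeSketch {
    public static void main(String[] args) {
        // Values in [-112, 127] fit in a single byte...
        System.out.println(WritableUtils.getVIntSize(1));     // 1
        System.out.println(WritableUtils.getVIntSize(127));   // 1
        // ...larger values need a marker byte plus the value bytes
        System.out.println(WritableUtils.getVIntSize(128));   // 2
        System.out.println(WritableUtils.getVIntSize(70000)); // 4 (1 marker + 3 value bytes)
    }
}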
Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
The class IndexMaintainer, method buildUpdateMutation.
public Put buildUpdateMutation(KeyValueBuilder kvBuilder, ValueGetter valueGetter,
        ImmutableBytesWritable dataRowKeyPtr, long ts, byte[] regionStartKey, byte[] regionEndKey)
        throws IOException {
    byte[] indexRowKey = this.buildRowKey(valueGetter, dataRowKeyPtr, regionStartKey, regionEndKey);
    Put put = null;
    // New row being inserted: add the empty key value
    if (valueGetter == null || valueGetter.getLatestValue(dataEmptyKeyValueRef) == null) {
        put = new Put(indexRowKey);
        // add the keyvalue for the empty row; its value is set to the empty column name
        put.add(kvBuilder.buildPut(new ImmutableBytesPtr(indexRowKey), this.getEmptyKeyValueFamily(),
                dataEmptyKeyValueRef.getQualifierWritable(), ts,
                dataEmptyKeyValueRef.getQualifierWritable()));
        put.setDurability(!indexWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
    }
    ImmutableBytesPtr rowKey = new ImmutableBytesPtr(indexRowKey);
    if (immutableStorageScheme != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
        // map from index column family to list of pairs of index column and data column (for covered columns)
        Map<ImmutableBytesPtr, List<Pair<ColumnReference, ColumnReference>>> familyToColListMap = Maps.newHashMap();
        for (ColumnReference ref : this.getCoveredColumns()) {
            ColumnReference indexColRef = this.coveredColumnsMap.get(ref);
            ImmutableBytesPtr cf = new ImmutableBytesPtr(indexColRef.getFamily());
            if (!familyToColListMap.containsKey(cf)) {
                familyToColListMap.put(cf, Lists.<Pair<ColumnReference, ColumnReference>>newArrayList());
            }
            familyToColListMap.get(cf).add(Pair.newPair(indexColRef, ref));
        }
        // iterate over each column family and create a byte[] containing all the columns
        for (Entry<ImmutableBytesPtr, List<Pair<ColumnReference, ColumnReference>>> entry : familyToColListMap.entrySet()) {
            byte[] columnFamily = entry.getKey().copyBytesIfNecessary();
            List<Pair<ColumnReference, ColumnReference>> colRefPairs = entry.getValue();
            int maxEncodedColumnQualifier = Integer.MIN_VALUE;
            // find the max col qualifier
            for (Pair<ColumnReference, ColumnReference> colRefPair : colRefPairs) {
                maxEncodedColumnQualifier = Math.max(maxEncodedColumnQualifier,
                        encodingScheme.decode(colRefPair.getFirst().getQualifier()));
            }
            Expression[] colValues = EncodedColumnsUtil.createColumnExpressionArray(maxEncodedColumnQualifier);
            // set the values of the columns
            for (Pair<ColumnReference, ColumnReference> colRefPair : colRefPairs) {
                ColumnReference indexColRef = colRefPair.getFirst();
                ColumnReference dataColRef = colRefPair.getSecond();
                Expression expression = new SingleCellColumnExpression(new PDatum() {
                    @Override
                    public boolean isNullable() {
                        return false;
                    }

                    @Override
                    public SortOrder getSortOrder() {
                        return null;
                    }

                    @Override
                    public Integer getScale() {
                        return null;
                    }

                    @Override
                    public Integer getMaxLength() {
                        return null;
                    }

                    @Override
                    public PDataType getDataType() {
                        return null;
                    }
                }, dataColRef.getFamily(), dataColRef.getQualifier(), encodingScheme);
                ImmutableBytesPtr ptr = new ImmutableBytesPtr();
                expression.evaluate(new ValueGetterTuple(valueGetter), ptr);
                byte[] value = ptr.copyBytesIfNecessary();
                if (value != null) {
                    int indexArrayPos = encodingScheme.decode(indexColRef.getQualifier())
                            - QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE + 1;
                    colValues[indexArrayPos] = new LiteralExpression(value);
                }
            }
            List<Expression> children = Arrays.asList(colValues);
            // we use SingleCellConstructorExpression to serialize multiple columns into a single byte[]
            SingleCellConstructorExpression singleCellConstructorExpression =
                    new SingleCellConstructorExpression(immutableStorageScheme, children);
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            singleCellConstructorExpression.evaluate(new BaseTuple() {
            }, ptr);
            if (put == null) {
                put = new Put(indexRowKey);
                put.setDurability(!indexWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
            }
            ImmutableBytesPtr colFamilyPtr = new ImmutableBytesPtr(columnFamily);
            // this is a little bit of extra work for installations that are running <0.94.14, but
            // that should be rare and is a short-term set of wrappers - it shouldn't kill GC
            put.add(kvBuilder.buildPut(rowKey, colFamilyPtr, QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES_PTR, ts, ptr));
        }
    } else {
        for (ColumnReference ref : this.getCoveredColumns()) {
            ColumnReference indexColRef = this.coveredColumnsMap.get(ref);
            ImmutableBytesPtr cq = indexColRef.getQualifierWritable();
            ImmutableBytesPtr cf = indexColRef.getFamilyWritable();
            ImmutableBytesWritable value = valueGetter.getLatestValue(ref);
            if (value != null) {
                if (put == null) {
                    put = new Put(indexRowKey);
                    put.setDurability(!indexWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
                }
                put.add(kvBuilder.buildPut(rowKey, cf, cq, ts, value));
            }
        }
    }
    return put;
}
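Finally, a hedged sketch of how a caller might drive buildUpdateMutation for one data row. The import paths, GenericKeyValueBuilder.INSTANCE, and the HTableInterface write are my assumptions for this era of Phoenix/HBase, not taken from the snippet above:

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.hbase.index.ValueGetter;
import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
import org.apache.phoenix.index.IndexMaintainer;

public class BuildIndexPutSketch {
    // Sketch: build the index Put for one data row and write it if non-null.
    static void writeIndexRow(IndexMaintainer maintainer, ValueGetter valueGetter,
            ImmutableBytesWritable dataRowKeyPtr, long ts,
            byte[] regionStartKey, byte[] regionEndKey,
            HTableInterface indexTable) throws IOException {
        Put indexPut = maintainer.buildUpdateMutation(
                GenericKeyValueBuilder.INSTANCE, // assumed: Phoenix's stock KeyValueBuilder
                valueGetter, dataRowKeyPtr, ts, regionStartKey, regionEndKey);
        // buildUpdateMutation returns null when there is nothing to write:
        // the empty key value already exists and no covered column had a value
        if (indexPut != null) {
            indexTable.put(indexPut);
        }
    }
}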