use of org.apache.phoenix.hbase.index.MultiMutation in project phoenix by apache.
the class TestNonTxIndexBuilder method getMultipleVersionMutation.
private MultiMutation getMultipleVersionMutation(int versions) {
    MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW));
    for (int i = versions - 1; i >= 0; i--) {
        Put put = new Put(ROW);
        put.addImmutable(FAM, INDEXED_QUALIFIER, i, Bytes.toBytes(i));
        mutation.addAll(put);
    }
    return mutation;
}
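The helper builds one MultiMutation holding a Put per version, newest timestamp first. A minimal usage sketch (hypothetical, reusing the test's ROW and FAM constants) reads the grouped cells back through the family cell map, the same way testManyVersions does further down:
// Hypothetical usage of the helper above; ROW and FAM are the test constants.
MultiMutation threeVersions = getMultipleVersionMutation(3);
// MultiMutation extends Mutation, so the grouped cells are reachable via the
// family cell map; the Puts were added here with timestamps 2, 1, 0.
List<Cell> cells = threeVersions.getFamilyCellMap().get(FAM);
for (Cell cell : cells) {
    System.out.println(cell.getTimestamp() + " -> " + Bytes.toInt(CellUtil.cloneValue(cell)));
}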
use of org.apache.phoenix.hbase.index.MultiMutation in project phoenix by apache.
the class TestNonTxIndexBuilder method testRebuildMultipleVersionRow.
/**
 * Tests a partial rebuild of a row with multiple versions. 3 versions of the row in data table,
 * and we rebuild the index starting from time t=2
 *
 * There should be one index row version per data row version.
 */
@Test
public void testRebuildMultipleVersionRow() throws IOException {
    // when doing a rebuild, we are replaying mutations so we want to ignore newer mutations
    // see LocalTable#getCurrentRowState()
    Mockito.when(mockIndexMetaData.ignoreNewerMutations()).thenReturn(true);
    // the current row state has 3 versions, but if we rebuild as of t=2, scanner in LocalTable
    // should only return first
    Cell currentCell1 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 1, PUT_TYPE, VALUE_1);
    Cell currentCell2 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 2, PUT_TYPE, VALUE_2);
    Cell currentCell3 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 3, PUT_TYPE, VALUE_3);
    Cell currentCell4 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 4, PUT_TYPE, VALUE_4);
    setCurrentRowState(Arrays.asList(currentCell4, currentCell3, currentCell2, currentCell1));
    // rebuilder replays mutations starting from t=2
    MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW));
    Put put = new Put(ROW);
    put.addImmutable(FAM, INDEXED_QUALIFIER, 4, VALUE_4);
    mutation.addAll(put);
    put = new Put(ROW);
    put.addImmutable(FAM, INDEXED_QUALIFIER, 3, VALUE_3);
    mutation.addAll(put);
    put = new Put(ROW);
    put.addImmutable(FAM, INDEXED_QUALIFIER, 2, VALUE_2);
    mutation.addAll(put);
    Collection<Pair<Mutation, byte[]>> indexUpdates = indexBuilder.getIndexUpdate(mutation, mockIndexMetaData);
    // 3 puts and 3 deletes (one to hide existing index row for VALUE_1, and two to hide index
    // rows for VALUE_2, VALUE_3)
    assertEquals(6, indexUpdates.size());
    assertContains(indexUpdates, 2, ROW, KeyValue.Type.DeleteFamily, FAM, new byte[0], /* qual not needed */ 2);
    assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, KeyValue.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 2);
    assertContains(indexUpdates, 3, ROW, KeyValue.Type.DeleteFamily, FAM, new byte[0], /* qual not needed */ 3);
    assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, KeyValue.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 3);
    assertContains(indexUpdates, 4, ROW, KeyValue.Type.DeleteFamily, FAM, new byte[0], /* qual not needed */ 4);
    assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, KeyValue.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 4);
}
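The replayed MultiMutation above repeats the same Put construction three times. A compact equivalent (a hypothetical refactor, not code from the test) builds the same grouped mutation with a loop over the replayed timestamps:
// Hypothetical equivalent of the replay mutation built above: one Put per
// replayed timestamp (t=4, 3, 2), all grouped under the same row key.
MultiMutation replay = new MultiMutation(new ImmutableBytesPtr(ROW));
long[] timestamps = { 4, 3, 2 };
byte[][] values = { VALUE_4, VALUE_3, VALUE_2 };
for (int i = 0; i < timestamps.length; i++) {
    Put p = new Put(ROW);
    p.addImmutable(FAM, INDEXED_QUALIFIER, timestamps[i], values[i]);
    replay.addAll(p);
}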
use of org.apache.phoenix.hbase.index.MultiMutation in project phoenix by apache.
the class PhoenixTransactionalIndexer method getIndexUpdates.
private Collection<Pair<Mutation, byte[]>> getIndexUpdates(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData, Iterator<Mutation> mutationIterator, byte[] txRollbackAttribute) throws IOException {
    Transaction tx = indexMetaData.getTransaction();
    if (tx == null) {
        throw new NullPointerException("Expected to find transaction in metadata for " + env.getRegionInfo().getTable().getNameAsString());
    }
    boolean isRollback = txRollbackAttribute != null;
    boolean isImmutable = indexMetaData.isImmutableRows();
    ResultScanner currentScanner = null;
    TransactionAwareHTable txTable = null;
    // Collect up all mutations in batch
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    Map<ImmutableBytesPtr, MultiMutation> findPriorValueMutations;
    if (isImmutable && !isRollback) {
        findPriorValueMutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    } else {
        findPriorValueMutations = mutations;
    }
    while (mutationIterator.hasNext()) {
        Mutation m = mutationIterator.next();
        // add the mutation to the batch set
        ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
        if (mutations != findPriorValueMutations && isDeleteMutation(m)) {
            addMutation(findPriorValueMutations, row, m);
        }
        addMutation(mutations, row, m);
    }
    // Collect the set of mutable ColumnReferences so that we can first
    // run a scan to get the current state. We'll need this to delete
    // the existing index rows.
    List<IndexMaintainer> indexMaintainers = indexMetaData.getIndexMaintainers();
    int estimatedSize = indexMaintainers.size() * 10;
    Set<ColumnReference> mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize);
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> allColumns = indexMaintainer.getAllColumns();
        mutableColumns.addAll(allColumns);
    }
    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(mutations.size() * 2 * indexMaintainers.size());
    try {
        // this logic will work there too.
        if (!findPriorValueMutations.isEmpty()) {
            List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
            for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) {
                keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
            }
            Scan scan = new Scan();
            // Project all mutable columns
            for (ColumnReference ref : mutableColumns) {
                scan.addColumn(ref.getFamily(), ref.getQualifier());
            }
            /*
             * Indexes inherit the storage scheme of the data table which means all the indexes have the same
             * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
             * supporting new indexes over existing data tables to have a different storage scheme than the data
             * table.
             */
            byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();
            // Project empty key value column
            scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
            ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
            scanRanges.initializeScan(scan);
            TableName tableName = env.getRegion().getRegionInfo().getTable();
            HTableInterface htable = env.getTable(tableName);
            txTable = new TransactionAwareHTable(htable);
            txTable.startTx(tx);
            // For rollback, we need to see all versions, including
            // the last committed version as there may be multiple
            // checkpointed versions.
            SkipScanFilter filter = scanRanges.getSkipScanFilter();
            if (isRollback) {
                filter = new SkipScanFilter(filter, true);
                tx.setVisibility(VisibilityLevel.SNAPSHOT_ALL);
            }
            scan.setFilter(filter);
            currentScanner = txTable.getScanner(scan);
        }
        if (isRollback) {
            processRollback(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations);
        } else {
            processMutation(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations, findPriorValueMutations);
        }
    } finally {
        if (txTable != null) txTable.close();
    }
    return indexUpdates;
}
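Both this method and PhoenixTxIndexMutationGenerator.getIndexUpdates below call an addMutation helper to group every mutation for a row under a single MultiMutation keyed by the row's ImmutableBytesPtr. The helper body is not shown in these snippets; a plausible sketch, consistent with how the maps are used at the call sites, would be:
// Sketch of the addMutation helper referenced above (an assumption: only its
// call sites appear in the snippets). It merges every mutation for a given row
// into one MultiMutation so downstream code sees a single entry per row.
private static void addMutation(Map<ImmutableBytesPtr, MultiMutation> mutations,
        ImmutableBytesPtr row, Mutation m) {
    MultiMutation stored = mutations.get(row);
    if (stored == null) {
        // first mutation seen for this row: start a new MultiMutation for it
        stored = new MultiMutation(row);
        mutations.put(row, stored);
    }
    stored.addAll(m);
}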
use of org.apache.phoenix.hbase.index.MultiMutation in project phoenix by apache.
the class NonTxIndexBuilderTest method testManyVersions.
/**
 * Tests getting an index update for a mutation with 200 versions. Before, the issue PHOENIX-3807
 * was causing this test to take >90 seconds, so here we set a timeout of 10 seconds.
 */
@Test(timeout = 10000)
public void testManyVersions() throws IOException {
    // when doing a rebuild, we are replaying mutations so we want to ignore newer mutations
    // see LocalTable#getCurrentRowState()
    Mockito.when(mockIndexMetaData.getReplayWrite()).thenReturn(ReplayWrite.INDEX_ONLY);
    MultiMutation mutation = getMultipleVersionMutation(200);
    currentRowCells = mutation.getFamilyCellMap().get(FAM);
    Collection<Pair<Mutation, byte[]>> indexUpdates = Lists.newArrayList();
    for (Mutation m : IndexManagementUtil.flattenMutationsByTimestamp(Collections.singletonList(mutation))) {
        indexUpdates.addAll(indexBuilder.getIndexUpdate(m, mockIndexMetaData));
    }
    assertNotEquals(0, indexUpdates.size());
}
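The test replays the 200-version row one timestamp at a time via IndexManagementUtil.flattenMutationsByTimestamp rather than handing the builder the whole MultiMutation at once. A small hypothetical sketch of that assumption (the snippet only shows the for-each usage, not the return type):
// Hypothetical walk over the flattened mutations; each one is expected to carry
// cells for a single timestamp (an assumption, not confirmed by the snippet).
MultiMutation many = getMultipleVersionMutation(200);
for (Mutation single : IndexManagementUtil.flattenMutationsByTimestamp(Collections.singletonList(many))) {
    for (Cell cell : single.getFamilyCellMap().get(FAM)) {
        System.out.println(cell.getTimestamp());
    }
}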
use of org.apache.phoenix.hbase.index.MultiMutation in project phoenix by apache.
the class PhoenixTxIndexMutationGenerator method getIndexUpdates.
public Collection<Pair<Mutation, byte[]>> getIndexUpdates(HTableInterface htable, Iterator<Mutation> mutationIterator) throws IOException, SQLException {
    if (!mutationIterator.hasNext()) {
        return Collections.emptyList();
    }
    List<IndexMaintainer> indexMaintainers = indexMetaData.getIndexMaintainers();
    ResultScanner currentScanner = null;
    // Collect up all mutations in batch
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    // Collect the set of mutable ColumnReferences so that we can first
    // run a scan to get the current state. We'll need this to delete
    // the existing index rows.
    int estimatedSize = indexMaintainers.size() * 10;
    Set<ColumnReference> mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize);
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> allColumns = indexMaintainer.getAllColumns();
        mutableColumns.addAll(allColumns);
    }
    Mutation m = mutationIterator.next();
    Map<String, byte[]> updateAttributes = m.getAttributesMap();
    byte[] txRollbackAttribute = updateAttributes.get(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY);
    boolean isRollback = txRollbackAttribute != null;
    boolean isImmutable = indexMetaData.isImmutableRows();
    Map<ImmutableBytesPtr, MultiMutation> findPriorValueMutations;
    if (isImmutable && !isRollback) {
        findPriorValueMutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    } else {
        findPriorValueMutations = mutations;
    }
    while (true) {
        // add the mutation to the batch set
        ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
        // if we have no non PK columns, no need to find the prior values
        if (mutations != findPriorValueMutations && indexMetaData.requiresPriorRowState(m)) {
            addMutation(findPriorValueMutations, row, m);
        }
        addMutation(mutations, row, m);
        if (!mutationIterator.hasNext()) {
            break;
        }
        m = mutationIterator.next();
    }
    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(mutations.size() * 2 * indexMaintainers.size());
    // this logic will work there too.
    if (!findPriorValueMutations.isEmpty()) {
        List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
        for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) {
            keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
        }
        Scan scan = new Scan();
        // Project all mutable columns
        for (ColumnReference ref : mutableColumns) {
            scan.addColumn(ref.getFamily(), ref.getQualifier());
        }
        /*
         * Indexes inherit the storage scheme of the data table which means all the indexes have the same
         * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
         * supporting new indexes over existing data tables to have a different storage scheme than the data
         * table.
         */
        byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();
        // Project empty key value column
        scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
        ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
        scanRanges.initializeScan(scan);
        PhoenixTransactionalTable txTable = TransactionFactory.getTransactionProvider().getTransactionalTable(indexMetaData.getTransactionContext(), htable);
        // For rollback, we need to see all versions, including
        // the last committed version as there may be multiple
        // checkpointed versions.
        SkipScanFilter filter = scanRanges.getSkipScanFilter();
        if (isRollback) {
            filter = new SkipScanFilter(filter, true);
            indexMetaData.getTransactionContext().setVisibilityLevel(PhoenixVisibilityLevel.SNAPSHOT_ALL);
        }
        scan.setFilter(filter);
        currentScanner = txTable.getScanner(scan);
    }
    if (isRollback) {
        processRollback(indexMetaData, txRollbackAttribute, currentScanner, mutableColumns, indexUpdates, mutations);
    } else {
        processMutation(indexMetaData, txRollbackAttribute, currentScanner, mutableColumns, indexUpdates, mutations, findPriorValueMutations);
    }
    return indexUpdates;
}
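A hedged caller-side sketch of this API: collect the pending data-table mutations, pass them through getIndexUpdates, and inspect each resulting index mutation. The generator instance and table handle are assumed to exist already; only the getIndexUpdates(HTableInterface, Iterator<Mutation>) signature above is taken from the snippet.
// Hypothetical caller; generator and dataTable are assumptions made for the example.
static void generateAndInspect(PhoenixTxIndexMutationGenerator generator,
        HTableInterface dataTable, List<Mutation> dataMutations) throws IOException, SQLException {
    Collection<Pair<Mutation, byte[]>> updates =
            generator.getIndexUpdates(dataTable, dataMutations.iterator());
    for (Pair<Mutation, byte[]> update : updates) {
        Mutation indexMutation = update.getFirst();
        byte[] target = update.getSecond(); // presumably identifies the target index table
        System.out.println(Bytes.toString(target) + ": " + indexMutation);
    }
}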