Use of org.apache.phoenix.filter.SkipScanFilter in project phoenix by apache.
From the class ParallelIteratorsSplitTest, the method foreach:
private static Collection<?> foreach(ScanRanges scanRanges, int[] widths, KeyRange[] expectedSplits) {
    SkipScanFilter filter = new SkipScanFilter(scanRanges.getRanges(), buildSchema(widths));
    Scan scan = new Scan().setFilter(filter).setStartRow(KeyRange.UNBOUND).setStopRow(KeyRange.UNBOUND);
    List<Object> ret = Lists.newArrayList();
    ret.add(new Object[] { scan, scanRanges, Arrays.<KeyRange>asList(expectedSplits) });
    return ret;
}
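Here foreach acts as a data factory for JUnit 4's Parameterized runner: each Object[] triple (scan, ranges, expected splits) becomes the constructor arguments of one test case. A minimal sketch of the consuming side, assuming the standard Parameterized pattern (the class and field names below are illustrative, not the actual test code):

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.compile.ScanRanges;
import org.apache.phoenix.query.KeyRange;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class SplitTestSketch {
    private final Scan scan;                     // scan carrying the SkipScanFilter
    private final ScanRanges scanRanges;         // the ranges the filter was built from
    private final List<KeyRange> expectedSplits; // splits the planner should produce

    public SplitTestSketch(Scan scan, ScanRanges scanRanges, List<KeyRange> expectedSplits) {
        this.scan = scan;
        this.scanRanges = scanRanges;
        this.expectedSplits = expectedSplits;
    }

    @Parameters
    public static Collection<?> data() {
        // Each foreach(...) call contributes one Object[] test case, e.g.:
        // return foreach(someScanRanges, someWidths, someExpectedSplits);
        return Collections.emptyList(); // placeholder so the sketch compiles
    }
}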
Use of org.apache.phoenix.filter.SkipScanFilter in project phoenix by apache.
From the class PhoenixTransactionalIndexer, the method getIndexUpdates:
private Collection<Pair<Mutation, byte[]>> getIndexUpdates(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData, Iterator<Mutation> mutationIterator, byte[] txRollbackAttribute) throws IOException {
    Transaction tx = indexMetaData.getTransaction();
    if (tx == null) {
        throw new NullPointerException("Expected to find transaction in metadata for " + env.getRegionInfo().getTable().getNameAsString());
    }
    boolean isRollback = txRollbackAttribute != null;
    boolean isImmutable = indexMetaData.isImmutableRows();
    ResultScanner currentScanner = null;
    TransactionAwareHTable txTable = null;
    // Collect up all mutations in batch
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    Map<ImmutableBytesPtr, MultiMutation> findPriorValueMutations;
    if (isImmutable && !isRollback) {
        findPriorValueMutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    } else {
        findPriorValueMutations = mutations;
    }
    while (mutationIterator.hasNext()) {
        Mutation m = mutationIterator.next();
        // add the mutation to the batch set
        ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
        if (mutations != findPriorValueMutations && isDeleteMutation(m)) {
            addMutation(findPriorValueMutations, row, m);
        }
        addMutation(mutations, row, m);
    }
    // Collect the set of mutable ColumnReferences so that we can first
    // run a scan to get the current state. We'll need this to delete
    // the existing index rows.
    List<IndexMaintainer> indexMaintainers = indexMetaData.getIndexMaintainers();
    int estimatedSize = indexMaintainers.size() * 10;
    Set<ColumnReference> mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize);
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> allColumns = indexMaintainer.getAllColumns();
        mutableColumns.addAll(allColumns);
    }
    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(mutations.size() * 2 * indexMaintainers.size());
    try {
        // this logic will work there too.
        if (!findPriorValueMutations.isEmpty()) {
            List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
            for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) {
                keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
            }
            Scan scan = new Scan();
            // Project all mutable columns
            for (ColumnReference ref : mutableColumns) {
                scan.addColumn(ref.getFamily(), ref.getQualifier());
            }
            /*
             * Indexes inherit the storage scheme of the data table which means all the indexes have the same
             * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
             * supporting new indexes over existing data tables to have a different storage scheme than the data
             * table.
             */
            byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();
            // Project empty key value column
            scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
            ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
            scanRanges.initializeScan(scan);
            TableName tableName = env.getRegion().getRegionInfo().getTable();
            HTableInterface htable = env.getTable(tableName);
            txTable = new TransactionAwareHTable(htable);
            txTable.startTx(tx);
            // For rollback, we need to see all versions, including
            // the last committed version as there may be multiple
            // checkpointed versions.
            SkipScanFilter filter = scanRanges.getSkipScanFilter();
            if (isRollback) {
                filter = new SkipScanFilter(filter, true);
                tx.setVisibility(VisibilityLevel.SNAPSHOT_ALL);
            }
            scan.setFilter(filter);
            currentScanner = txTable.getScanner(scan);
        }
        if (isRollback) {
            processRollback(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations);
        } else {
            processMutation(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations, findPriorValueMutations);
        }
    } finally {
        if (txTable != null) txTable.close();
    }
    return indexUpdates;
}
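The addMutation helper called in the batching loop is not shown above. A hedged reconstruction of what it plausibly does, folding every mutation for a row key into one per-row batch so the indexer can process a row's changes as a unit; the MultiMutation constructor and addAll call are assumptions based on Phoenix's org.apache.phoenix.hbase.index.MultiMutation, not copied from the source:

// Assumed imports: org.apache.hadoop.hbase.client.Mutation,
// org.apache.phoenix.hbase.index.MultiMutation,
// org.apache.phoenix.hbase.index.util.ImmutableBytesPtr, java.util.Map
private static void addMutation(Map<ImmutableBytesPtr, MultiMutation> mutations, ImmutableBytesPtr row, Mutation m) {
    MultiMutation stored = mutations.get(row);
    if (stored == null) {
        // First mutation seen for this row key: start a fresh per-row batch.
        stored = new MultiMutation(row);
        mutations.put(row, stored);
    }
    // Fold this mutation's cells into the row's batch.
    stored.addAll(m);
}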
Use of org.apache.phoenix.filter.SkipScanFilter in project phoenix by apache.
From the class ScanUtil, the method intersectScanRange:
public static boolean intersectScanRange(Scan scan, byte[] startKey, byte[] stopKey, boolean useSkipScan) {
    boolean mayHaveRows = false;
    int offset = 0;
    if (ScanUtil.isLocalIndex(scan)) {
        offset = startKey.length != 0 ? startKey.length : stopKey.length;
    }
    byte[] existingStartKey = scan.getStartRow();
    byte[] existingStopKey = scan.getStopRow();
    if (existingStartKey.length > 0) {
        if (startKey.length == 0 || Bytes.compareTo(existingStartKey, startKey) > 0) {
            startKey = existingStartKey;
        }
    } else {
        mayHaveRows = true;
    }
    if (existingStopKey.length > 0) {
        if (stopKey.length == 0 || Bytes.compareTo(existingStopKey, stopKey) < 0) {
            stopKey = existingStopKey;
        }
    } else {
        mayHaveRows = true;
    }
    scan.setStartRow(startKey);
    scan.setStopRow(stopKey);
    if (offset > 0 && useSkipScan) {
        byte[] temp = null;
        if (startKey.length != 0) {
            temp = new byte[startKey.length - offset];
            System.arraycopy(startKey, offset, temp, 0, startKey.length - offset);
            startKey = temp;
        }
        if (stopKey.length != 0) {
            temp = new byte[stopKey.length - offset];
            System.arraycopy(stopKey, offset, temp, 0, stopKey.length - offset);
            stopKey = temp;
        }
    }
    mayHaveRows = mayHaveRows || Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) < 0;
    // If the scan is using skip scan filter, intersect and replace the filter.
    if (mayHaveRows && useSkipScan) {
        Filter filter = scan.getFilter();
        if (filter instanceof SkipScanFilter) {
            SkipScanFilter oldFilter = (SkipScanFilter) filter;
            SkipScanFilter newFilter = oldFilter.intersect(startKey, stopKey);
            if (newFilter == null) {
                return false;
            }
            // Intersect found: replace skip scan with intersected one
            scan.setFilter(newFilter);
        } else if (filter instanceof FilterList) {
            FilterList oldList = (FilterList) filter;
            FilterList newList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
            for (Filter f : oldList.getFilters()) {
                if (f instanceof SkipScanFilter) {
                    SkipScanFilter newFilter = ((SkipScanFilter) f).intersect(startKey, stopKey);
                    if (newFilter == null) {
                        return false;
                    }
                    newList.addFilter(newFilter);
                } else {
                    newList.addFilter(f);
                }
            }
            scan.setFilter(newList);
        }
    }
    return mayHaveRows;
}
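A short usage sketch of intersectScanRange (assumed caller code, not from the Phoenix source): narrow a skip-scan Scan to one region's key space before issuing it, and skip the region entirely when the intersection is provably empty. The region boundary values are hypothetical.

// Hypothetical region boundaries for illustration.
byte[] regionStart = Bytes.toBytes("000000000000002");
byte[] regionStop  = Bytes.toBytes("000000000000009");

Scan scan = new Scan();
scan.setFilter(skipScanFilter); // a previously built SkipScanFilter
if (!ScanUtil.intersectScanRange(scan, regionStart, regionStop, true)) {
    // None of the skip-scan ranges fall inside [regionStart, regionStop):
    // no RPC needs to be issued for this region.
    return;
}
// Otherwise scan now carries narrowed start/stop rows and, where needed,
// an intersected SkipScanFilter replacing the original.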
Use of org.apache.phoenix.filter.SkipScanFilter in project phoenix by apache.
From the class WhereCompilerTest, the method testInListWithAnd1Filter:
@Test
public void testInListWithAnd1Filter() throws SQLException {
    String tenantId1 = "000000000000001";
    String tenantId2 = "000000000000002";
    String tenantId3 = "000000000000003";
    String entityId = "00000000000000X";
    String query = String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id='%s'", ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId);
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    Filter filter = scan.getFilter();
    assertEquals(new SkipScanFilter(
            ImmutableList.of(Arrays.asList(
                    pointRange(tenantId1, entityId),
                    pointRange(tenantId2, entityId),
                    pointRange(tenantId3, entityId))),
            SchemaUtil.VAR_BINARY_SCHEMA), filter);
}
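Note that the expected point ranges appear in sorted key order (tenantId1, tenantId2, tenantId3) even though the query lists them as 1, 3, 2; SkipScanFilter requires its slot ranges sorted. The pointRange helper used in the assertion is defined elsewhere in WhereCompilerTest; below is a plausible reconstruction, hedged: it assumes KeyRange.getKeyRange(byte[]) builds a single-point, inclusive range, and that the fixed-width CHAR(15) key columns concatenate directly.

// Sketch of the pointRange test helper (assumed, not the actual test code).
private static KeyRange pointRange(String... ids) {
    byte[] key = ByteUtil.EMPTY_BYTE_ARRAY;
    for (String id : ids) {
        // Fixed-width CHAR key columns: plain concatenation yields the
        // composite row key, with no separators needed.
        key = ByteUtil.concat(key, Bytes.toBytes(id));
    }
    return KeyRange.getKeyRange(key); // a [key, key] point range
}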
Use of org.apache.phoenix.filter.SkipScanFilter in project phoenix by apache.
From the class WhereCompilerTest, the method testOr2InFilter:
@Test
@Ignore("OR not yet optimized")
public void testOr2InFilter() throws SQLException {
    String tenantId1 = "000000000000001";
    String tenantId2 = "000000000000002";
    String tenantId3 = "000000000000003";
    String query = String.format("select * from %s where organization_id='%s' OR organization_id='%s' OR organization_id='%s'", ATABLE_NAME, tenantId1, tenantId3, tenantId2);
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    Filter filter = scan.getFilter();
    assertEquals(new SkipScanFilter(
            ImmutableList.of(Arrays.asList(
                    pointRange(tenantId1),
                    pointRange(tenantId2),
                    pointRange(tenantId3))),
            plan.getTableRef().getTable().getRowKeySchema()), filter);
    byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId1);
    assertArrayEquals(startRow, scan.getStartRow());
    byte[] stopRow = PVarchar.INSTANCE.toBytes(tenantId3);
    assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
}
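Two details worth calling out: the compiler sorts the OR'd point ranges (tenantId2 before tenantId3, despite the query order), and the stop row is ByteUtil.nextKey of the largest point because HBase stop rows are exclusive, so the scan must end just past tenantId3 to include it. For comparison, the expected filter can be assembled by hand; a sketch, assuming KeyRange.getKeyRange(byte[]) builds a point range and a RowKeySchema obtained from the table (the schema variable is a stand-in):

// Sketch (assumed, not from the test): hand-building the expected filter.
List<List<KeyRange>> slots = Collections.singletonList(Arrays.asList(
        KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("000000000000001")),
        KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("000000000000002")),
        KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("000000000000003"))));
SkipScanFilter expected = new SkipScanFilter(slots, schema); // schema: the table's RowKeySchema
// The scan is then bounded by the smallest point (inclusive start row) and
// the key just past the largest point (exclusive stop row).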