Use of org.apache.phoenix.index.IndexMaintainer in project phoenix by apache.
The class GroupedAggregateRegionObserver, method doPostScannerOpen.
/**
 * Replaces the RegionScanner s with a RegionScanner that groups by the key formed by the list
 * of expressions from the scan and returns the aggregated rows of each group. For example,
 * given the following original rows in the RegionScanner:
 *
 *   KEY   COL1
 *   row1  a
 *   row2  b
 *   row3  a
 *   row4  a
 *
 * the following rows will be returned for COUNT(*):
 *
 *   KEY  COUNT
 *   a    3
 *   b    1
 *
 * The client is required to do a sort and a final aggregation, since multiple rows with the
 * same key may be returned from different regions.
 */
@Override
protected RegionScanner doPostScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
        RegionScanner s) throws IOException {
    boolean keyOrdered = false;
    byte[] expressionBytes = scan.getAttribute(BaseScannerRegionObserver.UNORDERED_GROUP_BY_EXPRESSIONS);
    if (expressionBytes == null) {
        expressionBytes = scan.getAttribute(BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS);
        keyOrdered = true;
    }
    int offset = 0;
    if (ScanUtil.isLocalIndex(scan)) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        Region region = c.getEnvironment().getRegion();
        offset = region.getRegionInfo().getStartKey().length != 0
                ? region.getRegionInfo().getStartKey().length
                : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offset);
    }
    List<Expression> expressions = deserializeGroupByExpressions(expressionBytes, 0);
    ServerAggregators aggregators = ServerAggregators.deserialize(
            scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS),
            c.getEnvironment().getConfiguration());
    RegionScanner innerScanner = s;
    boolean useProto = false;
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
    useProto = localIndexBytes != null;
    if (localIndexBytes == null) {
        localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    }
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null
            ? null
            : IndexMaintainer.deserialize(localIndexBytes, useProto);
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(
            EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    if (ScanUtil.isLocalIndex(scan) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
        innerScanner = getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector,
                c.getEnvironment().getRegion(), indexMaintainers == null ? null : indexMaintainers.get(0),
                viewConstants, p, tempPtr, useQualifierAsIndex);
    }
    if (j != null) {
        innerScanner = new HashJoinRegionScanner(innerScanner, p, j, ScanUtil.getTenantId(scan),
                c.getEnvironment(), useQualifierAsIndex, useNewValueColumnQualifier);
    }
    long limit = Long.MAX_VALUE;
    byte[] limitBytes = scan.getAttribute(GROUP_BY_LIMIT);
    if (limitBytes != null) {
        limit = PInteger.INSTANCE.getCodec().decodeInt(limitBytes, 0, SortOrder.getDefault());
    }
    if (keyOrdered) {
        // Already in the required group-by key order
        return scanOrdered(c, scan, innerScanner, expressions, aggregators, limit);
    } else {
        // Otherwise, collect them all up in an in-memory map
        return scanUnordered(c, scan, innerScanner, expressions, aggregators, limit);
    }
}
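The javadoc above notes that the client must still sort and re-aggregate, because different regions can each return a partial result for the same group key. The stand-alone sketch below is illustrative only (it is not Phoenix's actual client-side merge code) and shows what that final merge looks like for COUNT(*):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Illustrative only: merges per-region partial COUNT(*) results into final
// counts per group key, mirroring the contract described in the javadoc.
// Each (key, partialCount) pair stands in for a row returned by one region.
public class GroupByMergeSketch {

    static Map<String, Long> mergePartials(List<Map.Entry<String, Long>> regionResults) {
        // TreeMap keeps the output sorted by group key, like the client-side sort.
        Map<String, Long> merged = new TreeMap<>();
        for (Map.Entry<String, Long> partial : regionResults) {
            merged.merge(partial.getKey(), partial.getValue(), Long::sum);
        }
        return merged;
    }

    public static void main(String[] args) {
        List<Map.Entry<String, Long>> partials = new ArrayList<>();
        partials.add(Map.entry("a", 2L)); // region 1: two 'a' rows
        partials.add(Map.entry("b", 1L)); // region 1: one 'b' row
        partials.add(Map.entry("a", 1L)); // region 2: one more 'a' row
        System.out.println(mergePartials(partials)); // prints {a=3, b=1}
    }
}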
Use of org.apache.phoenix.index.IndexMaintainer in project phoenix by apache.
The class IndexUtil, method generateIndexData.
public static List<Mutation> generateIndexData(final PTable table, PTable index,
        final Map<ImmutableBytesPtr, RowMutationState> valuesMap, List<Mutation> dataMutations,
        final KeyValueBuilder kvBuilder, PhoenixConnection connection) throws SQLException {
    try {
        final ImmutableBytesPtr ptr = new ImmutableBytesPtr();
        IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
        List<Mutation> indexMutations = Lists.newArrayListWithExpectedSize(dataMutations.size());
        for (final Mutation dataMutation : dataMutations) {
            long ts = MetaDataUtil.getClientTimeStamp(dataMutation);
            ptr.set(dataMutation.getRow());
            /*
             * We only need to generate the additional mutations for a Put for immutable
             * indexes. Deletes of rows are handled by running a re-written query against
             * the index table, and Deletes of column values should never be necessary, as
             * you should never be updating an existing row.
             */
            if (dataMutation instanceof Put) {
                ValueGetter valueGetter = new ValueGetter() {

                    @Override
                    public byte[] getRowKey() {
                        return dataMutation.getRow();
                    }

                    @Override
                    public ImmutableBytesWritable getLatestValue(ColumnReference ref) {
                        // Always return null for our empty key value, as this will cause the index
                        // maintainer to always treat this Put as a new row.
                        if (isEmptyKeyValue(table, ref)) {
                            return null;
                        }
                        byte[] family = ref.getFamily();
                        byte[] qualifier = ref.getQualifier();
                        Map<byte[], List<Cell>> familyMap = dataMutation.getFamilyCellMap();
                        List<Cell> kvs = familyMap.get(family);
                        if (kvs == null) {
                            return null;
                        }
                        for (Cell kv : kvs) {
                            if (Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(),
                                        kv.getFamilyLength(), family, 0, family.length) == 0
                                    && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(),
                                        kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0) {
                                ImmutableBytesPtr ptr = new ImmutableBytesPtr();
                                kvBuilder.getValueAsPtr(kv, ptr);
                                return ptr;
                            }
                        }
                        return null;
                    }
                };
                byte[] regionStartKey = null;
                byte[] regionEndkey = null;
                if (maintainer.isLocalIndex()) {
                    HRegionLocation tableRegionLocation = connection.getQueryServices()
                            .getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow());
                    regionStartKey = tableRegionLocation.getRegionInfo().getStartKey();
                    regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
                }
                indexMutations.add(maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, ts,
                        regionStartKey, regionEndkey));
            }
        }
        return indexMutations;
    } catch (IOException e) {
        throw new SQLException(e);
    }
}
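For orientation, here is a hedged sketch of what could be done with the List<Mutation> that generateIndexData returns if you were applying it to a global index's physical table with the plain HBase client. The connection and table-name variables are assumptions for illustration; inside Phoenix the mutations actually travel through the client's normal commit path rather than being batched by hand like this.

import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;

// Sketch only: hbaseConnection is an already-open HBase client connection and
// indexPhysicalName would be index.getPhysicalName().getBytes() for a global index.
public class ApplyIndexMutationsSketch {

    static void applyIndexMutations(Connection hbaseConnection, byte[] indexPhysicalName,
            List<Mutation> indexMutations) throws Exception {
        try (Table indexTable = hbaseConnection.getTable(TableName.valueOf(indexPhysicalName))) {
            // Table.batch applies the Puts/Deletes, grouping them per region server.
            Object[] results = new Object[indexMutations.size()];
            indexTable.batch(indexMutations, results);
        }
    }
}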
Use of org.apache.phoenix.index.IndexMaintainer in project phoenix by apache.
The class IndexUtil, method generateDeleteIndexData.
public static List<Delete> generateDeleteIndexData(final PTable table, PTable index,
        List<Delete> dataMutations, ImmutableBytesWritable ptr, final KeyValueBuilder kvBuilder,
        PhoenixConnection connection) throws SQLException {
    try {
        IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
        List<Delete> indexMutations = Lists.newArrayListWithExpectedSize(dataMutations.size());
        for (final Mutation dataMutation : dataMutations) {
            long ts = MetaDataUtil.getClientTimeStamp(dataMutation);
            ptr.set(dataMutation.getRow());
            byte[] regionStartKey = null;
            byte[] regionEndkey = null;
            if (maintainer.isLocalIndex()) {
                HRegionLocation tableRegionLocation = connection.getQueryServices()
                        .getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow());
                regionStartKey = tableRegionLocation.getRegionInfo().getStartKey();
                regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
            }
            Delete delete = maintainer.buildDeleteMutation(kvBuilder, null, ptr,
                    Collections.<KeyValue>emptyList(), ts, regionStartKey, regionEndkey);
            delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY,
                    dataMutation.getAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY));
            indexMutations.add(delete);
        }
        return indexMutations;
    } catch (IOException e) {
        throw new SQLException(e);
    }
}
Use of org.apache.phoenix.index.IndexMaintainer in project phoenix by apache.
The class TestNonTxIndexBuilder, method getTestIndexMaintainer.
private IndexMaintainer getTestIndexMaintainer() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
    // disable column encoding, makes debugging easier
    props.put(QueryServices.DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB, "0");
    Connection conn = DriverManager.getConnection(getUrl(), props);
    try {
        conn.setAutoCommit(true);
        conn.createStatement().execute(TEST_TABLE_DDL);
        conn.createStatement().execute(TEST_TABLE_INDEX_DDL);
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), TEST_TABLE_STRING));
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        table.getIndexMaintainers(ptr, pconn);
        List<IndexMaintainer> indexMaintainerList =
                IndexMaintainer.deserialize(ptr, GenericKeyValueBuilder.INSTANCE, true);
        assertEquals(1, indexMaintainerList.size());
        IndexMaintainer indexMaintainer = indexMaintainerList.get(0);
        return indexMaintainer;
    } finally {
        conn.close();
    }
}
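The helper above depends on TEST_TABLE_DDL, TEST_TABLE_INDEX_DDL, and TEST_TABLE_STRING constants defined elsewhere in TestNonTxIndexBuilder. The values below are hypothetical stand-ins (table, index, and column names are assumptions, not the real constants) just to make the shape of the test schema concrete:

// Hypothetical values; the real constants live in TestNonTxIndexBuilder.
private static final String TEST_TABLE_STRING = "TEST_TABLE";
private static final String TEST_TABLE_DDL =
        "CREATE TABLE IF NOT EXISTS " + TEST_TABLE_STRING
        + " (pk VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)";
private static final String TEST_TABLE_INDEX_DDL =
        "CREATE INDEX IF NOT EXISTS TEST_INDEX ON " + TEST_TABLE_STRING
        + " (v1) INCLUDE (v2)";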
Use of org.apache.phoenix.index.IndexMaintainer in project phoenix by apache.
The class MetaDataEndpointImpl, method dropIndexes.
private void dropIndexes(PTable table, Region region, List<ImmutableBytesPtr> invalidateList,
        List<RowLock> locks, long clientTimeStamp, byte[] schemaName, byte[] tableName,
        List<Mutation> additionalTableMetaData, PColumn columnToDelete, List<byte[]> tableNamesToDelete,
        List<SharedTableState> sharedTablesToDelete) throws IOException, SQLException {
    // Look for columnToDelete in any indexes. If it is found as an indexed (PK) column, get
    // the lock, drop the index, and then invalidate it.
    // Covered columns are deleted from the index by the client.
    PhoenixConnection connection = null;
    try {
        connection = table.getIndexes().isEmpty() ? null
                : QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
    } catch (ClassNotFoundException e) {
        // Ignored: if the Phoenix driver class cannot be loaded, connection simply stays null.
    }
    for (PTable index : table.getIndexes()) {
        byte[] tenantId = index.getTenantId() == null
                ? ByteUtil.EMPTY_BYTE_ARRAY
                : index.getTenantId().getBytes();
        IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
        byte[] indexKey = SchemaUtil.getTableKey(tenantId, index.getSchemaName().getBytes(),
                index.getTableName().getBytes());
        Pair<String, String> columnToDeleteInfo = new Pair<>(
                columnToDelete.getFamilyName().getString(), columnToDelete.getName().getString());
        ColumnReference colDropRef = new ColumnReference(
                columnToDelete.getFamilyName().getBytes(), columnToDelete.getColumnQualifierBytes());
        boolean isColumnIndexed = indexMaintainer.getIndexedColumnInfo().contains(columnToDeleteInfo);
        boolean isCoveredColumn = indexMaintainer.getCoveredColumns().contains(colDropRef);
        // If index requires this column for its pk, then drop it
        if (isColumnIndexed) {
            // Since we're dropping the index, lock it to ensure that a change in
            // index state doesn't occur while we're dropping it.
            acquireLock(region, indexKey, locks);
            // Drop the index table. The doDropTable will expand this to all of the
            // table rows and invalidate the index table
            additionalTableMetaData.add(new Delete(indexKey, clientTimeStamp));
            byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantId, schemaName, tableName,
                    index.getTableName().getBytes());
            // Drop the link between the data table and the index table
            additionalTableMetaData.add(new Delete(linkKey, clientTimeStamp));
            doDropTable(indexKey, tenantId, index.getSchemaName().getBytes(),
                    index.getTableName().getBytes(), tableName, index.getType(),
                    additionalTableMetaData, invalidateList, locks, tableNamesToDelete,
                    sharedTablesToDelete, false);
            invalidateList.add(new ImmutableBytesPtr(indexKey));
        } else if (isCoveredColumn) {
            // If the dropped column is a covered index column, invalidate the index
            invalidateList.add(new ImmutableBytesPtr(indexKey));
        }
    }
}
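To see when this server-side path runs, here is a hedged client-side example (the JDBC URL, table, index, and column names are all made up for illustration). Dropping a covered (INCLUDE) column takes the isCoveredColumn branch and only invalidates the cached index metadata, while dropping an indexed column takes the isColumnIndexed branch and drops the index itself:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

// Illustrative only; assumes a Phoenix cluster reachable at the given JDBC URL.
public class DropColumnExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS T (PK VARCHAR PRIMARY KEY, A VARCHAR, B VARCHAR)");
            stmt.execute("CREATE INDEX IF NOT EXISTS IDX ON T (A) INCLUDE (B)");
            // B is only a covered column of IDX: the isCoveredColumn branch invalidates
            // the cached index metadata while the client removes B from the index.
            stmt.execute("ALTER TABLE T DROP COLUMN B");
            // A is an indexed column of IDX: the isColumnIndexed branch locks IDX,
            // drops it along with its parent link, and invalidates it.
            stmt.execute("ALTER TABLE T DROP COLUMN A");
        }
    }
}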