Use of org.apache.phoenix.schema.PTable in project phoenix by apache.
Class QueryCompiler, method compileJoinQuery:
/*
* Call compileJoinQuery() for join queries recursively down to the leaf JoinTable nodes.
* This matches the input JoinTable node against patterns in the following order:
* 1. A (leaf JoinTable node, which can be a named table reference or a subquery of any kind.)
* Returns the compilation result of a single table scan or of an independent subquery.
* 2. Matching either of (when hint USE_SORT_MERGE_JOIN not specified):
* 1) A LEFT/INNER JOIN B
* 2) A LEFT/INNER JOIN B (LEFT/INNER JOIN C)+, if hint NO_STAR_JOIN not specified
* where A can be a named table reference or a flat subquery, and B, C, ... can be a named
* table reference, a sub-join or a subquery of any kind.
* Returns a HashJoinPlan{scan: A, hash: B, C, ...}.
* 3. Matching pattern:
* A RIGHT/INNER JOIN B (when hint USE_SORT_MERGE_JOIN not specified)
* where B can be a named table reference or a flat subquery, and A can be a named table
* reference, a sub-join or a subquery of any kind.
* Returns a HashJoinPlan{scan: B, hash: A}.
* NOTE that "A LEFT/RIGHT/INNER/FULL JOIN B RIGHT/INNER JOIN C" is viewed as
* "(A LEFT/RIGHT/INNER/FULL JOIN B) RIGHT/INNER JOIN C" here, which means the left part in
* parentheses is considered a sub-join.
* 4. All the rest that do not qualify for previous patterns or conditions, including FULL joins.
* Returns a SortMergeJoinPlan, the sorting part of which is pushed down to the JoinTable nodes
* of both sides as order-by clauses.
* NOTE that SEMI or ANTI joins are treated the same way as LEFT joins in JoinTable pattern matching.
*
* If no join algorithm hint is provided, then according to the above compilation process a join
* query plan may consist of both HashJoinPlan and SortMergeJoinPlan nodes, which may enclose each other.
* TODO 1) Use table statistics to guide the choice of join plans.
* 2) Make it possible to hint a certain join algorithm for a specific join step.
*/
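// For illustration only (T1, T2 and T3 are hypothetical tables), the patterns above roughly
// correspond to queries such as:
//   Pattern 2 (hash join; star join when there is more than one RHS and NO_STAR_JOIN is not given):
//     SELECT ... FROM T1 INNER JOIN T2 ON ... LEFT JOIN T3 ON ...
//   Pattern 3 (hash join with the RHS as the scanned side):
//     SELECT ... FROM T1 RIGHT JOIN T2 ON ...
//   Pattern 4 (sort-merge join):
//     SELECT ... FROM T1 FULL JOIN T2 ON ...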
@SuppressWarnings("unchecked")
protected QueryPlan compileJoinQuery(StatementContext context, List<Object> binds, JoinTable joinTable, boolean asSubquery, boolean projectPKColumns, List<OrderByNode> orderBy) throws SQLException {
byte[] emptyByteArray = new byte[0];
List<JoinSpec> joinSpecs = joinTable.getJoinSpecs();
if (joinSpecs.isEmpty()) {
Table table = joinTable.getTable();
SelectStatement subquery = table.getAsSubquery(orderBy);
if (!table.isSubselect()) {
context.setCurrentTable(table.getTableRef());
PTable projectedTable = table.createProjectedTable(!projectPKColumns, context);
TupleProjector projector = new TupleProjector(projectedTable);
TupleProjector.serializeProjectorIntoScan(context.getScan(), projector);
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes()));
table.projectColumns(context.getScan());
return compileSingleFlatQuery(context, subquery, binds, asSubquery, !asSubquery, null, projectPKColumns ? projector : null, true);
}
QueryPlan plan = compileSubquery(subquery, false);
PTable projectedTable = table.createProjectedTable(plan.getProjector());
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes()));
return new TupleProjectionPlan(plan, new TupleProjector(plan.getProjector()), table.compilePostFilterExpression(context));
}
boolean[] starJoinVector;
if (!this.useSortMergeJoin && (starJoinVector = joinTable.getStarJoinVector()) != null) {
Table table = joinTable.getTable();
PTable initialProjectedTable;
TableRef tableRef;
SelectStatement query;
TupleProjector tupleProjector;
if (!table.isSubselect()) {
context.setCurrentTable(table.getTableRef());
initialProjectedTable = table.createProjectedTable(!projectPKColumns, context);
tableRef = table.getTableRef();
table.projectColumns(context.getScan());
query = joinTable.getAsSingleSubquery(table.getAsSubquery(orderBy), asSubquery);
tupleProjector = new TupleProjector(initialProjectedTable);
} else {
SelectStatement subquery = table.getAsSubquery(orderBy);
QueryPlan plan = compileSubquery(subquery, false);
initialProjectedTable = table.createProjectedTable(plan.getProjector());
tableRef = plan.getTableRef();
context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
query = joinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
tupleProjector = new TupleProjector(plan.getProjector());
}
context.setCurrentTable(tableRef);
PTable projectedTable = initialProjectedTable;
int count = joinSpecs.size();
ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[count];
List<Expression>[] joinExpressions = new List[count];
JoinType[] joinTypes = new JoinType[count];
PTable[] tables = new PTable[count];
int[] fieldPositions = new int[count];
StatementContext[] subContexts = new StatementContext[count];
QueryPlan[] subPlans = new QueryPlan[count];
HashSubPlan[] hashPlans = new HashSubPlan[count];
fieldPositions[0] = projectedTable.getColumns().size() - projectedTable.getPKColumns().size();
for (int i = 0; i < count; i++) {
JoinSpec joinSpec = joinSpecs.get(i);
Scan subScan = ScanUtil.newScan(originalScan);
subContexts[i] = new StatementContext(statement, context.getResolver(), subScan, new SequenceManager(statement));
subPlans[i] = compileJoinQuery(subContexts[i], binds, joinSpec.getJoinTable(), true, true, null);
boolean hasPostReference = joinSpec.getJoinTable().hasPostReference();
if (hasPostReference) {
tables[i] = subContexts[i].getResolver().getTables().get(0).getTable();
projectedTable = JoinCompiler.joinProjectedTables(projectedTable, tables[i], joinSpec.getType());
} else {
tables[i] = null;
}
}
for (int i = 0; i < count; i++) {
JoinSpec joinSpec = joinSpecs.get(i);
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), query.getUdfParseNodes()));
// place-holder
joinIds[i] = new ImmutableBytesPtr(emptyByteArray);
Pair<List<Expression>, List<Expression>> joinConditions = joinSpec.compileJoinConditions(context, subContexts[i], true);
joinExpressions[i] = joinConditions.getFirst();
List<Expression> hashExpressions = joinConditions.getSecond();
Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
boolean optimized = getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), tableRef, joinSpec.getType(), joinExpressions[i], hashExpressions);
Expression keyRangeLhsExpression = keyRangeExpressions.getFirst();
Expression keyRangeRhsExpression = keyRangeExpressions.getSecond();
joinTypes[i] = joinSpec.getType();
if (i < count - 1) {
fieldPositions[i + 1] = fieldPositions[i] + (tables[i] == null ? 0 : (tables[i].getColumns().size() - tables[i].getPKColumns().size()));
}
hashPlans[i] = new HashSubPlan(i, subPlans[i], optimized ? null : hashExpressions, joinSpec.isSingleValueOnly(), keyRangeLhsExpression, keyRangeRhsExpression);
}
TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
QueryPlan plan = compileSingleFlatQuery(context, query, binds, asSubquery, !asSubquery && joinTable.isAllLeftJoin(), null, !table.isSubselect() && projectPKColumns ? tupleProjector : null, true);
Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, table);
Integer limit = null;
Integer offset = null;
if (!query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) {
limit = plan.getLimit();
offset = plan.getOffset();
}
HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, joinExpressions, joinTypes, starJoinVector, tables, fieldPositions, postJoinFilterExpression, QueryUtil.getOffsetLimit(limit, offset));
return HashJoinPlan.create(joinTable.getStatement(), plan, joinInfo, hashPlans);
}
JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1);
JoinType type = lastJoinSpec.getType();
if (!this.useSortMergeJoin && (type == JoinType.Right || type == JoinType.Inner) && lastJoinSpec.getJoinTable().getJoinSpecs().isEmpty() && lastJoinSpec.getJoinTable().getTable().isFlat()) {
JoinTable rhsJoinTable = lastJoinSpec.getJoinTable();
Table rhsTable = rhsJoinTable.getTable();
JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
Scan subScan = ScanUtil.newScan(originalScan);
StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), subScan, new SequenceManager(statement));
QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, true, null);
PTable rhsProjTable;
TableRef rhsTableRef;
SelectStatement rhs;
TupleProjector tupleProjector;
if (!rhsTable.isSubselect()) {
context.setCurrentTable(rhsTable.getTableRef());
rhsProjTable = rhsTable.createProjectedTable(!projectPKColumns, context);
rhsTableRef = rhsTable.getTableRef();
rhsTable.projectColumns(context.getScan());
rhs = rhsJoinTable.getAsSingleSubquery(rhsTable.getAsSubquery(orderBy), asSubquery);
tupleProjector = new TupleProjector(rhsProjTable);
} else {
SelectStatement subquery = rhsTable.getAsSubquery(orderBy);
QueryPlan plan = compileSubquery(subquery, false);
rhsProjTable = rhsTable.createProjectedTable(plan.getProjector());
rhsTableRef = plan.getTableRef();
context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
rhs = rhsJoinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
tupleProjector = new TupleProjector(plan.getProjector());
}
context.setCurrentTable(rhsTableRef);
context.setResolver(FromCompiler.getResolverForProjectedTable(rhsProjTable, context.getConnection(), rhs.getUdfParseNodes()));
ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[] { new ImmutableBytesPtr(emptyByteArray) };
Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(lhsCtx, context, true);
List<Expression> joinExpressions = joinConditions.getSecond();
List<Expression> hashExpressions = joinConditions.getFirst();
boolean needsMerge = lhsJoin.hasPostReference();
PTable lhsTable = needsMerge ? lhsCtx.getResolver().getTables().get(0).getTable() : null;
int fieldPosition = needsMerge ? rhsProjTable.getColumns().size() - rhsProjTable.getPKColumns().size() : 0;
PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(rhsProjTable, lhsTable, type == JoinType.Right ? JoinType.Left : type) : rhsProjTable;
TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), rhs.getUdfParseNodes()));
QueryPlan rhsPlan = compileSingleFlatQuery(context, rhs, binds, asSubquery, !asSubquery && type == JoinType.Right, null, !rhsTable.isSubselect() && projectPKColumns ? tupleProjector : null, true);
Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, rhsTable);
Integer limit = null;
Integer offset = null;
if (!rhs.isAggregate() && !rhs.isDistinct() && rhs.getOrderBy().isEmpty()) {
limit = rhsPlan.getLimit();
offset = rhsPlan.getOffset();
}
HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, new List[] { joinExpressions }, new JoinType[] { type == JoinType.Right ? JoinType.Left : type }, new boolean[] { true }, new PTable[] { lhsTable }, new int[] { fieldPosition }, postJoinFilterExpression, QueryUtil.getOffsetLimit(limit, offset));
Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), rhsTableRef, type, joinExpressions, hashExpressions);
return HashJoinPlan.create(joinTable.getStatement(), rhsPlan, joinInfo, new HashSubPlan[] { new HashSubPlan(0, lhsPlan, hashExpressions, false, keyRangeExpressions.getFirst(), keyRangeExpressions.getSecond()) });
}
JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
JoinTable rhsJoin = lastJoinSpec.getJoinTable();
if (type == JoinType.Right) {
JoinTable temp = lhsJoin;
lhsJoin = rhsJoin;
rhsJoin = temp;
}
List<EqualParseNode> joinConditionNodes = lastJoinSpec.getOnConditions();
List<OrderByNode> lhsOrderBy = Lists.<OrderByNode>newArrayListWithExpectedSize(joinConditionNodes.size());
List<OrderByNode> rhsOrderBy = Lists.<OrderByNode>newArrayListWithExpectedSize(joinConditionNodes.size());
for (EqualParseNode condition : joinConditionNodes) {
lhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getRHS() : condition.getLHS(), false, true));
rhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getLHS() : condition.getRHS(), false, true));
}
Scan lhsScan = ScanUtil.newScan(originalScan);
StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), lhsScan, new SequenceManager(statement));
boolean preserveRowkey = !projectPKColumns && type != JoinType.Full;
QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, !preserveRowkey, lhsOrderBy);
PTable lhsProjTable = lhsCtx.getResolver().getTables().get(0).getTable();
boolean isInRowKeyOrder = preserveRowkey && lhsPlan.getOrderBy().getOrderByExpressions().isEmpty();
Scan rhsScan = ScanUtil.newScan(originalScan);
StatementContext rhsCtx = new StatementContext(statement, context.getResolver(), rhsScan, new SequenceManager(statement));
QueryPlan rhsPlan = compileJoinQuery(rhsCtx, binds, rhsJoin, true, true, rhsOrderBy);
PTable rhsProjTable = rhsCtx.getResolver().getTables().get(0).getTable();
Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(type == JoinType.Right ? rhsCtx : lhsCtx, type == JoinType.Right ? lhsCtx : rhsCtx, false);
List<Expression> lhsKeyExpressions = type == JoinType.Right ? joinConditions.getSecond() : joinConditions.getFirst();
List<Expression> rhsKeyExpressions = type == JoinType.Right ? joinConditions.getFirst() : joinConditions.getSecond();
boolean needsMerge = rhsJoin.hasPostReference();
int fieldPosition = needsMerge ? lhsProjTable.getColumns().size() - lhsProjTable.getPKColumns().size() : 0;
PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(lhsProjTable, rhsProjTable, type == JoinType.Right ? JoinType.Left : type) : lhsProjTable;
ColumnResolver resolver = FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), joinTable.getStatement().getUdfParseNodes());
TableRef tableRef = resolver.getTables().get(0);
StatementContext subCtx = new StatementContext(statement, resolver, ScanUtil.newScan(originalScan), new SequenceManager(statement));
subCtx.setCurrentTable(tableRef);
QueryPlan innerPlan = new SortMergeJoinPlan(subCtx, joinTable.getStatement(), tableRef, type == JoinType.Right ? JoinType.Left : type, lhsPlan, rhsPlan, lhsKeyExpressions, rhsKeyExpressions, projectedTable, lhsProjTable, needsMerge ? rhsProjTable : null, fieldPosition, lastJoinSpec.isSingleValueOnly());
context.setCurrentTable(tableRef);
context.setResolver(resolver);
TableNode from = NODE_FACTORY.namedTable(tableRef.getTableAlias(), NODE_FACTORY.table(tableRef.getTable().getSchemaName().getString(), tableRef.getTable().getTableName().getString()));
ParseNode where = joinTable.getPostFiltersCombined();
SelectStatement select = asSubquery ? NODE_FACTORY.select(from, joinTable.getStatement().getHint(), false, Collections.<AliasedNode>emptyList(), where, null, null, orderBy, null, null, 0, false, joinTable.getStatement().hasSequence(), Collections.<SelectStatement>emptyList(), joinTable.getStatement().getUdfParseNodes()) : NODE_FACTORY.select(joinTable.getStatement(), from, where);
return compileSingleFlatQuery(context, select, binds, asSubquery, false, innerPlan, null, isInRowKeyOrder);
}
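The plan selected by the logic above is visible through EXPLAIN, so the hints mentioned in the comment (USE_SORT_MERGE_JOIN, NO_STAR_JOIN) can be exercised from plain JDBC. A minimal sketch, assuming a local Phoenix connection URL and two hypothetical tables ORDERS and CUSTOMERS:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class JoinPlanExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection URL and table names; adjust to your cluster and schema.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Default: an eligible INNER/LEFT join compiles to a HashJoinPlan.
            printPlan(stmt, "EXPLAIN SELECT * FROM ORDERS o INNER JOIN CUSTOMERS c ON o.CUST_ID = c.ID");
            // Forcing the sort-merge strategy routes the query to SortMergeJoinPlan instead.
            printPlan(stmt, "EXPLAIN SELECT /*+ USE_SORT_MERGE_JOIN */ * FROM ORDERS o INNER JOIN CUSTOMERS c ON o.CUST_ID = c.ID");
        }
    }

    private static void printPlan(Statement stmt, String sql) throws Exception {
        try (ResultSet rs = stmt.executeQuery(sql)) {
            while (rs.next()) {
                System.out.println(rs.getString(1)); // each row is one line of the query plan
            }
        }
    }
}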
Use of org.apache.phoenix.schema.PTable in project phoenix by apache.
Class PostIndexDDLCompiler, method compile:
public MutationPlan compile(final PTable indexTable) throws SQLException {
/*
* Handles:
* 1) Populate a newly created table with contents.
* 2) Activate the index by setting the INDEX_STATE to
*/
// NOTE: For the first version, we use an upsert/select to populate the new index table and
// return synchronously. As a result, creating an index on an existing table with a large
// amount of data can take a very long time.
// In the long term, we should change this to an asynchronous process to populate the index
// that would allow the user to easily monitor the process of index creation.
StringBuilder indexColumns = new StringBuilder();
StringBuilder dataColumns = new StringBuilder();
// Add the pk index columns
List<PColumn> indexPKColumns = indexTable.getPKColumns();
int nIndexPKColumns = indexTable.getPKColumns().size();
boolean isSalted = indexTable.getBucketNum() != null;
boolean isMultiTenant = connection.getTenantId() != null && indexTable.isMultiTenant();
boolean isViewIndex = indexTable.getViewIndexId() != null;
int posOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isViewIndex ? 1 : 0);
for (int i = posOffset; i < nIndexPKColumns; i++) {
PColumn col = indexPKColumns.get(i);
String indexColName = col.getName().getString();
// need to escape backslashes as this is used in the SELECT statement
String dataColName = StringUtil.escapeBackslash(col.getExpressionStr());
dataColumns.append(dataColName).append(",");
indexColumns.append('"').append(indexColName).append("\",");
indexColumnNames.add(indexColName);
dataColumnNames.add(dataColName);
}
// Add the covered columns
for (PColumnFamily family : indexTable.getColumnFamilies()) {
for (PColumn col : family.getColumns()) {
if (col.getViewConstant() == null) {
String indexColName = col.getName().getString();
String dataFamilyName = IndexUtil.getDataColumnFamilyName(indexColName);
String dataColumnName = IndexUtil.getDataColumnName(indexColName);
if (!dataFamilyName.equals("")) {
dataColumns.append('"').append(dataFamilyName).append("\".");
}
dataColumns.append('"').append(dataColumnName).append("\",");
indexColumns.append('"').append(indexColName).append("\",");
indexColumnNames.add(indexColName);
dataColumnNames.add(dataColumnName);
}
}
}
final PTable dataTable = dataTableRef.getTable();
dataColumns.setLength(dataColumns.length() - 1);
indexColumns.setLength(indexColumns.length() - 1);
String schemaName = dataTable.getSchemaName().getString();
String tableName = indexTable.getTableName().getString();
StringBuilder updateStmtStr = new StringBuilder();
updateStmtStr.append("UPSERT /*+ NO_INDEX */ INTO ").append(schemaName.length() == 0 ? "" : '"' + schemaName + "\".").append('"').append(tableName).append("\"(").append(indexColumns).append(") ");
final StringBuilder selectQueryBuilder = new StringBuilder();
selectQueryBuilder.append(" SELECT ").append(dataColumns).append(" FROM ").append(schemaName.length() == 0 ? "" : '"' + schemaName + "\".").append('"').append(dataTable.getTableName().getString()).append('"');
this.selectQuery = selectQueryBuilder.toString();
updateStmtStr.append(this.selectQuery);
try (final PhoenixStatement statement = new PhoenixStatement(connection)) {
DelegateMutationPlan delegate = new DelegateMutationPlan(statement.compileMutation(updateStmtStr.toString())) {
@Override
public MutationState execute() throws SQLException {
connection.getMutationState().commitDDLFence(dataTable);
return super.execute();
}
};
return delegate;
}
}
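To make the string building above concrete: for a hypothetical data table MY_SCHEMA.DATA_TABLE with primary key PK, indexed column V1 and covered column V2, and an index named MY_IDX, the compiled statement has roughly the following shape. The exact quoting, column-name prefixes and expressions come from the index PTable metadata, so this is only a sketch:

public class IndexPopulationSqlShape {
    public static void main(String[] args) {
        // Hypothetical names; the ":" / "0:" prefixes mimic how index column names encode
        // the originating row-key or column-family position. Family qualification of the
        // selected data columns is omitted here for brevity.
        String upsertSelect =
            "UPSERT /*+ NO_INDEX */ INTO \"MY_SCHEMA\".\"MY_IDX\"(\":PK\",\"0:V1\",\"0:V2\")"
          + "  SELECT PK, V1, V2 FROM \"MY_SCHEMA\".\"DATA_TABLE\"";
        System.out.println(upsertSelect);
    }
}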
Use of org.apache.phoenix.schema.PTable in project phoenix by apache.
Class ServerCacheClient, method addServerCache:
public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState, final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
ConnectionQueryServices services = connection.getQueryServices();
MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
List<Closeable> closeables = new ArrayList<Closeable>();
closeables.add(chunk);
ServerCache hashCacheSpec = null;
SQLException firstException = null;
final byte[] cacheId = generateId();
/**
* Execute EndPoint in parallel on each server to send compressed hash cache
*/
// TODO: generalize and package as a per region server EndPoint caller
// (ideally this would be functionality provided by the coprocessor framework)
boolean success = false;
ExecutorService executor = services.getExecutor();
List<Future<Boolean>> futures = Collections.emptyList();
try {
final PTable cacheUsingTable = cacheUsingTableRef.getTable();
List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
int nRegions = locations.size();
// Size these based on worst case
futures = new ArrayList<Future<Boolean>>(nRegions);
Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
for (HRegionLocation entry : locations) {
// Keep track of servers we've sent to and only send once
byte[] regionStartKey = entry.getRegionInfo().getStartKey();
byte[] regionEndKey = entry.getRegionInfo().getEndKey();
if (!servers.contains(entry) && keyRanges.intersectRegion(regionStartKey, regionEndKey, cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
// Call RPC once per server
servers.add(entry);
if (LOG.isDebugEnabled()) {
LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
}
final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
closeables.add(htable);
futures.add(executor.submit(new JobCallable<Boolean>() {
@Override
public Boolean call() throws Exception {
final Map<byte[], AddServerCacheResponse> results;
try {
results = htable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, AddServerCacheResponse>() {
@Override
public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
ServerRpcController controller = new ServerRpcController();
BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
final byte[] tenantIdBytes;
if (cacheUsingTable.isMultiTenant()) {
try {
tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
} catch (SQLException e) {
throw new IOException(e);
}
} else {
tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
}
if (tenantIdBytes != null) {
builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
}
builder.setCacheId(ByteStringer.wrap(cacheId));
builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
builder.setHasProtoBufIndexMaintainer(true);
ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuilder = ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
svrCacheFactoryBuilder.setClassName(cacheFactory.getClass().getName());
builder.setCacheFactory(svrCacheFactoryBuilder.build());
builder.setTxState(ByteStringer.wrap(txState));
instance.addServerCache(controller, builder.build(), rpcCallback);
if (controller.getFailedOn() != null) {
throw controller.getFailedOn();
}
return rpcCallback.get();
}
});
} catch (Throwable t) {
throw new Exception(t);
}
if (results != null && results.size() == 1) {
return results.values().iterator().next().getReturn();
}
return false;
}
/**
* Defines the grouping for round robin behavior. All threads spawned to process
* this scan will be grouped together and time sliced with other simultaneously
* executing parallel scans.
*/
@Override
public Object getJobId() {
return ServerCacheClient.this;
}
@Override
public TaskExecutionMetricsHolder getTaskExecutionMetric() {
return NO_OP_INSTANCE;
}
}));
} else {
if (LOG.isDebugEnabled()) {
LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));
}
}
}
hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
// Execute in parallel
int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
for (Future<Boolean> future : futures) {
future.get(timeoutMs, TimeUnit.MILLISECONDS);
}
cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
success = true;
} catch (SQLException e) {
firstException = e;
} catch (Exception e) {
firstException = new SQLException(e);
} finally {
try {
if (!success) {
SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
for (Future<Boolean> future : futures) {
future.cancel(true);
}
}
} finally {
try {
Closeables.closeAll(closeables);
} catch (IOException e) {
if (firstException == null) {
firstException = new SQLException(e);
}
} finally {
if (firstException != null) {
throw firstException;
}
}
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
}
return hashCacheSpec;
}
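The method above follows a fan-out pattern: submit one endpoint call per qualifying region, wait on each future with the configured thread timeout, and cancel whatever is still outstanding if anything fails. A stripped-down sketch of that pattern in plain Java; the RegionTask type and sendToAll helper are hypothetical stand-ins, not Phoenix API:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class FanOutSketch {
    // Hypothetical stand-in for sending the cache chunk to one region; the real work
    // happens inside the coprocessor call shown above.
    interface RegionTask { Boolean send() throws Exception; }

    static boolean sendToAll(List<RegionTask> tasks, long timeoutMs) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(Math.max(1, tasks.size()));
        List<Future<Boolean>> futures = new ArrayList<>(tasks.size());
        boolean success = false;
        try {
            for (RegionTask task : tasks) {
                futures.add(executor.submit(task::send));     // one RPC per region
            }
            for (Future<Boolean> future : futures) {
                future.get(timeoutMs, TimeUnit.MILLISECONDS); // bounded wait, like THREAD_TIMEOUT_MS
            }
            success = true;
        } finally {
            if (!success) {
                for (Future<Boolean> future : futures) {
                    future.cancel(true);                      // best-effort cleanup on failure
                }
            }
            executor.shutdown();
        }
        return success;
    }
}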
Use of org.apache.phoenix.schema.PTable in project phoenix by apache.
Class DeleteCompiler, method deleteRows:
private static MutationState deleteRows(StatementContext childContext, TableRef targetTableRef, List<TableRef> indexTableRefs, ResultIterator iterator, RowProjector projector, TableRef sourceTableRef) throws SQLException {
PTable table = targetTableRef.getTable();
PhoenixStatement statement = childContext.getStatement();
PhoenixConnection connection = statement.getConnection();
PName tenantId = connection.getTenantId();
byte[] tenantIdBytes = null;
if (tenantId != null) {
tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, table.getViewIndexId() != null);
}
final boolean isAutoCommit = connection.getAutoCommit();
ConnectionQueryServices services = connection.getQueryServices();
final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
Map<ImmutableBytesPtr, RowMutationState> mutations = Maps.newHashMapWithExpectedSize(batchSize);
List<Map<ImmutableBytesPtr, RowMutationState>> indexMutations = null;
// Delete rows from any index tables as well as from the data table through a single query to save executing an additional one.
if (!indexTableRefs.isEmpty()) {
indexMutations = Lists.newArrayListWithExpectedSize(indexTableRefs.size());
for (int i = 0; i < indexTableRefs.size(); i++) {
indexMutations.add(Maps.<ImmutableBytesPtr, RowMutationState>newHashMapWithExpectedSize(batchSize));
}
}
List<PColumn> pkColumns = table.getPKColumns();
boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
boolean isSharedViewIndex = table.getViewIndexId() != null;
int offset = (table.getBucketNum() == null ? 0 : 1);
byte[][] values = new byte[pkColumns.size()][];
if (isSharedViewIndex) {
values[offset++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
}
if (isMultiTenant) {
values[offset++] = tenantIdBytes;
}
try (PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
int rowCount = 0;
while (rs.next()) {
// allocate new as this is a key in a Map
ImmutableBytesPtr ptr = new ImmutableBytesPtr();
// there's no translation required.
if (sourceTableRef.equals(targetTableRef)) {
rs.getCurrentRow().getKey(ptr);
} else {
for (int i = offset; i < values.length; i++) {
byte[] byteValue = rs.getBytes(i + 1 - offset);
// TODO: consider going under the hood and just getting the bytes
if (pkColumns.get(i).getSortOrder() == SortOrder.DESC) {
byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
byteValue = SortOrder.invert(byteValue, 0, tempByteValue, 0, byteValue.length);
}
values[i] = byteValue;
}
table.newKey(ptr, values);
}
// When issuing deletes, we do not care about the row time ranges. Also, if the table had a row timestamp column, then the
// row key will already have its value.
mutations.put(ptr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
for (int i = 0; i < indexTableRefs.size(); i++) {
// allocate new as this is a key in a Map
ImmutableBytesPtr indexPtr = new ImmutableBytesPtr();
rs.getCurrentRow().getKey(indexPtr);
indexMutations.get(i).put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
}
if (mutations.size() > maxSize) {
throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
}
rowCount++;
// Commit a batch if auto commit is true and we're at our batch size
if (isAutoCommit && rowCount % batchSize == 0) {
MutationState state = new MutationState(targetTableRef, mutations, 0, maxSize, maxSizeBytes, connection);
connection.getMutationState().join(state);
for (int i = 0; i < indexTableRefs.size(); i++) {
MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
connection.getMutationState().join(indexState);
}
connection.getMutationState().send();
mutations.clear();
if (indexMutations != null) {
indexMutations.clear();
}
}
}
// If auto commit is true, this last batch will be committed upon return
int nCommittedRows = isAutoCommit ? (rowCount / batchSize * batchSize) : 0;
MutationState state = new MutationState(targetTableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection);
for (int i = 0; i < indexTableRefs.size(); i++) {
// To prevent the counting of these index rows, we have a negative for remainingRows.
MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
state.join(indexState);
}
return state;
}
}
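On the client side, the batching above is driven by auto-commit and the connection's mutate batch size. A minimal sketch, assuming a local Phoenix URL, a hypothetical table T with primary key ID, and the standard phoenix.mutate.batchSize client property:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.Properties;

public class AutoCommitDeleteExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Hypothetical tuning: lower the client-side mutation batch size so deleteRows()
        // flushes smaller batches while auto-commit is on.
        props.setProperty("phoenix.mutate.batchSize", "1000");
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
            conn.setAutoCommit(true); // lets deleteRows() commit every batchSize rows
            try (Statement stmt = conn.createStatement()) {
                int deleted = stmt.executeUpdate("DELETE FROM T WHERE ID < 1000000");
                System.out.println("Deleted rows: " + deleted);
            }
        }
    }
}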
Use of org.apache.phoenix.schema.PTable in project phoenix by apache.
Class TestUtil, method doMajorCompaction:
/**
* Runs a major compaction, and then waits until the compaction is complete before returning.
*
* @param tableName name of the table to be compacted
*/
public static void doMajorCompaction(Connection conn, String tableName) throws Exception {
tableName = SchemaUtil.normalizeIdentifier(tableName);
// We simply write a marker row, request a major compaction, and then wait until the marker
// row is gone
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), tableName));
ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
MutationState mutationState = pconn.getMutationState();
if (table.isTransactional()) {
mutationState.startTransaction();
}
try (HTableInterface htable = mutationState.getHTable(table)) {
byte[] markerRowKey = Bytes.toBytes("TO_DELETE");
Put put = new Put(markerRowKey);
put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
htable.put(put);
Delete delete = new Delete(markerRowKey);
delete.deleteColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
htable.delete(delete);
htable.close();
if (table.isTransactional()) {
mutationState.commit();
}
HBaseAdmin hbaseAdmin = services.getAdmin();
hbaseAdmin.flush(tableName);
hbaseAdmin.majorCompact(tableName);
hbaseAdmin.close();
boolean compactionDone = false;
while (!compactionDone) {
Thread.sleep(6000L);
Scan scan = new Scan();
scan.setStartRow(markerRowKey);
scan.setStopRow(Bytes.add(markerRowKey, new byte[] { 0 }));
scan.setRaw(true);
try (HTableInterface htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
ResultScanner scanner = htableForRawScan.getScanner(scan);
List<Result> results = Lists.newArrayList(scanner);
LOG.info("Results: " + results);
compactionDone = results.isEmpty();
scanner.close();
}
LOG.info("Compaction done: " + compactionDone);
// need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows
if (!compactionDone && table.isTransactional()) {
hbaseAdmin = services.getAdmin();
hbaseAdmin.flush(tableName);
hbaseAdmin.majorCompact(tableName);
hbaseAdmin.close();
}
}
}
}
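A typical test-side use of the helper above, assuming a local test cluster, a hypothetical table MY_TABLE, and TestUtil available as org.apache.phoenix.util.TestUtil:

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.util.TestUtil;

public class CompactionTestSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical table; assumes it already exists on the local test cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.createStatement().execute("DELETE FROM MY_TABLE WHERE ID = 1");
            conn.commit();
            // Force deleted cells to be purged before asserting on raw scans or row counts.
            TestUtil.doMajorCompaction(conn, "MY_TABLE");
        }
    }
}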