Use of org.apache.phoenix.cache.ServerCacheClient.ServerCache in project phoenix by apache.
In the class PostDDLCompiler, the compile method.
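This example declares a per-table ServerCache slot and releases it in the finally block of each loop iteration; the code that would actually populate it with index metadata (via IndexMetaDataCacheClient.addIndexMetadataCache) is currently commented out in the TODO block.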
public MutationPlan compile(final List<TableRef> tableRefs, final byte[] emptyCF, final List<byte[]> projectCFs, final List<PColumn> deleteList, final long timestamp) throws SQLException {
PhoenixStatement statement = new PhoenixStatement(connection);
final StatementContext context = new StatementContext(statement, new ColumnResolver() {
@Override
public List<TableRef> getTables() {
return tableRefs;
}
@Override
public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public List<PFunction> getFunctions() {
return Collections.<PFunction>emptyList();
}
@Override
public PFunction resolveFunction(String functionName) throws SQLException {
throw new FunctionNotFoundException(functionName);
}
@Override
public boolean hasUDFs() {
return false;
}
@Override
public PSchema resolveSchema(String schemaName) throws SQLException {
throw new SchemaNotFoundException(schemaName);
}
@Override
public List<PSchema> getSchemas() {
throw new UnsupportedOperationException();
}
}, scan, new SequenceManager(statement));
return new BaseMutationPlan(context, Operation.UPSERT) {
/* FIXME */
@Override
public MutationState execute() throws SQLException {
if (tableRefs.isEmpty()) {
return new MutationState(0, 1000, connection);
}
boolean wasAutoCommit = connection.getAutoCommit();
try {
connection.setAutoCommit(true);
SQLException sqlE = null;
/*
* Handles:
* 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX;
* 2) deletion of all column values for an ALTER TABLE DROP COLUMN
* 3) updating the necessary rows to have an empty KV
* 4) updating table stats
*/
long totalMutationCount = 0;
for (final TableRef tableRef : tableRefs) {
Scan scan = ScanUtil.newScan(context.getScan());
SelectStatement select = SelectStatement.COUNT_ONE;
// We need to use this tableRef
ColumnResolver resolver = new ColumnResolver() {
@Override
public List<TableRef> getTables() {
return Collections.singletonList(tableRef);
}
@Override
public java.util.List<PFunction> getFunctions() {
return Collections.emptyList();
}
@Override
public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
PColumn column = tableName != null ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) : tableRef.getTable().getColumnForColumnName(colName);
return new ColumnRef(tableRef, column.getPosition());
}
@Override
public PFunction resolveFunction(String functionName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public boolean hasUDFs() {
return false;
}
@Override
public List<PSchema> getSchemas() {
throw new UnsupportedOperationException();
}
@Override
public PSchema resolveSchema(String schemaName) throws SQLException {
throw new SchemaNotFoundException(schemaName);
}
};
PhoenixStatement statement = new PhoenixStatement(connection);
StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
long ts = timestamp;
// FIXME: DDL operations aren't transactional, so we're basing the timestamp on a server timestamp. We don't need conflict detection or filtering of invalid transactions in this case, so maybe this is ok.
if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
ts = TransactionUtil.convertToNanoseconds(ts);
}
ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
if (emptyCF != null) {
scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
}
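// Placeholder for a server-side index metadata cache; if it is ever populated (see the commented-out block below), the finally clause at the end of this iteration releases the server-side memory.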
ServerCache cache = null;
try {
if (deleteList != null) {
if (deleteList.isEmpty()) {
scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
// In the case of a row deletion, add index metadata so mutable secondary indexing works
/* TODO: we currently manually run a scan to delete the index data here
ImmutableBytesWritable ptr = context.getTempPtr();
tableRef.getTable().getIndexMaintainers(ptr);
if (ptr.getLength() > 0) {
IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
byte[] uuidValue = cache.getId();
scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
}
*/
} else {
// In the case of the empty key value column family changing, do not send the index
// metadata, as we're currently managing this from the client. It's possible for the
// data empty column family to stay the same, while the index empty column family
// changes.
PColumn column = deleteList.get(0);
byte[] cq = column.getColumnQualifierBytes();
if (emptyCF == null) {
scan.addColumn(column.getFamilyName().getBytes(), cq);
}
scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
}
}
List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
if (projectCFs == null) {
for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
columnFamilies.add(family.getName().getBytes());
}
} else {
for (byte[] projectCF : projectCFs) {
columnFamilies.add(projectCF);
}
}
// Need to project all column families into the scan, since we haven't yet created our empty key value
RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
// Explicitly project these column families and don't project the empty key value, since at this point we haven't added the empty key value everywhere.
if (columnFamilies != null) {
scan.getFamilyMap().clear();
for (byte[] family : columnFamilies) {
scan.addFamily(family);
}
projector = new RowProjector(projector, false);
}
// Ignore exceptions due to not being able to resolve any view columns, as this just means the view is invalid. Continue on and try to perform any other Post DDL operations.
try {
// Since dropping a VIEW does not affect the underlying data, we do
// not need to pass through the view statement here.
// Push where clause into scan
WhereCompiler.compile(context, select);
} catch (ColumnFamilyNotFoundException e) {
continue;
} catch (ColumnNotFoundException e) {
continue;
} catch (AmbiguousColumnException e) {
continue;
}
QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
try {
ResultIterator iterator = plan.iterator();
try {
Tuple row = iterator.next();
ImmutableBytesWritable ptr = context.getTempPtr();
totalMutationCount += (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
} catch (SQLException e) {
sqlE = e;
} finally {
try {
iterator.close();
} catch (SQLException e) {
if (sqlE == null) {
sqlE = e;
} else {
sqlE.setNextException(e);
}
} finally {
if (sqlE != null) {
throw sqlE;
}
}
}
} catch (TableNotFoundException e) {
// Ignore and continue, as HBase throws when table hasn't been written to
// FIXME: Remove if this is fixed in 0.96
}
} finally {
if (cache != null) {
// Remove server cache if there is one
cache.close();
}
}
}
final long count = totalMutationCount;
return new MutationState(1, 1000, connection) {
@Override
public long getUpdateCount() {
return count;
}
};
} finally {
if (!wasAutoCommit)
connection.setAutoCommit(wasAutoCommit);
}
}
};
}
Use of org.apache.phoenix.cache.ServerCacheClient.ServerCache in project phoenix by apache.
In the class MutationState, the send method.
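Here a ServerCache is obtained from setMetaDataOnMutations (shown in the last example on this page) for the data table only. It ships the index maintainer metadata to the region servers, its presence enables the single retry on INDEX_METADATA_NOT_FOUND, and it is closed in the finally block once the batch has been sent.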
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
List<TableRef> txTableRefs = Lists.newArrayListWithExpectedSize(mutations.size());
Map<TableInfo, List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
boolean isTransactional;
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validate(tableRef, valuesMap) : serverTimeStamps[i++];
final PTable table = tableRef.getTable();
Iterator<Pair<PName, List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName, List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList != null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send the data over, because our CommitException should include all statements involved in the transaction, since none of them would have been committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
// This is not ideal, but there's not good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
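// Only data table batches register a ServerCache with the index metadata; index table batches are sent without one.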
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache != null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
txTableRefs.add(origTableRef);
addDMLFence(table);
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that will attach the necessary index metadata in the event of a rollback.
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
TransactionAwareHTable txnAware = TransactionUtil.getTransactionAwareHTable(hTable, table.isImmutableRows());
// Don't add immutable indexes (those are the only ones that would participate during a commit), as we don't need conflict detection for these.
if (tableInfo.isDataTable()) {
// Even for immutable, we need to do this so that an abort has the state
// necessary to generate the rows to delete.
addTransactionParticipant(txnAware);
} else {
txnAware.startTx(getTransaction());
}
hTable = txnAware;
}
long numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
long startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled())
logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
child.stop();
child.stop();
shouldRetry = false;
long mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
long mutationSizeBytes = calculateMutationSize(mutationList);
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span, "Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
sqlE = new CommitException(e, getUncommittedStatementIndexes(), serverTimestamp);
} finally {
try {
if (cache != null)
cache.close();
} finally {
try {
hTable.close();
} catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
Use of org.apache.phoenix.cache.ServerCacheClient.ServerCache in project phoenix by apache.
In the class DeleteCompiler, the compile method.
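In the server-side delete path below (the runOnServer branch), the ServerCache variable stays null: a generated UUID, the serialized index metadata, and the transaction state are set directly as scan attributes, while the close-in-finally pattern is kept in place.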
public MutationPlan compile(DeleteStatement delete) throws SQLException {
final PhoenixConnection connection = statement.getConnection();
final boolean isAutoCommit = connection.getAutoCommit();
final boolean hasLimit = delete.getLimit() != null;
final ConnectionQueryServices services = connection.getQueryServices();
List<QueryPlan> queryPlans;
NamedTableNode tableNode = delete.getTable();
String tableName = tableNode.getName().getTableName();
String schemaName = tableNode.getName().getSchemaName();
boolean retryOnce = !isAutoCommit;
TableRef tableRefToBe;
boolean noQueryReqd = false;
boolean runOnServer = false;
SelectStatement select = null;
ColumnResolver resolverToBe = null;
Map<PTableKey, PTable> immutableIndex = Collections.emptyMap();
DeletingParallelIteratorFactory parallelIteratorFactory;
QueryPlan dataPlanToBe = null;
while (true) {
try {
resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
tableRefToBe = resolverToBe.getTables().get(0);
PTable table = tableRefToBe.getTable();
// TODO: SchemaUtil.isReadOnly(PTable, connection)?
if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
throw new ReadOnlyTableException(schemaName, tableName);
} else if (table.isTransactional() && connection.getSCN() != null) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
}
immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
boolean mayHaveImmutableIndexes = !immutableIndex.isEmpty();
noQueryReqd = !hasLimit;
// Can't run on same server for transactional data, as we need the row keys for the data
// that is being upserted for conflict detection purposes.
runOnServer = isAutoCommit && noQueryReqd && !table.isTransactional();
HintNode hint = delete.getHint();
if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
}
List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
boolean isSalted = table.getBucketNum() != null;
boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
boolean isSharedViewIndex = table.getViewIndexId() != null;
for (int i = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); i < table.getPKColumns().size(); i++) {
PColumn column = table.getPKColumns().get(i);
aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
}
select = FACTORY.select(delete.getTable(), hint, false, aliasedNodes, delete.getWhere(), Collections.<ParseNode>emptyList(), null, delete.getOrderBy(), delete.getLimit(), null, delete.getBindCount(), false, false, Collections.<SelectStatement>emptyList(), delete.getUdfParseNodes());
select = StatementNormalizer.normalize(select, resolverToBe);
SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
if (transformedSelect != select) {
resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
}
parallelIteratorFactory = hasLimit ? null : new DeletingParallelIteratorFactory(connection);
QueryOptimizer optimizer = new QueryOptimizer(services);
QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory, new SequenceManager(statement));
dataPlanToBe = compiler.compile();
queryPlans = Lists.newArrayList(mayHaveImmutableIndexes ? optimizer.getApplicablePlans(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory) : optimizer.getBestPlan(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory));
if (mayHaveImmutableIndexes) {
// FIXME: this is ugly
// Lookup the table being deleted from in the cache, as it's possible that the
// optimizer updated the cache if it found indexes that were out of date.
// If the index was marked as disabled, it should not be in the list
// of immutable indexes.
table = connection.getTable(new PTableKey(table.getTenantId(), table.getName().getString()));
tableRefToBe.setTable(table);
immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
}
} catch (MetaDataEntityNotFoundException e) {
// Catch column/column family not found exceptions, as our metadata may be out of sync. Update the cache once and retry if we were out of sync. Otherwise throw, as we'll just get the same error next time.
if (retryOnce) {
retryOnce = false;
MetaDataMutationResult result = new MetaDataClient(connection).updateCache(schemaName, tableName);
if (result.wasUpdated()) {
continue;
}
}
throw e;
}
break;
}
boolean isBuildingImmutable = false;
final boolean hasImmutableIndexes = !immutableIndex.isEmpty();
if (hasImmutableIndexes) {
for (PTable index : immutableIndex.values()) {
if (index.getIndexState() == PIndexState.BUILDING) {
isBuildingImmutable = true;
break;
}
}
}
final QueryPlan dataPlan = dataPlanToBe;
// tableRefs is parallel with queryPlans
TableRef[] tableRefs = new TableRef[hasImmutableIndexes ? immutableIndex.size() : 1];
if (hasImmutableIndexes) {
int i = 0;
Iterator<QueryPlan> plans = queryPlans.iterator();
while (plans.hasNext()) {
QueryPlan plan = plans.next();
PTable table = plan.getTableRef().getTable();
if (table.getType() == PTableType.INDEX) {
// index plans
tableRefs[i++] = plan.getTableRef();
immutableIndex.remove(table.getKey());
} else if (!isBuildingImmutable) {
// data plan
/*
* If we have immutable indexes that we need to maintain, don't execute the data plan
* as we can save a query by piggy-backing on any of the other index queries, since the
* PK columns that we need are always in each index row.
*/
plans.remove();
}
}
/*
* If we have any immutable indexes remaining, then that means that the plan for that index got filtered out
* because it could not be executed. This would occur if a column in the where clause is not found in the
* immutable index.
*/
if (!immutableIndex.isEmpty()) {
Collection<PTable> immutableIndexes = immutableIndex.values();
if (!isBuildingImmutable || hasNonPKIndexedColumns(immutableIndexes)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS).setSchemaName(tableRefToBe.getTable().getSchemaName().getString()).setTableName(tableRefToBe.getTable().getTableName().getString()).build().buildException();
}
runOnServer = false;
}
}
List<TableRef> buildingImmutableIndexes = Lists.newArrayListWithExpectedSize(immutableIndex.values().size());
for (PTable index : immutableIndex.values()) {
buildingImmutableIndexes.add(new TableRef(index, dataPlan.getTableRef().getTimeStamp(), dataPlan.getTableRef().getLowerBoundTimeStamp()));
}
// Make sure the first plan is targeting deletion from the data table
// In the case of an immutable index, we'll also delete from the index.
final TableRef dataTableRef = tableRefs[0] = tableRefToBe;
/*
* Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
* from the data table, while the others will be for deleting rows from immutable indexes.
*/
List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(tableRefs.length);
for (int i = 0; i < tableRefs.length; i++) {
final TableRef tableRef = tableRefs[i];
final QueryPlan plan = queryPlans.get(i);
if (!plan.getTableRef().equals(tableRef) || !(plan instanceof BaseQueryPlan)) {
runOnServer = false;
// FIXME: why set this to false in this case?
noQueryReqd = false;
}
final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
final StatementContext context = plan.getContext();
// If we're deleting a set of fully qualified rows, we don't need to run a query at all. A simple check for the absence of a where clause in the parse node isn't sufficient, as the where clause may have been optimized out. Instead, we check that there's a single SkipScanFilter and that the scan ranges are point lookups.
if (noQueryReqd && (!context.getScan().hasFilter() || context.getScan().getFilter() instanceof SkipScanFilter) && context.getScanRanges().isPointLookup()) {
mutationPlans.add(new MutationPlan() {
@Override
public ParameterMetaData getParameterMetaData() {
return context.getBindManager().getParameterMetaData();
}
@Override
public MutationState execute() throws SQLException {
// We have a point lookup, so we know we have a simple set of fully qualified
// keys for our ranges
ScanRanges ranges = context.getScanRanges();
Iterator<KeyRange> iterator = ranges.getPointLookupKeyIterator();
Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(ranges.getPointLookupCount());
while (iterator.hasNext()) {
mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
}
return new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
}
@Override
public ExplainPlan getExplainPlan() throws SQLException {
return new ExplainPlan(Collections.singletonList("DELETE SINGLE ROW"));
}
@Override
public StatementContext getContext() {
return context;
}
@Override
public TableRef getTargetRef() {
return dataTableRef;
}
@Override
public Set<TableRef> getSourceRefs() {
// Don't include the target
return Collections.emptySet();
}
@Override
public Operation getOperation() {
return operation;
}
@Override
public Long getEstimatedRowsToScan() throws SQLException {
return 0L;
}
@Override
public Long getEstimatedBytesToScan() throws SQLException {
return 0L;
}
});
} else if (runOnServer) {
// TODO: better abstraction
Scan scan = context.getScan();
// Propagate IGNORE_NEWER_MUTATIONS when replaying mutations since there will be
// future dated data row mutations that will get in the way of generating the
// correct index rows on replay.
scan.setAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
// Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
// The coprocessor will delete each row returned from the scan
// Ignore ORDER BY, since with auto commit on and no limit it makes no difference
SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
if (plan.getProjector().projectEveryRow()) {
projectorToBe = new RowProjector(projectorToBe, true);
}
final RowProjector projector = projectorToBe;
final QueryPlan aggPlan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
mutationPlans.add(new MutationPlan() {
@Override
public ParameterMetaData getParameterMetaData() {
return context.getBindManager().getParameterMetaData();
}
@Override
public StatementContext getContext() {
return context;
}
@Override
public TableRef getTargetRef() {
return dataTableRef;
}
@Override
public Set<TableRef> getSourceRefs() {
return dataPlan.getSourceRefs();
}
@Override
public Operation getOperation() {
return operation;
}
@Override
public MutationState execute() throws SQLException {
// TODO: share this block of code with UPSERT SELECT
ImmutableBytesWritable ptr = context.getTempPtr();
PTable table = tableRef.getTable();
table.getIndexMaintainers(ptr, context.getConnection());
byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
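// No server cache is registered in this path; the index metadata and transaction state are attached directly to the scan below, and the finally block only guards a cache that is never allocated here.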
ServerCache cache = null;
try {
if (ptr.getLength() > 0) {
byte[] uuidValue = ServerCacheClient.generateId();
context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
context.getScan().setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
}
ResultIterator iterator = aggPlan.iterator();
try {
Tuple row = iterator.next();
final long mutationCount = (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
return new MutationState(maxSize, maxSizeBytes, connection) {
@Override
public long getUpdateCount() {
return mutationCount;
}
};
} finally {
iterator.close();
}
} finally {
if (cache != null) {
cache.close();
}
}
}
@Override
public ExplainPlan getExplainPlan() throws SQLException {
List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
planSteps.add("DELETE ROWS");
planSteps.addAll(queryPlanSteps);
return new ExplainPlan(planSteps);
}
@Override
public Long getEstimatedRowsToScan() throws SQLException {
return aggPlan.getEstimatedRowsToScan();
}
@Override
public Long getEstimatedBytesToScan() throws SQLException {
return aggPlan.getEstimatedBytesToScan();
}
});
} else {
List<TableRef> immutableIndexRefsToBe = Lists.newArrayListWithExpectedSize(dataPlan.getTableRef().getTable().getIndexes().size());
if (!buildingImmutableIndexes.isEmpty()) {
immutableIndexRefsToBe = buildingImmutableIndexes;
} else if (hasImmutableIndexes && !plan.getTableRef().equals(tableRef)) {
immutableIndexRefsToBe = Collections.singletonList(plan.getTableRef());
}
final List<TableRef> immutableIndexRefs = immutableIndexRefsToBe;
final DeletingParallelIteratorFactory parallelIteratorFactory2 = parallelIteratorFactory;
mutationPlans.add(new MutationPlan() {
@Override
public ParameterMetaData getParameterMetaData() {
return context.getBindManager().getParameterMetaData();
}
@Override
public StatementContext getContext() {
return context;
}
@Override
public TableRef getTargetRef() {
return dataTableRef;
}
@Override
public Set<TableRef> getSourceRefs() {
return dataPlan.getSourceRefs();
}
@Override
public Operation getOperation() {
return operation;
}
@Override
public MutationState execute() throws SQLException {
ResultIterator iterator = plan.iterator();
try {
if (!hasLimit) {
Tuple tuple;
long totalRowCount = 0;
if (parallelIteratorFactory2 != null) {
parallelIteratorFactory2.setRowProjector(plan.getProjector());
parallelIteratorFactory2.setTargetTableRef(tableRef);
parallelIteratorFactory2.setSourceTableRef(plan.getTableRef());
parallelIteratorFactory2.setIndexTargetTableRefs(immutableIndexRefs);
}
while ((tuple = iterator.next()) != null) {
// Runs query
Cell kv = tuple.getValue(0);
totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
}
// Return the total number of rows that have been deleted. In the case of auto commit being off,
// the mutations will all be in the mutation state of the current connection.
MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount);
// set the read metrics accumulated in the parent context so that it can be published when the mutations are committed.
state.setReadMetricQueue(plan.getContext().getReadMetricsQueue());
return state;
} else {
return deleteRows(plan.getContext(), tableRef, immutableIndexRefs, iterator, plan.getProjector(), plan.getTableRef());
}
} finally {
iterator.close();
}
}
@Override
public ExplainPlan getExplainPlan() throws SQLException {
List<String> queryPlanSteps = plan.getExplainPlan().getPlanSteps();
List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
planSteps.add("DELETE ROWS");
planSteps.addAll(queryPlanSteps);
return new ExplainPlan(planSteps);
}
@Override
public Long getEstimatedRowsToScan() throws SQLException {
return plan.getEstimatedRowsToScan();
}
@Override
public Long getEstimatedBytesToScan() throws SQLException {
return plan.getEstimatedBytesToScan();
}
});
}
}
return mutationPlans.size() == 1 ? mutationPlans.get(0) : new MultiDeleteMutationPlan(mutationPlans);
}
Use of org.apache.phoenix.cache.ServerCacheClient.ServerCache in project phoenix by apache.
In the class HashJoinPlan, the iterator method.
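Each sub plan runs on the connection's executor and returns a ServerCache (for hash join sub plans, the hash table serialized to the region servers). The caches are tracked as dependencies so they can be closed quietly if any sub plan fails, and otherwise handed to the delegate plan's iterator.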
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
if (scan == null) {
scan = delegate.getContext().getScan();
}
int count = subPlans.length;
PhoenixConnection connection = getContext().getConnection();
ConnectionQueryServices services = connection.getQueryServices();
ExecutorService executor = services.getExecutor();
List<Future<ServerCache>> futures = Lists.newArrayListWithExpectedSize(count);
if (joinInfo != null) {
hashClient = hashClient != null ? hashClient : new HashCacheClient(delegate.getContext().getConnection());
firstJobEndTime = new AtomicLong(0);
keyRangeExpressions = new CopyOnWriteArrayList<Expression>();
}
for (int i = 0; i < count; i++) {
final int index = i;
futures.add(executor.submit(new JobCallable<ServerCache>() {
@Override
public ServerCache call() throws Exception {
ServerCache cache = subPlans[index].execute(HashJoinPlan.this);
return cache;
}
@Override
public Object getJobId() {
return HashJoinPlan.this;
}
@Override
public TaskExecutionMetricsHolder getTaskExecutionMetric() {
return NO_OP_INSTANCE;
}
}));
}
SQLException firstException = null;
for (int i = 0; i < count; i++) {
try {
ServerCache result = futures.get(i).get();
if (result != null) {
dependencies.add(result);
}
subPlans[i].postProcess(result, this);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
if (firstException == null) {
firstException = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).setMessage("Sub plan [" + i + "] execution interrupted.").build().buildException();
}
} catch (ExecutionException e) {
if (firstException == null) {
firstException = new SQLException("Encountered exception in sub plan [" + i + "] execution.", e.getCause());
}
}
}
if (firstException != null) {
SQLCloseables.closeAllQuietly(dependencies);
throw firstException;
}
Expression postFilter = null;
boolean hasKeyRangeExpressions = keyRangeExpressions != null && !keyRangeExpressions.isEmpty();
if (recompileWhereClause || hasKeyRangeExpressions) {
StatementContext context = delegate.getContext();
PTable table = context.getCurrentTable().getTable();
ParseNode viewWhere = table.getViewStatement() == null ? null : new SQLParser(table.getViewStatement()).parseQuery().getWhere();
context.setResolver(FromCompiler.getResolverForQuery((SelectStatement) (delegate.getStatement()), delegate.getContext().getConnection()));
if (recompileWhereClause) {
postFilter = WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, null);
}
if (hasKeyRangeExpressions) {
WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, keyRangeExpressions, true, null);
}
}
if (joinInfo != null) {
HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo);
}
ResultIterator iterator = joinInfo == null ? delegate.iterator(scanGrouper, scan) : ((BaseQueryPlan) delegate).iterator(dependencies, scanGrouper, scan);
if (statement.getInnerSelectStatement() != null && postFilter != null) {
iterator = new FilterResultIterator(iterator, postFilter);
}
return iterator;
}
Use of org.apache.phoenix.cache.ServerCacheClient.ServerCache in project phoenix by apache.
In the class MutationState, the setMetaDataOnMutations method.
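This is the helper the send method above calls. When useIndexMetadataCache decides the metadata is large enough, it is registered as a ServerCache and only the cache id is attached to each mutation; otherwise the metadata is inlined as a mutation attribute alongside a freshly generated UUID.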
private ServerCache setMetaDataOnMutations(TableRef tableRef, List<? extends Mutation> mutations, ImmutableBytesWritable indexMetaDataPtr) throws SQLException {
PTable table = tableRef.getTable();
final byte[] tenantIdBytes;
if (table.isMultiTenant()) {
tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
} else {
tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
}
ServerCache cache = null;
byte[] attribValue = null;
byte[] uuidValue = null;
byte[] txState = ByteUtil.EMPTY_BYTE_ARRAY;
if (table.isTransactional()) {
txState = encodeTransaction();
}
boolean hasIndexMetaData = indexMetaDataPtr.getLength() > 0;
if (hasIndexMetaData) {
if (IndexMetaDataCacheClient.useIndexMetadataCache(connection, mutations, indexMetaDataPtr.getLength() + txState.length)) {
IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
cache = client.addIndexMetadataCache(mutations, indexMetaDataPtr, txState);
uuidValue = cache.getId();
} else {
attribValue = ByteUtil.copyKeyBytesIfNecessary(indexMetaDataPtr);
uuidValue = ServerCacheClient.generateId();
}
} else if (txState.length == 0) {
return null;
}
// Either set the UUID so the server can look up the index metadata in the cache, or set the index metadata directly on the Mutation.
for (Mutation mutation : mutations) {
if (connection.getTenantId() != null) {
mutation.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantIdBytes);
}
mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
if (attribValue != null) {
mutation.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue);
if (txState.length > 0) {
mutation.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
}
} else if (!hasIndexMetaData && txState.length > 0) {
mutation.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
}
}
return cache;
}
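Taken together, these snippets share one lifecycle: optionally register a ServerCache on the region servers, propagate its id via a scan or mutation attribute, and always release it in a finally block. The following is a minimal sketch of that pattern, reusing only classes that appear in the examples above; the method name and the empty transaction state are illustrative assumptions, not code from the Phoenix code base.

// Sketch only: register index metadata as a server cache, tag the mutations with its id,
// and guarantee the server-side memory is released even if sending fails.
private void sendWithIndexMetaData(PhoenixConnection connection, TableRef tableRef,
        List<Mutation> mutations, ImmutableBytesWritable indexMetaDataPtr) throws SQLException {
    ServerCache cache = null;
    try {
        IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
        // Empty transaction state for brevity; a transactional table would pass its encoded state.
        cache = client.addIndexMetadataCache(mutations, indexMetaDataPtr, ByteUtil.EMPTY_BYTE_ARRAY);
        byte[] uuidValue = cache.getId();
        for (Mutation mutation : mutations) {
            mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
        }
        // ... hand the mutations to the HTable batch call, as in send() above ...
    } finally {
        if (cache != null) {
            cache.close(); // frees the cache entry on every region server it was sent to
        }
    }
}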