Use of org.apache.phoenix.iterate.ResultIterator in project phoenix by Apache.
The class TraceQueryPlan, method iterator:
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
    final PhoenixConnection conn = stmt.getConnection();
    if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) {
        return ResultIterator.EMPTY_ITERATOR;
    }
    return new ResultIterator() {

        @Override
        public void close() throws SQLException {
        }

        @Override
        public Tuple next() throws SQLException {
            if (!first) {
                return null;
            }
            TraceScope traceScope = conn.getTraceScope();
            if (traceStatement.isTraceOn()) {
                conn.setSampler(Tracing.getConfiguredSampler(traceStatement));
                if (conn.getSampler() == Sampler.NEVER) {
                    closeTraceScope(conn);
                }
                if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) {
                    traceScope = Tracing.startNewSpan(conn, "Enabling trace");
                    if (traceScope.getSpan() != null) {
                        conn.setTraceScope(traceScope);
                    } else {
                        closeTraceScope(conn);
                    }
                }
            } else {
                closeTraceScope(conn);
                conn.setSampler(Sampler.NEVER);
            }
            if (traceScope == null || traceScope.getSpan() == null) {
                return null;
            }
            first = false;
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            ParseNodeFactory factory = new ParseNodeFactory();
            LiteralParseNode literal = factory.literal(traceScope.getSpan().getTraceId());
            LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS);
            expression.evaluate(null, ptr);
            byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
            Cell cell = CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY,
                    EnvironmentEdgeManager.currentTimeMillis(), Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
            List<Cell> cells = new ArrayList<Cell>(1);
            cells.add(cell);
            return new ResultTuple(Result.create(cells));
        }

        private void closeTraceScope(final PhoenixConnection conn) {
            if (conn.getTraceScope() != null) {
                conn.getTraceScope().close();
                conn.setTraceScope(null);
            }
        }

        @Override
        public void explain(List<String> planSteps) {
        }
    };
}
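From the client side, this plan is what backs Phoenix's TRACE statement: the iterator emits at most one row carrying the new trace id, or no row when the sampler resolves to NEVER. A minimal JDBC sketch, assuming a local Phoenix connection URL and reading the id by column position:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class TraceOnExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // TRACE ON compiles into a TraceQueryPlan; its iterator returns a single
            // row with the new trace id, or no row if the sampler is NEVER.
            try (ResultSet rs = stmt.executeQuery("TRACE ON")) {
                if (rs.next()) {
                    System.out.println("trace id: " + rs.getLong(1));
                }
            }
            // Statements on this connection are traced until TRACE OFF.
            stmt.execute("TRACE OFF");
        }
    }
}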
Use of org.apache.phoenix.iterate.ResultIterator in project phoenix by Apache.
The class UnionPlan, method getExplainPlan:
@Override
public ExplainPlan getExplainPlan() throws SQLException {
    List<String> steps = new ArrayList<String>();
    steps.add("UNION ALL OVER " + this.plans.size() + " QUERIES");
    ResultIterator iterator = iterator();
    iterator.explain(steps);
    // Indent plan steps nested under the union, except the last client-side merge/concat step (if there is one)
    int offset = !orderBy.getOrderByExpressions().isEmpty() && limit != null ? 2 : limit != null ? 1 : 0;
    for (int i = 1; i < steps.size() - offset; i++) {
        steps.set(i, "    " + steps.get(i));
    }
    return new ExplainPlan(steps);
}
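To see this indentation from the client, run EXPLAIN over a UNION ALL query; the first plan row is the union header and the child plans appear indented beneath it. A sketch, assuming a local connection URL and that tables T1 and T2 exist:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ExplainUnionExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                     "EXPLAIN SELECT id FROM T1 UNION ALL SELECT id FROM T2 ORDER BY id LIMIT 10")) {
            // First row: "UNION ALL OVER 2 QUERIES"; the child plan steps follow,
            // indented, except any trailing client-side merge/limit steps.
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}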
Use of org.apache.phoenix.iterate.ResultIterator in project phoenix by Apache.
The class PostDDLCompiler, method compile:
public MutationPlan compile(final List<TableRef> tableRefs, final byte[] emptyCF, final List<byte[]> projectCFs, final List<PColumn> deleteList, final long timestamp) throws SQLException {
    PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, new ColumnResolver() {

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public List<PFunction> getFunctions() {
            return Collections.<PFunction>emptyList();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new FunctionNotFoundException(functionName);
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            throw new SchemaNotFoundException(schemaName);
        }

        @Override
        public List<PSchema> getSchemas() {
            throw new UnsupportedOperationException();
        }
    }, scan, new SequenceManager(statement));
    return new BaseMutationPlan(context, Operation.UPSERT) {

        /* FIXME */
        @Override
        public MutationState execute() throws SQLException {
            if (tableRefs.isEmpty()) {
                return new MutationState(0, 1000, connection);
            }
            boolean wasAutoCommit = connection.getAutoCommit();
            try {
                connection.setAutoCommit(true);
                SQLException sqlE = null;
                /*
                 * Handles:
                 * 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX;
                 * 2) deletion of all column values for an ALTER TABLE DROP COLUMN
                 * 3) updating the necessary rows to have an empty KV
                 * 4) updating table stats
                 */
                long totalMutationCount = 0;
                for (final TableRef tableRef : tableRefs) {
                    Scan scan = ScanUtil.newScan(context.getScan());
                    SelectStatement select = SelectStatement.COUNT_ONE;
                    // We need to use this tableRef
                    ColumnResolver resolver = new ColumnResolver() {

                        @Override
                        public List<TableRef> getTables() {
                            return Collections.singletonList(tableRef);
                        }

                        @Override
                        public java.util.List<PFunction> getFunctions() {
                            return Collections.emptyList();
                        }

                        @Override
                        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
                            PColumn column = tableName != null
                                    ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName)
                                    : tableRef.getTable().getColumnForColumnName(colName);
                            return new ColumnRef(tableRef, column.getPosition());
                        }

                        @Override
                        public PFunction resolveFunction(String functionName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public boolean hasUDFs() {
                            return false;
                        }

                        @Override
                        public List<PSchema> getSchemas() {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public PSchema resolveSchema(String schemaName) throws SQLException {
                            throw new SchemaNotFoundException(schemaName);
                        }
                    };
                    PhoenixStatement statement = new PhoenixStatement(connection);
                    StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
                    long ts = timestamp;
                    // FIXME: DDL operations aren't transactional, so we're basing the timestamp on a server timestamp.
                    // Not sure what the fix should be. We don't need conflict detection nor filtering of invalid
                    // transactions in this case, so maybe this is ok.
                    if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
                        ts = TransactionUtil.convertToNanoseconds(ts);
                    }
                    ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
                    if (emptyCF != null) {
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
                    }
                    ServerCache cache = null;
                    try {
                        if (deleteList != null) {
                            if (deleteList.isEmpty()) {
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
                                // In the case of a row deletion, add index metadata so mutable secondary indexing works
                                /* TODO: we currently manually run a scan to delete the index data here
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                tableRef.getTable().getIndexMaintainers(ptr);
                                if (ptr.getLength() > 0) {
                                    IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                                    cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                                    byte[] uuidValue = cache.getId();
                                    scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                }
                                */
                            } else {
                                // In the case of the empty key value column family changing, do not send the index
                                // metadata, as we're currently managing this from the client. It's possible for the
                                // data empty column family to stay the same, while the index empty column family
                                // changes.
                                PColumn column = deleteList.get(0);
                                byte[] cq = column.getColumnQualifierBytes();
                                if (emptyCF == null) {
                                    scan.addColumn(column.getFamilyName().getBytes(), cq);
                                }
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
                            }
                        }
                        List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
                        if (projectCFs == null) {
                            for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
                                columnFamilies.add(family.getName().getBytes());
                            }
                        } else {
                            for (byte[] projectCF : projectCFs) {
                                columnFamilies.add(projectCF);
                            }
                        }
                        // Need to project all column families into the scan, since we haven't yet created our empty key value
                        RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
                        context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
                        // Explicitly project these column families and don't project the empty key value,
                        // since at this point we haven't added the empty key value everywhere.
                        if (columnFamilies != null) {
                            scan.getFamilyMap().clear();
                            for (byte[] family : columnFamilies) {
                                scan.addFamily(family);
                            }
                            projector = new RowProjector(projector, false);
                        }
                        // Ignore exceptions due to not being able to resolve any view columns,
                        // as this just means the view is invalid. Continue on and try to perform
                        // any other Post DDL operations.
                        try {
                            // Since dropping a VIEW does not affect the underlying data, we do
                            // not need to pass through the view statement here.
                            // Push where clause into scan
                            WhereCompiler.compile(context, select);
                        } catch (ColumnFamilyNotFoundException e) {
                            continue;
                        } catch (ColumnNotFoundException e) {
                            continue;
                        } catch (AmbiguousColumnException e) {
                            continue;
                        }
                        QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, null);
                        try {
                            ResultIterator iterator = plan.iterator();
                            try {
                                Tuple row = iterator.next();
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                totalMutationCount += (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            } catch (SQLException e) {
                                sqlE = e;
                            } finally {
                                try {
                                    iterator.close();
                                } catch (SQLException e) {
                                    if (sqlE == null) {
                                        sqlE = e;
                                    } else {
                                        sqlE.setNextException(e);
                                    }
                                } finally {
                                    if (sqlE != null) {
                                        throw sqlE;
                                    }
                                }
                            }
                        } catch (TableNotFoundException e) {
                            // Ignore and continue, as HBase throws when table hasn't been written to
                            // FIXME: Remove if this is fixed in 0.96
                        }
                    } finally {
                        if (cache != null) {
                            // Remove server cache if there is one
                            cache.close();
                        }
                    }
                }
                final long count = totalMutationCount;
                return new MutationState(1, 1000, connection) {

                    @Override
                    public long getUpdateCount() {
                        return count;
                    }
                };
            } finally {
                if (!wasAutoCommit) {
                    connection.setAutoCommit(wasAutoCommit);
                }
            }
        }
    };
}
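The close path above follows a common JDBC idiom: the first SQLException stays primary, and any failure from close() is chained behind it with setNextException so neither error is lost. A standalone sketch of that idiom, with hypothetical doWork()/closeResource() helpers standing in for the iterator calls:

import java.sql.SQLException;

public class ChainedCloseSketch {
    static void run() throws SQLException {
        SQLException sqlE = null;
        try {
            doWork(); // hypothetical: stands in for iterator.next() and the count extraction
        } catch (SQLException e) {
            sqlE = e;
        } finally {
            try {
                closeResource(); // hypothetical: stands in for iterator.close()
            } catch (SQLException e) {
                if (sqlE == null) {
                    sqlE = e; // the close failure is the only failure
                } else {
                    sqlE.setNextException(e); // chain it behind the primary failure
                }
            } finally {
                if (sqlE != null) {
                    throw sqlE;
                }
            }
        }
    }

    static void doWork() throws SQLException {
        throw new SQLException("primary failure");
    }

    static void closeResource() throws SQLException {
        throw new SQLException("failure during close");
    }

    public static void main(String[] args) {
        try {
            run();
        } catch (SQLException e) {
            System.out.println(e.getMessage());                    // primary failure
            System.out.println(e.getNextException().getMessage()); // failure during close
        }
    }
}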
Use of org.apache.phoenix.iterate.ResultIterator in project phoenix by Apache.
The class ClientScanPlan, method iterator:
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    ResultIterator iterator = delegate.iterator(scanGrouper, scan);
    if (where != null) {
        iterator = new FilterResultIterator(iterator, where);
    }
    if (!orderBy.getOrderByExpressions().isEmpty()) {
        // TopN
        int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt(
                QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
        iterator = new OrderedResultIterator(iterator, orderBy.getOrderByExpressions(), thresholdBytes, limit, offset, projector.getEstimatedRowByteSize());
    } else {
        if (offset != null) {
            iterator = new OffsetResultIterator(iterator, offset);
        }
        if (limit != null) {
            iterator = new LimitingResultIterator(iterator, limit);
        }
    }
    if (context.getSequenceManager().getSequenceCount() > 0) {
        iterator = new SequenceResultIterator(iterator, context.getSequenceManager());
    }
    return iterator;
}
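The method is a decorator chain: each clause wraps the previous iterator, and when an ORDER BY is present the limit and offset are folded into the TopN (OrderedResultIterator) step instead of being applied as separate wrappers. A self-contained sketch of the equivalent ordering using java.util streams (the values are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class DecoratorOrderSketch {
    public static void main(String[] args) {
        List<Integer> rows = Arrays.asList(5, 3, 8, 1, 9, 2);
        // WHERE -> ORDER BY -> OFFSET -> LIMIT, mirroring the wrapping order above
        List<Integer> result = rows.stream()
                .filter(n -> n > 1) // FilterResultIterator
                .sorted()           // OrderedResultIterator
                .skip(1)            // OffsetResultIterator
                .limit(2)           // LimitingResultIterator
                .collect(Collectors.toList());
        System.out.println(result); // [3, 5]
    }
}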
Use of org.apache.phoenix.iterate.ResultIterator in project phoenix by Apache.
The class LiteralResultIterationPlan, method newIterator:
@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, final Map<ImmutableBytesPtr, ServerCache> caches) throws SQLException {
    ResultIterator scanner = new ResultIterator() {

        private final Iterator<Tuple> tupleIterator = tuples.iterator();

        private boolean closed = false;

        private int count = 0;

        private int offsetCount = 0;

        @Override
        public void close() throws SQLException {
            SQLCloseables.closeAll(caches.values());
            this.closed = true;
        }

        @Override
        public Tuple next() throws SQLException {
            while (!this.closed && (offset != null && offsetCount < offset) && tupleIterator.hasNext()) {
                offsetCount++;
                tupleIterator.next();
            }
            if (!this.closed && (limit == null || count++ < limit) && tupleIterator.hasNext()) {
                return tupleIterator.next();
            }
            return null;
        }

        @Override
        public void explain(List<String> planSteps) {
        }
    };
    if (context.getSequenceManager().getSequenceCount() > 0) {
        scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
    }
    return scanner;
}
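The next() method above implements OFFSET by silently draining rows and LIMIT by counting the rows it hands out. A minimal standalone version of that windowing over a plain java.util.Iterator (the class name and types are illustrative):

import java.util.Arrays;
import java.util.Iterator;

public class WindowedIterator<T> {
    private final Iterator<T> delegate;
    private final Integer offset;  // null means no OFFSET
    private final Integer limit;   // null means no LIMIT
    private int offsetCount = 0;
    private int count = 0;

    public WindowedIterator(Iterator<T> delegate, Integer offset, Integer limit) {
        this.delegate = delegate;
        this.offset = offset;
        this.limit = limit;
    }

    public T next() {
        // Discard rows until the offset is consumed, mirroring the while loop above.
        while (offset != null && offsetCount < offset && delegate.hasNext()) {
            offsetCount++;
            delegate.next();
        }
        // Then hand out rows until the limit is reached.
        if ((limit == null || count++ < limit) && delegate.hasNext()) {
            return delegate.next();
        }
        return null;
    }

    public static void main(String[] args) {
        // OFFSET 1, LIMIT 2 over four rows
        WindowedIterator<String> it =
                new WindowedIterator<>(Arrays.asList("a", "b", "c", "d").iterator(), 1, 2);
        String row;
        while ((row = it.next()) != null) {
            System.out.println(row); // prints b, then c
        }
    }
}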