Use of org.apache.ignite.internal.processors.query.GridQueryFieldsResult in the Apache Ignite project.
The class DmlStatementsProcessor, method executeUpdateStatement.
/**
* Actually perform SQL DML operation locally.
*
* @param schemaName Schema name.
* @param cctx Cache context.
* @param prepStmt Prepared statement for DML query.
* @param fieldsQry Fields query.
* @param loc Local query flag.
* @param filters Cache name and key filter.
* @param cancel Query cancel state holder.
* @param failedKeys Keys to restrict UPDATE and DELETE operations with. Null or empty array means no restriction.
* @return Pair [number of successfully processed items; keys that have failed to be processed].
* @throws IgniteCheckedException if failed.
*/
@SuppressWarnings({"ConstantConditions", "unchecked"})
private UpdateResult executeUpdateStatement(String schemaName, final GridCacheContext cctx, PreparedStatement prepStmt,
    SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel, Object[] failedKeys)
    throws IgniteCheckedException {
    int mainCacheId = CU.cacheId(cctx.name());

    Integer errKeysPos = null;

    UpdatePlan plan = getPlanForStatement(schemaName, prepStmt, errKeysPos);

    if (plan.fastUpdateArgs != null) {
        assert F.isEmpty(failedKeys) && errKeysPos == null;

        return doFastUpdate(plan, fieldsQry.getArgs());
    }

    assert !F.isEmpty(plan.selectQry);

    QueryCursorImpl<List<?>> cur;

    // Do a two-step (distributed) SELECT only when the local flag is not set and the plan's SELECT
    // is an actual subquery and not some dummy stuff like "select 1, 2, 3;".
    if (!loc && !plan.isLocSubqry) {
        SqlFieldsQuery newFieldsQry = new SqlFieldsQuery(plan.selectQry, fieldsQry.isCollocated())
            .setArgs(fieldsQry.getArgs())
            .setDistributedJoins(fieldsQry.isDistributedJoins())
            .setEnforceJoinOrder(fieldsQry.isEnforceJoinOrder())
            .setLocal(fieldsQry.isLocal())
            .setPageSize(fieldsQry.getPageSize())
            .setTimeout(fieldsQry.getTimeout(), TimeUnit.MILLISECONDS);

        cur = (QueryCursorImpl<List<?>>)idx.queryDistributedSqlFields(schemaName, newFieldsQry, true, cancel, mainCacheId);
    }
    else {
        final GridQueryFieldsResult res = idx.queryLocalSqlFields(schemaName, plan.selectQry, F.asList(fieldsQry.getArgs()),
            filters, fieldsQry.isEnforceJoinOrder(), fieldsQry.getTimeout(), cancel);

        cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                try {
                    return new GridQueryCacheObjectsIterator(res.iterator(), idx.objectContext(), true);
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }
        }, cancel);
    }

    int pageSize = loc ? 0 : fieldsQry.getPageSize();

    switch (plan.mode) {
        case MERGE:
            return new UpdateResult(doMerge(plan, cur, pageSize), X.EMPTY_OBJECT_ARRAY);

        case INSERT:
            return new UpdateResult(doInsert(plan, cur, pageSize), X.EMPTY_OBJECT_ARRAY);

        case UPDATE:
            return doUpdate(plan, cur, pageSize);

        case DELETE:
            return doDelete(cctx, cur, pageSize);

        default:
            throw new IgniteSQLException("Unexpected DML operation [mode=" + plan.mode + ']',
                IgniteQueryErrorCode.UNEXPECTED_OPERATION);
    }
}
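For orientation, this internal method is reached from the public API: IgniteCache#query(SqlFieldsQuery) with a DML string is routed through DmlStatementsProcessor. Below is a minimal sketch of that public-API path; the cache name, the Person value type and the data are hypothetical and only illustrate the UPDATE branch (doUpdate).

import java.io.Serializable;
import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.cache.query.annotations.QuerySqlField;
import org.apache.ignite.configuration.CacheConfiguration;

public class SqlDmlUpdateExample {
    /** Hypothetical SQL-enabled value type. */
    public static class Person implements Serializable {
        /** Queryable field, becomes the NAME column. */
        @QuerySqlField
        private String name;

        public Person(String name) {
            this.name = name;
        }
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, Person> ccfg = new CacheConfiguration<Integer, Person>("Person")
                .setIndexedTypes(Integer.class, Person.class);

            IgniteCache<Integer, Person> cache = ignite.getOrCreateCache(ccfg);

            cache.put(1, new Person("alice"));

            // The indexing module turns this UPDATE into a plan, runs the generated SELECT
            // (locally or as a two-step query) and applies the changes page by page.
            List<List<?>> res = cache.query(
                new SqlFieldsQuery("UPDATE Person SET name = ? WHERE name = ?")
                    .setArgs("Alice", "alice"))
                .getAll();

            // For DML, the cursor yields a single row holding the update count.
            System.out.println("Updated rows: " + res.get(0).get(0));
        }
    }
}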
Use of org.apache.ignite.internal.processors.query.GridQueryFieldsResult in the Apache Ignite project.
The class DmlStatementsProcessor, method streamUpdateQuery.
/**
* Perform the given statement against the given data streamer. Only row-based INSERT and MERGE are supported,
* as well as key-bound UPDATE and DELETE (ones with a {@code WHERE _key = ?} filter).
*
* @param streamer Streamer to feed data to.
* @param stmt Statement.
* @param args Statement arguments.
* @return Number of rows in given statement for INSERT and MERGE, {@code 1} otherwise.
* @throws IgniteCheckedException if failed.
*/
@SuppressWarnings({"unchecked", "ConstantConditions"})
long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Object[] args) throws IgniteCheckedException {
    args = U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY);

    Prepared p = GridSqlQueryParser.prepared(stmt);

    assert p != null;

    UpdatePlan plan = UpdatePlanBuilder.planForStatement(p, null);

    if (!F.eq(streamer.cacheName(), plan.tbl.rowDescriptor().context().name()))
        throw new IgniteSQLException("Cross cache streaming is not supported, please specify cache explicitly" +
            " in connection options", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);

    if (plan.mode == UpdateMode.INSERT && plan.rowsNum > 0) {
        assert plan.isLocSubqry;

        final GridCacheContext cctx = plan.tbl.rowDescriptor().context();

        QueryCursorImpl<List<?>> cur;

        final ArrayList<List<?>> data = new ArrayList<>(plan.rowsNum);

        final GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()), plan.selectQry,
            F.asList(args), null, false, 0, null);

        QueryCursorImpl<List<?>> stepCur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                try {
                    return new GridQueryCacheObjectsIterator(res.iterator(), idx.objectContext(), cctx.keepBinary());
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }
        }, null);

        data.addAll(stepCur.getAll());

        cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                return data.iterator();
            }
        }, null);

        if (plan.rowsNum == 1) {
            IgniteBiTuple t = rowToKeyValue(cctx, cur.iterator().next(), plan);

            streamer.addData(t.getKey(), t.getValue());

            return 1;
        }

        Map<Object, Object> rows = new LinkedHashMap<>(plan.rowsNum);

        for (List<?> row : cur) {
            final IgniteBiTuple t = rowToKeyValue(cctx, row, plan);

            rows.put(t.getKey(), t.getValue());
        }

        streamer.addData(rows);

        return rows.size();
    }
    else
        throw new IgniteSQLException("Only tuple based INSERT statements are supported in streaming mode",
            IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
}
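The streaming branch above reduces every INSERT row to a key-value pair and pushes it through IgniteDataStreamer#addData, one entry at a time for a single row or as a whole map otherwise. Below is a minimal sketch of the equivalent public-API usage; the cache name and the sample entries are hypothetical.

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;

public class StreamerExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("Person");

            try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("Person")) {
                // Multi-row case: keys and values are collected into a map and handed to
                // the streamer in one call, mirroring streamer.addData(rows) above.
                Map<Integer, String> rows = new LinkedHashMap<>();

                rows.put(1, "Alice");
                rows.put(2, "Bob");

                streamer.addData(rows);

                // Single-row case: streamer.addData(key, value), as in the rowsNum == 1 branch.
                streamer.addData(3, "Carol");
            }
        }
    }
}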
Use of org.apache.ignite.internal.processors.query.GridQueryFieldsResult in the Apache Ignite project.
The class IgniteH2Indexing, method doRunPrepared.
/**
* Execute an all-ready {@link SqlFieldsQuery}.
*
* @param schemaName Schema name.
* @param prepared H2 command.
* @param qry Fields query with flags.
* @param twoStepQry Two-step query if this query must be executed in a distributed way.
* @param meta Metadata for {@code twoStepQry}.
* @param keepBinary Whether binary objects must not be deserialized automatically.
* @param cancel Query cancel state holder.
* @return Query result.
*/
private List<? extends FieldsQueryCursor<List<?>>> doRunPrepared(String schemaName, Prepared prepared, SqlFieldsQuery qry,
    GridCacheTwoStepQuery twoStepQry, List<GridQueryFieldMetadata> meta, boolean keepBinary, GridQueryCancel cancel) {
    String sqlQry = qry.getSql();

    boolean loc = qry.isLocal();

    IndexingQueryFilter filter = (loc ? backupFilter(null, qry.getPartitions()) : null);

    if (!prepared.isQuery()) {
        if (DmlStatementsProcessor.isDmlStatement(prepared)) {
            try {
                Connection conn = connectionForSchema(schemaName);

                if (!loc)
                    return dmlProc.updateSqlFieldsDistributed(schemaName, conn, prepared, qry, cancel);
                else {
                    final GridQueryFieldsResult updRes = dmlProc.updateSqlFieldsLocal(schemaName, conn, prepared, qry, filter, cancel);

                    return Collections.singletonList(new QueryCursorImpl<>(new Iterable<List<?>>() {
                        @Override public Iterator<List<?>> iterator() {
                            try {
                                return new GridQueryCacheObjectsIterator(updRes.iterator(), objectContext(), true);
                            }
                            catch (IgniteCheckedException e) {
                                throw new IgniteException(e);
                            }
                        }
                    }, cancel));
                }
            }
            catch (IgniteCheckedException e) {
                throw new IgniteSQLException("Failed to execute DML statement [stmt=" + sqlQry + ", params=" +
                    Arrays.deepToString(qry.getArgs()) + "]", e);
            }
        }

        if (DdlStatementsProcessor.isDdlStatement(prepared)) {
            if (loc)
                throw new IgniteSQLException("DDL statements are not supported for LOCAL caches",
                    IgniteQueryErrorCode.UNSUPPORTED_OPERATION);

            try {
                return Collections.singletonList(ddlProc.runDdlStatement(sqlQry, prepared));
            }
            catch (IgniteCheckedException e) {
                throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + sqlQry + ']', e);
            }
        }

        if (prepared instanceof NoOperation) {
            QueryCursorImpl<List<?>> resCur = (QueryCursorImpl<List<?>>)new QueryCursorImpl(
                Collections.singletonList(Collections.singletonList(0L)), null, false);

            resCur.fieldsMeta(UPDATE_RESULT_META);

            return Collections.singletonList(resCur);
        }

        throw new IgniteSQLException("Unsupported DDL/DML operation: " + prepared.getClass().getName());
    }

    if (twoStepQry != null) {
        if (log.isDebugEnabled())
            log.debug("Parsed query: `" + sqlQry + "` into two step query: " + twoStepQry);

        checkQueryType(qry, true);

        return Collections.singletonList(doRunDistributedQuery(schemaName, qry, twoStepQry, meta, keepBinary, cancel));
    }

    // We've encountered a local query, let's just run it.
    try {
        return Collections.singletonList(queryLocalSqlFields(schemaName, qry, keepBinary, filter, cancel));
    }
    catch (IgniteCheckedException e) {
        throw new IgniteSQLException("Failed to execute local statement [stmt=" + sqlQry + ", params=" +
            Arrays.deepToString(qry.getArgs()) + "]", e);
    }
}
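doRunPrepared is essentially a dispatcher: DML statements go to DmlStatementsProcessor (on the local path wrapping a GridQueryFieldsResult, as above), DDL statements to DdlStatementsProcessor, a no-op command yields a dummy single-row cursor, and everything else runs as a local or two-step SELECT. Below is a hedged sketch of public-API statements that exercise those branches; the table and cache names are made up for illustration.

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class DispatchExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Any cache works as an entry point for SQL; the table itself lives in the PUBLIC schema.
            IgniteCache<?, ?> cache = ignite.getOrCreateCache("sql-entry-point");

            // DDL branch: handled by DdlStatementsProcessor.runDdlStatement().
            cache.query(new SqlFieldsQuery("CREATE TABLE city (id LONG PRIMARY KEY, name VARCHAR)")
                .setSchema("PUBLIC")).getAll();

            // DML branch: handled by DmlStatementsProcessor (distributed or local).
            cache.query(new SqlFieldsQuery("INSERT INTO city (id, name) VALUES (?, ?)")
                .setSchema("PUBLIC").setArgs(1L, "London")).getAll();

            // Query branch: parsed into a two-step query unless setLocal(true) is used.
            List<List<?>> rows = cache.query(new SqlFieldsQuery("SELECT id, name FROM city")
                .setSchema("PUBLIC")).getAll();

            System.out.println(rows);
        }
    }
}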
Use of org.apache.ignite.internal.processors.query.GridQueryFieldsResult in the Apache Ignite project.
The class DmlStatementsProcessor, method executeUpdateStatement.
/**
* Actually perform SQL DML operation locally.
*
* @param schemaName Schema name.
* @param cctx Cache context.
* @param c Connection.
* @param prepared Prepared statement for DML query.
* @param fieldsQry Fields query.
* @param loc Local query flag.
* @param filters Cache name and key filter.
* @param cancel Query cancel state holder.
* @return Pair [number of successfully processed items; keys that have failed to be processed]
* @throws IgniteCheckedException if failed.
*/
@SuppressWarnings({"ConstantConditions", "unchecked"})
private UpdateResult executeUpdateStatement(String schemaName, final GridCacheContext cctx, Connection c, Prepared prepared,
    SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel) throws IgniteCheckedException {
    Integer errKeysPos = null;

    UpdatePlan plan = getPlanForStatement(schemaName, c, prepared, fieldsQry, loc, errKeysPos);

    UpdateResult fastUpdateRes = plan.processFast(fieldsQry.getArgs());

    if (fastUpdateRes != null)
        return fastUpdateRes;

    if (plan.distributedPlan() != null) {
        UpdateResult result = doDistributedUpdate(schemaName, fieldsQry, plan, cancel);

        // Null is returned when not all nodes support distributed DML.
        if (result != null)
            return result;
    }

    Iterable<List<?>> cur;

    // Do a two-step (distributed) SELECT only when the local flag is not set and the plan's SELECT
    // is an actual sub-query and not some dummy stuff like "select 1, 2, 3;".
    if (!loc && !plan.isLocalSubquery()) {
        assert !F.isEmpty(plan.selectQuery());

        SqlFieldsQuery newFieldsQry = new SqlFieldsQuery(plan.selectQuery(), fieldsQry.isCollocated())
            .setArgs(fieldsQry.getArgs())
            .setDistributedJoins(fieldsQry.isDistributedJoins())
            .setEnforceJoinOrder(fieldsQry.isEnforceJoinOrder())
            .setLocal(fieldsQry.isLocal())
            .setPageSize(fieldsQry.getPageSize())
            .setTimeout(fieldsQry.getTimeout(), TimeUnit.MILLISECONDS);

        cur = (QueryCursorImpl<List<?>>)idx.querySqlFields(schemaName, newFieldsQry, null, true, true, cancel).get(0);
    }
    else if (plan.hasRows())
        cur = plan.createRows(fieldsQry.getArgs());
    else {
        final GridQueryFieldsResult res = idx.queryLocalSqlFields(schemaName, plan.selectQuery(), F.asList(fieldsQry.getArgs()),
            filters, fieldsQry.isEnforceJoinOrder(), fieldsQry.getTimeout(), cancel);

        cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                try {
                    return new GridQueryCacheObjectsIterator(res.iterator(), idx.objectContext(), true);
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }
        }, cancel);
    }

    int pageSize = loc ? 0 : fieldsQry.getPageSize();

    return processDmlSelectResult(cctx, plan, cur, pageSize);
}
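Compared with the older variant above, this method first lets the plan short-circuit via processFast() (key-bound UPDATE/DELETE) and a distributed plan before falling back to the SELECT that, on the local branch, produces a GridQueryFieldsResult. Below is a hedged sketch of the two kinds of statements as seen from user code; the table name, the data and the exact fast-path eligibility rules are assumptions for illustration, not taken from the snippet.

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class FastVsSelectDrivenDml {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<?, ?> cache = ignite.getOrCreateCache("dml-entry-point");

            cache.query(new SqlFieldsQuery("CREATE TABLE city (id LONG PRIMARY KEY, name VARCHAR)")
                .setSchema("PUBLIC")).getAll();

            cache.query(new SqlFieldsQuery("INSERT INTO city (id, name) VALUES (?, ?), (?, ?)")
                .setSchema("PUBLIC").setArgs(1L, "london", 2L, "paris")).getAll();

            // Key-bound statement: the kind of query plan.processFast() is designed to
            // short-circuit, so no SELECT has to be executed at all (assumed eligibility).
            cache.query(new SqlFieldsQuery("DELETE FROM city WHERE _key = ?")
                .setSchema("PUBLIC").setArgs(2L)).getAll();

            // General statement: the plan generates a SELECT whose rows drive the per-row updates.
            List<List<?>> res = cache.query(new SqlFieldsQuery("UPDATE city SET name = UPPER(name)")
                .setSchema("PUBLIC")).getAll();

            System.out.println("Updated rows: " + res.get(0).get(0));
        }
    }
}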
Use of org.apache.ignite.internal.processors.query.GridQueryFieldsResult in the Apache Ignite project.
The class GridIndexingSpiAbstractSelfTest, method testLongQueries.
/**
* Tests that long-running queries write their EXPLAIN plans as warnings into the log.
*
* @throws Exception If failed.
*/
public void testLongQueries() throws Exception {
    IgniteH2Indexing spi = getIndexing();

    ignite0.createCache(cacheACfg());

    long longQryExecTime = IgniteConfiguration.DFLT_LONG_QRY_WARN_TIMEOUT;

    GridStringLogger log = new GridStringLogger(false, this.log);

    IgniteLogger oldLog = GridTestUtils.getFieldValue(spi, "log");

    try {
        GridTestUtils.setFieldValue(spi, "log", log);

        String sql = "select sum(x) FROM SYSTEM_RANGE(?, ?)";

        long now = U.currentTimeMillis();

        long time = now;

        long range = 1000000L;

        while (now - time <= longQryExecTime * 3 / 2) {
            time = now;

            range *= 3;

            GridQueryFieldsResult res = spi.queryLocalSqlFields(spi.schema("A"), sql, Arrays.<Object>asList(1, range),
                null, false, 0, null);

            assert res.iterator().hasNext();

            now = U.currentTimeMillis();
        }

        String res = log.toString();

        assertTrue(res.contains("/* PUBLIC.RANGE_INDEX */"));
    }
    finally {
        GridTestUtils.setFieldValue(spi, "log", oldLog);
    }
}
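The test relies on IgniteConfiguration.DFLT_LONG_QRY_WARN_TIMEOUT and swaps the indexing SPI's logger to capture the warning. In user code the same behavior is controlled through the long query warning timeout; the sketch below assumes the IgniteConfiguration#setLongQueryWarningTimeout setter (newer Ignite versions move this setting to SqlConfiguration) and uses an arbitrary SYSTEM_RANGE query that should exceed the threshold on most hardware.

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.IgniteConfiguration;

public class LongQueryWarningExample {
    public static void main(String[] args) {
        // Any query running longer than 1 second is reported in the node log together with its
        // execution plan (the test above checks for exactly such EXPLAIN output).
        IgniteConfiguration cfg = new IgniteConfiguration()
            .setLongQueryWarningTimeout(1_000L);

        try (Ignite ignite = Ignition.start(cfg)) {
            IgniteCache<?, ?> cache = ignite.getOrCreateCache("sql-entry-point");

            // SYSTEM_RANGE is an H2 table function, handy for generating load without any data;
            // tune the range so the query actually exceeds the configured timeout.
            List<List<?>> res = cache.query(new SqlFieldsQuery(
                "SELECT SUM(x) FROM SYSTEM_RANGE(1, 100000000)")).getAll();

            System.out.println(res.get(0).get(0));
        }
    }
}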