Use of org.apache.ignite.internal.processors.query.IgniteSQLException in project ignite by apache.
Class UpdatePlanBuilder, method gridTableForElement:
/**
 * @param target Expression to extract the table from.
 * @return Back end table for this element.
 */
private static GridSqlTable gridTableForElement(GridSqlElement target) {
    Set<GridSqlTable> tbls = new HashSet<>();

    DmlAstUtils.collectAllGridTablesInTarget(target, tbls);

    if (tbls.size() != 1)
        throw new IgniteSQLException("Failed to determine target table", IgniteQueryErrorCode.TABLE_NOT_FOUND);

    return tbls.iterator().next();
}
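The method above fails fast with a coded exception when the DML target does not resolve to exactly one table. Below is a minimal illustrative sketch of the same "resolve one element or fail" pattern; it is not Ignite code and the helper class and method names are hypothetical, but the IgniteSQLException(String, int) constructor and the IgniteQueryErrorCode.TABLE_NOT_FOUND constant are exactly the ones used above.

import java.util.Set;

import org.apache.ignite.internal.processors.query.IgniteQueryErrorCode;
import org.apache.ignite.internal.processors.query.IgniteSQLException;

final class SingleTargetResolver {
    private SingleTargetResolver() {
        // No-op.
    }

    /** Returns the only element of the set, or fails with a TABLE_NOT_FOUND status code. */
    static <T> T single(Set<T> tbls) {
        if (tbls.size() != 1)
            throw new IgniteSQLException("Failed to determine target table", IgniteQueryErrorCode.TABLE_NOT_FOUND);

        return tbls.iterator().next();
    }
}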
Use of org.apache.ignite.internal.processors.query.IgniteSQLException in project ignite by apache.
Class JdbcResultSet, method next:
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public boolean next() throws SQLException {
    ensureNotClosed();

    if (it == null || (stmt.getMaxRows() > 0 && pos >= stmt.getMaxRows())) {
        curr = null;

        return false;
    }
    else if (it.hasNext()) {
        curr = new ArrayList<>(it.next());

        pos++;

        if (finished && !it.hasNext())
            it = null;

        return true;
    }
    else if (!finished) {
        JdbcConnection conn = (JdbcConnection)stmt.getConnection();

        Ignite ignite = conn.ignite();

        UUID nodeId = conn.nodeId();

        boolean loc = nodeId == null;

        // Connections from new clients send queries with new tasks, so we have to continue in the same manner.
        JdbcQueryTask qryTask = new JdbcQueryTask(loc ? ignite : null, conn.cacheName(), conn.schemaName(),
            null, true, loc, null, fetchSize, uuid, conn.isLocalQuery(), conn.isCollocatedQuery(),
            conn.isDistributedJoins());

        try {
            JdbcQueryTask.QueryResult res =
                loc ? qryTask.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(qryTask);

            finished = res.isFinished();

            it = res.getRows().iterator();

            return next();
        }
        catch (IgniteSQLException e) {
            throw e.toJdbcException();
        }
        catch (Exception e) {
            throw new SQLException("Failed to query Ignite.", e);
        }
    }

    it = null;

    return false;
}
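From the client's point of view, the toJdbcException() call above means that an IgniteSQLException raised while fetching the next page surfaces as a plain SQLException. A hedged usage sketch is shown below; the table and column names are illustrative, and the Connection is assumed to come from the Ignite JDBC driver.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

final class ReadNames {
    private ReadNames() {
        // No-op.
    }

    /** Prints the 'name' column; 'conn' is assumed to be an Ignite JDBC connection. */
    static void printNames(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT name FROM Person")) {
            while (rs.next())
                System.out.println(rs.getString(1));
        }
        catch (SQLException e) {
            // Failures inside next(), including converted IgniteSQLExceptions, arrive here.
            System.err.println("Query failed: " + e.getMessage());

            throw e;
        }
    }
}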
Use of org.apache.ignite.internal.processors.query.IgniteSQLException in project ignite by apache.
Class JdbcStatement, method execute:
/** {@inheritDoc} */
@Override
public boolean execute(String sql) throws SQLException {
    if (!conn.isDmlSupported()) {
        // We attempt to run a query without any checks as long as server does not support DML anyway,
        // so it simply will throw an exception when given a DML statement instead of a query.
        rs = executeQuery(sql);

        return true;
    }

    ensureNotClosed();

    rs = null;

    updateCnt = -1;

    if (F.isEmpty(sql))
        throw new SQLException("SQL query is empty");

    Ignite ignite = conn.ignite();

    UUID nodeId = conn.nodeId();

    UUID uuid = UUID.randomUUID();

    boolean loc = nodeId == null;

    JdbcQueryTask qryTask = new JdbcQueryTask(loc ? ignite : null, conn.cacheName(), conn.schemaName(), sql,
        null, loc, getArgs(), fetchSize, uuid, conn.isLocalQuery(), conn.isCollocatedQuery(),
        conn.isDistributedJoins());

    try {
        JdbcQueryTask.QueryResult res =
            loc ? qryTask.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(qryTask);

        if (res.isQuery()) {
            JdbcResultSet rs = new JdbcResultSet(uuid, this, res.getTbls(), res.getCols(), res.getTypes(),
                res.getRows(), res.isFinished());

            rs.setFetchSize(fetchSize);

            resSets.add(rs);

            this.rs = rs;
        }
        else
            updateCnt = updateCounterFromQueryResult(res.getRows());

        return res.isQuery();
    }
    catch (IgniteSQLException e) {
        throw e.toJdbcException();
    }
    catch (Exception e) {
        throw new SQLException("Failed to query Ignite.", e);
    }
}
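A short usage sketch for the execute() contract shown above: when the statement is a query the result is available through getResultSet(), otherwise the affected-row count is available through getUpdateCount(). The SQL text and connection are assumptions for illustration.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

final class ExecuteDemo {
    private ExecuteDemo() {
        // No-op.
    }

    /** Runs an arbitrary statement; 'conn' is assumed to be an Ignite JDBC connection. */
    static void run(Connection conn, String sql) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            if (stmt.execute(sql)) {
                try (ResultSet rs = stmt.getResultSet()) {
                    while (rs.next()) {
                        // Read the columns of the query result here.
                    }
                }
            }
            else
                System.out.println("Rows affected: " + stmt.getUpdateCount());
        }
    }
}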
Use of org.apache.ignite.internal.processors.query.IgniteSQLException in project ignite by apache.
Class JdbcRequestHandler, method executeBatchedQuery:
/**
 * Executes query and updates result counters.
 *
 * @param qry Query.
 * @param updCntsAcc Per query rows updates counter.
 * @param firstErr First error data - code and message.
 */
@SuppressWarnings("ForLoopReplaceableByForEach")
private void executeBatchedQuery(SqlFieldsQueryEx qry, List<Integer> updCntsAcc,
    IgniteBiTuple<Integer, String> firstErr) {
    try {
        if (cliCtx.isStream()) {
            List<Long> cnt = ctx.query().streamBatchedUpdateQuery(qry.getSchema(), cliCtx, qry.getSql(),
                qry.batchedArguments());

            for (int i = 0; i < cnt.size(); i++)
                updCntsAcc.add(cnt.get(i).intValue());

            return;
        }

        List<FieldsQueryCursor<List<?>>> qryRes = ctx.query().querySqlFields(null, qry, cliCtx, true, true);

        for (FieldsQueryCursor<List<?>> cur : qryRes) {
            if (cur instanceof BulkLoadContextCursor)
                throw new IgniteSQLException("COPY command cannot be executed in batch mode.");

            assert !((QueryCursorImpl)cur).isQuery();

            Iterator<List<?>> it = cur.iterator();

            if (it.hasNext()) {
                int val = ((Long)it.next().get(0)).intValue();

                updCntsAcc.add(val);
            }
        }
    }
    catch (Exception e) {
        int code;
        String msg;

        if (e instanceof IgniteSQLException) {
            BatchUpdateException batchCause = X.cause(e, BatchUpdateException.class);

            if (batchCause != null) {
                int[] updCntsOnErr = batchCause.getUpdateCounts();

                for (int i = 0; i < updCntsOnErr.length; i++)
                    updCntsAcc.add(updCntsOnErr[i]);

                msg = batchCause.getMessage();

                code = batchCause.getErrorCode();
            }
            else {
                for (int i = 0; i < qry.batchedArguments().size(); i++)
                    updCntsAcc.add(Statement.EXECUTE_FAILED);

                msg = e.getMessage();

                code = ((IgniteSQLException)e).statusCode();
            }
        }
        else {
            for (int i = 0; i < qry.batchedArguments().size(); i++)
                updCntsAcc.add(Statement.EXECUTE_FAILED);

            msg = e.getMessage();

            code = IgniteQueryErrorCode.UNKNOWN;
        }

        if (firstErr.isEmpty())
            firstErr.set(code, msg);
        else
            U.error(log, "Failed to execute batch query [qry=" + qry + ']', e);
    }
}
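The handler above mirrors what a JDBC client observes when executing a batch: per-statement update counts on success, and a BatchUpdateException whose getUpdateCounts() array may contain Statement.EXECUTE_FAILED markers on failure. A hedged client-side sketch follows; the table, columns, and values are illustrative.

import java.sql.BatchUpdateException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;

final class BatchDemo {
    private BatchDemo() {
        // No-op.
    }

    /** Inserts a few rows in one batch; 'conn' is assumed to be an Ignite JDBC connection. */
    static void insertBatch(Connection conn) throws SQLException {
        try (PreparedStatement ps = conn.prepareStatement("INSERT INTO Person(id, name) VALUES(?, ?)")) {
            for (int i = 0; i < 3; i++) {
                ps.setInt(1, i);
                ps.setString(2, "name-" + i);

                ps.addBatch();
            }

            int[] cnts = ps.executeBatch();

            System.out.println("Updated rows per statement: " + Arrays.toString(cnts));
        }
        catch (BatchUpdateException e) {
            // Entries equal to Statement.EXECUTE_FAILED mark statements that did not apply.
            int[] partial = e.getUpdateCounts();

            System.err.println("Batch failed after " + partial.length + " counters: " + e.getMessage());
        }
    }
}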
Use of org.apache.ignite.internal.processors.query.IgniteSQLException in project ignite by apache.
Class DdlStatementsProcessor, method runDdlStatement:
/**
 * Run DDL statement.
 *
 * @param sql Original SQL.
 * @param cmd Command.
 * @return Result.
 * @throws IgniteCheckedException On error.
 */
@SuppressWarnings("unchecked")
public FieldsQueryCursor<List<?>> runDdlStatement(String sql, SqlCommand cmd) throws IgniteCheckedException {
    IgniteInternalFuture fut = null;

    try {
        if (cmd instanceof SqlCreateIndexCommand) {
            SqlCreateIndexCommand cmd0 = (SqlCreateIndexCommand)cmd;

            GridH2Table tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());

            if (tbl == null)
                throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd0.tableName());

            assert tbl.rowDescriptor() != null;

            isDdlSupported(tbl);

            QueryIndex newIdx = new QueryIndex();

            newIdx.setName(cmd0.indexName());
            newIdx.setIndexType(cmd0.spatial() ? QueryIndexType.GEOSPATIAL : QueryIndexType.SORTED);

            LinkedHashMap<String, Boolean> flds = new LinkedHashMap<>();

            // Let's replace H2's table and property names by those operated by GridQueryProcessor.
            GridQueryTypeDescriptor typeDesc = tbl.rowDescriptor().type();

            for (SqlIndexColumn col : cmd0.columns()) {
                GridQueryProperty prop = typeDesc.property(col.name());

                if (prop == null)
                    throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, col.name());

                flds.put(prop.name(), !col.descending());
            }

            newIdx.setFields(flds);
            newIdx.setInlineSize(cmd0.inlineSize());

            fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd.schemaName(), typeDesc.tableName(),
                newIdx, cmd0.ifNotExists(), cmd0.parallel());
        }
        else if (cmd instanceof SqlDropIndexCommand) {
            SqlDropIndexCommand cmd0 = (SqlDropIndexCommand)cmd;

            GridH2Table tbl = idx.dataTableForIndex(cmd0.schemaName(), cmd0.indexName());

            if (tbl != null) {
                isDdlSupported(tbl);

                fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd0.schemaName(), cmd0.indexName(),
                    cmd0.ifExists());
            }
            else {
                if (cmd0.ifExists())
                    fut = new GridFinishedFuture();
                else
                    throw new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND,
                        cmd0.indexName());
            }
        }
        else if (cmd instanceof SqlAlterTableCommand) {
            SqlAlterTableCommand cmd0 = (SqlAlterTableCommand)cmd;

            GridH2Table tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());

            if (tbl == null) {
                ctx.cache().createMissingQueryCaches();

                tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());
            }

            if (tbl == null) {
                throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND,
                    cmd0.tableName());
            }

            Boolean logging = cmd0.logging();

            assert logging != null : "Only LOGGING/NOLOGGING are supported at the moment.";

            IgniteCluster cluster = ctx.grid().cluster();

            if (logging) {
                boolean res = cluster.enableWal(tbl.cacheName());

                if (!res)
                    throw new IgniteSQLException("Logging already enabled for table: " + cmd0.tableName());
            }
            else {
                boolean res = cluster.disableWal(tbl.cacheName());

                if (!res)
                    throw new IgniteSQLException("Logging already disabled for table: " + cmd0.tableName());
            }

            fut = new GridFinishedFuture();
        }
        else if (cmd instanceof SqlCreateUserCommand) {
            SqlCreateUserCommand addCmd = (SqlCreateUserCommand)cmd;

            ctx.authentication().addUser(addCmd.userName(), addCmd.password());
        }
        else if (cmd instanceof SqlAlterUserCommand) {
            SqlAlterUserCommand altCmd = (SqlAlterUserCommand)cmd;

            ctx.authentication().updateUser(altCmd.userName(), altCmd.password());
        }
        else if (cmd instanceof SqlDropUserCommand) {
            SqlDropUserCommand dropCmd = (SqlDropUserCommand)cmd;

            ctx.authentication().removeUser(dropCmd.userName());
        }
        else
            throw new IgniteSQLException("Unsupported DDL operation: " + sql,
                IgniteQueryErrorCode.UNSUPPORTED_OPERATION);

        if (fut != null)
            fut.get();

        return H2Utils.zeroCursor();
    }
    catch (SchemaOperationException e) {
        throw convert(e);
    }
    catch (IgniteSQLException e) {
        throw e;
    }
    catch (Exception e) {
        throw new IgniteSQLException(e.getMessage(), e);
    }
}
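The commands handled above arrive as ordinary SQL text. Below is a hedged sketch of issuing a few of them through the public SqlFieldsQuery API; the cache name, table name, and index name are assumptions made for illustration, and the CREATE INDEX, ALTER TABLE ... LOGGING/NOLOGGING, and DROP INDEX statements are routed to the corresponding branches of runDdlStatement().

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.SqlFieldsQuery;

final class DdlDemo {
    private DdlDemo() {
        // No-op.
    }

    /** Issues a few DDL statements; 'ignite' and the cache name "PersonCache" are assumptions. */
    static void runDdl(Ignite ignite) {
        IgniteCache<?, ?> cache = ignite.cache("PersonCache");

        // Handled by the SqlCreateIndexCommand branch above.
        cache.query(new SqlFieldsQuery("CREATE INDEX IF NOT EXISTS person_name_idx ON Person(name)")).getAll();

        // Handled by the SqlAlterTableCommand branch (WAL off, then back on).
        cache.query(new SqlFieldsQuery("ALTER TABLE Person NOLOGGING")).getAll();
        cache.query(new SqlFieldsQuery("ALTER TABLE Person LOGGING")).getAll();

        // Handled by the SqlDropIndexCommand branch above.
        cache.query(new SqlFieldsQuery("DROP INDEX IF EXISTS person_name_idx")).getAll();

        // Any other DDL text ends up in the final else branch and is rejected with
        // IgniteSQLException(..., IgniteQueryErrorCode.UNSUPPORTED_OPERATION).
    }
}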