use of org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor in project ignite by apache.
the class DmlStatementsProcessor method rowToKeyValue.
/**
 * Convert a row presented as an array of Objects into a key-value pair to be inserted into the cache.
 *
 * @param cctx Cache context.
 * @param row Row to process.
 * @param plan Update plan.
 * @return Key-value pair.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings({ "unchecked", "ConstantConditions", "ResultOfMethodCallIgnored" })
private IgniteBiTuple<?, ?> rowToKeyValue(GridCacheContext cctx, List<?> row, UpdatePlan plan) throws IgniteCheckedException {
GridH2RowDescriptor rowDesc = plan.tbl.rowDescriptor();
GridQueryTypeDescriptor desc = rowDesc.type();
Object key = plan.keySupplier.apply(row);
if (QueryUtils.isSqlType(desc.keyClass())) {
assert plan.keyColIdx != -1;
key = convert(key, rowDesc, desc.keyClass(), plan.colTypes[plan.keyColIdx]);
}
Object val = plan.valSupplier.apply(row);
if (QueryUtils.isSqlType(desc.valueClass())) {
assert plan.valColIdx != -1;
val = convert(val, rowDesc, desc.valueClass(), plan.colTypes[plan.valColIdx]);
}
if (key == null)
throw new IgniteSQLException("Key for INSERT or MERGE must not be null", IgniteQueryErrorCode.NULL_KEY);
if (val == null)
throw new IgniteSQLException("Value for INSERT or MERGE must not be null", IgniteQueryErrorCode.NULL_VALUE);
Map<String, Object> newColVals = new HashMap<>();
for (int i = 0; i < plan.colNames.length; i++) {
if (i == plan.keyColIdx || i == plan.valColIdx)
continue;
String colName = plan.colNames[i];
GridQueryProperty prop = desc.property(colName);
assert prop != null;
Class<?> expCls = prop.type();
newColVals.put(colName, convert(row.get(i), rowDesc, expCls, plan.colTypes[i]));
}
// We update columns in the order specified by the table for a reason - table's
// column order preserves their precedence for correct update of nested properties.
Column[] cols = plan.tbl.getColumns();
// First 3 columns are _key, _val and _ver. Skip 'em.
for (int i = DEFAULT_COLUMNS_COUNT; i < cols.length; i++) {
if (plan.tbl.rowDescriptor().isKeyValueOrVersionColumn(i))
continue;
String colName = cols[i].getName();
if (!newColVals.containsKey(colName))
continue;
Object colVal = newColVals.get(colName);
desc.setValue(colName, key, val, colVal);
}
if (cctx.binaryMarshaller()) {
if (key instanceof BinaryObjectBuilder)
key = ((BinaryObjectBuilder) key).build();
if (val instanceof BinaryObjectBuilder)
val = ((BinaryObjectBuilder) val).build();
}
return new IgniteBiTuple<>(key, val);
}
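For context, this conversion is what ultimately backs DML run through the SQL engine. Below is a minimal, hypothetical sketch of driving it from the public API; it assumes a running node and a Person table created via SQL (the cache name SQL_PUBLIC_PERSON follows the usual CREATE TABLE naming convention, and the column names are illustrative):

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class InsertExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Each row of the INSERT is converted into a key-value pair
            // (roughly what rowToKeyValue does) before it is written to the cache.
            List<List<?>> res = ignite.cache("SQL_PUBLIC_PERSON").query(
                new SqlFieldsQuery("INSERT INTO Person (id, name, age) VALUES (?, ?, ?)")
                    .setArgs(1, "Alice", 30))
                .getAll();

            // DML statements report an update count as a single-row, single-column result.
            System.out.println("Rows affected: " + res.get(0).get(0));
        }
    }
}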
use of org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor in project ignite by apache.
the class JdbcRequestHandler method getSchemas.
/**
* @param req Request.
* @return Response.
*/
private ClientListenerResponse getSchemas(JdbcMetaSchemasRequest req) {
    try {
        String schemaPtrn = req.schemaName();

        Set<String> schemas = new HashSet<>();

        for (String cacheName : ctx.cache().publicCacheNames()) {
            for (GridQueryTypeDescriptor table : ctx.query().types(cacheName)) {
                if (matches(table.schemaName(), schemaPtrn))
                    schemas.add(table.schemaName());
            }
        }

        return new JdbcResponse(new JdbcMetaSchemasResult(schemas));
    }
    catch (Exception e) {
        U.error(log, "Failed to get schemas metadata [reqId=" + req.requestId() + ", req=" + req + ']', e);

        return exceptionToResult(e);
    }
}
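On the client side this handler is reached through the thin JDBC driver's standard metadata API. A small sketch, assuming a node reachable at 127.0.0.1 on the default thin-driver port; the schema pattern is only an example:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class SchemasExample {
    public static void main(String[] args) throws Exception {
        // DatabaseMetaData.getSchemas() results in a JdbcMetaSchemasRequest,
        // which the getSchemas() handler above answers.
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1");
             ResultSet rs = conn.getMetaData().getSchemas(null, "%")) {
            while (rs.next())
                System.out.println(rs.getString("TABLE_SCHEM"));
        }
    }
}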
use of org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor in project ignite by apache.
the class JdbcRequestHandler method getPrimaryKeys.
/**
* @param req Request.
* @return Response.
*/
private ClientListenerResponse getPrimaryKeys(JdbcMetaPrimaryKeysRequest req) {
    try {
        Collection<JdbcPrimaryKeyMeta> meta = new HashSet<>();

        for (String cacheName : ctx.cache().publicCacheNames()) {
            for (GridQueryTypeDescriptor table : ctx.query().types(cacheName)) {
                if (!matches(table.schemaName(), req.schemaName()))
                    continue;

                if (!matches(table.tableName(), req.tableName()))
                    continue;

                List<String> fields = new ArrayList<>();

                for (String field : table.fields().keySet()) {
                    if (table.property(field).key())
                        fields.add(field);
                }

                final String keyName = table.keyFieldName() == null ?
                    "PK_" + table.schemaName() + "_" + table.tableName() :
                    table.keyFieldName();

                if (fields.isEmpty())
                    meta.add(new JdbcPrimaryKeyMeta(table.schemaName(), table.tableName(), keyName,
                        Collections.singletonList("_KEY")));
                else
                    meta.add(new JdbcPrimaryKeyMeta(table.schemaName(), table.tableName(), keyName, fields));
            }
        }

        return new JdbcResponse(new JdbcMetaPrimaryKeysResult(meta));
    }
    catch (Exception e) {
        U.error(log, "Failed to get primary keys metadata [reqId=" + req.requestId() + ", req=" + req + ']', e);

        return exceptionToResult(e);
    }
}
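The corresponding client-side call goes through DatabaseMetaData.getPrimaryKeys(). Note in the snippet above that when no key field name is configured the key name is synthesized as PK_<schema>_<table>, and a table without designated key fields reports the single _KEY column. A sketch, assuming a PERSON table in the PUBLIC schema (illustrative names):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class PrimaryKeysExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1");
             // Sends a JdbcMetaPrimaryKeysRequest handled by getPrimaryKeys() above.
             ResultSet rs = conn.getMetaData().getPrimaryKeys(null, "PUBLIC", "PERSON")) {
            while (rs.next())
                System.out.println(rs.getString("PK_NAME") + ": " + rs.getString("COLUMN_NAME"));
        }
    }
}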
use of org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor in project ignite by apache.
the class DdlStatementsProcessor method runDdlStatement.
/**
* Run DDL statement.
*
* @param sql Original SQL.
* @param cmd Command.
* @return Result.
* @throws IgniteCheckedException On error.
*/
@SuppressWarnings("unchecked")
public FieldsQueryCursor<List<?>> runDdlStatement(String sql, SqlCommand cmd) throws IgniteCheckedException {
IgniteInternalFuture fut = null;
try {
if (cmd instanceof SqlCreateIndexCommand) {
SqlCreateIndexCommand cmd0 = (SqlCreateIndexCommand) cmd;
GridH2Table tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());
if (tbl == null)
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd0.tableName());
assert tbl.rowDescriptor() != null;
isDdlSupported(tbl);
QueryIndex newIdx = new QueryIndex();
newIdx.setName(cmd0.indexName());
newIdx.setIndexType(cmd0.spatial() ? QueryIndexType.GEOSPATIAL : QueryIndexType.SORTED);
LinkedHashMap<String, Boolean> flds = new LinkedHashMap<>();
// Let's replace H2's table and property names by those operated by GridQueryProcessor.
GridQueryTypeDescriptor typeDesc = tbl.rowDescriptor().type();
for (SqlIndexColumn col : cmd0.columns()) {
GridQueryProperty prop = typeDesc.property(col.name());
if (prop == null)
throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, col.name());
flds.put(prop.name(), !col.descending());
}
newIdx.setFields(flds);
newIdx.setInlineSize(cmd0.inlineSize());
fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd.schemaName(), typeDesc.tableName(), newIdx, cmd0.ifNotExists(), cmd0.parallel());
} else if (cmd instanceof SqlDropIndexCommand) {
SqlDropIndexCommand cmd0 = (SqlDropIndexCommand) cmd;
GridH2Table tbl = idx.dataTableForIndex(cmd0.schemaName(), cmd0.indexName());
if (tbl != null) {
isDdlSupported(tbl);
fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd0.schemaName(), cmd0.indexName(), cmd0.ifExists());
} else {
if (cmd0.ifExists())
fut = new GridFinishedFuture();
else
throw new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, cmd0.indexName());
}
} else if (cmd instanceof SqlAlterTableCommand) {
SqlAlterTableCommand cmd0 = (SqlAlterTableCommand) cmd;
GridH2Table tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());
if (tbl == null) {
ctx.cache().createMissingQueryCaches();
tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());
}
if (tbl == null) {
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd0.tableName());
}
Boolean logging = cmd0.logging();
assert logging != null : "Only LOGGING/NOLOGGING are supported at the moment.";
IgniteCluster cluster = ctx.grid().cluster();
if (logging) {
boolean res = cluster.enableWal(tbl.cacheName());
if (!res)
throw new IgniteSQLException("Logging already enabled for table: " + cmd0.tableName());
} else {
boolean res = cluster.disableWal(tbl.cacheName());
if (!res)
throw new IgniteSQLException("Logging already disabled for table: " + cmd0.tableName());
}
fut = new GridFinishedFuture();
} else if (cmd instanceof SqlCreateUserCommand) {
SqlCreateUserCommand addCmd = (SqlCreateUserCommand) cmd;
ctx.authentication().addUser(addCmd.userName(), addCmd.password());
} else if (cmd instanceof SqlAlterUserCommand) {
SqlAlterUserCommand altCmd = (SqlAlterUserCommand) cmd;
ctx.authentication().updateUser(altCmd.userName(), altCmd.password());
} else if (cmd instanceof SqlDropUserCommand) {
SqlDropUserCommand dropCmd = (SqlDropUserCommand) cmd;
ctx.authentication().removeUser(dropCmd.userName());
} else
throw new IgniteSQLException("Unsupported DDL operation: " + sql, IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
if (fut != null)
fut.get();
return H2Utils.zeroCursor();
} catch (SchemaOperationException e) {
throw convert(e);
} catch (IgniteSQLException e) {
throw e;
} catch (Exception e) {
throw new IgniteSQLException(e.getMessage(), e);
}
}
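The commands this overload handles (CREATE/DROP INDEX, ALTER TABLE LOGGING/NOLOGGING, CREATE/ALTER/DROP USER) arrive as already-parsed SqlCommand instances. A rough sketch of statements that would be routed through the index branches, issued over the thin JDBC driver; table and index names are illustrative and a Person table is assumed to exist:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class IndexDdlExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1");
             Statement stmt = conn.createStatement()) {
            // Handled by the SqlCreateIndexCommand branch above.
            stmt.executeUpdate("CREATE INDEX IF NOT EXISTS person_name_idx ON Person (name)");

            // Handled by the SqlDropIndexCommand branch above.
            stmt.executeUpdate("DROP INDEX IF EXISTS person_name_idx");
        }
    }
}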
use of org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor in project ignite by apache.
the class DdlStatementsProcessor method runDdlStatement.
/**
* Execute DDL statement.
*
* @param sql SQL.
* @param prepared Prepared.
* @return Cursor on query results.
* @throws IgniteCheckedException On error.
*/
@SuppressWarnings({ "unchecked", "ThrowableResultOfMethodCallIgnored" })
public FieldsQueryCursor<List<?>> runDdlStatement(String sql, Prepared prepared) throws IgniteCheckedException {
IgniteInternalFuture fut = null;
try {
GridSqlStatement stmt0 = new GridSqlQueryParser(false).parse(prepared);
if (stmt0 instanceof GridSqlCreateIndex) {
GridSqlCreateIndex cmd = (GridSqlCreateIndex) stmt0;
GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
if (tbl == null)
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
assert tbl.rowDescriptor() != null;
isDdlSupported(tbl);
QueryIndex newIdx = new QueryIndex();
newIdx.setName(cmd.index().getName());
newIdx.setIndexType(cmd.index().getIndexType());
LinkedHashMap<String, Boolean> flds = new LinkedHashMap<>();
// Let's replace H2's table and property names by those operated by GridQueryProcessor.
GridQueryTypeDescriptor typeDesc = tbl.rowDescriptor().type();
for (Map.Entry<String, Boolean> e : cmd.index().getFields().entrySet()) {
GridQueryProperty prop = typeDesc.property(e.getKey());
if (prop == null)
throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, e.getKey());
flds.put(prop.name(), e.getValue());
}
newIdx.setFields(flds);
fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd.schemaName(), typeDesc.tableName(), newIdx, cmd.ifNotExists(), 0);
} else if (stmt0 instanceof GridSqlDropIndex) {
GridSqlDropIndex cmd = (GridSqlDropIndex) stmt0;
GridH2Table tbl = idx.dataTableForIndex(cmd.schemaName(), cmd.indexName());
if (tbl != null) {
isDdlSupported(tbl);
fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd.schemaName(), cmd.indexName(), cmd.ifExists());
} else {
if (cmd.ifExists())
fut = new GridFinishedFuture();
else
throw new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, cmd.indexName());
}
} else if (stmt0 instanceof GridSqlCreateTable) {
GridSqlCreateTable cmd = (GridSqlCreateTable) stmt0;
if (!F.eq(QueryUtils.DFLT_SCHEMA, cmd.schemaName()))
throw new SchemaOperationException("CREATE TABLE can only be executed on " + QueryUtils.DFLT_SCHEMA + " schema.");
GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
if (tbl != null) {
if (!cmd.ifNotExists())
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_EXISTS, cmd.tableName());
} else {
QueryEntity e = toQueryEntity(cmd);
CacheConfiguration<?, ?> ccfg = new CacheConfiguration<>(cmd.tableName());
ccfg.setQueryEntities(Collections.singleton(e));
ccfg.setSqlSchema(cmd.schemaName());
SchemaOperationException err = QueryUtils.checkQueryEntityConflicts(ccfg, ctx.cache().cacheDescriptors().values());
if (err != null)
throw err;
ctx.query().dynamicTableCreate(cmd.schemaName(), e, cmd.templateName(), cmd.cacheName(), cmd.cacheGroup(), cmd.dataRegionName(), cmd.affinityKey(), cmd.atomicityMode(), cmd.writeSynchronizationMode(), cmd.backups(), cmd.ifNotExists());
}
} else if (stmt0 instanceof GridSqlDropTable) {
GridSqlDropTable cmd = (GridSqlDropTable) stmt0;
if (!F.eq(QueryUtils.DFLT_SCHEMA, cmd.schemaName()))
throw new SchemaOperationException("DROP TABLE can only be executed on " + QueryUtils.DFLT_SCHEMA + " schema.");
GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
if (tbl == null && cmd.ifExists()) {
ctx.cache().createMissingQueryCaches();
tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
}
if (tbl == null) {
if (!cmd.ifExists())
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
} else
ctx.query().dynamicTableDrop(tbl.cacheName(), cmd.tableName(), cmd.ifExists());
} else if (stmt0 instanceof GridSqlAlterTableAddColumn) {
GridSqlAlterTableAddColumn cmd = (GridSqlAlterTableAddColumn) stmt0;
GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
if (tbl == null && cmd.ifTableExists()) {
ctx.cache().createMissingQueryCaches();
tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
}
if (tbl == null) {
if (!cmd.ifTableExists())
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
} else {
if (QueryUtils.isSqlType(tbl.rowDescriptor().type().valueClass()))
throw new SchemaOperationException("Cannot add column(s) because table was created " + "with " + PARAM_WRAP_VALUE + "=false option.");
List<QueryField> cols = new ArrayList<>(cmd.columns().length);
boolean allFieldsNullable = true;
for (GridSqlColumn col : cmd.columns()) {
if (tbl.doesColumnExist(col.columnName())) {
if ((!cmd.ifNotExists() || cmd.columns().length != 1)) {
throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_EXISTS, col.columnName());
} else {
cols = null;
break;
}
}
QueryField field = new QueryField(col.columnName(), DataType.getTypeClassName(col.column().getType()), col.column().isNullable(), col.defaultValue());
cols.add(field);
allFieldsNullable &= field.isNullable();
}
if (cols != null) {
assert tbl.rowDescriptor() != null;
if (!allFieldsNullable)
QueryUtils.checkNotNullAllowed(tbl.cache().config());
fut = ctx.query().dynamicColumnAdd(tbl.cacheName(), cmd.schemaName(), tbl.rowDescriptor().type().tableName(), cols, cmd.ifTableExists(), cmd.ifNotExists());
}
}
} else if (stmt0 instanceof GridSqlAlterTableDropColumn) {
GridSqlAlterTableDropColumn cmd = (GridSqlAlterTableDropColumn) stmt0;
GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
if (tbl == null && cmd.ifTableExists()) {
ctx.cache().createMissingQueryCaches();
tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
}
if (tbl == null) {
if (!cmd.ifTableExists())
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
} else {
assert tbl.rowDescriptor() != null;
if (QueryUtils.isSqlType(tbl.rowDescriptor().type().valueClass()))
throw new SchemaOperationException("Cannot drop column(s) because table was created " + "with " + PARAM_WRAP_VALUE + "=false option.");
List<String> cols = new ArrayList<>(cmd.columns().length);
GridQueryTypeDescriptor type = tbl.rowDescriptor().type();
for (String colName : cmd.columns()) {
if (!tbl.doesColumnExist(colName)) {
if ((!cmd.ifExists() || cmd.columns().length != 1)) {
throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, colName);
} else {
cols = null;
break;
}
}
SchemaOperationException err = QueryUtils.validateDropColumn(type, colName);
if (err != null)
throw err;
cols.add(colName);
}
if (cols != null) {
fut = ctx.query().dynamicColumnRemove(tbl.cacheName(), cmd.schemaName(), type.tableName(), cols, cmd.ifTableExists(), cmd.ifExists());
}
}
} else
throw new IgniteSQLException("Unsupported DDL operation: " + sql, IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
if (fut != null)
fut.get();
QueryCursorImpl<List<?>> resCur = (QueryCursorImpl<List<?>>) new QueryCursorImpl(Collections.singletonList(Collections.singletonList(0L)), null, false);
resCur.fieldsMeta(UPDATE_RESULT_META);
return resCur;
} catch (SchemaOperationException e) {
U.error(null, "DDL operation failure", e);
throw convert(e);
} catch (IgniteSQLException e) {
throw e;
} catch (Exception e) {
throw new IgniteSQLException(e.getMessage(), e);
}
}
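This overload covers the H2-parsed DDL forms: CREATE/DROP TABLE and ALTER TABLE ADD/DROP COLUMN, in addition to index statements. A sketch of SQL exercising those branches over the thin JDBC driver; the statements are illustrative and assume the default PUBLIC schema, which, as the checks above show, is the only schema CREATE TABLE and DROP TABLE accept:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class TableDdlExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1");
             Statement stmt = conn.createStatement()) {
            // GridSqlCreateTable branch.
            stmt.executeUpdate("CREATE TABLE IF NOT EXISTS Person (id INT PRIMARY KEY, name VARCHAR)");

            // GridSqlAlterTableAddColumn and GridSqlAlterTableDropColumn branches.
            stmt.executeUpdate("ALTER TABLE Person ADD COLUMN IF NOT EXISTS age INT");
            stmt.executeUpdate("ALTER TABLE Person DROP COLUMN IF EXISTS age");

            // GridSqlDropTable branch.
            stmt.executeUpdate("DROP TABLE IF EXISTS Person");
        }
    }
}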