Use of org.apache.ignite.internal.processors.query.schema.SchemaOperationException in project ignite by apache.
The class GridQueryProcessor, method dynamicTableCreate.
/**
* Create cache and table from given query entity.
*
* @param schemaName Schema name to create table in.
* @param entity Entity to create table from.
* @param templateName Template name.
* @param cacheName Cache name.
* @param cacheGroup Cache group name.
* @param dataRegion Data region name.
* @param affinityKey Affinity key column name.
* @param atomicityMode Atomicity mode.
* @param writeSyncMode Write synchronization mode.
* @param backups Backups.
* @param ifNotExists Quietly ignore this command if table already exists.
* @throws IgniteCheckedException If failed.
*/
@SuppressWarnings("unchecked")
public void dynamicTableCreate(String schemaName, QueryEntity entity, String templateName, String cacheName, String cacheGroup, @Nullable String dataRegion, String affinityKey, @Nullable CacheAtomicityMode atomicityMode, @Nullable CacheWriteSynchronizationMode writeSyncMode, int backups, boolean ifNotExists) throws IgniteCheckedException {
    assert !F.isEmpty(templateName);
    assert backups >= 0;
    CacheConfiguration<?, ?> ccfg = ctx.cache().getConfigFromTemplate(templateName);
    if (ccfg == null) {
        if (QueryUtils.TEMPLATE_PARTITIONED.equalsIgnoreCase(templateName))
            ccfg = new CacheConfiguration<>().setCacheMode(CacheMode.PARTITIONED);
        else if (QueryUtils.TEMPLATE_REPLICATED.equalsIgnoreCase(templateName))
            ccfg = new CacheConfiguration<>().setCacheMode(CacheMode.REPLICATED);
        else
            throw new SchemaOperationException(SchemaOperationException.CODE_CACHE_NOT_FOUND, templateName);
        ccfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
    }
    if (!F.isEmpty(ccfg.getQueryEntities()))
        throw new SchemaOperationException("Template cache already contains query entities which it should not: " + templateName);
    if (!F.isEmpty(entity.getNotNullFields()))
        QueryUtils.checkNotNullAllowed(ccfg);
    if (F.isEmpty(cacheName))
        cacheName = QueryUtils.createTableCacheName(schemaName, entity.getTableName());
    ccfg.setName(cacheName);
    if (!F.isEmpty(cacheGroup))
        ccfg.setGroupName(cacheGroup);
    if (!F.isEmpty(dataRegion))
        ccfg.setDataRegionName(dataRegion);
    if (atomicityMode != null)
        ccfg.setAtomicityMode(atomicityMode);
    if (writeSyncMode != null)
        ccfg.setWriteSynchronizationMode(writeSyncMode);
    ccfg.setBackups(backups);
    ccfg.setSqlSchema(schemaName);
    ccfg.setSqlEscapeAll(true);
    ccfg.setQueryEntities(Collections.singleton(entity));
    if (affinityKey != null)
        ccfg.setKeyConfiguration(new CacheKeyConfiguration(entity.getKeyType(), affinityKey));
    boolean res;
    try {
        res = ctx.grid().getOrCreateCache0(ccfg, true).get2();
    } catch (CacheException e) {
        if (e.getCause() instanceof SchemaOperationException)
            throw (SchemaOperationException) e.getCause();
        else
            throw e;
    }
    if (!res && !ifNotExists)
        throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_EXISTS, entity.getTableName());
}
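For orientation, dynamicTableCreate is normally reached through the SQL layer (see the DdlStatementsProcessor snippets below) rather than called directly by user code. The following is a minimal sketch, not taken from the Ignite sources, of the public-API path that ends up in this method; the node configuration, cache name, table and column names are assumptions made for illustration, and the WITH parameters correspond to the templateName, backups and atomicityMode arguments above.

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class CreateTableSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Any cache instance can serve as an entry point for SQL; the name is arbitrary.
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache("sqlEntryPoint");

            // CREATE TABLE is parsed by the DDL processor and eventually reaches
            // GridQueryProcessor.dynamicTableCreate(...) with these WITH parameters.
            List<List<?>> res = cache.query(new SqlFieldsQuery(
                "CREATE TABLE IF NOT EXISTS Person (id BIGINT PRIMARY KEY, name VARCHAR) " +
                "WITH \"template=partitioned,backups=1,atomicity=transactional\"")).getAll();

            System.out.println("CREATE TABLE result: " + res);
        }
    }
}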
Use of org.apache.ignite.internal.processors.query.schema.SchemaOperationException in project ignite by apache.
The class GridQueryProcessor, method processSchemaOperationLocal.
/**
* Process schema operation.
*
* @param op Operation.
* @param type Type descriptor.
* @param depId Cache deployment ID.
* @param cancelTok Cancel token.
* @throws SchemaOperationException If failed.
*/
@SuppressWarnings("StatementWithEmptyBody")
public void processSchemaOperationLocal(SchemaAbstractOperation op, QueryTypeDescriptorImpl type, IgniteUuid depId, SchemaIndexOperationCancellationToken cancelTok) throws SchemaOperationException {
    if (log.isDebugEnabled())
        log.debug("Started local index operation [opId=" + op.id() + ']');
    String cacheName = op.cacheName();
    GridCacheAdapter cache = ctx.cache().internalCache(cacheName);
    if (cache == null || !F.eq(depId, cache.context().dynamicDeploymentId()))
        throw new SchemaOperationException(SchemaOperationException.CODE_CACHE_NOT_FOUND, cacheName);
    try {
        if (op instanceof SchemaIndexCreateOperation) {
            final SchemaIndexCreateOperation op0 = (SchemaIndexCreateOperation) op;
            QueryIndexDescriptorImpl idxDesc = QueryUtils.createIndexDescriptor(type, op0.index());
            GridCacheContext cctx = cache.context();
            SchemaIndexCacheFilter filter = new TableCacheFilter(cctx, op0.tableName());
            SchemaIndexCacheVisitor visitor = new SchemaIndexCacheVisitorImpl(cctx, filter, cancelTok, op0.parallel());
            idx.dynamicIndexCreate(op0.schemaName(), op0.tableName(), idxDesc, op0.ifNotExists(), visitor);
        } else if (op instanceof SchemaIndexDropOperation) {
            SchemaIndexDropOperation op0 = (SchemaIndexDropOperation) op;
            idx.dynamicIndexDrop(op0.schemaName(), op0.indexName(), op0.ifExists());
        } else if (op instanceof SchemaAlterTableAddColumnOperation) {
            SchemaAlterTableAddColumnOperation op0 = (SchemaAlterTableAddColumnOperation) op;
            processDynamicAddColumn(type, op0.columns());
            idx.dynamicAddColumn(op0.schemaName(), op0.tableName(), op0.columns(), op0.ifTableExists(), op0.ifNotExists());
        } else if (op instanceof SchemaAlterTableDropColumnOperation) {
            SchemaAlterTableDropColumnOperation op0 = (SchemaAlterTableDropColumnOperation) op;
            processDynamicDropColumn(type, op0.columns());
            idx.dynamicDropColumn(op0.schemaName(), op0.tableName(), op0.columns(), op0.ifTableExists(), op0.ifExists());
        } else
            throw new SchemaOperationException("Unsupported operation: " + op);
    } catch (Exception e) {
        if (e instanceof SchemaOperationException)
            throw (SchemaOperationException) e;
        else
            throw new SchemaOperationException("Schema change operation failed: " + e.getMessage(), e);
    }
}
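Each branch above handles a distributed schema operation that originates from a DDL statement on the SQL API. A rough sketch of the statements that exercise the four branches, assuming the Person table from the earlier sketch already exists; the cache reference and all names are illustrative assumptions, not part of the Ignite sources.

import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class SchemaOperationSketch {
    /** Issues DDL that is ultimately applied on each node by processSchemaOperationLocal. */
    static void runSchemaDdl(IgniteCache<?, ?> cache) {
        // SchemaIndexCreateOperation branch.
        cache.query(new SqlFieldsQuery("CREATE INDEX IF NOT EXISTS person_name_idx ON Person (name)")).getAll();

        // SchemaAlterTableAddColumnOperation branch.
        cache.query(new SqlFieldsQuery("ALTER TABLE Person ADD COLUMN IF NOT EXISTS age INT")).getAll();

        // SchemaAlterTableDropColumnOperation branch.
        cache.query(new SqlFieldsQuery("ALTER TABLE Person DROP COLUMN IF EXISTS age")).getAll();

        // SchemaIndexDropOperation branch.
        cache.query(new SqlFieldsQuery("DROP INDEX IF EXISTS person_name_idx")).getAll();
    }
}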
Use of org.apache.ignite.internal.processors.query.schema.SchemaOperationException in project ignite by apache.
The class DdlStatementsProcessor, method runDdlStatement(String, SqlCommand).
/**
* Run DDL statement.
*
* @param sql Original SQL.
* @param cmd Command.
* @return Result.
* @throws IgniteCheckedException On error.
*/
@SuppressWarnings("unchecked")
public FieldsQueryCursor<List<?>> runDdlStatement(String sql, SqlCommand cmd) throws IgniteCheckedException {
    IgniteInternalFuture fut = null;
    try {
        if (cmd instanceof SqlCreateIndexCommand) {
            SqlCreateIndexCommand cmd0 = (SqlCreateIndexCommand) cmd;
            GridH2Table tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());
            if (tbl == null)
                throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd0.tableName());
            assert tbl.rowDescriptor() != null;
            isDdlSupported(tbl);
            QueryIndex newIdx = new QueryIndex();
            newIdx.setName(cmd0.indexName());
            newIdx.setIndexType(cmd0.spatial() ? QueryIndexType.GEOSPATIAL : QueryIndexType.SORTED);
            LinkedHashMap<String, Boolean> flds = new LinkedHashMap<>();
            // Let's replace H2's table and property names by those operated by GridQueryProcessor.
            GridQueryTypeDescriptor typeDesc = tbl.rowDescriptor().type();
            for (SqlIndexColumn col : cmd0.columns()) {
                GridQueryProperty prop = typeDesc.property(col.name());
                if (prop == null)
                    throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, col.name());
                flds.put(prop.name(), !col.descending());
            }
            newIdx.setFields(flds);
            newIdx.setInlineSize(cmd0.inlineSize());
            fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd.schemaName(), typeDesc.tableName(), newIdx, cmd0.ifNotExists(), cmd0.parallel());
        } else if (cmd instanceof SqlDropIndexCommand) {
            SqlDropIndexCommand cmd0 = (SqlDropIndexCommand) cmd;
            GridH2Table tbl = idx.dataTableForIndex(cmd0.schemaName(), cmd0.indexName());
            if (tbl != null) {
                isDdlSupported(tbl);
                fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd0.schemaName(), cmd0.indexName(), cmd0.ifExists());
            } else {
                if (cmd0.ifExists())
                    fut = new GridFinishedFuture();
                else
                    throw new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, cmd0.indexName());
            }
        } else if (cmd instanceof SqlAlterTableCommand) {
            SqlAlterTableCommand cmd0 = (SqlAlterTableCommand) cmd;
            GridH2Table tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());
            if (tbl == null) {
                ctx.cache().createMissingQueryCaches();
                tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());
            }
            if (tbl == null) {
                throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd0.tableName());
            }
            Boolean logging = cmd0.logging();
            assert logging != null : "Only LOGGING/NOLOGGING are supported at the moment.";
            IgniteCluster cluster = ctx.grid().cluster();
            if (logging) {
                boolean res = cluster.enableWal(tbl.cacheName());
                if (!res)
                    throw new IgniteSQLException("Logging already enabled for table: " + cmd0.tableName());
            } else {
                boolean res = cluster.disableWal(tbl.cacheName());
                if (!res)
                    throw new IgniteSQLException("Logging already disabled for table: " + cmd0.tableName());
            }
            fut = new GridFinishedFuture();
        } else if (cmd instanceof SqlCreateUserCommand) {
            SqlCreateUserCommand addCmd = (SqlCreateUserCommand) cmd;
            ctx.authentication().addUser(addCmd.userName(), addCmd.password());
        } else if (cmd instanceof SqlAlterUserCommand) {
            SqlAlterUserCommand altCmd = (SqlAlterUserCommand) cmd;
            ctx.authentication().updateUser(altCmd.userName(), altCmd.password());
        } else if (cmd instanceof SqlDropUserCommand) {
            SqlDropUserCommand dropCmd = (SqlDropUserCommand) cmd;
            ctx.authentication().removeUser(dropCmd.userName());
        } else
            throw new IgniteSQLException("Unsupported DDL operation: " + sql, IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
        if (fut != null)
            fut.get();
        return H2Utils.zeroCursor();
    } catch (SchemaOperationException e) {
        throw convert(e);
    } catch (IgniteSQLException e) {
        throw e;
    } catch (Exception e) {
        throw new IgniteSQLException(e.getMessage(), e);
    }
}
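This overload serves commands recognized by Ignite's native SQL parser (SqlCommand subclasses); other DDL falls through to the H2-based overload shown below. A hedged sketch of statements that exercise its remaining branches, reusing the cache and Person table assumed in the earlier sketches: WAL toggling requires native persistence, the user commands require authentication to be enabled, and all names and passwords are illustrative.

import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class NativeDdlSketch {
    static void runNativeDdl(IgniteCache<?, ?> cache) {
        // SqlAlterTableCommand -> IgniteCluster.disableWal(...) / enableWal(...).
        cache.query(new SqlFieldsQuery("ALTER TABLE Person NOLOGGING")).getAll();
        cache.query(new SqlFieldsQuery("ALTER TABLE Person LOGGING")).getAll();

        // SqlCreateUserCommand / SqlAlterUserCommand / SqlDropUserCommand.
        cache.query(new SqlFieldsQuery("CREATE USER test WITH PASSWORD 'a'")).getAll();
        cache.query(new SqlFieldsQuery("ALTER USER test WITH PASSWORD 'b'")).getAll();
        cache.query(new SqlFieldsQuery("DROP USER test")).getAll();
    }
}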
Use of org.apache.ignite.internal.processors.query.schema.SchemaOperationException in project ignite by apache.
The class DdlStatementsProcessor, method runDdlStatement(String, Prepared).
/**
* Execute DDL statement.
*
* @param sql SQL.
* @param prepared Prepared.
* @return Cursor on query results.
* @throws IgniteCheckedException On error.
*/
@SuppressWarnings({ "unchecked", "ThrowableResultOfMethodCallIgnored" })
public FieldsQueryCursor<List<?>> runDdlStatement(String sql, Prepared prepared) throws IgniteCheckedException {
    IgniteInternalFuture fut = null;
    try {
        GridSqlStatement stmt0 = new GridSqlQueryParser(false).parse(prepared);
        if (stmt0 instanceof GridSqlCreateIndex) {
            GridSqlCreateIndex cmd = (GridSqlCreateIndex) stmt0;
            GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
            if (tbl == null)
                throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
            assert tbl.rowDescriptor() != null;
            isDdlSupported(tbl);
            QueryIndex newIdx = new QueryIndex();
            newIdx.setName(cmd.index().getName());
            newIdx.setIndexType(cmd.index().getIndexType());
            LinkedHashMap<String, Boolean> flds = new LinkedHashMap<>();
            // Let's replace H2's table and property names by those operated by GridQueryProcessor.
            GridQueryTypeDescriptor typeDesc = tbl.rowDescriptor().type();
            for (Map.Entry<String, Boolean> e : cmd.index().getFields().entrySet()) {
                GridQueryProperty prop = typeDesc.property(e.getKey());
                if (prop == null)
                    throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, e.getKey());
                flds.put(prop.name(), e.getValue());
            }
            newIdx.setFields(flds);
            fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd.schemaName(), typeDesc.tableName(), newIdx, cmd.ifNotExists(), 0);
        } else if (stmt0 instanceof GridSqlDropIndex) {
            GridSqlDropIndex cmd = (GridSqlDropIndex) stmt0;
            GridH2Table tbl = idx.dataTableForIndex(cmd.schemaName(), cmd.indexName());
            if (tbl != null) {
                isDdlSupported(tbl);
                fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd.schemaName(), cmd.indexName(), cmd.ifExists());
            } else {
                if (cmd.ifExists())
                    fut = new GridFinishedFuture();
                else
                    throw new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, cmd.indexName());
            }
        } else if (stmt0 instanceof GridSqlCreateTable) {
            GridSqlCreateTable cmd = (GridSqlCreateTable) stmt0;
            if (!F.eq(QueryUtils.DFLT_SCHEMA, cmd.schemaName()))
                throw new SchemaOperationException("CREATE TABLE can only be executed on " + QueryUtils.DFLT_SCHEMA + " schema.");
            GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
            if (tbl != null) {
                if (!cmd.ifNotExists())
                    throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_EXISTS, cmd.tableName());
            } else {
                QueryEntity e = toQueryEntity(cmd);
                CacheConfiguration<?, ?> ccfg = new CacheConfiguration<>(cmd.tableName());
                ccfg.setQueryEntities(Collections.singleton(e));
                ccfg.setSqlSchema(cmd.schemaName());
                SchemaOperationException err = QueryUtils.checkQueryEntityConflicts(ccfg, ctx.cache().cacheDescriptors().values());
                if (err != null)
                    throw err;
                ctx.query().dynamicTableCreate(cmd.schemaName(), e, cmd.templateName(), cmd.cacheName(), cmd.cacheGroup(), cmd.dataRegionName(), cmd.affinityKey(), cmd.atomicityMode(), cmd.writeSynchronizationMode(), cmd.backups(), cmd.ifNotExists());
            }
        } else if (stmt0 instanceof GridSqlDropTable) {
            GridSqlDropTable cmd = (GridSqlDropTable) stmt0;
            if (!F.eq(QueryUtils.DFLT_SCHEMA, cmd.schemaName()))
                throw new SchemaOperationException("DROP TABLE can only be executed on " + QueryUtils.DFLT_SCHEMA + " schema.");
            GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
            if (tbl == null && cmd.ifExists()) {
                ctx.cache().createMissingQueryCaches();
                tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
            }
            if (tbl == null) {
                if (!cmd.ifExists())
                    throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
            } else
                ctx.query().dynamicTableDrop(tbl.cacheName(), cmd.tableName(), cmd.ifExists());
        } else if (stmt0 instanceof GridSqlAlterTableAddColumn) {
            GridSqlAlterTableAddColumn cmd = (GridSqlAlterTableAddColumn) stmt0;
            GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
            if (tbl == null && cmd.ifTableExists()) {
                ctx.cache().createMissingQueryCaches();
                tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
            }
            if (tbl == null) {
                if (!cmd.ifTableExists())
                    throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
            } else {
                if (QueryUtils.isSqlType(tbl.rowDescriptor().type().valueClass()))
                    throw new SchemaOperationException("Cannot add column(s) because table was created " + "with " + PARAM_WRAP_VALUE + "=false option.");
                List<QueryField> cols = new ArrayList<>(cmd.columns().length);
                boolean allFieldsNullable = true;
                for (GridSqlColumn col : cmd.columns()) {
                    if (tbl.doesColumnExist(col.columnName())) {
                        if ((!cmd.ifNotExists() || cmd.columns().length != 1)) {
                            throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_EXISTS, col.columnName());
                        } else {
                            cols = null;
                            break;
                        }
                    }
                    QueryField field = new QueryField(col.columnName(), DataType.getTypeClassName(col.column().getType()), col.column().isNullable(), col.defaultValue());
                    cols.add(field);
                    allFieldsNullable &= field.isNullable();
                }
                if (cols != null) {
                    assert tbl.rowDescriptor() != null;
                    if (!allFieldsNullable)
                        QueryUtils.checkNotNullAllowed(tbl.cache().config());
                    fut = ctx.query().dynamicColumnAdd(tbl.cacheName(), cmd.schemaName(), tbl.rowDescriptor().type().tableName(), cols, cmd.ifTableExists(), cmd.ifNotExists());
                }
            }
        } else if (stmt0 instanceof GridSqlAlterTableDropColumn) {
            GridSqlAlterTableDropColumn cmd = (GridSqlAlterTableDropColumn) stmt0;
            GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
            if (tbl == null && cmd.ifTableExists()) {
                ctx.cache().createMissingQueryCaches();
                tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
            }
            if (tbl == null) {
                if (!cmd.ifTableExists())
                    throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
            } else {
                assert tbl.rowDescriptor() != null;
                if (QueryUtils.isSqlType(tbl.rowDescriptor().type().valueClass()))
                    throw new SchemaOperationException("Cannot drop column(s) because table was created " + "with " + PARAM_WRAP_VALUE + "=false option.");
                List<String> cols = new ArrayList<>(cmd.columns().length);
                GridQueryTypeDescriptor type = tbl.rowDescriptor().type();
                for (String colName : cmd.columns()) {
                    if (!tbl.doesColumnExist(colName)) {
                        if ((!cmd.ifExists() || cmd.columns().length != 1)) {
                            throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, colName);
                        } else {
                            cols = null;
                            break;
                        }
                    }
                    SchemaOperationException err = QueryUtils.validateDropColumn(type, colName);
                    if (err != null)
                        throw err;
                    cols.add(colName);
                }
                if (cols != null) {
                    fut = ctx.query().dynamicColumnRemove(tbl.cacheName(), cmd.schemaName(), type.tableName(), cols, cmd.ifTableExists(), cmd.ifExists());
                }
            }
        } else
            throw new IgniteSQLException("Unsupported DDL operation: " + sql, IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
        if (fut != null)
            fut.get();
        QueryCursorImpl<List<?>> resCur = (QueryCursorImpl<List<?>>) new QueryCursorImpl(Collections.singletonList(Collections.singletonList(0L)), null, false);
        resCur.fieldsMeta(UPDATE_RESULT_META);
        return resCur;
    } catch (SchemaOperationException e) {
        U.error(null, "DDL operation failure", e);
        throw convert(e);
    } catch (IgniteSQLException e) {
        throw e;
    } catch (Exception e) {
        throw new IgniteSQLException(e.getMessage(), e);
    }
}
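The same DDL can also be issued over JDBC; the thin driver routes statements to this processor. A minimal sketch, assuming a node is listening on localhost and that the ignite-core jar is on the classpath; the URL, table and column names are assumptions made for illustration only.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class JdbcDdlSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1/");
             Statement stmt = conn.createStatement()) {
            // GridSqlCreateTable branch: only the PUBLIC schema is allowed, as enforced above.
            stmt.executeUpdate("CREATE TABLE IF NOT EXISTS City (id BIGINT PRIMARY KEY, name VARCHAR) " +
                "WITH \"template=replicated\"");

            // GridSqlAlterTableAddColumn / GridSqlAlterTableDropColumn branches.
            stmt.executeUpdate("ALTER TABLE City ADD COLUMN IF NOT EXISTS population INT");
            stmt.executeUpdate("ALTER TABLE City DROP COLUMN IF EXISTS population");

            // GridSqlDropTable branch.
            stmt.executeUpdate("DROP TABLE IF EXISTS City");
        }
    }
}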
Use of org.apache.ignite.internal.processors.query.schema.SchemaOperationException in project ignite by apache.
The class GridCacheUtils, method convertToCacheException.
/**
* @param e Ignite checked exception.
* @return Runtime exception (typically a {@code CacheException}) corresponding to the given checked exception; never null.
*/
@NotNull
public static RuntimeException convertToCacheException(IgniteCheckedException e) {
    IgniteClientDisconnectedCheckedException disconnectedErr = e.getCause(IgniteClientDisconnectedCheckedException.class);
    if (disconnectedErr != null) {
        assert disconnectedErr.reconnectFuture() != null : disconnectedErr;
        e = disconnectedErr;
    }
    if (e.hasCause(CacheWriterException.class))
        return new CacheWriterException(U.convertExceptionNoWrap(e));
    if (e instanceof CachePartialUpdateCheckedException)
        return new CachePartialUpdateException((CachePartialUpdateCheckedException) e);
    else if (e.hasCause(ClusterTopologyServerNotFoundException.class))
        return new CacheServerNotFoundException(e.getMessage(), e);
    else if (e instanceof SchemaOperationException)
        return new CacheException(e.getMessage(), e);
    CacheException ce = X.cause(e, CacheException.class);
    if (ce != null)
        return ce;
    if (e.getCause() instanceof NullPointerException)
        return (NullPointerException) e.getCause();
    if (e.getCause() instanceof SecurityException)
        return (SecurityException) e.getCause();
    C1<IgniteCheckedException, IgniteException> converter = U.getExceptionConverter(e.getClass());
    return converter != null ? new CacheException(converter.apply(e)) : new CacheException(e);
}
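Because convertToCacheException keeps the SchemaOperationException as the cause of the CacheException it returns, callers of the public cache API can in principle recover the schema error code. The following is a hedged sketch of that idea, reusing the cache and Person table assumed earlier; it relies on the assumption that the failure surfaces as a CacheException with the schema exception still in its cause chain, and the statement and index name are illustrative.

import javax.cache.CacheException;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.internal.processors.query.schema.SchemaOperationException;
import org.apache.ignite.internal.util.typedef.X;

public class SchemaErrorSketch {
    static void createIndexIgnoringDuplicates(IgniteCache<?, ?> cache) {
        try {
            cache.query(new SqlFieldsQuery("CREATE INDEX person_name_idx ON Person (name)")).getAll();
        }
        catch (CacheException e) {
            // Unwrap the schema error, if any, from the cause chain.
            SchemaOperationException schemaErr = X.cause(e, SchemaOperationException.class);

            // Rethrow anything that is not an "index already exists" schema error.
            if (schemaErr == null || schemaErr.code() != SchemaOperationException.CODE_INDEX_EXISTS)
                throw e;
        }
    }
}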