Use of org.apache.ignite.internal.processors.query.QueryField in the Apache Ignite project.
Example from the class H2DynamicColumnsAbstractBasicSelfTest, method testDroppedColumnMeta.
/**
 * Checks that column metadata disappears once the column is dropped.
 *
 * @throws Exception if failed.
 */
public void testDroppedColumnMeta() throws Exception {
    try {
        run("CREATE TABLE test (id INT PRIMARY KEY, a INT, b CHAR)");

        // Right after table creation the column metadata must be visible.
        QueryField colMeta = getColumnMeta(grid(nodeIndex()), QueryUtils.DFLT_SCHEMA, "TEST", "A");

        assertEquals("A", colMeta.name());
        assertEquals(Integer.class.getName(), colMeta.typeName());

        run("ALTER TABLE test DROP COLUMN a");

        // After the DROP COLUMN the same lookup must yield nothing.
        assertNull(getColumnMeta(grid(nodeIndex()), QueryUtils.DFLT_SCHEMA, "TEST", "A"));
    } finally {
        run("DROP TABLE IF EXISTS test");
    }
}
Use of org.apache.ignite.internal.processors.query.QueryField in the Apache Ignite project.
Example from the class H2DynamicColumnsAbstractBasicSelfTest, method testAddColumnToNonDynamicCache.
/**
 * Test that we can add columns dynamically to tables associated with non dynamic caches as well.
 */
public void testAddColumnToNonDynamicCache() throws SQLException {
    run("ALTER TABLE \"idx\".PERSON ADD COLUMN CITY varchar");

    // Give the schema change a moment to propagate across the topology.
    doSleep(500);

    QueryField expCol = c("CITY", String.class.getName());

    checkTableState("idx", "PERSON", expCol);
}
Use of org.apache.ignite.internal.processors.query.QueryField in the Apache Ignite project.
Example from the class H2DynamicColumnsAbstractBasicSelfTest, method testDropColumnFromNonDynamicCacheWithRealValueType.
/**
 * Test that we can drop columns dynamically from tables associated
 * with non dynamic caches storing user types as well.
 *
 * <p>Verifies SQL metadata after the drop, that inserts without the dropped column
 * succeed, that selecting the dropped column fails, and that values written before
 * the drop are still readable from the cache object itself.</p>
 *
 * @throws SQLException if failed.
 */
@SuppressWarnings("unchecked")
public void testDropColumnFromNonDynamicCacheWithRealValueType() throws SQLException {
    CacheConfiguration<Integer, City> ccfg = defaultCacheConfiguration().setName("City").setIndexedTypes(Integer.class, City.class);

    IgniteCache<Integer, ?> cache = ignite(nodeIndex()).getOrCreateCache(ccfg);

    run(cache, "INSERT INTO \"City\".City (_key, id, name, state_name) VALUES " + "(1, 1, 'Washington', 'DC')");

    run(cache, "ALTER TABLE \"City\".City DROP COLUMN state_name");

    // Let the schema change propagate across the topology.
    doSleep(500);

    QueryField c = c("NAME", String.class.getName());

    checkTableState("City", "CITY", c);

    // Inserts must now succeed without the dropped column...
    run(cache, "INSERT INTO \"City\".City (_key, id, name) VALUES " + "(2, 2, 'New York')");

    // ...while any SQL reference to the dropped column must fail.
    assertThrowsAnyCause("SELECT state_name FROM \"City\".City", JdbcSQLException.class, "Column \"STATE_NAME\" not found");

    List<List<?>> res = run(cache, "SELECT _key, id, name FROM \"City\".City WHERE id = 1");

    assertEquals(Collections.singletonList(Arrays.asList(1, 1, "Washington")), res);

    res = run(cache, "SELECT * FROM \"City\".City WHERE id = 2");

    assertEquals(Collections.singletonList(Arrays.asList(2, "New York")), res);

    // The column is gone from SQL metadata only: the field written before the
    // drop is still present on the stored object and remains readable.
    // Boolean.parseBoolean avoids the needless boxing of Boolean.valueOf and
    // treats a missing property as false, same as before.
    if (!Boolean.parseBoolean(GridTestProperties.getProperty(BINARY_MARSHALLER_USE_SIMPLE_NAME_MAPPER))) {
        City city = (City) cache.get(1);

        assertEquals(1, city.id());
        assertEquals("Washington", city.name());
        assertEquals("DC", city.state());

        city = (City) cache.get(2);

        assertEquals(2, city.id());
        assertEquals("New York", city.name());
        // assertNull is the idiomatic form of assertEquals(null, ...).
        assertNull(city.state());
    } else {
        BinaryObject city = (BinaryObject) cache.withKeepBinary().get(1);

        assertEquals(1, (int) city.field("id"));
        assertEquals("Washington", (String) city.field("name"));
        assertEquals("DC", (String) city.field("state"));

        city = (BinaryObject) cache.withKeepBinary().get(2);

        assertEquals(2, (int) city.field("id"));
        assertEquals("New York", (String) city.field("name"));
        assertNull((String) city.field("state"));
    }

    cache.destroy();
}
Use of org.apache.ignite.internal.processors.query.QueryField in the Apache Ignite project.
Example from the class DdlStatementsProcessor, method runDdlStatement.
/**
 * Execute DDL statement.
 *
 * <p>Parses the prepared statement and dispatches it to the corresponding dynamic schema
 * operation: CREATE/DROP INDEX, CREATE/DROP TABLE or ALTER TABLE ADD/DROP COLUMN. Where the
 * operation is asynchronous, its future is awaited before returning, so the schema change is
 * complete when this method exits.</p>
 *
 * @param sql SQL.
 * @param prepared Prepared.
 * @return Cursor on query results: a single row holding the single update-count value {@code 0L}.
 * @throws IgniteCheckedException On error.
 */
@SuppressWarnings({ "unchecked", "ThrowableResultOfMethodCallIgnored" })
public FieldsQueryCursor<List<?>> runDdlStatement(String sql, Prepared prepared) throws IgniteCheckedException {
// Future of the distributed schema-change operation; stays null when there is nothing to wait for.
IgniteInternalFuture fut = null;
try {
GridSqlStatement stmt0 = new GridSqlQueryParser(false).parse(prepared);
// CREATE INDEX: translate H2 column names into GridQueryProcessor property names
// and kick off distributed index creation.
if (stmt0 instanceof GridSqlCreateIndex) {
GridSqlCreateIndex cmd = (GridSqlCreateIndex) stmt0;
GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
if (tbl == null)
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
assert tbl.rowDescriptor() != null;
// NOTE(review): presumably throws for tables where DDL is not allowed — confirm in isDdlSupported.
isDdlSupported(tbl);
QueryIndex newIdx = new QueryIndex();
newIdx.setName(cmd.index().getName());
newIdx.setIndexType(cmd.index().getIndexType());
LinkedHashMap<String, Boolean> flds = new LinkedHashMap<>();
// Let's replace H2's table and property names by those operated by GridQueryProcessor.
GridQueryTypeDescriptor typeDesc = tbl.rowDescriptor().type();
for (Map.Entry<String, Boolean> e : cmd.index().getFields().entrySet()) {
GridQueryProperty prop = typeDesc.property(e.getKey());
if (prop == null)
throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, e.getKey());
// Value is the ascending/descending flag of the indexed column.
flds.put(prop.name(), e.getValue());
}
newIdx.setFields(flds);
fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd.schemaName(), typeDesc.tableName(), newIdx, cmd.ifNotExists(), 0);
// DROP INDEX: resolve the owning table from the index name.
} else if (stmt0 instanceof GridSqlDropIndex) {
GridSqlDropIndex cmd = (GridSqlDropIndex) stmt0;
GridH2Table tbl = idx.dataTableForIndex(cmd.schemaName(), cmd.indexName());
if (tbl != null) {
isDdlSupported(tbl);
fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd.schemaName(), cmd.indexName(), cmd.ifExists());
} else {
// Index not found: a no-op with IF EXISTS, an error otherwise.
if (cmd.ifExists())
fut = new GridFinishedFuture();
else
throw new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, cmd.indexName());
}
// CREATE TABLE: only permitted on the default (PUBLIC) schema.
} else if (stmt0 instanceof GridSqlCreateTable) {
GridSqlCreateTable cmd = (GridSqlCreateTable) stmt0;
if (!F.eq(QueryUtils.DFLT_SCHEMA, cmd.schemaName()))
throw new SchemaOperationException("CREATE TABLE can only be executed on " + QueryUtils.DFLT_SCHEMA + " schema.");
GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
if (tbl != null) {
// Table already exists: tolerated only with IF NOT EXISTS.
if (!cmd.ifNotExists())
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_EXISTS, cmd.tableName());
} else {
QueryEntity e = toQueryEntity(cmd);
CacheConfiguration<?, ?> ccfg = new CacheConfiguration<>(cmd.tableName());
ccfg.setQueryEntities(Collections.singleton(e));
ccfg.setSqlSchema(cmd.schemaName());
// Make sure the new entity does not clash with entities of existing caches.
SchemaOperationException err = QueryUtils.checkQueryEntityConflicts(ccfg, ctx.cache().cacheDescriptors().values());
if (err != null)
throw err;
ctx.query().dynamicTableCreate(cmd.schemaName(), e, cmd.templateName(), cmd.cacheName(), cmd.cacheGroup(), cmd.dataRegionName(), cmd.affinityKey(), cmd.atomicityMode(), cmd.writeSynchronizationMode(), cmd.backups(), cmd.ifNotExists());
}
// DROP TABLE: only permitted on the default (PUBLIC) schema.
} else if (stmt0 instanceof GridSqlDropTable) {
GridSqlDropTable cmd = (GridSqlDropTable) stmt0;
if (!F.eq(QueryUtils.DFLT_SCHEMA, cmd.schemaName()))
throw new SchemaOperationException("DROP TABLE can only be executed on " + QueryUtils.DFLT_SCHEMA + " schema.");
GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
// The table may belong to a statically configured cache that has not been
// started yet; start missing query caches and retry the lookup.
if (tbl == null && cmd.ifExists()) {
ctx.cache().createMissingQueryCaches();
tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
}
if (tbl == null) {
if (!cmd.ifExists())
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
} else
ctx.query().dynamicTableDrop(tbl.cacheName(), cmd.tableName(), cmd.ifExists());
// ALTER TABLE ... ADD COLUMN.
} else if (stmt0 instanceof GridSqlAlterTableAddColumn) {
GridSqlAlterTableAddColumn cmd = (GridSqlAlterTableAddColumn) stmt0;
GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
// Same lazy-cache-start retry as in DROP TABLE above.
if (tbl == null && cmd.ifTableExists()) {
ctx.cache().createMissingQueryCaches();
tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
}
if (tbl == null) {
if (!cmd.ifTableExists())
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
} else {
// Unwrapped SQL-type values have no place to store extra columns.
if (QueryUtils.isSqlType(tbl.rowDescriptor().type().valueClass()))
throw new SchemaOperationException("Cannot add column(s) because table was created " + "with " + PARAM_WRAP_VALUE + "=false option.");
List<QueryField> cols = new ArrayList<>(cmd.columns().length);
boolean allFieldsNullable = true;
for (GridSqlColumn col : cmd.columns()) {
if (tbl.doesColumnExist(col.columnName())) {
// A duplicate column is only tolerated when IF NOT EXISTS was given
// for a single-column statement; cols == null marks "skip operation".
if ((!cmd.ifNotExists() || cmd.columns().length != 1)) {
throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_EXISTS, col.columnName());
} else {
cols = null;
break;
}
}
QueryField field = new QueryField(col.columnName(), DataType.getTypeClassName(col.column().getType()), col.column().isNullable(), col.defaultValue());
cols.add(field);
allFieldsNullable &= field.isNullable();
}
if (cols != null) {
assert tbl.rowDescriptor() != null;
// NOT NULL columns are only allowed for cache configurations that support them.
if (!allFieldsNullable)
QueryUtils.checkNotNullAllowed(tbl.cache().config());
fut = ctx.query().dynamicColumnAdd(tbl.cacheName(), cmd.schemaName(), tbl.rowDescriptor().type().tableName(), cols, cmd.ifTableExists(), cmd.ifNotExists());
}
}
// ALTER TABLE ... DROP COLUMN.
} else if (stmt0 instanceof GridSqlAlterTableDropColumn) {
GridSqlAlterTableDropColumn cmd = (GridSqlAlterTableDropColumn) stmt0;
GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
// Same lazy-cache-start retry as in DROP TABLE above.
if (tbl == null && cmd.ifTableExists()) {
ctx.cache().createMissingQueryCaches();
tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
}
if (tbl == null) {
if (!cmd.ifTableExists())
throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
} else {
assert tbl.rowDescriptor() != null;
// Unwrapped SQL-type values have no individually droppable columns.
if (QueryUtils.isSqlType(tbl.rowDescriptor().type().valueClass()))
throw new SchemaOperationException("Cannot drop column(s) because table was created " + "with " + PARAM_WRAP_VALUE + "=false option.");
List<String> cols = new ArrayList<>(cmd.columns().length);
GridQueryTypeDescriptor type = tbl.rowDescriptor().type();
for (String colName : cmd.columns()) {
if (!tbl.doesColumnExist(colName)) {
// Mirror of the ADD COLUMN case: a missing column is tolerated only for a
// single-column IF EXISTS statement; cols == null marks "skip operation".
if ((!cmd.ifExists() || cmd.columns().length != 1)) {
throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, colName);
} else {
cols = null;
break;
}
}
// Reject drops of key/affinity/indexed columns etc. — see validateDropColumn.
SchemaOperationException err = QueryUtils.validateDropColumn(type, colName);
if (err != null)
throw err;
cols.add(colName);
}
if (cols != null) {
fut = ctx.query().dynamicColumnRemove(tbl.cacheName(), cmd.schemaName(), type.tableName(), cols, cmd.ifTableExists(), cmd.ifExists());
}
}
} else
throw new IgniteSQLException("Unsupported DDL operation: " + sql, IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
// Block until the distributed operation completes (if one was started).
if (fut != null)
fut.get();
// DDL always reports a single-row result with update count 0.
QueryCursorImpl<List<?>> resCur = (QueryCursorImpl<List<?>>) new QueryCursorImpl(Collections.singletonList(Collections.singletonList(0L)), null, false);
resCur.fieldsMeta(UPDATE_RESULT_META);
return resCur;
} catch (SchemaOperationException e) {
U.error(null, "DDL operation failure", e);
throw convert(e);
} catch (IgniteSQLException e) {
// Already in the right exception type: rethrow untouched.
throw e;
} catch (Exception e) {
throw new IgniteSQLException(e.getMessage(), e);
}
}
Use of org.apache.ignite.internal.processors.query.QueryField in the Apache Ignite project.
Example from the class DynamicColumnsAbstractConcurrentSelfTest, method checkClientReconnect.
/**
 * Make sure that client receives schema changes made while it was disconnected, optionally with cache restart
 * in the interim.
 *
 * @param restartCache Whether cache needs to be recreated during client's absence.
 * @param dynamicCache Whether recreate, if needed, should be done on dynamic or static cache.
 * @throws Exception If failed.
 */
private void checkClientReconnect(final boolean restartCache, boolean dynamicCache) throws Exception {
    // Start complex topology: two plain servers, one filtered server and a client node.
    final IgniteEx srvNode = ignitionStart(serverConfiguration(1));

    ignitionStart(serverConfiguration(2));
    ignitionStart(serverConfiguration(3, true));

    final Ignite cliNode = ignitionStart(clientConfiguration(4));

    if (dynamicCache) {
        createSqlCache(cliNode);

        run(cliNode, createSql);
    }

    final String schema = dynamicCache ? QueryUtils.DFLT_SCHEMA : "idx";

    final QueryField[] expCols = { c("age", Integer.class.getName()), c("city", String.class.getName()) };

    // Apply schema changes while the client is away, then check it catches up on reconnect.
    reconnectClientNode(srvNode, cliNode, restartCache, dynamicCache, new RunnableX() {
        @Override
        public void run() throws Exception {
            addCols(srvNode, schema, expCols).get();

            dropCols(srvNode, schema, "NAME").get();
        }
    });

    checkTableState(srvNode, schema, TBL_NAME, expCols);
}
Aggregations