Use of org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor in project ignite by apache.
The class UpdatePlan, method processRow.
/**
 * Convert a row into a key-value pair.
 *
 * @param row Row to process.
 * @return Key-value pair.
 * @throws IgniteCheckedException If failed.
 */
public IgniteBiTuple<?, ?> processRow(List<?> row) throws IgniteCheckedException {
    if (mode != BULK_LOAD && row.size() != colNames.length)
        throw new IgniteSQLException("Not enough values in a row: " + row.size() + " instead of " + colNames.length,
            IgniteQueryErrorCode.ENTRY_PROCESSING);

    GridH2RowDescriptor rowDesc = tbl.rowDescriptor();
    GridQueryTypeDescriptor desc = rowDesc.type();
    GridCacheContext cctx = rowDesc.context();

    Object key = keySupplier.apply(row);

    if (QueryUtils.isSqlType(desc.keyClass())) {
        assert keyColIdx != -1;

        key = DmlUtils.convert(key, rowDesc, desc.keyClass(), colTypes[keyColIdx], colNames[keyColIdx]);
    }

    Object val = valSupplier.apply(row);

    if (QueryUtils.isSqlType(desc.valueClass())) {
        assert valColIdx != -1;

        val = DmlUtils.convert(val, rowDesc, desc.valueClass(), colTypes[valColIdx], colNames[valColIdx]);
    }

    if (key == null) {
        if (F.isEmpty(desc.keyFieldName()))
            throw new IgniteSQLException("Key for INSERT, COPY, or MERGE must not be null", IgniteQueryErrorCode.NULL_KEY);
        else
            throw new IgniteSQLException("Null value is not allowed for column '" + desc.keyFieldName() + "'",
                IgniteQueryErrorCode.NULL_KEY);
    }

    if (val == null) {
        if (F.isEmpty(desc.valueFieldName()))
            throw new IgniteSQLException("Value for INSERT, COPY, MERGE, or UPDATE must not be null",
                IgniteQueryErrorCode.NULL_VALUE);
        else
            throw new IgniteSQLException("Null value is not allowed for column '" + desc.valueFieldName() + "'",
                IgniteQueryErrorCode.NULL_VALUE);
    }

    int actualColCnt = Math.min(colNames.length, row.size());

    Map<String, Object> newColVals = new HashMap<>();

    for (int i = 0; i < actualColCnt; i++) {
        if (i == keyColIdx || i == valColIdx)
            continue;

        String colName = colNames[i];

        GridQueryProperty prop = desc.property(colName);

        assert prop != null;

        Class<?> expCls = prop.type();

        newColVals.put(colName, DmlUtils.convert(row.get(i), rowDesc, expCls, colTypes[i], colNames[i]));
    }

    desc.setDefaults(key, val);

    // We update columns in the order specified by the table for a reason - table's
    // column order preserves their precedence for correct update of nested properties.
    Column[] tblCols = tbl.getColumns();
    // First two columns are _key and _val; skip them.
    for (int i = QueryUtils.DEFAULT_COLUMNS_COUNT; i < tblCols.length; i++) {
        if (tbl.rowDescriptor().isKeyValueOrVersionColumn(i))
            continue;

        String colName = tblCols[i].getName();

        if (!newColVals.containsKey(colName))
            continue;

        Object colVal = newColVals.get(colName);

        desc.setValue(colName, key, val, colVal);
    }

    if (cctx.binaryMarshaller()) {
        if (key instanceof BinaryObjectBuilder)
            key = ((BinaryObjectBuilder) key).build();

        if (val instanceof BinaryObjectBuilder)
            val = ((BinaryObjectBuilder) val).build();
    }

    desc.validateKeyAndValue(key, val);

    return new IgniteBiTuple<>(key, val);
}
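For orientation, a minimal, hypothetical sketch of how the key-value pair produced by processRow could be consumed. UpdatePlan is an internal class normally driven by Ignite's own DML engine rather than by user code; the helper name putRow, its parameters, and the idea of storing the pair via IgniteCache#put are assumptions made purely for illustration.

import java.util.List;

import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan;
import org.apache.ignite.lang.IgniteBiTuple;

/** Sketch only: converts one SQL row with the plan shown above and stores the result. */
static void putRow(UpdatePlan plan, IgniteCache<Object, Object> cache, List<?> row) throws IgniteCheckedException {
    // Keys and values are built by the plan's key/value suppliers and validated
    // against the GridQueryTypeDescriptor, as shown in processRow above.
    IgniteBiTuple<?, ?> pair = plan.processRow(row);

    // IgniteBiTuple implements Map.Entry, so getKey()/getValue() are available.
    cache.put(pair.getKey(), pair.getValue());
}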
Use of org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor in project ignite by apache.
The class H2DynamicTableSelfTest, method assertAffinityCacheConfiguration.
/**
 * Check that a dynamic cache created with {@code CREATE TABLE} is correctly configured affinity-wise.
 *
 * @param cacheName Cache name to check.
 * @param affKeyFieldName Expected affinity key field name.
 */
private void assertAffinityCacheConfiguration(String cacheName, String affKeyFieldName) {
    String actualCacheName = cacheName(cacheName);

    Collection<GridQueryTypeDescriptor> types = client().context().query().types(actualCacheName);

    assertEquals(1, types.size());

    GridQueryTypeDescriptor type = types.iterator().next();

    assertTrue(type.name().startsWith(actualCacheName));
    assertEquals(cacheName, type.tableName());
    assertEquals(affKeyFieldName, type.affinityKey());

    GridH2Table tbl = ((IgniteH2Indexing) queryProcessor(client()).getIndexing()).schemaManager().dataTable("PUBLIC", cacheName);

    assertNotNull(tbl);
    assertNotNull(tbl.getAffinityKeyColumn());
    assertEquals(affKeyFieldName, tbl.getAffinityKeyColumn().columnName);
}
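A hedged example of how this assertion helper might be driven from a test. The table definition, the affinity_key value, the test method name, and the execute() helper assumed to run SQL on the client node are illustrative assumptions, not code taken from H2DynamicTableSelfTest.

/** Sketch: creates a table with an explicit affinity key and checks the resulting cache configuration. */
@Test
public void testAffinityKeyIsApplied() throws Exception {
    // Hypothetical table; the unquoted column name is assumed to be stored upper-cased as CITY_ID.
    execute("CREATE TABLE \"Person\" (id INT, city_id INT, name VARCHAR, " +
        "PRIMARY KEY (id, city_id)) WITH \"affinity_key=city_id\"");

    assertAffinityCacheConfiguration("Person", "CITY_ID");
}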
Use of org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor in project ignite by apache.
The class CommandProcessor, method runCommandNativeDdl.
/**
 * Run a DDL statement.
 *
 * @param sql Original SQL.
 * @param cmd Command.
 */
private void runCommandNativeDdl(String sql, SqlCommand cmd) {
    IgniteInternalFuture fut = null;

    try {
        isDdlOnSchemaSupported(cmd.schemaName());

        finishActiveTxIfNecessary();

        if (cmd instanceof SqlCreateIndexCommand) {
            SqlCreateIndexCommand cmd0 = (SqlCreateIndexCommand) cmd;

            GridH2Table tbl = schemaMgr.dataTable(cmd0.schemaName(), cmd0.tableName());

            if (tbl == null)
                throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd0.tableName());

            assert tbl.rowDescriptor() != null;

            ensureDdlSupported(tbl);

            QueryIndex newIdx = new QueryIndex();

            newIdx.setName(cmd0.indexName());
            newIdx.setIndexType(cmd0.spatial() ? QueryIndexType.GEOSPATIAL : QueryIndexType.SORTED);

            LinkedHashMap<String, Boolean> flds = new LinkedHashMap<>();

            // Let's replace H2's table and property names by those operated by GridQueryProcessor.
            GridQueryTypeDescriptor typeDesc = tbl.rowDescriptor().type();

            for (SqlIndexColumn col : cmd0.columns()) {
                GridQueryProperty prop = typeDesc.property(col.name());

                if (prop == null)
                    throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, col.name());

                flds.put(prop.name(), !col.descending());
            }

            newIdx.setFields(flds);
            newIdx.setInlineSize(cmd0.inlineSize());

            fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd.schemaName(), typeDesc.tableName(), newIdx,
                cmd0.ifNotExists(), cmd0.parallel());
        }
        else if (cmd instanceof SqlDropIndexCommand) {
            SqlDropIndexCommand cmd0 = (SqlDropIndexCommand) cmd;

            GridH2Table tbl = schemaMgr.dataTableForIndex(cmd0.schemaName(), cmd0.indexName());

            if (tbl != null) {
                ensureDdlSupported(tbl);

                fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd0.schemaName(), cmd0.indexName(), cmd0.ifExists());
            }
            else {
                if (cmd0.ifExists())
                    fut = new GridFinishedFuture();
                else
                    throw new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, cmd0.indexName());
            }
        }
        else if (cmd instanceof SqlAlterTableCommand) {
            SqlAlterTableCommand cmd0 = (SqlAlterTableCommand) cmd;

            GridH2Table tbl = schemaMgr.dataTable(cmd0.schemaName(), cmd0.tableName());

            if (tbl == null) {
                throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd0.tableName());
            }

            Boolean logging = cmd0.logging();

            assert logging != null : "Only LOGGING/NOLOGGING are supported at the moment.";

            IgniteCluster cluster = ctx.grid().cluster();

            if (logging) {
                boolean res = cluster.enableWal(tbl.cacheName());

                if (!res)
                    throw new IgniteSQLException("Logging already enabled for table: " + cmd0.tableName());
            }
            else {
                boolean res = cluster.disableWal(tbl.cacheName());

                if (!res)
                    throw new IgniteSQLException("Logging already disabled for table: " + cmd0.tableName());
            }

            fut = new GridFinishedFuture();
        }
        else if (cmd instanceof SqlCreateUserCommand) {
            SqlCreateUserCommand addCmd = (SqlCreateUserCommand) cmd;

            ctx.security().createUser(addCmd.userName(), addCmd.password().toCharArray());
        }
        else if (cmd instanceof SqlAlterUserCommand) {
            SqlAlterUserCommand altCmd = (SqlAlterUserCommand) cmd;

            ctx.security().alterUser(altCmd.userName(), altCmd.password().toCharArray());
        }
        else if (cmd instanceof SqlDropUserCommand) {
            SqlDropUserCommand dropCmd = (SqlDropUserCommand) cmd;

            ctx.security().dropUser(dropCmd.userName());
        }
        else if (cmd instanceof SqlAnalyzeCommand)
            processAnalyzeCommand((SqlAnalyzeCommand) cmd);
        else if (cmd instanceof SqlRefreshStatitsicsCommand)
            processRefreshStatisticsCommand((SqlRefreshStatitsicsCommand) cmd);
        else if (cmd instanceof SqlDropStatisticsCommand)
            processDropStatisticsCommand((SqlDropStatisticsCommand) cmd);
        else
            throw new IgniteSQLException("Unsupported DDL operation: " + sql, IgniteQueryErrorCode.UNSUPPORTED_OPERATION);

        if (fut != null)
            fut.get();
    }
    catch (SchemaOperationException e) {
        throw convert(e);
    }
    catch (IgniteSQLException e) {
        throw e;
    }
    catch (Exception e) {
        throw new IgniteSQLException(e.getMessage(), e);
    }
}
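The CREATE INDEX branch above is where GridQueryTypeDescriptor matters most: SQL-level column names have to be remapped to the property names known to GridQueryProcessor before the index is created. Below is a hedged sketch of that remapping step in isolation; the helper name toQueryIndex, its parameters, and the use of IllegalArgumentException instead of SchemaOperationException are assumptions for illustration only.

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.ignite.cache.QueryIndex;
import org.apache.ignite.cache.QueryIndexType;
import org.apache.ignite.internal.processors.query.GridQueryProperty;
import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor;

/** Sketch: builds a QueryIndex whose fields use the property names registered with the query processor. */
static QueryIndex toQueryIndex(GridQueryTypeDescriptor typeDesc, String idxName,
    Map<String, Boolean> sqlCols /* SQL column name -> ascending? */) {
    QueryIndex idx = new QueryIndex();

    idx.setName(idxName);
    idx.setIndexType(QueryIndexType.SORTED);

    LinkedHashMap<String, Boolean> flds = new LinkedHashMap<>();

    for (Map.Entry<String, Boolean> col : sqlCols.entrySet()) {
        // Resolve the SQL column to the property known to GridQueryProcessor.
        GridQueryProperty prop = typeDesc.property(col.getKey());

        if (prop == null)
            throw new IllegalArgumentException("Column not found: " + col.getKey());

        flds.put(prop.name(), col.getValue());
    }

    idx.setFields(flds);

    return idx;
}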
Use of org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor in project ignite by apache.
The class H2Utils, method unwrapKeyColumns.
/**
 * Create a list of index columns. Where possible, _KEY columns are unwrapped.
 *
 * @param tbl GridH2Table instance.
 * @param idxCols List of index columns.
 *
 * @return Array of key and affinity columns. The key is, where possible, split into its simple components.
 */
@NotNull
public static IndexColumn[] unwrapKeyColumns(GridH2Table tbl, IndexColumn[] idxCols) {
    ArrayList<IndexColumn> keyCols = new ArrayList<>();

    boolean isSql = tbl.rowDescriptor().tableDescriptor().sql();

    if (!isSql)
        return idxCols;

    GridQueryTypeDescriptor type = tbl.rowDescriptor().type();

    for (IndexColumn idxCol : idxCols) {
        if (idxCol.column.getColumnId() == KEY_COL) {
            if (QueryUtils.isSqlType(type.keyClass())) {
                int altKeyColId = tbl.rowDescriptor().getAlternativeColumnId(QueryUtils.KEY_COL);

                // Remap simple key to alternative column.
                IndexColumn idxKeyCol = new IndexColumn();

                idxKeyCol.column = tbl.getColumn(altKeyColId);
                idxKeyCol.columnName = idxKeyCol.column.getName();
                idxKeyCol.sortType = idxCol.sortType;

                keyCols.add(idxKeyCol);
            }
            else {
                boolean added = false;

                for (String propName : type.fields().keySet()) {
                    GridQueryProperty prop = type.property(propName);

                    if (prop.key()) {
                        added = true;

                        Column col = tbl.getColumn(propName);

                        keyCols.add(tbl.indexColumn(col.getColumnId(), SortOrder.ASCENDING));
                    }
                }
                // If no key fields were found, we have to fall back to the whole-key index.
                if (!added)
                    keyCols.add(idxCol);
            }
        }
        else
            keyCols.add(idxCol);
    }

    return keyCols.toArray(new IndexColumn[0]);
}
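The key-unwrapping decision hinges on two GridQueryTypeDescriptor calls: keyClass() to detect SQL-primitive keys and property(...).key() to find the key's component fields. A hedged, self-contained sketch of just that decision follows; the helper name keyFieldsForIndex and the empty-list convention for the whole-key fallback are assumptions for illustration.

import java.util.ArrayList;
import java.util.List;

import org.apache.ignite.internal.processors.query.GridQueryProperty;
import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor;
import org.apache.ignite.internal.processors.query.QueryUtils;

/** Sketch: returns the key component fields, or an empty list when the whole _KEY column must be used. */
static List<String> keyFieldsForIndex(GridQueryTypeDescriptor type) {
    List<String> keyFields = new ArrayList<>();

    // SQL-primitive keys have no components to unwrap; the method above remaps
    // them to the alternative key column instead.
    if (QueryUtils.isSqlType(type.keyClass()))
        return keyFields;

    for (String propName : type.fields().keySet()) {
        GridQueryProperty prop = type.property(propName);

        if (prop.key())
            keyFields.add(propName);
    }

    return keyFields; // Empty means: fall back to the whole-key index, as above.
}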
Use of org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor in project ignite by apache.
The class AbstractSchemaSelfTest, method types.
/**
* Get available types on the given node for the given cache.
*
* @param node Node.
* @param cacheName Cache name.
* @return Map from table name to type.
*/
protected static Map<String, QueryTypeDescriptorImpl> types(IgniteEx node, String cacheName) {
    Map<String, QueryTypeDescriptorImpl> res = new HashMap<>();

    Collection<GridQueryTypeDescriptor> descs = node.context().query().types(cacheName);

    for (GridQueryTypeDescriptor desc : descs) {
        QueryTypeDescriptorImpl desc0 = (QueryTypeDescriptorImpl) desc;

        res.put(desc0.tableName(), desc0);
    }

    return res;
}
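A hedged usage sketch for this helper inside a test method; the node accessor grid(0), the cache name PERSON_CACHE, and the expected table name PERSON are hypothetical and would come from the concrete test setup.

// Hypothetical test fragment: look up the single type registered for a cache.
Map<String, QueryTypeDescriptorImpl> typeMap = types(grid(0), "PERSON_CACHE");

QueryTypeDescriptorImpl personType = typeMap.get("PERSON");

assertNotNull("Type for table PERSON should be registered", personType);
assertEquals("PERSON", personType.tableName());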