Use of org.h2.command.dml.Replace in project ignite by apache.
The class DdlStatementsProcessor, method runDdlStatement.
/**
 * Execute DDL statement.
 *
 * @param sql SQL.
 * @param stmt H2 statement to parse and execute.
 */
@SuppressWarnings("unchecked")
public FieldsQueryCursor<List<?>> runDdlStatement(String sql, PreparedStatement stmt) throws IgniteCheckedException {
    assert stmt instanceof JdbcPreparedStatement;

    IgniteInternalFuture fut = null;

    try {
        GridSqlStatement stmt0 = new GridSqlQueryParser(false).parse(GridSqlQueryParser.prepared(stmt));

        if (stmt0 instanceof GridSqlCreateIndex) {
            GridSqlCreateIndex cmd = (GridSqlCreateIndex)stmt0;

            GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());

            if (tbl == null)
                throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());

            assert tbl.rowDescriptor() != null;

            QueryIndex newIdx = new QueryIndex();

            newIdx.setName(cmd.index().getName());
            newIdx.setIndexType(cmd.index().getIndexType());

            LinkedHashMap<String, Boolean> flds = new LinkedHashMap<>();

            // Let's replace H2's table and property names by those operated by GridQueryProcessor.
            GridQueryTypeDescriptor typeDesc = tbl.rowDescriptor().type();

            for (Map.Entry<String, Boolean> e : cmd.index().getFields().entrySet()) {
                GridQueryProperty prop = typeDesc.property(e.getKey());

                if (prop == null)
                    throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, e.getKey());

                flds.put(prop.name(), e.getValue());
            }

            newIdx.setFields(flds);

            fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd.schemaName(), typeDesc.tableName(),
                newIdx, cmd.ifNotExists());
        }
        else if (stmt0 instanceof GridSqlDropIndex) {
            GridSqlDropIndex cmd = (GridSqlDropIndex)stmt0;

            GridH2Table tbl = idx.dataTableForIndex(cmd.schemaName(), cmd.indexName());

            if (tbl != null) {
                fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd.schemaName(), cmd.indexName(), cmd.ifExists());
            }
            else {
                if (cmd.ifExists())
                    fut = new GridFinishedFuture();
                else
                    throw new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, cmd.indexName());
            }
        }
        else if (stmt0 instanceof GridSqlCreateTable) {
            GridSqlCreateTable cmd = (GridSqlCreateTable)stmt0;

            if (!F.eq(QueryUtils.DFLT_SCHEMA, cmd.schemaName()))
                throw new SchemaOperationException("CREATE TABLE can only be executed on " +
                    QueryUtils.DFLT_SCHEMA + " schema.");

            GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());

            if (tbl != null) {
                if (!cmd.ifNotExists())
                    throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_EXISTS, cmd.tableName());
            }
            else {
                ctx.query().dynamicTableCreate(cmd.schemaName(), toQueryEntity(cmd), cmd.templateName(),
                    cmd.atomicityMode(), cmd.backups(), cmd.ifNotExists());
            }
        }
        else if (stmt0 instanceof GridSqlDropTable) {
            GridSqlDropTable cmd = (GridSqlDropTable)stmt0;

            if (!F.eq(QueryUtils.DFLT_SCHEMA, cmd.schemaName()))
                throw new SchemaOperationException("DROP TABLE can only be executed on " +
                    QueryUtils.DFLT_SCHEMA + " schema.");

            GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());

            if (tbl == null) {
                if (!cmd.ifExists())
                    throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
            }
            else
                ctx.query().dynamicTableDrop(tbl.cacheName(), cmd.tableName(), cmd.ifExists());
        }
        else
            throw new IgniteSQLException("Unsupported DDL operation: " + sql, IgniteQueryErrorCode.UNSUPPORTED_OPERATION);

        if (fut != null)
            fut.get();

        QueryCursorImpl<List<?>> resCur = (QueryCursorImpl<List<?>>)new QueryCursorImpl(
            Collections.singletonList(Collections.singletonList(0L)), null, false);

        resCur.fieldsMeta(UPDATE_RESULT_META);

        return resCur;
    }
    catch (SchemaOperationException e) {
        throw convert(e);
    }
    catch (IgniteSQLException e) {
        throw e;
    }
    catch (Exception e) {
        throw new IgniteSQLException("Unexpected DDL operation failure: " + e.getMessage(), e);
    }
}
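The CREATE INDEX branch above translates H2's parsed AST into Ignite's public QueryIndex descriptor before handing it to the query processor. A minimal sketch of the equivalent descriptor built through Ignite's public API; the index, table, and column names ("PERSON_NAME_AGE_IDX", "name", "age") are hypothetical:

import java.util.LinkedHashMap;
import org.apache.ignite.cache.QueryIndex;
import org.apache.ignite.cache.QueryIndexType;

public class CreateIndexSketch {
    public static QueryIndex personNameAgeIndex() {
        QueryIndex idx = new QueryIndex();
        idx.setName("PERSON_NAME_AGE_IDX");
        idx.setIndexType(QueryIndexType.SORTED);

        // Field order matters for a sorted index, hence LinkedHashMap; the
        // Boolean value is the ascending (true) / descending (false) flag,
        // mirroring the flds map populated in runDdlStatement.
        LinkedHashMap<String, Boolean> flds = new LinkedHashMap<>();
        flds.put("name", true);
        flds.put("age", false);
        idx.setFields(flds);

        return idx;
    }
}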
Use of org.h2.command.dml.Replace in project ignite by apache.
The class GridH2SpatialIndex, method put.
/** {@inheritDoc} */
@Override public GridH2Row put(GridH2Row row) {
    assert row instanceof GridH2KeyValueRowOnheap : "requires key to be at 0";

    Lock l = lock.writeLock();

    l.lock();

    try {
        checkClosed();

        Value key = row.getValue(KEY_COL);

        assert key != null;

        final int seg = segmentForRow(row);

        Long rowId = keyToId.get(key);

        if (rowId != null) {
            Long oldRowId = segments[seg].remove(getEnvelope(idToRow.get(rowId), rowId));

            assert rowId.equals(oldRowId);
        }
        else {
            rowId = ++rowIds;

            keyToId.put(key, rowId);
        }

        GridH2Row old = idToRow.put(rowId, row);

        segments[seg].put(getEnvelope(row, rowId), rowId);

        if (old == null)
            // No replace.
            rowCnt++;

        return old;
    }
    finally {
        l.unlock();
    }
}
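The method is an insert-or-replace under a write lock: resolve an existing row id by key, evict the stale entry, install the new row, and return the old one (or null when nothing was replaced). A self-contained sketch of that idiom; the class and field names here are illustrative, not Ignite's:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ReplacingStore<K, V> {
    private final Map<K, Long> keyToId = new HashMap<>();
    private final Map<Long, V> idToRow = new HashMap<>();
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private long rowIds;
    private int rowCnt;

    /** Inserts the row, replacing any previous row with the same key; returns the old row or null. */
    public V put(K key, V row) {
        Lock l = lock.writeLock();
        l.lock();
        try {
            Long rowId = keyToId.get(key);
            if (rowId == null) {
                // New key: allocate a fresh row id.
                rowId = ++rowIds;
                keyToId.put(key, rowId);
            }
            // Non-null result means this put is a replace.
            V old = idToRow.put(rowId, row);
            if (old == null)
                rowCnt++; // No replace: a genuinely new row.
            return old;
        }
        finally {
            l.unlock();
        }
    }
}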
Use of org.h2.command.dml.Replace in project ignite by apache.
The class GridReduceQueryExecutor, method partitionedUnstableDataNodes.
/**
 * Calculates partition mapping for partitioned cache on unstable topology.
 *
 * @param cacheIds Cache IDs.
 * @return Partition mapping or {@code null} if we can't calculate it due to repartitioning and we need to retry.
 */
@SuppressWarnings("unchecked")
private Map<ClusterNode, IntArray> partitionedUnstableDataNodes(List<Integer> cacheIds) {
    // If the main cache is replicated, just replace it with the first partitioned.
    GridCacheContext<?, ?> cctx = findFirstPartitioned(cacheIds);

    final int partsCnt = cctx.affinity().partitions();

    if (cacheIds.size() > 1) {
        // Check correct number of partitions for partitioned caches.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;

            int parts = extraCctx.affinity().partitions();

            if (parts != partsCnt)
                throw new CacheException("Number of partitions must be the same for correct collocation [cache1=" +
                    cctx.name() + ", parts1=" + partsCnt + ", cache2=" + extraCctx.name() + ", parts2=" + parts + "]");
        }
    }

    Set<ClusterNode>[] partLocs = new Set[partsCnt];

    // Fill partition locations for main cache.
    for (int p = 0; p < partsCnt; p++) {
        List<ClusterNode> owners = cctx.topology().owners(p);

        if (F.isEmpty(owners)) {
            // Handle special case: no mapping is configured for a partition.
            if (F.isEmpty(cctx.affinity().assignment(NONE).get(p))) {
                // Mark unmapped partition.
                partLocs[p] = UNMAPPED_PARTS;

                continue;
            }
            else if (!F.isEmpty(dataNodes(cctx.groupId(), NONE)))
                // Retry.
                return null;

            throw new CacheException("Failed to find data nodes [cache=" + cctx.name() + ", part=" + p + "]");
        }

        partLocs[p] = new HashSet<>(owners);
    }

    if (cacheIds.size() > 1) {
        // We need this for logical collocation between different partitioned caches with the same affinity.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            // This is possible if we have replaced a replicated cache with a partitioned one earlier.
            if (cctx == extraCctx)
                continue;

            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;

            for (int p = 0, parts = extraCctx.affinity().partitions(); p < parts; p++) {
                List<ClusterNode> owners = extraCctx.topology().owners(p);

                if (partLocs[p] == UNMAPPED_PARTS)
                    // Skip unmapped partitions.
                    continue;

                if (F.isEmpty(owners)) {
                    if (!F.isEmpty(dataNodes(extraCctx.groupId(), NONE)))
                        // Retry.
                        return null;

                    throw new CacheException("Failed to find data nodes [cache=" + extraCctx.name() + ", part=" + p + "]");
                }

                if (partLocs[p] == null)
                    partLocs[p] = new HashSet<>(owners);
                else {
                    // Intersection of owners.
                    partLocs[p].retainAll(owners);

                    if (partLocs[p].isEmpty())
                        // Intersection is empty -> retry.
                        return null;
                }
            }
        }

        // Filter out nodes where not all of the replicated caches are loaded.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            if (!extraCctx.isReplicated())
                continue;

            Set<ClusterNode> dataNodes = replicatedUnstableDataNodes(extraCctx);

            if (F.isEmpty(dataNodes))
                // Retry.
                return null;

            for (Set<ClusterNode> partLoc : partLocs) {
                if (partLoc == UNMAPPED_PARTS)
                    // Skip unmapped partition.
                    continue;

                partLoc.retainAll(dataNodes);

                if (partLoc.isEmpty())
                    // Retry.
                    return null;
            }
        }
    }

    // Collect the final partitions mapping.
    Map<ClusterNode, IntArray> res = new HashMap<>();

    // Here partitions in all IntArray's will be sorted in ascending order; this is important.
    for (int p = 0; p < partLocs.length; p++) {
        Set<ClusterNode> pl = partLocs[p];

        // Skip unmapped partitions.
        if (pl == UNMAPPED_PARTS)
            continue;

        assert !F.isEmpty(pl) : pl;

        ClusterNode n = pl.size() == 1 ? F.first(pl) : F.rand(pl);

        IntArray parts = res.get(n);

        if (parts == null)
            res.put(n, parts = new IntArray());

        parts.add(p);
    }

    return res;
}
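The core of this mapping is a per-partition intersection of owner sets across the collocated caches, with null acting as a "topology moved, retry" signal whenever an intersection goes empty. A sketch of that step with Ignite's types swapped for plain strings; the method and parameter names are hypothetical:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class OwnerIntersection {
    /**
     * @param perCacheOwners For each cache, the owner nodes of one partition.
     * @return Nodes owning the partition in every cache, or null to signal retry.
     */
    static Set<String> intersectOwners(List<Set<String>> perCacheOwners) {
        Set<String> res = null;

        for (Set<String> owners : perCacheOwners) {
            if (res == null)
                res = new HashSet<>(owners);
            else {
                // Intersection of owners.
                res.retainAll(owners);

                if (res.isEmpty())
                    // Intersection is empty -> retry.
                    return null;
            }
        }

        return res;
    }
}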
Use of org.h2.command.dml.Replace in project jackrabbit-oak by apache.
The class BnfSyntax, method getLink.
/**
 * Get the HTML link to the given token.
 *
 * @param bnf the BNF
 * @param token the token
 * @return the HTML link
 */
String getLink(Bnf bnf, String token) {
    RuleHead found = null;
    String key = Bnf.getRuleMapKey(token);
    for (int i = 0; i < token.length(); i++) {
        String test = StringUtils.toLowerEnglish(key.substring(i));
        RuleHead r = bnf.getRuleHead(test);
        if (r != null) {
            found = r;
            break;
        }
    }
    if (found == null) {
        return token;
    }
    if (found.getRule() instanceof RuleFixed) {
        found.getRule().accept(this);
        return html;
    }
    String link = found.getTopic().toLowerCase().replace(' ', '_');
    link = "#" + StringUtils.urlEncode(link);
    return "<a href=\"" + link + "\">" + token + "</a>";
}
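The final step turns a rule topic into an in-page anchor: lowercase it, replace spaces with underscores, URL-encode, and wrap the token in an anchor tag. A standalone sketch using the JDK's URLEncoder in place of H2's StringUtils.urlEncode; the topic and token values are hypothetical:

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class AnchorLink {
    static String toAnchor(String topic, String token) {
        // Lowercase the topic and turn spaces into underscores before encoding.
        String link = topic.toLowerCase().replace(' ', '_');
        link = "#" + URLEncoder.encode(link, StandardCharsets.UTF_8);
        return "<a href=\"" + link + "\">" + token + "</a>";
    }

    public static void main(String[] args) {
        // Prints: <a href="#create_index">CREATE INDEX</a>
        System.out.println(toAnchor("Create Index", "CREATE INDEX"));
    }
}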
Use of org.h2.command.dml.Replace in project h2database by h2database.
The class Parser, method parseCreate.
private Prepared parseCreate() {
    boolean orReplace = false;
    if (readIf("OR")) {
        read("REPLACE");
        orReplace = true;
    }
    boolean force = readIf("FORCE");
    if (readIf("VIEW")) {
        return parseCreateView(force, orReplace);
    } else if (readIf("ALIAS")) {
        return parseCreateFunctionAlias(force);
    } else if (readIf("SEQUENCE")) {
        return parseCreateSequence();
    } else if (readIf("USER")) {
        return parseCreateUser();
    } else if (readIf("TRIGGER")) {
        return parseCreateTrigger(force);
    } else if (readIf("ROLE")) {
        return parseCreateRole();
    } else if (readIf("SCHEMA")) {
        return parseCreateSchema();
    } else if (readIf("CONSTANT")) {
        return parseCreateConstant();
    } else if (readIf("DOMAIN")) {
        return parseCreateUserDataType();
    } else if (readIf("TYPE")) {
        return parseCreateUserDataType();
    } else if (readIf("DATATYPE")) {
        return parseCreateUserDataType();
    } else if (readIf("AGGREGATE")) {
        return parseCreateAggregate(force);
    } else if (readIf("LINKED")) {
        return parseCreateLinkedTable(false, false, force);
    }
    // tables or linked tables
    boolean memory = false, cached = false;
    if (readIf("MEMORY")) {
        memory = true;
    } else if (readIf("CACHED")) {
        cached = true;
    }
    if (readIf("LOCAL")) {
        read("TEMPORARY");
        if (readIf("LINKED")) {
            return parseCreateLinkedTable(true, false, force);
        }
        read("TABLE");
        return parseCreateTable(true, false, cached);
    } else if (readIf("GLOBAL")) {
        read("TEMPORARY");
        if (readIf("LINKED")) {
            return parseCreateLinkedTable(true, true, force);
        }
        read("TABLE");
        return parseCreateTable(true, true, cached);
    } else if (readIf("TEMP") || readIf("TEMPORARY")) {
        if (readIf("LINKED")) {
            return parseCreateLinkedTable(true, true, force);
        }
        read("TABLE");
        return parseCreateTable(true, true, cached);
    } else if (readIf("TABLE")) {
        if (!cached && !memory) {
            cached = database.getDefaultTableType() == Table.TYPE_CACHED;
        }
        return parseCreateTable(false, false, cached);
    } else if (readIf("SYNONYM")) {
        return parseCreateSynonym(orReplace);
    } else {
        boolean hash = false, primaryKey = false;
        boolean unique = false, spatial = false;
        String indexName = null;
        Schema oldSchema = null;
        boolean ifNotExists = false;
        if (readIf("PRIMARY")) {
            read("KEY");
            if (readIf("HASH")) {
                hash = true;
            }
            primaryKey = true;
            if (!isToken("ON")) {
                ifNotExists = readIfNotExists();
                indexName = readIdentifierWithSchema(null);
                oldSchema = getSchema();
            }
        } else {
            if (readIf("UNIQUE")) {
                unique = true;
            }
            if (readIf("HASH")) {
                hash = true;
            }
            if (readIf("SPATIAL")) {
                spatial = true;
            }
            if (readIf("INDEX")) {
                if (!isToken("ON")) {
                    ifNotExists = readIfNotExists();
                    indexName = readIdentifierWithSchema(null);
                    oldSchema = getSchema();
                }
            } else {
                throw getSyntaxError();
            }
        }
        read("ON");
        String tableName = readIdentifierWithSchema();
        checkSchema(oldSchema);
        CreateIndex command = new CreateIndex(session, getSchema());
        command.setIfNotExists(ifNotExists);
        command.setPrimaryKey(primaryKey);
        command.setTableName(tableName);
        command.setUnique(unique);
        command.setIndexName(indexName);
        command.setComment(readCommentIf());
        read("(");
        command.setIndexColumns(parseIndexColumnList());
        if (readIf("USING")) {
            if (hash) {
                throw getSyntaxError();
            }
            if (spatial) {
                throw getSyntaxError();
            }
            if (readIf("BTREE")) {
                // default
            } else if (readIf("RTREE")) {
                spatial = true;
            } else if (readIf("HASH")) {
                hash = true;
            } else {
                throw getSyntaxError();
            }
        }
        command.setHash(hash);
        command.setSpatial(spatial);
        return command;
    }
}
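The whole method is built on H2's recursive-descent pattern: readIf consumes a token only if it matches and reports whether it did, while read demands the token and fails the parse otherwise; "CREATE OR REPLACE" falls out of one readIf("OR") followed by read("REPLACE"). A minimal sketch of that dispatch with a toy tokenizer, not H2's:

import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;

public class TinyCreateParser {
    private final Queue<String> tokens;

    TinyCreateParser(String sql) {
        tokens = new LinkedList<>(Arrays.asList(sql.trim().toUpperCase().split("\\s+")));
    }

    /** Consumes the next token and returns true only if it matches the expectation. */
    boolean readIf(String expected) {
        if (expected.equals(tokens.peek())) {
            tokens.poll();
            return true;
        }
        return false;
    }

    /** Demands the next token, failing the parse otherwise. */
    void read(String expected) {
        if (!readIf(expected))
            throw new IllegalStateException("Syntax error: expected " + expected);
    }

    String parseCreate() {
        read("CREATE");
        boolean orReplace = false;
        if (readIf("OR")) {
            // "OR" is only legal when followed by "REPLACE".
            read("REPLACE");
            orReplace = true;
        }
        if (readIf("VIEW"))
            return "CREATE " + (orReplace ? "OR REPLACE " : "") + "VIEW";
        throw new IllegalStateException("Syntax error: unsupported CREATE target");
    }

    public static void main(String[] args) {
        // Prints: CREATE OR REPLACE VIEW
        System.out.println(new TinyCreateParser("create or replace view v").parseCreate());
    }
}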