Use of org.h2.engine.Mode in project ignite by apache.
In class GridH2ValueCacheObject, method compareSecure:
/**
 * {@inheritDoc}
 */
@SuppressWarnings("unchecked")
@Override protected int compareSecure(Value v, CompareMode mode) {
    Object o1 = getObject();
    Object o2 = v.getObject();

    boolean o1Comparable = o1 instanceof Comparable;
    boolean o2Comparable = o2 instanceof Comparable;

    if (o1Comparable && o2Comparable &&
        Utils.haveCommonComparableSuperclass(o1.getClass(), o2.getClass())) {
        Comparable<Object> c1 = (Comparable<Object>)o1;

        return c1.compareTo(o2);
    }

    // Group by types.
    if (o1.getClass() != o2.getClass()) {
        if (o1Comparable != o2Comparable)
            return o1Comparable ? -1 : 1;

        return o1.getClass().getName().compareTo(o2.getClass().getName());
    }

    // Compare hash codes.
    int h1 = hashCode();
    int h2 = v.hashCode();

    if (h1 == h2) {
        if (o1.equals(o2))
            return 0;

        return Utils.compareNotNullSigned(getBytesNoCopy(), v.getBytesNoCopy());
    }

    return h1 > h2 ? 1 : -1;
}
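The method builds a total order over payloads that may not be mutually comparable: natural ordering when both sides are Comparable with a common comparable superclass, then grouping by type (Comparables first, then class name), then hash codes with a byte-level tiebreak on collisions. A rough standalone sketch of the same strategy, which is not Ignite code: the haveCommonComparableSuperclass test is simplified to a same-class check, and Java serialization stands in for the cache object's binary form.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.io.UncheckedIOException;
import java.util.Arrays;
import java.util.Comparator;

/** Sketch of the three-tier ordering above, for plain Serializable values. */
public final class FallbackComparator implements Comparator<Serializable> {
    @Override
    @SuppressWarnings("unchecked")
    public int compare(Serializable o1, Serializable o2) {
        boolean cmp1 = o1 instanceof Comparable;
        boolean cmp2 = o2 instanceof Comparable;

        // Tier 1: natural ordering for mutually comparable values
        // (simplified here to a same-class check).
        if (cmp1 && cmp2 && o1.getClass() == o2.getClass())
            return ((Comparable<Object>)o1).compareTo(o2);

        // Tier 2: group by type; Comparables sort before non-Comparables.
        if (o1.getClass() != o2.getClass()) {
            if (cmp1 != cmp2)
                return cmp1 ? -1 : 1;

            return o1.getClass().getName().compareTo(o2.getClass().getName());
        }

        // Tier 3: hash codes, with serialized bytes as the collision tiebreak.
        int h1 = o1.hashCode(), h2 = o2.hashCode();

        if (h1 != h2)
            return h1 > h2 ? 1 : -1;

        return o1.equals(o2) ? 0 : Arrays.compare(bytes(o1), bytes(o2));
    }

    private static byte[] bytes(Serializable obj) {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(obj);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
        return bos.toByteArray();
    }
}

Because the final tiebreak compares bytes, the resulting order is arbitrary but deterministic, which is all an index or a sort needs.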
Use of org.h2.engine.Mode in project ignite by apache.
In class GridMapQueryExecutor, method onQueryRequest0:
/**
 * @param node Node that authored the request.
 * @param reqId Request ID.
 * @param segmentId Index segment ID.
 * @param schemaName Schema name.
 * @param qrys Queries to execute.
 * @param cacheIds Caches which will be affected by these queries.
 * @param topVer Topology version.
 * @param partsMap Partitions map for unstable topology.
 * @param parts Explicit partitions for current node.
 * @param pageSize Page size.
 * @param distributedJoinMode Query distributed join mode.
 * @param enforceJoinOrder Enforce join order flag.
 * @param replicated Replicated-only flag.
 * @param timeout Query timeout.
 * @param params Query parameters.
 * @param lazy Lazy (streaming) execution flag.
 */
private void onQueryRequest0(final ClusterNode node, final long reqId, final int segmentId,
    final String schemaName, final Collection<GridCacheSqlQuery> qrys, final List<Integer> cacheIds,
    final AffinityTopologyVersion topVer, final Map<UUID, int[]> partsMap, final int[] parts,
    final int pageSize, final DistributedJoinMode distributedJoinMode, final boolean enforceJoinOrder,
    final boolean replicated, final int timeout, final Object[] params, boolean lazy) {
    if (lazy && MapQueryLazyWorker.currentWorker() == null) {
        // Lazy queries must be re-submitted to dedicated workers.
        MapQueryLazyWorkerKey key = new MapQueryLazyWorkerKey(node.id(), reqId, segmentId);
        MapQueryLazyWorker worker = new MapQueryLazyWorker(ctx.igniteInstanceName(), key, log, this);

        worker.submit(new Runnable() {
            @Override public void run() {
                onQueryRequest0(node, reqId, segmentId, schemaName, qrys, cacheIds, topVer, partsMap,
                    parts, pageSize, distributedJoinMode, enforceJoinOrder, replicated, timeout, params, true);
            }
        });

        if (lazyWorkerBusyLock.enterBusy()) {
            try {
                MapQueryLazyWorker oldWorker = lazyWorkers.put(key, worker);

                if (oldWorker != null)
                    oldWorker.stop();

                IgniteThread thread = new IgniteThread(worker);

                thread.start();
            }
            finally {
                lazyWorkerBusyLock.leaveBusy();
            }
        }
        else
            log.info("Ignored query request (node is stopping) [nodeId=" + node.id() + ", reqId=" + reqId + ']');

        return;
    }

    // Prepare to run queries.
    GridCacheContext<?, ?> mainCctx =
        !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;

    MapNodeResults nodeRess = resultsForNode(node.id());

    MapQueryResults qr = null;

    List<GridReservable> reserved = new ArrayList<>();

    try {
        if (topVer != null) {
            // Reserve primary for topology version or explicit partitions.
            if (!reservePartitions(cacheIds, topVer, parts, reserved)) {
                // Unregister the lazy worker because a retry may never reach this node again.
                if (lazy)
                    stopAndUnregisterCurrentLazyWorker();

                sendRetry(node, reqId, segmentId);

                return;
            }
        }

        qr = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, MapQueryLazyWorker.currentWorker());

        if (nodeRess.put(reqId, segmentId, qr) != null)
            throw new IllegalStateException();

        // Prepare query context.
        GridH2QueryContext qctx = new GridH2QueryContext(ctx.localNodeId(), node.id(), reqId, segmentId,
            replicated ? REPLICATED : MAP)
            .filter(h2.backupFilter(topVer, parts))
            .partitionsMap(partsMap)
            .distributedJoinMode(distributedJoinMode)
            .pageSize(pageSize)
            .topologyVersion(topVer)
            .reservations(reserved);

        Connection conn = h2.connectionForSchema(schemaName);

        H2Utils.setupConnection(conn, distributedJoinMode != OFF, enforceJoinOrder);

        GridH2QueryContext.set(qctx);

        // qctx is set, so reservations must be released through it from now on.
        reserved = null;

        try {
            if (nodeRess.cancelled(reqId)) {
                GridH2QueryContext.clear(ctx.localNodeId(), node.id(), reqId, qctx.type());

                nodeRess.cancelRequest(reqId);

                throw new QueryCancelledException();
            }

            // Run queries.
            int qryIdx = 0;

            boolean evt = mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);

            for (GridCacheSqlQuery qry : qrys) {
                ResultSet rs = null;

                // If we are not the target node for this replicated query, just ignore it.
                if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
                    rs = h2.executeSqlQueryWithTimer(conn, qry.query(),
                        F.asList(qry.parameters(params)), true, timeout, qr.queryCancel(qryIdx));

                    if (evt) {
                        ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.",
                            EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null,
                            qry.query(), null, null, params, node.id(), null));
                    }

                    assert rs instanceof JdbcResultSet : rs.getClass();
                }

                qr.addResult(qryIdx, qry, node.id(), rs, params);

                if (qr.cancelled()) {
                    qr.result(qryIdx).close();

                    throw new QueryCancelledException();
                }

                // Send the first page.
                sendNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize);

                qryIdx++;
            }

            // All result sets are already fetched into memory, so it is safe to release the partitions.
            if (!lazy)
                releaseReservations();
        }
        catch (Throwable e) {
            releaseReservations();

            throw e;
        }
    }
    catch (Throwable e) {
        if (qr != null) {
            nodeRess.remove(reqId, segmentId, qr);

            qr.cancel(false);
        }

        // Unregister the worker after possible cancellation.
        if (lazy)
            stopAndUnregisterCurrentLazyWorker();

        if (X.hasCause(e, GridH2RetryException.class))
            sendRetry(node, reqId, segmentId);
        else {
            U.error(log, "Failed to execute local query.", e);

            sendError(node, reqId, e);

            if (e instanceof Error)
                throw (Error)e;
        }
    }
    finally {
        if (reserved != null) {
            // Release reserved partitions.
            for (int i = 0; i < reserved.size(); i++)
                reserved.get(i).release();
        }
    }
}
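The lazy branch at the top is a hand-off pattern: when the calling thread is not a dedicated worker, the request wraps itself in a Runnable (with lazy forced to true), submits it to a fresh worker, and registers that worker under a request-scoped key, stopping any stale worker found there. A stripped-down sketch of the same pattern, using plain java.util.concurrent executors in place of MapQueryLazyWorker and invented names throughout:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/** Sketch: re-submit work to a dedicated per-request worker, replacing any old one. */
public final class LazyDispatch {
    private final Map<String, ExecutorService> workers = new ConcurrentHashMap<>();

    public void handle(String requestKey, Runnable task, boolean lazy) {
        if (lazy) {
            ExecutorService worker = Executors.newSingleThreadExecutor();

            worker.submit(task);

            // Replace a stale worker registered under the same key, as the
            // Ignite code does with lazyWorkers.put(key, worker). A real
            // implementation also shuts workers down once results are consumed.
            ExecutorService old = workers.put(requestKey, worker);

            if (old != null)
                old.shutdownNow();

            return;
        }

        // Non-lazy requests execute on the calling thread.
        task.run();
    }
}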
Use of org.h2.engine.Mode in project jdbi by jdbi.
In class TestTransactional, method setUp:
@Before
public void setUp() throws Exception {
    final JdbcDataSource ds = new JdbcDataSource() {
        private static final long serialVersionUID = 1L;

        @Override
        public Connection getConnection() throws SQLException {
            final Connection real = super.getConnection();
            return (Connection) Proxy.newProxyInstance(real.getClass().getClassLoader(),
                new Class<?>[] { Connection.class }, new TxnIsolationCheckingInvocationHandler(real));
        }
    };
    // In MVCC mode H2 doesn't shut down immediately when all connections are closed,
    // so a random database name is needed.
    ds.setURL(String.format("jdbc:h2:mem:%s;MVCC=TRUE", UUID.randomUUID()));

    db = Jdbi.create(ds);
    db.installPlugin(new SqlObjectPlugin());
    db.registerRowMapper(new SomethingMapper());
    handle = db.open();

    handle.execute("create table something (id int primary key, name varchar(100))");
}
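The overridden getConnection() hands each real Connection to a JDK dynamic proxy so the test can observe JDBC calls such as setTransactionIsolation. The actual TxnIsolationCheckingInvocationHandler lives in jdbi's test sources; a plausible minimal handler of that shape, shown purely for illustration, could look like this:

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.sql.Connection;

/** Illustrative stand-in: delegates every call to the real connection
 *  while recording isolation-level changes. */
class IsolationRecordingHandler implements InvocationHandler {
    private final Connection real;

    IsolationRecordingHandler(Connection real) {
        this.real = real;
    }

    @Override
    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        if ("setTransactionIsolation".equals(method.getName())) {
            // A real test would assert on this; here we just log it.
            System.out.println("isolation set to " + args[0]);
        }
        try {
            return method.invoke(real, args);
        } catch (InvocationTargetException e) {
            throw e.getCause(); // rethrow the underlying SQLException unchanged
        }
    }
}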
Use of org.h2.engine.Mode in project h2database by h2database.
In class Parser, method parseColumnForTable:
private Column parseColumnForTable(String columnName, boolean defaultNullable) {
    Column column;
    boolean isIdentity = readIf("IDENTITY");
    if (isIdentity || readIf("BIGSERIAL")) {
        // Check if any of them are disallowed in the current Mode
        if (isIdentity && database.getMode().disallowedTypes.contains("IDENTITY")) {
            throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, currentToken);
        }
        column = new Column(columnName, Value.LONG);
        column.setOriginalSQL("IDENTITY");
        parseAutoIncrement(column);
        // PostgreSQL compatibility
        if (!database.getMode().serialColumnIsNotPK) {
            column.setPrimaryKey(true);
        }
    } else if (readIf("SERIAL")) {
        column = new Column(columnName, Value.INT);
        column.setOriginalSQL("SERIAL");
        parseAutoIncrement(column);
        // PostgreSQL compatibility
        if (!database.getMode().serialColumnIsNotPK) {
            column.setPrimaryKey(true);
        }
    } else {
        column = parseColumnWithType(columnName);
    }
    if (readIf("INVISIBLE")) {
        column.setVisible(false);
    } else if (readIf("VISIBLE")) {
        column.setVisible(true);
    }
    NullConstraintType nullConstraint = parseNotNullConstraint();
    switch (nullConstraint) {
    case NULL_IS_ALLOWED:
        column.setNullable(true);
        break;
    case NULL_IS_NOT_ALLOWED:
        column.setNullable(false);
        break;
    case NO_NULL_CONSTRAINT_FOUND:
        // domains may be defined as not nullable
        column.setNullable(defaultNullable & column.isNullable());
        break;
    default:
        throw DbException.get(ErrorCode.UNKNOWN_MODE_1,
                "Internal Error - unhandled case: " + nullConstraint.name());
    }
    if (readIf("AS")) {
        if (isIdentity) {
            getSyntaxError();
        }
        Expression expr = readExpression();
        column.setComputedExpression(expr);
    } else if (readIf("DEFAULT")) {
        Expression defaultExpression = readExpression();
        column.setDefaultExpression(session, defaultExpression);
    } else if (readIf("GENERATED")) {
        if (!readIf("ALWAYS")) {
            read("BY");
            read("DEFAULT");
        }
        read("AS");
        read("IDENTITY");
        long start = 1, increment = 1;
        if (readIf("(")) {
            read("START");
            readIf("WITH");
            start = readLong();
            readIf(",");
            if (readIf("INCREMENT")) {
                readIf("BY");
                increment = readLong();
            }
            read(")");
        }
        column.setPrimaryKey(true);
        column.setAutoIncrement(true, start, increment);
    }
    if (readIf("ON")) {
        read("UPDATE");
        Expression onUpdateExpression = readExpression();
        column.setOnUpdateExpression(session, onUpdateExpression);
    }
    if (NullConstraintType.NULL_IS_NOT_ALLOWED == parseNotNullConstraint()) {
        column.setNullable(false);
    }
    if (readIf("AUTO_INCREMENT") || readIf("BIGSERIAL") || readIf("SERIAL")) {
        parseAutoIncrement(column);
        parseNotNullConstraint();
    } else if (readIf("IDENTITY")) {
        parseAutoIncrement(column);
        column.setPrimaryKey(true);
        parseNotNullConstraint();
    }
    if (readIf("NULL_TO_DEFAULT")) {
        column.setConvertNullToDefault(true);
    }
    if (readIf("SEQUENCE")) {
        Sequence sequence = readSequence();
        column.setSequence(sequence);
    }
    if (readIf("SELECTIVITY")) {
        int value = readPositiveInt();
        column.setSelectivity(value);
    }
    String comment = readCommentIf();
    if (comment != null) {
        column.setComment(comment);
    }
    return column;
}
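The two serialColumnIsNotPK branches have a directly observable effect: in H2's default mode SERIAL implies a primary key, while under a mode that sets the flag (PostgreSQL compatibility, per the comments above) it only makes the column auto-incrementing. A small probe of that difference, assuming an in-memory H2 database; the exact behavior may vary across H2 versions, so the code prints whichever branch it hits rather than asserting:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class SerialPkProbe {
    public static void main(String[] args) throws Exception {
        for (String url : new String[] {
                "jdbc:h2:mem:plain", "jdbc:h2:mem:pg;MODE=PostgreSQL" }) {
            try (Connection c = DriverManager.getConnection(url);
                 Statement st = c.createStatement()) {
                st.execute("CREATE TABLE t(id SERIAL, name VARCHAR(20))");
                st.execute("INSERT INTO t(id, name) VALUES (1, 'a')");
                try {
                    // Succeeds only if SERIAL did not make id a primary key.
                    st.execute("INSERT INTO t(id, name) VALUES (1, 'b')");
                    System.out.println(url + ": duplicate id accepted (no implicit PK)");
                } catch (SQLException e) {
                    System.out.println(url + ": duplicate id rejected (implicit PK)");
                }
            }
        }
    }
}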
Use of org.h2.engine.Mode in project h2database by h2database.
In class Parser, method parseColumnWithType:
private Column parseColumnWithType(String columnName) {
    String original = currentToken;
    boolean regular = false;
    int originalScale = -1;
    if (readIf("LONG")) {
        if (readIf("RAW")) {
            original += " RAW";
        }
    } else if (readIf("DOUBLE")) {
        if (readIf("PRECISION")) {
            original += " PRECISION";
        }
    } else if (readIf("CHARACTER")) {
        if (readIf("VARYING")) {
            original += " VARYING";
        }
    } else if (readIf("TIME")) {
        if (readIf("(")) {
            originalScale = readPositiveInt();
            if (originalScale > ValueTime.MAXIMUM_SCALE) {
                throw DbException.get(ErrorCode.INVALID_VALUE_SCALE_PRECISION,
                        Integer.toString(originalScale));
            }
            read(")");
        }
        if (readIf("WITHOUT")) {
            read("TIME");
            read("ZONE");
            original += " WITHOUT TIME ZONE";
        }
    } else if (readIf("TIMESTAMP")) {
        if (readIf("(")) {
            originalScale = readPositiveInt();
            // Allow non-standard TIMESTAMP(..., ...) syntax
            if (readIf(",")) {
                originalScale = readPositiveInt();
            }
            if (originalScale > ValueTimestamp.MAXIMUM_SCALE) {
                throw DbException.get(ErrorCode.INVALID_VALUE_SCALE_PRECISION,
                        Integer.toString(originalScale));
            }
            read(")");
        }
        if (readIf("WITH")) {
            read("TIME");
            read("ZONE");
            original += " WITH TIME ZONE";
        } else if (readIf("WITHOUT")) {
            read("TIME");
            read("ZONE");
            original += " WITHOUT TIME ZONE";
        }
    } else {
        regular = true;
    }
    long precision = -1;
    int displaySize = -1;
    String[] enumerators = null;
    int scale = -1;
    String comment = null;
    Column templateColumn = null;
    DataType dataType;
    if (!identifiersToUpper) {
        original = StringUtils.toUpperEnglish(original);
    }
    UserDataType userDataType = database.findUserDataType(original);
    if (userDataType != null) {
        templateColumn = userDataType.getColumn();
        dataType = DataType.getDataType(templateColumn.getType());
        comment = templateColumn.getComment();
        original = templateColumn.getOriginalSQL();
        precision = templateColumn.getPrecision();
        displaySize = templateColumn.getDisplaySize();
        scale = templateColumn.getScale();
        enumerators = templateColumn.getEnumerators();
    } else {
        Mode mode = database.getMode();
        dataType = DataType.getTypeByName(original, mode);
        if (dataType == null || mode.disallowedTypes.contains(original)) {
            throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, currentToken);
        }
    }
    if (database.getIgnoreCase() && dataType.type == Value.STRING &&
            !equalsToken("VARCHAR_CASESENSITIVE", original)) {
        original = "VARCHAR_IGNORECASE";
        dataType = DataType.getTypeByName(original, database.getMode());
    }
    if (regular) {
        read();
    }
    precision = precision == -1 ? dataType.defaultPrecision : precision;
    displaySize = displaySize == -1 ? dataType.defaultDisplaySize : displaySize;
    scale = scale == -1 ? dataType.defaultScale : scale;
    if (dataType.supportsPrecision || dataType.supportsScale) {
        int t = dataType.type;
        if (t == Value.TIME || t == Value.TIMESTAMP || t == Value.TIMESTAMP_TZ) {
            if (originalScale >= 0) {
                scale = originalScale;
                switch (t) {
                case Value.TIME:
                    if (original.equals("TIME WITHOUT TIME ZONE")) {
                        original = "TIME(" + originalScale + ") WITHOUT TIME ZONE";
                    } else {
                        original = original + '(' + originalScale + ')';
                    }
                    precision = displaySize = ValueTime.getDisplaySize(originalScale);
                    break;
                case Value.TIMESTAMP:
                    if (original.equals("TIMESTAMP WITHOUT TIME ZONE")) {
                        original = "TIMESTAMP(" + originalScale + ") WITHOUT TIME ZONE";
                    } else {
                        original = original + '(' + originalScale + ')';
                    }
                    precision = displaySize = ValueTimestamp.getDisplaySize(originalScale);
                    break;
                case Value.TIMESTAMP_TZ:
                    original = "TIMESTAMP(" + originalScale + ") WITH TIME ZONE";
                    precision = displaySize = ValueTimestampTimeZone.getDisplaySize(originalScale);
                    break;
                }
            }
        } else if (readIf("(")) {
            if (!readIf("MAX")) {
                long p = readLong();
                if (readIf("K")) {
                    p *= 1024;
                } else if (readIf("M")) {
                    p *= 1024 * 1024;
                } else if (readIf("G")) {
                    p *= 1024 * 1024 * 1024;
                }
                // The K/M/G multipliers above can overflow a long to a
                // negative value; clamp to Long.MAX_VALUE in that case.
                if (p < 0) {
                    p = Long.MAX_VALUE;
                }
original += "(" + p;
// Oracle syntax
if (!readIf("CHAR")) {
readIf("BYTE");
}
if (dataType.supportsScale) {
if (readIf(",")) {
scale = readInt();
original += ", " + scale;
} else {
scale = 0;
}
}
precision = p;
displaySize = MathUtils.convertLongToInt(precision);
original += ")";
}
read(")");
}
} else if (dataType.type == Value.DOUBLE && original.equals("FLOAT")) {
if (readIf("(")) {
int p = readPositiveInt();
read(")");
if (p > 53) {
throw DbException.get(ErrorCode.INVALID_VALUE_SCALE_PRECISION, Integer.toString(p));
}
if (p <= 24) {
dataType = DataType.getDataType(Value.FLOAT);
}
original = original + '(' + p + ')';
}
} else if (dataType.type == Value.ENUM) {
if (readIf("(")) {
java.util.List<String> enumeratorList = new ArrayList<>();
original += '(';
String enumerator0 = readString();
enumeratorList.add(enumerator0);
original += "'" + enumerator0 + "'";
while (readIfMore(true)) {
original += ',';
String enumeratorN = readString();
original += "'" + enumeratorN + "'";
enumeratorList.add(enumeratorN);
}
original += ')';
enumerators = enumeratorList.toArray(new String[0]);
}
try {
ValueEnum.check(enumerators);
} catch (DbException e) {
throw e.addSQL(original);
}
} else if (readIf("(")) {
// Support for MySQL: INT(11), MEDIUMINT(8) and so on.
// Just ignore the precision.
readPositiveInt();
read(")");
}
if (readIf("FOR")) {
read("BIT");
read("DATA");
if (dataType.type == Value.STRING) {
dataType = DataType.getTypeByName("BINARY", database.getMode());
}
}
// MySQL compatibility
readIf("UNSIGNED");
int type = dataType.type;
if (scale > precision) {
throw DbException.get(ErrorCode.INVALID_VALUE_SCALE_PRECISION, Integer.toString(scale), Long.toString(precision));
}
Column column = new Column(columnName, type, precision, scale, displaySize, enumerators);
if (templateColumn != null) {
column.setNullable(templateColumn.isNullable());
column.setDefaultExpression(session, templateColumn.getDefaultExpression());
int selectivity = templateColumn.getSelectivity();
if (selectivity != Constants.SELECTIVITY_DEFAULT) {
column.setSelectivity(selectivity);
}
Expression checkConstraint = templateColumn.getCheckConstraint(session, columnName);
column.addCheckConstraint(session, checkConstraint);
}
column.setComment(comment);
column.setOriginalSQL(original);
return column;
}
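Most of the precision and scale plumbing above surfaces directly in JDBC metadata, including the K/M/G size multipliers and the TIMESTAMP(n) scale handling. A quick check against an in-memory database; reported sizes may differ slightly between H2 versions:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class PrecisionDemo {
    public static void main(String[] args) throws Exception {
        try (Connection c = DriverManager.getConnection("jdbc:h2:mem:prec");
             Statement st = c.createStatement()) {
            // 2K exercises the K/M/G multiplier branch; TIMESTAMP(3) the scale branch.
            st.execute("CREATE TABLE t(v VARCHAR(2K), ts TIMESTAMP(3))");
            try (ResultSet rs = c.getMetaData().getColumns(null, null, "T", null)) {
                while (rs.next()) {
                    System.out.printf("%s size=%d scale=%d%n",
                        rs.getString("COLUMN_NAME"),
                        rs.getInt("COLUMN_SIZE"),
                        rs.getInt("DECIMAL_DIGITS"));
                }
            }
        }
    }
}

Expect VARCHAR(2K) to report a column size of 2048, since the parser multiplies the literal by 1024 before storing it as the precision.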