Use of org.h2.dev.util.BinaryArithmeticStream.In in project che by eclipse.
The class JpaTckModule, method configure().
@Override
protected void configure() {
    H2DBTestServer server = H2DBTestServer.startDefault();
    install(new PersistTestModuleBuilder().setDriver(Driver.class)
        .runningOn(server)
        .addEntityClasses(UserImpl.class, ProfileImpl.class, PreferenceEntity.class, AccountImpl.class)
        .setExceptionHandler(H2ExceptionHandler.class)
        .build());
    bind(DBInitializer.class).asEagerSingleton();
    bind(SchemaInitializer.class).toInstance(new FlywaySchemaInitializer(server.getDataSource(), "che-schema"));
    bind(TckResourcesCleaner.class).toInstance(new H2JpaCleaner(server.getDataSource()));
    bind(new TypeLiteral<TckRepository<UserImpl>>() {}).to(UserJpaTckRepository.class);
    bind(new TypeLiteral<TckRepository<ProfileImpl>>() {}).toInstance(new JpaTckRepository<>(ProfileImpl.class));
    bind(new TypeLiteral<TckRepository<Pair<String, Map<String, String>>>>() {}).to(PreferenceJpaTckRepository.class);
    bind(UserDao.class).to(JpaUserDao.class);
    bind(ProfileDao.class).to(JpaProfileDao.class);
    bind(PreferenceDao.class).to(JpaPreferenceDao.class);
    // The SHA-512 encryptor is faster than PBKDF2, which makes it better suited for testing.
    bind(PasswordEncryptor.class).to(SHA512PasswordEncryptor.class).in(Singleton.class);
}
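For context, a Guice module like this is installed into an injector when the TCK tests bootstrap. A minimal sketch of that wiring, assuming JpaTckModule and the bound DAO types are on the classpath (the bootstrap class name is illustrative):

import com.google.inject.Guice;
import com.google.inject.Injector;

public class TckBootstrapSketch {
    public static void main(String[] args) {
        // Create an injector from the module above; bindings such as
        // UserDao -> JpaUserDao then become resolvable by the TCK tests.
        Injector injector = Guice.createInjector(new JpaTckModule());

        UserDao userDao = injector.getInstance(UserDao.class);
    }
}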
Use of org.h2.dev.util.BinaryArithmeticStream.In in project che by eclipse.
The class H2TestHelper, method inMemoryDefault().
/**
 * Creates a new default datasource to the in-memory database
 * with url {@value #DEFAULT_IN_MEMORY_DB_URL}.
 * Boots the database if this is invoked for the first time; the database
 * won't be shut down until a 'SHUTDOWN' query is executed
 * or {@link #shutdownDefault()} is called directly.
 *
 * @return datasource to the in-memory database
 * @deprecated use {@link H2DBTestServer} instead.
 */
@Deprecated
public static DataSource inMemoryDefault() {
    final JdbcDataSource dataSource = new JdbcDataSource();
    dataSource.setUrl(DEFAULT_IN_MEMORY_DB_URL);
    return dataSource;
}
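A minimal sketch of consuming this datasource from a test (the wrapper class name is illustrative; H2TestHelper and its DEFAULT_IN_MEMORY_DB_URL constant come from the snippet above):

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import javax.sql.DataSource;

public class InMemoryDefaultSketch {
    public static void main(String[] args) throws SQLException {
        DataSource ds = H2TestHelper.inMemoryDefault();

        // Each getConnection() call reaches the same in-memory database until
        // a 'SHUTDOWN' query runs or shutdownDefault() is invoked.
        try (Connection conn = ds.getConnection();
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SELECT 1")) {
            rs.next();
            System.out.println(rs.getInt(1)); // prints 1
        }
    }
}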
Use of org.h2.dev.util.BinaryArithmeticStream.In in project ignite by apache.
The class DmlStatementsProcessor, method streamUpdateQuery().
/**
 * Performs the given statement against the given data streamer. Only row-based INSERT and MERGE are supported,
 * as well as key-bound UPDATE and DELETE (those with a filter {@code WHERE _key = ?}).
 *
 * @param streamer Streamer to feed data to.
 * @param stmt Statement.
 * @param args Statement arguments.
 * @return Number of rows in the given statement for INSERT and MERGE, {@code 1} otherwise.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings({"unchecked", "ConstantConditions"})
long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Object[] args) throws IgniteCheckedException {
    args = U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY);

    Prepared p = GridSqlQueryParser.prepared(stmt);

    assert p != null;

    UpdatePlan plan = UpdatePlanBuilder.planForStatement(p, null);

    if (!F.eq(streamer.cacheName(), plan.tbl.rowDescriptor().context().name()))
        throw new IgniteSQLException("Cross cache streaming is not supported, please specify cache explicitly " +
            "in connection options", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);

    if (plan.mode == UpdateMode.INSERT && plan.rowsNum > 0) {
        assert plan.isLocSubqry;

        final GridCacheContext cctx = plan.tbl.rowDescriptor().context();

        QueryCursorImpl<List<?>> cur;

        final ArrayList<List<?>> data = new ArrayList<>(plan.rowsNum);

        final GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()), plan.selectQry,
            F.asList(args), null, false, 0, null);

        QueryCursorImpl<List<?>> stepCur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                try {
                    return new GridQueryCacheObjectsIterator(res.iterator(), idx.objectContext(), cctx.keepBinary());
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }
        }, null);

        data.addAll(stepCur.getAll());

        cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                return data.iterator();
            }
        }, null);

        if (plan.rowsNum == 1) {
            IgniteBiTuple t = rowToKeyValue(cctx, cur.iterator().next(), plan);

            streamer.addData(t.getKey(), t.getValue());

            return 1;
        }

        Map<Object, Object> rows = new LinkedHashMap<>(plan.rowsNum);

        for (List<?> row : cur) {
            final IgniteBiTuple t = rowToKeyValue(cctx, row, plan);

            rows.put(t.getKey(), t.getValue());
        }

        streamer.addData(rows);

        return rows.size();
    }
    else
        throw new IgniteSQLException("Only tuple based INSERT statements are supported in streaming mode",
            IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
}
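For context, IgniteDataStreamer, which this method feeds, is part of Ignite's public API. A minimal standalone usage sketch (cache name and data are illustrative):

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;

public class StreamerSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("myCache");

            try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("myCache")) {
                // Allow existing keys to be overwritten, like cache.put() would.
                streamer.allowOverwrite(true);

                for (int i = 0; i < 1_000; i++)
                    streamer.addData(i, "value-" + i);
            } // close() flushes any remaining buffered entries
        }
    }
}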
Use of org.h2.dev.util.BinaryArithmeticStream.In in project ignite by apache.
The class IgniteH2Indexing, method queryDistributedSqlFields().
/** {@inheritDoc} */
@Override public FieldsQueryCursor<List<?>> queryDistributedSqlFields(String schemaName, SqlFieldsQuery qry,
    boolean keepBinary, GridQueryCancel cancel, @Nullable Integer mainCacheId) {
    final String sqlQry = qry.getSql();

    Connection c = connectionForSchema(schemaName);

    final boolean enforceJoinOrder = qry.isEnforceJoinOrder();
    final boolean distributedJoins = qry.isDistributedJoins();
    final boolean grpByCollocated = qry.isCollocated();

    final DistributedJoinMode distributedJoinMode = distributedJoinMode(qry.isLocal(), distributedJoins);

    GridCacheTwoStepQuery twoStepQry = null;
    List<GridQueryFieldMetadata> meta;

    final H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, sqlQry, grpByCollocated,
        distributedJoins, enforceJoinOrder, qry.isLocal());
    H2TwoStepCachedQuery cachedQry = twoStepCache.get(cachedQryKey);

    if (cachedQry != null) {
        twoStepQry = cachedQry.query().copy();
        meta = cachedQry.meta();
    }
    else {
        final UUID locNodeId = ctx.localNodeId();

        // Here we will just parse the statement, no need to optimize it at all.
        H2Utils.setupConnection(c, /*distributedJoins*/false, /*enforceJoinOrder*/true);

        GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, 0, PREPARE)
            .distributedJoinMode(distributedJoinMode));

        PreparedStatement stmt = null;
        Prepared prepared;

        boolean cachesCreated = false;

        try {
            try {
                while (true) {
                    try {
                        // Do not cache this statement because the whole query object will be cached later on.
                        stmt = prepareStatement(c, sqlQry, false);

                        break;
                    }
                    catch (SQLException e) {
                        if (!cachesCreated && (e.getErrorCode() == ErrorCode.SCHEMA_NOT_FOUND_1 ||
                            e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 ||
                            e.getErrorCode() == ErrorCode.INDEX_NOT_FOUND_1)) {
                            try {
                                ctx.cache().createMissingQueryCaches();
                            }
                            catch (IgniteCheckedException ignored) {
                                throw new CacheException("Failed to create missing caches.", e);
                            }

                            cachesCreated = true;
                        }
                        else
                            throw new IgniteSQLException("Failed to parse query: " + sqlQry,
                                IgniteQueryErrorCode.PARSING, e);
                    }
                }

                prepared = GridSqlQueryParser.prepared(stmt);

                if (qry instanceof JdbcSqlFieldsQuery && ((JdbcSqlFieldsQuery)qry).isQuery() != prepared.isQuery())
                    throw new IgniteSQLException("Given statement type does not match that declared by JDBC driver",
                        IgniteQueryErrorCode.STMT_TYPE_MISMATCH);

                if (prepared.isQuery()) {
                    bindParameters(stmt, F.asList(qry.getArgs()));

                    twoStepQry = GridSqlQuerySplitter.split((JdbcPreparedStatement)stmt, qry.getArgs(),
                        grpByCollocated, distributedJoins, enforceJoinOrder, this);

                    assert twoStepQry != null;
                }
            }
            finally {
                GridH2QueryContext.clearThreadLocal();
            }

            // It is a DML statement if we did not create a two-step query.
            if (twoStepQry == null) {
                if (DmlStatementsProcessor.isDmlStatement(prepared)) {
                    try {
                        return dmlProc.updateSqlFieldsDistributed(schemaName, stmt, qry, cancel);
                    }
                    catch (IgniteCheckedException e) {
                        throw new IgniteSQLException("Failed to execute DML statement [stmt=" + sqlQry +
                            ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e);
                    }
                }

                if (DdlStatementsProcessor.isDdlStatement(prepared)) {
                    try {
                        return ddlProc.runDdlStatement(sqlQry, stmt);
                    }
                    catch (IgniteCheckedException e) {
                        throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + sqlQry + ']', e);
                    }
                }
            }

            LinkedHashSet<Integer> caches0 = new LinkedHashSet<>();

            assert twoStepQry != null;

            int tblCnt = twoStepQry.tablesCount();

            if (mainCacheId != null)
                caches0.add(mainCacheId);

            if (tblCnt > 0) {
                for (QueryTable tblKey : twoStepQry.tables()) {
                    GridH2Table tbl = dataTable(tblKey);

                    int cacheId = CU.cacheId(tbl.cacheName());

                    caches0.add(cacheId);
                }
            }

            if (caches0.isEmpty())
                twoStepQry.local(true);
            else {
                // Prohibit usage of indexes with different numbers of segments in the same query.
                List<Integer> cacheIds = new ArrayList<>(caches0);

                checkCacheIndexSegmentation(cacheIds);

                twoStepQry.cacheIds(cacheIds);
                twoStepQry.local(qry.isLocal());
            }

            meta = H2Utils.meta(stmt.getMetaData());
        }
        catch (IgniteCheckedException e) {
            throw new CacheException("Failed to bind parameters: [qry=" + sqlQry + ", params=" +
                Arrays.deepToString(qry.getArgs()) + "]", e);
        }
        catch (SQLException e) {
            throw new IgniteSQLException(e);
        }
        finally {
            U.close(stmt, log);
        }
    }

    if (log.isDebugEnabled())
        log.debug("Parsed query: `" + sqlQry + "` into two step query: " + twoStepQry);

    twoStepQry.pageSize(qry.getPageSize());

    if (cancel == null)
        cancel = new GridQueryCancel();

    int[] partitions = qry.getPartitions();

    if (partitions == null && twoStepQry.derivedPartitions() != null) {
        try {
            partitions = calculateQueryPartitions(twoStepQry.derivedPartitions(), qry.getArgs());
        }
        catch (IgniteCheckedException e) {
            throw new CacheException("Failed to calculate derived partitions: [qry=" + sqlQry + ", params=" +
                Arrays.deepToString(qry.getArgs()) + "]", e);
        }
    }

    QueryCursorImpl<List<?>> cursor = new QueryCursorImpl<>(runQueryTwoStep(schemaName, twoStepQry, keepBinary,
        enforceJoinOrder, qry.getTimeout(), cancel, qry.getArgs(), partitions), cancel);

    cursor.fieldsMeta(meta);

    if (cachedQry == null && !twoStepQry.explain()) {
        cachedQry = new H2TwoStepCachedQuery(meta, twoStepQry.copy());

        twoStepCache.putIfAbsent(cachedQryKey, cachedQry);
    }

    return cursor;
}
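From the public API side, this code path is reached through SqlFieldsQuery. A minimal sketch, assuming a cache with SQL-indexed Integer/String types (cache name, data, and SQL are illustrative):

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.FieldsQueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class FieldsQuerySketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Integer, String>("myCache")
                    .setIndexedTypes(Integer.class, String.class));

            for (int i = 0; i < 100; i++)
                cache.put(i, "value-" + i);

            // These flags correspond to the enforceJoinOrder/distributedJoins/collocated
            // values read from the query at the top of the method above.
            SqlFieldsQuery qry = new SqlFieldsQuery("SELECT _key, _val FROM String WHERE _key > ?")
                .setArgs(90)
                .setDistributedJoins(false)
                .setEnforceJoinOrder(false)
                .setPageSize(1024);

            try (FieldsQueryCursor<List<?>> cur = cache.query(qry)) {
                for (List<?> row : cur)
                    System.out.println(row);
            }
        }
    }
}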
Use of org.h2.dev.util.BinaryArithmeticStream.In in project ignite by apache.
The class GridSqlQuerySplitter, method split().
/**
 * @param stmt Prepared statement.
 * @param params Parameters.
 * @param collocatedGrpBy Whether the query has collocated GROUP BY keys.
 * @param distributedJoins Whether distributed joins are enabled.
 * @param enforceJoinOrder Enforce join order.
 * @param h2 Indexing.
 * @return Two-step query.
 * @throws SQLException If failed.
 * @throws IgniteCheckedException If failed.
 */
public static GridCacheTwoStepQuery split(JdbcPreparedStatement stmt, Object[] params, boolean collocatedGrpBy,
    boolean distributedJoins, boolean enforceJoinOrder, IgniteH2Indexing h2)
    throws SQLException, IgniteCheckedException {
    if (params == null)
        params = GridCacheSqlQuery.EMPTY_PARAMS;

    // Here we will just do initial query parsing. Do not use optimized
    // subqueries because we do not have unique FROM aliases yet.
    GridSqlQuery qry = parse(prepared(stmt), false);

    String originalSql = qry.getSQL();

    final boolean explain = qry.explain();

    qry.explain(false);

    GridSqlQuerySplitter splitter = new GridSqlQuerySplitter(params, collocatedGrpBy, h2.kernalContext());

    // Normalization will generate unique aliases for all the table filters in FROM.
    // Also it will collect all tables and schemas from the query.
    splitter.normalizeQuery(qry);

    Connection conn = stmt.getConnection();

    // Here we will have a correct normalized AST with optimized join order.
    // The distributedJoins parameter is ignored because it is not relevant for
    // the REDUCE query optimization.
    qry = parse(optimize(h2, conn, qry.getSQL(), params, false, enforceJoinOrder), true);

    // Do the actual query split. We will update the original query AST, need to be careful.
    splitter.splitQuery(qry);

    // We must have at least one map query.
    assert !F.isEmpty(splitter.mapSqlQrys) : "map";

    // We must have a reduce query.
    assert splitter.rdcSqlQry != null : "rdc";

    // If distributed joins are enabled, optimize all MAP side queries
    // to check whether we need distributed joins at all.
    if (distributedJoins) {
        boolean allCollocated = true;

        for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) {
            Prepared prepared = optimize(h2, conn, mapSqlQry.query(), mapSqlQry.parameters(params), true,
                enforceJoinOrder);

            allCollocated &= isCollocated((Query)prepared);

            mapSqlQry.query(parse(prepared, true).getSQL());
        }

        // We do not need distributed joins if all MAP queries are collocated.
        if (allCollocated)
            distributedJoins = false;
    }

    // Set up the resulting two-step query and return it.
    GridCacheTwoStepQuery twoStepQry = new GridCacheTwoStepQuery(originalSql, splitter.tbls);

    twoStepQry.reduceQuery(splitter.rdcSqlQry);

    for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys)
        twoStepQry.addMapQuery(mapSqlQry);

    twoStepQry.skipMergeTable(splitter.rdcQrySimple);
    twoStepQry.explain(explain);
    twoStepQry.distributedJoins(distributedJoins);

    // All map queries must have non-empty derivedPartitions to use this feature.
    twoStepQry.derivedPartitions(mergePartitionsFromMultipleQueries(twoStepQry.mapQueries()));

    return twoStepQry;
}
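The effect of this split can be observed from the public API with EXPLAIN, which in Ignite surfaces the generated MAP query plan(s) and the REDUCE query plan. A minimal sketch under the same cache-setup assumptions as above (names and SQL are illustrative):

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class ExplainSplitSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Integer, String>("myCache")
                    .setIndexedTypes(Integer.class, String.class));

            // EXPLAIN of a distributed query shows the split: plan rows for the
            // generated MAP query (or queries) followed by the REDUCE query.
            List<List<?>> plan = cache.query(
                new SqlFieldsQuery("EXPLAIN SELECT COUNT(*) FROM String")).getAll();

            for (List<?> row : plan)
                System.out.println(row.get(0));
        }
    }
}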