
Example 1 with In

Use of org.h2.dev.util.BinaryArithmeticStream.In in project che by eclipse.

The class JpaTckModule, method configure.

@Override
protected void configure() {
    H2DBTestServer server = H2DBTestServer.startDefault();
    install(new PersistTestModuleBuilder()
        .setDriver(Driver.class)
        .runningOn(server)
        .addEntityClasses(UserImpl.class, ProfileImpl.class, PreferenceEntity.class, AccountImpl.class)
        .setExceptionHandler(H2ExceptionHandler.class)
        .build());
    bind(DBInitializer.class).asEagerSingleton();
    bind(SchemaInitializer.class).toInstance(new FlywaySchemaInitializer(server.getDataSource(), "che-schema"));
    bind(TckResourcesCleaner.class).toInstance(new H2JpaCleaner(server.getDataSource()));
    bind(new TypeLiteral<TckRepository<UserImpl>>() {
    }).to(UserJpaTckRepository.class);
    bind(new TypeLiteral<TckRepository<ProfileImpl>>() {
    }).toInstance(new JpaTckRepository<>(ProfileImpl.class));
    bind(new TypeLiteral<TckRepository<Pair<String, Map<String, String>>>>() {
    }).to(PreferenceJpaTckRepository.class);
    bind(UserDao.class).to(JpaUserDao.class);
    bind(ProfileDao.class).to(JpaProfileDao.class);
    bind(PreferenceDao.class).to(JpaPreferenceDao.class);
    // SHA-512 encryptor is faster than PBKDF2 so it is better for testing
    bind(PasswordEncryptor.class).to(SHA512PasswordEncryptor.class).in(Singleton.class);
}
Also used:
TckResourcesCleaner (org.eclipse.che.commons.test.tck.TckResourcesCleaner)
H2DBTestServer (org.eclipse.che.commons.test.db.H2DBTestServer)
AccountImpl (org.eclipse.che.account.spi.AccountImpl)
Driver (org.h2.Driver)
PreferenceDao (org.eclipse.che.api.user.server.spi.PreferenceDao)
SHA512PasswordEncryptor (org.eclipse.che.security.SHA512PasswordEncryptor)
H2JpaCleaner (org.eclipse.che.commons.test.db.H2JpaCleaner)
PersistTestModuleBuilder (org.eclipse.che.commons.test.db.PersistTestModuleBuilder)
SchemaInitializer (org.eclipse.che.core.db.schema.SchemaInitializer)
FlywaySchemaInitializer (org.eclipse.che.core.db.schema.impl.flyway.FlywaySchemaInitializer)
ProfileDao (org.eclipse.che.api.user.server.spi.ProfileDao)
TypeLiteral (com.google.inject.TypeLiteral)
UserDao (org.eclipse.che.api.user.server.spi.UserDao)
ProfileImpl (org.eclipse.che.api.user.server.model.impl.ProfileImpl)
DBInitializer (org.eclipse.che.core.db.DBInitializer)
UserImpl (org.eclipse.che.api.user.server.model.impl.UserImpl)
Pair (org.eclipse.che.commons.lang.Pair)
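
For context, a module like this is installed into a Guice injector by the TCK test runner. The following is a minimal sketch of that wiring, assuming JpaTckModule is usable as a plain Guice module; the test class name is hypothetical:

import com.google.inject.Guice;
import com.google.inject.Injector;
import org.eclipse.che.api.user.server.spi.UserDao;

public class JpaTckSmokeTest {
    public static void main(String[] args) {
        // Creating the injector starts the in-memory H2 server and is
        // intended to trigger schema initialization at startup, since
        // DBInitializer is bound as an eager singleton.
        Injector injector = Guice.createInjector(new JpaTckModule());
        UserDao userDao = injector.getInstance(UserDao.class);
        System.out.println("UserDao bound to: " + userDao.getClass().getName());
    }
}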

Example 2 with In

Use of org.h2.dev.util.BinaryArithmeticStream.In in project che by eclipse.

The class H2TestHelper, method inMemoryDefault.

/**
 * Creates a new default datasource for the in-memory database
 * with url {@value #DEFAULT_IN_MEMORY_DB_URL}.
 * Boots the database if this is invoked for the first time; the database
 * won't be shut down until a 'SHUTDOWN' query is executed
 * or {@link #shutdownDefault()} is called directly.
 *
 * @return datasource to the in-memory database
 * @deprecated use {@link H2DBTestServer} instead
 */
@Deprecated
public static DataSource inMemoryDefault() {
    final JdbcDataSource dataSource = new JdbcDataSource();
    dataSource.setUrl(DEFAULT_IN_MEMORY_DB_URL);
    return dataSource;
}
Also used: JdbcDataSource (org.h2.jdbcx.JdbcDataSource)
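
A minimal usage sketch, assuming the default URL keeps the in-memory database alive across connections (the DB_CLOSE_DELAY=-1 style of H2 URL this helper is built around):

import java.sql.Connection;
import java.sql.Statement;
import javax.sql.DataSource;

public class InMemoryDbDemo {
    public static void main(String[] args) throws Exception {
        DataSource ds = H2TestHelper.inMemoryDefault();
        try (Connection conn = ds.getConnection();
             Statement st = conn.createStatement()) {
            st.execute("CREATE TABLE IF NOT EXISTS demo(id INT)");
        }
        // The database survives connection close; stop it explicitly.
        try (Connection conn = ds.getConnection();
             Statement st = conn.createStatement()) {
            st.execute("SHUTDOWN");
        }
    }
}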

Example 3 with In

Use of org.h2.dev.util.BinaryArithmeticStream.In in project ignite by apache.

The class DmlStatementsProcessor, method streamUpdateQuery.

/**
 * Performs the given statement against the given data streamer. Only row-based INSERT and MERGE
 * are supported, as well as key-bound UPDATE and DELETE (ones with filter {@code WHERE _key = ?}).
 *
 * @param streamer Streamer to feed data to.
 * @param stmt Statement.
 * @param args Statement arguments.
 * @return Number of rows in the given statement for INSERT and MERGE, {@code 1} otherwise.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings({ "unchecked", "ConstantConditions" })
long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Object[] args) throws IgniteCheckedException {
    args = U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY);
    Prepared p = GridSqlQueryParser.prepared(stmt);
    assert p != null;
    UpdatePlan plan = UpdatePlanBuilder.planForStatement(p, null);
    if (!F.eq(streamer.cacheName(), plan.tbl.rowDescriptor().context().name()))
        throw new IgniteSQLException("Cross cache streaming is not supported, please specify cache explicitly" + " in connection options", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
    if (plan.mode == UpdateMode.INSERT && plan.rowsNum > 0) {
        assert plan.isLocSubqry;
        final GridCacheContext cctx = plan.tbl.rowDescriptor().context();
        QueryCursorImpl<List<?>> cur;
        final ArrayList<List<?>> data = new ArrayList<>(plan.rowsNum);
        final GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()), plan.selectQry, F.asList(args), null, false, 0, null);
        QueryCursorImpl<List<?>> stepCur = new QueryCursorImpl<>(new Iterable<List<?>>() {

            @Override
            public Iterator<List<?>> iterator() {
                try {
                    return new GridQueryCacheObjectsIterator(res.iterator(), idx.objectContext(), cctx.keepBinary());
                } catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }
        }, null);
        data.addAll(stepCur.getAll());
        cur = new QueryCursorImpl<>(new Iterable<List<?>>() {

            @Override
            public Iterator<List<?>> iterator() {
                return data.iterator();
            }
        }, null);
        if (plan.rowsNum == 1) {
            IgniteBiTuple t = rowToKeyValue(cctx, cur.iterator().next(), plan);
            streamer.addData(t.getKey(), t.getValue());
            return 1;
        }
        Map<Object, Object> rows = new LinkedHashMap<>(plan.rowsNum);
        for (List<?> row : cur) {
            final IgniteBiTuple t = rowToKeyValue(cctx, row, plan);
            rows.put(t.getKey(), t.getValue());
        }
        streamer.addData(rows);
        return rows.size();
    } else
        throw new IgniteSQLException("Only tuple based INSERT statements are supported in streaming mode", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
}
Also used:
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext)
IgniteBiTuple (org.apache.ignite.lang.IgniteBiTuple)
Prepared (org.h2.command.Prepared)
ArrayList (java.util.ArrayList)
QueryCursorImpl (org.apache.ignite.internal.processors.cache.QueryCursorImpl)
GridQueryCacheObjectsIterator (org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator)
GridQueryFieldsResult (org.apache.ignite.internal.processors.query.GridQueryFieldsResult)
LinkedHashMap (java.util.LinkedHashMap)
GridBoundedConcurrentLinkedHashMap (org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap)
IgniteCheckedException (org.apache.ignite.IgniteCheckedException)
IgniteException (org.apache.ignite.IgniteException)
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException)
IgniteSingletonIterator (org.apache.ignite.internal.util.lang.IgniteSingletonIterator)
Iterator (java.util.Iterator)
List (java.util.List)
BinaryObject (org.apache.ignite.binary.BinaryObject)
UpdatePlan (org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan)
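
streamUpdateQuery is internal. At the public API level the same effect, turning INSERT rows into key/value pairs and feeding them to a streamer, looks roughly like the sketch below; the cache name and key/value types are assumptions for illustration:

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;

public class StreamerDemo {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("people");
            try (IgniteDataStreamer<Integer, String> streamer =
                     ignite.dataStreamer("people")) {
                // Equivalent in spirit to streaming
                // INSERT INTO people (_key, _val) VALUES (?, ?)
                streamer.addData(1, "Alice");
                streamer.addData(2, "Bob");
            } // close() flushes the remaining buffered entries
        }
    }
}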

Example 4 with In

Use of org.h2.dev.util.BinaryArithmeticStream.In in project ignite by apache.

The class IgniteH2Indexing, method queryDistributedSqlFields.

/** {@inheritDoc} */
@Override
public FieldsQueryCursor<List<?>> queryDistributedSqlFields(String schemaName, SqlFieldsQuery qry, boolean keepBinary, GridQueryCancel cancel, @Nullable Integer mainCacheId) {
    final String sqlQry = qry.getSql();
    Connection c = connectionForSchema(schemaName);
    final boolean enforceJoinOrder = qry.isEnforceJoinOrder();
    final boolean distributedJoins = qry.isDistributedJoins();
    final boolean grpByCollocated = qry.isCollocated();
    final DistributedJoinMode distributedJoinMode = distributedJoinMode(qry.isLocal(), distributedJoins);
    GridCacheTwoStepQuery twoStepQry = null;
    List<GridQueryFieldMetadata> meta;
    final H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, sqlQry, grpByCollocated, distributedJoins, enforceJoinOrder, qry.isLocal());
    H2TwoStepCachedQuery cachedQry = twoStepCache.get(cachedQryKey);
    if (cachedQry != null) {
        twoStepQry = cachedQry.query().copy();
        meta = cachedQry.meta();
    } else {
        final UUID locNodeId = ctx.localNodeId();
        // Here we will just parse the statement, no need to optimize it at all.
        H2Utils.setupConnection(c, /*distributedJoins*/ false, /*enforceJoinOrder*/ true);
        GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, 0, PREPARE).distributedJoinMode(distributedJoinMode));
        PreparedStatement stmt = null;
        Prepared prepared;
        boolean cachesCreated = false;
        try {
            try {
                while (true) {
                    try {
                        // Do not cache this statement because the whole query object will be cached later on.
                        stmt = prepareStatement(c, sqlQry, false);
                        break;
                    } catch (SQLException e) {
                        if (!cachesCreated && (e.getErrorCode() == ErrorCode.SCHEMA_NOT_FOUND_1 || e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 || e.getErrorCode() == ErrorCode.INDEX_NOT_FOUND_1)) {
                            try {
                                ctx.cache().createMissingQueryCaches();
                            } catch (IgniteCheckedException ignored) {
                                throw new CacheException("Failed to create missing caches.", e);
                            }
                            cachesCreated = true;
                        } else
                            throw new IgniteSQLException("Failed to parse query: " + sqlQry, IgniteQueryErrorCode.PARSING, e);
                    }
                }
                prepared = GridSqlQueryParser.prepared(stmt);
                if (qry instanceof JdbcSqlFieldsQuery && ((JdbcSqlFieldsQuery) qry).isQuery() != prepared.isQuery())
                    throw new IgniteSQLException("Given statement type does not match that declared by JDBC driver", IgniteQueryErrorCode.STMT_TYPE_MISMATCH);
                if (prepared.isQuery()) {
                    bindParameters(stmt, F.asList(qry.getArgs()));
                    twoStepQry = GridSqlQuerySplitter.split((JdbcPreparedStatement) stmt, qry.getArgs(), grpByCollocated, distributedJoins, enforceJoinOrder, this);
                    assert twoStepQry != null;
                }
            } finally {
                GridH2QueryContext.clearThreadLocal();
            }
            // It is a DML statement if we did not create a twoStepQuery.
            if (twoStepQry == null) {
                if (DmlStatementsProcessor.isDmlStatement(prepared)) {
                    try {
                        return dmlProc.updateSqlFieldsDistributed(schemaName, stmt, qry, cancel);
                    } catch (IgniteCheckedException e) {
                        throw new IgniteSQLException("Failed to execute DML statement [stmt=" + sqlQry + ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e);
                    }
                }
                if (DdlStatementsProcessor.isDdlStatement(prepared)) {
                    try {
                        return ddlProc.runDdlStatement(sqlQry, stmt);
                    } catch (IgniteCheckedException e) {
                        throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + sqlQry + ']', e);
                    }
                }
            }
            LinkedHashSet<Integer> caches0 = new LinkedHashSet<>();
            assert twoStepQry != null;
            int tblCnt = twoStepQry.tablesCount();
            if (mainCacheId != null)
                caches0.add(mainCacheId);
            if (tblCnt > 0) {
                for (QueryTable tblKey : twoStepQry.tables()) {
                    GridH2Table tbl = dataTable(tblKey);
                    int cacheId = CU.cacheId(tbl.cacheName());
                    caches0.add(cacheId);
                }
            }
            if (caches0.isEmpty())
                twoStepQry.local(true);
            else {
                // Prohibit using indexes with different numbers of segments in the same query.
                List<Integer> cacheIds = new ArrayList<>(caches0);
                checkCacheIndexSegmentation(cacheIds);
                twoStepQry.cacheIds(cacheIds);
                twoStepQry.local(qry.isLocal());
            }
            meta = H2Utils.meta(stmt.getMetaData());
        } catch (IgniteCheckedException e) {
            throw new CacheException("Failed to bind parameters: [qry=" + sqlQry + ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e);
        } catch (SQLException e) {
            throw new IgniteSQLException(e);
        } finally {
            U.close(stmt, log);
        }
    }
    if (log.isDebugEnabled())
        log.debug("Parsed query: `" + sqlQry + "` into two step query: " + twoStepQry);
    twoStepQry.pageSize(qry.getPageSize());
    if (cancel == null)
        cancel = new GridQueryCancel();
    int[] partitions = qry.getPartitions();
    if (partitions == null && twoStepQry.derivedPartitions() != null) {
        try {
            partitions = calculateQueryPartitions(twoStepQry.derivedPartitions(), qry.getArgs());
        } catch (IgniteCheckedException e) {
            throw new CacheException("Failed to calculate derived partitions: [qry=" + sqlQry + ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e);
        }
    }
    QueryCursorImpl<List<?>> cursor = new QueryCursorImpl<>(
        runQueryTwoStep(schemaName, twoStepQry, keepBinary, enforceJoinOrder,
            qry.getTimeout(), cancel, qry.getArgs(), partitions),
        cancel);
    cursor.fieldsMeta(meta);
    if (cachedQry == null && !twoStepQry.explain()) {
        cachedQry = new H2TwoStepCachedQuery(meta, twoStepQry.copy());
        twoStepCache.putIfAbsent(cachedQryKey, cachedQry);
    }
    return cursor;
}
Also used:
LinkedHashSet (java.util.LinkedHashSet)
SQLException (java.sql.SQLException)
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException)
CacheException (javax.cache.CacheException)
Prepared (org.h2.command.Prepared)
ArrayList (java.util.ArrayList)
GridCacheTwoStepQuery (org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery)
GridQueryFieldMetadata (org.apache.ignite.internal.processors.query.GridQueryFieldMetadata)
IgniteSystemProperties.getString (org.apache.ignite.IgniteSystemProperties.getString)
IgniteCheckedException (org.apache.ignite.IgniteCheckedException)
GridH2Table (org.apache.ignite.internal.processors.query.h2.opt.GridH2Table)
List (java.util.List)
UUID (java.util.UUID)
JdbcPreparedStatement (org.h2.jdbc.JdbcPreparedStatement)
GridH2QueryContext (org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext)
Connection (java.sql.Connection)
PreparedStatement (java.sql.PreparedStatement)
QueryCursorImpl (org.apache.ignite.internal.processors.cache.QueryCursorImpl)
QueryTable (org.apache.ignite.internal.processors.cache.query.QueryTable)
IgniteSystemProperties.getInteger (org.apache.ignite.IgniteSystemProperties.getInteger)
DistributedJoinMode (org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode)
GridQueryCancel (org.apache.ignite.internal.processors.query.GridQueryCancel)
JdbcSqlFieldsQuery (org.apache.ignite.internal.jdbc2.JdbcSqlFieldsQuery)
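
The flags read at the top of this method come straight from the public SqlFieldsQuery API. A minimal sketch of a caller, with a hypothetical cache of indexed Integer/String pairs:

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class DistributedQueryDemo {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> cfg = new CacheConfiguration<>("demo");
            cfg.setIndexedTypes(Integer.class, String.class);
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(cfg);
            cache.put(1, "Alice");

            SqlFieldsQuery qry = new SqlFieldsQuery("SELECT _key, _val FROM String")
                .setDistributedJoins(true)   // distributedJoins above
                .setEnforceJoinOrder(false)  // enforceJoinOrder above
                .setCollocated(false);       // grpByCollocated above
            for (List<?> row : cache.query(qry).getAll())
                System.out.println(row);
        }
    }
}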

Example 5 with In

Use of org.h2.dev.util.BinaryArithmeticStream.In in project ignite by apache.

The class GridSqlQuerySplitter, method split.

/**
 * @param stmt Prepared statement.
 * @param params Parameters.
 * @param collocatedGrpBy Whether the query has collocated GROUP BY keys.
 * @param distributedJoins Whether distributed joins are enabled.
 * @param enforceJoinOrder Whether to enforce join order.
 * @param h2 Indexing.
 * @return Two-step query.
 * @throws SQLException If failed.
 * @throws IgniteCheckedException If failed.
 */
public static GridCacheTwoStepQuery split(JdbcPreparedStatement stmt, Object[] params, boolean collocatedGrpBy, boolean distributedJoins, boolean enforceJoinOrder, IgniteH2Indexing h2) throws SQLException, IgniteCheckedException {
    if (params == null)
        params = GridCacheSqlQuery.EMPTY_PARAMS;
    // Here we will just do initial query parsing. Do not use optimized
    // subqueries because we do not have unique FROM aliases yet.
    GridSqlQuery qry = parse(prepared(stmt), false);
    String originalSql = qry.getSQL();
    final boolean explain = qry.explain();
    qry.explain(false);
    GridSqlQuerySplitter splitter = new GridSqlQuerySplitter(params, collocatedGrpBy, h2.kernalContext());
    // Normalization will generate unique aliases for all the table filters in FROM.
    // Also it will collect all tables and schemas from the query.
    splitter.normalizeQuery(qry);
    Connection conn = stmt.getConnection();
    // Here we will have correct normalized AST with optimized join order.
    // The distributedJoins parameter is ignored because it is not relevant for
    // the REDUCE query optimization.
    qry = parse(optimize(h2, conn, qry.getSQL(), params, false, enforceJoinOrder), true);
    // Do the actual query split. We will update the original query AST, need to be careful.
    splitter.splitQuery(qry);
    // We must have at least one map query.
    assert !F.isEmpty(splitter.mapSqlQrys) : "map";
    // We must have a reduce query.
    assert splitter.rdcSqlQry != null : "rdc";
    // If distributed joins are on, check whether they are actually needed;
    // if all MAP queries turn out to be collocated, we can switch them off.
    if (distributedJoins) {
        boolean allCollocated = true;
        for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) {
            Prepared prepared = optimize(h2, conn, mapSqlQry.query(), mapSqlQry.parameters(params), true, enforceJoinOrder);
            allCollocated &= isCollocated((Query) prepared);
            mapSqlQry.query(parse(prepared, true).getSQL());
        }
        // We do not need distributed joins if all MAP queries are collocated.
        if (allCollocated)
            distributedJoins = false;
    }
    // Setup resulting two step query and return it.
    GridCacheTwoStepQuery twoStepQry = new GridCacheTwoStepQuery(originalSql, splitter.tbls);
    twoStepQry.reduceQuery(splitter.rdcSqlQry);
    for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) twoStepQry.addMapQuery(mapSqlQry);
    twoStepQry.skipMergeTable(splitter.rdcQrySimple);
    twoStepQry.explain(explain);
    twoStepQry.distributedJoins(distributedJoins);
    // All map queries must have non-empty derivedPartitions to use this feature.
    twoStepQry.derivedPartitions(mergePartitionsFromMultipleQueries(twoStepQry.mapQueries()));
    return twoStepQry;
}
Also used:
GridCacheTwoStepQuery (org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery)
GridCacheSqlQuery (org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery)
Query (org.h2.command.dml.Query)
Connection (java.sql.Connection)
Prepared (org.h2.command.Prepared)
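
To make the MAP/REDUCE terminology concrete, here is an illustrative sketch (not Ignite API, and the exact generated SQL is version-dependent) of how an aggregate query might be split:

public class SplitIllustration {
    public static void main(String[] args) {
        // Original query, as submitted by the user.
        String original = "SELECT dept, AVG(salary) FROM Emp GROUP BY dept";
        // MAP query: runs on every data node over its local partitions.
        String map = "SELECT dept, SUM(salary) AS s, COUNT(salary) AS c FROM Emp GROUP BY dept";
        // REDUCE query: runs once over the merged MAP results.
        String reduce = "SELECT dept, SUM(s) / SUM(c) FROM merged GROUP BY dept";
        System.out.println(original + "\n  MAP: " + map + "\n  REDUCE: " + reduce);
    }
}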

Aggregations

SQLException (java.sql.SQLException): 63
Connection (java.sql.Connection): 59
DbException (org.h2.message.DbException): 56
PreparedStatement (java.sql.PreparedStatement): 54
ResultSet (java.sql.ResultSet): 47
Statement (java.sql.Statement): 44
Value (org.h2.value.Value): 40
IOException (java.io.IOException): 39
ByteArrayInputStream (java.io.ByteArrayInputStream): 30
InputStream (java.io.InputStream): 29
Column (org.h2.table.Column): 24
ArrayList (java.util.ArrayList): 23
SimpleResultSet (org.h2.tools.SimpleResultSet): 23
Random (java.util.Random): 19
Expression (org.h2.expression.Expression): 18
JdbcConnection (org.h2.jdbc.JdbcConnection): 18
Index (org.h2.index.Index): 16
ValueString (org.h2.value.ValueString): 16
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 15
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException): 15