Example 26 with Connection

Use of java.sql.Connection in project hive by apache.

From class TxnDbUtil, method countLockComponents.

/**
   * A tool to count the number of partitions, tables,
   * and databases locked by a particular lockId.
   *
   * @param lockId lock id to look for lock components
   *
   * @return number of components, or 0 if there is no lock
   */
public static int countLockComponents(long lockId) throws Exception {
    Connection conn = null;
    PreparedStatement stmt = null;
    ResultSet rs = null;
    try {
        conn = getConnection();
        stmt = conn.prepareStatement("SELECT count(*) FROM hive_locks WHERE hl_lock_ext_id = ?");
        stmt.setLong(1, lockId);
        rs = stmt.executeQuery();
        if (!rs.next()) {
            return 0;
        }
        return rs.getInt(1);
    } finally {
        closeResources(conn, stmt, rs);
    }
}
Also used: Connection (java.sql.Connection), ResultSet (java.sql.ResultSet), PreparedStatement (java.sql.PreparedStatement)
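
A minimal sketch of how a test might call this helper, assuming TxnDbUtil lives in org.apache.hadoop.hive.metastore.txn as in this example; the lockId value is a placeholder for the id returned when the lock was granted:

import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;

public class LockComponentCheck {
    public static void main(String[] args) throws Exception {
        // Placeholder id; in a real test this comes from the LockResponse
        // returned when the lock was requested.
        long lockId = 1L;
        int components = TxnDbUtil.countLockComponents(lockId);
        if (components == 0) {
            System.out.println("No lock components found for id " + lockId);
        } else {
            System.out.println("Lock " + lockId + " covers " + components + " component(s)");
        }
    }
}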

Example 27 with Connection

Use of java.sql.Connection in project hive by apache.

From class CompactionTxnHandler, method revokeTimedoutWorkers.

/**
   * Restores to the initiated state all compaction queue entries that are assigned to a worker
   * but have exceeded the timeout. This should be called by the initiator on start up and
   * occasionally while running to clean up after dead threads. At start up,
   * {@link #revokeFromLocalWorkers(String)} should be called first.
   * @param timeout number of milliseconds since start time that should elapse before a worker is
   *                declared dead.
   */
@Override
@RetrySemantics.Idempotent
public void revokeTimedoutWorkers(long timeout) throws MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            long latestValidStart = getDbTime(dbConn) - timeout;
            stmt = dbConn.createStatement();
            String s = "update COMPACTION_QUEUE set cq_worker_id = null, cq_start = null, cq_state = '" + INITIATED_STATE + "' where cq_state = '" + WORKING_STATE + "' and cq_start < " + latestValidStart;
            LOG.debug("Going to execute update <" + s + ">");
            // It isn't an error if the following returns no rows, as the local workers could have died
            // with nothing assigned to them.
            stmt.executeUpdate(s);
            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.error("Unable to change dead worker's records back to initiated state " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "revokeTimedoutWorkers(timeout:" + timeout + ")");
            throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeStmt(stmt);
            closeDbConn(dbConn);
        }
    } catch (RetryException e) {
        revokeTimedoutWorkers(timeout);
    }
}
Also used: SQLException (java.sql.SQLException), PreparedStatement (java.sql.PreparedStatement), Statement (java.sql.Statement), Connection (java.sql.Connection), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
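
A sketch of the start-up ordering the Javadoc prescribes. The handler wiring and the timeout value are illustrative assumptions, not values from the source:

import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler;

public class InitiatorStartup {
    // Reclaims compactions abandoned by dead workers, in the order the
    // Javadoc above recommends: local workers first, then timed-out ones.
    static void recoverDeadWorkers(CompactionTxnHandler txnHandler, String localHostName)
            throws MetaException {
        // Workers on this host are certainly dead after a restart.
        txnHandler.revokeFromLocalWorkers(localHostName);
        // Anything still marked WORKING past the timeout is presumed dead.
        long timeoutMs = 86400000L; // 24 hours; an illustrative value
        txnHandler.revokeTimedoutWorkers(timeoutMs);
    }
}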

Example 28 with Connection

Use of java.sql.Connection in project hive by apache.

From class CompactionTxnHandler, method markFailed.

/**
   * If there is an entry in COMPACTION_QUEUE with ci.id, removes it and makes an entry in
   * COMPLETED_COMPACTIONS with status 'f' (failed). If there is no entry in COMPACTION_QUEUE,
   * the Initiator failed to even schedule a compaction, which is recorded as an ATTEMPTED_STATE
   * entry in history.
   */
@Override
@RetrySemantics.CannotRetry
public void markFailed(CompactionInfo ci) throws MetaException {
    //todo: this should take "comment" as parameter to set in CC_META_INFO to provide some context for the failure
    try {
        Connection dbConn = null;
        Statement stmt = null;
        PreparedStatement pStmt = null;
        ResultSet rs = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();
            rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = " + ci.id);
            if (rs.next()) {
                ci = CompactionInfo.loadFullFromCompactionQueue(rs);
                String s = "delete from COMPACTION_QUEUE where cq_id = " + ci.id;
                LOG.debug("Going to execute update <" + s + ">");
                int updCnt = stmt.executeUpdate(s);
            } else {
                if (ci.id > 0) {
                    //the record with valid CQ_ID has disappeared - this is a sign of something wrong
                    throw new IllegalStateException("No record with CQ_ID=" + ci.id + " found in COMPACTION_QUEUE");
                }
            }
            if (ci.id == 0) {
                //The failure occurred before we even made an entry in COMPACTION_QUEUE
                //generate ID so that we can make an entry in COMPLETED_COMPACTIONS
                ci.id = generateCompactionQueueId(stmt);
                //mostly this indicates that the Initiator is paying attention to some table even though
                //compactions are not happening.
                ci.state = ATTEMPTED_STATE;
                //this is not strictly accurate, but 'type' cannot be null.
                if (ci.type == null) {
                    ci.type = CompactionType.MINOR;
                }
                ci.start = getDbTime(dbConn);
            } else {
                ci.state = FAILED_STATE;
            }
            close(rs, stmt, null);
            pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
            CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn));
            int updCount = pStmt.executeUpdate();
            LOG.debug("Going to commit");
            closeStmt(pStmt);
            dbConn.commit();
        } catch (SQLException e) {
            LOG.warn("markFailed(" + ci.id + "):" + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            try {
                checkRetryable(dbConn, e, "markFailed(" + ci + ")");
            } catch (MetaException ex) {
                LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(ex));
            }
            LOG.error("markFailed(" + ci + ") failed: " + e.getMessage(), e);
        } finally {
            close(rs, stmt, null);
            close(null, pStmt, dbConn);
        }
    } catch (RetryException e) {
        markFailed(ci);
    }
}
Also used: SQLException (java.sql.SQLException), PreparedStatement (java.sql.PreparedStatement), Statement (java.sql.Statement), Connection (java.sql.Connection), ResultSet (java.sql.ResultSet), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
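
A hedged sketch of the two caller paths implied by the branch on ci.id above; the surrounding Initiator/Worker context is assumed, not shown in the source:

import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
import org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler;

public class FailureRecording {
    static void recordFailure(CompactionTxnHandler txnHandler, CompactionInfo ci)
            throws MetaException {
        // ci.id > 0: a queued compaction failed in a worker; its COMPACTION_QUEUE
        // row is moved to COMPLETED_COMPACTIONS with FAILED_STATE.
        // ci.id == 0: the Initiator failed before enqueueing anything; an id is
        // generated and an ATTEMPTED_STATE history entry is written instead.
        txnHandler.markFailed(ci);
    }
}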

Example 29 with Connection

Use of java.sql.Connection in project hive by apache.

From class CompactionTxnHandler, method findPotentialCompactions.

/**
   * Looks through the COMPLETED_TXN_COMPONENTS table for partitions or tables that may be ready
   * for compaction, and through the TXNS and TXN_COMPONENTS tables for aborted transactions that
   * should be added to the list.
   * @param maxAborted maximum number of aborted transactions to allow before flagging an entity
   *                   as a potential compaction.
   * @return list of CompactionInfo structs. These will not have id, type, or runAs set, since
   * these are only potential compactions, not actual ones.
   */
@Override
@RetrySemantics.ReadOnly
public Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException {
    Connection dbConn = null;
    Set<CompactionInfo> response = new HashSet<CompactionInfo>();
    Statement stmt = null;
    ResultSet rs = null;
    try {
        try {
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();
            // Check for completed transactions
            String s = "select distinct ctc_database, ctc_table, " + "ctc_partition from COMPLETED_TXN_COMPONENTS";
            LOG.debug("Going to execute query <" + s + ">");
            rs = stmt.executeQuery(s);
            while (rs.next()) {
                CompactionInfo info = new CompactionInfo();
                info.dbname = rs.getString(1);
                info.tableName = rs.getString(2);
                info.partName = rs.getString(3);
                response.add(info);
            }
            rs.close();
            // Check for aborted txns
            s = "select tc_database, tc_table, tc_partition " + "from TXNS, TXN_COMPONENTS " + "where txn_id = tc_txnid and txn_state = '" + TXN_ABORTED + "' " + "group by tc_database, tc_table, tc_partition " + "having count(*) > " + maxAborted;
            LOG.debug("Going to execute query <" + s + ">");
            rs = stmt.executeQuery(s);
            while (rs.next()) {
                CompactionInfo info = new CompactionInfo();
                info.dbname = rs.getString(1);
                info.tableName = rs.getString(2);
                info.partName = rs.getString(3);
                info.tooManyAborts = true;
                response.add(info);
            }
            LOG.debug("Going to rollback");
            dbConn.rollback();
        } catch (SQLException e) {
            LOG.error("Unable to connect to transaction database " + e.getMessage());
            checkRetryable(dbConn, e, "findPotentialCompactions(maxAborted:" + maxAborted + ")");
        } finally {
            close(rs, stmt, dbConn);
        }
        return response;
    } catch (RetryException e) {
        return findPotentialCompactions(maxAborted);
    }
}
Also used: SQLException (java.sql.SQLException), PreparedStatement (java.sql.PreparedStatement), Statement (java.sql.Statement), Connection (java.sql.Connection), ResultSet (java.sql.ResultSet), HashSet (java.util.HashSet)
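
A sketch of how an Initiator-style loop might consume the result. Only the fields set above (dbname, tableName, partName, tooManyAborts) are meaningful at this point; the class name and wiring are assumptions:

import java.util.Set;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
import org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler;

public class InitiatorScan {
    static void scanForWork(CompactionTxnHandler txnHandler, int abortedThreshold)
            throws MetaException {
        Set<CompactionInfo> candidates = txnHandler.findPotentialCompactions(abortedThreshold);
        for (CompactionInfo candidate : candidates) {
            // tooManyAborts marks entities over the aborted-txn threshold; the
            // rest simply have completed txn components worth inspecting.
            String entity = candidate.dbname + "." + candidate.tableName
                    + (candidate.partName == null ? "" : "/" + candidate.partName);
            String reason = candidate.tooManyAborts ? "too many aborts" : "recent activity";
            System.out.println(entity + " (" + reason + ")");
        }
    }
}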

Example 30 with Connection

Use of java.sql.Connection in project hive by apache.

From class CompactionTxnHandler, method findReadyToClean.

/**
   * Finds entries in the queue that are ready to be cleaned.
   * @return information on the entries in the queue that are ready for cleaning.
   */
@Override
@RetrySemantics.ReadOnly
public List<CompactionInfo> findReadyToClean() throws MetaException {
    Connection dbConn = null;
    List<CompactionInfo> rc = new ArrayList<CompactionInfo>();
    Statement stmt = null;
    ResultSet rs = null;
    try {
        try {
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();
            String s = "select cq_id, cq_database, cq_table, cq_partition, " + "cq_type, cq_run_as, cq_highest_txn_id from COMPACTION_QUEUE where cq_state = '" + READY_FOR_CLEANING + "'";
            LOG.debug("Going to execute query <" + s + ">");
            rs = stmt.executeQuery(s);
            while (rs.next()) {
                CompactionInfo info = new CompactionInfo();
                info.id = rs.getLong(1);
                info.dbname = rs.getString(2);
                info.tableName = rs.getString(3);
                info.partName = rs.getString(4);
                switch(rs.getString(5).charAt(0)) {
                    case MAJOR_TYPE:
                        info.type = CompactionType.MAJOR;
                        break;
                    case MINOR_TYPE:
                        info.type = CompactionType.MINOR;
                        break;
                    default:
                        throw new MetaException("Unexpected compaction type " + rs.getString(5));
                }
                info.runAs = rs.getString(6);
                info.highestTxnId = rs.getLong(7);
                rc.add(info);
            }
            LOG.debug("Going to rollback");
            dbConn.rollback();
            return rc;
        } catch (SQLException e) {
            LOG.error("Unable to select next element for cleaning, " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "findReadyToClean");
            throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            close(rs, stmt, dbConn);
        }
    } catch (RetryException e) {
        return findReadyToClean();
    }
}
Also used: SQLException (java.sql.SQLException), PreparedStatement (java.sql.PreparedStatement), Statement (java.sql.Statement), Connection (java.sql.Connection), ArrayList (java.util.ArrayList), ResultSet (java.sql.ResultSet), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
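
A minimal sketch of a Cleaner-style consumer; the removeObsoleteFiles helper is hypothetical, standing in for the file-removal work the real Cleaner performs:

import java.util.List;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
import org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler;

public class CleanerPass {
    static void cleanReadyEntries(CompactionTxnHandler txnHandler) throws MetaException {
        // Each entry is a compaction in READY_FOR_CLEANING state, with id, type,
        // runAs, and highestTxnId populated by the query above.
        List<CompactionInfo> ready = txnHandler.findReadyToClean();
        for (CompactionInfo ci : ready) {
            removeObsoleteFiles(ci);
        }
    }

    // Hypothetical placeholder for the actual file cleanup.
    static void removeObsoleteFiles(CompactionInfo ci) {
        System.out.println("would clean " + ci.dbname + "." + ci.tableName);
    }
}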

Aggregations

Connection (java.sql.Connection): 6326
PreparedStatement (java.sql.PreparedStatement): 2793
ResultSet (java.sql.ResultSet): 2657
Test (org.junit.Test): 2455
SQLException (java.sql.SQLException): 2267
Properties (java.util.Properties): 1188
Statement (java.sql.Statement): 1078
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 689
ArrayList (java.util.ArrayList): 397
BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest): 232
DataSource (javax.sql.DataSource): 211
BaseTest (org.apache.phoenix.query.BaseTest): 201
CallableStatement (java.sql.CallableStatement): 192
IOException (java.io.IOException): 158
Reader (java.io.Reader): 144
DatabaseMetaData (java.sql.DatabaseMetaData): 144
SqlSessionFactoryBuilder (org.apache.ibatis.session.SqlSessionFactoryBuilder): 134
HashMap (java.util.HashMap): 123
ScriptRunner (org.apache.ibatis.jdbc.ScriptRunner): 114
Timestamp (java.sql.Timestamp): 113