Search in sources:

Example 1 with DatabaseProduct

use of org.apache.hadoop.hive.metastore.DatabaseProduct in project hive by apache.

the class TestTxnUtils method testCustomRDBMS.

/**
 * Verifies that a user-supplied {@code DatabaseProduct} implementation (DummyCustomRDBMS)
 * is picked up when USE_CUSTOM_RDBMS is enabled, and that its overridden schema postfix,
 * DB-time statement and configuration values are exposed through the resolved product.
 */
@Test
public void testCustomRDBMS() throws Exception {
    MetastoreConf.setBoolVar(conf, ConfVars.USE_CUSTOM_RDBMS, true);
    MetastoreConf.setVar(conf, ConfVars.CUSTOM_RDBMS_CLASSNAME, DummyCustomRDBMS.class.getName());
    try {
        // reset() clears the cached singleton so the custom class is re-resolved from conf.
        DatabaseProduct.reset();
        DatabaseProduct db = DatabaseProduct.determineDatabaseProduct(DatabaseProduct.UNDEFINED_NAME, conf);
        // JUnit convention: expected value first, actual second — otherwise failure
        // messages report the values backwards.
        Assert.assertEquals("DummyPostfix", db.getHiveSchemaPostfix());
        Assert.assertEquals("values current_timestamp", db.getDBTime());
        Configuration c = db.getConf();
        Assert.assertEquals("DummyValue", c.get("DummyKey"));
    } finally {
        // Restore defaults even when an assertion fails, so the custom-RDBMS settings
        // cannot leak into subsequent tests sharing this conf.
        MetastoreConf.setBoolVar(conf, ConfVars.USE_CUSTOM_RDBMS, false);
        MetastoreConf.setVar(conf, ConfVars.CUSTOM_RDBMS_CLASSNAME, "");
    }
}
Also used : DummyCustomRDBMS(org.apache.hadoop.hive.metastore.DummyCustomRDBMS) Configuration(org.apache.hadoop.conf.Configuration) DatabaseProduct(org.apache.hadoop.hive.metastore.DatabaseProduct) Test(org.junit.Test) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest)

Example 2 with DatabaseProduct

use of org.apache.hadoop.hive.metastore.DatabaseProduct in project hive by apache.

the class TestTxnDbUtil method prepDb.

/**
 * Prepares the metastore database for unit tests.
 * Runs the latest init schema against the database configured in the CONNECT_URL_KEY param.
 * Ignores any duplication (table, index etc.) So it can be called multiple times for the same database.
 * @param conf Metastore configuration
 * @throws Exception Initialization failure
 */
/**
 * Prepares the metastore database for unit tests.
 * Runs the latest init schema against the database configured in the CONNECT_URL_KEY param.
 * Ignores any duplication (table, index etc.) so it can be called multiple times for the same database.
 * On deadlock ({@link SQLTransactionRollbackException}) it retries by recursing up to 5 times;
 * the static {@code deadlockCnt} accumulates down the recursion (each increment happens before
 * the recursive call) and the nested {@code finally} blocks reset it only as the chain unwinds.
 * @param conf Metastore configuration
 * @throws Exception Initialization failure
 */
public static synchronized void prepDb(Configuration conf) throws Exception {
    LOG.info("Creating transactional tables");
    Connection conn = null;
    Statement stmt = null;
    try {
        conn = getConnection(conf);
        String productName = conn.getMetaData().getDatabaseProductName();
        DatabaseProduct dbProduct = determineDatabaseProduct(productName, conf);
        stmt = conn.createStatement();
        if (checkDbPrepared(stmt)) {
            // Schema already initialized; nothing to do.
            return;
        }
        String schemaRootPath = getSchemaRootPath();
        IMetaStoreSchemaInfo metaStoreSchemaInfo =
            MetaStoreSchemaInfoFactory.get(conf, schemaRootPath, dbProduct.getHiveSchemaPostfix());
        String initFile = metaStoreSchemaInfo.generateInitFileName(null);
        try (InputStream is = new FileInputStream(
                metaStoreSchemaInfo.getMetaStoreScriptDir() + File.separator + initFile)) {
            LOG.info("Reinitializing the metastore db with {} on the database {}", initFile,
                MetastoreConf.getVar(conf, ConfVars.CONNECT_URL_KEY));
            importSQL(stmt, is);
        }
    } catch (SQLException e) {
        try {
            if (conn != null) {
                conn.rollback();
            }
        } catch (SQLException re) {
            // Pass the exception object so the stack trace is preserved, instead of
            // logging only re.getMessage().
            LOG.error("Error rolling back", re);
        }
        // This might be a deadlock, if so, let's retry
        if (e instanceof SQLTransactionRollbackException && deadlockCnt++ < 5) {
            LOG.warn("Caught deadlock, retrying db creation");
            prepDb(conf);
        } else {
            throw e;
        }
    } finally {
        deadlockCnt = 0;
        closeResources(conn, stmt, null);
    }
}
Also used : SQLTransactionRollbackException(java.sql.SQLTransactionRollbackException) SQLException(java.sql.SQLException) PreparedStatement(java.sql.PreparedStatement) Statement(java.sql.Statement) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) Connection(java.sql.Connection) IMetaStoreSchemaInfo(org.apache.hadoop.hive.metastore.IMetaStoreSchemaInfo) FileInputStream(java.io.FileInputStream) DatabaseProduct(org.apache.hadoop.hive.metastore.DatabaseProduct) DatabaseProduct.determineDatabaseProduct(org.apache.hadoop.hive.metastore.DatabaseProduct.determineDatabaseProduct)

Example 3 with DatabaseProduct

use of org.apache.hadoop.hive.metastore.DatabaseProduct in project hive by apache.

the class TestTxnDbUtil method truncateTable.

/**
 * Empties a single metastore table using the product-specific "truncate" statement
 * (actually a delete-style statement — see comment below about foreign keys).
 * A table that does not exist is treated as success so partially created schemas clean up fine.
 * @param conn open JDBC connection, used only to read the database product name
 * @param conf Metastore configuration passed through to product resolution
 * @param stmt statement on which the truncate is executed
 * @param name unqualified table name to empty
 * @return true if the table was emptied or does not exist, false on any other failure
 */
private static boolean truncateTable(Connection conn, Configuration conf, Statement stmt, String name) {
    try {
        String dbProduct = conn.getMetaData().getDatabaseProductName();
        DatabaseProduct databaseProduct = determineDatabaseProduct(dbProduct, conf);
        try {
            // We can not use actual truncate due to some foreign keys, but we don't expect much data during tests
            String s = databaseProduct.getTruncateStatement(name);
            stmt.execute(s);
            // Parameterized SLF4J logging avoids string concatenation when debug is disabled.
            LOG.debug("Successfully truncated table {}", name);
            return true;
        } catch (SQLException e) {
            if (databaseProduct.isTableNotExistsError(e)) {
                LOG.debug("Not truncating {} because it doesn't exist", name);
                return true;
            }
            LOG.error("Unable to truncate table " + name, e);
        }
    } catch (SQLException e) {
        LOG.error("Unable to determine database product", e);
    }
    return false;
}
Also used : SQLException(java.sql.SQLException) DatabaseProduct(org.apache.hadoop.hive.metastore.DatabaseProduct) DatabaseProduct.determineDatabaseProduct(org.apache.hadoop.hive.metastore.DatabaseProduct.determineDatabaseProduct)

Example 4 with DatabaseProduct

use of org.apache.hadoop.hive.metastore.DatabaseProduct in project hive by apache.

the class TestTxnDbUtil method getConnection.

/**
 * Opens a JDBC connection to the metastore database described by the configuration,
 * enables auto-commit, and runs the product-specific transaction-prep statement if one exists.
 * @param conf Metastore configuration supplying driver class, connection URL and credentials
 * @return an open, auto-committing JDBC connection; the caller is responsible for closing it
 * @throws Exception if the driver cannot be loaded/instantiated or the connection fails
 */
public static Connection getConnection(Configuration conf) throws Exception {
    String jdbcDriver = MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER);
    // Class.newInstance() is deprecated since Java 9 (it rethrows constructor exceptions
    // unwrapped); getDeclaredConstructor().newInstance() is the supported replacement.
    Driver driver = (Driver) Class.forName(jdbcDriver).getDeclaredConstructor().newInstance();
    Properties prop = new Properties();
    String driverUrl = MetastoreConf.getVar(conf, ConfVars.CONNECT_URL_KEY);
    String user = MetastoreConf.getVar(conf, ConfVars.CONNECTION_USER_NAME);
    String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
    prop.setProperty("user", user);
    prop.setProperty("password", passwd);
    Connection conn = driver.connect(driverUrl, prop);
    conn.setAutoCommit(true);
    // Some products need a session-level statement before transactional work can run.
    DatabaseProduct dbProduct = determineDatabaseProduct(conn.getMetaData().getDatabaseProductName(), conf);
    String initSql = dbProduct.getPrepareTxnStmt();
    if (initSql != null) {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute(initSql);
        }
    }
    return conn;
}
Also used : PreparedStatement(java.sql.PreparedStatement) Statement(java.sql.Statement) Connection(java.sql.Connection) Driver(java.sql.Driver) Properties(java.util.Properties) DatabaseProduct(org.apache.hadoop.hive.metastore.DatabaseProduct) DatabaseProduct.determineDatabaseProduct(org.apache.hadoop.hive.metastore.DatabaseProduct.determineDatabaseProduct)

Example 5 with DatabaseProduct

use of org.apache.hadoop.hive.metastore.DatabaseProduct in project hive by apache.

the class TestTxnDbUtil method cleanDb.

/**
 * Empties all transactional metastore tables and re-seeds the sequence tables, so the
 * database configured in conf is reset between tests without re-running the init schema.
 * @param conf Metastore configuration
 * @throws Exception if a connection cannot be obtained
 * @throws RuntimeException if any table could not be cleaned
 */
public static void cleanDb(Configuration conf) throws Exception {
    LOG.info("Cleaning transactional tables");
    // All txn-related tables to empty. truncateTable tolerates missing tables and
    // foreign keys (it uses a delete-style statement), so order is not significant.
    final String[] txnTables = {
        "TXN_COMPONENTS", "COMPLETED_TXN_COMPONENTS", "TXNS", "TXN_TO_WRITE_ID",
        "NEXT_WRITE_ID", "HIVE_LOCKS", "NEXT_LOCK_ID", "COMPACTION_QUEUE",
        "NEXT_COMPACTION_QUEUE_ID", "COMPLETED_COMPACTIONS", "AUX_TABLE", "WRITE_SET",
        "REPL_TXN_MAP", "MATERIALIZATION_REBUILD_LOCKS", "MIN_HISTORY_LEVEL",
        "COMPACTION_METRICS_CACHE"
    };
    boolean success = true;
    Connection conn = null;
    Statement stmt = null;
    try {
        conn = getConnection(conf);
        stmt = conn.createStatement();
        if (!checkDbPrepared(stmt)) {
            // Nothing to clean
            return;
        }
        // We want to try all of these, whether they succeed or fail.
        for (String table : txnTables) {
            success &= truncateTable(conn, conf, stmt, table);
        }
        try {
            String dbProduct = conn.getMetaData().getDatabaseProductName();
            DatabaseProduct databaseProduct = determineDatabaseProduct(dbProduct, conf);
            try {
                // Re-seed the sequence tables that the truncation above emptied.
                resetTxnSequence(databaseProduct, stmt);
                stmt.executeUpdate("INSERT INTO \"NEXT_LOCK_ID\" VALUES(1)");
                stmt.executeUpdate("INSERT INTO \"NEXT_COMPACTION_QUEUE_ID\" VALUES(1)");
            } catch (SQLException e) {
                if (!databaseProduct.isTableNotExistsError(e)) {
                    LOG.error("Error initializing sequence values", e);
                    success = false;
                }
            }
        } catch (SQLException e) {
            LOG.error("Unable to determine database product", e);
            success = false;
        }
    /*
       * Don't drop NOTIFICATION_LOG, SEQUENCE_TABLE and NOTIFICATION_SEQUENCE as they are used
       * by other tables, which are not txn related, to generate primary keys. So if these tables
       * were dropped while the other tables are not, inserting into those other tables would
       * produce duplicate-key errors.
       */
    } finally {
        closeResources(conn, stmt, null);
    }
    if (!success) {
        throw new RuntimeException("Failed to clean up txn tables");
    }
}
Also used : SQLException(java.sql.SQLException) PreparedStatement(java.sql.PreparedStatement) Statement(java.sql.Statement) Connection(java.sql.Connection) DatabaseProduct(org.apache.hadoop.hive.metastore.DatabaseProduct) DatabaseProduct.determineDatabaseProduct(org.apache.hadoop.hive.metastore.DatabaseProduct.determineDatabaseProduct)

Aggregations

DatabaseProduct (org.apache.hadoop.hive.metastore.DatabaseProduct)8 DatabaseProduct.determineDatabaseProduct (org.apache.hadoop.hive.metastore.DatabaseProduct.determineDatabaseProduct)5 SQLException (java.sql.SQLException)4 Connection (java.sql.Connection)3 PreparedStatement (java.sql.PreparedStatement)3 Statement (java.sql.Statement)3 Map (java.util.Map)2 Properties (java.util.Properties)2 HikariConfig (com.zaxxer.hikari.HikariConfig)1 HikariDataSource (com.zaxxer.hikari.HikariDataSource)1 FileInputStream (java.io.FileInputStream)1 InputStream (java.io.InputStream)1 Driver (java.sql.Driver)1 SQLTransactionRollbackException (java.sql.SQLTransactionRollbackException)1 BasicDataSource (org.apache.commons.dbcp2.BasicDataSource)1 ConnectionFactory (org.apache.commons.dbcp2.ConnectionFactory)1 DataSourceConnectionFactory (org.apache.commons.dbcp2.DataSourceConnectionFactory)1 PoolableConnectionFactory (org.apache.commons.dbcp2.PoolableConnectionFactory)1 PoolingDataSource (org.apache.commons.dbcp2.PoolingDataSource)1 GenericObjectPool (org.apache.commons.pool2.impl.GenericObjectPool)1