Use of org.apache.hadoop.hive.metastore.DatabaseProduct in the Apache Hive project: class TestTxnUtils, method testCustomRDBMS.
/**
 * Verifies that a custom RDBMS implementation can be plugged in via
 * {@code CUSTOM_RDBMS_CLASSNAME} and is picked up by
 * {@code DatabaseProduct.determineDatabaseProduct} after a reset.
 */
@Test
public void testCustomRDBMS() throws Exception {
    MetastoreConf.setBoolVar(conf, ConfVars.USE_CUSTOM_RDBMS, true);
    MetastoreConf.setVar(conf, ConfVars.CUSTOM_RDBMS_CLASSNAME, DummyCustomRDBMS.class.getName());
    // Clear the cached product so the custom class name is re-read from conf.
    DatabaseProduct.reset();
    DatabaseProduct db = DatabaseProduct.determineDatabaseProduct(DatabaseProduct.UNDEFINED_NAME, conf);
    // JUnit convention is assertEquals(expected, actual); the original had the
    // arguments reversed, which makes failure messages report swapped values.
    Assert.assertEquals("DummyPostfix", db.getHiveSchemaPostfix());
    Assert.assertEquals("values current_timestamp", db.getDBTime());
    Configuration c = db.getConf();
    Assert.assertEquals("DummyValue", c.get("DummyKey"));
    // Cleanup conf so subsequent tests don't inherit the custom RDBMS settings.
    MetastoreConf.setBoolVar(conf, ConfVars.USE_CUSTOM_RDBMS, false);
    MetastoreConf.setVar(conf, ConfVars.CUSTOM_RDBMS_CLASSNAME, "");
}
Use of org.apache.hadoop.hive.metastore.DatabaseProduct in the Apache Hive project: class TestTxnDbUtil, method prepDb.
/**
 * Prepares the metastore database for unit tests.
 * Runs the latest init schema against the database configured in the CONNECT_URL_KEY param.
 * Ignores any duplication (table, index etc.) So it can be called multiple times for the same database.
 * Retries up to 5 times when the failure is a deadlock
 * ({@link SQLTransactionRollbackException}), via recursion.
 * @param conf Metastore configuration
 * @throws Exception Initialization failure
 */
public static synchronized void prepDb(Configuration conf) throws Exception {
LOG.info("Creating transactional tables");
Connection conn = null;
Statement stmt = null;
try {
conn = getConnection(conf);
// Identify the backing RDBMS so the matching schema-script variant is used.
String s = conn.getMetaData().getDatabaseProductName();
DatabaseProduct dbProduct = determineDatabaseProduct(s, conf);
stmt = conn.createStatement();
// Skip initialization entirely if the txn tables already exist.
if (checkDbPrepared(stmt)) {
return;
}
String schemaRootPath = getSchemaRootPath();
IMetaStoreSchemaInfo metaStoreSchemaInfo = MetaStoreSchemaInfoFactory.get(conf, schemaRootPath, dbProduct.getHiveSchemaPostfix());
String initFile = metaStoreSchemaInfo.generateInitFileName(null);
// Stream the DB-specific init script into the statement; try-with-resources
// guarantees the script file handle is closed.
try (InputStream is = new FileInputStream(metaStoreSchemaInfo.getMetaStoreScriptDir() + File.separator + initFile)) {
LOG.info("Reinitializing the metastore db with {} on the database {}", initFile, MetastoreConf.getVar(conf, ConfVars.CONNECT_URL_KEY));
importSQL(stmt, is);
}
} catch (SQLException e) {
// Best-effort rollback; a failure here is logged but does not mask `e`.
try {
if (conn != null) {
conn.rollback();
}
} catch (SQLException re) {
LOG.error("Error rolling back: " + re.getMessage());
}
// This might be a deadlock, if so, let's retry
// deadlockCnt is a shared static counter; it is incremented before the
// recursive retry and only reset in the finally block after the recursion
// unwinds, so nested retries see the accumulated count.
if (e instanceof SQLTransactionRollbackException && deadlockCnt++ < 5) {
LOG.warn("Caught deadlock, retrying db creation");
prepDb(conf);
} else {
throw e;
}
} finally {
// Reset the retry counter for the next top-level call and release JDBC
// resources. Note this also runs on the early `return` above.
deadlockCnt = 0;
closeResources(conn, stmt, null);
}
}
Use of org.apache.hadoop.hive.metastore.DatabaseProduct in the Apache Hive project: class TestTxnDbUtil, method truncateTable.
/**
 * Best-effort truncation of a single metastore table using the DB-specific
 * truncate statement. A table that does not exist is treated as success.
 *
 * @param conn open JDBC connection, used only to read database metadata
 * @param conf Metastore configuration, used to resolve the DatabaseProduct
 * @param stmt statement on which the truncate is executed
 * @param name table to truncate
 * @return true if the table was truncated or does not exist; false on any other failure
 */
private static boolean truncateTable(Connection conn, Configuration conf, Statement stmt, String name) {
    try {
        String dbProduct = conn.getMetaData().getDatabaseProductName();
        DatabaseProduct databaseProduct = determineDatabaseProduct(dbProduct, conf);
        try {
            // We can not use actual truncate due to some foreign keys, but we don't expect much data during tests
            String s = databaseProduct.getTruncateStatement(name);
            stmt.execute(s);
            // Parameterized SLF4J logging avoids string concatenation when debug is off.
            LOG.debug("Successfully truncated table {}", name);
            return true;
        } catch (SQLException e) {
            if (databaseProduct.isTableNotExistsError(e)) {
                LOG.debug("Not truncating {} because it doesn't exist", name);
                return true;
            }
            // SLF4J: last argument is the Throwable, so the stack trace is preserved.
            LOG.error("Unable to truncate table {}", name, e);
        }
    } catch (SQLException e) {
        // Fixed message grammar ("Unable to determine") and dropped stray trailing space.
        LOG.error("Unable to determine database product", e);
    }
    return false;
}
Use of org.apache.hadoop.hive.metastore.DatabaseProduct in the Apache Hive project: class TestTxnDbUtil, method getConnection.
/**
 * Opens a JDBC connection to the metastore database configured in {@code conf},
 * with auto-commit enabled, and runs the DB-specific transaction preparation
 * statement (if the DatabaseProduct defines one).
 *
 * @param conf Metastore configuration supplying driver class, URL, and credentials
 * @return an open, auto-committing connection; the caller is responsible for closing it
 * @throws Exception on driver loading, connection, or prepare-statement failure
 */
public static Connection getConnection(Configuration conf) throws Exception {
    String jdbcDriver = MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER);
    // Class.newInstance() is deprecated since Java 9 and bypasses checked-exception
    // wrapping; instantiate through the declared no-arg constructor instead.
    Driver driver = (Driver) Class.forName(jdbcDriver).getDeclaredConstructor().newInstance();
    Properties prop = new Properties();
    String driverUrl = MetastoreConf.getVar(conf, ConfVars.CONNECT_URL_KEY);
    String user = MetastoreConf.getVar(conf, ConfVars.CONNECTION_USER_NAME);
    String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
    prop.setProperty("user", user);
    prop.setProperty("password", passwd);
    Connection conn = driver.connect(driverUrl, prop);
    conn.setAutoCommit(true);
    DatabaseProduct dbProduct = determineDatabaseProduct(conn.getMetaData().getDatabaseProductName(), conf);
    // Some products (e.g. Derby) need a session-level statement before txn tests.
    String initSql = dbProduct.getPrepareTxnStmt();
    if (initSql != null) {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute(initSql);
        }
    }
    return conn;
}
Use of org.apache.hadoop.hive.metastore.DatabaseProduct in the Apache Hive project: class TestTxnDbUtil, method cleanDb.
/**
 * Empties all transactional metastore tables and re-seeds the sequence tables,
 * so each test starts from a clean txn state. Truncation of every table is
 * attempted even if earlier ones fail; a RuntimeException is thrown at the end
 * if anything went wrong.
 *
 * @param conf Metastore configuration used to open the connection
 * @throws Exception on connection failure, or RuntimeException if cleanup failed
 */
public static void cleanDb(Configuration conf) throws Exception {
    LOG.info("Cleaning transactional tables");
    boolean success = true;
    Connection conn = null;
    Statement stmt = null;
    try {
        conn = getConnection(conf);
        stmt = conn.createStatement();
        if (!checkDbPrepared(stmt)) {
            // Nothing to clean
            return;
        }
        // We want to try these, whether they succeed or fail.
        final String[] txnTables = {
            "TXN_COMPONENTS", "COMPLETED_TXN_COMPONENTS", "TXNS", "TXN_TO_WRITE_ID",
            "NEXT_WRITE_ID", "HIVE_LOCKS", "NEXT_LOCK_ID", "COMPACTION_QUEUE",
            "NEXT_COMPACTION_QUEUE_ID", "COMPLETED_COMPACTIONS", "AUX_TABLE",
            "WRITE_SET", "REPL_TXN_MAP", "MATERIALIZATION_REBUILD_LOCKS",
            "MIN_HISTORY_LEVEL", "COMPACTION_METRICS_CACHE"
        };
        for (String table : txnTables) {
            success &= truncateTable(conn, conf, stmt, table);
        }
        try {
            String dbProduct = conn.getMetaData().getDatabaseProductName();
            DatabaseProduct databaseProduct = determineDatabaseProduct(dbProduct, conf);
            try {
                // Re-seed sequence tables so generated ids start from 1 again.
                resetTxnSequence(databaseProduct, stmt);
                stmt.executeUpdate("INSERT INTO \"NEXT_LOCK_ID\" VALUES(1)");
                stmt.executeUpdate("INSERT INTO \"NEXT_COMPACTION_QUEUE_ID\" VALUES(1)");
            } catch (SQLException e) {
                // A missing table is tolerated; anything else marks the cleanup failed.
                if (!databaseProduct.isTableNotExistsError(e)) {
                    LOG.error("Error initializing sequence values", e);
                    success = false;
                }
            }
        } catch (SQLException e) {
            LOG.error("Unable determine database product ", e);
            success = false;
        }
        /*
         * Don't drop NOTIFICATION_LOG, SEQUENCE_TABLE and NOTIFICATION_SEQUENCE as its used by other
         * table which are not txn related to generate primary key. So if these tables are dropped
         * and other tables are not dropped, then it will create key duplicate error while inserting
         * to other table.
         */
    } finally {
        closeResources(conn, stmt, null);
    }
    if (!success) {
        throw new RuntimeException("Failed to clean up txn tables");
    }
}
Aggregations