Use of org.apache.hadoop.hive.metastore.HiveMetaException in project hive by apache.
The class MetastoreSchemaTool, method doCreateUser.
private void doCreateUser() throws HiveMetaException {
  testConnectionToMetastore();
  System.out.println("Starting user creation");
  String scriptDir = metaStoreSchemaInfo.getMetaStoreScriptDir();
  String protoCreateFile = metaStoreSchemaInfo.getCreateUserScript();
  try {
    File createFile = subUserAndPassword(scriptDir, protoCreateFile);
    System.out.println("Creation script " + createFile.getAbsolutePath());
    if (!dryRun) {
      if ("oracle".equals(dbType)) {
        oracleCreateUserHack(createFile);
      } else {
        runSqlLine(createFile.getParent(), createFile.getName());
      }
      System.out.println("User creation completed");
    }
  } catch (IOException e) {
    throw new HiveMetaException("User creation FAILED!" + " Metastore unusable !!", e);
  }
}
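A minimal sketch of reaching this method through the command-line entry point (the run method shown further below); the option names are taken from run(String[]), while the dbType and credential values here are purely illustrative placeholders:

// Hypothetical invocation sketch; option names match those parsed in run(String[]),
// the values are placeholders, not part of the snippet above.
String[] args = new String[] {
    "-createUser",
    "-dbType", "derby",           // one of the supported database types
    "-userName", "dba",           // DBA credentials used to execute the create-user script
    "-passWord", "dba_pwd",
    "-hiveUser", "hive",          // substituted into the script by subUserAndPassword
    "-hivePassword", "hive_pwd",
    "-hiveDb", "hive_db"
};
int rc = MetastoreSchemaTool.run(args); // 0 on success, 1 on failure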
Use of org.apache.hadoop.hive.metastore.HiveMetaException in project hive by apache.
The class MetastoreSchemaTool, method checkMetaStoreSkewedColumnsLocation.
private boolean checkMetaStoreSkewedColumnsLocation(Connection conn, URI[] defaultServers) throws HiveMetaException {
  String skewedColLoc, skewedColIDRange;
  boolean isValid = true;
  int numOfInvalid = 0;
  if (needsQuotedIdentifier) {
    skewedColIDRange = "select max(\"STRING_LIST_ID_KID\"), min(\"STRING_LIST_ID_KID\") from \"SKEWED_COL_VALUE_LOC_MAP\" ";
    skewedColLoc = "select t.\"TBL_NAME\", t.\"TBL_ID\", sk.\"STRING_LIST_ID_KID\", sk.\"LOCATION\", db.\"NAME\", db.\"DB_ID\" " + " from \"TBLS\" t, \"SDS\" s, \"DBS\" db, \"SKEWED_COL_VALUE_LOC_MAP\" sk " + "where sk.\"SD_ID\" = s.\"SD_ID\" and s.\"SD_ID\" = t.\"SD_ID\" and t.\"DB_ID\" = db.\"DB_ID\" and " + "sk.\"STRING_LIST_ID_KID\" >= ? and sk.\"STRING_LIST_ID_KID\" <= ? order by t.\"TBL_ID\" ";
  } else {
    skewedColIDRange = "select max(STRING_LIST_ID_KID), min(STRING_LIST_ID_KID) from SKEWED_COL_VALUE_LOC_MAP";
    skewedColLoc = "select t.TBL_NAME, t.TBL_ID, sk.STRING_LIST_ID_KID, sk.LOCATION, db.NAME, db.DB_ID from TBLS t, SDS s, DBS db, SKEWED_COL_VALUE_LOC_MAP sk " + "where sk.SD_ID = s.SD_ID and s.SD_ID = t.SD_ID and t.DB_ID = db.DB_ID and sk.STRING_LIST_ID_KID >= ? and sk.STRING_LIST_ID_KID <= ? order by t.TBL_ID ";
  }
  long maxID = 0, minID = 0;
  // Scan the ID range in batches of rtnSize rows per query
  long rtnSize = 2000;
  try {
    Statement stmt = conn.createStatement();
    ResultSet res = stmt.executeQuery(skewedColIDRange);
    if (res.next()) {
      maxID = res.getLong(1);
      minID = res.getLong(2);
    }
    res.close();
    stmt.close();
    PreparedStatement pStmt = conn.prepareStatement(skewedColLoc);
    while (minID <= maxID) {
      pStmt.setLong(1, minID);
      pStmt.setLong(2, minID + rtnSize);
      res = pStmt.executeQuery();
      while (res.next()) {
        String locValue = res.getString(4);
        String entity = "Database " + getNameOrID(res, 5, 6) + ", Table " + getNameOrID(res, 1, 2) + ", String list " + res.getString(3);
        if (!checkLocation(entity, locValue, defaultServers)) {
          numOfInvalid++;
        }
      }
      res.close();
      minID += rtnSize + 1;
    }
    pStmt.close();
  } catch (SQLException e) {
    throw new HiveMetaException("Failed to get skewed columns location info.", e);
  }
  if (numOfInvalid > 0) {
    isValid = false;
  }
  return isValid;
}
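getNameOrID and checkLocation are helpers not included in this snippet. A plausible reconstruction of getNameOrID, assuming it prefers the human-readable name column and falls back to the numeric ID when the name is absent:

// Hypothetical reconstruction of the helper used above; nameInx and idInx are
// column positions within the current ResultSet row (name column, then ID column).
private String getNameOrID(ResultSet res, int nameInx, int idInx) throws SQLException {
  String itemName = res.getString(nameInx);
  return (itemName == null || itemName.isEmpty()) ? "ID: " + res.getString(idInx) : "Name: " + itemName;
}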
Use of org.apache.hadoop.hive.metastore.HiveMetaException in project hive by apache.
The class MetastoreSchemaTool, method run.
public static int run(String[] args) {
  LOG.debug("Going to run command: " + StringUtils.join(args, " "));
  CommandLineParser parser = new GnuParser();
  CommandLine line;
  String dbType;
  String schemaVer;
  Options cmdLineOptions = new Options();
  // Argument handling
  initOptions(cmdLineOptions);
  try {
    line = parser.parse(cmdLineOptions, args);
  } catch (ParseException e) {
    logAndPrintToError("HiveSchemaTool:Parsing failed. Reason: " + e.getLocalizedMessage());
    return usage(cmdLineOptions);
  }
  assert line != null;
  if (line.hasOption("help")) {
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp("schemaTool", cmdLineOptions);
    return 1;
  }
  if (line.hasOption("dbType")) {
    dbType = line.getOptionValue("dbType");
    // Note: DB_POSTGRACE (sic) is the constant's actual spelling in HiveSchemaHelper
    if (!dbType.equalsIgnoreCase(HiveSchemaHelper.DB_DERBY)
        && !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_MSSQL)
        && !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_MYSQL)
        && !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_POSTGRACE)
        && !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_ORACLE)) {
      logAndPrintToError("Unsupported dbType " + dbType);
      return usage(cmdLineOptions);
    }
  } else {
    logAndPrintToError("no dbType supplied");
    return usage(cmdLineOptions);
  }
  System.setProperty(ConfVars.SCHEMA_VERIFICATION.toString(), "true");
  try {
    MetastoreSchemaTool schemaTool = new MetastoreSchemaTool(dbType);
    if (line.hasOption("userName")) {
      schemaTool.setUserName(line.getOptionValue("userName"));
    } else {
      schemaTool.setUserName(MetastoreConf.getVar(schemaTool.getConf(), ConfVars.CONNECTION_USER_NAME));
    }
    if (line.hasOption("passWord")) {
      schemaTool.setPassWord(line.getOptionValue("passWord"));
    } else {
      try {
        schemaTool.setPassWord(MetastoreConf.getPassword(schemaTool.getConf(), ConfVars.PWD));
      } catch (IOException err) {
        throw new HiveMetaException("Error getting metastore password", err);
      }
    }
    if (line.hasOption("hiveUser")) {
      schemaTool.setHiveUser(line.getOptionValue("hiveUser"));
    }
    if (line.hasOption("hivePassword")) {
      schemaTool.setHivePasswd(line.getOptionValue("hivePassword"));
    }
    if (line.hasOption("hiveDb")) {
      schemaTool.setHiveDb(line.getOptionValue("hiveDb"));
    }
    if (line.hasOption("url")) {
      schemaTool.setUrl(line.getOptionValue("url"));
    }
    if (line.hasOption("driver")) {
      schemaTool.setDriver(line.getOptionValue("driver"));
    }
    if (line.hasOption("dryRun")) {
      schemaTool.setDryRun(true);
    }
    if (line.hasOption("verbose")) {
      schemaTool.setVerbose(true);
    }
    if (line.hasOption("dbOpts")) {
      schemaTool.setDbOpts(line.getOptionValue("dbOpts"));
    }
    if (line.hasOption("validate") && line.hasOption("servers")) {
      schemaTool.setValidationServers(line.getOptionValue("servers"));
    }
    if (line.hasOption("info")) {
      schemaTool.showInfo();
    } else if (line.hasOption("upgradeSchema")) {
      schemaTool.doUpgrade();
    } else if (line.hasOption("upgradeSchemaFrom")) {
      schemaVer = line.getOptionValue("upgradeSchemaFrom");
      schemaTool.doUpgrade(schemaVer);
    } else if (line.hasOption("initSchema")) {
      schemaTool.doInit();
    } else if (line.hasOption("initSchemaTo")) {
      schemaVer = line.getOptionValue("initSchemaTo");
      schemaTool.doInit(schemaVer);
    } else if (line.hasOption("validate")) {
      return schemaTool.doValidate();
    } else if (line.hasOption("createUser")) {
      schemaTool.doCreateUser();
    } else {
      logAndPrintToError("no valid option supplied");
      return usage(cmdLineOptions);
    }
  } catch (HiveMetaException e) {
    logAndPrintToError(e.getMessage());
    if (e.getCause() != null) {
      Throwable t = e.getCause();
      logAndPrintToError("Underlying cause: " + t.getClass().getName() + " : " + t.getMessage());
      if (e.getCause() instanceof SQLException) {
        logAndPrintToError("SQL Error code: " + ((SQLException) t).getErrorCode());
      }
    }
    if (line.hasOption("verbose")) {
      e.printStackTrace();
    } else {
      logAndPrintToError("Use --verbose for detailed stacktrace.");
    }
    logAndPrintToError("*** schemaTool failed ***");
    return 1;
  }
  System.out.println("schemaTool completed");
  return 0;
}
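Since run returns a process-style exit code rather than calling System.exit itself, a wrapper entry point would presumably look like the following sketch (not part of the snippet above):

public static void main(String[] args) {
  // Propagate run's exit code (0 = success, 1 = failure) to the shell.
  System.exit(MetastoreSchemaTool.run(args));
}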
Use of org.apache.hadoop.hive.metastore.HiveMetaException in project hive by apache.
The class TestSchemaToolForMetastore, method testSchemaUpgrade.
/**
 * Test schema upgrade.
 */
@Test
public void testSchemaUpgrade() throws Exception {
  boolean foundException = false;
  // Initialize 1.2.0 schema
  schemaTool.doInit("1.2.0");
  // Verify that the driver fails due to the older schema version
  try {
    schemaTool.verifySchemaVersion();
  } catch (HiveMetaException e) {
    // Expected to fail due to old schema
    foundException = true;
  }
  if (!foundException) {
    throw new Exception("Hive operations shouldn't pass with older version schema");
  }
  // Generate a dummy pre-upgrade script with errors
  String invalidPreUpgradeScript = writeDummyPreUpgradeScript(0, "upgrade-2.3.0-to-3.0.0.derby.sql", "foo bar;");
  // Generate dummy pre-upgrade scripts with valid SQL
  String validPreUpgradeScript0 = writeDummyPreUpgradeScript(1, "upgrade-2.3.0-to-3.0.0.derby.sql", "CREATE TABLE schema_test0 (id integer);");
  String validPreUpgradeScript1 = writeDummyPreUpgradeScript(2, "upgrade-2.3.0-to-3.0.0.derby.sql", "CREATE TABLE schema_test1 (id integer);");
  // Capture system out and err
  schemaTool.setVerbose(true);
  OutputStream stderr = new ByteArrayOutputStream();
  PrintStream errPrintStream = new PrintStream(stderr);
  System.setErr(errPrintStream);
  OutputStream stdout = new ByteArrayOutputStream();
  PrintStream outPrintStream = new PrintStream(stdout);
  System.setOut(outPrintStream);
  // Upgrade schema from 1.2.0 to latest
  schemaTool.doUpgrade("1.2.0");
  LOG.info("stdout is " + stdout.toString());
  LOG.info("stderr is " + stderr.toString());
  // Verify that the schemaTool ran pre-upgrade scripts and ignored errors
  Assert.assertTrue(stderr.toString().contains(invalidPreUpgradeScript));
  Assert.assertTrue(stderr.toString().contains("foo"));
  Assert.assertFalse(stderr.toString().contains(validPreUpgradeScript0));
  Assert.assertFalse(stderr.toString().contains(validPreUpgradeScript1));
  Assert.assertTrue(stdout.toString().contains(validPreUpgradeScript0));
  Assert.assertTrue(stdout.toString().contains(validPreUpgradeScript1));
  // Verify that the driver works fine with the latest schema
  schemaTool.verifySchemaVersion();
}
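writeDummyPreUpgradeScript is a test helper not shown above. A sketch under the assumption that pre-upgrade scripts follow a pre-<index>-<upgradeScript> naming convention and are written into the metastore upgrade-script directory (the path below is hypothetical):

// Hypothetical reconstruction of the test helper; returns the generated file name
// so the test can look for it in the captured output.
private String writeDummyPreUpgradeScript(int index, String upgradeScriptName, String sql) throws Exception {
  String preUpgradeScript = "pre-" + index + "-" + upgradeScriptName;
  String path = System.getProperty("test.tmp.dir", "target/tmp")
      + File.separator + "scripts" + File.separator + "metastore"
      + File.separator + "upgrade" + File.separator + "derby"
      + File.separator + preUpgradeScript;
  try (BufferedWriter out = new BufferedWriter(new FileWriter(path))) {
    out.write(sql + System.lineSeparator());
  }
  return preUpgradeScript;
}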
Use of org.apache.hadoop.hive.metastore.HiveMetaException in project hive by apache.
The class TestSchemaToolForMetastore, method validateMetastoreDbPropertiesTable.
private void validateMetastoreDbPropertiesTable() throws HiveMetaException, IOException {
  boolean isValid = schemaTool.validateSchemaTables(conn);
  Assert.assertTrue(isValid);
  // Adding the same property key twice should throw a unique-key constraint violation
  String[] scripts = new String[] {
      "insert into METASTORE_DB_PROPERTIES values ('guid', 'test-uuid-1', 'dummy uuid 1')",
      "insert into METASTORE_DB_PROPERTIES values ('guid', 'test-uuid-2', 'dummy uuid 2')" };
  File scriptFile = generateTestScript(scripts);
  Exception ex = null;
  try {
    schemaTool.runSqlLine(scriptFile.getPath());
  } catch (Exception iox) {
    ex = iox;
  }
  Assert.assertTrue(ex != null && ex instanceof IOException);
}
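generateTestScript is a small test helper not shown here; a minimal sketch, assuming it writes each statement as one line of a temporary SQL script:

// Hypothetical reconstruction of the test helper used above.
private File generateTestScript(String[] stmts) throws IOException {
  File testScriptFile = File.createTempFile("schematest", ".sql");
  testScriptFile.deleteOnExit();
  try (BufferedWriter out = new BufferedWriter(new FileWriter(testScriptFile))) {
    for (String stmt : stmts) {
      out.write(stmt + ";" + System.lineSeparator()); // sqlline expects ';'-terminated statements
    }
  }
  return testScriptFile;
}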