Use of org.apache.hadoop.hive.metastore.HiveMetaException in project hive by apache.
Class HiveSchemaTool, method checkMetaStoreTableLocation.
private boolean checkMetaStoreTableLocation(Connection conn, URI[] defaultServers)
    throws HiveMetaException {
  String tabLoc, tabIDRange;
  boolean isValid = true;
  int numOfInvalid = 0;
  if (needsQuotedIdentifier) {
    tabIDRange = "select max(\"TBL_ID\"), min(\"TBL_ID\") from \"TBLS\" ";
  } else {
    tabIDRange = "select max(TBL_ID), min(TBL_ID) from TBLS";
  }
  if (needsQuotedIdentifier) {
    tabLoc = "select tbl.\"TBL_ID\", tbl.\"TBL_NAME\", sd.\"LOCATION\", dbt.\"DB_ID\", dbt.\"NAME\""
        + " from \"TBLS\" tbl inner join \"SDS\" sd on tbl.\"SD_ID\" = sd.\"SD_ID\""
        + " and tbl.\"TBL_TYPE\" != '" + TableType.VIRTUAL_VIEW + "'"
        + " and tbl.\"TBL_ID\" >= ? and tbl.\"TBL_ID\" <= ?"
        + " inner join \"DBS\" dbt on tbl.\"DB_ID\" = dbt.\"DB_ID\" order by tbl.\"TBL_ID\" ";
  } else {
    tabLoc = "select tbl.TBL_ID, tbl.TBL_NAME, sd.LOCATION, dbt.DB_ID, dbt.NAME"
        + " from TBLS tbl join SDS sd on tbl.SD_ID = sd.SD_ID"
        + " and tbl.TBL_TYPE != '" + TableType.VIRTUAL_VIEW + "'"
        + " and tbl.TBL_ID >= ? and tbl.TBL_ID <= ?"
        + " inner join DBS dbt on tbl.DB_ID = dbt.DB_ID order by tbl.TBL_ID";
  }
  long maxID = 0, minID = 0;
  // Page through TBLS in fixed-size ID ranges to keep each result set small.
  long rtnSize = 2000;
  try {
    Statement stmt = conn.createStatement();
    ResultSet res = stmt.executeQuery(tabIDRange);
    if (res.next()) {
      maxID = res.getLong(1);
      minID = res.getLong(2);
    }
    res.close();
    stmt.close();
    PreparedStatement pStmt = conn.prepareStatement(tabLoc);
    while (minID <= maxID) {
      // Fetch the batch of tables with IDs in [minID, minID + rtnSize].
      pStmt.setLong(1, minID);
      pStmt.setLong(2, minID + rtnSize);
      res = pStmt.executeQuery();
      while (res.next()) {
        String locValue = res.getString(3);
        String entity = "Database " + getNameOrID(res, 5, 4)
            + ", Table " + getNameOrID(res, 2, 1);
        if (!checkLocation(entity, locValue, defaultServers)) {
          numOfInvalid++;
        }
      }
      res.close();
      minID += rtnSize + 1;
    }
    pStmt.close();
  } catch (SQLException e) {
    throw new HiveMetaException("Failed to get Table Location Info.", e);
  }
  if (numOfInvalid > 0) {
    isValid = false;
  }
  return isValid;
}
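The snippet above relies on two helpers, getNameOrID and checkLocation, that this listing does not show. A minimal sketch of what getNameOrID plausibly does, assuming it prefers the human-readable name column and falls back to the numeric ID when the name is missing; the actual helper in HiveSchemaTool may differ:

// Hypothetical sketch, NOT the actual HiveSchemaTool helper.
// Label an entity by its name column when populated, else by its ID column,
// so validation messages stay meaningful for unnamed rows.
private String getNameOrID(ResultSet res, int nameInx, int idInx) throws SQLException {
  String itemName = res.getString(nameInx);
  return (itemName == null || itemName.isEmpty())
      ? "ID: " + res.getString(idInx)
      : "Name: " + itemName;
}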
Use of org.apache.hadoop.hive.metastore.HiveMetaException in project hive by apache.
Class HiveSchemaTool, method validateSequences.
boolean validateSequences(Connection conn) throws HiveMetaException {
  // Maps each JDO model class to its backing table and primary-key column.
  Map<String, Pair<String, String>> seqNameToTable =
      new ImmutableMap.Builder<String, Pair<String, String>>()
          .put("MDatabase", Pair.of("DBS", "DB_ID"))
          .put("MRole", Pair.of("ROLES", "ROLE_ID"))
          .put("MGlobalPrivilege", Pair.of("GLOBAL_PRIVS", "USER_GRANT_ID"))
          .put("MTable", Pair.of("TBLS", "TBL_ID"))
          .put("MStorageDescriptor", Pair.of("SDS", "SD_ID"))
          .put("MSerDeInfo", Pair.of("SERDES", "SERDE_ID"))
          .put("MColumnDescriptor", Pair.of("CDS", "CD_ID"))
          .put("MTablePrivilege", Pair.of("TBL_PRIVS", "TBL_GRANT_ID"))
          .put("MTableColumnStatistics", Pair.of("TAB_COL_STATS", "CS_ID"))
          .put("MPartition", Pair.of("PARTITIONS", "PART_ID"))
          .put("MPartitionColumnStatistics", Pair.of("PART_COL_STATS", "CS_ID"))
          .put("MFunction", Pair.of("FUNCS", "FUNC_ID"))
          .put("MIndex", Pair.of("IDXS", "INDEX_ID"))
          .put("MStringList", Pair.of("SKEWED_STRING_LIST", "STRING_LIST_ID"))
          .build();
  System.out.println("Validating sequence number for SEQUENCE_TABLE");
  boolean isValid = true;
  try {
    Statement stmt = conn.createStatement();
    for (String seqName : seqNameToTable.keySet()) {
      String tableName = seqNameToTable.get(seqName).getLeft();
      String tableKey = seqNameToTable.get(seqName).getRight();
      String fullSequenceName = "org.apache.hadoop.hive.metastore.model." + seqName;
      String seqQuery = needsQuotedIdentifier
          ? "select t.\"NEXT_VAL\" from \"SEQUENCE_TABLE\" t WHERE t.\"SEQUENCE_NAME\"=? order by t.\"SEQUENCE_NAME\" "
          : "select t.NEXT_VAL from SEQUENCE_TABLE t WHERE t.SEQUENCE_NAME=? order by t.SEQUENCE_NAME ";
      String maxIdQuery = needsQuotedIdentifier
          ? "select max(\"" + tableKey + "\") from \"" + tableName + "\""
          : "select max(" + tableKey + ") from " + tableName;
      ResultSet res = stmt.executeQuery(maxIdQuery);
      if (res.next()) {
        long maxId = res.getLong(1);
        if (maxId > 0) {
          // SEQUENCE_TABLE must hand out IDs above the table's current max key.
          PreparedStatement pStmt = conn.prepareStatement(seqQuery);
          pStmt.setString(1, fullSequenceName);
          ResultSet resSeq = pStmt.executeQuery();
          if (!resSeq.next()) {
            isValid = false;
            System.err.println("Missing SEQUENCE_NAME " + seqName + " from SEQUENCE_TABLE");
          } else if (resSeq.getLong(1) < maxId) {
            isValid = false;
            System.err.println("NEXT_VAL for " + seqName + " in SEQUENCE_TABLE < max("
                + tableKey + ") in " + tableName);
          }
        }
      }
    }
    System.out.println((isValid ? "Succeeded" : "Failed")
        + " in sequence number validation for SEQUENCE_TABLE.");
    return isValid;
  } catch (SQLException e) {
    throw new HiveMetaException("Failed to validate sequence number for SEQUENCE_TABLE", e);
  }
}
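For orientation, a hedged sketch of driving this validation over a plain JDBC connection. The Derby URL and credentials are illustrative placeholders; the real tool derives its connection from HiveConf, and validateSequences is package-private here, so a caller would need to live in the same package:

// Hypothetical driver for the validation above; URL and credentials are
// placeholders only. Assumes the surrounding method declares
// throws SQLException, HiveMetaException, and java.sql.* is imported.
HiveSchemaTool schemaTool = new HiveSchemaTool("derby", null);
try (Connection conn = DriverManager.getConnection(
    "jdbc:derby:;databaseName=metastore_db", "APP", "mine")) {
  boolean ok = schemaTool.validateSequences(conn);
  System.out.println("Sequence validation " + (ok ? "passed" : "failed"));
}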
Use of org.apache.hadoop.hive.metastore.HiveMetaException in project hive by apache.
Class HiveSchemaTool, method doUpgrade.
/**
 * Perform the metastore schema upgrade.
 *
 * @param fromSchemaVer
 *          Existing version of the metastore. If null, the version is read from the metastore.
 * @throws HiveMetaException
 */
public void doUpgrade(String fromSchemaVer) throws HiveMetaException {
  if (metaStoreSchemaInfo.getHiveSchemaVersion().equals(fromSchemaVer)) {
    System.out.println("No schema upgrade required from version " + fromSchemaVer);
    return;
  }
  // Find the list of scripts to execute for this upgrade
  List<String> upgradeScripts = metaStoreSchemaInfo.getUpgradeScripts(fromSchemaVer);
  testConnectionToMetastore();
  System.out.println("Starting upgrade metastore schema from version " + fromSchemaVer
      + " to " + metaStoreSchemaInfo.getHiveSchemaVersion());
  String scriptDir = metaStoreSchemaInfo.getMetaStoreScriptDir();
  try {
    for (String scriptFile : upgradeScripts) {
      System.out.println("Upgrade script " + scriptFile);
      if (!dryRun) {
        runPreUpgrade(scriptDir, scriptFile);
        runBeeLine(scriptDir, scriptFile);
        System.out.println("Completed " + scriptFile);
      }
    }
  } catch (IOException eIO) {
    throw new HiveMetaException("Upgrade FAILED! Metastore state would be inconsistent !!", eIO);
  }
  // Re-validate the schema version after the upgrade
  verifySchemaVersion();
}
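Every call in the following sketch appears in the snippets on this page (the constructor and setDryRun are used in main further down), but the dbType and source version are hypothetical placeholders. In dry-run mode, doUpgrade lists each upgrade script without executing it:

// Hypothetical usage sketch; "mysql" and "2.3.0" are placeholders only.
// Assumes the surrounding method declares throws HiveMetaException.
HiveSchemaTool schemaTool = new HiveSchemaTool("mysql", null);
schemaTool.setDryRun(true);     // print the script sequence without running it
schemaTool.doUpgrade("2.3.0");  // scripts chosen via metaStoreSchemaInfo.getUpgradeScripts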
Use of org.apache.hadoop.hive.metastore.HiveMetaException in project hive by apache.
Class HiveSchemaTool, method checkMetaStoreDBLocation.
private boolean checkMetaStoreDBLocation(Connection conn, URI[] defaultServers)
    throws HiveMetaException {
  String dbLoc;
  boolean isValid = true;
  int numOfInvalid = 0;
  if (needsQuotedIdentifier) {
    dbLoc = "select dbt.\"DB_ID\", dbt.\"NAME\", dbt.\"DB_LOCATION_URI\" from \"DBS\" dbt order by dbt.\"DB_ID\" ";
  } else {
    dbLoc = "select dbt.DB_ID, dbt.NAME, dbt.DB_LOCATION_URI from DBS dbt order by dbt.DB_ID";
  }
  try (Statement stmt = conn.createStatement();
       ResultSet res = stmt.executeQuery(dbLoc)) {
    while (res.next()) {
      String locValue = res.getString(3);
      String dbName = getNameOrID(res, 2, 1);
      if (!checkLocation("Database " + dbName, locValue, defaultServers)) {
        numOfInvalid++;
      }
    }
  } catch (SQLException e) {
    throw new HiveMetaException("Failed to get DB Location Info.", e);
  }
  if (numOfInvalid > 0) {
    isValid = false;
  }
  return isValid;
}
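Both location checks delegate to a checkLocation helper that is not shown in this listing. A speculative sketch of what such a check might do, assuming it flags null locations and locations whose scheme and authority match none of the default servers; the real HiveSchemaTool logic may differ:

// Hypothetical sketch, NOT the actual checkLocation implementation.
// Requires java.net.URI and java.net.URISyntaxException imports.
private boolean checkLocationSketch(String entity, String entityLocation, URI[] defaultServers) {
  if (entityLocation == null) {
    System.err.println(entity + ", Error: empty location");
    return false;
  }
  URI currentUri;
  try {
    currentUri = new URI(entityLocation);
  } catch (URISyntaxException e) {
    System.err.println(entity + ", Error: invalid location " + entityLocation);
    return false;
  }
  // A relative location, or no servers to compare against, passes by default.
  if (currentUri.getScheme() == null || defaultServers == null || defaultServers.length == 0) {
    return true;
  }
  for (URI server : defaultServers) {
    if (server.getScheme() != null
        && server.getScheme().equalsIgnoreCase(currentUri.getScheme())
        && server.getAuthority() != null
        && server.getAuthority().equalsIgnoreCase(currentUri.getAuthority())) {
      return true;
    }
  }
  System.err.println(entity + ", Location: " + entityLocation
      + " does not match any of the default servers");
  return false;
}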
Use of org.apache.hadoop.hive.metastore.HiveMetaException in project hive by apache.
Class HiveSchemaTool, method main.
public static void main(String[] args) {
  CommandLineParser parser = new GnuParser();
  CommandLine line = null;
  String dbType = null;
  String metaDbType = null;
  String schemaVer = null;
  Options cmdLineOptions = new Options();
  // Argument handling
  initOptions(cmdLineOptions);
  try {
    line = parser.parse(cmdLineOptions, args);
  } catch (ParseException e) {
    System.err.println("HiveSchemaTool:Parsing failed. Reason: " + e.getLocalizedMessage());
    printAndExit(cmdLineOptions);
  }
  if (line.hasOption("help")) {
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp("schemaTool", cmdLineOptions);
    return;
  }
  if (line.hasOption("dbType")) {
    dbType = line.getOptionValue("dbType");
    if (!dbType.equalsIgnoreCase(HiveSchemaHelper.DB_DERBY)
        && !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_HIVE)
        && !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_MSSQL)
        && !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_MYSQL)
        && !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_POSTGRACE)
        && !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_ORACLE)) {
      System.err.println("Unsupported dbType " + dbType);
      printAndExit(cmdLineOptions);
    }
  } else {
    System.err.println("no dbType supplied");
    printAndExit(cmdLineOptions);
  }
  if (line.hasOption("metaDbType")) {
    metaDbType = line.getOptionValue("metaDbType");
    if (!dbType.equals(HiveSchemaHelper.DB_HIVE)) {
      System.err.println("metaDbType only supported for dbType = hive");
      printAndExit(cmdLineOptions);
    }
    if (!metaDbType.equalsIgnoreCase(HiveSchemaHelper.DB_DERBY)
        && !metaDbType.equalsIgnoreCase(HiveSchemaHelper.DB_MSSQL)
        && !metaDbType.equalsIgnoreCase(HiveSchemaHelper.DB_MYSQL)
        && !metaDbType.equalsIgnoreCase(HiveSchemaHelper.DB_POSTGRACE)
        && !metaDbType.equalsIgnoreCase(HiveSchemaHelper.DB_ORACLE)) {
      System.err.println("Unsupported metaDbType " + metaDbType);
      printAndExit(cmdLineOptions);
    }
  } else if (dbType.equalsIgnoreCase(HiveSchemaHelper.DB_HIVE)) {
    System.err.println("no metaDbType supplied");
    printAndExit(cmdLineOptions);
  }
  System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.varname, "true");
  try {
    HiveSchemaTool schemaTool = new HiveSchemaTool(dbType, metaDbType);
    if (line.hasOption("userName")) {
      schemaTool.setUserName(line.getOptionValue("userName"));
    } else {
      schemaTool.setUserName(
          schemaTool.getHiveConf().get(ConfVars.METASTORE_CONNECTION_USER_NAME.varname));
    }
    if (line.hasOption("passWord")) {
      schemaTool.setPassWord(line.getOptionValue("passWord"));
    } else {
      try {
        schemaTool.setPassWord(ShimLoader.getHadoopShims()
            .getPassword(schemaTool.getHiveConf(), HiveConf.ConfVars.METASTOREPWD.varname));
      } catch (IOException err) {
        throw new HiveMetaException("Error getting metastore password", err);
      }
    }
    if (line.hasOption("url")) {
      schemaTool.setUrl(line.getOptionValue("url"));
    }
    if (line.hasOption("driver")) {
      schemaTool.setDriver(line.getOptionValue("driver"));
    }
    if (line.hasOption("dryRun")) {
      schemaTool.setDryRun(true);
    }
    if (line.hasOption("verbose")) {
      schemaTool.setVerbose(true);
    }
    if (line.hasOption("dbOpts")) {
      schemaTool.setDbOpts(line.getOptionValue("dbOpts"));
    }
    if (line.hasOption("validate") && line.hasOption("servers")) {
      schemaTool.setValidationServers(line.getOptionValue("servers"));
    }
    if (line.hasOption("info")) {
      schemaTool.showInfo();
    } else if (line.hasOption("upgradeSchema")) {
      schemaTool.doUpgrade();
    } else if (line.hasOption("upgradeSchemaFrom")) {
      schemaVer = line.getOptionValue("upgradeSchemaFrom");
      schemaTool.doUpgrade(schemaVer);
    } else if (line.hasOption("initSchema")) {
      schemaTool.doInit();
    } else if (line.hasOption("initSchemaTo")) {
      schemaVer = line.getOptionValue("initSchemaTo");
      schemaTool.doInit(schemaVer);
    } else if (line.hasOption("validate")) {
      schemaTool.doValidate();
    } else {
      System.err.println("no valid option supplied");
      printAndExit(cmdLineOptions);
    }
  } catch (HiveMetaException e) {
    System.err.println(e);
    if (e.getCause() != null) {
      Throwable t = e.getCause();
      System.err.println("Underlying cause: " + t.getClass().getName() + " : " + t.getMessage());
      if (e.getCause() instanceof SQLException) {
        System.err.println("SQL Error code: " + ((SQLException) t).getErrorCode());
      }
    }
    if (line.hasOption("verbose")) {
      e.printStackTrace();
    } else {
      System.err.println("Use --verbose for detailed stacktrace.");
    }
    System.err.println("*** schemaTool failed ***");
    System.exit(1);
  }
  System.out.println("schemaTool completed");
}
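For reference, a hedged sketch of exercising this entry point programmatically. The single-dash option names mirror the hasOption checks above and the usual schematool command line, but treat them as illustrative; note that main calls System.exit(1) on failure, so this suits a throwaway process rather than a long-lived JVM:

// Hypothetical invocation sketch mirroring the options parsed above.
HiveSchemaTool.main(new String[] { "-dbType", "derby", "-initSchema", "-verbose" });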