Use of org.apache.hadoop.hive.metastore.HiveMetaException in the Apache Hive project.
From class HiveSchemaTool, method validateColumnNullValues:
/**
 * Scans TBLS for external/managed tables whose SD_ID is NULL, which indicates
 * corrupt metadata (every such table should reference a storage descriptor).
 *
 * @param conn open metastore JDBC connection; not closed by this method
 * @return true if no offending rows were found, false otherwise
 * @throws HiveMetaException if the validation query fails
 */
boolean validateColumnNullValues(Connection conn) throws HiveMetaException {
  System.out.println("Validating columns for incorrect NULL values.");
  boolean isValid = true;
  // Some backing databases (e.g. Postgres) require quoted identifiers; the
  // two branches are the same query with and without quoting.
  String tblQuery = needsQuotedIdentifier ?
      ("select t.* from \"TBLS\" t WHERE t.\"SD_ID\" IS NULL and (t.\"TBL_TYPE\"='" + TableType.EXTERNAL_TABLE + "' or t.\"TBL_TYPE\"='" + TableType.MANAGED_TABLE + "') order by t.\"TBL_ID\" ") :
      ("select t.* from TBLS t WHERE t.SD_ID IS NULL and (t.TBL_TYPE='" + TableType.EXTERNAL_TABLE + "' or t.TBL_TYPE='" + TableType.MANAGED_TABLE + "') order by t.TBL_ID ");
  // try-with-resources: the original leaked the Statement and ResultSet on
  // both the success and exception paths.
  try (Statement stmt = conn.createStatement();
       ResultSet res = stmt.executeQuery(tblQuery)) {
    while (res.next()) {
      long tableId = res.getLong("TBL_ID");
      String tableName = res.getString("TBL_NAME");
      String tableType = res.getString("TBL_TYPE");
      isValid = false;
      System.err.println("SD_ID in TBLS should not be NULL for Table Name=" + tableName + ", Table ID=" + tableId + ", Table Type=" + tableType);
    }
    System.out.println((isValid ? "Succeeded" : "Failed") + " in column validation for incorrect NULL values.");
    return isValid;
  } catch (SQLException e) {
    throw new HiveMetaException("Failed to validate columns for incorrect NULL values", e);
  }
}
Use of org.apache.hadoop.hive.metastore.HiveMetaException in the Apache Hive project.
From class MetastoreSchemaTool, method checkMetaStorePartitionLocation:
/**
 * Validates the storage location of every partition recorded in the metastore.
 * Partitions are fetched in batches of {@code rtnSize + 1} rows keyed by the
 * PART_ID range to avoid loading the whole PARTITIONS table at once.
 *
 * @param conn open metastore JDBC connection; not closed by this method
 * @param defaultServers expected filesystem roots a valid location may use
 * @return true if every partition location passed {@code checkLocation}
 * @throws HiveMetaException if any query fails
 */
private boolean checkMetaStorePartitionLocation(Connection conn, URI[] defaultServers) throws HiveMetaException {
  String partLoc, partIDRange;
  int numOfInvalid = 0;
  // Quoted vs. unquoted identifier forms of the same two queries.
  if (needsQuotedIdentifier) {
    partIDRange = "select max(\"PART_ID\"), min(\"PART_ID\") from \"PARTITIONS\" ";
  } else {
    partIDRange = "select max(PART_ID), min(PART_ID) from PARTITIONS";
  }
  if (needsQuotedIdentifier) {
    partLoc = "select pt.\"PART_ID\", pt.\"PART_NAME\", sd.\"LOCATION\", tbl.\"TBL_ID\", tbl.\"TBL_NAME\",dbt.\"DB_ID\", dbt.\"NAME\" from \"PARTITIONS\" pt " + "inner join \"SDS\" sd on pt.\"SD_ID\" = sd.\"SD_ID\" and pt.\"PART_ID\" >= ? and pt.\"PART_ID\"<= ? " + " inner join \"TBLS\" tbl on pt.\"TBL_ID\" = tbl.\"TBL_ID\" inner join " + "\"DBS\" dbt on tbl.\"DB_ID\" = dbt.\"DB_ID\" order by tbl.\"TBL_ID\" ";
  } else {
    partLoc = "select pt.PART_ID, pt.PART_NAME, sd.LOCATION, tbl.TBL_ID, tbl.TBL_NAME, dbt.DB_ID, dbt.NAME from PARTITIONS pt " + "inner join SDS sd on pt.SD_ID = sd.SD_ID and pt.PART_ID >= ? and pt.PART_ID <= ? " + "inner join TBLS tbl on tbl.TBL_ID = pt.TBL_ID inner join DBS dbt on tbl.DB_ID = dbt.DB_ID order by tbl.TBL_ID ";
  }
  long maxID = 0, minID = 0;
  long rtnSize = 2000;
  try {
    // try-with-resources throughout: the original leaked stmt/res/pStmt
    // whenever a SQLException escaped mid-loop.
    try (Statement stmt = conn.createStatement();
         ResultSet res = stmt.executeQuery(partIDRange)) {
      if (res.next()) {
        maxID = res.getLong(1);
        minID = res.getLong(2);
      }
    }
    try (PreparedStatement pStmt = conn.prepareStatement(partLoc)) {
      while (minID <= maxID) {
        pStmt.setLong(1, minID);
        pStmt.setLong(2, minID + rtnSize);
        try (ResultSet res = pStmt.executeQuery()) {
          while (res.next()) {
            String locValue = res.getString(3);
            // Columns: 1=PART_ID, 2=PART_NAME, 4=TBL_ID, 5=TBL_NAME, 6=DB_ID, 7=NAME
            String entity = "Database " + getNameOrID(res, 7, 6) + ", Table " + getNameOrID(res, 5, 4) + ", Partition " + getNameOrID(res, 2, 1);
            if (!checkLocation(entity, locValue, defaultServers)) {
              numOfInvalid++;
            }
          }
        }
        minID += rtnSize + 1;
      }
    }
  } catch (SQLException e) {
    // Fixed typo in the original message ("Partiton").
    throw new HiveMetaException("Failed to get Partition Location Info.", e);
  }
  return numOfInvalid == 0;
}
Use of org.apache.hadoop.hive.metastore.HiveMetaException in the Apache Hive project.
From class MetastoreSchemaTool, method doUpgrade:
/**
* Perform metastore schema upgrade
*
* @param fromSchemaVer
* Existing version of the metastore. If null, then read from the metastore
*/
/**
 * Perform metastore schema upgrade
 *
 * @param fromSchemaVer
 * Existing version of the metastore. If null, then read from the metastore
 */
void doUpgrade(String fromSchemaVer) throws HiveMetaException {
  // Already at the target version — nothing to run.
  if (metaStoreSchemaInfo.getHiveSchemaVersion().equals(fromSchemaVer)) {
    System.out.println("No schema upgrade required from version " + fromSchemaVer);
    return;
  }
  // Determine which scripts must run to bring the schema up to date.
  List<String> scriptsToRun = metaStoreSchemaInfo.getUpgradeScripts(fromSchemaVer);
  testConnectionToMetastore();
  System.out.println("Starting upgrade metastore schema from version " + fromSchemaVer + " to " + metaStoreSchemaInfo.getHiveSchemaVersion());
  String scriptLocation = metaStoreSchemaInfo.getMetaStoreScriptDir();
  try {
    for (String script : scriptsToRun) {
      System.out.println("Upgrade script " + script);
      if (dryRun) {
        continue; // dry run: announce the script but do not execute it
      }
      runPreUpgrade(scriptLocation, script);
      runSqlLine(scriptLocation, script);
      System.out.println("Completed " + script);
    }
  } catch (IOException eIO) {
    throw new HiveMetaException("Upgrade FAILED! Metastore state would be inconsistent !!", eIO);
  }
  // Confirm the recorded schema version matches the expected one post-upgrade.
  verifySchemaVersion();
}
Use of org.apache.hadoop.hive.metastore.HiveMetaException in the Apache Hive project.
From class MetastoreSchemaTool, method doInit:
/**
* Initialize the metastore schema
*
* @param toVersion
* If null then current hive version is used
*/
/**
 * Initialize the metastore schema
 *
 * @param toVersion
 * If null then current hive version is used
 */
void doInit(String toVersion) throws HiveMetaException {
  testConnectionToMetastore();
  System.out.println("Starting metastore schema initialization to " + toVersion);
  // Resolve the directory and file name of the init script for this version.
  String scriptDir = metaStoreSchemaInfo.getMetaStoreScriptDir();
  String scriptFile = metaStoreSchemaInfo.generateInitFileName(toVersion);
  try {
    System.out.println("Initialization script " + scriptFile);
    if (!dryRun) {
      runSqlLine(scriptDir, scriptFile);
      System.out.println("Initialization script completed");
    }
  } catch (IOException e) {
    throw new HiveMetaException("Schema initialization FAILED!" + " Metastore state would be inconsistent !!", e);
  }
}
Use of org.apache.hadoop.hive.metastore.HiveMetaException in the Apache Hive project.
From class MetastoreSchemaTool, method validateSequences:
/**
 * Checks that NEXT_VAL in SEQUENCE_TABLE is ahead of the maximum id actually
 * used in each sequenced table; a NEXT_VAL that lags behind max(id) would
 * cause duplicate-key failures on the next insert.
 *
 * @param conn open metastore JDBC connection; not closed by this method
 * @return true if every sequence entry exists and is >= the table's max id
 * @throws HiveMetaException if any query fails
 */
boolean validateSequences(Connection conn) throws HiveMetaException {
  // Maps each JDO model class to its (table, primary-key column) pair.
  Map<String, Pair<String, String>> seqNameToTable = new ImmutableMap.Builder<String, Pair<String, String>>().put("MDatabase", Pair.of("DBS", "DB_ID")).put("MRole", Pair.of("ROLES", "ROLE_ID")).put("MGlobalPrivilege", Pair.of("GLOBAL_PRIVS", "USER_GRANT_ID")).put("MTable", Pair.of("TBLS", "TBL_ID")).put("MStorageDescriptor", Pair.of("SDS", "SD_ID")).put("MSerDeInfo", Pair.of("SERDES", "SERDE_ID")).put("MColumnDescriptor", Pair.of("CDS", "CD_ID")).put("MTablePrivilege", Pair.of("TBL_PRIVS", "TBL_GRANT_ID")).put("MTableColumnStatistics", Pair.of("TAB_COL_STATS", "CS_ID")).put("MPartition", Pair.of("PARTITIONS", "PART_ID")).put("MPartitionColumnStatistics", Pair.of("PART_COL_STATS", "CS_ID")).put("MFunction", Pair.of("FUNCS", "FUNC_ID")).put("MIndex", Pair.of("IDXS", "INDEX_ID")).put("MStringList", Pair.of("SKEWED_STRING_LIST", "STRING_LIST_ID")).build();
  System.out.println("Validating sequence number for SEQUENCE_TABLE");
  boolean isValid = true;
  // Loop-invariant: the original rebuilt this string and re-prepared the
  // statement for every sequence; prepare it once and rebind per iteration.
  String seqQuery = needsQuotedIdentifier ?
      ("select t.\"NEXT_VAL\" from \"SEQUENCE_TABLE\" t WHERE t.\"SEQUENCE_NAME\"=? order by t.\"SEQUENCE_NAME\" ") :
      ("select t.NEXT_VAL from SEQUENCE_TABLE t WHERE t.SEQUENCE_NAME=? order by t.SEQUENCE_NAME ");
  // try-with-resources: the original leaked stmt/res/pStmt/resSeq on every path.
  try (Statement stmt = conn.createStatement();
       PreparedStatement pStmt = conn.prepareStatement(seqQuery)) {
    for (String seqName : seqNameToTable.keySet()) {
      String tableName = seqNameToTable.get(seqName).getLeft();
      String tableKey = seqNameToTable.get(seqName).getRight();
      String fullSequenceName = "org.apache.hadoop.hive.metastore.model." + seqName;
      String maxIdQuery = needsQuotedIdentifier ?
          ("select max(\"" + tableKey + "\") from \"" + tableName + "\"") :
          ("select max(" + tableKey + ") from " + tableName);
      try (ResultSet res = stmt.executeQuery(maxIdQuery)) {
        if (res.next()) {
          long maxId = res.getLong(1);
          // max(id) is 0 when the table is empty; no sequence entry required then.
          if (maxId > 0) {
            pStmt.setString(1, fullSequenceName);
            try (ResultSet resSeq = pStmt.executeQuery()) {
              if (!resSeq.next()) {
                isValid = false;
                logAndPrintToError("Missing SEQUENCE_NAME " + seqName + " from SEQUENCE_TABLE");
              } else if (resSeq.getLong(1) < maxId) {
                isValid = false;
                logAndPrintToError("NEXT_VAL for " + seqName + " in SEQUENCE_TABLE < max(" + tableKey + ") in " + tableName);
              }
            }
          }
        }
      }
    }
    System.out.println((isValid ? "Succeeded" : "Failed") + " in sequence number validation for SEQUENCE_TABLE.");
    return isValid;
  } catch (SQLException e) {
    throw new HiveMetaException("Failed to validate sequence number for SEQUENCE_TABLE", e);
  }
}
Aggregations