Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in the Apache Hive project.
Class ObjectStore, method alterCatalog.
/**
 * Alters the location URI and/or description of an existing catalog.
 * Renaming is not supported: the supplied {@code cat.getName()} must match
 * {@code catName}.
 *
 * @param catName name of the catalog to alter
 * @param cat catalog object carrying the new values; blank/null fields are ignored
 * @throws MetaException on metastore errors
 * @throws InvalidOperationException if the caller attempts to rename the catalog
 */
@Override
public void alterCatalog(String catName, Catalog cat) throws MetaException, InvalidOperationException {
if (!cat.getName().equals(catName)) {
throw new InvalidOperationException("You cannot change a catalog's name");
}
boolean committed = false;
try {
// Open the transaction BEFORE reading so the read-modify-write of the
// catalog row is atomic. The original opened it only after mutating the
// in-memory object, leaving the read outside the transaction (lost-update
// risk) and letting the finally-block roll back a never-opened transaction
// if getMCatalog threw.
openTransaction();
MCatalog mCat = getMCatalog(catName);
// Only overwrite fields the caller actually supplied; blank values leave
// the existing setting untouched.
if (org.apache.commons.lang3.StringUtils.isNotBlank(cat.getLocationUri())) {
mCat.setLocationUri(cat.getLocationUri());
}
if (org.apache.commons.lang3.StringUtils.isNotBlank(cat.getDescription())) {
mCat.setDescription(cat.getDescription());
}
pm.makePersistent(mCat);
committed = commitTransaction();
} finally {
if (!committed) {
rollbackTransaction();
}
}
}
Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in the Apache Hive project.
Class TestAlterPartitions, method testAlterPartitionsUnknownPartition.
@Test
public void testAlterPartitionsUnknownPartition() throws Exception {
Partition existing = null;
try {
createTable4PartColsParts(client);
Table table = client.getTable(DB_NAME, TABLE_NAME);
// Build a partition whose value tuple does not exist in the table.
Partition bogus = new PartitionBuilder()
.inTable(table)
.addValue("1111")
.addValue("11")
.addValue("11")
.build(metaStore.getConf());
existing = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0);
makeTestChangesOnPartition(existing);
// Altering a batch that contains an unknown partition must be rejected.
client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(bogus, existing));
fail("Should have thrown InvalidOperationException");
} catch (InvalidOperationException e) {
// The rejected batch must not have modified the existing partition.
existing = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0);
assertPartitionUnchanged(existing, existing.getValues(), PARTCOL_SCHEMA);
}
}
Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in the Apache Hive project.
Class Hive, method loadTable.
/**
 * Load a directory into a Hive Table. - Alters existing content of table with
 * the contents of loadPath. - If table does not exist - an exception is
 * thrown - files in loadPath are moved into Hive. But the directory itself is
 * not removed.
 *
 * @param loadPath
 * Directory containing files to load into Table
 * @param tableName
 * name of table to be loaded.
 * @param replace
 * if true - replace files in the table, otherwise add files to table
 * @param isSrcLocal
 * If the source directory is LOCAL
 * @param isSkewedStoreAsSubdir
 * if list bucketing enabled
 * @param isAcid true if this is an ACID based write
 * @param hasFollowingStatsTask
 * if there is any following stats task
 * @throws HiveException on filesystem errors during copy, or if the metastore
 * alter-table call fails
 */
public void loadTable(Path loadPath, String tableName, boolean replace, boolean isSrcLocal, boolean isSkewedStoreAsSubdir, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException {
// Collect the list of newly added files only when DML events will be fired
// (temporary tables never fire events). A synchronized list is used because
// copyFiles may append from multiple threads.
List<Path> newFiles = null;
Table tbl = getTable(tableName);
HiveConf sessionConf = SessionState.getSessionConf();
if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
newFiles = Collections.synchronizedList(new ArrayList<Path>());
}
if (replace) {
// INSERT OVERWRITE: replace the table directory's contents wholesale.
Path tableDest = tbl.getPath();
replaceFiles(tableDest, loadPath, tableDest, tableDest, sessionConf, isSrcLocal);
} else {
// INSERT INTO: add the new files alongside the existing ones.
FileSystem fs;
try {
fs = tbl.getDataLocation().getFileSystem(sessionConf);
copyFiles(sessionConf, loadPath, tbl.getPath(), fs, isSrcLocal, isAcid, newFiles);
} catch (IOException e) {
throw new HiveException("addFiles: filesystem error in check phase", e);
}
}
// With autogather disabled, basic stats can no longer be trusted after this
// load; mark them invalid.
if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
}
//column stats will be inaccurate
StatsSetupConst.clearColumnStatsState(tbl.getParameters());
try {
if (isSkewedStoreAsSubdir) {
SkewedInfo skewedInfo = tbl.getSkewedInfo();
// Construct list bucketing location mappings from sub-directory name.
Map<List<String>, String> skewedColValueLocationMaps = constructListBucketingLocationMap(tbl.getPath(), skewedInfo);
// Add list bucketing location mappings.
skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
}
} catch (IOException e) {
LOG.error(StringUtils.stringifyException(e));
throw new HiveException(e);
}
// When a stats task follows, tell the metastore not to update stats itself
// so the follow-up task's results are not clobbered.
EnvironmentContext environmentContext = null;
if (hasFollowingStatsTask) {
environmentContext = new EnvironmentContext();
environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
}
try {
alterTable(tableName, tbl, environmentContext);
} catch (InvalidOperationException e) {
throw new HiveException(e);
}
// Fire the DML insert event (newFiles is null when events are disabled).
fireInsertEvent(tbl, null, newFiles);
}
Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in the Apache Hive project.
Class DDLTask, method alterIndex.
/**
 * Handles ALTER INDEX: either adds properties to the index (ADDPROPS) or
 * refreshes the recorded modification timestamps of the base table's data
 * (UPDATETIMESTAMP).
 *
 * @param db metastore handle
 * @param alterIndex descriptor of the alter operation
 * @return 0 on success, 1 on failure (failures are also printed to the console)
 * @throws HiveException on filesystem or metastore errors
 */
private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException {
if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
throw new UnsupportedOperationException("Indexes unsupported for Tez execution engine");
}
String baseTableName = alterIndex.getBaseTableName();
String indexName = alterIndex.getIndexName();
Index idx = db.getIndex(baseTableName, indexName);
switch(alterIndex.getOp()) {
case ADDPROPS:
idx.getParameters().putAll(alterIndex.getProps());
break;
case UPDATETIMESTAMP:
try {
Map<String, String> props = new HashMap<String, String>();
// Per partition-spec (null key = unpartitioned table): newest mtime of the
// data directory or any non-hidden file directly inside it.
Map<Map<String, String>, Long> basePartTs = new HashMap<Map<String, String>, Long>();
Table baseTbl = db.getTable(baseTableName);
if (baseTbl.isPartitioned()) {
List<Partition> baseParts;
if (alterIndex.getSpec() != null) {
baseParts = db.getPartitions(baseTbl, alterIndex.getSpec());
} else {
baseParts = db.getPartitions(baseTbl);
}
if (baseParts != null) {
for (Partition p : baseParts) {
Path dataLocation = p.getDataLocation();
FileSystem fs = dataLocation.getFileSystem(db.getConf());
long lastModificationTime = fs.getFileStatus(dataLocation).getModificationTime();
FileStatus[] parts = fs.listStatus(dataLocation, FileUtils.HIDDEN_FILES_PATH_FILTER);
if (parts != null) {
for (FileStatus status : parts) {
lastModificationTime = Math.max(lastModificationTime, status.getModificationTime());
}
}
basePartTs.put(p.getSpec(), lastModificationTime);
}
}
} else {
FileSystem fs = baseTbl.getPath().getFileSystem(db.getConf());
FileStatus fss = fs.getFileStatus(baseTbl.getPath());
basePartTs.put(null, fss.getModificationTime());
}
// entrySet() avoids the extra map lookup per key of keySet()+get().
for (Map.Entry<Map<String, String>, Long> entry : basePartTs.entrySet()) {
if (entry.getKey() != null) {
props.put(entry.getKey().toString(), entry.getValue().toString());
} else {
props.put("base_timestamp", entry.getValue().toString());
}
}
idx.getParameters().putAll(props);
} catch (HiveException e) {
// Preserve the cause; the original rethrow dropped it, making failures
// undiagnosable.
throw new HiveException("ERROR: Failed to update index timestamps", e);
} catch (IOException e) {
throw new HiveException("ERROR: Failed to look up timestamps on filesystem", e);
}
break;
default:
console.printError("Unsupported Alter command");
return 1;
}
// set last modified by properties
if (!updateModifiedParameters(idx.getParameters(), conf)) {
return 1;
}
try {
db.alterIndex(baseTableName, indexName, idx);
} catch (InvalidOperationException e) {
console.printError("Invalid alter operation: " + e.getMessage());
LOG.info("alter index: " + stringifyException(e));
return 1;
} catch (HiveException e) {
console.printError("Invalid alter operation: " + e.getMessage());
return 1;
}
return 0;
}
Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in the Apache Hive project.
Class LoadDatabase, method existEmptyDb.
/**
 * Checks whether the named database exists and is empty.
 *
 * @param dbName database to inspect
 * @return true when the database exists and contains no tables or functions;
 *         false when the database does not exist
 * @throws InvalidOperationException when the database exists but is non-empty
 * @throws HiveException on metastore access errors
 */
private boolean existEmptyDb(String dbName) throws InvalidOperationException, HiveException {
Database db = context.hiveDb.getDatabase(dbName);
if (db == null) {
return false;
}
boolean hasTables = !context.hiveDb.getAllTables(dbName).isEmpty();
boolean hasFunctions = !context.hiveDb.getFunctions(dbName, "*").isEmpty();
if (hasTables || hasFunctions) {
throw new InvalidOperationException("Database " + db.getName() + " is not empty. One or more tables/functions exist.");
}
return true;
}
Aggregations