Example 26 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

The class ObjectStore, method alterCatalog.

@Override
public void alterCatalog(String catName, Catalog cat) throws MetaException, InvalidOperationException {
    // Catalogs cannot be renamed; the incoming object must keep the same name.
    if (!cat.getName().equals(catName)) {
        throw new InvalidOperationException("You cannot change a catalog's name");
    }
    boolean committed = false;
    try {
        MCatalog mCat = getMCatalog(catName);
        // Only overwrite fields the caller actually supplied.
        if (org.apache.commons.lang3.StringUtils.isNotBlank(cat.getLocationUri())) {
            mCat.setLocationUri(cat.getLocationUri());
        }
        if (org.apache.commons.lang3.StringUtils.isNotBlank(cat.getDescription())) {
            mCat.setDescription(cat.getDescription());
        }
        openTransaction();
        pm.makePersistent(mCat);
        committed = commitTransaction();
    } finally {
        // Roll back if the commit never happened (exception or failed commit).
        if (!committed) {
            rollbackTransaction();
        }
    }
}
Also used: InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException), MCatalog (org.apache.hadoop.hive.metastore.model.MCatalog)
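
For context, a caller typically reaches this code path through the metastore client. Below is a minimal caller sketch, assuming a reachable metastore and the Hive 3.x IMetaStoreClient API (which exposes getCatalog and alterCatalog); the catalog name my_catalog is made up for illustration.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;

public class AlterCatalogSketch {
    public static void main(String[] args) throws Exception {
        IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        // Fetch the existing catalog and update a mutable field.
        Catalog cat = client.getCatalog("my_catalog");
        cat.setDescription("updated description");
        // Succeeds: the name still matches the catalog being altered.
        client.alterCatalog("my_catalog", cat);
        // Renaming is rejected by ObjectStore.alterCatalog above.
        cat.setName("renamed_catalog");
        try {
            client.alterCatalog("my_catalog", cat);
        } catch (InvalidOperationException e) {
            System.err.println("Rename rejected: " + e.getMessage());
        }
    }
}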

Example 27 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

The class TestAlterPartitions, method testAlterPartitionsUnknownPartition.

@Test
public void testAlterPartitionsUnknownPartition() throws Exception {
    Partition part1 = null;
    try {
        createTable4PartColsParts(client);
        Table t = client.getTable(DB_NAME, TABLE_NAME);
        // Build a partition whose values match no existing partition.
        PartitionBuilder builder = new PartitionBuilder();
        Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
        part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0);
        makeTestChangesOnPartition(part1);
        // The batch contains one unknown partition, so the whole call must fail.
        client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1));
        fail("Should have thrown InvalidOperationException");
    } catch (InvalidOperationException e) {
        // Verify the known partition was left untouched despite the attempted change.
        part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0);
        assertPartitionUnchanged(part1, part1.getValues(), PARTCOL_SCHEMA);
    }
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), PartitionBuilder (org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder), InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException), Test (org.junit.Test), MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)
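
The test relies on alter_partitions rejecting the whole batch when any partition in it is unknown. A defensive caller can pre-check the batch instead; a minimal sketch, assuming the IMetaStoreClient API (getPartition throws NoSuchObjectException for a missing partition); the helper name allPartitionsExist is made up.

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;

public class PartitionPrecheck {
    // Returns true only if every partition in the batch already exists,
    // i.e. the batch would not trip the InvalidOperationException above.
    static boolean allPartitionsExist(IMetaStoreClient client, String db, String table,
                                      List<Partition> parts) throws Exception {
        for (Partition p : parts) {
            try {
                client.getPartition(db, table, p.getValues());
            } catch (NoSuchObjectException e) {
                return false;
            }
        }
        return true;
    }
}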

Example 28 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

The class Hive, method loadTable.

/**
 * Load a directory into a Hive table.
 *  - Alters the existing content of the table with the contents of loadPath.
 *  - If the table does not exist, an exception is thrown.
 *  - Files in loadPath are moved into Hive, but the directory itself is not removed.
 *
 * @param loadPath
 *          directory containing the files to load into the table
 * @param tableName
 *          name of the table to be loaded
 * @param replace
 *          if true, replace the files in the table; otherwise add files to the table
 * @param isSrcLocal
 *          if the source directory is LOCAL
 * @param isSkewedStoreAsSubdir
 *          if list bucketing is enabled
 * @param isAcid
 *          true if this is an ACID-based write
 * @param hasFollowingStatsTask
 *          if there is any following stats task
 */
public void loadTable(Path loadPath, String tableName, boolean replace, boolean isSrcLocal, boolean isSkewedStoreAsSubdir, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException {
    List<Path> newFiles = null;
    Table tbl = getTable(tableName);
    HiveConf sessionConf = SessionState.getSessionConf();
    if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
        newFiles = Collections.synchronizedList(new ArrayList<Path>());
    }
    if (replace) {
        Path tableDest = tbl.getPath();
        replaceFiles(tableDest, loadPath, tableDest, tableDest, sessionConf, isSrcLocal);
    } else {
        FileSystem fs;
        try {
            fs = tbl.getDataLocation().getFileSystem(sessionConf);
            copyFiles(sessionConf, loadPath, tbl.getPath(), fs, isSrcLocal, isAcid, newFiles);
        } catch (IOException e) {
            throw new HiveException("addFiles: filesystem error in check phase", e);
        }
    }
    if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
        StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
    }
    // Column stats will be inaccurate after the load, so clear their state.
    StatsSetupConst.clearColumnStatsState(tbl.getParameters());
    try {
        if (isSkewedStoreAsSubdir) {
            SkewedInfo skewedInfo = tbl.getSkewedInfo();
            // Construct list bucketing location mappings from sub-directory name.
            Map<List<String>, String> skewedColValueLocationMaps = constructListBucketingLocationMap(tbl.getPath(), skewedInfo);
            // Add list bucketing location mappings.
            skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
        }
    } catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    }
    EnvironmentContext environmentContext = null;
    if (hasFollowingStatsTask) {
        environmentContext = new EnvironmentContext();
        environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
    }
    try {
        alterTable(tableName, tbl, environmentContext);
    } catch (InvalidOperationException e) {
        throw new HiveException(e);
    }
    fireInsertEvent(tbl, null, newFiles);
}
Also used: Path (org.apache.hadoop.fs.Path), ArrayList (java.util.ArrayList), IOException (java.io.IOException), EnvironmentContext (org.apache.hadoop.hive.metastore.api.EnvironmentContext), SkewedInfo (org.apache.hadoop.hive.metastore.api.SkewedInfo), FileSystem (org.apache.hadoop.fs.FileSystem), InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException), HiveConf (org.apache.hadoop.hive.conf.HiveConf), List (java.util.List), LinkedList (java.util.LinkedList)
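
A hedged usage sketch for the method above; the staging path and table name are assumptions, and the flag values simply spell out the signature shown.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class LoadTableSketch {
    public static void main(String[] args) throws HiveException {
        Hive db = Hive.get(); // bound to the current session configuration
        Path staged = new Path("/tmp/staging/my_table"); // hypothetical staging dir
        db.loadTable(staged, "my_table",
            true,   // replace: overwrite existing table contents
            false,  // isSrcLocal: source is already on the cluster filesystem
            false,  // isSkewedStoreAsSubdir: list bucketing disabled
            false,  // isAcid: not an ACID write
            false); // hasFollowingStatsTask: no stats task follows
    }
}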

Example 29 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

The class DDLTask, method alterIndex.

private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException {
    if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
        throw new UnsupportedOperationException("Indexes unsupported for Tez execution engine");
    }
    String baseTableName = alterIndex.getBaseTableName();
    String indexName = alterIndex.getIndexName();
    Index idx = db.getIndex(baseTableName, indexName);
    switch(alterIndex.getOp()) {
        case ADDPROPS:
            idx.getParameters().putAll(alterIndex.getProps());
            break;
        case UPDATETIMESTAMP:
            try {
                Map<String, String> props = new HashMap<String, String>();
                Map<Map<String, String>, Long> basePartTs = new HashMap<Map<String, String>, Long>();
                Table baseTbl = db.getTable(baseTableName);
                if (baseTbl.isPartitioned()) {
                    List<Partition> baseParts;
                    if (alterIndex.getSpec() != null) {
                        baseParts = db.getPartitions(baseTbl, alterIndex.getSpec());
                    } else {
                        baseParts = db.getPartitions(baseTbl);
                    }
                    if (baseParts != null) {
                        for (Partition p : baseParts) {
                            Path dataLocation = p.getDataLocation();
                            FileSystem fs = dataLocation.getFileSystem(db.getConf());
                            FileStatus fss = fs.getFileStatus(dataLocation);
                            long lastModificationTime = fss.getModificationTime();
                            FileStatus[] parts = fs.listStatus(dataLocation, FileUtils.HIDDEN_FILES_PATH_FILTER);
                            if (parts != null && parts.length > 0) {
                                for (FileStatus status : parts) {
                                    if (status.getModificationTime() > lastModificationTime) {
                                        lastModificationTime = status.getModificationTime();
                                    }
                                }
                            }
                            basePartTs.put(p.getSpec(), lastModificationTime);
                        }
                    }
                } else {
                    FileSystem fs = baseTbl.getPath().getFileSystem(db.getConf());
                    FileStatus fss = fs.getFileStatus(baseTbl.getPath());
                    basePartTs.put(null, fss.getModificationTime());
                }
                for (Map<String, String> spec : basePartTs.keySet()) {
                    if (spec != null) {
                        props.put(spec.toString(), basePartTs.get(spec).toString());
                    } else {
                        props.put("base_timestamp", basePartTs.get(null).toString());
                    }
                }
                idx.getParameters().putAll(props);
            } catch (HiveException e) {
                throw new HiveException("ERROR: Failed to update index timestamps", e);
            } catch (IOException e) {
                throw new HiveException("ERROR: Failed to look up timestamps on filesystem", e);
            }
            break;
        default:
            console.printError("Unsupported Alter command");
            return 1;
    }
    // set last modified by properties
    if (!updateModifiedParameters(idx.getParameters(), conf)) {
        return 1;
    }
    try {
        db.alterIndex(baseTableName, indexName, idx);
    } catch (InvalidOperationException e) {
        console.printError("Invalid alter operation: " + e.getMessage());
        LOG.info("alter index: " + stringifyException(e));
        return 1;
    } catch (HiveException e) {
        console.printError("Invalid alter operation: " + e.getMessage());
        return 1;
    }
    return 0;
}
Also used: Path (org.apache.hadoop.fs.Path), Partition (org.apache.hadoop.hive.ql.metadata.Partition), AlterTableExchangePartition (org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition), Table (org.apache.hadoop.hive.ql.metadata.Table), FileStatus (org.apache.hadoop.fs.FileStatus), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), Index (org.apache.hadoop.hive.metastore.api.Index), IOException (java.io.IOException), FileSystem (org.apache.hadoop.fs.FileSystem), InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException), Map (java.util.Map), TreeMap (java.util.TreeMap)
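
The UPDATETIMESTAMP branch reduces to one reusable idea: take the newer of a directory's own modification time and those of its visible children. A standalone sketch of that step, using the same FileUtils.HIDDEN_FILES_PATH_FILTER as above (the helper name latestModificationTime is made up).

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;

public class TimestampSketch {
    // Newest modification time among the directory itself and its non-hidden children.
    static long latestModificationTime(FileSystem fs, Path dir) throws IOException {
        long latest = fs.getFileStatus(dir).getModificationTime();
        FileStatus[] children = fs.listStatus(dir, FileUtils.HIDDEN_FILES_PATH_FILTER);
        if (children != null) {
            for (FileStatus child : children) {
                latest = Math.max(latest, child.getModificationTime());
            }
        }
        return latest;
    }
}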

Example 30 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

The class LoadDatabase, method existEmptyDb.

private boolean existEmptyDb(String dbName) throws InvalidOperationException, HiveException {
    Database db = context.hiveDb.getDatabase(dbName);
    if (db == null) {
        return false;
    }
    List<String> allTables = context.hiveDb.getAllTables(dbName);
    List<String> allFunctions = context.hiveDb.getFunctions(dbName, "*");
    if (allTables.isEmpty() && allFunctions.isEmpty()) {
        return true;
    }
    throw new InvalidOperationException("Database " + db.getName() + " is not empty. One or more tables/functions exist.");
}
Also used: Database (org.apache.hadoop.hive.metastore.api.Database), InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException)
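
The method encodes a tri-state contract: false means the database is absent, true means it exists and is empty, and the exception means it exists but is populated. A hypothetical caller sketch (existEmptyDb is the method above; the database name repl_target is made up):

try {
    if (existEmptyDb("repl_target")) {
        // Database exists and is empty: proceed with the load.
    } else {
        // Database is absent: create it before loading.
    }
} catch (InvalidOperationException e) {
    // Database exists and is populated: refuse to load into it.
    throw new HiveException("Cannot load into non-empty database", e);
}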

Aggregations

InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 51
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 26
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 23
IOException (java.io.IOException): 19
ArrayList (java.util.ArrayList): 18
Table (org.apache.hadoop.hive.metastore.api.Table): 17
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 16
TException (org.apache.thrift.TException): 15
Partition (org.apache.hadoop.hive.metastore.api.Partition): 14
FileSystem (org.apache.hadoop.fs.FileSystem): 12
Path (org.apache.hadoop.fs.Path): 12
List (java.util.List): 10
AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException): 10
InvalidInputException (org.apache.hadoop.hive.metastore.api.InvalidInputException): 10
MWMResourcePlan (org.apache.hadoop.hive.metastore.model.MWMResourcePlan): 9
SQLException (java.sql.SQLException): 8
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 8
Test (org.junit.Test): 8
LinkedList (java.util.LinkedList): 7
Database (org.apache.hadoop.hive.metastore.api.Database): 7