Example 61 with NoSuchObjectException

Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.

From the class HiveAlterHandler, method alterTableUpdateTableColumnStats.

@VisibleForTesting
void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable) throws MetaException, InvalidObjectException {
    String dbName = oldTable.getDbName().toLowerCase();
    String tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(oldTable.getTableName());
    String newDbName = newTable.getDbName().toLowerCase();
    String newTableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(newTable.getTableName());
    try {
        List<FieldSchema> oldCols = oldTable.getSd().getCols();
        List<FieldSchema> newCols = newTable.getSd().getCols();
        List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
        ColumnStatistics colStats = null;
        boolean updateColumnStats = true;
        // Nothing to update if everything is the same
        if (newDbName.equals(dbName) && newTableName.equals(tableName) && MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
            updateColumnStats = false;
        }
        if (updateColumnStats) {
            List<String> oldColNames = new ArrayList<>(oldCols.size());
            for (FieldSchema oldCol : oldCols) {
                oldColNames.add(oldCol.getName());
            }
            // Collect column stats which need to be rewritten and remove old stats
            colStats = msdb.getTableColumnStatistics(dbName, tableName, oldColNames);
            if (colStats == null) {
                updateColumnStats = false;
            } else {
                List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
                if (statsObjs != null) {
                    List<String> deletedCols = new ArrayList<>();
                    for (ColumnStatisticsObj statsObj : statsObjs) {
                        boolean found = false;
                        for (FieldSchema newCol : newCols) {
                            if (statsObj.getColName().equalsIgnoreCase(newCol.getName()) && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
                                found = true;
                                break;
                            }
                        }
                        if (found) {
                            if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) {
                                msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName());
                                newStatsObjs.add(statsObj);
                                deletedCols.add(statsObj.getColName());
                            }
                        } else {
                            msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName());
                            deletedCols.add(statsObj.getColName());
                        }
                    }
                    StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols);
                }
            }
        }
        // Change to new table and append stats for the new table
        msdb.alterTable(dbName, tableName, newTable);
        if (updateColumnStats && !newStatsObjs.isEmpty()) {
            ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
            statsDesc.setDbName(newDbName);
            statsDesc.setTableName(newTableName);
            colStats.setStatsObj(newStatsObjs);
            msdb.updateTableColumnStatistics(colStats);
        }
    } catch (NoSuchObjectException nsoe) {
        LOG.debug("Could not find db entry." + nsoe);
    } catch (InvalidInputException e) {
        // should not happen since the inputs were verified before being passed in
        throw new InvalidObjectException("Invalid inputs to update table column stats: " + e);
    }
}
Also used : ColumnStatistics(org.apache.hadoop.hive.metastore.api.ColumnStatistics) InvalidInputException(org.apache.hadoop.hive.metastore.api.InvalidInputException) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) ColumnStatisticsObj(org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj) ColumnStatisticsDesc(org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
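
The keep-or-delete decision above comes down to a case-insensitive match on both column name and column type; only entries that match some new column survive, and even a surviving entry is rewritten (deleted and re-added) when the database or table name changes, since the stats descriptor embeds both names. As a standalone illustration of just the match rule (plain Java, not Hive code; the class and method names here are hypothetical):

import java.util.List;
import java.util.Map;

public class StatsSurvivalSketch {

    // A stats entry survives the alter only if some new column matches it by
    // name AND type, ignoring case; otherwise the loop above deletes it.
    static boolean statsEntrySurvives(String statsColName, String statsColType,
                                      List<Map.Entry<String, String>> newCols) {
        for (Map.Entry<String, String> col : newCols) {
            if (statsColName.equalsIgnoreCase(col.getKey())
                    && statsColType.equalsIgnoreCase(col.getValue())) {
                return true;
            }
        }
        return false;
    }
}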

Example 62 with NoSuchObjectException

Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.

From the class Hive, method getPartition.

/**
 * Returns partition metadata
 *
 * @param tbl
 *          the partition's table
 * @param partSpec
 *          partition keys and values
 * @param forceCreate
 *          if true and the partition does not exist, a partition is
 *          created
 * @param partPath the path where the partition data is located
 * @param inheritTableSpecs whether to copy over the table specs (input
 *                          format, output format, serde)
 * @return result partition object or null if there is no partition
 * @throws HiveException
 */
public Partition getPartition(Table tbl, Map<String, String> partSpec, boolean forceCreate, String partPath, boolean inheritTableSpecs) throws HiveException {
    tbl.validatePartColumnNames(partSpec, true);
    List<String> pvals = new ArrayList<String>();
    for (FieldSchema field : tbl.getPartCols()) {
        String val = partSpec.get(field.getName());
        // enable dynamic partitioning
        if ((val == null && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) || (val != null && val.length() == 0)) {
            throw new HiveException("get partition: Value for key " + field.getName() + " is null or empty");
        } else if (val != null) {
            pvals.add(val);
        }
    }
    org.apache.hadoop.hive.metastore.api.Partition tpart = null;
    try {
        tpart = getSynchronizedMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(), pvals, getUserName(), getGroupNames());
    } catch (NoSuchObjectException nsoe) {
        // this means no partition exists for the given partition
        // key value pairs - thrift cannot handle null return values, hence
        // getPartition() throws NoSuchObjectException to indicate null partition
        tpart = null;
    } catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    }
    try {
        if (forceCreate) {
            if (tpart == null) {
                LOG.debug("creating partition for table " + tbl.getTableName() + " with partition spec : " + partSpec);
                try {
                    tpart = getSynchronizedMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals);
                } catch (AlreadyExistsException aee) {
                    LOG.debug("Caught already exists exception, trying to alter partition instead");
                    tpart = getSynchronizedMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(), pvals, getUserName(), getGroupNames());
                    alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
                } catch (Exception e) {
                    if (CheckJDOException.isJDODataStoreException(e)) {
                        // Using utility method above, so that JDODataStoreException doesn't
                        // have to be used here. This helps avoid adding jdo dependency for
                        // hcatalog client uses
                        LOG.debug("Caught JDO exception, trying to alter partition instead");
                        tpart = getSynchronizedMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(), pvals, getUserName(), getGroupNames());
                        if (tpart == null) {
                            // The earlier exception must have been thrown
                            // in creating the partition, since the partition
                            // still doesn't exist; rethrow it.
                            throw e;
                        }
                        alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
                    } else {
                        throw e;
                    }
                }
            } else {
                alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
                fireInsertEvent(tbl, partSpec, true, null);
            }
        }
        if (tpart == null) {
            return null;
        }
    } catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    }
    return new Partition(tbl, tpart);
}
Also used : AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) TException(org.apache.thrift.TException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException) FileNotFoundException(java.io.FileNotFoundException) JDODataStoreException(javax.jdo.JDODataStoreException)
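
The first catch block above encodes a metastore convention: Thrift cannot return null, so getPartition-style calls throw NoSuchObjectException to mean "no such partition". A minimal sketch of that caller-side translation, assuming an IMetaStoreClient instance (the helper name is hypothetical, not taken from this example):

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

public class PartitionLookupSketch {

    // Translate the "not found" exception back into a null result for callers.
    static Partition getPartitionOrNull(IMetaStoreClient client, String db,
                                        String table, List<String> partVals)
            throws TException {
        try {
            return client.getPartition(db, table, partVals);
        } catch (NoSuchObjectException nsoe) {
            return null; // no partition exists for these key values
        }
    }
}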

Example 63 with NoSuchObjectException

Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.

From the class Hive, method createPartitions.

public List<Partition> createPartitions(AddPartitionDesc addPartitionDesc) throws HiveException {
    Table tbl = getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName());
    int size = addPartitionDesc.getPartitionCount();
    List<org.apache.hadoop.hive.metastore.api.Partition> in = new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>(size);
    for (int i = 0; i < size; ++i) {
        in.add(convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i)));
    }
    List<Partition> out = new ArrayList<Partition>();
    try {
        if (!addPartitionDesc.getReplicationSpec().isInReplicationScope()) {
            // TODO: normally, the result is not necessary; might make sense to pass false
            for (org.apache.hadoop.hive.metastore.api.Partition outPart : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) {
                out.add(new Partition(tbl, outPart));
            }
        } else {
            // For replication add-ptns, we need to follow an insert-if-not-exists, alter-if-exists scenario.
            // TODO : ideally, we should push this mechanism to the metastore, because, otherwise, we have
            // no choice but to iterate over the partitions here.
            List<org.apache.hadoop.hive.metastore.api.Partition> partsToAdd = new ArrayList<>();
            List<org.apache.hadoop.hive.metastore.api.Partition> partsToAlter = new ArrayList<>();
            List<String> part_names = new ArrayList<>();
            for (org.apache.hadoop.hive.metastore.api.Partition p : in) {
                part_names.add(Warehouse.makePartName(tbl.getPartitionKeys(), p.getValues()));
                try {
                    org.apache.hadoop.hive.metastore.api.Partition ptn = getMSC().getPartition(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), p.getValues());
                    if (addPartitionDesc.getReplicationSpec().allowReplacementInto(ptn.getParameters())) {
                        partsToAlter.add(p);
                    }
                // else ptn already exists, but we do nothing with it.
                } catch (NoSuchObjectException nsoe) {
                    // if the object does not exist, we want to add it.
                    partsToAdd.add(p);
                }
            }
            for (org.apache.hadoop.hive.metastore.api.Partition outPart : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true)) {
                out.add(new Partition(tbl, outPart));
            }
            getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), partsToAlter, null);
            for (org.apache.hadoop.hive.metastore.api.Partition outPart : getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), part_names)) {
                out.add(new Partition(tbl, outPart));
            }
        }
    } catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    }
    return out;
}
Also used : RelOptHiveTable(org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable) ArrayList(java.util.ArrayList) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) TException(org.apache.thrift.TException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException) FileNotFoundException(java.io.FileNotFoundException) JDODataStoreException(javax.jdo.JDODataStoreException)
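
The replication branch above boils down to a probe-and-classify loop: look each incoming partition up, and let NoSuchObjectException route it to the "add" list. A condensed sketch of just that classification (the class, method, and parameter names are assumptions for illustration, not Hive API):

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
import org.apache.thrift.TException;

public class ReplPartitionClassifySketch {

    static void classify(IMetaStoreClient client, String db, String table,
                         List<Partition> incoming, ReplicationSpec spec,
                         List<Partition> toAdd, List<Partition> toAlter)
            throws TException {
        for (Partition p : incoming) {
            try {
                Partition existing = client.getPartition(db, table, p.getValues());
                // Already present: replace it only if the replication spec allows it.
                if (spec.allowReplacementInto(existing.getParameters())) {
                    toAlter.add(p);
                }
            } catch (NoSuchObjectException nsoe) {
                toAdd.add(p); // absent: safe to insert
            }
        }
    }
}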

Example 64 with NoSuchObjectException

Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.

From the class SessionHiveMetaStoreClient, method dropTempTable.

private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boolean deleteData, EnvironmentContext envContext) throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
    String dbName = table.getDbName().toLowerCase();
    String tableName = table.getTableName().toLowerCase();
    // Determine the temp table path
    Path tablePath = null;
    String pathStr = table.getSd().getLocation();
    if (pathStr != null) {
        try {
            tablePath = new Path(table.getSd().getLocation());
            if (!getWh().isWritable(tablePath.getParent())) {
                throw new MetaException("Table metadata not deleted since " + tablePath.getParent() + " is not writable by " + SecurityUtils.getUser());
            }
        } catch (IOException err) {
            MetaException metaException = new MetaException("Error checking temp table path for " + table.getTableName());
            metaException.initCause(err);
            throw metaException;
        }
    }
    // Remove table entry from SessionState
    Map<String, Table> tables = getTempTablesForDatabase(dbName);
    if (tables == null || tables.remove(tableName) == null) {
        throw new MetaException("Could not find temp table entry for " + StatsUtils.getFullyQualifiedTableName(dbName, tableName));
    }
    // Delete table data
    if (deleteData && !MetaStoreUtils.isExternalTable(table)) {
        try {
            boolean ifPurge = false;
            if (envContext != null) {
                ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
            }
            getWh().deleteDir(tablePath, true, ifPurge, false);
        } catch (Exception err) {
            LOG.error("Failed to delete temp table directory: " + tablePath, err);
        // Forgive error
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) IOException(java.io.IOException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) InvalidInputException(org.apache.hadoop.hive.metastore.api.InvalidInputException) UnknownDBException(org.apache.hadoop.hive.metastore.api.UnknownDBException) UnknownTableException(org.apache.hadoop.hive.metastore.api.UnknownTableException) TException(org.apache.thrift.TException) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException)
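
A detail that is easy to miss above: the purge decision travels as a string property under the key "ifPurge" in the EnvironmentContext and is parsed with Boolean.parseBoolean. A hedged sketch of how a caller could signal purge, inferred only from the lookup shown above (the helper and class names are hypothetical):

import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

public class PurgeContextSketch {

    // Build a context whose "ifPurge" property makes the drop path delete the
    // table directory without moving it to the trash.
    static EnvironmentContext purgeContext() {
        EnvironmentContext ctx = new EnvironmentContext();
        ctx.putToProperties("ifPurge", "true");
        return ctx;
    }
}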

Example 65 with NoSuchObjectException

Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.

From the class DDLTask, method dropDatabase.

/**
 * Drop a database.
 * @param db the Hive object to use for the drop
 * @param dropDb the descriptor of the database to drop
 * @return always returns 0
 * @throws HiveException
 */
private int dropDatabase(Hive db, DropDatabaseDesc dropDb) throws HiveException {
    try {
        String dbName = dropDb.getDatabaseName();
        ReplicationSpec replicationSpec = dropDb.getReplicationSpec();
        if (replicationSpec.isInReplicationScope()) {
            Database database = db.getDatabase(dbName);
            if (database == null || !replicationSpec.allowEventReplacementInto(database.getParameters())) {
                return 0;
            }
        }
        db.dropDatabase(dbName, true, dropDb.getIfExists(), dropDb.isCasdade());
        // Unregister the functions as well
        if (dropDb.isCasdade()) {
            FunctionRegistry.unregisterPermanentFunctions(dbName);
        }
    } catch (NoSuchObjectException ex) {
        throw new HiveException(ex, ErrorMsg.DATABASE_NOT_EXISTS, dropDb.getDatabaseName());
    }
    return 0;
}
Also used : ReplicationSpec(org.apache.hadoop.hive.ql.parse.ReplicationSpec) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) Database(org.apache.hadoop.hive.metastore.api.Database) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException)
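
For completeness, a minimal sketch of the client-side call this task ultimately issues. With ignoreUnknownDb set to false, a missing database surfaces as the NoSuchObjectException caught above rather than being silently ignored (the wrapper class and method names are hypothetical):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class DropDatabaseSketch {

    // Drop a database and, with cascade, every table and function it contains.
    static void dropDbCascade(HiveConf conf, String dbName)
            throws HiveException, NoSuchObjectException {
        Hive db = Hive.get(conf);
        db.dropDatabase(dbName, /* deleteData */ true,
                /* ignoreUnknownDb */ false, /* cascade */ true);
    }
}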

Aggregations

NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 144 usages
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 74 usages
TException (org.apache.thrift.TException): 55 usages
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 47 usages
Table (org.apache.hadoop.hive.metastore.api.Table): 45 usages
Partition (org.apache.hadoop.hive.metastore.api.Partition): 44 usages
ArrayList (java.util.ArrayList): 42 usages
IOException (java.io.IOException): 39 usages
InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 36 usages
AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException): 30 usages
Test (org.junit.Test): 24 usages
Database (org.apache.hadoop.hive.metastore.api.Database): 22 usages
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 21 usages
InvalidInputException (org.apache.hadoop.hive.metastore.api.InvalidInputException): 20 usages
UnknownDBException (org.apache.hadoop.hive.metastore.api.UnknownDBException): 20 usages
Path (org.apache.hadoop.fs.Path): 19 usages
Query (javax.jdo.Query): 17 usages
SQLException (java.sql.SQLException): 16 usages
ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException): 13 usages
InvalidMetaException (com.netflix.metacat.common.server.connectors.exception.InvalidMetaException): 13 usages