
Example 16 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

The class DefaultIncompatibleTableChangeHandler, method checkColTypeChangeCompatible: rejects column type changes that are incompatible with the existing columns in the same positions, unless the table's serde is on the configured exception list.

private void checkColTypeChangeCompatible(Configuration conf, Table oldTable, Table newTable) throws InvalidOperationException {
    List<FieldSchema> oldCols = oldTable.getSd().getCols();
    List<FieldSchema> newCols = newTable.getSd().getCols();
    List<String> incompatibleCols = new ArrayList<>();
    int maxCols = Math.min(oldCols.size(), newCols.size());
    for (int i = 0; i < maxCols; i++) {
        if (!ColumnType.areColTypesCompatible(ColumnType.getTypeName(oldCols.get(i).getType()), ColumnType.getTypeName(newCols.get(i).getType()))) {
            incompatibleCols.add(newCols.get(i).getName());
        }
    }
    if (!incompatibleCols.isEmpty()) {
        Collection<String> exceptedTableSerdes = MetastoreConf.getStringCollection(conf, MetastoreConf.ConfVars.ALLOW_INCOMPATIBLE_COL_TYPE_CHANGES_TABLE_SERDES);
        SerDeInfo serDeInfo = oldTable.getSd().getSerdeInfo();
        String serializationLib = serDeInfo == null ? null : serDeInfo.getSerializationLib();
        if (exceptedTableSerdes.contains(serializationLib)) {
            LOG.info("Allowing incompatible column type change of {} for table {}" + " since the table serde {} is in excepted list of serdes", incompatibleCols, (oldTable.getDbName() + "." + oldTable.getTableName()), serializationLib);
            return;
        }
        throw new InvalidOperationException("The following columns have types incompatible with the existing " + "columns in their respective positions :\n" + org.apache.commons.lang.StringUtils.join(incompatibleCols, ','));
    }
}
Also used : FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) SerDeInfo(org.apache.hadoop.hive.metastore.api.SerDeInfo) ArrayList(java.util.ArrayList) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException)
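
The handler above compares the old and new column definitions position by position, collects the names of incompatible columns, and only then decides whether to fail or to allow the change for an excepted serde. Below is a minimal self-contained sketch of the same pattern; ColumnSpec, isCompatible, and the exception used here are illustrative stand-ins for FieldSchema, ColumnType.areColTypesCompatible, and InvalidOperationException, not Hive APIs.

import java.util.ArrayList;
import java.util.List;

public class ColumnChangeChecker {

    // Illustrative stand-in for FieldSchema: just a column name and a type string.
    record ColumnSpec(String name, String type) {}

    // Illustrative stand-in for ColumnType.areColTypesCompatible: here we only
    // allow keeping the type unchanged or widening int to bigint.
    static boolean isCompatible(String oldType, String newType) {
        return oldType.equals(newType)
                || ("int".equals(oldType) && "bigint".equals(newType));
    }

    static void checkColTypeChangeCompatible(List<ColumnSpec> oldCols, List<ColumnSpec> newCols) {
        List<String> incompatibleCols = new ArrayList<>();
        // Only positions present in both schemas are compared; added or dropped
        // trailing columns are not treated as a type change.
        int maxCols = Math.min(oldCols.size(), newCols.size());
        for (int i = 0; i < maxCols; i++) {
            if (!isCompatible(oldCols.get(i).type(), newCols.get(i).type())) {
                incompatibleCols.add(newCols.get(i).name());
            }
        }
        if (!incompatibleCols.isEmpty()) {
            throw new IllegalStateException(
                    "Incompatible column type changes: " + String.join(",", incompatibleCols));
        }
    }

    public static void main(String[] args) {
        List<ColumnSpec> oldCols = List.of(new ColumnSpec("id", "int"), new ColumnSpec("name", "string"));
        List<ColumnSpec> newCols = List.of(new ColumnSpec("id", "bigint"), new ColumnSpec("name", "date"));
        // Fails: "name" changed from string to date, which isCompatible does not allow.
        checkColTypeChangeCompatible(oldCols, newCols);
    }
}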

Example 17 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

The class ObjectStore, method alterPool: updates a workload-management pool inside a metastore transaction, applying only the fields the caller explicitly set, validating the scheduling policy, and moving descendant pools when the pool path changes.

@Override
public void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException {
    boolean commited = false;
    try {
        openTransaction();
        MWMResourcePlan resourcePlan = getMWMResourcePlan(pool.getResourcePlanName(), pool.getNs(), true);
        MWMPool mPool = getPool(resourcePlan, poolPath);
        pm.retrieve(mPool);
        if (pool.isSetAllocFraction()) {
            mPool.setAllocFraction(pool.getAllocFraction());
        }
        if (pool.isSetQueryParallelism()) {
            mPool.setQueryParallelism(pool.getQueryParallelism());
        }
        if (pool.isSetIsSetSchedulingPolicy() && pool.isIsSetSchedulingPolicy()) {
            if (pool.isSetSchedulingPolicy()) {
                String policy = pool.getSchedulingPolicy();
                if (!MetaStoreUtils.isValidSchedulingPolicy(policy)) {
                    throw new InvalidOperationException("Invalid scheduling policy " + policy);
                }
                mPool.setSchedulingPolicy(pool.getSchedulingPolicy());
            } else {
                mPool.setSchedulingPolicy(null);
            }
        }
        if (pool.isSetPoolPath() && !pool.getPoolPath().equals(mPool.getPath())) {
            moveDescendents(resourcePlan, mPool.getPath(), pool.getPoolPath());
            mPool.setPath(pool.getPoolPath());
        }
        commited = commitTransaction();
    } finally {
        rollbackAndCleanup(commited, (Query) null);
    }
}
Also used : MWMPool(org.apache.hadoop.hive.metastore.model.MWMPool) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) MWMResourcePlan(org.apache.hadoop.hive.metastore.model.MWMResourcePlan)
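
Two details of alterPool are worth calling out: only fields the caller explicitly set (via the Thrift-generated isSet* accessors) are applied, and the whole update runs between openTransaction and commitTransaction, with rollbackAndCleanup in the finally block undoing partial work on failure. The sketch below illustrates just the partial-update part, with plain Java Optionals standing in for the Thrift isSet* flags; Pool and PoolUpdate are illustrative types, not the Hive model or API classes.

import java.util.Optional;

public class PoolUpdateSketch {

    // Illustrative stand-in for the persisted pool (MWMPool in the example above).
    static class Pool {
        double allocFraction;
        int queryParallelism;
        String schedulingPolicy; // null means "use the resource plan default"
    }

    // Illustrative stand-in for the request object (WMNullablePool). The outer
    // Optional plays the role of the isSet* flag; for schedulingPolicy the inner
    // Optional distinguishes "set to a value" from "explicitly cleared".
    record PoolUpdate(Optional<Double> allocFraction,
                      Optional<Integer> queryParallelism,
                      Optional<Optional<String>> schedulingPolicy) {}

    static void alterPool(Pool pool, PoolUpdate update) {
        // Only fields the caller explicitly set are touched.
        update.allocFraction().ifPresent(f -> pool.allocFraction = f);
        update.queryParallelism().ifPresent(p -> pool.queryParallelism = p);
        update.schedulingPolicy().ifPresent(policy -> {
            // Mirrors the isSetIsSetSchedulingPolicy / isSetSchedulingPolicy double
            // check: an explicitly set but empty value clears the policy.
            pool.schedulingPolicy = policy.orElse(null);
        });
    }
}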

Example 18 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

The class ObjectStore, method getMWMResourcePlan: looks up a resource plan by name and namespace, optionally requiring that it exists and that it is disabled before edits are allowed.

private MWMResourcePlan getMWMResourcePlan(String name, String ns, boolean editCheck, boolean mustExist) throws NoSuchObjectException, InvalidOperationException {
    MWMResourcePlan resourcePlan;
    boolean commited = false;
    Query query = null;
    name = normalizeIdentifier(name);
    try {
        query = createGetResourcePlanQuery();
        ns = getNsOrDefault(ns);
        resourcePlan = (MWMResourcePlan) query.execute(name, ns);
        pm.retrieve(resourcePlan);
        commited = commitTransaction();
    } finally {
        rollbackAndCleanup(commited, query);
    }
    if (mustExist && resourcePlan == null) {
        throw new NoSuchObjectException("There is no resource plan named: " + name + " in " + ns);
    }
    if (editCheck && resourcePlan != null && resourcePlan.getStatus() != MWMResourcePlan.Status.DISABLED) {
        throw new InvalidOperationException("Resource plan must be disabled to edit it.");
    }
    return resourcePlan;
}
Also used : ScheduledQuery(org.apache.hadoop.hive.metastore.api.ScheduledQuery) Query(javax.jdo.Query) MScheduledQuery(org.apache.hadoop.hive.metastore.model.MScheduledQuery) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MWMResourcePlan(org.apache.hadoop.hive.metastore.model.MWMResourcePlan)
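
The commited flag together with rollbackAndCleanup(commited, query) in the finally block is the recurring ObjectStore idiom: if commitTransaction was never reached, the cleanup rolls the transaction back and closes the query. A rough equivalent of that idiom written against plain JDBC rather than the Hive/JDO API is sketched below; the connection URL, table, and column names are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class LookupWithRollback {

    /** Returns the status of a plan, or null if no such row exists. */
    static String getPlanStatus(String jdbcUrl, String planName) throws SQLException {
        boolean committed = false;
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
            conn.setAutoCommit(false);
            try (PreparedStatement ps =
                         conn.prepareStatement("SELECT status FROM resource_plans WHERE name = ?")) {
                ps.setString(1, planName);
                String status = null;
                try (ResultSet rs = ps.executeQuery()) {
                    if (rs.next()) {
                        status = rs.getString(1);
                    }
                }
                conn.commit();
                committed = true;
                return status;
            } finally {
                // Mirrors rollbackAndCleanup: roll back only if the commit never happened.
                if (!committed) {
                    conn.rollback();
                }
            }
        }
    }
}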

Example 19 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

The class TestTablesCreateDropAlterTruncate, method testAlterTableAlreadyExists: verifies that renaming a table to a name that already exists fails with InvalidOperationException.

@Test
public void testAlterTableAlreadyExists() throws Exception {
    Table originalTable = testTables[0];
    Table newTable = originalTable.deepCopy();
    newTable.setTableName(testTables[2].getTableName());
    try {
        // Already existing table
        client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
        // TODO: Maybe throw AlreadyExistsException.
        Assert.fail("Expected an InvalidOperationException to be thrown");
    } catch (InvalidOperationException exception) {
        // Expected exception
    }
}
Also used : SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.metastore.api.Table) TestHiveMetaStore.createSourceTable(org.apache.hadoop.hive.metastore.TestHiveMetaStore.createSourceTable) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)
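
The try / Assert.fail / catch idiom above is the classic JUnit 4 way to assert that a call throws. With JUnit 4.13 or later the same intent can be expressed with Assert.assertThrows, as in the illustrative sketch below; TableClient and its lambda implementation are placeholders, not the metastore client API.

import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import org.junit.Test;

public class AlterToExistingNameTest {

    // Illustrative stand-in for the metastore client call used in the example above.
    interface TableClient {
        void alterTable(String db, String table, String newName);
    }

    @Test
    public void renamingOntoExistingTableFails() {
        TableClient client = (db, table, newName) -> {
            // Pretend the target name is already taken.
            throw new IllegalStateException("Table " + newName + " already exists");
        };
        // assertThrows both expects the exception and hands it back for further checks.
        IllegalStateException e = assertThrows(IllegalStateException.class,
                () -> client.alterTable("default", "tbl_a", "tbl_b"));
        assertTrue(e.getMessage().contains("already exists"));
    }
}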

Example 20 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

The class HiveAlterHandler, method alterPartition: alters a partition in place when no part_vals are supplied, or renames it (including moving the partition directory for managed tables) when the old partition values are given.

@Override
public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, String dbname, String name, List<String> part_vals, final Partition new_part, EnvironmentContext environmentContext, IHMSHandler handler, String validWriteIds) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
    boolean success = false;
    Partition oldPart;
    List<TransactionalMetaStoreEventListener> transactionalListeners = null;
    if (handler != null) {
        transactionalListeners = handler.getTransactionalListeners();
    }
    // Set DDL time to now if not specified
    if (new_part.getParameters() == null || new_part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null || Integer.parseInt(new_part.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
        new_part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System.currentTimeMillis() / 1000));
    }
    // alter partition
    if (part_vals == null || part_vals.size() == 0) {
        try {
            msdb.openTransaction();
            Table tbl = msdb.getTable(catName, dbname, name, null);
            if (tbl == null) {
                throw new InvalidObjectException("Unable to alter partition because table or database does not exist.");
            }
            oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues());
            if (MetaStoreServerUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
                // if stats are same, no need to update
                if (MetaStoreServerUtils.isFastStatsSame(oldPart, new_part)) {
                    MetaStoreServerUtils.updateBasicState(environmentContext, new_part.getParameters());
                } else {
                    MetaStoreServerUtils.updatePartitionStatsFast(new_part, tbl, wh, false, true, environmentContext, false);
                }
            }
            // PartitionView does not have SD. We do not need update its column stats
            if (oldPart.getSd() != null) {
                updateOrGetPartitionColumnStats(msdb, catName, dbname, name, new_part.getValues(), oldPart.getSd().getCols(), tbl, new_part, null, null);
            }
            Deadline.checkTimeout();
            msdb.alterPartition(catName, dbname, name, new_part.getValues(), new_part, validWriteIds);
            if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
                MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION, new AlterPartitionEvent(oldPart, new_part, tbl, false, true, new_part.getWriteId(), handler), environmentContext);
            }
            success = msdb.commitTransaction();
        } catch (InvalidObjectException e) {
            LOG.warn("Alter failed", e);
            throw new InvalidOperationException("alter is not possible: " + e.getMessage());
        } catch (NoSuchObjectException e) {
            // old partition does not exist
            throw new InvalidOperationException("alter is not possible: " + e.getMessage());
        } finally {
            if (!success) {
                msdb.rollbackTransaction();
            }
        }
        return oldPart;
    }
    // rename partition
    String oldPartLoc;
    String newPartLoc;
    Path srcPath = null;
    Path destPath = null;
    FileSystem srcFs;
    FileSystem destFs = null;
    boolean dataWasMoved = false;
    Database db;
    try {
        msdb.openTransaction();
        Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name, null);
        if (tbl == null) {
            throw new InvalidObjectException("Unable to alter partition because table or database does not exist.");
        }
        try {
            oldPart = msdb.getPartition(catName, dbname, name, part_vals);
        } catch (NoSuchObjectException e) {
            // this means there is no existing partition
            throw new InvalidObjectException("Unable to rename partition because old partition does not exist");
        }
        Partition check_part;
        try {
            check_part = msdb.getPartition(catName, dbname, name, new_part.getValues());
        } catch (NoSuchObjectException e) {
            // this means there is no existing partition
            check_part = null;
        }
        if (check_part != null) {
            throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." + new_part.getValues());
        }
        // 3) rename the partition directory if it is not an external table
        if (!tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
            // TODO: refactor this into a separate method after master merge, this one is too big.
            try {
                db = msdb.getDatabase(catName, dbname);
                // if tbl location is available use it
                // else derive the tbl location from database location
                destPath = wh.getPartitionPath(db, tbl, new_part.getValues());
                destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation()));
            } catch (NoSuchObjectException e) {
                LOG.debug("Didn't find object in metastore ", e);
                throw new InvalidOperationException("Unable to change partition or table. Database " + dbname + " does not exist" + " Check metastore logs for detailed stack." + e.getMessage());
            }
            if (destPath != null) {
                newPartLoc = destPath.toString();
                oldPartLoc = oldPart.getSd().getLocation();
                LOG.info("srcPath:" + oldPartLoc);
                LOG.info("descPath:" + newPartLoc);
                srcPath = new Path(oldPartLoc);
                srcFs = wh.getFs(srcPath);
                destFs = wh.getFs(destPath);
                // check that src and dest are on the same file system
                if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
                    throw new InvalidOperationException("New table location " + destPath + " is on a different file system than the old location " + srcPath + ". This operation is not supported.");
                }
                try {
                    if (srcFs.exists(srcPath)) {
                        if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
                            throw new InvalidOperationException("New location for this table " + tbl.getDbName() + "." + tbl.getTableName() + " already exists : " + destPath);
                        }
                        // if destPath's parent path doesn't exist, we should mkdir it
                        Path destParentPath = destPath.getParent();
                        if (!wh.mkdirs(destParentPath)) {
                            throw new MetaException("Unable to create path " + destParentPath);
                        }
                        // rename the data directory
                        wh.renameDir(srcPath, destPath, ReplChangeManager.shouldEnableCm(db, tbl));
                        LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done.");
                        dataWasMoved = true;
                    }
                } catch (IOException e) {
                    LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, e);
                    throw new InvalidOperationException("Unable to access src or dest location for partition " + tbl.getDbName() + "." + tbl.getTableName() + " " + new_part.getValues());
                } catch (MetaException me) {
                    LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, me);
                    throw me;
                }
                new_part.getSd().setLocation(newPartLoc);
            }
        } else {
            new_part.getSd().setLocation(oldPart.getSd().getLocation());
        }
        if (MetaStoreServerUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
            MetaStoreServerUtils.updatePartitionStatsFast(new_part, tbl, wh, false, true, environmentContext, false);
        }
        String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues());
        List<ColumnStatistics> multiColumnStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(), oldPart.getSd().getCols(), tbl, new_part, null, null);
        msdb.alterPartition(catName, dbname, name, part_vals, new_part, validWriteIds);
        if (!multiColumnStats.isEmpty()) {
            for (ColumnStatistics cs : multiColumnStats) {
                cs.getStatsDesc().setPartName(newPartName);
                try {
                    msdb.updatePartitionColumnStatistics(cs, new_part.getValues(), validWriteIds, new_part.getWriteId());
                } catch (InvalidInputException iie) {
                    throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
                } catch (NoSuchObjectException nsoe) {
                    // It is ok, ignore
                }
            }
        }
        if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
            MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION, new AlterPartitionEvent(oldPart, new_part, tbl, false, true, new_part.getWriteId(), handler), environmentContext);
        }
        success = msdb.commitTransaction();
    } finally {
        if (!success) {
            LOG.error("Failed to rename a partition. Rollback transaction");
            msdb.rollbackTransaction();
            if (dataWasMoved) {
                LOG.error("Revert the data move in renaming a partition.");
                try {
                    if (destFs.exists(destPath)) {
                        wh.renameDir(destPath, srcPath, false);
                    }
                } catch (MetaException me) {
                    LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath + " in alter partition failure. Manual restore is needed.");
                } catch (IOException ioe) {
                    LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath + " in alter partition failure. Manual restore is needed.");
                }
            }
        }
    }
    return oldPart;
}
Also used : Path(org.apache.hadoop.fs.Path) ColumnStatistics(org.apache.hadoop.hive.metastore.api.ColumnStatistics) Partition(org.apache.hadoop.hive.metastore.api.Partition) InvalidInputException(org.apache.hadoop.hive.metastore.api.InvalidInputException) Table(org.apache.hadoop.hive.metastore.api.Table) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) AlterPartitionEvent(org.apache.hadoop.hive.metastore.events.AlterPartitionEvent) IOException(java.io.IOException) FileSystem(org.apache.hadoop.fs.FileSystem) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) Database(org.apache.hadoop.hive.metastore.api.Database) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)
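
The rename branch of alterPartition follows a move-then-update-then-maybe-undo shape: the partition directory is renamed first, dataWasMoved records that the move happened, and if the metadata transaction never commits, the finally block moves the data back. The sketch below shows that shape with java.nio.file instead of Hadoop's FileSystem; renameDirWithMetadata and its updateMetadata callback are illustrative placeholders for wh.renameDir and msdb.alterPartition plus the commit.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.Callable;

public class RenameWithUndo {

    /**
     * Moves srcDir to destDir, then runs the metadata update; if the update does
     * not succeed, attempts to move the data back so storage and metadata stay in sync.
     */
    static void renameDirWithMetadata(Path srcDir, Path destDir, Callable<Boolean> updateMetadata)
            throws Exception {
        if (Files.exists(destDir)) {
            throw new IllegalStateException("New location already exists: " + destDir);
        }
        Files.createDirectories(destDir.getParent()); // mkdirs the parent, as wh.mkdirs does above
        boolean dataWasMoved = false;
        boolean success = false;
        try {
            Files.move(srcDir, destDir);            // analogous to wh.renameDir
            dataWasMoved = true;
            success = updateMetadata.call();        // analogous to msdb.alterPartition + commit
        } finally {
            if (!success && dataWasMoved) {
                try {
                    // Best-effort undo of the data move; a failure here needs manual cleanup.
                    Files.move(destDir, srcDir);
                } catch (IOException e) {
                    System.err.println("Manual restore needed: " + destDir + " -> " + srcDir);
                }
            }
        }
    }
}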

Aggregations

InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 51
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 26
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 23
IOException (java.io.IOException): 19
ArrayList (java.util.ArrayList): 18
Table (org.apache.hadoop.hive.metastore.api.Table): 17
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 16
TException (org.apache.thrift.TException): 15
Partition (org.apache.hadoop.hive.metastore.api.Partition): 14
FileSystem (org.apache.hadoop.fs.FileSystem): 12
Path (org.apache.hadoop.fs.Path): 12
List (java.util.List): 10
AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException): 10
InvalidInputException (org.apache.hadoop.hive.metastore.api.InvalidInputException): 10
MWMResourcePlan (org.apache.hadoop.hive.metastore.model.MWMResourcePlan): 9
SQLException (java.sql.SQLException): 8
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 8
Test (org.junit.Test): 8
LinkedList (java.util.LinkedList): 7
Database (org.apache.hadoop.hive.metastore.api.Database): 7