Example 36 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

From the class HCatClientHMSImpl, the method updateTableSchema.

@Override
public void updateTableSchema(String dbName, String tableName, List<HCatFieldSchema> columnSchema) throws HCatException {
    try {
        Table table = hmsClient.getTable(dbName, tableName);
        table.getSd().setCols(HCatSchemaUtils.getFieldSchemas(columnSchema));
        hmsClient.alter_table(dbName, tableName, table);
    } catch (InvalidOperationException e) {
        throw new HCatException("InvalidOperationException while updating table schema.", e);
    } catch (MetaException e) {
        throw new HCatException("MetaException while updating table schema.", e);
    } catch (NoSuchObjectException e) {
        throw new ObjectNotFoundException("NoSuchObjectException while updating table schema.", e);
    } catch (TException e) {
        throw new ConnectionFailureException("TException while updating table schema.", e);
    }
}
Also used : TException(org.apache.thrift.TException) Table(org.apache.hadoop.hive.metastore.api.Table) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) HCatException(org.apache.hive.hcatalog.common.HCatException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)
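
As a point of reference, here is a minimal caller sketch showing how this method is reached through the public HCatClient API. The database, table, and column names are made up, and it assumes a metastore reachable through the default Configuration; imports are omitted, as in the examples above.

HCatClient client = HCatClient.create(new Configuration());
List<HCatFieldSchema> newSchema = new ArrayList<>();
newSchema.add(new HCatFieldSchema("id", HCatFieldSchema.Type.INT, "row id"));
newSchema.add(new HCatFieldSchema("payload", HCatFieldSchema.Type.STRING, "message body"));
// Replaces the column list of default.events with the two columns above.
client.updateTableSchema("default", "events", newSchema);
client.close();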

Example 37 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

From the class HiveAlterHandler, the method getExistingPartitions.

private Map<List<String>, Partition> getExistingPartitions(final RawStore msdb, final List<Partition> new_parts, final Table tbl, final String catName, final String dbname, final String name) throws MetaException, NoSuchObjectException, InvalidOperationException {
    // Get list of partition values
    List<String> partValues = new LinkedList<>();
    for (Partition tmpPart : new_parts) {
        partValues.add(Warehouse.makePartName(tbl.getPartitionKeys(), tmpPart.getValues()));
    }
    // Get existing partitions from store
    List<Partition> oldParts = msdb.getPartitionsByNames(catName, dbname, name, partValues);
    if (new_parts.size() != oldParts.size()) {
        throw new InvalidOperationException("Alter partition operation failed: new parts size " + new_parts.size() + " does not match old parts size " + oldParts.size());
    }
    return oldParts.stream().collect(Collectors.toMap(Partition::getValues, p -> p));
}
Also used : ArrayListMultimap(com.google.common.collect.ArrayListMultimap) MetaStoreUtils.getDefaultCatalog(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) AlterPartitionEvent(org.apache.hadoop.hive.metastore.events.AlterPartitionEvent) FileSystem(org.apache.hadoop.fs.FileSystem) LoggerFactory(org.slf4j.LoggerFactory) StringUtils(org.apache.commons.lang3.StringUtils) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) StringUtils.normalizeIdentifier(org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier) ColumnStatisticsDesc(org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc) StatsSetupConst(org.apache.hadoop.hive.common.StatsSetupConst) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) URI(java.net.URI) DEFAULT_CATALOG_NAME(org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME) InvalidInputException(org.apache.hadoop.hive.metastore.api.InvalidInputException) MetaStoreUtils(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils) FileUtils(org.apache.hadoop.hive.metastore.utils.FileUtils) ColumnStatisticsObj(org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj) Collectors(java.util.stream.Collectors) ColumnStatistics(org.apache.hadoop.hive.metastore.api.ColumnStatistics) TxnUtils(org.apache.hadoop.hive.metastore.txn.TxnUtils) List(java.util.List) MetastoreConf(org.apache.hadoop.hive.metastore.conf.MetastoreConf) Entry(java.util.Map.Entry) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) AlterTableEvent(org.apache.hadoop.hive.metastore.events.AlterTableEvent) Multimap(com.google.common.collect.Multimap) Partition(org.apache.hadoop.hive.metastore.api.Partition) ArrayList(java.util.ArrayList) Lists(com.google.common.collect.Lists) CollectionUtils(org.apache.commons.collections.CollectionUtils) ALTER_TABLE_OPERATION_TYPE(org.apache.hadoop.hive.metastore.HiveMetaHook.ALTER_TABLE_OPERATION_TYPE) EventMessage(org.apache.hadoop.hive.metastore.messaging.EventMessage) ReplConst(org.apache.hadoop.hive.common.repl.ReplConst) TableName(org.apache.hadoop.hive.common.TableName) LinkedList(java.util.LinkedList) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) EnvironmentContext(org.apache.hadoop.hive.metastore.api.EnvironmentContext) ALTERLOCATION(org.apache.hadoop.hive.metastore.HiveMetaHook.ALTERLOCATION) IOException(java.io.IOException) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) Table(org.apache.hadoop.hive.metastore.api.Table) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) MetaStoreServerUtils(org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Database(org.apache.hadoop.hive.metastore.api.Database) org.apache.hadoop.hive.metastore.api.hive_metastoreConstants(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants) Collections(java.util.Collections) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) Partition(org.apache.hadoop.hive.metastore.api.Partition) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) LinkedList(java.util.LinkedList)
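
The terminal stream in getExistingPartitions builds a lookup keyed by each partition's value list. The same Collectors.toMap idiom, isolated into a self-contained sketch with a plain record standing in for the metastore Partition class (names are illustrative):

import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class ToMapDemo {

    // Stand-in for org.apache.hadoop.hive.metastore.api.Partition.
    record Part(List<String> values, String location) {}

    public static void main(String[] args) {
        List<Part> parts = List.of(
            new Part(List.of("2024", "01"), "/warehouse/t/2024/01"),
            new Part(List.of("2024", "02"), "/warehouse/t/2024/02"));
        // Key each partition by its value list, as the example above does.
        Map<List<String>, Part> byValues = parts.stream()
            .collect(Collectors.toMap(Part::values, Function.identity()));
        // Prints /warehouse/t/2024/02
        System.out.println(byValues.get(List.of("2024", "02")).location());
    }
}

Note that Collectors.toMap throws IllegalStateException on a duplicate key, which is safe here because partition value lists are unique within a table.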

Example 38 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

From the class HiveAlterHandler, the method alterTable.

@Override
public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname, String name, Table newt, EnvironmentContext environmentContext, IHMSHandler handler, String writeIdList) throws InvalidOperationException, MetaException {
    catName = normalizeIdentifier(catName);
    name = name.toLowerCase();
    dbname = dbname.toLowerCase();
    final boolean cascade;
    final boolean replDataLocationChanged;
    final boolean isReplicated;
    if ((environmentContext != null) && environmentContext.isSetProperties()) {
        cascade = StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(StatsSetupConst.CASCADE));
        replDataLocationChanged = ReplConst.TRUE.equals(environmentContext.getProperties().get(ReplConst.REPL_DATA_LOCATION_CHANGED));
    } else {
        cascade = false;
        replDataLocationChanged = false;
    }
    if (newt == null) {
        throw new InvalidOperationException("New table is null");
    }
    String newTblName = newt.getTableName().toLowerCase();
    String newDbName = newt.getDbName().toLowerCase();
    if (!MetaStoreUtils.validateName(newTblName, handler.getConf())) {
        throw new InvalidOperationException(newTblName + " is not a valid object name");
    }
    String validate = MetaStoreServerUtils.validateTblColumns(newt.getSd().getCols());
    if (validate != null) {
        throw new InvalidOperationException("Invalid column " + validate);
    }
    // Validate bucketedColumns in new table
    List<String> bucketColumns = MetaStoreServerUtils.validateBucketColumns(newt.getSd());
    if (CollectionUtils.isNotEmpty(bucketColumns)) {
        String errMsg = "Bucket columns - " + bucketColumns + " don't match any table columns";
        LOG.error(errMsg);
        throw new InvalidOperationException(errMsg);
    }
    Path srcPath = null;
    FileSystem srcFs;
    Path destPath = null;
    FileSystem destFs = null;
    boolean success = false;
    boolean dataWasMoved = false;
    boolean isPartitionedTable = false;
    Database olddb = null;
    Table oldt = null;
    List<TransactionalMetaStoreEventListener> transactionalListeners = handler.getTransactionalListeners();
    List<MetaStoreEventListener> listeners = handler.getListeners();
    Map<String, String> txnAlterTableEventResponses = Collections.emptyMap();
    try {
        boolean rename = false;
        List<Partition> parts;
        // Switching tables between catalogs is not allowed.
        if (!catName.equalsIgnoreCase(newt.getCatName())) {
            throw new InvalidOperationException("Tables cannot be moved between catalogs, old catalog " + catName + ", new catalog " + newt.getCatName());
        }
        // check if table with the new name already exists
        if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
            if (msdb.getTable(catName, newDbName, newTblName, null) != null) {
                throw new InvalidOperationException("new table " + newDbName + "." + newTblName + " already exists");
            }
            rename = true;
        }
        msdb.openTransaction();
        // get old table
        // Note: we don't verify stats here; it's done below in alterTableUpdateTableColumnStats.
        olddb = msdb.getDatabase(catName, dbname);
        oldt = msdb.getTable(catName, dbname, name, null);
        if (oldt == null) {
            throw new InvalidOperationException("table " + TableName.getQualified(catName, dbname, name) + " doesn't exist");
        }
        validateTableChangesOnReplSource(olddb, oldt, newt, environmentContext);
        // On a replica, this alter table will be executed only if both the old and new databases
        // are available and being replicated into. Otherwise it is handled as either a create or a drop of the table.
        isReplicated = HMSHandler.isDbReplicationTarget(olddb);
        if (oldt.getPartitionKeysSize() != 0) {
            isPartitionedTable = true;
        }
        // Throws InvalidOperationException if the new column types are not
        // compatible with the current column types.
        DefaultIncompatibleTableChangeHandler.get().allowChange(handler.getConf(), oldt, newt);
        // check that partition keys have not changed, except for virtual views
        // however, allow the partition comments to change
        boolean partKeysPartiallyEqual = checkPartialPartKeysEqual(oldt.getPartitionKeys(), newt.getPartitionKeys());
        if (!oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())) {
            Map<String, String> properties = (environmentContext == null) ? null : environmentContext.getProperties();
            if (properties == null || !Boolean.parseBoolean(properties.getOrDefault(HiveMetaHook.ALLOW_PARTITION_KEY_CHANGE, "false"))) {
                if (!partKeysPartiallyEqual) {
                    throw new InvalidOperationException("partition keys can not be changed.");
                }
            }
        }
    // Two mutually exclusive flows are possible here:
    // i) the partition locations need updating because replDataLocationChanged is true, i.e. the
    //    table's data location was changed by the replication flow; or
    // ii) a rename needs to move the data to the location matching the new name, if:
    //   1) the table is not a virtual view, and
    //   2) the table is not an external table, and
    //   3) the user didn't change the default location (or the new location is empty), and
    //   4) the table was not initially created with a specified location
        if (replDataLocationChanged || (rename && !oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString()) && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0 || StringUtils.isEmpty(newt.getSd().getLocation())) && !MetaStoreUtils.isExternalTable(oldt))) {
            srcPath = new Path(oldt.getSd().getLocation());
            if (replDataLocationChanged) {
                // If the data location was changed in the replication flow, the new path has already
                // been set in newt. The data is effectively already moved, so set dataWasMoved=true so
                // that the partition locations are updated accordingly.
                // There is no need to validate that destPath exists: in the replication flow the data
                // gets replicated separately.
                destPath = new Path(newt.getSd().getLocation());
                dataWasMoved = true;
            } else {
                // Rename flow.
                // If a table was created in a user-specified location with DDL like
                // "create table tbl ... location ...", it should be treated like an external table
                // during the rename: its data location should not be changed. We can tell whether it
                // is such a table by checking whether the table directory was created directly under
                // its database directory.
                String oldtRelativePath = wh.getDatabaseManagedPath(olddb).toUri().relativize(srcPath.toUri()).toString();
                boolean tableInSpecifiedLoc = !oldtRelativePath.equalsIgnoreCase(name) && !oldtRelativePath.equalsIgnoreCase(name + Path.SEPARATOR);
                if (!tableInSpecifiedLoc) {
                    srcFs = wh.getFs(srcPath);
                    // get new location
                    Database db = msdb.getDatabase(catName, newDbName);
                    assert (isReplicated == HMSHandler.isDbReplicationTarget(db));
                    Path databasePath = constructRenamedPath(wh.getDatabaseManagedPath(db), srcPath);
                    destPath = new Path(databasePath, newTblName);
                    destFs = wh.getFs(destPath);
                    newt.getSd().setLocation(destPath.toString());
                    // check that src and dest are on the same file system
                    if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
                        throw new InvalidOperationException("table new location " + destPath + " is on a different file system than the old location " + srcPath + ". This operation is not supported");
                    }
                    try {
                        if (destFs.exists(destPath)) {
                            throw new InvalidOperationException("New location for this table " + TableName.getQualified(catName, newDbName, newTblName) + " already exists : " + destPath);
                        }
                        // check that src exists (which also exercises the necessary permissions), then rename src to dest
                        if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath, ReplChangeManager.shouldEnableCm(olddb, oldt))) {
                            dataWasMoved = true;
                        }
                    } catch (IOException | MetaException e) {
                        LOG.error("Alter Table operation for " + dbname + "." + name + " failed.", e);
                        throw new InvalidOperationException("Alter Table operation for " + dbname + "." + name + " failed to move data due to: '" + getSimpleMessage(e) + "' See hive log file for details.");
                    }
                    if (!HiveMetaStore.isRenameAllowed(olddb, db)) {
                        LOG.error("Alter Table operation for " + TableName.getQualified(catName, dbname, name) + " to new table = " + TableName.getQualified(catName, newDbName, newTblName) + " failed");
                        throw new MetaException("Alter table not allowed for table " + TableName.getQualified(catName, dbname, name) + " to new table = " + TableName.getQualified(catName, newDbName, newTblName));
                    }
                }
            }
            if (isPartitionedTable) {
                String oldTblLocPath = srcPath.toUri().getPath();
                String newTblLocPath = dataWasMoved ? destPath.toUri().getPath() : null;
                // update the db name, table name, and (if the data moved) the location field in each partition
                parts = msdb.getPartitions(catName, dbname, name, -1);
                Multimap<Partition, ColumnStatistics> columnStatsNeedUpdated = ArrayListMultimap.create();
                for (Partition part : parts) {
                    String oldPartLoc = part.getSd().getLocation();
                    if (dataWasMoved && oldPartLoc.contains(oldTblLocPath)) {
                        URI oldUri = new Path(oldPartLoc).toUri();
                        String newPath = oldUri.getPath().replace(oldTblLocPath, newTblLocPath);
                        Path newPartLocPath = new Path(oldUri.getScheme(), oldUri.getAuthority(), newPath);
                        part.getSd().setLocation(newPartLocPath.toString());
                    }
                    part.setDbName(newDbName);
                    part.setTableName(newTblName);
                    List<ColumnStatistics> multiColStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, part.getValues(), part.getSd().getCols(), oldt, part, null, null);
                    for (ColumnStatistics colStats : multiColStats) {
                        columnStatsNeedUpdated.put(part, colStats);
                    }
                }
                // Do not verify stats parameters on a partitioned table.
                msdb.alterTable(catName, dbname, name, newt, null);
                // alterPartition is only for changing the partition location in the table rename
                if (dataWasMoved) {
                    int partsToProcess = parts.size();
                    int partitionBatchSize = MetastoreConf.getIntVar(handler.getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX);
                    int batchStart = 0;
                    while (partsToProcess > 0) {
                        int batchEnd = Math.min(batchStart + partitionBatchSize, parts.size());
                        List<Partition> partBatch = parts.subList(batchStart, batchEnd);
                        int partBatchSize = partBatch.size();
                        partsToProcess -= partBatchSize;
                        batchStart += partBatchSize;
                        List<List<String>> partValues = new ArrayList<>(partBatchSize);
                        for (Partition part : partBatch) {
                            partValues.add(part.getValues());
                        }
                        msdb.alterPartitions(catName, newDbName, newTblName, partValues, partBatch, newt.getWriteId(), writeIdList);
                    }
                }
                Deadline.checkTimeout();
                for (Entry<Partition, ColumnStatistics> partColStats : columnStatsNeedUpdated.entries()) {
                    ColumnStatistics newPartColStats = partColStats.getValue();
                    newPartColStats.getStatsDesc().setDbName(newDbName);
                    newPartColStats.getStatsDesc().setTableName(newTblName);
                    msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues(), writeIdList, newt.getWriteId());
                }
            } else {
                alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext, writeIdList, conf, null);
            }
        } else {
            // operations other than table rename
            if (MetaStoreServerUtils.requireCalStats(null, null, newt, environmentContext) && !isPartitionedTable) {
                Database db = msdb.getDatabase(catName, newDbName);
                assert (isReplicated == HMSHandler.isDbReplicationTarget(db));
                // Update table stats. For partitioned table, we update stats in alterPartition()
                MetaStoreServerUtils.updateTableStatsSlow(db, newt, wh, false, true, environmentContext);
            }
            if (isPartitionedTable) {
                // Currently only column related changes can be cascaded in alter table
                boolean runPartitionMetadataUpdate = (cascade && !MetaStoreServerUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols()));
                // we may skip the update entirely if there are only new columns added
                runPartitionMetadataUpdate |= !cascade && !MetaStoreServerUtils.arePrefixColumns(oldt.getSd().getCols(), newt.getSd().getCols());
                boolean retainOnColRemoval = MetastoreConf.getBoolVar(handler.getConf(), MetastoreConf.ConfVars.COLSTATS_RETAIN_ON_COLUMN_REMOVAL);
                if (runPartitionMetadataUpdate) {
                    if (cascade || retainOnColRemoval) {
                        parts = msdb.getPartitions(catName, dbname, name, -1);
                        for (Partition part : parts) {
                            Partition oldPart = new Partition(part);
                            List<FieldSchema> oldCols = part.getSd().getCols();
                            part.getSd().setCols(newt.getSd().getCols());
                            List<ColumnStatistics> colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, part.getValues(), oldCols, oldt, part, null, null);
                            assert (colStats.isEmpty());
                            Deadline.checkTimeout();
                            if (cascade) {
                                msdb.alterPartition(catName, dbname, name, part.getValues(), part, writeIdList);
                            } else {
                                // update changed properties (stats)
                                oldPart.setParameters(part.getParameters());
                                msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart, writeIdList);
                            }
                        }
                    } else {
                        // clear all column stats to prevent incorrect behaviour in case the same column is reintroduced
                        TableName tableName = new TableName(catName, dbname, name);
                        msdb.deleteAllPartitionColumnStatistics(tableName, writeIdList);
                    }
                    // Don't validate table-level stats for a partitioned table.
                    msdb.alterTable(catName, dbname, name, newt, null);
                } else {
                    LOG.warn("Alter table not cascaded to partitions.");
                    alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext, writeIdList, conf, null);
                }
            } else {
                alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext, writeIdList, conf, null);
            }
        }
        if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
            txnAlterTableEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_TABLE, new AlterTableEvent(oldt, newt, false, true, newt.getWriteId(), handler, isReplicated), environmentContext);
        }
        // commit the changes
        success = msdb.commitTransaction();
    } catch (InvalidObjectException e) {
        LOG.debug("Failed to get object from Metastore ", e);
        throw new InvalidOperationException("Unable to change partition or table. Check metastore logs for detailed stack. " + e.getMessage());
    } catch (InvalidInputException e) {
        LOG.debug("Accessing Metastore failed due to invalid input ", e);
        throw new InvalidOperationException("Unable to change partition or table. Check metastore logs for detailed stack. " + e.getMessage());
    } catch (NoSuchObjectException e) {
        LOG.debug("Object not found in metastore ", e);
        throw new InvalidOperationException("Unable to change partition or table. Object " + e.getMessage() + " does not exist." + " Check metastore logs for detailed stack.");
    } finally {
        if (success) {
            // If the data location was changed in the replication flow, we need to delete the old path.
            if (replDataLocationChanged) {
                assert (olddb != null);
                assert (oldt != null);
                Path deleteOldDataLoc = new Path(oldt.getSd().getLocation());
                boolean isSkipTrash = MetaStoreUtils.isSkipTrash(oldt.getParameters());
                try {
                    wh.deleteDir(deleteOldDataLoc, true, isSkipTrash, ReplChangeManager.shouldEnableCm(olddb, oldt));
                    LOG.info("Deleted the old data location: {} for the table: {}", deleteOldDataLoc, dbname + "." + name);
                } catch (MetaException ex) {
                    // Swallow the exception as it doesn't affect the state of existing tables.
                    // We expect the user to drop this path manually on such a failure, so just log a warning.
                    LOG.warn("Unable to delete the old data location: {} for the table: {}", deleteOldDataLoc, dbname + "." + name);
                }
            }
        } else {
            LOG.error("Failed to alter table " + TableName.getQualified(catName, dbname, name));
            msdb.rollbackTransaction();
            if (!replDataLocationChanged && dataWasMoved) {
                try {
                    if (destFs.exists(destPath)) {
                        if (!destFs.rename(destPath, srcPath)) {
                            LOG.error("Failed to restore data from " + destPath + " to " + srcPath + " in alter table failure. Manual restore is needed.");
                        }
                    }
                } catch (IOException e) {
                    LOG.error("Failed to restore data from " + destPath + " to " + srcPath + " in alter table failure. Manual restore is needed.");
                }
            }
        }
    }
    if (!listeners.isEmpty()) {
        // I don't think event notifications in case of failures are necessary, but other HMS operations
        // make this call whether the event failed or succeeded. To make this behavior consistent,
        // this call is made for failed events also.
        MetaStoreListenerNotifier.notifyEvent(listeners, EventMessage.EventType.ALTER_TABLE, new AlterTableEvent(oldt, newt, false, success, newt.getWriteId(), handler, isReplicated), environmentContext, txnAlterTableEventResponses, msdb);
    }
}
Also used : FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) URI(java.net.URI) AlterTableEvent(org.apache.hadoop.hive.metastore.events.AlterTableEvent) FileSystem(org.apache.hadoop.fs.FileSystem) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) Database(org.apache.hadoop.hive.metastore.api.Database) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) Path(org.apache.hadoop.fs.Path) ColumnStatistics(org.apache.hadoop.hive.metastore.api.ColumnStatistics) Partition(org.apache.hadoop.hive.metastore.api.Partition) InvalidInputException(org.apache.hadoop.hive.metastore.api.InvalidInputException) Table(org.apache.hadoop.hive.metastore.api.Table) IOException(java.io.IOException) TableName(org.apache.hadoop.hive.common.TableName) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException)
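
The alterPartitions loop above walks the partition list in bounded batches so that renaming a heavily partitioned table does not issue one oversized store call. The pattern in isolation looks like the sketch below, with the batch size hard-coded where the handler reads MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX:

import java.util.ArrayList;
import java.util.List;

public class BatchWalkDemo {
    public static void main(String[] args) {
        List<String> parts = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            parts.add("part" + i);
        }
        int batchSize = 3; // stands in for the configured BATCH_RETRIEVE_MAX
        for (int start = 0; start < parts.size(); start += batchSize) {
            int end = Math.min(start + batchSize, parts.size());
            // subList returns a view, so no per-batch copying happens.
            List<String> batch = parts.subList(start, end);
            System.out.println("altering batch " + batch);
        }
    }
}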

Example 39 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

From the class NonCatCallsWithCatalog, the method tablesCreateDropAlterTruncate.

@Test
public void tablesCreateDropAlterTruncate() throws TException, URISyntaxException {
    String dbName = "db_in_other_catalog";
    // For this one don't specify a location to make sure it gets put in the catalog directory
    Database db = new DatabaseBuilder().setName(dbName).build(conf);
    db.unsetCatalogName();
    client.createDatabase(db);
    String[] tableNames = new String[4];
    for (int i = 0; i < tableNames.length; i++) {
        tableNames[i] = "table_in_other_catalog_" + i;
        TableBuilder builder = new TableBuilder().inDb(db).setTableName(tableNames[i]).addCol("col1_" + i, ColumnType.STRING_TYPE_NAME).addCol("col2_" + i, ColumnType.INT_TYPE_NAME);
        // Make one have a non-standard location
        if (i == 0) {
            builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i]));
        }
        // Make one partitioned
        if (i == 2) {
            builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME);
        }
        // Make one a materialized view
        /*
      // TODO HIVE-18991
      if (i == 3) {
        builder.setType(TableType.MATERIALIZED_VIEW.name())
            .setRewriteEnabled(true)
            .addMaterializedViewReferencedTable(dbName + "." + tableNames[0]);
      }
      */
        Table t = builder.build(conf);
        t.unsetCatName();
        client.createTable(t);
    }
    // Add partitions for the partitioned table
    String[] partVals = new String[3];
    Table partitionedTable = client.getTable(dbName, tableNames[2]);
    for (int i = 0; i < partVals.length; i++) {
        partVals[i] = "part" + i;
        Partition p = new PartitionBuilder().inTable(partitionedTable).addValue(partVals[i]).build(conf);
        p.unsetCatName();
        client.add_partition(p);
    }
    // Get tables, make sure the locations are correct
    for (int i = 0; i < tableNames.length; i++) {
        Table t = client.getTable(dbName, tableNames[i]);
        Assert.assertEquals(expectedCatalog(), t.getCatName());
        String expectedLocation = (i < 1) ? new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString() : new File(expectedBaseDir() + File.separatorChar + dbName + ".db", tableNames[i]).toURI().toString();
        Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/");
        File dir = new File(new URI(t.getSd().getLocation()).getPath());
        Assert.assertTrue(dir.exists() && dir.isDirectory());
    }
    // Make sure getting table in the wrong catalog does not work
    try {
        Table t = client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]);
        Assert.fail();
    } catch (NoSuchObjectException e) {
        // NOP
    }
    // test getAllTables
    Set<String> fetchedNames = new HashSet<>(client.getAllTables(dbName));
    Assert.assertEquals(tableNames.length, fetchedNames.size());
    for (String tableName : tableNames) {
        Assert.assertTrue(fetchedNames.contains(tableName));
    }
    fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME));
    for (String tableName : tableNames) {
        Assert.assertFalse(fetchedNames.contains(tableName));
    }
    // test getMaterializedViewsForRewriting
    /* TODO HIVE-18991
    List<String> materializedViews = client.getMaterializedViewsForRewriting(dbName);
    Assert.assertEquals(1, materializedViews.size());
    Assert.assertEquals(tableNames[3], materializedViews.get(0));
    */
    fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME));
    Assert.assertFalse(fetchedNames.contains(tableNames[3]));
    // test getTableObjectsByName
    List<Table> fetchedTables = client.getTableObjectsByName(dbName, Arrays.asList(tableNames[0], tableNames[1]));
    Assert.assertEquals(2, fetchedTables.size());
    Collections.sort(fetchedTables);
    Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName());
    Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName());
    fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME, Arrays.asList(tableNames[0], tableNames[1]));
    Assert.assertEquals(0, fetchedTables.size());
    // Test altering the table
    Table t = client.getTable(dbName, tableNames[0]).deepCopy();
    t.getParameters().put("test", "test");
    client.alter_table(dbName, tableNames[0], t);
    t = client.getTable(dbName, tableNames[0]).deepCopy();
    Assert.assertEquals("test", t.getParameters().get("test"));
    // Alter a table in the wrong catalog
    try {
        client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t);
        Assert.fail();
    } catch (InvalidOperationException e) {
        // NOP
    }
    // Update the metadata for the materialized view
    /* TODO HIVE-18991
    CreationMetadata cm = client.getTable(dbName, tableNames[3]).getCreationMetadata();
    cm.addToTablesUsed(dbName + "." + tableNames[1]);
    client.updateCreationMetadata(dbName, tableNames[3], cm);
    */
    List<String> partNames = new ArrayList<>();
    for (String partVal : partVals) {
        partNames.add("pcol1=" + partVal);
    }
    // Truncate a table
    client.truncateTable(dbName, tableNames[0], partNames);
    // Have to do this in reverse order so that we drop the materialized view first.
    for (int i = tableNames.length - 1; i >= 0; i--) {
        t = client.getTable(dbName, tableNames[i]);
        File tableDir = new File(new URI(t.getSd().getLocation()).getPath());
        Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
        if (tableNames[i].equalsIgnoreCase(tableNames[0])) {
            client.dropTable(dbName, tableNames[i], false, false);
            Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
        } else {
            client.dropTable(dbName, tableNames[i]);
            Assert.assertFalse(tableDir.exists());
        }
    }
    Assert.assertEquals(0, client.getAllTables(dbName).size());
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) ArrayList(java.util.ArrayList) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) URI(java.net.URI) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test)
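
The try { ... Assert.fail(); } catch blocks in this test are the classic JUnit 4 expected-exception pattern. With JUnit 4.13 or later on the classpath, each could be collapsed into a single assertThrows call; a sketch against the same fixtures (client, tableNames, and t are the variables from the test above):

import static org.junit.Assert.assertThrows;

// Equivalent to the try/fail/catch around the cross-catalog alter_table call.
assertThrows(InvalidOperationException.class,
    () -> client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t));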

Example 40 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

From the class TestTablesCreateDropAlterTruncate, the method tablesInOtherCatalogs.

@Test
public void tablesInOtherCatalogs() throws TException, URISyntaxException {
    String catName = "create_etc_tables_in_other_catalogs";
    Catalog cat = new CatalogBuilder().setName(catName).setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)).build();
    client.createCatalog(cat);
    String dbName = "db_in_other_catalog";
    // For this one don't specify a location to make sure it gets put in the catalog directory
    Database db = new DatabaseBuilder().setName(dbName).setCatalogName(catName).create(client, metaStore.getConf());
    Table table = new TableBuilder().inDb(db).setTableName("mvSource").addCol("col1_1", ColumnType.STRING_TYPE_NAME).addCol("col2_2", ColumnType.INT_TYPE_NAME).build(metaStore.getConf());
    client.createTable(table);
    SourceTable sourceTable = createSourceTable(table);
    String[] tableNames = new String[4];
    for (int i = 0; i < tableNames.length; i++) {
        tableNames[i] = "table_in_other_catalog_" + i;
        TableBuilder builder = new TableBuilder().inDb(db).setTableName(tableNames[i]).addCol("col1_" + i, ColumnType.STRING_TYPE_NAME).addCol("col2_" + i, ColumnType.INT_TYPE_NAME);
        // Make one have a non-standard location
        if (i == 0) {
            builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i]));
        }
        // Make one partitioned
        if (i == 2) {
            builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME);
        }
        // Make one a materialized view
        if (i == 3) {
            builder.setType(TableType.MATERIALIZED_VIEW.name()).setRewriteEnabled(true).addMaterializedViewReferencedTable(sourceTable);
        }
        client.createTable(builder.build(metaStore.getConf()));
    }
    // Add partitions for the partitioned table
    String[] partVals = new String[3];
    Table partitionedTable = client.getTable(catName, dbName, tableNames[2]);
    for (int i = 0; i < partVals.length; i++) {
        partVals[i] = "part" + i;
        new PartitionBuilder().inTable(partitionedTable).addValue(partVals[i]).addToTable(client, metaStore.getConf());
    }
    // Get tables, make sure the locations are correct
    for (int i = 0; i < tableNames.length; i++) {
        Table t = client.getTable(catName, dbName, tableNames[i]);
        Assert.assertEquals(catName, t.getCatName());
        String expectedLocation = (i < 1) ? new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString() : new File(cat.getLocationUri() + File.separatorChar + dbName + ".db", tableNames[i]).toURI().toString();
        Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/");
        File dir = new File(new URI(t.getSd().getLocation()).getPath());
        Assert.assertTrue(dir.exists() && dir.isDirectory());
    }
    // Make sure getting table in the wrong catalog does not work
    try {
        Table t = client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]);
        Assert.fail();
    } catch (NoSuchObjectException e) {
        // NOP
    }
    // test getAllTables
    Set<String> fetchedNames = new HashSet<>(client.getAllTables(catName, dbName));
    Assert.assertEquals(tableNames.length + 1, fetchedNames.size());
    for (String tableName : tableNames) {
        Assert.assertTrue(fetchedNames.contains(tableName));
    }
    fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME));
    for (String tableName : tableNames) {
        Assert.assertFalse(fetchedNames.contains(tableName));
    }
    // test getMaterializedViewsForRewriting
    List<String> materializedViews = client.getMaterializedViewsForRewriting(catName, dbName);
    Assert.assertEquals(1, materializedViews.size());
    Assert.assertEquals(tableNames[3], materializedViews.get(0));
    fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME));
    Assert.assertFalse(fetchedNames.contains(tableNames[3]));
    // test getTableObjectsByName
    List<Table> fetchedTables = client.getTableObjectsByName(catName, dbName, Arrays.asList(tableNames[0], tableNames[1]));
    Assert.assertEquals(2, fetchedTables.size());
    Collections.sort(fetchedTables);
    Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName());
    Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName());
    fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME, Arrays.asList(tableNames[0], tableNames[1]));
    Assert.assertEquals(0, fetchedTables.size());
    // Test altering the table
    Table t = client.getTable(catName, dbName, tableNames[0]).deepCopy();
    t.getParameters().put("test", "test");
    client.alter_table(catName, dbName, tableNames[0], t);
    t = client.getTable(catName, dbName, tableNames[0]).deepCopy();
    Assert.assertEquals("test", t.getParameters().get("test"));
    // Alter a table in the wrong catalog
    try {
        client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t);
        Assert.fail();
    } catch (InvalidOperationException e) {
        // NOP
    }
    // Update the metadata for the materialized view
    CreationMetadata cm = client.getTable(catName, dbName, tableNames[3]).getCreationMetadata();
    Table table1 = new TableBuilder().inDb(db).setTableName("mvSource2").addCol("col1_1", ColumnType.STRING_TYPE_NAME).addCol("col2_2", ColumnType.INT_TYPE_NAME).build(metaStore.getConf());
    client.createTable(table1);
    sourceTable = createSourceTable(table1);
    cm.addToTablesUsed(TableName.getDbTable(sourceTable.getTable().getDbName(), sourceTable.getTable().getTableName()));
    cm.addToSourceTables(sourceTable);
    cm.unsetMaterializationTime();
    client.updateCreationMetadata(catName, dbName, tableNames[3], cm);
    List<String> partNames = new ArrayList<>();
    for (String partVal : partVals) {
        partNames.add("pcol1=" + partVal);
    }
    // Truncate a table
    client.truncateTable(catName, dbName, tableNames[0], partNames);
    // Truncate a table in the wrong catalog
    try {
        client.truncateTable(DEFAULT_DATABASE_NAME, tableNames[0], partNames);
        Assert.fail();
    } catch (NoSuchObjectException | TApplicationException e) {
        // NOP
    }
    // Drop a table from the wrong catalog
    try {
        client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], true, false);
        Assert.fail();
    } catch (NoSuchObjectException | TApplicationException e) {
        // NOP
    }
    // Should ignore the failure
    client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], false, true);
    // Have to do this in reverse order so that we drop the materialized view first.
    for (int i = tableNames.length - 1; i >= 0; i--) {
        t = client.getTable(catName, dbName, tableNames[i]);
        File tableDir = new File(new URI(t.getSd().getLocation()).getPath());
        Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
        if (tableNames[i].equalsIgnoreCase(tableNames[0])) {
            client.dropTable(catName, dbName, tableNames[i], false, false);
            Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
        } else {
            client.dropTable(catName, dbName, tableNames[i]);
            Assert.assertFalse(tableDir.exists());
        }
    }
    client.dropTable(table.getCatName(), table.getDbName(), table.getTableName());
    client.dropTable(table1.getCatName(), table1.getDbName(), table1.getTableName());
    Assert.assertEquals(0, client.getAllTables(catName, dbName).size());
}
Also used : SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.metastore.api.Table) TestHiveMetaStore.createSourceTable(org.apache.hadoop.hive.metastore.TestHiveMetaStore.createSourceTable) ArrayList(java.util.ArrayList) SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) TestHiveMetaStore.createSourceTable(org.apache.hadoop.hive.metastore.TestHiveMetaStore.createSourceTable) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) URI(java.net.URI) Catalog(org.apache.hadoop.hive.metastore.api.Catalog) TApplicationException(org.apache.thrift.TApplicationException) DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) CreationMetadata(org.apache.hadoop.hive.metastore.api.CreationMetadata) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) CatalogBuilder(org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)
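
Both catalog tests assemble partition names by hand as "pcol1=" + partVal. The metastore ships a helper, Warehouse.makePartName, that produces the same form while also escaping special characters; a small self-contained sketch (the column and value are made up):

import java.util.Collections;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class PartNameDemo {
    public static void main(String[] args) throws MetaException {
        FieldSchema pcol = new FieldSchema("pcol1", "string", null);
        // Prints "pcol1=part0"; a value like "a:b" would come out percent-escaped.
        System.out.println(Warehouse.makePartName(
            Collections.singletonList(pcol), Collections.singletonList("part0")));
    }
}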

Aggregations

InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 51
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 26
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 23
IOException (java.io.IOException): 19
ArrayList (java.util.ArrayList): 18
Table (org.apache.hadoop.hive.metastore.api.Table): 17
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 16
TException (org.apache.thrift.TException): 15
Partition (org.apache.hadoop.hive.metastore.api.Partition): 14
FileSystem (org.apache.hadoop.fs.FileSystem): 12
Path (org.apache.hadoop.fs.Path): 12
List (java.util.List): 10
AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException): 10
InvalidInputException (org.apache.hadoop.hive.metastore.api.InvalidInputException): 10
MWMResourcePlan (org.apache.hadoop.hive.metastore.model.MWMResourcePlan): 9
SQLException (java.sql.SQLException): 8
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 8
Test (org.junit.Test): 8
LinkedList (java.util.LinkedList): 7
Database (org.apache.hadoop.hive.metastore.api.Database): 7