Example 1 with AlreadyExistsException

Use of org.apache.hadoop.hive.metastore.api.AlreadyExistsException in project storm by apache.

Class HiveSetupUtil, method createDbAndTable.

public static void createDbAndTable(HiveConf conf, String databaseName, String tableName, List<String> partVals, String[] colNames, String[] colTypes, String[] partNames, String dbLocation) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
        Database db = new Database();
        db.setName(databaseName);
        db.setLocationUri(dbLocation);
        client.createDatabase(db);
        Table tbl = new Table();
        tbl.setDbName(databaseName);
        tbl.setTableName(tableName);
        tbl.setTableType(TableType.MANAGED_TABLE.toString());
        StorageDescriptor sd = new StorageDescriptor();
        sd.setCols(getTableColumns(colNames, colTypes));
        sd.setNumBuckets(1);
        sd.setLocation(dbLocation + Path.SEPARATOR + tableName);
        if (partNames != null && partNames.length != 0) {
            tbl.setPartitionKeys(getPartitionKeys(partNames));
        }
        tbl.setSd(sd);
        sd.setBucketCols(new ArrayList<String>(2));
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName(tbl.getTableName());
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
        sd.getSerdeInfo().setSerializationLib(OrcSerde.class.getName());
        sd.setInputFormat(OrcInputFormat.class.getName());
        sd.setOutputFormat(OrcOutputFormat.class.getName());
        Map<String, String> tableParams = new HashMap<String, String>();
        tbl.setParameters(tableParams);
        client.createTable(tbl);
        try {
            if (partVals != null && partVals.size() > 0) {
                addPartition(client, tbl, partVals);
            }
        } catch (AlreadyExistsException e) {
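            // The partition was created concurrently by another client; safe to ignore in test setup.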
        }
    } finally {
        client.close();
    }
}
Also used : HiveMetaStoreClient(org.apache.hadoop.hive.metastore.HiveMetaStoreClient), Table(org.apache.hadoop.hive.metastore.api.Table), AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException), HashMap(java.util.HashMap), SerDeInfo(org.apache.hadoop.hive.metastore.api.SerDeInfo), StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor), IMetaStoreClient(org.apache.hadoop.hive.metastore.IMetaStoreClient), OrcOutputFormat(org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat), OrcSerde(org.apache.hadoop.hive.ql.io.orc.OrcSerde), OrcInputFormat(org.apache.hadoop.hive.ql.io.orc.OrcInputFormat), Database(org.apache.hadoop.hive.metastore.api.Database)
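
A minimal sketch of invoking the helper above from a test, assuming HiveSetupUtil is on the classpath and the metastore configured by HiveConf is reachable; the database location, columns, and partition value are illustrative placeholders rather than values from the Storm sources.

import org.apache.hadoop.hive.conf.HiveConf;
import java.util.Collections;

public class HiveSetupUtilExample {
    public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // One "dt" partition key with a single partition value; column types use Hive type names.
        HiveSetupUtil.createDbAndTable(
            conf,
            "test_db",                                  // databaseName
            "test_table",                               // tableName
            Collections.singletonList("2017-01-01"),    // partVals
            new String[] {"id", "name"},                // colNames
            new String[] {"int", "string"},             // colTypes
            new String[] {"dt"},                        // partNames
            "/tmp/hive-test-warehouse/test_db");        // dbLocation (placeholder)
    }
}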

Example 2 with AlreadyExistsException

Use of org.apache.hadoop.hive.metastore.api.AlreadyExistsException in project hive by apache.

Class Hive, method loadPartition.

/**
   * Load a directory into a Hive table partition. Alters the existing content
   * of the partition with the contents of loadPath. If the partition does not
   * exist, one is created. Files in loadPath are moved into Hive, but the
   * directory itself is not removed.
   *
   * @param loadPath
   *          directory containing the files to load into the table
   * @param tbl
   *          the table to load into
   * @param partSpec
   *          defines which partition needs to be loaded
   * @param replace
   *          if true, replace files in the partition; otherwise add files to
   *          the partition
   * @param inheritTableSpecs if true, on [re]creating the partition, take the
   *          location/inputformat/outputformat/serde details from the table spec
   * @param isSkewedStoreAsSubdir
   *          whether skewed (list-bucketed) values are stored as subdirectories
   * @param isSrcLocal
   *          if the source directory is on the local filesystem
   * @param isAcid true if this is an ACID operation
   * @param hasFollowingStatsTask
   *          whether a stats-gathering task follows this load
   */
public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> partSpec, boolean replace, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException {
    Path tblDataLocationPath = tbl.getDataLocation();
    try {
        Partition oldPart = getPartition(tbl, partSpec, false);
        /**
       * Move files before creating the partition since downstream processes
       * check for the existence of the partition in metadata before accessing the data.
       * If the partition is created before data is moved, downstream waiting
       * processes might move forward with partial data.
       */
        Path oldPartPath = (oldPart != null) ? oldPart.getDataLocation() : null;
        Path newPartPath = null;
        if (inheritTableSpecs) {
            Path partPath = new Path(tbl.getDataLocation(), Warehouse.makePartPath(partSpec));
            newPartPath = new Path(tblDataLocationPath.toUri().getScheme(), tblDataLocationPath.toUri().getAuthority(), partPath.toUri().getPath());
            if (oldPart != null) {
                /*
           * If we are moving the partition across filesystem boundaries
           * inherit from the table properties. Otherwise (same filesystem) use the
           * original partition location.
           *
           * See: HIVE-1707 and HIVE-2117 for background
           */
                FileSystem oldPartPathFS = oldPartPath.getFileSystem(getConf());
                FileSystem loadPathFS = loadPath.getFileSystem(getConf());
                if (FileUtils.equalsFileSystem(oldPartPathFS, loadPathFS)) {
                    newPartPath = oldPartPath;
                }
            }
        } else {
            newPartPath = oldPartPath;
        }
        List<Path> newFiles = null;
        PerfLogger perfLogger = SessionState.getPerfLogger();
        perfLogger.PerfLogBegin("MoveTask", "FileMoves");
        if (replace || (oldPart == null && !isAcid)) {
            replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(), isSrcLocal);
        } else {
            if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPart != null) {
                newFiles = Collections.synchronizedList(new ArrayList<Path>());
            }
            FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
            Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal, isAcid, newFiles);
        }
        perfLogger.PerfLogEnd("MoveTask", "FileMoves");
        Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath);
        alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString());
        validatePartition(newTPart);
        if ((null != newFiles) || replace) {
            fireInsertEvent(tbl, partSpec, newFiles);
        } else {
            LOG.debug("No new files were created, and is not a replace. Skipping generating INSERT event.");
        }
        // column stats from before this load are no longer accurate
        StatsSetupConst.clearColumnStatsState(newTPart.getParameters());
        // for list-bucketed tables stored as subdirectories, rebuild the
        // skewed-value-to-location mappings from the new partition path
        if (isSkewedStoreAsSubdir) {
            org.apache.hadoop.hive.metastore.api.Partition newCreatedTpart = newTPart.getTPartition();
            SkewedInfo skewedInfo = newCreatedTpart.getSd().getSkewedInfo();
            /* Construct list bucketing location mappings from sub-directory name. */
            Map<List<String>, String> skewedColValueLocationMaps = constructListBucketingLocationMap(newPartPath, skewedInfo);
            /* Add list bucketing location mappings. */
            skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
            newCreatedTpart.getSd().setSkewedInfo(skewedInfo);
        }
        if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
            StatsSetupConst.setBasicStatsState(newTPart.getParameters(), StatsSetupConst.FALSE);
        }
        if (oldPart == null) {
            newTPart.getTPartition().setParameters(new HashMap<String, String>());
            if (this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
                StatsSetupConst.setBasicStatsStateForCreateTable(newTPart.getParameters(), StatsSetupConst.TRUE);
            }
            MetaStoreUtils.populateQuickStats(HiveStatsUtils.getFileStatusRecurse(newPartPath, -1, newPartPath.getFileSystem(conf)), newTPart.getParameters());
            try {
                LOG.debug("Adding new partition " + newTPart.getSpec());
                getSychronizedMSC().add_partition(newTPart.getTPartition());
            } catch (AlreadyExistsException aee) {
                // When multiple users concurrently issue insert statements on the same
                // partition, a query may not see the partition at the time it is issued,
                // but then find the partition does exist when it tries to add it to the
                // metastore, getting AlreadyExistsException because an earlier query
                // just created it (race condition).
                // For example, imagine such a table is created:
                //  create table T (name char(50)) partitioned by (ds string);
                // and the following two queries are launched at the same time, from different sessions:
                //  insert into table T partition (ds) values ('Bob', 'today'); -- creates the partition 'today'
                //  insert into table T partition (ds) values ('Joe', 'today'); -- will fail with AlreadyExistsException
                // In that case, we want to retry with alterPartition.
                LOG.debug("Caught AlreadyExistsException, trying to alter partition instead");
                setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart);
            }
        } else {
            setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart);
        }
        return newTPart;
    } catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    } catch (MetaException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    } catch (InvalidOperationException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    } catch (TException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path), TException(org.apache.thrift.TException), AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException), PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger), ArrayList(java.util.ArrayList), IOException(java.io.IOException), SkewedInfo(org.apache.hadoop.hive.metastore.api.SkewedInfo), FileSystem(org.apache.hadoop.fs.FileSystem), InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException), List(java.util.List), LinkedList(java.util.LinkedList), MetaException(org.apache.hadoop.hive.metastore.api.MetaException), HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException)
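
The part of loadPartition that concerns AlreadyExistsException distills to one idiom: try add_partition first, and on AlreadyExistsException fall back to altering the partition that a concurrent writer just created. A standalone sketch of that idiom, assuming an already-connected IMetaStoreClient; the helper name upsertPartition is ours, not Hive's.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

public final class PartitionUpsert {
    private PartitionUpsert() {
    }

    /** Create the partition, falling back to alter when another session created it first. */
    public static void upsertPartition(IMetaStoreClient msc, Partition part) throws TException {
        try {
            msc.add_partition(part);
        } catch (AlreadyExistsException e) {
            // An earlier concurrent query created the partition between our existence
            // check and add_partition; update it in place instead of failing.
            msc.alter_partition(part.getDbName(), part.getTableName(), part);
        }
    }
}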

Example 3 with AlreadyExistsException

Use of org.apache.hadoop.hive.metastore.api.AlreadyExistsException in project hive by apache.

Class Hive, method getPartition.

/**
   * Returns partition metadata
   *
   * @param tbl
   *          the partition's table
   * @param partSpec
   *          partition keys and values
   * @param forceCreate
   *          if true and the partition does not exist, it is created
   * @param partPath the path where the partition data is located
   * @param inheritTableSpecs whether to copy over the table specs
   *          (input format/output format/serde) on [re]creation
   * @param newFiles An optional list of new files that were moved into this partition.  If
   *                 non-null these will be included in the DML event sent to the metastore.
   * @return result partition object or null if there is no partition
   * @throws HiveException
   */
public Partition getPartition(Table tbl, Map<String, String> partSpec, boolean forceCreate, String partPath, boolean inheritTableSpecs, List<Path> newFiles) throws HiveException {
    tbl.validatePartColumnNames(partSpec, true);
    List<String> pvals = new ArrayList<String>();
    for (FieldSchema field : tbl.getPartCols()) {
        String val = partSpec.get(field.getName());
        // enable dynamic partitioning
        if ((val == null && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) || (val != null && val.length() == 0)) {
            throw new HiveException("get partition: Value for key " + field.getName() + " is null or empty");
        } else if (val != null) {
            pvals.add(val);
        }
    }
    org.apache.hadoop.hive.metastore.api.Partition tpart = null;
    try {
        tpart = getMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(), pvals, getUserName(), getGroupNames());
    } catch (NoSuchObjectException nsoe) {
        // this means no partition exists for the given partition
        // key value pairs - thrift cannot handle null return values, hence
        // getPartition() throws NoSuchObjectException to indicate null partition
        tpart = null;
    } catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    }
    try {
        if (forceCreate) {
            if (tpart == null) {
                LOG.debug("creating partition for table " + tbl.getTableName() + " with partition spec : " + partSpec);
                try {
                    tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals);
                } catch (AlreadyExistsException aee) {
                    LOG.debug("Caught already exists exception, trying to alter partition instead");
                    tpart = getMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(), pvals, getUserName(), getGroupNames());
                    alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
                } catch (Exception e) {
                    if (CheckJDOException.isJDODataStoreException(e)) {
                        // Using utility method above, so that JDODataStoreException doesn't
                        // have to be used here. This helps avoid adding jdo dependency for
                        // hcatalog client uses
                        LOG.debug("Caught JDO exception, trying to alter partition instead");
                        tpart = getMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(), pvals, getUserName(), getGroupNames());
                        if (tpart == null) {
                            // the JDO exception was not caused by a race
                            // in creating the partition, since the partition still doesn't exist
                            throw e;
                        }
                        alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
                    } else {
                        throw e;
                    }
                }
            } else {
                alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
                fireInsertEvent(tbl, partSpec, newFiles);
            }
        }
        if (tpart == null) {
            return null;
        }
    } catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    }
    return new Partition(tbl, tpart);
}
Also used : AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException), FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema), ArrayList(java.util.ArrayList), InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException), TException(org.apache.thrift.TException), IOException(java.io.IOException), ExecutionException(java.util.concurrent.ExecutionException), SerDeException(org.apache.hadoop.hive.serde2.SerDeException), NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException), MetaException(org.apache.hadoop.hive.metastore.api.MetaException), HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException), FileNotFoundException(java.io.FileNotFoundException), JDODataStoreException(javax.jdo.JDODataStoreException)
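
As the first catch block notes, Thrift cannot return a null partition, so the metastore client signals absence with NoSuchObjectException. A small sketch that folds that convention into an Optional; the helper name findPartition is ours.

import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

public final class PartitionLookup {
    private PartitionLookup() {
    }

    /** Fetch a partition, mapping the Thrift "not found" exception to Optional.empty(). */
    public static Optional<Partition> findPartition(IMetaStoreClient msc, String db, String table,
            List<String> partVals) throws TException {
        try {
            return Optional.of(msc.getPartition(db, table, partVals));
        } catch (NoSuchObjectException e) {
            // No partition exists for the given partition key values.
            return Optional.empty();
        }
    }
}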

Example 4 with AlreadyExistsException

Use of org.apache.hadoop.hive.metastore.api.AlreadyExistsException in project hive by apache.

Class HiveAlterHandler, method alterPartition.

@Override
public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, final String name, final List<String> part_vals, final Partition new_part, EnvironmentContext environmentContext, HMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
    boolean success = false;
    Path srcPath = null;
    Path destPath = null;
    FileSystem srcFs = null;
    FileSystem destFs;
    Partition oldPart = null;
    String oldPartLoc = null;
    String newPartLoc = null;
    List<MetaStoreEventListener> transactionalListeners = null;
    if (handler != null) {
        transactionalListeners = handler.getTransactionalListeners();
    }
    // Set DDL time to now if not specified
    if (new_part.getParameters() == null || new_part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null || Integer.parseInt(new_part.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
        new_part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System.currentTimeMillis() / 1000));
    }
    Table tbl = msdb.getTable(dbname, name);
    if (tbl == null) {
        throw new InvalidObjectException("Unable to alter partition because table or database does not exist.");
    }
    //alter partition
    if (part_vals == null || part_vals.size() == 0) {
        try {
            msdb.openTransaction();
            oldPart = msdb.getPartition(dbname, name, new_part.getValues());
            if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl, environmentContext)) {
                // if stats are same, no need to update
                if (MetaStoreUtils.isFastStatsSame(oldPart, new_part)) {
                    MetaStoreUtils.updateBasicState(environmentContext, new_part.getParameters());
                } else {
                    MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true, environmentContext);
                }
            }
            updatePartColumnStats(msdb, dbname, name, new_part.getValues(), new_part);
            msdb.alterPartition(dbname, name, new_part.getValues(), new_part);
            if (transactionalListeners != null && transactionalListeners.size() > 0) {
                AlterPartitionEvent alterPartitionEvent = new AlterPartitionEvent(oldPart, new_part, tbl, true, handler);
                alterPartitionEvent.setEnvironmentContext(environmentContext);
                for (MetaStoreEventListener transactionalListener : transactionalListeners) {
                    transactionalListener.onAlterPartition(alterPartitionEvent);
                }
            }
            success = msdb.commitTransaction();
        } catch (InvalidObjectException e) {
            throw new InvalidOperationException("alter is not possible");
        } catch (NoSuchObjectException e) {
            //old partition does not exist
            throw new InvalidOperationException("alter is not possible");
        } finally {
            if (!success) {
                msdb.rollbackTransaction();
            }
        }
        return oldPart;
    }
    //rename partition
    try {
        msdb.openTransaction();
        try {
            oldPart = msdb.getPartition(dbname, name, part_vals);
        } catch (NoSuchObjectException e) {
            // this means there is no existing partition
            throw new InvalidObjectException("Unable to rename partition because old partition does not exist");
        }
        Partition check_part;
        try {
            check_part = msdb.getPartition(dbname, name, new_part.getValues());
        } catch (NoSuchObjectException e) {
            // this means there is no existing partition
            check_part = null;
        }
        if (check_part != null) {
            throw new AlreadyExistsException("Partition already exists: " + dbname + "." + name + "." + new_part.getValues());
        }
        // if the external partition is renamed, the file should not change
        if (tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
            new_part.getSd().setLocation(oldPart.getSd().getLocation());
            String oldPartName = Warehouse.makePartName(tbl.getPartitionKeys(), oldPart.getValues());
            try {
                // existing partition column stats are no longer valid; remove them
                msdb.deletePartitionColumnStatistics(dbname, name, oldPartName, oldPart.getValues(), null);
            } catch (NoSuchObjectException nsoe) {
            //ignore
            } catch (InvalidInputException iie) {
                throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
            }
            msdb.alterPartition(dbname, name, part_vals, new_part);
        } else {
            try {
                destPath = new Path(wh.getTablePath(msdb.getDatabase(dbname), name), Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues()));
                destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation()));
            } catch (NoSuchObjectException e) {
                LOG.debug("Didn't find object in metastore ", e);
                throw new InvalidOperationException("Unable to change partition or table. Database " + dbname + " does not exist" + " Check metastore logs for detailed stack." + e.getMessage());
            }
            if (destPath != null) {
                newPartLoc = destPath.toString();
                oldPartLoc = oldPart.getSd().getLocation();
                LOG.info("srcPath:" + oldPartLoc);
                LOG.info("descPath:" + newPartLoc);
                srcPath = new Path(oldPartLoc);
                srcFs = wh.getFs(srcPath);
                destFs = wh.getFs(destPath);
                // check that src and dest are on the same file system
                if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
                    throw new InvalidOperationException("New table location " + destPath + " is on a different file system than the old location " + srcPath + ". This operation is not supported.");
                }
                try {
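                    // result intentionally ignored: the call probes srcPath so an
                    // unreachable filesystem fails here with IOException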
                    srcFs.exists(srcPath);
                    if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
                        throw new InvalidOperationException("New location for this table " + tbl.getDbName() + "." + tbl.getTableName() + " already exists : " + destPath);
                    }
                } catch (IOException e) {
                    throw new InvalidOperationException("Unable to access new location " + destPath + " for partition " + tbl.getDbName() + "." + tbl.getTableName() + " " + new_part.getValues());
                }
                new_part.getSd().setLocation(newPartLoc);
                if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl, environmentContext)) {
                    MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true, environmentContext);
                }
                String oldPartName = Warehouse.makePartName(tbl.getPartitionKeys(), oldPart.getValues());
                try {
                    // existing partition column stats are no longer valid; remove them
                    msdb.deletePartitionColumnStatistics(dbname, name, oldPartName, oldPart.getValues(), null);
                } catch (NoSuchObjectException nsoe) {
                //ignore
                } catch (InvalidInputException iie) {
                    throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
                }
                msdb.alterPartition(dbname, name, part_vals, new_part);
            }
        }
        if (transactionalListeners != null && transactionalListeners.size() > 0) {
            AlterPartitionEvent alterPartitionEvent = new AlterPartitionEvent(oldPart, new_part, tbl, true, handler);
            alterPartitionEvent.setEnvironmentContext(environmentContext);
            for (MetaStoreEventListener transactionalListener : transactionalListeners) {
                transactionalListener.onAlterPartition(alterPartitionEvent);
            }
        }
        success = msdb.commitTransaction();
    } finally {
        if (!success) {
            msdb.rollbackTransaction();
        }
        if (success && newPartLoc != null && newPartLoc.compareTo(oldPartLoc) != 0) {
            //rename the data directory
            try {
                if (srcFs.exists(srcPath)) {
                    //if destPath's parent path doesn't exist, we should mkdir it
                    Path destParentPath = destPath.getParent();
                    if (!wh.mkdirs(destParentPath, true)) {
                        throw new IOException("Unable to create path " + destParentPath);
                    }
                    wh.renameDir(srcPath, destPath, true);
                    LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done.");
                }
            } catch (IOException ex) {
                LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, ex);
                boolean revertMetaDataTransaction = false;
                try {
                    msdb.openTransaction();
                    msdb.alterPartition(dbname, name, new_part.getValues(), oldPart);
                    if (transactionalListeners != null && transactionalListeners.size() > 0) {
                        AlterPartitionEvent alterPartitionEvent = new AlterPartitionEvent(new_part, oldPart, tbl, true, handler);
                        alterPartitionEvent.setEnvironmentContext(environmentContext);
                        for (MetaStoreEventListener transactionalListener : transactionalListeners) {
                            transactionalListener.onAlterPartition(alterPartitionEvent);
                        }
                    }
                    revertMetaDataTransaction = msdb.commitTransaction();
                } catch (Exception ex2) {
                    LOG.error("Attempt to revert partition metadata change failed. The revert was attempted " + "because associated filesystem rename operation failed with exception " + ex.getMessage(), ex2);
                    if (!revertMetaDataTransaction) {
                        msdb.rollbackTransaction();
                    }
                }
                throw new InvalidOperationException("Unable to access old location " + srcPath + " for partition " + tbl.getDbName() + "." + tbl.getTableName() + " " + part_vals);
            }
        }
    }
    return oldPart;
}
Also used : Path(org.apache.hadoop.fs.Path), Partition(org.apache.hadoop.hive.metastore.api.Partition), InvalidInputException(org.apache.hadoop.hive.metastore.api.InvalidInputException), Table(org.apache.hadoop.hive.metastore.api.Table), AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException), AlterPartitionEvent(org.apache.hadoop.hive.metastore.events.AlterPartitionEvent), IOException(java.io.IOException), MetaException(org.apache.hadoop.hive.metastore.api.MetaException), InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException), RemoteException(org.apache.hadoop.ipc.RemoteException), InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException), NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException), FileSystem(org.apache.hadoop.fs.FileSystem)
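
The rename branch above refuses to move partition data across filesystems, checks the destination, creates its parent directory, and only then renames. A condensed sketch of that guard using the same Hadoop FileSystem and Hive FileUtils calls; the class and method names here are ours, not the handler's.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;

public final class SameFsRename {
    private SameFsRename() {
    }

    /** Rename src to dest only when both paths live on the same filesystem. */
    public static void renameWithinFs(Configuration conf, Path src, Path dest) throws IOException {
        FileSystem srcFs = src.getFileSystem(conf);
        FileSystem destFs = dest.getFileSystem(conf);
        // Same guard alterPartition applies before moving partition data.
        if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
            throw new IOException("Cross-filesystem rename not supported: " + src + " -> " + dest);
        }
        if (destFs.exists(dest)) {
            throw new IOException("Destination already exists: " + dest);
        }
        // Create the destination's parent first, as the handler does before renameDir.
        Path parent = dest.getParent();
        if (parent != null && !destFs.exists(parent) && !destFs.mkdirs(parent)) {
            throw new IOException("Unable to create path " + parent);
        }
        if (!srcFs.rename(src, dest)) {
            throw new IOException("Rename failed: " + src + " -> " + dest);
        }
    }
}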

Example 5 with AlreadyExistsException

Use of org.apache.hadoop.hive.metastore.api.AlreadyExistsException in project metacat by Netflix.

Class HiveConnectorTableService, method create.

/**
     * Create a table.
     *
     * @param requestContext The request context
     * @param tableInfo      The resource metadata
     */
@Override
public void create(@Nonnull @NonNull final ConnectorContext requestContext, @Nonnull @NonNull final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    try {
        final Table table = hiveMetacatConverters.fromTableInfo(tableInfo);
        updateTable(requestContext, table, tableInfo);
        metacatHiveClient.createTable(table);
    } catch (AlreadyExistsException exception) {
        throw new TableAlreadyExistsException(tableName, exception);
    } catch (MetaException exception) {
        throw new InvalidMetaException(tableName, exception);
    } catch (NoSuchObjectException | InvalidObjectException exception) {
        throw new DatabaseNotFoundException(QualifiedName.ofDatabase(tableName.getCatalogName(), tableName.getDatabaseName()), exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed to create hive table %s", tableName), exception);
    }
}
Also used : TException(org.apache.thrift.TException), TableAlreadyExistsException(com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException), Table(org.apache.hadoop.hive.metastore.api.Table), AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException), QualifiedName(com.netflix.metacat.common.QualifiedName), DatabaseNotFoundException(com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException), ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException), NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException), InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException), InvalidMetaException(com.netflix.metacat.common.server.connectors.exception.InvalidMetaException), MetaException(org.apache.hadoop.hive.metastore.api.MetaException)
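
Because create translates the metastore's AlreadyExistsException into metacat's TableAlreadyExistsException, a caller can make table creation idempotent by catching the translated exception. A sketch under that assumption; the import paths for ConnectorContext and TableInfo are not shown in the excerpt above and are assumed here.

import com.netflix.metacat.common.server.connectors.ConnectorContext;   // assumed path
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;    // assumed path

public final class IdempotentCreate {
    private IdempotentCreate() {
    }

    /** Create the table, treating "already exists" as success. */
    public static void createIfAbsent(HiveConnectorTableService service,
            ConnectorContext context, TableInfo tableInfo) {
        try {
            service.create(context, tableInfo);
        } catch (TableAlreadyExistsException e) {
            // A concurrent creator won the race; the table exists, which is the goal.
        }
    }
}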

Aggregations

AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException): 17
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 14
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 13
TException (org.apache.thrift.TException): 13
IOException (java.io.IOException): 8
Table (org.apache.hadoop.hive.metastore.api.Table): 8
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 7
ArrayList (java.util.ArrayList): 5
InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 5
Partition (org.apache.hadoop.hive.metastore.api.Partition): 5
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor): 4
ExecutionException (java.util.concurrent.ExecutionException): 3
Database (org.apache.hadoop.hive.metastore.api.Database): 3
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 3
SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo): 3
QualifiedName (com.netflix.metacat.common.QualifiedName): 2
ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException): 2
InvalidMetaException (com.netflix.metacat.common.server.connectors.exception.InvalidMetaException): 2
FileSystem (org.apache.hadoop.fs.FileSystem): 2
Path (org.apache.hadoop.fs.Path): 2