
Example 26 with MetaException

use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.

the class DDLTask method partitionInCustomLocation.

/**
   * Checks whether the partition is in a custom (non-standard) location.
   * @param tbl - table that contains the partition
   * @param p - partition to check
   * @return true if the partition location is custom, false if it is standard
   */
boolean partitionInCustomLocation(Table tbl, Partition p) throws HiveException {
    String subdir = null;
    try {
        subdir = Warehouse.makePartName(tbl.getPartCols(), p.getValues());
    } catch (MetaException e) {
        throw new HiveException("Unable to get partition's directory", e);
    }
    Path tableDir = tbl.getDataLocation();
    if (tableDir == null) {
        throw new HiveException("Table has no location set");
    }
    String standardLocation = (new Path(tableDir, subdir)).toString();
    if (ArchiveUtils.isArchived(p)) {
        return !getOriginalLocation(p).equals(standardLocation);
    } else {
        return !p.getLocation().equals(standardLocation);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)
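
For context, a minimal standalone sketch (not from the Hive sources) of the standard-location computation above: Warehouse.makePartName builds the canonical <col>=<val>/... sub-directory from the partition columns and values, throwing MetaException when the metadata is malformed (for example, a column/value count mismatch). The column names and values below are invented for illustration.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class PartNameDemo {
    public static void main(String[] args) {
        // Hypothetical partition schema: (ds string, hr string).
        List<FieldSchema> partCols = Arrays.asList(
            new FieldSchema("ds", "string", null),
            new FieldSchema("hr", "string", null));
        List<String> vals = Arrays.asList("2017-01-01", "12");
        try {
            // Typically prints: ds=2017-01-01/hr=12
            System.out.println(Warehouse.makePartName(partCols, vals));
        } catch (MetaException e) {
            // Thrown e.g. when the number of values does not match the columns.
            e.printStackTrace();
        }
    }
}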

Example 27 with MetaException

use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.

the class Hive method loadPartition.

/**
   * Load a directory into a Hive table partition.
   *  - Alters the existing content of the partition with the contents of loadPath.
   *  - If the partition does not exist, it is created.
   *  - Files in loadPath are moved into Hive, but the directory itself is not removed.
   *
   * @param loadPath
   *          directory containing the files to load into the table
   * @param tbl
   *          table to be loaded into
   * @param partSpec
   *          defines which partition needs to be loaded
   * @param replace
   *          if true, replace the files in the partition, otherwise add files to
   *          the partition
   * @param inheritTableSpecs if true, on [re]creating the partition, take the
   *          location/inputformat/outputformat/serde details from the table spec
   * @param isSkewedStoreAsSubdir
   *          if true, skewed values are stored in sub-directories (list bucketing),
   *          and the location mappings are reconstructed after the load
   * @param isSrcLocal
   *          if true, the source directory is on the local filesystem
   * @param isAcid true if this is an ACID operation
   * @param hasFollowingStatsTask
   *          true if a stats task will run after this load, in which case stats
   *          state is left for that task to finalize
   */
public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> partSpec, boolean replace, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException {
    Path tblDataLocationPath = tbl.getDataLocation();
    try {
        Partition oldPart = getPartition(tbl, partSpec, false);
        /**
       * Move files before creating the partition since downstream processes
       * check for the existence of the partition in metadata before accessing
       * the data. If the partition is created before the data is moved,
       * downstream waiting processes might move forward with partial data.
       */
        Path oldPartPath = (oldPart != null) ? oldPart.getDataLocation() : null;
        Path newPartPath = null;
        if (inheritTableSpecs) {
            Path partPath = new Path(tbl.getDataLocation(), Warehouse.makePartPath(partSpec));
            newPartPath = new Path(tblDataLocationPath.toUri().getScheme(), tblDataLocationPath.toUri().getAuthority(), partPath.toUri().getPath());
            if (oldPart != null) {
                /*
           * If we are moving the partition across filesystem boundaries
           * inherit from the table properties. Otherwise (same filesystem) use the
           * original partition location.
           *
           * See: HIVE-1707 and HIVE-2117 for background
           */
                FileSystem oldPartPathFS = oldPartPath.getFileSystem(getConf());
                FileSystem loadPathFS = loadPath.getFileSystem(getConf());
                if (FileUtils.equalsFileSystem(oldPartPathFS, loadPathFS)) {
                    newPartPath = oldPartPath;
                }
            }
        } else {
            newPartPath = oldPartPath;
        }
        List<Path> newFiles = null;
        PerfLogger perfLogger = SessionState.getPerfLogger();
        perfLogger.PerfLogBegin("MoveTask", "FileMoves");
        if (replace || (oldPart == null && !isAcid)) {
            replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(), isSrcLocal);
        } else {
            if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPart != null) {
                newFiles = Collections.synchronizedList(new ArrayList<Path>());
            }
            FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
            Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal, isAcid, newFiles);
        }
        perfLogger.PerfLogEnd("MoveTask", "FileMoves");
        Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath);
        alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString());
        validatePartition(newTPart);
        if ((null != newFiles) || replace) {
            fireInsertEvent(tbl, partSpec, newFiles);
        } else {
            LOG.debug("No new files were created, and is not a replace. Skipping generating INSERT event.");
        }
        // column stats will be inaccurate after the load
        StatsSetupConst.clearColumnStatsState(newTPart.getParameters());
        // rebuild the list bucketing location mappings if skewed values are stored as sub-directories
        if (isSkewedStoreAsSubdir) {
            org.apache.hadoop.hive.metastore.api.Partition newCreatedTpart = newTPart.getTPartition();
            SkewedInfo skewedInfo = newCreatedTpart.getSd().getSkewedInfo();
            /* Construct list bucketing location mappings from sub-directory name. */
            Map<List<String>, String> skewedColValueLocationMaps = constructListBucketingLocationMap(newPartPath, skewedInfo);
            /* Add list bucketing location mappings. */
            skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
            newCreatedTpart.getSd().setSkewedInfo(skewedInfo);
        }
        if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
            StatsSetupConst.setBasicStatsState(newTPart.getParameters(), StatsSetupConst.FALSE);
        }
        if (oldPart == null) {
            newTPart.getTPartition().setParameters(new HashMap<String, String>());
            if (this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
                StatsSetupConst.setBasicStatsStateForCreateTable(newTPart.getParameters(), StatsSetupConst.TRUE);
            }
            MetaStoreUtils.populateQuickStats(HiveStatsUtils.getFileStatusRecurse(newPartPath, -1, newPartPath.getFileSystem(conf)), newTPart.getParameters());
            try {
                LOG.debug("Adding new partition " + newTPart.getSpec());
                getSychronizedMSC().add_partition(newTPart.getTPartition());
            } catch (AlreadyExistsException aee) {
                // When multiple users concurrently issue insert statements on the same partition,
                // some queries may not see the partition at the time they are issued, but then
                // discover it exists when adding it to the metastore (AlreadyExistsException),
                // because an earlier query created it first (a race condition).
                // For example, imagine such a table is created:
                //  create table T (name char(50)) partitioned by (ds string);
                // and the following two queries are launched at the same time, from different sessions:
                //  insert into table T partition (ds) values ('Bob', 'today'); -- creates the partition 'today'
                //  insert into table T partition (ds) values ('Joe', 'today'); -- will fail with AlreadyExistsException
                // In that case, we want to retry with alterPartition.
                LOG.debug("Caught AlreadyExistsException, trying to alter partition instead");
                setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart);
            }
        } else {
            setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart);
        }
        return newTPart;
    } catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    } catch (MetaException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    } catch (InvalidOperationException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    } catch (TException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) TException(org.apache.thrift.TException) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) ArrayList(java.util.ArrayList) IOException(java.io.IOException) SkewedInfo(org.apache.hadoop.hive.metastore.api.SkewedInfo) FileSystem(org.apache.hadoop.fs.FileSystem) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) ArrayList(java.util.ArrayList) List(java.util.List) LinkedList(java.util.LinkedList) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException)
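
A hedged sketch of a caller for loadPartition, using the signature shown above. The table name, partition spec, and staging path are hypothetical, and error handling is reduced to letting HiveException propagate.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class LoadPartitionDemo {
    public static void main(String[] args) throws HiveException {
        Hive db = Hive.get(new HiveConf());
        Table tbl = db.getTable("t");                    // hypothetical table
        Map<String, String> partSpec = new HashMap<>();
        partSpec.put("ds", "today");                     // hypothetical partition
        Partition p = db.loadPartition(
            new Path("/tmp/staging"),                    // loadPath (hypothetical)
            tbl, partSpec,
            true,   // replace: overwrite existing files
            true,   // inheritTableSpecs
            false,  // isSkewedStoreAsSubdir
            false,  // isSrcLocal
            false,  // isAcid
            false); // hasFollowingStatsTask
        System.out.println("Loaded " + p.getName());
    }
}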

Example 28 with MetaException

use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.

the class Hive method renamePartition.

/**
   * Rename an old partition to a new partition.
   *
   * @param tbl
   *          existing table
   * @param oldPartSpec
   *          spec of the old partition
   * @param newPart
   *          new partition
   * @throws HiveException
   *           if the metadata changes are not acceptable or the rename fails
   */
public void renamePartition(Table tbl, Map<String, String> oldPartSpec, Partition newPart) throws HiveException {
    try {
        Map<String, String> newPartSpec = newPart.getSpec();
        if (oldPartSpec.keySet().size() != tbl.getPartCols().size() || newPartSpec.keySet().size() != tbl.getPartCols().size()) {
            throw new HiveException("Unable to rename partition to the same name: number of partition cols don't match. ");
        }
        if (!oldPartSpec.keySet().equals(newPartSpec.keySet())) {
            throw new HiveException("Unable to rename partition to the same name: old and new partition cols don't match. ");
        }
        List<String> pvals = new ArrayList<String>();
        for (FieldSchema field : tbl.getPartCols()) {
            String val = oldPartSpec.get(field.getName());
            if (val == null || val.length() == 0) {
                throw new HiveException("get partition: Value for key " + field.getName() + " is null or empty");
            } else {
                pvals.add(val);
            }
        }
        getMSC().renamePartition(tbl.getDbName(), tbl.getTableName(), pvals, newPart.getTPartition());
    } catch (InvalidOperationException e) {
        throw new HiveException("Unable to rename partition. " + e.getMessage(), e);
    } catch (MetaException e) {
        throw new HiveException("Unable to rename partition. " + e.getMessage(), e);
    } catch (TException e) {
        throw new HiveException("Unable to rename partition. " + e.getMessage(), e);
    }
}
Also used : TException(org.apache.thrift.TException) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException)
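
A hedged sketch of driving renamePartition. It assumes the usual pattern of fetching the existing partition, pointing its Thrift values at the new ones, and passing the old spec alongside; the table name (partitioned by a single ds column) and date values are hypothetical, and mutating getValues() in place is shown only for illustration.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class RenamePartitionDemo {
    public static void main(String[] args) throws HiveException {
        Hive db = Hive.get(new HiveConf());
        Table tbl = db.getTable("t");                    // hypothetical table
        Map<String, String> oldSpec = new HashMap<>();
        oldSpec.put("ds", "2017-01-01");
        Partition newPart = db.getPartition(tbl, oldSpec, false);
        // Point the underlying Thrift partition at the new value before the rename.
        newPart.getTPartition().getValues().set(0, "2017-01-02");
        db.renamePartition(tbl, oldSpec, newPart);
    }
}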

Example 29 with MetaException

use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.

the class GenMapRedUtils method getInputPathsForPartialScan.

public static List<Path> getInputPathsForPartialScan(TableScanOperator tableScanOp, Appendable aggregationKey) throws SemanticException {
    List<Path> inputPaths = new ArrayList<Path>();
    switch(tableScanOp.getConf().getTableMetadata().getTableSpec().specType) {
        case TABLE_ONLY:
            inputPaths.add(tableScanOp.getConf().getTableMetadata().getTableSpec().tableHandle.getPath());
            break;
        case STATIC_PARTITION:
            Partition part = tableScanOp.getConf().getTableMetadata().getTableSpec().partHandle;
            try {
                aggregationKey.append(Warehouse.makePartPath(part.getSpec()));
            } catch (MetaException e) {
                throw new SemanticException(ErrorMsg.ANALYZE_TABLE_PARTIALSCAN_AGGKEY.getMsg(part.getDataLocation().toString() + e.getMessage()));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            inputPaths.add(part.getDataLocation());
            break;
        default:
            assert false;
    }
    return inputPaths;
}
Also used : Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.ql.metadata.Partition) ArrayList(java.util.ArrayList) IOException(java.io.IOException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
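
The STATIC_PARTITION branch above hinges on Warehouse.makePartPath, which turns a partition spec map into the standard sub-path and throws MetaException for invalid specs (for example, null values). A small standalone sketch with invented spec entries; StringBuilder stands in for the Appendable aggregation key.

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class AggKeyDemo {
    public static void main(String[] args) {
        // LinkedHashMap keeps the spec's insertion order in the generated path.
        Map<String, String> spec = new LinkedHashMap<>();
        spec.put("ds", "2017-01-01");
        spec.put("hr", "12");
        StringBuilder aggregationKey = new StringBuilder();
        try {
            // Typically yields: ds=2017-01-01/hr=12/
            aggregationKey.append(Warehouse.makePartPath(spec));
        } catch (MetaException e) {
            // Raised for malformed specs, e.g. a null partition value.
            e.printStackTrace();
        }
        System.out.println(aggregationKey);
    }
}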

Example 30 with MetaException

use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.

the class MetaDataExportListener method export_meta_data.

/** Export the table metadata to a given path and, if configured, move it to the user's trash */
private void export_meta_data(PreDropTableEvent tableEvent) throws MetaException {
    FileSystem fs = null;
    Table tbl = tableEvent.getTable();
    String name = tbl.getTableName();
    org.apache.hadoop.hive.ql.metadata.Table mTbl = new org.apache.hadoop.hive.ql.metadata.Table(tbl);
    HMSHandler handler = tableEvent.getHandler();
    HiveConf hiveconf = handler.getHiveConf();
    Warehouse wh = new Warehouse(hiveconf);
    Path tblPath = new Path(tbl.getSd().getLocation());
    fs = wh.getFs(tblPath);
    Date now = new Date();
    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss");
    String dateString = sdf.format(now);
    String exportPathString = hiveconf.getVar(HiveConf.ConfVars.METADATA_EXPORT_LOCATION);
    boolean moveMetadataToTrash = hiveconf.getBoolVar(HiveConf.ConfVars.MOVE_EXPORTED_METADATA_TO_TRASH);
    Path exportPath = null;
    // An empty (default) export location means export to the user's home directory.
    if (exportPathString != null && exportPathString.length() == 0) {
        exportPath = fs.getHomeDirectory();
    } else {
        exportPath = new Path(exportPathString);
    }
    Path metaPath = new Path(exportPath, name + "." + dateString);
    LOG.info("Exporting the metadata of table " + tbl.toString() + " to path " + metaPath.toString());
    try {
        fs.mkdirs(metaPath);
    } catch (IOException e) {
        throw new MetaException(e.getMessage());
    }
    Path outFile = new Path(metaPath, name + EximUtil.METADATA_NAME);
    try {
        SessionState.getConsole().printInfo("Beginning metadata export");
        EximUtil.createExportDump(fs, outFile, mTbl, null, null);
        if (moveMetadataToTrash) {
            wh.deleteDir(metaPath, true);
        }
    } catch (IOException e) {
        throw new MetaException(e.getMessage());
    } catch (SemanticException e) {
        throw new MetaException(e.getMessage());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) Table(org.apache.hadoop.hive.metastore.api.Table) IOException(java.io.IOException) Date(java.util.Date) FileSystem(org.apache.hadoop.fs.FileSystem) HiveConf(org.apache.hadoop.hive.conf.HiveConf) SimpleDateFormat(java.text.SimpleDateFormat) HMSHandler(org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)
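
To see this listener fire, it must be registered as a metastore pre-event listener. A hedged configuration sketch follows: it assumes the listener's fully qualified name is org.apache.hadoop.hive.ql.parse.MetaDataExportListener and uses an illustrative export path; the two export-related ConfVars are the ones read in export_meta_data above.

import org.apache.hadoop.hive.conf.HiveConf;

public class ListenerConfigDemo {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Register the pre-event listener (class name assumed; verify against your Hive version).
        conf.setVar(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
            "org.apache.hadoop.hive.ql.parse.MetaDataExportListener");
        // Where exported metadata lands; empty means the user's home directory.
        conf.setVar(HiveConf.ConfVars.METADATA_EXPORT_LOCATION,
            "/user/hive/exports");                       // hypothetical path
        // Keep exports in place instead of moving them to the trash.
        conf.setBoolVar(HiveConf.ConfVars.MOVE_EXPORTED_METADATA_TO_TRASH, false);
        // A metastore started with this conf will export table metadata
        // before every DROP TABLE, as implemented in export_meta_data above.
        System.out.println(conf.getVar(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS));
    }
}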

Aggregations

MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 232 usages
IOException (java.io.IOException): 97 usages
ArrayList (java.util.ArrayList): 66 usages
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 55 usages
TException (org.apache.thrift.TException): 50 usages
Table (org.apache.hadoop.hive.metastore.api.Table): 43 usages
Partition (org.apache.hadoop.hive.metastore.api.Partition): 37 usages
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 33 usages
Path (org.apache.hadoop.fs.Path): 32 usages
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 21 usages
Database (org.apache.hadoop.hive.metastore.api.Database): 20 usages
SQLException (java.sql.SQLException): 19 usages
List (java.util.List): 19 usages
InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 18 usages
AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException): 17 usages
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor): 17 usages
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 17 usages
HashMap (java.util.HashMap): 16 usages
UnknownDBException (org.apache.hadoop.hive.metastore.api.UnknownDBException): 15 usages
Connection (java.sql.Connection): 14 usages