
Example 21 with HiveException

use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.

the class DDLTask method alterTableOrSinglePartition.

private int alterTableOrSinglePartition(AlterTableDesc alterTbl, Table tbl, Partition part) throws HiveException {
    EnvironmentContext environmentContext = alterTbl.getEnvironmentContext();
    if (environmentContext == null) {
        environmentContext = new EnvironmentContext();
        alterTbl.setEnvironmentContext(environmentContext);
    }
    // no need to update stats in alter table/partition operations
    if (environmentContext.getProperties() == null || environmentContext.getProperties().get(StatsSetupConst.DO_NOT_UPDATE_STATS) == null) {
        environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
    }
    if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
        tbl.setDbName(Utilities.getDatabaseName(alterTbl.getNewName()));
        tbl.setTableName(Utilities.getTableName(alterTbl.getNewName()));
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {
        StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
        String serializationLib = sd.getSerdeInfo().getSerializationLib();
        AvroSerdeUtils.handleAlterTableForAvro(conf, serializationLib, tbl.getTTable().getParameters());
        List<FieldSchema> oldCols = (part == null ? tbl.getColsForMetastore() : part.getColsForMetastore());
        List<FieldSchema> newCols = alterTbl.getNewCols();
        if (serializationLib.equals("org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
            console.printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
            sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
            sd.setCols(newCols);
        } else {
            // make sure the new columns do not already exist
            Iterator<FieldSchema> iterNewCols = newCols.iterator();
            while (iterNewCols.hasNext()) {
                FieldSchema newCol = iterNewCols.next();
                String newColName = newCol.getName();
                Iterator<FieldSchema> iterOldCols = oldCols.iterator();
                while (iterOldCols.hasNext()) {
                    String oldColName = iterOldCols.next().getName();
                    if (oldColName.equalsIgnoreCase(newColName)) {
                        throw new HiveException(ErrorMsg.DUPLICATE_COLUMN_NAMES, newColName);
                    }
                }
                oldCols.add(newCol);
            }
            sd.setCols(oldCols);
        }
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) {
        StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
        String serializationLib = sd.getSerdeInfo().getSerializationLib();
        AvroSerdeUtils.handleAlterTableForAvro(conf, serializationLib, tbl.getTTable().getParameters());
        List<FieldSchema> oldCols = (part == null ? tbl.getColsForMetastore() : part.getColsForMetastore());
        List<FieldSchema> newCols = new ArrayList<FieldSchema>();
        Iterator<FieldSchema> iterOldCols = oldCols.iterator();
        String oldName = alterTbl.getOldColName();
        String newName = alterTbl.getNewColName();
        String type = alterTbl.getNewColType();
        String comment = alterTbl.getNewColComment();
        boolean first = alterTbl.getFirst();
        String afterCol = alterTbl.getAfterCol();
        // if orc table, restrict reordering columns as it will break schema evolution
        boolean isOrcSchemaEvolution = sd.getInputFormat().equals(OrcInputFormat.class.getName()) && isSchemaEvolutionEnabled(tbl);
        if (isOrcSchemaEvolution && (first || (afterCol != null && !afterCol.trim().isEmpty()))) {
            throw new HiveException(ErrorMsg.CANNOT_REORDER_COLUMNS, alterTbl.getOldName());
        }
        FieldSchema column = null;
        boolean found = false;
        int position = -1;
        if (first) {
            position = 0;
        }
        int i = 1;
        while (iterOldCols.hasNext()) {
            FieldSchema col = iterOldCols.next();
            String oldColName = col.getName();
            if (oldColName.equalsIgnoreCase(newName) && !oldColName.equalsIgnoreCase(oldName)) {
                throw new HiveException(ErrorMsg.DUPLICATE_COLUMN_NAMES, newName);
            } else if (oldColName.equalsIgnoreCase(oldName)) {
                col.setName(newName);
                if (type != null && !type.trim().equals("")) {
                    col.setType(type);
                }
                if (comment != null) {
                    col.setComment(comment);
                }
                found = true;
                if (first || (afterCol != null && !afterCol.trim().equals(""))) {
                    column = col;
                    continue;
                }
            }
            if (afterCol != null && !afterCol.trim().equals("") && oldColName.equalsIgnoreCase(afterCol)) {
                position = i;
            }
            i++;
            newCols.add(col);
        }
        // did not find the column
        if (!found) {
            throw new HiveException(ErrorMsg.INVALID_COLUMN, oldName);
        }
        // after column is not null, but we did not find it.
        if ((afterCol != null && !afterCol.trim().equals("")) && position < 0) {
            throw new HiveException(ErrorMsg.INVALID_COLUMN, afterCol);
        }
        if (position >= 0) {
            newCols.add(position, column);
        }
        sd.setCols(newCols);
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) {
        StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
        // change SerDe to LazySimpleSerDe if it is columnsetSerDe
        String serializationLib = sd.getSerdeInfo().getSerializationLib();
        if (serializationLib.equals("org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
            console.printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
            sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
        } else if (!serializationLib.equals(MetadataTypedColumnsetSerDe.class.getName()) && !serializationLib.equals(LazySimpleSerDe.class.getName()) && !serializationLib.equals(ColumnarSerDe.class.getName()) && !serializationLib.equals(DynamicSerDe.class.getName()) && !serializationLib.equals(ParquetHiveSerDe.class.getName()) && !serializationLib.equals(OrcSerde.class.getName())) {
            throw new HiveException(ErrorMsg.CANNOT_REPLACE_COLUMNS, alterTbl.getOldName());
        }
        final boolean isOrcSchemaEvolution = serializationLib.equals(OrcSerde.class.getName()) && isSchemaEvolutionEnabled(tbl);
        // adding columns and limited integer type promotion is supported for ORC schema evolution
        if (isOrcSchemaEvolution) {
            final List<FieldSchema> existingCols = sd.getCols();
            final List<FieldSchema> replaceCols = alterTbl.getNewCols();
            if (replaceCols.size() < existingCols.size()) {
                throw new HiveException(ErrorMsg.REPLACE_CANNOT_DROP_COLUMNS, alterTbl.getOldName());
            }
        }
        sd.setCols(alterTbl.getNewCols());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) {
        if (StatsSetupConst.USER.equals(environmentContext.getProperties().get(StatsSetupConst.STATS_GENERATED))) {
            environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
        }
        if (part != null) {
            part.getTPartition().getParameters().putAll(alterTbl.getProps());
        } else {
            tbl.getTTable().getParameters().putAll(alterTbl.getProps());
        }
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.DROPPROPS) {
        Iterator<String> keyItr = alterTbl.getProps().keySet().iterator();
        if (StatsSetupConst.USER.equals(environmentContext.getProperties().get(StatsSetupConst.STATS_GENERATED))) {
            // when a user-generated stats parameter is dropped, allow stats to be updated automatically
            environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
        }
        while (keyItr.hasNext()) {
            if (part != null) {
                part.getTPartition().getParameters().remove(keyItr.next());
            } else {
                tbl.getTTable().getParameters().remove(keyItr.next());
            }
        }
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) {
        StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
        sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDE) {
        StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
        String serdeName = alterTbl.getSerdeName();
        String oldSerdeName = sd.getSerdeInfo().getSerializationLib();
        // if orc table, restrict changing the serde as it can break schema evolution
        if (isSchemaEvolutionEnabled(tbl) && oldSerdeName.equalsIgnoreCase(OrcSerde.class.getName()) && !serdeName.equalsIgnoreCase(OrcSerde.class.getName())) {
            throw new HiveException(ErrorMsg.CANNOT_CHANGE_SERDE, OrcSerde.class.getSimpleName(), alterTbl.getOldName());
        }
        sd.getSerdeInfo().setSerializationLib(serdeName);
        if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
            sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps());
        }
        if (part != null) {
            // TODO: wtf? This doesn't do anything.
            part.getTPartition().getSd().setCols(part.getTPartition().getSd().getCols());
        } else {
            if (Table.shouldStoreFieldsInMetastore(conf, serdeName, tbl.getParameters()) && !Table.hasMetastoreBasedSchema(conf, oldSerdeName)) {
                // The new SerDe stores fields in the metastore while the old one did not, so save the
                // schema derived from the old SerDe. This may fail if some fields from the old SerDe
                // are too long to be stored in the metastore, but there's nothing we can do about that.
                try {
                    Deserializer oldSerde = MetaStoreUtils.getDeserializer(conf, tbl.getTTable(), false, oldSerdeName);
                    tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), oldSerde));
                } catch (MetaException ex) {
                    throw new HiveException(ex);
                }
            }
        }
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) {
        StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
        // if orc table, restrict changing the file format as it can break schema evolution
        if (isSchemaEvolutionEnabled(tbl) && sd.getInputFormat().equals(OrcInputFormat.class.getName()) && !alterTbl.getInputFormat().equals(OrcInputFormat.class.getName())) {
            throw new HiveException(ErrorMsg.CANNOT_CHANGE_FILEFORMAT, "ORC", alterTbl.getOldName());
        }
        sd.setInputFormat(alterTbl.getInputFormat());
        sd.setOutputFormat(alterTbl.getOutputFormat());
        if (alterTbl.getSerdeName() != null) {
            sd.getSerdeInfo().setSerializationLib(alterTbl.getSerdeName());
        }
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) {
        StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
        // validate sort columns and bucket columns
        List<String> columns = Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
        if (!alterTbl.isTurnOffSorting()) {
            Utilities.validateColumnNames(columns, alterTbl.getBucketColumns());
        }
        if (alterTbl.getSortColumns() != null) {
            Utilities.validateColumnNames(columns, Utilities.getColumnNamesFromSortCols(alterTbl.getSortColumns()));
        }
        if (alterTbl.isTurnOffSorting()) {
            sd.setSortCols(new ArrayList<Order>());
        } else if (alterTbl.getNumberBuckets() == -1) {
            // -1 buckets means to turn off bucketing
            sd.setBucketCols(new ArrayList<String>());
            sd.setNumBuckets(-1);
            sd.setSortCols(new ArrayList<Order>());
        } else {
            sd.setBucketCols(alterTbl.getBucketColumns());
            sd.setNumBuckets(alterTbl.getNumberBuckets());
            sd.setSortCols(alterTbl.getSortColumns());
        }
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
        StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
        String newLocation = alterTbl.getNewLocation();
        try {
            URI locUri = new URI(newLocation);
            if (!new Path(locUri).isAbsolute()) {
                throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation);
            }
            sd.setLocation(newLocation);
        } catch (URISyntaxException e) {
            throw new HiveException(e);
        }
        environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSKEWEDBY) {
        // Validation has been done at compile time; no validation is needed here.
        List<String> skewedColNames = null;
        List<List<String>> skewedValues = null;
        if (alterTbl.isTurnOffSkewed()) {
            // Convert skewed table to non-skewed table.
            skewedColNames = new ArrayList<String>();
            skewedValues = new ArrayList<List<String>>();
        } else {
            skewedColNames = alterTbl.getSkewedColNames();
            skewedValues = alterTbl.getSkewedColValues();
        }
        if (null == tbl.getSkewedInfo()) {
            // Convert non-skewed table to skewed table.
            SkewedInfo skewedInfo = new SkewedInfo();
            skewedInfo.setSkewedColNames(skewedColNames);
            skewedInfo.setSkewedColValues(skewedValues);
            tbl.setSkewedInfo(skewedInfo);
        } else {
            tbl.setSkewedColNames(skewedColNames);
            tbl.setSkewedColValues(skewedValues);
        }
        tbl.setStoredAsSubDirectories(alterTbl.isStoredAsSubDirectories());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERSKEWEDLOCATION) {
        // process location one-by-one
        Map<List<String>, String> locMaps = alterTbl.getSkewedLocations();
        Set<List<String>> keys = locMaps.keySet();
        for (List<String> key : keys) {
            String newLocation = locMaps.get(key);
            try {
                URI locUri = new URI(newLocation);
                if (part != null) {
                    List<String> slk = new ArrayList<String>(key);
                    part.setSkewedValueLocationMap(slk, locUri.toString());
                } else {
                    List<String> slk = new ArrayList<String>(key);
                    tbl.setSkewedValueLocationMap(slk, locUri.toString());
                }
            } catch (URISyntaxException e) {
                throw new HiveException(e);
            }
        }
        environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
    } else if (alterTbl.getOp() == AlterTableTypes.ALTERBUCKETNUM) {
        if (part != null) {
            if (part.getBucketCount() == alterTbl.getNumberBuckets()) {
                return 0;
            }
            part.setBucketCount(alterTbl.getNumberBuckets());
        } else {
            if (tbl.getNumBuckets() == alterTbl.getNumberBuckets()) {
                return 0;
            }
            tbl.setNumBuckets(alterTbl.getNumberBuckets());
        }
    } else {
        throw new HiveException(ErrorMsg.UNSUPPORTED_ALTER_TBL_OP, alterTbl.getOp().toString());
    }
    return 0;
}
Also used : HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) LazySimpleSerDe(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) ArrayList(java.util.ArrayList) URISyntaxException(java.net.URISyntaxException) URI(java.net.URI) EnvironmentContext(org.apache.hadoop.hive.metastore.api.EnvironmentContext) SkewedInfo(org.apache.hadoop.hive.metastore.api.SkewedInfo) Iterator(java.util.Iterator) ArrayList(java.util.ArrayList) AbstractList(java.util.AbstractList) List(java.util.List) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) Order(org.apache.hadoop.hive.metastore.api.Order) Path(org.apache.hadoop.fs.Path) DynamicSerDe(org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe) MetadataTypedColumnsetSerDe(org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe) OrcSerde(org.apache.hadoop.hive.ql.io.orc.OrcSerde) OrcInputFormat(org.apache.hadoop.hive.ql.io.orc.OrcInputFormat) Deserializer(org.apache.hadoop.hive.serde2.Deserializer)
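
The RENAMECOLUMN branch above is the densest part of the method: it renames the matching column in place, holds the column aside when FIRST or AFTER is requested, and re-inserts it at the computed position. The standalone sketch below isolates that positioning logic on a plain FieldSchema list; RenameColumnSketch and renameAndReorder are hypothetical names, and the duplicate-name / column-not-found validation from the original is deliberately omitted.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;

// Hypothetical helper mirroring the FIRST / AFTER positioning logic of the RENAMECOLUMN branch.
public class RenameColumnSketch {

    static List<FieldSchema> renameAndReorder(List<FieldSchema> oldCols, String oldName,
            String newName, boolean first, String afterCol) {
        List<FieldSchema> newCols = new ArrayList<FieldSchema>();
        FieldSchema moved = null;
        boolean moveRequested = first || (afterCol != null && !afterCol.trim().isEmpty());
        int position = first ? 0 : -1;
        int i = 1;
        for (FieldSchema col : oldCols) {
            // remember the pre-rename name: AFTER is matched against the old names
            String name = col.getName();
            if (name.equalsIgnoreCase(oldName)) {
                col.setName(newName);
                if (moveRequested) {
                    // hold the renamed column aside and re-insert it after the loop
                    moved = col;
                    continue;
                }
            }
            if (afterCol != null && !afterCol.trim().isEmpty() && name.equalsIgnoreCase(afterCol)) {
                // insert directly after this column
                position = i;
            }
            i++;
            newCols.add(col);
        }
        if (position >= 0 && moved != null) {
            newCols.add(position, moved);
        }
        return newCols;
    }

    public static void main(String[] args) {
        List<FieldSchema> cols = new ArrayList<FieldSchema>();
        cols.add(new FieldSchema("a", "int", null));
        cols.add(new FieldSchema("b", "string", null));
        cols.add(new FieldSchema("c", "double", null));
        // rename b to b2 and move it AFTER c: prints a, c, b2
        for (FieldSchema f : renameAndReorder(cols, "b", "b2", false, "c")) {
            System.out.println(f.getName());
        }
    }
}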

Example 22 with HiveException

use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.

the class DDLTask method showLocks.

/**
   * Write a list of the current locks to a file.
   * @param db
   *          the Hive session, used to resolve the table/partition lock objects.
   * @param showLocks
   *          the locks we're interested in.
   * @return Returns 0 when execution succeeds and above 0 if it fails.
   * @throws HiveException
   *           Throws this exception if an unexpected error occurs.
   */
private int showLocks(Hive db, ShowLocksDesc showLocks) throws HiveException {
    Context ctx = driverContext.getCtx();
    HiveTxnManager txnManager = ctx.getHiveTxnManager();
    HiveLockManager lockMgr = txnManager.getLockManager();
    if (txnManager.useNewShowLocksFormat())
        return showLocksNewFormat(showLocks, lockMgr);
    boolean isExt = showLocks.isExt();
    if (lockMgr == null) {
        throw new HiveException("show Locks LockManager not specified");
    }
    // write the results in the file
    DataOutputStream outStream = getOutputStream(showLocks.getResFile());
    try {
        List<HiveLock> locks = null;
        if (showLocks.getTableName() == null) {
            // TODO should be doing security check here.  Users should not be
            // able to see each other's locks.
            locks = lockMgr.getLocks(false, isExt);
        } else {
            locks = lockMgr.getLocks(HiveLockObject.createFrom(db, showLocks.getTableName(), showLocks.getPartSpec()), true, isExt);
        }
        Collections.sort(locks, new Comparator<HiveLock>() {

            @Override
            public int compare(HiveLock o1, HiveLock o2) {
                int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName());
                if (cmp == 0) {
                    if (o1.getHiveLockMode() == o2.getHiveLockMode()) {
                        return cmp;
                    }
                    // EXCLUSIVE locks occur before SHARED locks
                    if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) {
                        return -1;
                    }
                    return +1;
                }
                return cmp;
            }
        });
        Iterator<HiveLock> locksIter = locks.iterator();
        while (locksIter.hasNext()) {
            HiveLock lock = locksIter.next();
            outStream.writeBytes(lock.getHiveLockObject().getDisplayName());
            outStream.write(separator);
            outStream.writeBytes(lock.getHiveLockMode().toString());
            if (isExt) {
                HiveLockObjectData lockData = lock.getHiveLockObject().getData();
                if (lockData != null) {
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_QUERYID:" + lockData.getQueryId());
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_TIME:" + lockData.getLockTime());
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_MODE:" + lockData.getLockMode());
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_QUERYSTRING:" + lockData.getQueryStr());
                }
            }
            outStream.write(terminator);
        }
    } catch (FileNotFoundException e) {
        LOG.warn("show function: " + stringifyException(e));
        return 1;
    } catch (IOException e) {
        LOG.warn("show function: " + stringifyException(e));
        return 1;
    } catch (Exception e) {
        throw new HiveException(e.toString(), e);
    } finally {
        IOUtils.closeStream(outStream);
    }
    return 0;
}
Also used : EnvironmentContext(org.apache.hadoop.hive.metastore.api.EnvironmentContext) Context(org.apache.hadoop.hive.ql.Context) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) DriverContext(org.apache.hadoop.hive.ql.DriverContext) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) DataOutputStream(java.io.DataOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FileNotFoundException(java.io.FileNotFoundException) HiveLock(org.apache.hadoop.hive.ql.lockmgr.HiveLock) IOException(java.io.IOException) HiveLockManager(org.apache.hadoop.hive.ql.lockmgr.HiveLockManager) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) IOException(java.io.IOException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) URISyntaxException(java.net.URISyntaxException) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) StringUtils.stringifyException(org.apache.hadoop.util.StringUtils.stringifyException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) SQLException(java.sql.SQLException) FileNotFoundException(java.io.FileNotFoundException) HiveAuthzPluginException(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException) InvalidTableException(org.apache.hadoop.hive.ql.metadata.InvalidTableException) HiveLockObjectData(org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager)
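
The anonymous Comparator above orders locks by lock-object name and, for equal names, puts EXCLUSIVE locks ahead of SHARED ones. The minimal stand-in below shows the same ordering rule; LockEntry and LockMode are hypothetical local types used instead of the real HiveLock and HiveLockMode classes so the example stays self-contained.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Hypothetical stand-in for the lock ordering used in showLocks.
public class LockOrderingSketch {

    enum LockMode { EXCLUSIVE, SHARED }

    static final class LockEntry {
        final String name;
        final LockMode mode;
        LockEntry(String name, LockMode mode) { this.name = name; this.mode = mode; }
        @Override public String toString() { return name + "/" + mode; }
    }

    // Same rule as the anonymous Comparator above: primary key is the object name,
    // and for equal names EXCLUSIVE sorts before SHARED.
    static final Comparator<LockEntry> BY_NAME_THEN_EXCLUSIVE_FIRST =
            Comparator.comparing((LockEntry e) -> e.name)
                    .thenComparing(e -> e.mode == LockMode.EXCLUSIVE ? 0 : 1);

    public static void main(String[] args) {
        List<LockEntry> locks = new ArrayList<LockEntry>();
        locks.add(new LockEntry("default@t1", LockMode.SHARED));
        locks.add(new LockEntry("default@t1", LockMode.EXCLUSIVE));
        locks.add(new LockEntry("default@a0", LockMode.SHARED));
        locks.sort(BY_NAME_THEN_EXCLUSIVE_FIRST);
        // prints default@a0/SHARED, default@t1/EXCLUSIVE, default@t1/SHARED
        locks.forEach(System.out::println);
    }
}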

Example 23 with HiveException

use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.

the class DDLTask method showGrants.

private int showGrants(Hive db, ShowGrantDesc showGrantDesc) throws HiveException {
    HiveAuthorizer authorizer = getSessionAuthorizer(db);
    try {
        List<HivePrivilegeInfo> privInfos = authorizer.showPrivileges(getAuthorizationTranslator(authorizer).getHivePrincipal(showGrantDesc.getPrincipalDesc()), getAuthorizationTranslator(authorizer).getHivePrivilegeObject(showGrantDesc.getHiveObj()));
        boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST);
        writeToFile(writeGrantInfo(privInfos, testMode), showGrantDesc.getResFile());
    } catch (IOException e) {
        throw new HiveException("Error in show grant statement", e);
    }
    return 0;
}
Also used : HiveAuthorizer(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer) HivePrivilegeInfo(org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeInfo) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) IOException(java.io.IOException)

Example 24 with HiveException

use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.

the class DDLTask method alterTableAlterPart.

/**
   * Alter partition column type in a table
   *
   * @param db
   *          Database containing the table whose partition column type is being altered.
   * @param alterPartitionDesc
   *          change partition column type.
   * @return Returns 0 when execution succeeds and above 0 if it fails.
   * @throws HiveException
   */
private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionDesc) throws HiveException {
    Table tbl = db.getTable(alterPartitionDesc.getTableName(), true);
    String tabName = alterPartitionDesc.getTableName();
    // This is checked by DDLSemanticAnalyzer
    assert (tbl.isPartitioned());
    List<FieldSchema> newPartitionKeys = new ArrayList<FieldSchema>();
    // Check that all existing partition values can be converted to the new column type
    // with a non-null value before trying to alter the partition column type.
    try {
        Set<Partition> partitions = db.getAllPartitionsOf(tbl);
        int colIndex = -1;
        for (FieldSchema col : tbl.getTTable().getPartitionKeys()) {
            colIndex++;
            if (col.getName().compareTo(alterPartitionDesc.getPartKeySpec().getName()) == 0) {
                break;
            }
        }
        if (colIndex == -1 || colIndex == tbl.getTTable().getPartitionKeys().size()) {
            throw new HiveException("Cannot find partition column " + alterPartitionDesc.getPartKeySpec().getName());
        }
        TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(alterPartitionDesc.getPartKeySpec().getType());
        ObjectInspector outputOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
        Converter converter = ObjectInspectorConverters.getConverter(PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
        // For all the existing partitions, check if the value can be type casted to a non-null object
        for (Partition part : partitions) {
            if (part.getName().equals(conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME))) {
                continue;
            }
            try {
                String value = part.getValues().get(colIndex);
                Object convertedValue = converter.convert(value);
                if (convertedValue == null) {
                    throw new HiveException(" Converting from " + TypeInfoFactory.stringTypeInfo + " to " + expectedType + " for value : " + value + " resulted in NULL object");
                }
            } catch (Exception e) {
                throw new HiveException("Exception while converting " + TypeInfoFactory.stringTypeInfo + " to " + expectedType + " for value : " + part.getValues().get(colIndex));
            }
        }
    } catch (Exception e) {
        throw new HiveException("Exception while checking type conversion of existing partition values to " + alterPartitionDesc.getPartKeySpec() + " : " + e.getMessage());
    }
    for (FieldSchema col : tbl.getTTable().getPartitionKeys()) {
        if (col.getName().compareTo(alterPartitionDesc.getPartKeySpec().getName()) == 0) {
            newPartitionKeys.add(alterPartitionDesc.getPartKeySpec());
        } else {
            newPartitionKeys.add(col);
        }
    }
    tbl.getTTable().setPartitionKeys(newPartitionKeys);
    try {
        db.alterTable(tabName, tbl, null);
    } catch (InvalidOperationException e) {
        throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "Unable to alter " + tabName);
    }
    work.getInputs().add(new ReadEntity(tbl));
    // We've already locked the table as the input, don't relock it as the output.
    addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
    return 0;
}
Also used : Partition(org.apache.hadoop.hive.ql.metadata.Partition) AlterTableExchangePartition(org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition) ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) Table(org.apache.hadoop.hive.ql.metadata.Table) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) TypeInfo(org.apache.hadoop.hive.serde2.typeinfo.TypeInfo) DecimalTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) IOException(java.io.IOException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) URISyntaxException(java.net.URISyntaxException) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) StringUtils.stringifyException(org.apache.hadoop.util.StringUtils.stringifyException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) SQLException(java.sql.SQLException) FileNotFoundException(java.io.FileNotFoundException) HiveAuthzPluginException(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException) InvalidTableException(org.apache.hadoop.hive.ql.metadata.InvalidTableException) ReadEntity(org.apache.hadoop.hive.ql.hooks.ReadEntity) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) StatObjectConverter(org.apache.hadoop.hive.metastore.StatObjectConverter) Converter(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter) HivePrivilegeObject(org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject) HiveLockObject(org.apache.hadoop.hive.ql.lockmgr.HiveLockObject) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity)
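
The conversion check in alterTableAlterPart reduces to: build a converter from the Java string object inspector to the standard writable object inspector of the new type, and reject the ALTER if any existing partition value converts to null. The sketch below shows that check in isolation using only the serde2 APIs already referenced above; PartitionValueCastSketch and castsCleanly are hypothetical names.

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

// Minimal sketch of the cast check performed on existing partition values.
public class PartitionValueCastSketch {

    // Returns true when the string partition value converts to a non-null object of newType.
    static boolean castsCleanly(String partitionValue, String newType) {
        TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(newType);
        ObjectInspector outputOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
        Converter converter = ObjectInspectorConverters.getConverter(
                PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
        return converter.convert(partitionValue) != null;
    }

    public static void main(String[] args) {
        System.out.println(castsCleanly("2021", "int"));  // expected: true
        System.out.println(castsCleanly("abc", "int"));   // expected: false, this value would fail the ALTER
    }
}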

Example 25 with HiveException

use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.

the class FileSinkOperator method closeOp.

@Override
public void closeOp(boolean abort) throws HiveException {
    row_count.set(numRows);
    LOG.info(toString() + ": records written - " + numRows);
    if (!bDynParts && !filesCreated) {
        boolean skipFiles = "tez".equalsIgnoreCase(HiveConf.getVar(hconf, ConfVars.HIVE_EXECUTION_ENGINE));
        if (skipFiles) {
            Class<?> clazz = conf.getTableInfo().getOutputFileFormatClass();
            skipFiles = !StreamingOutputFormat.class.isAssignableFrom(clazz);
        }
        if (!skipFiles) {
            createBucketFiles(fsp);
        }
    }
    lastProgressReport = System.currentTimeMillis();
    if (!abort) {
        // When using ThriftJDBCBinarySerDe, rows are buffered inside the SerDe and written out in
        // batches; serializing a null row here flushes whatever is left in the buffer
        // (the size of the buffer is kept track of in the ThriftJDBCBinarySerDe).
        if (conf.isUsingThriftJDBCBinarySerDe()) {
            try {
                recordValue = serializer.serialize(null, inputObjInspectors[0]);
                if (null != fpaths) {
                    rowOutWriters = fpaths.outWriters;
                    rowOutWriters[0].write(recordValue);
                }
            } catch (SerDeException | IOException e) {
                throw new HiveException(e);
            }
        }
        for (FSPaths fsp : valToPaths.values()) {
            fsp.closeWriters(abort);
            // collect writer-level statistics, which will be aggregated in the case of spray writers
            if (conf.isGatherStats() && isCollectRWStats) {
                if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID) {
                    for (int idx = 0; idx < fsp.outWriters.length; idx++) {
                        RecordWriter outWriter = fsp.outWriters[idx];
                        if (outWriter != null) {
                            SerDeStats stats = ((StatsProvidingRecordWriter) outWriter).getStats();
                            if (stats != null) {
                                fsp.stat.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize());
                                fsp.stat.addToStat(StatsSetupConst.ROW_COUNT, stats.getRowCount());
                            }
                        }
                    }
                } else {
                    for (int i = 0; i < fsp.updaters.length; i++) {
                        if (fsp.updaters[i] != null) {
                            SerDeStats stats = fsp.updaters[i].getStats();
                            if (stats != null) {
                                fsp.stat.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize());
                                fsp.stat.addToStat(StatsSetupConst.ROW_COUNT, stats.getRowCount());
                            }
                        }
                    }
                }
            }
            if (isNativeTable) {
                fsp.commit(fs);
            }
        }
        // Only publish stats if this operator's flag was set to gather stats
        if (conf.isGatherStats()) {
            publishStats();
        }
    } else {
        // We get here on abort: Hadoop calls close() even if an exception was thrown in map() or reduce().
        for (FSPaths fsp : valToPaths.values()) {
            fsp.abortWriters(fs, abort, !autoDelete && isNativeTable);
        }
    }
    fsp = prevFsp = null;
    super.closeOp(abort);
}
Also used : SerDeStats(org.apache.hadoop.hive.serde2.SerDeStats) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) StatsProvidingRecordWriter(org.apache.hadoop.hive.ql.io.StatsProvidingRecordWriter) StatsProvidingRecordWriter(org.apache.hadoop.hive.ql.io.StatsProvidingRecordWriter) IOException(java.io.IOException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException)
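
The stats-gathering block in closeOp pulls SerDeStats out of each writer (or updater) and adds the raw data size and row count to the per-path statistics. The hypothetical helper below performs the same aggregation into a plain map, assuming an array of StatsProvidingRecordWriter instances; WriterStatsSketch and aggregate are illustrative names, not Hive APIs.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.ql.io.StatsProvidingRecordWriter;
import org.apache.hadoop.hive.serde2.SerDeStats;

// Hypothetical aggregation of writer-level stats, mirroring the closeOp loop above.
public class WriterStatsSketch {

    // Sums raw data size and row count over all writers that expose SerDeStats.
    static Map<String, Long> aggregate(StatsProvidingRecordWriter[] writers) {
        Map<String, Long> totals = new HashMap<String, Long>();
        totals.put(StatsSetupConst.RAW_DATA_SIZE, 0L);
        totals.put(StatsSetupConst.ROW_COUNT, 0L);
        for (StatsProvidingRecordWriter writer : writers) {
            if (writer == null) {
                continue;
            }
            SerDeStats stats = writer.getStats();
            if (stats != null) {
                totals.merge(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize(), Long::sum);
                totals.merge(StatsSetupConst.ROW_COUNT, stats.getRowCount(), Long::sum);
            }
        }
        return totals;
    }
}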

Aggregations

HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 364
IOException (java.io.IOException) 144
ArrayList (java.util.ArrayList) 64
Table (org.apache.hadoop.hive.ql.metadata.Table) 60
Path (org.apache.hadoop.fs.Path) 55
SerDeException (org.apache.hadoop.hive.serde2.SerDeException) 42
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException) 41
Partition (org.apache.hadoop.hive.ql.metadata.Partition) 36
MetaException (org.apache.hadoop.hive.metastore.api.MetaException) 35
ObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) 35
StructObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) 31
FileNotFoundException (java.io.FileNotFoundException) 26
FileSystem (org.apache.hadoop.fs.FileSystem) 26
InvalidTableException (org.apache.hadoop.hive.ql.metadata.InvalidTableException) 26
URISyntaxException (java.net.URISyntaxException) 25
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc) 25
InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException) 24
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema) 23
HashMap (java.util.HashMap) 21
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException) 21