Example 1 with AddPartitionDesc

use of org.apache.hadoop.hive.ql.plan.AddPartitionDesc in project hive by apache.

the class ImportSemanticAnalyzer method getBaseAddPartitionDescFromPartition.

private static AddPartitionDesc getBaseAddPartitionDescFromPartition(Path fromPath, String dbname, ImportTableDesc tblDesc, Partition partition) throws MetaException, SemanticException {
    AddPartitionDesc partsDesc = new AddPartitionDesc(dbname, tblDesc.getTableName(), EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()), partition.getSd().getLocation(), partition.getParameters());
    AddPartitionDesc.OnePartitionDesc partDesc = partsDesc.getPartition(0);
    partDesc.setInputFormat(partition.getSd().getInputFormat());
    partDesc.setOutputFormat(partition.getSd().getOutputFormat());
    partDesc.setNumBuckets(partition.getSd().getNumBuckets());
    partDesc.setCols(partition.getSd().getCols());
    partDesc.setSerializationLib(partition.getSd().getSerdeInfo().getSerializationLib());
    partDesc.setSerdeParams(partition.getSd().getSerdeInfo().getParameters());
    partDesc.setBucketCols(partition.getSd().getBucketCols());
    partDesc.setSortCols(partition.getSd().getSortCols());
    partDesc.setLocation(new Path(fromPath, Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues())).toString());
    return partsDesc;
}
Also used : Path(org.apache.hadoop.fs.Path) AddPartitionDesc(org.apache.hadoop.hive.ql.plan.AddPartitionDesc)
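
For orientation, here is the same constructor-then-mutate pattern in isolation: build the descriptor with the (dbName, tableName, partSpec, location, partParams) constructor, then refine the single OnePartitionDesc it holds. This is a minimal sketch assuming the API shown above; the database, table, and partition values are hypothetical.

// Build a one-partition descriptor, then fill in its storage details.
Map<String, String> partSpec = new LinkedHashMap<>();
partSpec.put("ds", "2018-01-01");
AddPartitionDesc desc = new AddPartitionDesc("default", "sales", partSpec,
        "/warehouse/sales/ds=2018-01-01", new HashMap<String, String>());
// The constructor registers exactly one partition; index 0 retrieves it.
AddPartitionDesc.OnePartitionDesc one = desc.getPartition(0);
one.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
one.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");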

Example 2 with AddPartitionDesc

use of org.apache.hadoop.hive.ql.plan.AddPartitionDesc in project hive by apache.

the class ImportSemanticAnalyzer method addSinglePartition.

private static Task<?> addSinglePartition(URI fromURI, FileSystem fs, ImportTableDesc tblDesc, Table table, Warehouse wh, AddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x) throws MetaException, IOException, HiveException {
    AddPartitionDesc.OnePartitionDesc partSpec = addPartitionDesc.getPartition(0);
    if (tblDesc.isExternal() && tblDesc.getLocation() == null) {
        x.getLOG().debug("Importing in-place: adding AddPart for partition " + partSpecToString(partSpec.getPartSpec()));
        // addPartitionDesc already has the right partition location
        Task<?> addPartTask = TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf());
        return addPartTask;
    } else {
        String srcLocation = partSpec.getLocation();
        fixLocationInPartSpec(fs, tblDesc, table, wh, replicationSpec, partSpec, x);
        x.getLOG().debug("adding dependent CopyWork/AddPart/MoveWork for partition " + partSpecToString(partSpec.getPartSpec()) + " with source location: " + srcLocation);
        Path tgtLocation = new Path(partSpec.getLocation());
        Path tmpPath = x.getCtx().getExternalTmpPath(tgtLocation);
        Task<?> copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, new Path(srcLocation), tmpPath, x.getConf());
        Task<?> addPartTask = TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf());
        LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath, Utilities.getTableDesc(table), partSpec.getPartSpec(), true);
        loadTableWork.setInheritTableSpecs(false);
        Task<?> loadPartTask = TaskFactory.get(new MoveWork(x.getInputs(), x.getOutputs(), loadTableWork, null, false), x.getConf());
        copyTask.addDependentTask(loadPartTask);
        addPartTask.addDependentTask(loadPartTask);
        x.getTasks().add(copyTask);
        return addPartTask;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork) AddPartitionDesc(org.apache.hadoop.hive.ql.plan.AddPartitionDesc)
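
The design choice worth noting in the non-in-place branch is the dependency wiring at the end: the MoveWork that actually loads the partition runs only after both the data copy and the metastore add-partition succeed. Only copyTask is rooted in x.getTasks(), while addPartTask is returned for the caller to chain. Schematically (a comment-only sketch of the resulting task DAG):

// copyTask (CopyWork: srcLocation -> tmpPath)   ---\
//                                                   +--> loadPartTask (MoveWork: tmpPath -> partition)
// addPartTask (DDLWork: AddPartitionDesc)       ---/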

Example 3 with AddPartitionDesc

use of org.apache.hadoop.hive.ql.plan.AddPartitionDesc in project hive by apache.

the class DDLTask method execute.

@Override
public int execute(DriverContext driverContext) {
    if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
        return 0;
    }
        // Get a Hive client handle; each branch below dispatches on whichever desc in the work is non-null
    Hive db;
    try {
        db = Hive.get(conf);
        CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
        if (null != createDatabaseDesc) {
            return createDatabase(db, createDatabaseDesc);
        }
        DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
        if (dropDatabaseDesc != null) {
            return dropDatabase(db, dropDatabaseDesc);
        }
        LockDatabaseDesc lockDatabaseDesc = work.getLockDatabaseDesc();
        if (lockDatabaseDesc != null) {
            return lockDatabase(db, lockDatabaseDesc);
        }
        UnlockDatabaseDesc unlockDatabaseDesc = work.getUnlockDatabaseDesc();
        if (unlockDatabaseDesc != null) {
            return unlockDatabase(db, unlockDatabaseDesc);
        }
        SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
        if (switchDatabaseDesc != null) {
            return switchDatabase(db, switchDatabaseDesc);
        }
        DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc();
        if (descDatabaseDesc != null) {
            return descDatabase(db, descDatabaseDesc);
        }
        AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
        if (alterDatabaseDesc != null) {
            return alterDatabase(db, alterDatabaseDesc);
        }
        CreateTableDesc crtTbl = work.getCreateTblDesc();
        if (crtTbl != null) {
            return createTable(db, crtTbl);
        }
        CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
        if (crtTblLike != null) {
            return createTableLike(db, crtTblLike);
        }
        DropTableDesc dropTbl = work.getDropTblDesc();
        if (dropTbl != null) {
            dropTableOrPartitions(db, dropTbl);
            return 0;
        }
        AlterTableDesc alterTbl = work.getAlterTblDesc();
        if (alterTbl != null) {
            if (!allowOperationInReplicationScope(db, alterTbl.getOldName(), null, alterTbl.getReplicationSpec())) {
                // No alter: the table is missing, either due to a drop/rename that follows this alter,
                // or because the existing table is newer than our update.
                LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", alterTbl.getOldName());
                return 0;
            }
            if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT) {
                return dropConstraint(db, alterTbl);
            } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) {
                return addConstraints(db, alterTbl);
            } else {
                return alterTable(db, alterTbl);
            }
        }
        CreateViewDesc crtView = work.getCreateViewDesc();
        if (crtView != null) {
            return createView(db, crtView);
        }
        AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
        if (addPartitionDesc != null) {
            return addPartitions(db, addPartitionDesc);
        }
        RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();
        if (renamePartitionDesc != null) {
            return renamePartition(db, renamePartitionDesc);
        }
        AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
        if (simpleDesc != null) {
            if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
                return touch(db, simpleDesc);
            } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) {
                return archive(db, simpleDesc, driverContext);
            } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) {
                return unarchive(db, simpleDesc);
            } else if (simpleDesc.getType() == AlterTableTypes.COMPACT) {
                return compact(db, simpleDesc);
            }
        }
        MsckDesc msckDesc = work.getMsckDesc();
        if (msckDesc != null) {
            return msck(db, msckDesc);
        }
        DescTableDesc descTbl = work.getDescTblDesc();
        if (descTbl != null) {
            return describeTable(db, descTbl);
        }
        DescFunctionDesc descFunc = work.getDescFunctionDesc();
        if (descFunc != null) {
            return describeFunction(db, descFunc);
        }
        ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
        if (showDatabases != null) {
            return showDatabases(db, showDatabases);
        }
        ShowTablesDesc showTbls = work.getShowTblsDesc();
        if (showTbls != null) {
            return showTablesOrViews(db, showTbls);
        }
        ShowColumnsDesc showCols = work.getShowColumnsDesc();
        if (showCols != null) {
            return showColumns(db, showCols);
        }
        ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
        if (showTblStatus != null) {
            return showTableStatus(db, showTblStatus);
        }
        ShowTblPropertiesDesc showTblProperties = work.getShowTblPropertiesDesc();
        if (showTblProperties != null) {
            return showTableProperties(db, showTblProperties);
        }
        ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
        if (showFuncs != null) {
            return showFunctions(db, showFuncs);
        }
        ShowLocksDesc showLocks = work.getShowLocksDesc();
        if (showLocks != null) {
            return showLocks(db, showLocks);
        }
        ShowCompactionsDesc compactionsDesc = work.getShowCompactionsDesc();
        if (compactionsDesc != null) {
            return showCompactions(db, compactionsDesc);
        }
        ShowTxnsDesc txnsDesc = work.getShowTxnsDesc();
        if (txnsDesc != null) {
            return showTxns(db, txnsDesc);
        }
        AbortTxnsDesc abortTxnsDesc = work.getAbortTxnsDesc();
        if (abortTxnsDesc != null) {
            return abortTxns(db, abortTxnsDesc);
        }
        LockTableDesc lockTbl = work.getLockTblDesc();
        if (lockTbl != null) {
            return lockTable(db, lockTbl);
        }
        UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
        if (unlockTbl != null) {
            return unlockTable(db, unlockTbl);
        }
        ShowPartitionsDesc showParts = work.getShowPartsDesc();
        if (showParts != null) {
            return showPartitions(db, showParts);
        }
        ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc();
        if (showCreateDb != null) {
            return showCreateDatabase(db, showCreateDb);
        }
        ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc();
        if (showCreateTbl != null) {
            return showCreateTable(db, showCreateTbl);
        }
        ShowConfDesc showConf = work.getShowConfDesc();
        if (showConf != null) {
            return showConf(db, showConf);
        }
        RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
        if (roleDDLDesc != null) {
            return roleDDL(db, roleDDLDesc);
        }
        GrantDesc grantDesc = work.getGrantDesc();
        if (grantDesc != null) {
            return grantOrRevokePrivileges(db, grantDesc.getPrincipals(), grantDesc.getPrivileges(), grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(), grantDesc.getGrantorType(), grantDesc.isGrantOption(), true);
        }
        RevokeDesc revokeDesc = work.getRevokeDesc();
        if (revokeDesc != null) {
            return grantOrRevokePrivileges(db, revokeDesc.getPrincipals(), revokeDesc.getPrivileges(), revokeDesc.getPrivilegeSubjectDesc(), null, null, revokeDesc.isGrantOption(), false);
        }
        ShowGrantDesc showGrantDesc = work.getShowGrantDesc();
        if (showGrantDesc != null) {
            return showGrants(db, showGrantDesc);
        }
        GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL();
        if (grantOrRevokeRoleDDL != null) {
            return grantOrRevokeRole(db, grantOrRevokeRoleDDL);
        }
        AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc();
        if (mergeFilesDesc != null) {
            return mergeFiles(db, mergeFilesDesc, driverContext);
        }
        AlterTableAlterPartDesc alterPartDesc = work.getAlterTableAlterPartDesc();
        if (alterPartDesc != null) {
            return alterTableAlterPart(db, alterPartDesc);
        }
        TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc();
        if (truncateTableDesc != null) {
            return truncateTable(db, truncateTableDesc);
        }
        AlterTableExchangePartition alterTableExchangePartition = work.getAlterTableExchangePartition();
        if (alterTableExchangePartition != null) {
            return exchangeTablePartition(db, alterTableExchangePartition);
        }
        CacheMetadataDesc cacheMetadataDesc = work.getCacheMetadataDesc();
        if (cacheMetadataDesc != null) {
            return cacheMetadata(db, cacheMetadataDesc);
        }
        InsertTableDesc insertTableDesc = work.getInsertTableDesc();
        if (insertTableDesc != null) {
            return insertCommitWork(db, insertTableDesc);
        }
        PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc();
        if (preInsertTableDesc != null) {
            return preInsertWork(db, preInsertTableDesc);
        }
        KillQueryDesc killQueryDesc = work.getKillQueryDesc();
        if (killQueryDesc != null) {
            return killQuery(db, killQueryDesc);
        }
        if (work.getCreateResourcePlanDesc() != null) {
            return createResourcePlan(db, work.getCreateResourcePlanDesc());
        }
        if (work.getShowResourcePlanDesc() != null) {
            return showResourcePlans(db, work.getShowResourcePlanDesc());
        }
        if (work.getAlterResourcePlanDesc() != null) {
            return alterResourcePlan(db, work.getAlterResourcePlanDesc());
        }
        if (work.getDropResourcePlanDesc() != null) {
            return dropResourcePlan(db, work.getDropResourcePlanDesc());
        }
        if (work.getCreateWMTriggerDesc() != null) {
            return createWMTrigger(db, work.getCreateWMTriggerDesc());
        }
        if (work.getAlterWMTriggerDesc() != null) {
            return alterWMTrigger(db, work.getAlterWMTriggerDesc());
        }
        if (work.getDropWMTriggerDesc() != null) {
            return dropWMTrigger(db, work.getDropWMTriggerDesc());
        }
        if (work.getWmPoolDesc() != null) {
            return createOrAlterWMPool(db, work.getWmPoolDesc());
        }
        if (work.getDropWMPoolDesc() != null) {
            return dropWMPool(db, work.getDropWMPoolDesc());
        }
        if (work.getWmMappingDesc() != null) {
            return createOrAlterWMMapping(db, work.getWmMappingDesc());
        }
        if (work.getDropWMMappingDesc() != null) {
            return dropWMMapping(db, work.getDropWMMappingDesc());
        }
        if (work.getTriggerToPoolMappingDesc() != null) {
            return createOrDropTriggerToPoolMapping(db, work.getTriggerToPoolMappingDesc());
        }
        if (work.getAlterMaterializedViewDesc() != null) {
            return alterMaterializedView(db, work.getAlterMaterializedViewDesc());
        }
    } catch (Throwable e) {
        failed(e);
        return 1;
    }
    assert false;
    return 0;
}
Also used : DropDatabaseDesc(org.apache.hadoop.hive.ql.plan.DropDatabaseDesc) LockTableDesc(org.apache.hadoop.hive.ql.plan.LockTableDesc) UnlockTableDesc(org.apache.hadoop.hive.ql.plan.UnlockTableDesc) ShowTblPropertiesDesc(org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc) RenamePartitionDesc(org.apache.hadoop.hive.ql.plan.RenamePartitionDesc) ShowDatabasesDesc(org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc) DescFunctionDesc(org.apache.hadoop.hive.ql.plan.DescFunctionDesc) CreateViewDesc(org.apache.hadoop.hive.ql.plan.CreateViewDesc) ShowTxnsDesc(org.apache.hadoop.hive.ql.plan.ShowTxnsDesc) AlterDatabaseDesc(org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc) ShowCompactionsDesc(org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc) ShowGrantDesc(org.apache.hadoop.hive.ql.plan.ShowGrantDesc) GrantDesc(org.apache.hadoop.hive.ql.plan.GrantDesc) ShowGrantDesc(org.apache.hadoop.hive.ql.plan.ShowGrantDesc) ShowCreateTableDesc(org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc) AlterTableSimpleDesc(org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc) DescTableDesc(org.apache.hadoop.hive.ql.plan.DescTableDesc) GrantRevokeRoleDDL(org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL) TruncateTableDesc(org.apache.hadoop.hive.ql.plan.TruncateTableDesc) DropTableDesc(org.apache.hadoop.hive.ql.plan.DropTableDesc) ShowTablesDesc(org.apache.hadoop.hive.ql.plan.ShowTablesDesc) Hive(org.apache.hadoop.hive.ql.metadata.Hive) ShowFunctionsDesc(org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc) AbortTxnsDesc(org.apache.hadoop.hive.ql.plan.AbortTxnsDesc) ShowCreateDatabaseDesc(org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc) AlterTableExchangePartition(org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition) CacheMetadataDesc(org.apache.hadoop.hive.ql.plan.CacheMetadataDesc) UnlockDatabaseDesc(org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc) RevokeDesc(org.apache.hadoop.hive.ql.plan.RevokeDesc) AlterTableDesc(org.apache.hadoop.hive.ql.plan.AlterTableDesc) MsckDesc(org.apache.hadoop.hive.ql.plan.MsckDesc) ShowLocksDesc(org.apache.hadoop.hive.ql.plan.ShowLocksDesc) SwitchDatabaseDesc(org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc) ShowConfDesc(org.apache.hadoop.hive.ql.plan.ShowConfDesc) ShowPartitionsDesc(org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc) CreateTableLikeDesc(org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc) ShowTableStatusDesc(org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc) PreInsertTableDesc(org.apache.hadoop.hive.ql.parse.PreInsertTableDesc) AlterTableAlterPartDesc(org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc) RoleDDLDesc(org.apache.hadoop.hive.ql.plan.RoleDDLDesc) AlterTablePartMergeFilesDesc(org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc) CreateTableDesc(org.apache.hadoop.hive.ql.plan.CreateTableDesc) ShowCreateTableDesc(org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc) LockDatabaseDesc(org.apache.hadoop.hive.ql.plan.LockDatabaseDesc) CreateDatabaseDesc(org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc) ShowCreateDatabaseDesc(org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc) PreInsertTableDesc(org.apache.hadoop.hive.ql.parse.PreInsertTableDesc) InsertTableDesc(org.apache.hadoop.hive.ql.plan.InsertTableDesc) KillQueryDesc(org.apache.hadoop.hive.ql.plan.KillQueryDesc) AddPartitionDesc(org.apache.hadoop.hive.ql.plan.AddPartitionDesc) DescDatabaseDesc(org.apache.hadoop.hive.ql.plan.DescDatabaseDesc) ShowColumnsDesc(org.apache.hadoop.hive.ql.plan.ShowColumnsDesc)
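
DDLTask.execute is thus a flat dispatch: a DDLWork instance carries at most one non-null descriptor, and the first non-null getter wins. Reaching the AddPartitionDesc branch from outside looks just like the earlier examples; a minimal sketch, assuming input/output entity sets and a HiveConf are in scope (the variable names here are hypothetical):

// Wrap the descriptor in a DDLWork and let the task factory produce a DDLTask;
// execute() will then route it to addPartitions(db, addPartitionDesc).
DDLWork ddlWork = new DDLWork(inputs, outputs, addPartitionDesc);
Task<?> ddlTask = TaskFactory.get(ddlWork, conf);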

Example 4 with AddPartitionDesc

use of org.apache.hadoop.hive.ql.plan.AddPartitionDesc in project hive by apache.

the class ImportSemanticAnalyzer method alterSinglePartition.

private static Task<? extends Serializable> alterSinglePartition(URI fromURI, FileSystem fs, ImportTableDesc tblDesc, Table table, Warehouse wh, AddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn, EximUtil.SemanticAnalyzerWrapperContext x) throws MetaException, IOException, HiveException {
    addPartitionDesc.setReplaceMode(true);
    if ((replicationSpec != null) && (replicationSpec.isInReplicationScope())) {
        addPartitionDesc.setReplicationSpec(replicationSpec);
    }
    AddPartitionDesc.OnePartitionDesc partSpec = addPartitionDesc.getPartition(0);
    if (ptn == null) {
        fixLocationInPartSpec(fs, tblDesc, table, wh, replicationSpec, partSpec, x);
    } else {
        // use existing location
        partSpec.setLocation(ptn.getLocation());
    }
    return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc));
}
Also used : DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork) AddPartitionDesc(org.apache.hadoop.hive.ql.plan.AddPartitionDesc)
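
Note that there is no dedicated "alter partition metadata" descriptor here: alterSinglePartition reuses AddPartitionDesc with setReplaceMode(true), which tells the DDL layer to overwrite the existing partition's metadata rather than fail with "partition already exists". A minimal sketch of the same toggle (ignoring method visibility; variable names are hypothetical):

AddPartitionDesc desc = getBaseAddPartitionDescFromPartition(fromPath, dbname, tblDesc, partition);
desc.setReplaceMode(true); // add-or-replace instead of plain add
Task<?> alterTask = TaskFactory.get(new DDLWork(inputs, outputs, desc));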

Example 5 with AddPartitionDesc

use of org.apache.hadoop.hive.ql.plan.AddPartitionDesc in project hive by apache.

the class ImportSemanticAnalyzer method prepareImport.

/**
 * The same code is used by both "repl load" and "import".
 * "repl load" now supports two modes, "repl load dbName [location]" and
 * "repl load [location]"; in the latter case the database name is taken from the table
 * metadata by default and overridden if one is specified on the command line.
 *
 * Hence, for import to work correctly, we have to pass in the sessionState default Db
 * via the overrideDBName parameter.
 */
public static boolean prepareImport(boolean isImportCmd, boolean isLocationSet, boolean isExternalSet, boolean isPartSpecSet, boolean waitOnPrecursor, String parsedLocation, String parsedTableName, String overrideDBName, LinkedHashMap<String, String> parsedPartSpec, String fromLocn, EximUtil.SemanticAnalyzerWrapperContext x, UpdatedMetaDataTracker updatedMetadata) throws IOException, MetaException, HiveException, URISyntaxException {
    // initialize load path
    URI fromURI = EximUtil.getValidatedURI(x.getConf(), stripQuotes(fromLocn));
    Path fromPath = new Path(fromURI.getScheme(), fromURI.getAuthority(), fromURI.getPath());
    FileSystem fs = FileSystem.get(fromURI, x.getConf());
    x.getInputs().add(toReadEntity(fromPath, x.getConf()));
    MetaData rv;
    try {
        rv = EximUtil.readMetaData(fs, new Path(fromPath, EximUtil.METADATA_NAME));
    } catch (IOException e) {
        throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
    }
    if (rv.getTable() == null) {
        // nothing to do here, silently return.
        return false;
    }
    ReplicationSpec replicationSpec = rv.getReplicationSpec();
    if (replicationSpec.isNoop()) {
        // nothing to do here, silently return.
        x.getLOG().debug("Current update with ID:{} is noop", replicationSpec.getCurrentReplicationState());
        return false;
    }
    if (isImportCmd) {
        replicationSpec.setReplSpecType(ReplicationSpec.Type.IMPORT);
    }
    String dbname = rv.getTable().getDbName();
    if ((overrideDBName != null) && (!overrideDBName.isEmpty())) {
        // If the parsed statement contained a db.tablename specification, prefer that.
        dbname = overrideDBName;
    }
    // Create table associated with the import
    // Executed if relevant, and used to contain all the other details about the table if not.
    ImportTableDesc tblDesc;
    try {
        tblDesc = getBaseCreateTableDescFromTable(dbname, rv.getTable());
    } catch (Exception e) {
        throw new HiveException(e);
    }
    boolean isSourceMm = AcidUtils.isInsertOnlyTable(tblDesc.getTblProps());
    if ((replicationSpec != null) && replicationSpec.isInReplicationScope()) {
        tblDesc.setReplicationSpec(replicationSpec);
        StatsSetupConst.setBasicStatsState(tblDesc.getTblProps(), StatsSetupConst.FALSE);
    }
    if (isExternalSet) {
        if (isSourceMm) {
            throw new SemanticException("Cannot import an MM table as external");
        }
        tblDesc.setExternal(isExternalSet);
    // This condition-check could have been avoided, but to honour the old
    // default of not calling if it wasn't set, we retain that behaviour.
    // TODO:cleanup after verification that the outer if isn't really needed here
    }
    if (isLocationSet) {
        tblDesc.setLocation(parsedLocation);
        x.getInputs().add(toReadEntity(new Path(parsedLocation), x.getConf()));
    }
    if ((parsedTableName != null) && (!parsedTableName.isEmpty())) {
        tblDesc.setTableName(parsedTableName);
    }
    List<AddPartitionDesc> partitionDescs = new ArrayList<AddPartitionDesc>();
    Iterable<Partition> partitions = rv.getPartitions();
    for (Partition partition : partitions) {
        // TODO: this should ideally not create AddPartitionDesc per partition
        AddPartitionDesc partsDesc = getBaseAddPartitionDescFromPartition(fromPath, dbname, tblDesc, partition);
        if ((replicationSpec != null) && replicationSpec.isInReplicationScope()) {
            StatsSetupConst.setBasicStatsState(partsDesc.getPartition(0).getPartParams(), StatsSetupConst.FALSE);
        }
        partitionDescs.add(partsDesc);
    }
    if (isPartSpecSet) {
        // The import specification asked for only a particular partition to be loaded
        // We load only that, and ignore all the others.
        boolean found = false;
        for (Iterator<AddPartitionDesc> partnIter = partitionDescs.listIterator(); partnIter.hasNext(); ) {
            AddPartitionDesc addPartitionDesc = partnIter.next();
            if (!found && addPartitionDesc.getPartition(0).getPartSpec().equals(parsedPartSpec)) {
                found = true;
            } else {
                partnIter.remove();
            }
        }
        if (!found) {
            throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(" - Specified partition not found in import directory"));
        }
    }
    if (tblDesc.getTableName() == null) {
        // The table name comes either from the IMPORT statement or from the export dump;
        // if neither supplied one, fail.
        throw new SemanticException(ErrorMsg.NEED_TABLE_SPECIFICATION.getMsg());
    } else {
        x.getConf().set("import.destination.table", tblDesc.getTableName());
        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            addPartitionDesc.setTableName(tblDesc.getTableName());
        }
    }
    Warehouse wh = new Warehouse(x.getConf());
    Table table = tableIfExists(tblDesc, x.getHive());
    boolean tableExists = false;
    if (table != null) {
        checkTable(table, tblDesc, replicationSpec, x.getConf());
        x.getLOG().debug("table " + tblDesc.getTableName() + " exists: metadata checked");
        tableExists = true;
    }
    // Initialize with 0 for non-ACID and non-MM tables.
    Long writeId = 0L;
    if (((table != null) && AcidUtils.isTransactionalTable(table)) || AcidUtils.isTablePropertyTransactional(tblDesc.getTblProps())) {
        // Explain plan doesn't open a txn and hence no need to allocate write id.
        if (x.getCtx().getExplainConfig() == null) {
            writeId = SessionState.get().getTxnMgr().getTableWriteId(tblDesc.getDatabaseName(), tblDesc.getTableName());
        }
    }
    int stmtId = 0;
    /*
    if (isAcid(writeId)) {
      tblDesc.setInitialMmWriteId(writeId);
    }
    */
    if (!replicationSpec.isInReplicationScope()) {
        createRegularImportTasks(tblDesc, partitionDescs, isPartSpecSet, replicationSpec, table, fromURI, fs, wh, x, writeId, stmtId, isSourceMm);
    } else {
        createReplImportTasks(tblDesc, partitionDescs, replicationSpec, waitOnPrecursor, table, fromURI, fs, wh, x, writeId, stmtId, isSourceMm, updatedMetadata);
    }
    return tableExists;
}
Also used : Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.metastore.api.Partition) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) Table(org.apache.hadoop.hive.ql.metadata.Table) ImportTableDesc(org.apache.hadoop.hive.ql.plan.ImportTableDesc) ArrayList(java.util.ArrayList) IOException(java.io.IOException) URI(java.net.URI) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) URISyntaxException(java.net.URISyntaxException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) IOException(java.io.IOException) InvalidTableException(org.apache.hadoop.hive.ql.metadata.InvalidTableException) MetaData(org.apache.hadoop.hive.ql.parse.repl.load.MetaData) FileSystem(org.apache.hadoop.fs.FileSystem) AddPartitionDesc(org.apache.hadoop.hive.ql.plan.AddPartitionDesc)
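
One subtlety in the isPartSpecSet filter above: matching is done with equals() between the parsed partition spec and each descriptor's getPartSpec() map, and java.util.Map.equals compares key/value pairs rather than iteration order, so a spec like PARTITION (ds='2018-01-01', hr='12') matches regardless of key order. A self-contained illustration (the partition values are hypothetical):

import java.util.LinkedHashMap;
import java.util.Map;

public class PartSpecEquals {
    public static void main(String[] args) {
        Map<String, String> parsed = new LinkedHashMap<>();
        parsed.put("ds", "2018-01-01");
        parsed.put("hr", "12");
        Map<String, String> fromDump = new LinkedHashMap<>();
        fromDump.put("hr", "12");
        fromDump.put("ds", "2018-01-01");
        // Map.equals compares entry sets, not insertion order: prints true.
        System.out.println(parsed.equals(fromDump));
    }
}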

Aggregations

AddPartitionDesc (org.apache.hadoop.hive.ql.plan.AddPartitionDesc)17 Path (org.apache.hadoop.fs.Path)11 Table (org.apache.hadoop.hive.ql.metadata.Table)7 WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)5 DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork)5 FileSystem (org.apache.hadoop.fs.FileSystem)4 Database (org.apache.hadoop.hive.metastore.api.Database)4 IOException (java.io.IOException)3 URISyntaxException (java.net.URISyntaxException)3 ArrayList (java.util.ArrayList)3 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)3 HiveException (org.apache.hadoop.hive.ql.metadata.HiveException)3 InvalidTableException (org.apache.hadoop.hive.ql.metadata.InvalidTableException)3 URI (java.net.URI)2 HashSet (java.util.HashSet)2 Warehouse (org.apache.hadoop.hive.metastore.Warehouse)2 SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint)2 SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint)2 SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint)2 SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint)2