Example 21 with ReplicationSpec

Use of org.apache.hadoop.hive.ql.parse.ReplicationSpec in project hive by apache.

The class BootStrapReplicationSpecFunction, method fromMetaStore:

@Override
public ReplicationSpec fromMetaStore() throws HiveException {
    try {
        // Prefer an explicitly requested notification id; otherwise fall back
        // to the metastore's current notification event id.
        long currentReplicationState = (this.currentNotificationId > 0)
                ? this.currentNotificationId
                : db.getMSC().getCurrentNotificationEventId().getEventId();
        // "will-be-set" is a placeholder; the real state is filled in on the next line.
        ReplicationSpec replicationSpec =
                new ReplicationSpec(true, false, "replv2", "will-be-set", false, false);
        replicationSpec.setCurrentReplicationState(String.valueOf(currentReplicationState));
        return replicationSpec;
    } catch (Exception e) {
        throw new SemanticException(e);
    }
}
Also used : ReplicationSpec(org.apache.hadoop.hive.ql.parse.ReplicationSpec) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException)
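
The state-resolution step above can be lifted into a self-contained sketch. Everything here is a hypothetical stand-in (the class name, the method name, and the LongSupplier in place of the metastore client); only the fallback logic mirrors the source:

import java.util.function.LongSupplier;

// Hypothetical stand-in for the bootstrap state resolution above;
// only the fallback logic is real, the names are illustrative.
public class BootstrapStateSketch {

    static String resolveReplicationState(long requestedNotificationId, LongSupplier metastoreCurrentEventId) {
        // An explicitly requested id wins; otherwise ask the metastore.
        long state = (requestedNotificationId > 0)
                ? requestedNotificationId
                : metastoreCurrentEventId.getAsLong();
        return String.valueOf(state);
    }

    public static void main(String[] args) {
        System.out.println(resolveReplicationState(0, () -> 421L));  // "421": falls back to the metastore
        System.out.println(resolveReplicationState(57, () -> 421L)); // "57": explicit id wins
    }
}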

Example 22 with ReplicationSpec

Use of org.apache.hadoop.hive.ql.parse.ReplicationSpec in project hive by apache.

The class DropDatabaseOperation, method execute:

@Override
public int execute() throws HiveException {
    try {
        String dbName = desc.getDatabaseName();
        ReplicationSpec replicationSpec = desc.getReplicationSpec();
        if (replicationSpec.isInReplicationScope()) {
            // Replayed replication event: only drop the database if the event
            // is allowed to replace the object's current replicated state.
            Database database = context.getDb().getDatabase(dbName);
            if (database == null || !replicationSpec.allowEventReplacementInto(database.getParameters())) {
                return 0;
            }
        }
        // Note: isCasdade (sic) is the method's actual name in DropDatabaseDesc.
        context.getDb().dropDatabase(dbName, true, desc.getIfExists(), desc.isCasdade());
        if (LlapHiveUtils.isLlapMode(context.getConf())) {
            // Proactively evict the dropped database's data from the LLAP cache.
            ProactiveEviction.Request.Builder llapEvictRequestBuilder = ProactiveEviction.Request.Builder.create();
            llapEvictRequestBuilder.addDb(dbName);
            ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
        }
        // Unregister the functions as well
        if (desc.isCasdade()) {
            FunctionRegistry.unregisterPermanentFunctions(dbName);
        }
    } catch (NoSuchObjectException ex) {
        throw new HiveException(ex, ErrorMsg.DATABASE_NOT_EXISTS, desc.getDatabaseName());
    }
    return 0;
}
Also used : ReplicationSpec(org.apache.hadoop.hive.ql.parse.ReplicationSpec) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) Database(org.apache.hadoop.hive.metastore.api.Database) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException)
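
The allowEventReplacementInto check above is what keeps a replayed DROP DATABASE event from clobbering newer state on the target. Below is a self-contained sketch of that ordering check, under the assumption that the last replicated state is stored in the object's parameters under the key repl.last.id; the class and method here are illustrative, not the Hive implementation:

import java.util.Map;

// Hypothetical illustration of the staleness check behind
// allowEventReplacementInto; not the Hive implementation itself.
public class EventReplacementSketch {

    static boolean allowEventReplacementInto(Map<String, String> params, String eventState) {
        // Assumption: "repl.last.id" holds the object's last replicated state.
        String lastReplicatedState = params.get("repl.last.id");
        if (lastReplicatedState == null) {
            return false; // object was not created by replication; leave it alone
        }
        // Replay only if the incoming event is newer than what was last applied.
        return Long.parseLong(eventState) > Long.parseLong(lastReplicatedState);
    }

    public static void main(String[] args) {
        Map<String, String> dbParams = Map.of("repl.last.id", "100");
        System.out.println(allowEventReplacementInto(dbParams, "99"));  // false: stale event, skip the drop
        System.out.println(allowEventReplacementInto(dbParams, "101")); // true: newer event, apply it
    }
}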

Example 23 with ReplicationSpec

Use of org.apache.hadoop.hive.ql.parse.ReplicationSpec in project hive by apache.

The class DropDatabaseAnalyzer, method analyzeInternal:

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    String databaseName = unescapeIdentifier(root.getChild(0).getText());
    boolean ifExists = root.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null;
    boolean cascade = root.getFirstChildWithType(HiveParser.TOK_CASCADE) != null;
    Database database = getDatabase(databaseName, !ifExists);
    if (database == null) {
        return;
    }
    // if cascade=true, then we need to authorize the drop table action as well, and add the tables to the outputs
    if (cascade) {
        try {
            for (Table table : db.getAllTableObjects(databaseName)) {
                // We want no lock here, as the database lock will cover the tables,
                // and putting a lock will actually cause us to deadlock on ourselves.
                outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
            }
        } catch (HiveException e) {
            throw new SemanticException(e);
        }
    }
    inputs.add(new ReadEntity(database));
    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_EXCLUSIVE));
    DropDatabaseDesc desc = new DropDatabaseDesc(databaseName, ifExists, cascade, new ReplicationSpec());
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
Also used : ReadEntity(org.apache.hadoop.hive.ql.hooks.ReadEntity) ReplicationSpec(org.apache.hadoop.hive.ql.parse.ReplicationSpec) Table(org.apache.hadoop.hive.ql.metadata.Table) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) Database(org.apache.hadoop.hive.metastore.api.Database) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
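
Note that the analyzer passes a default-constructed ReplicationSpec, which marks a plain user-issued drop. A minimal sketch of the consequence, assuming (consistently with the guard in Example 22) that the no-arg constructor yields a spec outside replication scope:

import org.apache.hadoop.hive.ql.parse.ReplicationSpec;

public class DefaultSpecSketch {
    public static void main(String[] args) {
        // A default spec is not in replication scope, so DropDatabaseOperation
        // (Example 22) skips the event-replacement guard and drops unconditionally.
        ReplicationSpec userDrop = new ReplicationSpec();
        System.out.println(userDrop.isInReplicationScope()); // expected: false
    }
}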

Example 24 with ReplicationSpec

Use of org.apache.hadoop.hive.ql.parse.ReplicationSpec in project hive by apache.

The class LoadTable, method newTableTasks:

private void newTableTasks(ImportTableDesc tblDesc, Task<?> tblRootTask, TableLocationTuple tuple) throws Exception {
    Table table = tblDesc.toTable(context.hiveConf);
    ReplicationSpec replicationSpec = event.replicationSpec();
    if (!tblDesc.isExternal()) {
        // Managed tables get their location assigned by the metastore.
        tblDesc.setLocation(null);
    }
    Task<?> createTableTask = tblDesc.getCreateTableTask(new HashSet<>(), new HashSet<>(), context.hiveConf,
            true, (new Path(context.dumpDirectory)).getParent().toString(), metricCollector, true);
    if (tblRootTask == null) {
        tblRootTask = createTableTask;
    } else {
        tblRootTask.addDependentTask(createTableTask);
    }
    if (replicationSpec.isMetadataOnly()) {
        // Metadata-only dump: creating the table is all there is to do.
        tracker.addTask(tblRootTask);
        return;
    }
    Task<?> parentTask = createTableTask;
    if (replicationSpec.isTransactionalTableDump()) {
        // For transactional tables, replicate the write-id state before any data is loaded.
        List<String> partNames = isPartitioned(tblDesc) ? event.partitions(tblDesc) : null;
        ReplTxnWork replTxnWork = new ReplTxnWork(tblDesc.getDatabaseName(), tblDesc.getTableName(), partNames,
                replicationSpec.getValidWriteIdList(), ReplTxnWork.OperationType.REPL_WRITEID_STATE,
                (new Path(context.dumpDirectory)).getParent().toString(), metricCollector);
        Task<?> replTxnTask = TaskFactory.get(replTxnWork, context.hiveConf);
        parentTask.addDependentTask(replTxnTask);
        parentTask = replTxnTask;
    }
    boolean shouldCreateLoadTableTask = (!isPartitioned(tblDesc) && !TableType.EXTERNAL_TABLE.equals(table.getTableType()))
            || tuple.isConvertedFromManagedToExternal;
    if (shouldCreateLoadTableTask) {
        LOG.debug("adding dependent ReplTxnTask/CopyWork/MoveWork for table");
        Task<?> loadTableTask = loadTableTask(table, replicationSpec, table.getDataLocation(), event.dataPath());
        parentTask.addDependentTask(loadTableTask);
    }
    tracker.addTask(tblRootTask);
}
Also used : Path(org.apache.hadoop.fs.Path) ReplicationSpec(org.apache.hadoop.hive.ql.parse.ReplicationSpec) Table(org.apache.hadoop.hive.ql.metadata.Table) ReplTxnWork(org.apache.hadoop.hive.ql.plan.ReplTxnWork)
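
The dependency chains newTableTasks builds are easier to see with a toy stand-in for Task<?>. Everything below is hypothetical illustration, not Hive API (in particular, the toy addDependentTask returns the child for easy chaining, which Hive's does not):

import java.util.ArrayList;
import java.util.List;

// Toy stand-in for Hive's Task<?>; only the chaining shape is meaningful.
public class LoadTableChainSketch {

    static class ToyTask {
        final String name;
        final List<ToyTask> children = new ArrayList<>();
        ToyTask(String name) { this.name = name; }
        ToyTask addDependentTask(ToyTask child) { children.add(child); return child; }
    }

    public static void main(String[] args) {
        ToyTask create = new ToyTask("createTable");
        ToyTask parent = create;
        boolean transactionalDump = true; // mirrors replicationSpec.isTransactionalTableDump()
        if (transactionalDump) {
            // Write-id state must be in place before any data lands.
            parent = parent.addDependentTask(new ToyTask("replTxn"));
        }
        parent.addDependentTask(new ToyTask("loadTable"));
        // Resulting chain: createTable -> replTxn -> loadTable
        // (metadata-only dumps stop after createTable; see the early return above)
    }
}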

Aggregations

ReplicationSpec (org.apache.hadoop.hive.ql.parse.ReplicationSpec): 24
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 11
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 9
Table (org.apache.hadoop.hive.ql.metadata.Table): 8
DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork): 7
Database (org.apache.hadoop.hive.metastore.api.Database): 6
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 6
ArrayList (java.util.ArrayList): 5
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 5
IOException (java.io.IOException): 4
ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity): 4
InvalidTableException (org.apache.hadoop.hive.ql.metadata.InvalidTableException): 4
Path (org.apache.hadoop.fs.Path): 3
TableName (org.apache.hadoop.hive.common.TableName): 3
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 3
Task (org.apache.hadoop.hive.ql.exec.Task): 3
FileNotFoundException (java.io.FileNotFoundException): 2
HashMap (java.util.HashMap): 2
LinkedHashMap (java.util.LinkedHashMap): 2
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 2