Search in sources :

Example 1 with AlterDatabaseSetPropertiesDesc

Use of org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc in project hive by apache.

In class ReplLoadTask, method executeIncrementalLoad:

private int executeIncrementalLoad(long loadStartTime) throws Exception {
    // If the replication scope was modified, drop the tables that are excluded in the new replication policy.
    if (work.replScopeModified) {
        dropTablesExcludedInReplScope(work.currentReplScope);
    }
    Database targetDb = getHive().getDatabase(work.dbNameToLoadIn);
    Map<String, String> props = new HashMap<>();
    // Check if it is an optimised bootstrap failover.
    if (work.isFirstFailover) {
        // The target database must not already be marked as a target of replication, and it must have been a source of replication.
        if (MetaStoreUtils.isTargetOfReplication(targetDb)) {
            LOG.error("The database {} is already marked as target for replication", targetDb.getName());
            throw new Exception("Failover target is already marked as target");
        }
        if (!ReplChangeManager.isSourceOfReplication(targetDb)) {
            LOG.error("The database {} is already source of replication.", targetDb.getName());
            throw new Exception("Failover target was not source of replication");
        }
        boolean isTableDiffPresent = checkFileExists(new Path(work.dumpDirectory).getParent(), conf, TABLE_DIFF_COMPLETE_DIRECTORY);
        Long eventId = Long.parseLong(getEventIdFromFile(new Path(work.dumpDirectory).getParent(), conf)[0]);
        if (!isTableDiffPresent) {
            prepareTableDiffFile(eventId, getHive(), work, conf);
            if (this.childTasks == null) {
                this.childTasks = new ArrayList<>();
            }
            createReplLoadCompleteAckTask();
            return 0;
        }
    } else if (work.isSecondFailover) {
        // DROP the tables to be bootstrapped.
        Hive db = getHive();
        for (String table : work.tablesToBootstrap) {
            db.dropTable(work.dbNameToLoadIn + "." + table, true);
        }
    }
    if (!MetaStoreUtils.isTargetOfReplication(targetDb)) {
        props.put(ReplConst.TARGET_OF_REPLICATION, ReplConst.TRUE);
    }
    if (!work.shouldFailover() && MetaStoreUtils.isDbBeingFailedOver(targetDb)) {
        props.put(ReplConst.REPL_FAILOVER_ENDPOINT, "");
    }
    if (!props.isEmpty()) {
        AlterDatabaseSetPropertiesDesc setTargetDesc = new AlterDatabaseSetPropertiesDesc(work.dbNameToLoadIn, props, null);
        Task<?> addReplTargetPropTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), setTargetDesc, true, work.dumpDirectory, work.getMetricCollector()), conf);
        if (this.childTasks == null) {
            this.childTasks = new ArrayList<>();
        }
        this.childTasks.add(addReplTargetPropTask);
    }
    IncrementalLoadTasksBuilder builder = work.incrementalLoadTasksBuilder();
    // If the incremental events are already applied, check whether any tables still need to be bootstrapped and, if so, perform the bootstrap.
    if (!builder.hasMoreWork() && work.isLastReplIDUpdated()) {
        if (work.hasBootstrapLoadTasks()) {
            LOG.debug("Current incremental dump have tables to be bootstrapped. Switching to bootstrap " + "mode after applying all events.");
            return executeBootStrapLoad();
        }
    }
    List<Task<?>> childTasks = new ArrayList<>();
    int maxTasks = conf.getIntVar(HiveConf.ConfVars.REPL_APPROX_MAX_LOAD_TASKS);
    TaskTracker tracker = new TaskTracker(maxTasks);
    addLazyDataCopyTask(tracker, builder.getReplLogger());
    childTasks.add(builder.build(context, getHive(), LOG, tracker));
    // Update the last repl id so that the next incremental cycle won't consider the events in this dump again if it starts from this id.
    if (!builder.hasMoreWork()) {
        // The name of the database to be loaded into is either specified directly in REPL LOAD
        // command i.e. when dbNameToLoadIn has a valid dbname or is available through dump
        // metadata during table level replication.
        String dbName = work.dbNameToLoadIn;
        if (dbName == null || StringUtils.isBlank(dbName)) {
            if (work.currentReplScope != null) {
                String replScopeDbName = work.currentReplScope.getDbName();
                if (replScopeDbName != null && !"*".equals(replScopeDbName)) {
                    dbName = replScopeDbName;
                }
            }
        }
        // When replicating multiple databases at once, the repl id can't be updated in each of them; do it only when a single database name is known.
        if (StringUtils.isNotBlank(dbName)) {
            String lastEventid = builder.eventTo().toString();
            Map<String, String> mapProp = new HashMap<>();
            mapProp.put(ReplicationSpec.KEY.CURR_STATE_ID_SOURCE.toString(), lastEventid);
            AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(dbName, mapProp, new ReplicationSpec(lastEventid, lastEventid));
            Task<?> updateReplIdTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true, (new Path(work.dumpDirectory).getParent()).toString(), work.getMetricCollector()), conf);
            DAGTraversal.traverse(childTasks, new AddDependencyToLeaves(updateReplIdTask));
            work.setLastReplIDUpdated(true);
            LOG.debug("Added task to set last repl id of db " + dbName + " to " + lastEventid);
        }
    }
    // Once all the incremental events are applied, enable bootstrap of tables if any exist.
    if (builder.hasMoreWork() || work.hasBootstrapLoadTasks()) {
        DAGTraversal.traverse(childTasks, new AddDependencyToLeaves(TaskFactory.get(work, conf)));
    }
    if (this.childTasks == null) {
        this.childTasks = new ArrayList<>();
    }
    this.childTasks.addAll(childTasks);
    createReplLoadCompleteAckTask();
    // Clean-up snapshots
    if (conf.getBoolVar(REPL_SNAPSHOT_DIFF_FOR_EXTERNAL_TABLE_COPY)) {
        cleanupSnapshots(new Path(work.getDumpDirectory()).getParent().getParent().getParent(), work.getSourceDbName().toLowerCase(), conf, null, true);
    }
    // Pass the current time at the end of the repl-load stage as the starting time of the first event.
    long currentTimestamp = System.currentTimeMillis();
    ((IncrementalLoadLogger) work.incrementalLoadTasksBuilder().getReplLogger()).initiateEventTimestamp(currentTimestamp);
    LOG.info("REPL_INCREMENTAL_LOAD stage duration : {} ms", currentTimestamp - loadStartTime);
    return 0;
}
Also used : Path(org.apache.hadoop.fs.Path) TaskTracker(org.apache.hadoop.hive.ql.exec.repl.util.TaskTracker) Task(org.apache.hadoop.hive.ql.exec.Task) ReplicationSpec(org.apache.hadoop.hive.ql.parse.ReplicationSpec) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ArrayList(java.util.ArrayList) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) FileNotFoundException(java.io.FileNotFoundException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) TException(org.apache.thrift.TException) IOException(java.io.IOException) LoadConstraint(org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.LoadConstraint) IncrementalLoadLogger(org.apache.hadoop.hive.ql.parse.repl.load.log.IncrementalLoadLogger) Hive(org.apache.hadoop.hive.ql.metadata.Hive) IncrementalLoadTasksBuilder(org.apache.hadoop.hive.ql.exec.repl.incremental.IncrementalLoadTasksBuilder) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) LoadDatabase(org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.LoadDatabase) Database(org.apache.hadoop.hive.metastore.api.Database) AlterDatabase(org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.LoadDatabase.AlterDatabase) AlterDatabaseSetPropertiesDesc(org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc) HashSet(java.util.HashSet) AddDependencyToLeaves(org.apache.hadoop.hive.ql.exec.repl.util.AddDependencyToLeaves)
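Example 1 uses AlterDatabaseSetPropertiesDesc in two flavours: with a null ReplicationSpec the property change is applied unconditionally (marking the database as a replication target), while a spec built from the last event id is meant to let the DDL layer skip the update if the database already carries a newer replicated state. Below is a minimal sketch of both, not the Hive method itself; it assumes the imports from the list above plus org.apache.hadoop.hive.ql.exec.TaskFactory and a ReplicationMetricCollector supplied by the caller, and the "repl.example.marker" key is purely illustrative (the real code uses ReplConst constants).

private static List<Task<?>> replStateTasks(String dbName, String lastEventId, HiveConf conf, String dumpDir, ReplicationMetricCollector collector) {
    // Unconditional update: a null ReplicationSpec, as used for the target-of-replication marker above.
    Map<String, String> marker = new HashMap<>();
    // Illustrative key; the real code puts ReplConst.TARGET_OF_REPLICATION here.
    marker.put("repl.example.marker", "true");
    AlterDatabaseSetPropertiesDesc unconditional = new AlterDatabaseSetPropertiesDesc(dbName, marker, null);
    // Guarded update: the spec carries the event id, so replaying an older event
    // should not overwrite a newer repl state already stored on the database.
    Map<String, String> idProp = new HashMap<>();
    idProp.put(ReplicationSpec.KEY.CURR_STATE_ID_SOURCE.toString(), lastEventId);
    AlterDatabaseSetPropertiesDesc guarded = new AlterDatabaseSetPropertiesDesc(dbName, idProp, new ReplicationSpec(lastEventId, lastEventId));
    List<Task<?>> tasks = new ArrayList<>();
    for (AlterDatabaseSetPropertiesDesc desc : new AlterDatabaseSetPropertiesDesc[] { unconditional, guarded }) {
        // Same DDLWork/TaskFactory wrapping as in the snippet above.
        tasks.add(TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), desc, true, dumpDir, collector), conf));
    }
    return tasks;
}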

Example 2 with AlterDatabaseSetPropertiesDesc

Use of org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc in project hive by apache.

In class LoadDatabase, method alterDbTask:

private static Task<?> alterDbTask(String dbName, Map<String, String> props, HiveConf hiveConf, String dumpDirectory, ReplicationMetricCollector metricCollector) {
    AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(dbName, props, null);
    DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true, (new Path(dumpDirectory)).getParent().toString(), metricCollector);
    return TaskFactory.get(work, hiveConf);
}
Also used : Path(org.apache.hadoop.fs.Path) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) AlterDatabaseSetPropertiesDesc(org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc)
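Example 2 is the pattern from Example 1 distilled into a helper. A hypothetical call site might look like the following, assuming code inside LoadDatabase (the method is private static) with a HiveConf named hiveConf and a ReplicationMetricCollector named metricCollector in scope; the property key, value, and dump path are illustrative:

Map<String, String> props = new HashMap<>();
// Illustrative key/value; real callers pass replication-state properties.
props.put("some.db.property", "value");
Task<?> alterTask = alterDbTask("target_db", props, hiveConf, "/warehouse/repl/dump1", metricCollector);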

Example 3 with AlterDatabaseSetPropertiesDesc

Use of org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc in project hive by apache.

In class IncrementalLoadTasksBuilder, method dbUpdateReplStateTask:

private Task<?> dbUpdateReplStateTask(String dbName, String replState, Task<?> preCursor) {
    HashMap<String, String> mapProp = new HashMap<>();
    mapProp.put(ReplicationSpec.KEY.CURR_STATE_ID_SOURCE.toString(), replState);
    AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(dbName, mapProp, new ReplicationSpec(replState, replState));
    Task<?> updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterDbDesc, true, dumpDirectory, metricCollector), conf);
    // Link the update-repl-state task with the dependency collection task.
    if (preCursor != null) {
        preCursor.addDependentTask(updateReplIdTask);
        log.debug("Added {}:{} as a precursor of {}:{}", preCursor.getClass(), preCursor.getId(), updateReplIdTask.getClass(), updateReplIdTask.getId());
    }
    return updateReplIdTask;
}
Also used : ReplicationSpec(org.apache.hadoop.hive.ql.parse.ReplicationSpec) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) HashMap(java.util.HashMap) AlterDatabaseSetPropertiesDesc(org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc)

Example 4 with AlterDatabaseSetPropertiesDesc

Use of org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc in project hive by apache.

In class AlterDatabaseHandler, method handle:

@Override
public List<Task<?>> handle(Context context) throws SemanticException {
    AlterDatabaseMessage msg = deserializer.getAlterDatabaseMessage(context.dmd.getPayload());
    String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
    try {
        Database oldDb = msg.getDbObjBefore();
        Database newDb = msg.getDbObjAfter();
        AbstractAlterDatabaseDesc alterDbDesc;
        if ((oldDb.getOwnerType() == newDb.getOwnerType()) && oldDb.getOwnerName().equalsIgnoreCase(newDb.getOwnerName())) {
            // If the owner information is unchanged, then the DB properties must have changed
            Map<String, String> newDbProps = new HashMap<>();
            Map<String, String> dbProps = newDb.getParameters();
            for (Map.Entry<String, String> entry : dbProps.entrySet()) {
                String key = entry.getKey();
                // Ignore the keys which are local to source warehouse
                if (key.startsWith(Utils.BOOTSTRAP_DUMP_STATE_KEY_PREFIX) || key.equals(ReplicationSpec.KEY.CURR_STATE_ID_SOURCE.toString()) || key.equals(ReplicationSpec.KEY.CURR_STATE_ID_TARGET.toString()) || key.equals(ReplUtils.REPL_CHECKPOINT_KEY) || key.equals(ReplChangeManager.SOURCE_OF_REPLICATION) || key.equals(ReplUtils.REPL_FIRST_INC_PENDING_FLAG) || key.equals(ReplConst.REPL_FAILOVER_ENDPOINT)) {
                    continue;
                }
                newDbProps.put(key, entry.getValue());
            }
            alterDbDesc = new AlterDatabaseSetPropertiesDesc(actualDbName, newDbProps, context.eventOnlyReplicationSpec());
        } else {
            alterDbDesc = new AlterDatabaseSetOwnerDesc(actualDbName, new PrincipalDesc(newDb.getOwnerName(), newDb.getOwnerType()), context.eventOnlyReplicationSpec());
        }
        Task<DDLWork> alterDbTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, alterDbDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
        context.log.debug("Added alter database task : {}:{}", alterDbTask.getId(), actualDbName);
        // Only the database object is updated
        updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, null, null);
        return Collections.singletonList(alterDbTask);
    } catch (Exception e) {
        throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
    }
}
Also used : AbstractAlterDatabaseDesc(org.apache.hadoop.hive.ql.ddl.database.alter.AbstractAlterDatabaseDesc) HashMap(java.util.HashMap) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) PrincipalDesc(org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc) AlterDatabaseMessage(org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) Database(org.apache.hadoop.hive.metastore.api.Database) AlterDatabaseSetPropertiesDesc(org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc) AlterDatabaseSetOwnerDesc(org.apache.hadoop.hive.ql.ddl.database.alter.owner.AlterDatabaseSetOwnerDesc) HashMap(java.util.HashMap) Map(java.util.Map) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
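The loop in the middle of Example 4 does one job: copy the post-alter database parameters while dropping keys that are local to the source warehouse, so they are never replayed on the target. Read as a standalone helper, it might look like the sketch below; the key set and prefix are deliberately passed in as parameters because the exact constants live in Utils, ReplUtils, ReplConst, and ReplicationSpec.KEY, as the snippet above shows.

private static Map<String, String> stripSourceLocalProps(Map<String, String> dbProps, Set<String> sourceLocalKeys, String dumpStatePrefix) {
    Map<String, String> filtered = new HashMap<>();
    for (Map.Entry<String, String> entry : dbProps.entrySet()) {
        String key = entry.getKey();
        // Keys that only make sense on the source cluster must not reach the target db.
        if (sourceLocalKeys.contains(key) || key.startsWith(dumpStatePrefix)) {
            continue;
        }
        filtered.put(key, entry.getValue());
    }
    return filtered;
}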

Example 5 with AlterDatabaseSetPropertiesDesc

Use of org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc in project hive by apache.

In class CreateDatabaseHandler, method handle:

@Override
public List<Task<?>> handle(Context context) throws SemanticException {
    MetaData metaData;
    try {
        FileSystem fs = FileSystem.get(new Path(context.location).toUri(), context.hiveConf);
        metaData = EximUtil.readMetaData(fs, new Path(context.location, EximUtil.METADATA_NAME));
    } catch (IOException e) {
        throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
    }
    Database db = metaData.getDatabase();
    String destinationDBName = context.dbName == null ? db.getName() : context.dbName;
    CreateDatabaseDesc createDatabaseDesc = new CreateDatabaseDesc(destinationDBName, db.getDescription(), null, null, true, db.getParameters());
    Task<DDLWork> createDBTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), createDatabaseDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
    if (!db.getParameters().isEmpty()) {
        AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(destinationDBName, db.getParameters(), context.eventOnlyReplicationSpec());
        Task<DDLWork> alterDbProperties = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
        createDBTask.addDependentTask(alterDbProperties);
    }
    if (StringUtils.isNotEmpty(db.getOwnerName())) {
        AlterDatabaseSetOwnerDesc alterDbOwner = new AlterDatabaseSetOwnerDesc(destinationDBName, new PrincipalDesc(db.getOwnerName(), db.getOwnerType()), context.eventOnlyReplicationSpec());
        Task<DDLWork> alterDbTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbOwner, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
        createDBTask.addDependentTask(alterDbTask);
    }
    updatedMetadata.set(context.dmd.getEventTo().toString(), destinationDBName, null, null);
    return Collections.singletonList(createDBTask);
}
Also used : Path(org.apache.hadoop.fs.Path) IOException(java.io.IOException) PrincipalDesc(org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) CreateDatabaseDesc(org.apache.hadoop.hive.ql.ddl.database.create.CreateDatabaseDesc) MetaData(org.apache.hadoop.hive.ql.parse.repl.load.MetaData) FileSystem(org.apache.hadoop.fs.FileSystem) Database(org.apache.hadoop.hive.metastore.api.Database) AlterDatabaseSetPropertiesDesc(org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc) AlterDatabaseSetOwnerDesc(org.apache.hadoop.hive.ql.ddl.database.alter.owner.AlterDatabaseSetOwnerDesc) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) HashSet(java.util.HashSet)
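The chaining via addDependentTask is what guarantees ordering in Example 5: the CREATE DATABASE task is the root, and the property and owner alters run only after it succeeds. A condensed sketch of the create-then-alter chain, reusing the constructor signatures shown above (the method name and description string are illustrative, not part of the Hive handler):

private static Task<DDLWork> createThenAlter(String dbName, Map<String, String> params, HiveConf conf, String dumpDir, ReplicationMetricCollector collector, ReplicationSpec spec) {
    CreateDatabaseDesc createDesc = new CreateDatabaseDesc(dbName, "replicated database", null, null, true, params);
    Task<DDLWork> createTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), createDesc, true, dumpDir, collector), conf);
    AlterDatabaseSetPropertiesDesc alterDesc = new AlterDatabaseSetPropertiesDesc(dbName, params, spec);
    Task<DDLWork> alterTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDesc, true, dumpDir, collector), conf);
    // The alter task becomes a child of the create task, so the database exists before it is altered.
    createTask.addDependentTask(alterTask);
    return createTask;
}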

Aggregations

DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork): 5
AlterDatabaseSetPropertiesDesc (org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc): 5
HashMap (java.util.HashMap): 3
Path (org.apache.hadoop.fs.Path): 3
Database (org.apache.hadoop.hive.metastore.api.Database): 3
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 3
IOException (java.io.IOException): 2
HashSet (java.util.HashSet): 2
AlterDatabaseSetOwnerDesc (org.apache.hadoop.hive.ql.ddl.database.alter.owner.AlterDatabaseSetOwnerDesc): 2
PrincipalDesc (org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc): 2
ReplicationSpec (org.apache.hadoop.hive.ql.parse.ReplicationSpec): 2
FileNotFoundException (java.io.FileNotFoundException): 1
ArrayList (java.util.ArrayList): 1
LinkedHashMap (java.util.LinkedHashMap): 1
Map (java.util.Map): 1
FileSystem (org.apache.hadoop.fs.FileSystem): 1
AlterDatabaseMessage (org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage): 1
AbstractAlterDatabaseDesc (org.apache.hadoop.hive.ql.ddl.database.alter.AbstractAlterDatabaseDesc): 1
CreateDatabaseDesc (org.apache.hadoop.hive.ql.ddl.database.create.CreateDatabaseDesc): 1
Task (org.apache.hadoop.hive.ql.exec.Task): 1