
Example 26 with DumpMetaData

use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache.

the class OpenTxnHandler method handle.

@Override
public void handle(Context withinContext) throws Exception {
    if (!ReplUtils.includeAcidTableInDump(withinContext.hiveConf)) {
        return;
    }
    LOG.info("Processing#{} OPEN_TXN message : {}", fromEventId(), eventMessageAsJSON);
    DumpMetaData dmd = withinContext.createDmd(this);
    dmd.setPayload(eventMessageAsJSON);
    dmd.write();
}
Also used : DumpMetaData(org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData)
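
Although these handlers run during dump, DumpMetaData lives in the repl.load package because the same file is read back when the dump is applied on the target. A minimal sketch of that readback, assuming the two-argument DumpMetaData(Path, HiveConf) constructor and the getPayload() accessor seen elsewhere in Hive; the event directory path below is a hypothetical placeholder and error handling is elided:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;

// Read back, on the load side, what dmd.write() persisted above.
HiveConf hiveConf = new HiveConf();
// Hypothetical per-event directory inside an incremental dump.
Path eventDir = new Path("/user/hive/repl/dumpRoot/42");
DumpMetaData dmd = new DumpMetaData(eventDir, hiveConf);
// The payload is the same event-message JSON stored by setPayload().
String eventMessageAsJSON = dmd.getPayload();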

Example 27 with DumpMetaData

use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache.

the class UpdateTableColStatHandler method handle.

@Override
public void handle(Context withinContext) throws Exception {
    LOG.info("Processing#{} UpdateTableColumnStat message : {}", fromEventId(), eventMessageAsJSON);
    Table qlMdTable = new Table(eventMessage.getTableObject());
    if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, true,
            withinContext.getTablesForBootstrap(), withinContext.oldReplScope, withinContext.hiveConf)) {
        return;
    }
    // Statistics without the underlying data don't make sense.
    if (withinContext.replicationSpec.isMetadataOnly()
            || Utils.shouldDumpMetaDataOnlyForExternalTables(qlMdTable, withinContext.hiveConf)) {
        return;
    }
    DumpMetaData dmd = withinContext.createDmd(this);
    dmd.setPayload(eventMessageAsJSON);
    dmd.write();
}
Also used : Table(org.apache.hadoop.hive.ql.metadata.Table) DumpMetaData(org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData)
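
Taken together, the two guards say that a column-statistics event is dumped only for a table whose data is also being replicated. A hypothetical helper making that predicate explicit; the name shouldDumpColStats is illustrative, not Hive API, and it uses only calls already present in the handler:

// Illustrative only: collapses UpdateTableColStatHandler's guards into one predicate.
private boolean shouldDumpColStats(Context withinContext, Table qlMdTable) throws Exception {
    boolean replicated = Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, true,
            withinContext.getTablesForBootstrap(), withinContext.oldReplScope, withinContext.hiveConf);
    // Statistics without the underlying data are meaningless on the target.
    boolean dataDumped = !withinContext.replicationSpec.isMetadataOnly()
            && !Utils.shouldDumpMetaDataOnlyForExternalTables(qlMdTable, withinContext.hiveConf);
    return replicated && dataDumped;
}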

Example 28 with DumpMetaData

use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache.

the class AddUniqueConstraintHandler method handle.

@Override
public void handle(Context withinContext) throws Exception {
    LOG.debug("Processing#{} ADD_UNIQUECONSTRAINT_MESSAGE message : {}", fromEventId(), eventMessageAsJSON);
    if (shouldReplicate(withinContext)) {
        DumpMetaData dmd = withinContext.createDmd(this);
        dmd.setPayload(eventMessageAsJSON);
        dmd.write();
    }
}
Also used : DumpMetaData(org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData)

Example 29 with DumpMetaData

use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache.

the class AlterTableHandler method handle.

@Override
public void handle(Context withinContext) throws Exception {
    LOG.info("Processing#{} ALTER_TABLE message : {}", fromEventId(), eventMessageAsJSON);
    Table qlMdTableBefore = new Table(before);
    Set<String> bootstrapTableList;
    ReplScope oldReplScope;
    if (Scenario.RENAME == scenario) {
        // For rename events, table-level replication handling is not done in the shouldReplicate
        // method; it's done in handleRenameForReplacePolicy and handleRenameForTableLevelReplication.
        bootstrapTableList = null;
        oldReplScope = null;
    } else {
        // This check is skipped for alter table events during event filtering, so apply it here.
        if (!ReplUtils.tableIncludedInReplScope(withinContext.replScope, before.getTableName())) {
            LOG.debug("Table " + before.getTableName() + " does not satisfy the policy");
            return;
        }
        bootstrapTableList = withinContext.getTablesForBootstrap();
        oldReplScope = withinContext.oldReplScope;
    }
    if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTableBefore, true,
            bootstrapTableList, oldReplScope, withinContext.hiveConf)) {
        return;
    }
    if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_BOOTSTRAP_ACID_TABLES)) {
        if (!AcidUtils.isTransactionalTable(before) && AcidUtils.isTransactionalTable(after)) {
            LOG.info("The table " + after.getTableName() + " is converted to ACID table." + " It will be replicated with bootstrap load as hive.repl.bootstrap.acid.tables is set to true.");
            return;
        }
    }
    if (Scenario.RENAME == scenario) {
        String oldName = before.getTableName();
        String newName = after.getTableName();
        boolean needDump = true;
        if (withinContext.oldReplScope != null
                && !withinContext.oldReplScope.equals(withinContext.replScope)) {
            needDump = handleRenameForReplacePolicy(withinContext, oldName, newName);
        } else if (!withinContext.replScope.includeAllTables()) {
            needDump = handleRenameForTableLevelReplication(withinContext, oldName, newName);
        }
        if (!needDump) {
            LOG.info("Rename event for table " + oldName + " to " + newName + " is skipped from dumping");
            return;
        }
    }
    if (Scenario.ALTER == scenario) {
        withinContext.replicationSpec.setIsMetadataOnly(true);
        Table qlMdTableAfter = new Table(after);
        Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
        // When data isn't dumped (metadata-only replication), reset the stats to those of an
        // empty table.
        if (Utils.shouldDumpMetaDataOnly(withinContext.hiveConf)
                || Utils.shouldDumpMetaDataOnlyForExternalTables(qlMdTableAfter, withinContext.hiveConf)) {
            qlMdTableAfter.setStatsStateLikeNewTable();
        }
        EximUtil.createExportDump(metaDataPath.getFileSystem(withinContext.hiveConf), metaDataPath,
                qlMdTableAfter, null, withinContext.replicationSpec, withinContext.hiveConf);
    }
    DumpMetaData dmd = withinContext.createDmd(this);
    dmd.setPayload(eventMessageAsJSON);
    dmd.write();
}
Also used : Path(org.apache.hadoop.fs.Path) ReplScope(org.apache.hadoop.hive.common.repl.ReplScope) Table(org.apache.hadoop.hive.ql.metadata.Table) DumpMetaData(org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData)
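
The snippet never shows how scenario is chosen; the handler derives it from the before/after table objects carried by the ALTER_TABLE message. A rough, hypothetical reconstruction of that decision (Hive's actual constructor logic may distinguish further cases, e.g. truncate):

// Hypothetical reconstruction: a rename is any change to the table's name or
// database; everything else is treated as a plain alter.
private Scenario scenarioFor(org.apache.hadoop.hive.metastore.api.Table before,
                             org.apache.hadoop.hive.metastore.api.Table after) {
    boolean renamed = !before.getDbName().equalsIgnoreCase(after.getDbName())
            || !before.getTableName().equalsIgnoreCase(after.getTableName());
    return renamed ? Scenario.RENAME : Scenario.ALTER;
}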

Example 30 with DumpMetaData

use of org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData in project hive by apache.

the class DefaultHandler method handle.

@Override
public void handle(Context withinContext) throws Exception {
    // We specifically use the message string from the original event since we don't know what
    // message type to convert it to. This handler should not be called, because with different
    // message formats we need the ability to convert messages to a given message type.
    LOG.info("Dummy processing#{} message : {}", fromEventId(), event.getMessage());
    DumpMetaData dmd = withinContext.createDmd(this);
    dmd.setPayload(event.getMessage());
    dmd.write();
}
Also used : DumpMetaData(org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData)

Aggregations

DumpMetaData (org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData): 39
Path (org.apache.hadoop.fs.Path): 17
FileSystem (org.apache.hadoop.fs.FileSystem): 6
Test (org.junit.Test): 6
ArrayList (java.util.ArrayList): 5
Table (org.apache.hadoop.hive.ql.metadata.Table): 5
Database (org.apache.hadoop.hive.metastore.api.Database): 4
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 4
IOException (java.io.IOException): 3
ReplScope (org.apache.hadoop.hive.common.repl.ReplScope): 3
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 3
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 3
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 3
HashMap (java.util.HashMap): 2
List (java.util.List): 2
Task (org.apache.hadoop.hive.ql.exec.Task): 2
InvalidTableException (org.apache.hadoop.hive.ql.metadata.InvalidTableException): 2
FailoverMetaData (org.apache.hadoop.hive.ql.parse.repl.load.FailoverMetaData): 2
FileNotFoundException (java.io.FileNotFoundException): 1
URI (java.net.URI): 1