
Example 6 with AbortTxnsRequest

Use of org.apache.hadoop.hive.metastore.api.AbortTxnsRequest in project hive by apache.

From the class TestReplicationScenariosAcidTables, method testAcidTablesBootstrapWithOpenTxnsPrimaryAndSecondaryDb.

@Test
public void testAcidTablesBootstrapWithOpenTxnsPrimaryAndSecondaryDb() throws Throwable {
    int numTxns = 5;
    HiveConf primaryConf = primary.getConf();
    TxnStore txnHandler = TxnUtils.getTxnStore(primary.getConf());
    // Open 5 txns for secondary db
    List<Long> txns = openTxns(numTxns, txnHandler, primaryConf);
    // Open 5 txns for primary db
    List<Long> txnsSameDb = openTxns(numTxns, txnHandler, primaryConf);
    // Create two tables, one partitioned and one not. Cover both transactional table types: full ACID and MM (insert-only).
    primary.run("use " + primaryDbName).run("create table t1 (id int) clustered by(id) into 3 buckets stored as orc " + "tblproperties (\"transactional\"=\"true\")").run("insert into t1 values(1)").run("create table t2 (rank int) partitioned by (name string) tblproperties(\"transactional\"=\"true\", " + "\"transactional_properties\"=\"insert_only\")").run("insert into t2 partition(name='Bob') values(11)").run("insert into t2 partition(name='Carl') values(10)");
    // Allocate write ids for both tables of secondary db for all txns
    // t1=5 and t2=5
    Map<String, Long> tablesInSecDb = new HashMap<>();
    tablesInSecDb.put("t1", (long) numTxns);
    tablesInSecDb.put("t2", (long) numTxns);
    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName + "_extra", tablesInSecDb, txnHandler, txns, primaryConf);
    // Allocate write ids for both tables of primary db for all txns
    // t1 = 5 txns + 1 insert write id, t2 = 5 txns + 2 insert write ids
    Map<String, Long> tablesInPrimDb = new HashMap<>();
    tablesInPrimDb.put("t1", (long) numTxns + 1L);
    tablesInPrimDb.put("t2", (long) numTxns + 2L);
    lockIds.addAll(allocateWriteIdsForTablesAndAcquireLocks(primaryDbName, tablesInPrimDb, txnHandler, txnsSameDb, primaryConf));
    // Bootstrap dump with open txn timeout as 1s.
    List<String> withConfigs = Arrays.asList("'" + HiveConf.ConfVars.REPL_BOOTSTRAP_DUMP_OPEN_TXN_TIMEOUT + "'='1s'");
    WarehouseInstance.Tuple bootstrapDump = primary.run("use " + primaryDbName).dump(primaryDbName, withConfigs);
    // After bootstrap dump, the txns opened for the other db should not be aborted, since they do not belong to the db under replication. Verify it.
    verifyAllOpenTxnsNotAborted(txns, primaryConf);
    // After bootstrap dump, the txns opened for the primary db should be aborted, since they belong to the db under replication. Verify it.
    verifyAllOpenTxnsAborted(txnsSameDb, primaryConf);
    verifyNextId(tablesInPrimDb, primaryDbName, primaryConf);
    // Bootstrap load should replicate the write ids of both tables, since they belong
    // to the db under replication and not to the other db.
    HiveConf replicaConf = replica.getConf();
    replica.load(replicatedDbName, primaryDbName)
            .run("use " + replicatedDbName)
            .run("show tables")
            .verifyResults(new String[] { "t1", "t2" })
            .run("repl status " + replicatedDbName)
            .verifyResult(bootstrapDump.lastReplicationId)
            .run("select id from t1")
            .verifyResults(new String[] { "1" })
            .run("select rank from t2 order by rank")
            .verifyResults(new String[] { "10", "11" });
    // Verify that the write id high watermark (HWM) is set correctly after REPL LOAD
    verifyNextId(tablesInPrimDb, replicatedDbName, replicaConf);
    // Verify that only the write ids belonging to the primary db were replicated to the replica db.
    for (Map.Entry<String, Long> entry : tablesInPrimDb.entrySet()) {
        entry.setValue((long) numTxns);
    }
    verifyWriteIdsForTables(tablesInPrimDb, replicaConf, replicatedDbName);
    // Abort the txns for secondary db
    txnHandler.abortTxns(new AbortTxnsRequest(txns));
    verifyAllOpenTxnsAborted(txns, primaryConf);
    // Release the locks
    releaseLocks(txnHandler, lockIds);
}
Also used : HashMap(java.util.HashMap) AbortTxnsRequest(org.apache.hadoop.hive.metastore.api.AbortTxnsRequest) HiveConf(org.apache.hadoop.hive.conf.HiveConf) TxnStore(org.apache.hadoop.hive.metastore.txn.TxnStore) Map(java.util.Map) Test(org.junit.Test)
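
The test above wraps AbortTxnsRequest in a lot of replication machinery. Stripped of the dump/load steps, the core pattern is: obtain a TxnStore, open a batch of transactions, and abort them all with one AbortTxnsRequest. Below is a minimal sketch of that pattern, assuming a HiveConf pointing at a working metastore; the class and method names (AbortTxnsSketch, minimalAbortExample) are illustrative and not part of the Hive tests.

import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class AbortTxnsSketch {
    // Open a small batch of txns and abort them all with a single AbortTxnsRequest.
    static void minimalAbortExample(HiveConf conf) throws Exception {
        TxnStore txnHandler = TxnUtils.getTxnStore(conf);
        // Open 5 transactions for a user/host pair; the response carries the new txn ids.
        List<Long> txnIds = txnHandler.openTxns(new OpenTxnRequest(5, "user", "localhost")).getTxn_ids();
        // One request aborts the whole batch.
        txnHandler.abortTxns(new AbortTxnsRequest(txnIds));
    }
}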

Example 7 with AbortTxnsRequest

Use of org.apache.hadoop.hive.metastore.api.AbortTxnsRequest in project hive by apache.

From the class TestTxnHandler, method testAbortTxns.

@Test
public void testAbortTxns() throws Exception {
    createDatabaseForReplTests("default", MetaStoreUtils.getDefaultCatalog(conf));
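    // Open three regular txns and abort them with a single AbortTxnsRequest.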
    OpenTxnsResponse openedTxns = txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost"));
    List<Long> txnList = openedTxns.getTxn_ids();
    txnHandler.abortTxns(new AbortTxnsRequest(txnList));
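    // Open two replication-created txns mapped to source txn ids 1 and 2.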
    OpenTxnRequest replRqst = new OpenTxnRequest(2, "me", "localhost");
    replRqst.setReplPolicy("default.*");
    replRqst.setTxn_type(TxnType.REPL_CREATED);
    replRqst.setReplSrcTxnIds(Arrays.asList(1L, 2L));
    List<Long> targetTxns = txnHandler.openTxns(replRqst).getTxn_ids();
    assertTrue(targetTxnsPresentInReplTxnMap(1L, 2L, targetTxns));
    txnHandler.abortTxns(new AbortTxnsRequest(targetTxns));
    assertFalse(targetTxnsPresentInReplTxnMap(1L, 2L, targetTxns));
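    // All five txns should still be listed by getOpenTxnsInfo, now in ABORTED state.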
    GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
    assertEquals(5, txnsInfo.getOpen_txns().size());
    txnsInfo.getOpen_txns().forEach(txn -> assertEquals(TxnState.ABORTED, txn.getState()));
}
Also used : OpenTxnRequest(org.apache.hadoop.hive.metastore.api.OpenTxnRequest) AbortTxnsRequest(org.apache.hadoop.hive.metastore.api.AbortTxnsRequest) OpenTxnsResponse(org.apache.hadoop.hive.metastore.api.OpenTxnsResponse) GetOpenTxnsResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse) GetOpenTxnsInfoResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse) Test(org.junit.Test)
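
TestTxnHandler talks to the TxnStore directly. Application code more commonly goes through the metastore client API, where abortTxns(List<Long>) is the entry point; in the Hive code base these examples come from, that client call carries the same batch-abort semantics as building an AbortTxnsRequest by hand. A rough sketch under that assumption follows (ClientAbortSketch and abortViaClient are illustrative names, and error handling is omitted).

import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;

public class ClientAbortSketch {
    // Open a few txns through the metastore client and abort them in one call.
    static void abortViaClient(HiveConf conf) throws Exception {
        IMetaStoreClient client = new HiveMetaStoreClient(conf);
        try {
            // Open three transactions for the given user.
            List<Long> txnIds = client.openTxns("user", 3).getTxn_ids();
            // The client sends the whole list as one batch abort.
            client.abortTxns(txnIds);
        } finally {
            client.close();
        }
    }
}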

Example 8 with AbortTxnsRequest

Use of org.apache.hadoop.hive.metastore.api.AbortTxnsRequest in project hive by apache.

From the class TestTxnHandler, method testReplOpenTxn.

@Test
public void testReplOpenTxn() throws Exception {
    createDatabaseForReplTests("default", MetaStoreUtils.getDefaultCatalog(conf));
    int numTxn = 50000;
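    // Read the next txn id directly from the TXNS table.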
    String[] output = TestTxnDbUtil.queryToString(conf, "SELECT MAX(\"TXN_ID\") + 1 FROM \"TXNS\"").split("\n");
    long startTxnId = Long.parseLong(output[1].trim());
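    // Temporarily raise the open txn timeout while bulk-opening the repl txns, then restore it.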
    txnHandler.setOpenTxnTimeOutMillis(50000);
    List<Long> txnList = replOpenTxnForTest(startTxnId, numTxn, "default.*");
    txnHandler.setOpenTxnTimeOutMillis(1000);
    assert (txnList.size() == numTxn);
    txnHandler.abortTxns(new AbortTxnsRequest(txnList));
}
Also used : AbortTxnsRequest(org.apache.hadoop.hive.metastore.api.AbortTxnsRequest) Test(org.junit.Test)
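
This test derives the starting txn id with a raw SQL query against the TXNS table. Where direct access to the backing database is inconvenient, a transaction high-water mark can also be read from the TxnStore snapshot returned by getOpenTxns(), as sketched below (HighWaterMarkSketch is an illustrative name; note the snapshot value is not guaranteed to match the raw MAX(TXN_ID) the test computes, which is presumably why the test queries the table directly).

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class HighWaterMarkSketch {
    // Read the txn high-water mark from the open-txns snapshot instead of the TXNS table.
    static long currentTxnHighWaterMark(HiveConf conf) throws Exception {
        TxnStore txnHandler = TxnUtils.getTxnStore(conf);
        GetOpenTxnsResponse openTxns = txnHandler.getOpenTxns();
        // Highest transaction id visible in this snapshot.
        return openTxns.getTxn_high_water_mark();
    }
}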

Example 9 with AbortTxnsRequest

Use of org.apache.hadoop.hive.metastore.api.AbortTxnsRequest in project hive by apache.

From the class TestAcidTxnCleanerService, method cleansEmptyAbortedBatchTxns.

@Test
public void cleansEmptyAbortedBatchTxns() throws Exception {
    // add one non-empty aborted txn
    openNonEmptyThenAbort();
    // add a batch of empty, aborted txns
    txnHandler.setOpenTxnTimeOutMillis(30000);
    MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.TXN_MAX_OPEN_BATCH, TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50);
    OpenTxnsResponse resp = txnHandler.openTxns(new OpenTxnRequest(TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50, "user", "hostname"));
    txnHandler.setOpenTxnTimeOutMillis(1);
    txnHandler.abortTxns(new AbortTxnsRequest(resp.getTxn_ids()));
    GetOpenTxnsResponse openTxns = txnHandler.getOpenTxns();
    Assert.assertEquals(TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50 + 1, openTxns.getOpen_txnsSize());
    underTest.run();
    openTxns = txnHandler.getOpenTxns();
    Assert.assertEquals(2, openTxns.getOpen_txnsSize());
    Assert.assertTrue("The max txnId should be at least", getMaxTxnId() >= TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50 + 1);
}
Also used : GetOpenTxnsResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse) OpenTxnRequest(org.apache.hadoop.hive.metastore.api.OpenTxnRequest) AbortTxnsRequest(org.apache.hadoop.hive.metastore.api.AbortTxnsRequest) OpenTxnsResponse(org.apache.hadoop.hive.metastore.api.OpenTxnsResponse) Test(org.junit.Test)

Aggregations

AbortTxnsRequest (org.apache.hadoop.hive.metastore.api.AbortTxnsRequest): 9
Test (org.junit.Test): 9
HashMap (java.util.HashMap): 5
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 5
TxnStore (org.apache.hadoop.hive.metastore.txn.TxnStore): 5
Map (java.util.Map): 3
GetOpenTxnsResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse): 3
OpenTxnRequest (org.apache.hadoop.hive.metastore.api.OpenTxnRequest): 3
OpenTxnsResponse (org.apache.hadoop.hive.metastore.api.OpenTxnsResponse): 3
IOException (java.io.IOException): 2
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 2
CommandProcessorException (org.apache.hadoop.hive.ql.processors.CommandProcessorException): 2
ArrayList (java.util.ArrayList): 1
FileSystem (org.apache.hadoop.fs.FileSystem): 1
Path (org.apache.hadoop.fs.Path): 1
AbortTxnRequest (org.apache.hadoop.hive.metastore.api.AbortTxnRequest): 1
Database (org.apache.hadoop.hive.metastore.api.Database): 1
GetOpenTxnsInfoResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse): 1
LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent): 1
LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest): 1