
Example 81 with LockResponse

Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.

From the class TestInitiator, method chooseMajorOverMinorWhenBothValid.

@Test
public void chooseMajorOverMinorWhenBothValid() throws Exception {
    Table t = newTable("default", "cmomwbv", false);
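    // One base file plus eleven delta files: enough deltas for a minor compaction and enough
    // delta data relative to the base for a major compaction, so both types are valid choices.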
    addBaseFile(t, null, 200L, 200);
    addDeltaFile(t, null, 201L, 211L, 11);
    addDeltaFile(t, null, 212L, 222L, 11);
    addDeltaFile(t, null, 223L, 233L, 11);
    addDeltaFile(t, null, 234L, 244L, 11);
    addDeltaFile(t, null, 245L, 255L, 11);
    addDeltaFile(t, null, 256L, 266L, 11);
    addDeltaFile(t, null, 267L, 277L, 11);
    addDeltaFile(t, null, 278L, 288L, 11);
    addDeltaFile(t, null, 289L, 299L, 11);
    addDeltaFile(t, null, 300L, 310L, 11);
    addDeltaFile(t, null, 311L, 321L, 11);
    burnThroughTransactions("default", "cmomwbv", 320);
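    // Record an UPDATE against the table in a locked, committed transaction so the
    // initiator will consider it for compaction.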
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("cmomwbv");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    long writeid = allocateWriteId("default", "cmomwbv", txnid);
    Assert.assertEquals(321, writeid);
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
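    // Run the initiator and verify that a single MAJOR compaction was queued: major wins
    // when both compaction types qualify.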
    startInitiator();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("initiated", compacts.get(0).getState());
    Assert.assertEquals("cmomwbv", compacts.get(0).getTablename());
    Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
Also used: CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest), Table (org.apache.hadoop.hive.metastore.api.Table), LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent), LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse), ArrayList (java.util.ArrayList), ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest), LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest), ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement), Test (org.junit.Test)

Example 82 with LockResponse

Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.

From the class TestCompactionTxnHandler, method testMarkCleanedCleansTxnsAndTxnComponents.

// TODO test changes to mark cleaned to clean txns and txn_components
@Test
public void testMarkCleanedCleansTxnsAndTxnComponents() throws Exception {
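    // Abort three transactions that wrote to mytable, yourtable, and two partitions of foo;
    // their TXN_COMPONENTS entries are what markCleaned() should later remove.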
    long txnid = openTxn();
    long mytableWriteId = allocateTableWriteIds("mydb", "mytable", txnid);
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setOperationType(DataOperationType.INSERT);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    assertTrue(res.getState() == LockState.ACQUIRED);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
    txnid = openTxn();
    comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
    comp.setTablename("yourtable");
    comp.setOperationType(DataOperationType.DELETE);
    components = new ArrayList<LockComponent>(1);
    components.add(comp);
    req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    res = txnHandler.lock(req);
    assertTrue(res.getState() == LockState.ACQUIRED);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
    txnid = openTxn();
    long fooWriteId = allocateTableWriteIds("mydb", "foo", txnid);
    comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
    comp.setTablename("foo");
    comp.setPartitionname("bar=compact");
    comp.setOperationType(DataOperationType.UPDATE);
    components = new ArrayList<LockComponent>(1);
    components.add(comp);
    req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    res = txnHandler.lock(req);
    assertTrue(res.getState() == LockState.ACQUIRED);
    comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
    comp.setTablename("foo");
    comp.setPartitionname("baz=compact");
    comp.setOperationType(DataOperationType.UPDATE);
    components = new ArrayList<LockComponent>(1);
    components.add(comp);
    req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    res = txnHandler.lock(req);
    assertTrue(res.getState() == LockState.ACQUIRED);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
    CompactionInfo ci;
    // Now clean them and check that they are removed from the count.
    CompactionRequest rqst = new CompactionRequest("mydb", "mytable", CompactionType.MAJOR);
    txnHandler.compact(rqst);
    assertEquals(0, txnHandler.findReadyToClean(0, 0).size());
    ci = txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION));
    assertNotNull(ci);
    ci.highestWriteId = mytableWriteId;
    txnHandler.updateCompactorState(ci, 0);
    txnHandler.markCompacted(ci);
    Thread.sleep(txnHandler.getOpenTxnTimeOutMillis());
    List<CompactionInfo> toClean = txnHandler.findReadyToClean(0, 0);
    assertEquals(1, toClean.size());
    txnHandler.markCleaned(ci);
    // Check that we are cleaning up the empty aborted transactions
    GetOpenTxnsResponse txnList = txnHandler.getOpenTxns();
    assertEquals(3, txnList.getOpen_txnsSize());
    // Create one aborted for low water mark
    txnid = openTxn();
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
    Thread.sleep(txnHandler.getOpenTxnTimeOutMillis());
    txnHandler.cleanEmptyAbortedAndCommittedTxns();
    txnList = txnHandler.getOpenTxns();
    assertEquals(3, txnList.getOpen_txnsSize());
    rqst = new CompactionRequest("mydb", "foo", CompactionType.MAJOR);
    rqst.setPartitionname("bar");
    txnHandler.compact(rqst);
    assertEquals(0, txnHandler.findReadyToClean(0, 0).size());
    ci = txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION));
    assertNotNull(ci);
    ci.highestWriteId = fooWriteId;
    txnHandler.updateCompactorState(ci, 0);
    txnHandler.markCompacted(ci);
    toClean = txnHandler.findReadyToClean(0, 0);
    assertEquals(1, toClean.size());
    txnHandler.markCleaned(ci);
    txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost"));
    // The open txn will become the low water mark
    Thread.sleep(txnHandler.getOpenTxnTimeOutMillis());
    txnHandler.setOpenTxnTimeOutMillis(1);
    txnHandler.cleanEmptyAbortedAndCommittedTxns();
    txnList = txnHandler.getOpenTxns();
    assertEquals(3, txnList.getOpen_txnsSize());
    txnHandler.setOpenTxnTimeOutMillis(1000);
}
Also used: LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent), ArrayList (java.util.ArrayList), AbortTxnRequest (org.apache.hadoop.hive.metastore.api.AbortTxnRequest), LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), GetOpenTxnsResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse), OpenTxnRequest (org.apache.hadoop.hive.metastore.api.OpenTxnRequest), CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest), LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest), Test (org.junit.Test)

Example 83 with LockResponse

Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.

From the class TxnHandler, method lockMaterializationRebuild.

@Override
public LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId) throws MetaException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Acquiring lock for materialization rebuild with {} for {}", JavaUtils.txnIdToString(txnId), TableName.getDbTable(dbName, tableName));
    }
    TxnStore.MutexAPI.LockHandle handle = null;
    Connection dbConn = null;
    PreparedStatement pst = null;
    ResultSet rs = null;
    try {
        lockInternal();
        /**
         * MUTEX_KEY.MaterializationRebuild lock ensures that there is only 1 entry in
         * Initiated/Working state for any resource. This ensures we do not run concurrent
         * rebuild operations on any materialization.
         */
        handle = getMutexAPI().acquireLock(MUTEX_KEY.MaterializationRebuild.name());
        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
        List<String> params = Arrays.asList(dbName, tableName);
        String selectQ = "SELECT \"MRL_TXN_ID\" FROM \"MATERIALIZATION_REBUILD_LOCKS\" WHERE" + " \"MRL_DB_NAME\" = ? AND \"MRL_TBL_NAME\" = ?";
        pst = sqlGenerator.prepareStmtWithParameters(dbConn, selectQ, params);
        LOG.debug("Going to execute query <" + selectQ.replaceAll("\\?", "{}") + ">", quoteString(dbName), quoteString(tableName));
        rs = pst.executeQuery();
        if (rs.next()) {
            LOG.info("Ignoring request to rebuild " + dbName + "/" + tableName + " since it is already being rebuilt");
            return new LockResponse(txnId, LockState.NOT_ACQUIRED);
        }
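        // No rebuild in progress for this materialization: record the lock so that
        // concurrent rebuild requests see the row above and back off.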
        String insertQ = "INSERT INTO \"MATERIALIZATION_REBUILD_LOCKS\" " + "(\"MRL_TXN_ID\", \"MRL_DB_NAME\", \"MRL_TBL_NAME\", \"MRL_LAST_HEARTBEAT\") VALUES (" + txnId + ", ?, ?, " + Instant.now().toEpochMilli() + ")";
        closeStmt(pst);
        pst = sqlGenerator.prepareStmtWithParameters(dbConn, insertQ, params);
        LOG.debug("Going to execute update <" + insertQ.replaceAll("\\?", "{}") + ">", quoteString(dbName), quoteString(tableName));
        pst.executeUpdate();
        LOG.debug("Going to commit");
        dbConn.commit();
        return new LockResponse(txnId, LockState.ACQUIRED);
    } catch (SQLException ex) {
        LOG.warn("lockMaterializationRebuild failed due to " + getMessage(ex), ex);
        throw new MetaException("Unable to retrieve materialization invalidation information due to " + StringUtils.stringifyException(ex));
    } finally {
        close(rs, pst, dbConn);
        if (handle != null) {
            handle.releaseLocks();
        }
        unlockInternal();
    }
}
Also used: LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), SQLException (java.sql.SQLException), Connection (java.sql.Connection), ResultSet (java.sql.ResultSet), PreparedStatement (java.sql.PreparedStatement), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)

Example 84 with LockResponse

Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.

From the class TestHiveMetaStoreTxns, method testLocks.

@Test
public void testLocks() throws Exception {
    LockRequestBuilder rqstBuilder = new LockRequestBuilder();
    rqstBuilder.addLockComponent(new LockComponentBuilder().setDbName("mydb").setTableName("mytable").setPartitionName("MyPartition=MyValue").setExclusive().setOperationType(DataOperationType.NO_TXN).build());
    rqstBuilder.addLockComponent(new LockComponentBuilder().setDbName("mydb").setTableName("yourtable").setSharedWrite().setOperationType(DataOperationType.NO_TXN).build());
    rqstBuilder.addLockComponent(new LockComponentBuilder().setDbName("yourdb").setOperationType(DataOperationType.NO_TXN).setSharedRead().build());
    rqstBuilder.setUser("fred");
    LockResponse res = client.lock(rqstBuilder.build());
    Assert.assertEquals(1L, res.getLockid());
    Assert.assertEquals(LockState.ACQUIRED, res.getState());
    res = client.checkLock(1);
    Assert.assertEquals(1L, res.getLockid());
    Assert.assertEquals(LockState.ACQUIRED, res.getState());
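    // checkLock, heartbeat, and unlock all operate on the lock id returned in the LockResponse.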
    client.heartbeat(0, 1);
    client.unlock(1);
}
Also used: LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), MetastoreUnitTest (org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest), Test (org.junit.Test)

Example 85 with LockResponse

Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.

From the class TransactionBatch, method beginNextTransactionImpl.

private void beginNextTransactionImpl() throws StreamingException {
    beginNextTransactionImpl("No more transactions available in" + " next batch for connection: " + conn + " user: " + username);
    lastTxnUsed = getCurrentTxnId();
    lockRequest = createLockRequest(conn, partNameForLock, username, getCurrentTxnId(), agentInfo);
    createdPartitions = Sets.newHashSet();
    try {
        LockResponse res = conn.getMSC().lock(lockRequest);
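        // Streaming needs the lock immediately; any state other than ACQUIRED (e.g. WAITING)
        // is treated as a failure to lock.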
        if (res.getState() != LockState.ACQUIRED) {
            throw new TransactionError("Unable to acquire lock on " + conn);
        }
    } catch (TException e) {
        throw new TransactionError("Unable to acquire lock on " + conn, e);
    }
}
Also used: TException (org.apache.thrift.TException), LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse)
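
The examples above share one lock-acquisition pattern: build one or more LockComponents, wrap them in a LockRequest tied to an open transaction, submit it, and check the LockState on the returned LockResponse. Below is a minimal sketch of that pattern, assuming a metastore TxnStore is available as txnHandler (as in the tests) and that a transaction has already been opened; the helper name acquireTableLock and the error handling are illustrative, not part of the Hive API.

import java.util.Collections;

import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.LockComponent;
import org.apache.hadoop.hive.metastore.api.LockLevel;
import org.apache.hadoop.hive.metastore.api.LockRequest;
import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.LockType;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

public class LockAcquireSketch {

    // Hypothetical helper: acquires a table-level shared-write lock inside an open transaction.
    static LockResponse acquireTableLock(TxnStore txnHandler, long txnid, String db, String table)
            throws Exception {
        // Describe what is being locked: a shared-write lock on the table for an UPDATE.
        LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, db);
        comp.setTablename(table);
        comp.setOperationType(DataOperationType.UPDATE);

        // Wrap the component in a request tied to the open transaction.
        LockRequest req = new LockRequest(Collections.singletonList(comp), "me", "localhost");
        req.setTxnid(txnid);

        // The response carries the lock id and its state (ACQUIRED, WAITING, NOT_ACQUIRED).
        LockResponse res = txnHandler.lock(req);
        if (res.getState() != LockState.ACQUIRED) {
            throw new IllegalStateException("Lock not acquired: " + res.getState());
        }
        return res;
    }
}

On the client side the same flow goes through IMetaStoreClient (client.lock, client.checkLock, client.unlock), as Example 84 shows.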

Aggregations

LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse): 85 uses
LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest): 78 uses
LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent): 75 uses
Test (org.junit.Test): 74 uses
ArrayList (java.util.ArrayList): 73 uses
Table (org.apache.hadoop.hive.metastore.api.Table): 38 uses
CheckLockRequest (org.apache.hadoop.hive.metastore.api.CheckLockRequest): 34 uses
ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest): 31 uses
ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse): 31 uses
CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest): 27 uses
ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement): 24 uses
Partition (org.apache.hadoop.hive.metastore.api.Partition): 20 uses
AbortTxnRequest (org.apache.hadoop.hive.metastore.api.AbortTxnRequest): 12 uses
CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest): 7 uses
UnlockRequest (org.apache.hadoop.hive.metastore.api.UnlockRequest): 7 uses
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 5 uses
TException (org.apache.thrift.TException): 5 uses
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 4 uses
NoSuchLockException (org.apache.hadoop.hive.metastore.api.NoSuchLockException): 4 uses
OpenTxnRequest (org.apache.hadoop.hive.metastore.api.OpenTxnRequest): 4 uses