Use of org.apache.hadoop.hive.metastore.api.ShowLocksResponse in project hive by apache: class TestDbTxnManager2, method getLocksWithFilterOptions.
//todo: Concurrent insert/update of same partition - should pass
private List<ShowLocksResponseElement> getLocksWithFilterOptions(HiveTxnManager txnMgr, String dbName, String tblName, Map<String, String> partSpec) throws Exception {
  if (dbName == null && tblName != null) {
    dbName = SessionState.get().getCurrentDatabase();
  }
  ShowLocksRequest rqst = new ShowLocksRequest();
  rqst.setDbname(dbName);
  rqst.setTablename(tblName);
  if (partSpec != null) {
    List<String> keyList = new ArrayList<String>();
    List<String> valList = new ArrayList<String>();
    // Iterate over the entries instead of removing from the map while walking its key set,
    // which would risk a ConcurrentModificationException.
    for (Map.Entry<String, String> part : partSpec.entrySet()) {
      keyList.add(part.getKey());
      valList.add(part.getValue());
    }
    String partName = FileUtils.makePartName(keyList, valList);
    rqst.setPartname(partName);
  }
  ShowLocksResponse rsp = ((DbLockManager) txnMgr.getLockManager()).getLocks(rqst);
  return rsp.getLocks();
}
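A minimal sketch of how a test might call this helper; the database and table names below are placeholders, not taken from the original test:

// Hypothetical usage inside a test; "default" and "T1" are illustrative names.
List<ShowLocksResponseElement> locks =
    getLocksWithFilterOptions(txnMgr, "default", "T1", null);
Assert.assertEquals("Expected exactly one lock on default.T1", 1, locks.size());
Assert.assertEquals(LockType.SHARED_READ, locks.get(0).getType());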
Use of org.apache.hadoop.hive.metastore.api.ShowLocksResponse in project hive by apache: class TestDbTxnManager, method testLockExpiration.
private void testLockExpiration(HiveTxnManager txnMgr, int numLocksBefore, boolean shouldExpire) throws Exception {
  DbLockManager lockManager = (DbLockManager) txnMgr.getLockManager();
  ShowLocksResponse resp = lockManager.getLocks();
  Assert.assertEquals("Wrong number of locks before expire", numLocksBefore, resp.getLocks().size());
  Thread.sleep(HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS));
  runReaper();
  resp = lockManager.getLocks();
  if (shouldExpire) {
    Assert.assertEquals("Expected all locks to expire", 0, resp.getLocks().size());
    lockManager.clearLocalLockRecords();
  } else {
    Assert.assertEquals("No lock should expire because there is heartbeating", numLocksBefore, resp.getLocks().size());
  }
}
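For context, a hedged sketch of how tests might drive this helper; the lock acquisition through the transaction manager is elided because those calls are not shown here:

// Hypothetical callers; a single lock was acquired earlier via txnMgr (not shown).
testLockExpiration(txnMgr, 1, true);   // no heartbeat running: the lock should be reaped
testLockExpiration(txnMgr, 1, false);  // with heartbeating: the lock should survive the reaper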
Use of org.apache.hadoop.hive.metastore.api.ShowLocksResponse in project hive by apache: class TestStreaming, method testTransactionBatchAbortAndCommit.
@Test
public void testTransactionBatchAbortAndCommit() throws Exception {
  String agentInfo = "UT_" + Thread.currentThread().getName();
  HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, dbName, tblName, partitionVals);
  StreamingConnection connection = endPt.newConnection(false, agentInfo);
  DelimitedInputWriter writer = new DelimitedInputWriter(fieldNames, ",", endPt, connection);
  TransactionBatch txnBatch = connection.fetchTransactionBatch(10, writer);
  txnBatch.beginNextTransaction();
  txnBatch.write("1,Hello streaming".getBytes());
  txnBatch.write("2,Welcome to streaming".getBytes());
  ShowLocksResponse resp = msClient.showLocks(new ShowLocksRequest());
  Assert.assertEquals("LockCount", 1, resp.getLocksSize());
  Assert.assertEquals("LockType", LockType.SHARED_READ, resp.getLocks().get(0).getType());
  Assert.assertEquals("LockState", LockState.ACQUIRED, resp.getLocks().get(0).getState());
  Assert.assertEquals("AgentInfo", agentInfo, resp.getLocks().get(0).getAgentInfo());
  txnBatch.abort();
  checkNothingWritten(partLoc);
  Assert.assertEquals(TransactionBatch.TxnState.ABORTED, txnBatch.getCurrentTransactionState());
  txnBatch.beginNextTransaction();
  txnBatch.write("1,Hello streaming".getBytes());
  txnBatch.write("2,Welcome to streaming".getBytes());
  txnBatch.commit();
  checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}");
  txnBatch.close();
  connection.close();
}
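One possible follow-up assertion, not part of the original test, would verify that the streaming lock is released once the connection is closed; this assumes the lock is dropped eagerly on close rather than timing out later:

// Hypothetical post-close check: no streaming locks should remain.
ShowLocksResponse afterClose = msClient.showLocks(new ShowLocksRequest());
Assert.assertEquals("LockCount after close", 0, afterClose.getLocksSize());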
Use of org.apache.hadoop.hive.metastore.api.ShowLocksResponse in project hive by apache: class Cleaner, method run.
@Override
public void run() {
  if (cleanerCheckInterval == 0) {
    cleanerCheckInterval = conf.getTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_CLEANER_RUN_INTERVAL, TimeUnit.MILLISECONDS);
  }
  do {
    // This is solely for testing. It checks if the test has set the looped value to false,
    // and if so remembers that and then sets it to true at the end. We have to check here
    // first to make sure we go through a complete iteration of the loop before resetting it.
    boolean setLooped = !looped.get();
    TxnStore.MutexAPI.LockHandle handle = null;
    long startedAt = -1;
    // Nothing thrown inside an iteration should be able to kill the cleaner thread,
    // so wrap it in a big catch Throwable statement.
    try {
      handle = txnHandler.getMutexAPI().acquireLock(TxnStore.MUTEX_KEY.Cleaner.name());
      startedAt = System.currentTimeMillis();
      // First look for all the compactions that are waiting to be cleaned. If we have not
      // seen an entry before, look for all the locks held on that table or partition and
      // record them. We will then only clean the partition once all of those locks have been
      // released. This way we avoid removing the files while they are in use,
      // while at the same time avoiding starving the cleaner as new readers come along.
      // This works because we know that any reader who comes along after the worker thread has
      // done the compaction will read the more up to date version of the data (either in a
      // newer delta or in a newer base).
      List<CompactionInfo> toClean = txnHandler.findReadyToClean();
      {
        /**
         * Since there may be more than 1 instance of Cleaner running we may have state info
         * for items which were cleaned by other instances. Here we remove them.
         *
         * In the long run if we add end_time to compaction_queue, then we can check that
         * hive_locks.acquired_at > compaction_queue.end_time + safety_buffer in which case
         * we know the lock owner is reading files created by this compaction or later.
         * The advantage is that we don't have to store the locks.
         */
        Set<Long> currentToCleanSet = new HashSet<>();
        for (CompactionInfo ci : toClean) {
          currentToCleanSet.add(ci.id);
        }
        Set<Long> cleanPerformedByOthers = new HashSet<>();
        for (long id : compactId2CompactInfoMap.keySet()) {
          if (!currentToCleanSet.contains(id)) {
            cleanPerformedByOthers.add(id);
          }
        }
        for (long id : cleanPerformedByOthers) {
          compactId2CompactInfoMap.remove(id);
          compactId2LockMap.remove(id);
        }
      }
      if (toClean.size() > 0 || compactId2LockMap.size() > 0) {
        ShowLocksResponse locksResponse = txnHandler.showLocks(new ShowLocksRequest());
        for (CompactionInfo ci : toClean) {
          // add it to our queue.
          if (!compactId2LockMap.containsKey(ci.id)) {
            compactId2LockMap.put(ci.id, findRelatedLocks(ci, locksResponse));
            compactId2CompactInfoMap.put(ci.id, ci);
          }
        }
        // Now, for each entry in the queue, see if all of the associated locks are clear so we
        // can clean
        Set<Long> currentLocks = buildCurrentLockSet(locksResponse);
        List<Long> expiredLocks = new ArrayList<Long>();
        List<Long> compactionsCleaned = new ArrayList<Long>();
        try {
          for (Map.Entry<Long, Set<Long>> queueEntry : compactId2LockMap.entrySet()) {
            boolean sawLock = false;
            for (Long lockId : queueEntry.getValue()) {
              if (currentLocks.contains(lockId)) {
                sawLock = true;
                break;
              } else {
                expiredLocks.add(lockId);
              }
            }
            if (!sawLock) {
              // Remember to remove this when we're out of the loop,
              // we can't do it in the loop or we'll get a concurrent modification exception.
              compactionsCleaned.add(queueEntry.getKey());
              // Future thought: this may be expensive so consider having a thread pool run in parallel
              clean(compactId2CompactInfoMap.get(queueEntry.getKey()));
            } else {
              // Remove the locks we didn't see so we don't look for them again next time
              for (Long lockId : expiredLocks) {
                queueEntry.getValue().remove(lockId);
              }
            }
          }
        } finally {
          if (compactionsCleaned.size() > 0) {
            for (Long compactId : compactionsCleaned) {
              compactId2LockMap.remove(compactId);
              compactId2CompactInfoMap.remove(compactId);
            }
          }
        }
      }
    } catch (Throwable t) {
      LOG.error("Caught an exception in the main loop of compactor cleaner, " + StringUtils.stringifyException(t));
    } finally {
      if (handle != null) {
        handle.releaseLocks();
      }
    }
    if (setLooped) {
      looped.set(true);
    }
    // Now, go back to bed until it's time to do this again
    long elapsedTime = System.currentTimeMillis() - startedAt;
    if (elapsedTime >= cleanerCheckInterval || stop.get()) {
      continue;
    } else {
      try {
        Thread.sleep(cleanerCheckInterval - elapsedTime);
      } catch (InterruptedException ie) {
        // What can I do about it?
      }
    }
  } while (!stop.get());
}
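The loop above relies on two private helpers that are referenced but not shown: findRelatedLocks(ci, locksResponse) and buildCurrentLockSet(locksResponse). A minimal sketch of what they might look like, inferred only from their call sites; the CompactionInfo field names and the exact matching logic are assumptions, not verified against the Hive source:

// Sketch only -- inferred from the call sites above, not copied from the Hive implementation.
// Assumes java.util.Objects, java.util.HashSet and java.util.Set are imported.
private Set<Long> findRelatedLocks(CompactionInfo ci, ShowLocksResponse locksResponse) {
  Set<Long> relatedLocks = new HashSet<>();
  for (ShowLocksResponseElement lock : locksResponse.getLocks()) {
    // Keep any lock on the same database/table/partition as the compaction entry.
    // The field names ci.dbname, ci.tableName and ci.partName are assumed here.
    if (ci.dbname.equals(lock.getDbname())
        && Objects.equals(ci.tableName, lock.getTablename())
        && Objects.equals(ci.partName, lock.getPartname())) {
      relatedLocks.add(lock.getLockid());
    }
  }
  return relatedLocks;
}

private Set<Long> buildCurrentLockSet(ShowLocksResponse locksResponse) {
  // All lock ids currently reported by the metastore.
  Set<Long> currentLocks = new HashSet<>(locksResponse.getLocks().size());
  for (ShowLocksResponseElement lock : locksResponse.getLocks()) {
    currentLocks.add(lock.getLockid());
  }
  return currentLocks;
}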