use of org.apache.hadoop.hive.metastore.api.ShowLocksRequest in project hive by apache.
the class DDLTask method showLocksNewFormat.
private int showLocksNewFormat(ShowLocksDesc showLocks, HiveLockManager lm) throws HiveException {
  if (!(lm instanceof DbLockManager)) {
    throw new RuntimeException("New lock format only supported with db lock manager.");
  }
  DbLockManager lockMgr = (DbLockManager) lm;
  String dbName = showLocks.getDbName();
  String tblName = showLocks.getTableName();
  Map<String, String> partSpec = showLocks.getPartSpec();
  if (dbName == null && tblName != null) {
    dbName = SessionState.get().getCurrentDatabase();
  }
  ShowLocksRequest rqst = new ShowLocksRequest();
  rqst.setDbname(dbName);
  rqst.setTablename(tblName);
  if (partSpec != null) {
    // Flatten the partition spec into a single partition name such as "ds=2021-01-01/hr=12".
    // Iterating entrySet avoids the ConcurrentModificationException risk of removing keys
    // from the map while iterating its keySet.
    List<String> keyList = new ArrayList<String>();
    List<String> valList = new ArrayList<String>();
    for (Map.Entry<String, String> part : partSpec.entrySet()) {
      keyList.add(part.getKey());
      valList.add(part.getValue());
    }
    String partName = FileUtils.makePartName(keyList, valList);
    rqst.setPartname(partName);
  }
  ShowLocksResponse rsp = lockMgr.getLocks(rqst);
  // write the results to the result file
  DataOutputStream os = getOutputStream(showLocks.getResFile());
  try {
    dumpLockInfo(os, rsp);
  } catch (FileNotFoundException e) {
    LOG.warn("show locks: ", e);
    return 1;
  } catch (IOException e) {
    LOG.warn("show locks: ", e);
    return 1;
  } catch (Exception e) {
    throw new HiveException(e.toString());
  } finally {
    IOUtils.closeStream(os);
  }
  return 0;
}
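For reference, consuming a ShowLocksResponse outside of DDLTask uses the same Thrift-generated getters that appear in the tests below. A minimal sketch; the printLocks helper is hypothetical, not part of Hive:

import java.util.List;
import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;

public class LockDumper {
  // Hypothetical helper: print one line per lock currently tracked by the metastore.
  static void printLocks(ShowLocksResponse rsp) {
    List<ShowLocksResponseElement> locks = rsp.getLocks();
    for (ShowLocksResponseElement lock : locks) {
      System.out.printf("lockid=%d %s.%s state=%s type=%s acquired=%d lastheartbeat=%d%n",
          lock.getLockid(), lock.getDbname(), lock.getTablename(),
          lock.getState(), lock.getType(),
          lock.getAcquiredat(), lock.getLastheartbeat());
    }
  }
}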
use of org.apache.hadoop.hive.metastore.api.ShowLocksRequest in project hive by apache.
the class TestStreaming method testHeartbeat.
@Test
public void testHeartbeat() throws Exception {
  HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, dbName2, tblName2, null);
  StreamingConnection connection = endPt.newConnection(false, "UT_" + Thread.currentThread().getName());
  DelimitedInputWriter writer = new DelimitedInputWriter(fieldNames2, ",", endPt, connection);
  TransactionBatch txnBatch = connection.fetchTransactionBatch(5, writer);
  txnBatch.beginNextTransaction();
  // todo: this should ideally check the transaction heartbeat as well, but the heartbeat
  // timestamp is not reported yet
  // GetOpenTxnsInfoResponse txnresp = msClient.showTxns();
  ShowLocksRequest request = new ShowLocksRequest();
  request.setDbname(dbName2);
  request.setTablename(tblName2);
  ShowLocksResponse response = msClient.showLocks(request);
  Assert.assertEquals("Wrong number of locks: " + response, 1, response.getLocks().size());
  ShowLocksResponseElement lock = response.getLocks().get(0);
  long acquiredAt = lock.getAcquiredat();
  long heartbeatAt = lock.getLastheartbeat();
  txnBatch.heartbeat();
  response = msClient.showLocks(request);
  Assert.assertEquals("Wrong number of locks2: " + response, 1, response.getLocks().size());
  lock = response.getLocks().get(0);
  Assert.assertEquals("Acquired timestamp didn't match", acquiredAt, lock.getAcquiredat());
  // with an open txn, the heartbeat updates the txn record rather than the lock,
  // so the lock's last heartbeat should be unchanged
  Assert.assertTrue("Expected new heartbeat (" + lock.getLastheartbeat() + ") == old heartbeat(" + heartbeatAt + ")",
      lock.getLastheartbeat() == heartbeatAt);
  txnBatch.close();
  int txnBatchSize = 200;
  txnBatch = connection.fetchTransactionBatch(txnBatchSize, writer);
  for (int i = 0; i < txnBatchSize; i++) {
    txnBatch.beginNextTransaction();
    if (i % 47 == 0) {
      txnBatch.heartbeat();
    }
    if (i % 10 == 0) {
      txnBatch.abort();
    } else {
      txnBatch.commit();
    }
    if (i % 37 == 0) {
      txnBatch.heartbeat();
    }
  }
}
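The pattern above, building a ShowLocksRequest scoped to one table and reading timestamps off the single expected lock, can be factored into a small helper. A sketch under the test's own assumptions (the same msClient metastore client and lock getters; the lastHeartbeatFor helper itself is hypothetical):

// Hypothetical helper: return the last-heartbeat timestamp of the single lock
// expected on dbName.tblName, or -1 if no lock is held.
private long lastHeartbeatFor(String dbName, String tblName) throws Exception {
  ShowLocksRequest request = new ShowLocksRequest();
  request.setDbname(dbName);
  request.setTablename(tblName);
  ShowLocksResponse response = msClient.showLocks(request);
  if (response.getLocks().isEmpty()) {
    return -1;
  }
  Assert.assertEquals("Expected exactly one lock", 1, response.getLocks().size());
  return response.getLocks().get(0).getLastheartbeat();
}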
use of org.apache.hadoop.hive.metastore.api.ShowLocksRequest in project hive by apache.
the class TestTxnCommands method testTimeOutReaper.
@Test
public void testTimeOutReaper() throws Exception {
  runStatementOnDriver("start transaction");
  runStatementOnDriver("delete from " + Table.ACIDTBL + " where a = 5");
  // make sure the currently running txn is considered aborted by the housekeeper
  hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 2, TimeUnit.MILLISECONDS);
  MetastoreTaskThread houseKeeperService = new AcidHouseKeeperService();
  houseKeeperService.setConf(hiveConf);
  // this will abort the txn
  houseKeeperService.run();
  // this should fail because the txn was aborted due to timeout
  CommandProcessorException e = runStatementOnDriverNegative("delete from " + Table.ACIDTBL + " where a = 5");
  Assert.assertTrue("Actual: " + e.getMessage(), e.getMessage().contains("Transaction manager has aborted the transaction txnid:1"));
  // now test that we don't time out locks we should not;
  // the heartbeater should be running in the background every 1/2 second
  hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.SECONDS);
  // have to reset the conf when we change it so that the change takes effect
  houseKeeperService.setConf(hiveConf);
  runStatementOnDriver("start transaction");
  runStatementOnDriver("select count(*) from " + Table.ACIDTBL + " where a = 17");
  pause(750);
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  // since there is a txn open, we are heartbeating the txn, not individual locks
  GetOpenTxnsInfoResponse txnsInfoResponse = txnHandler.getOpenTxnsInfo();
  Assert.assertEquals(2, txnsInfoResponse.getOpen_txns().size());
  TxnInfo txnInfo = null;
  for (TxnInfo ti : txnsInfoResponse.getOpen_txns()) {
    if (ti.getState() == TxnState.OPEN) {
      txnInfo = ti;
      break;
    }
  }
  Assert.assertNotNull(txnInfo);
  Assert.assertEquals(16, txnInfo.getId());
  Assert.assertEquals(TxnState.OPEN, txnInfo.getState());
  String s = TestTxnDbUtil.queryToString(hiveConf, "select TXN_STARTED, TXN_LAST_HEARTBEAT from TXNS where TXN_ID = " + txnInfo.getId(), false);
  String[] vals = s.split("\\s+");
  Assert.assertEquals("Didn't get expected timestamps", 2, vals.length);
  long lastHeartbeat = Long.parseLong(vals[1]);
  // these 2 values are equal when the TXN entry is made and should never be equal after the
  // 1st heartbeat, which we expect to have happened by now since HIVE_TXN_TIMEOUT=1sec
  Assert.assertNotEquals("Didn't see heartbeat happen", Long.parseLong(vals[0]), lastHeartbeat);
  ShowLocksResponse slr = txnHandler.showLocks(new ShowLocksRequest());
  TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks());
  pause(750);
  houseKeeperService.run();
  pause(750);
  slr = txnHandler.showLocks(new ShowLocksRequest());
  Assert.assertEquals("Unexpected lock count: " + slr, 1, slr.getLocks().size());
  TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks());
  pause(750);
  houseKeeperService.run();
  slr = txnHandler.showLocks(new ShowLocksRequest());
  Assert.assertEquals("Unexpected lock count: " + slr, 1, slr.getLocks().size());
  TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks());
  // should've done several heartbeats
  s = TestTxnDbUtil.queryToString(hiveConf, "select TXN_STARTED, TXN_LAST_HEARTBEAT from TXNS where TXN_ID = " + txnInfo.getId(), false);
  vals = s.split("\\s+");
  Assert.assertEquals("Didn't get expected timestamps", 2, vals.length);
  Assert.assertTrue("Heartbeat didn't progress: (old,new) (" + lastHeartbeat + "," + vals[1] + ")", lastHeartbeat < Long.parseLong(vals[1]));
  runStatementOnDriver("rollback");
  slr = txnHandler.showLocks(new ShowLocksRequest());
  Assert.assertEquals("Unexpected lock count", 0, slr.getLocks().size());
}
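The pause(750) calls above are a plain sleep wrapper. A minimal sketch of such a helper (the actual helper in TestTxnCommands may differ in detail):

private static void pause(int timeMillis) {
  try {
    Thread.sleep(timeMillis);
  } catch (InterruptedException e) {
    // restore the interrupt flag; a test helper has nothing else useful to do here
    Thread.currentThread().interrupt();
  }
}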
use of org.apache.hadoop.hive.metastore.api.ShowLocksRequest in project hive by apache.
the class TestStreaming method testTransactionBatchAbortAndCommit.
@Test
public void testTransactionBatchAbortAndCommit() throws Exception {
  String agentInfo = "UT_" + Thread.currentThread().getName();
  StrictDelimitedInputWriter writer = StrictDelimitedInputWriter.newBuilder()
      .withFieldDelimiter(',')
      .build();
  HiveStreamingConnection connection = HiveStreamingConnection.newBuilder()
      .withDatabase(dbName)
      .withTable(tblName)
      .withStaticPartitionValues(partitionVals)
      .withAgentInfo(agentInfo)
      .withRecordWriter(writer)
      .withHiveConf(conf)
      .withTransactionBatchSize(10)
      .connect();
  connection.beginTransaction();
  connection.write("1,Hello streaming".getBytes());
  connection.write("2,Welcome to streaming".getBytes());
  ShowLocksResponse resp = msClient.showLocks(new ShowLocksRequest());
  Assert.assertEquals("LockCount", 1, resp.getLocksSize());
  Assert.assertEquals("LockType", LockType.SHARED_READ, resp.getLocks().get(0).getType());
  Assert.assertEquals("LockState", LockState.ACQUIRED, resp.getLocks().get(0).getState());
  Assert.assertEquals("AgentInfo", agentInfo, resp.getLocks().get(0).getAgentInfo());
  connection.abortTransaction();
  checkNothingWritten(partLoc);
  Assert.assertEquals(HiveStreamingConnection.TxnState.ABORTED, connection.getCurrentTransactionState());
  connection.beginTransaction();
  connection.write("1,Hello streaming".getBytes());
  connection.write("2,Welcome to streaming".getBytes());
  connection.commitTransaction();
  checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}");
  connection.close();
}
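Several of these tests make the same group of assertions against the single expected lock. A sketch of a shared helper (hypothetical, not part of TestStreaming) could reduce that repetition:

// Hypothetical helper: assert that exactly one lock exists and that it matches
// the expected type, state, and agent info.
private static void assertSingleLock(ShowLocksResponse resp, LockType type,
    LockState state, String agentInfo) {
  Assert.assertEquals("LockCount", 1, resp.getLocksSize());
  ShowLocksResponseElement lock = resp.getLocks().get(0);
  Assert.assertEquals("LockType", type, lock.getType());
  Assert.assertEquals("LockState", state, lock.getState());
  Assert.assertEquals("AgentInfo", agentInfo, lock.getAgentInfo());
}

With this, the four lock assertions above collapse to assertSingleLock(resp, LockType.SHARED_READ, LockState.ACQUIRED, agentInfo).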
use of org.apache.hadoop.hive.metastore.api.ShowLocksRequest in project hive by apache.
the class TestStreaming method testNoBuckets.
/**
 * Test that streaming can write to an unbucketed table.
 */
@Test
public void testNoBuckets() throws Exception {
  queryTable(driver, "drop table if exists default.streamingnobuckets");
  queryTable(driver, "create table default.streamingnobuckets (a string, b string) stored as orc " +
      "TBLPROPERTIES('transactional'='true')");
  queryTable(driver, "insert into default.streamingnobuckets values('foo','bar')");
  List<String> rs = queryTable(driver, "select * from default.streamingnobuckets");
  Assert.assertEquals(1, rs.size());
  Assert.assertEquals("foo\tbar", rs.get(0));
  StrictDelimitedInputWriter wr = StrictDelimitedInputWriter.newBuilder()
      .withFieldDelimiter(',')
      .build();
  HiveStreamingConnection connection = HiveStreamingConnection.newBuilder()
      .withDatabase("Default")
      .withTable("streamingNoBuckets")
      .withAgentInfo("UT_" + Thread.currentThread().getName())
      .withTransactionBatchSize(2)
      .withRecordWriter(wr)
      .withHiveConf(conf)
      .connect();
  connection.beginTransaction();
  connection.write("a1,b2".getBytes());
  connection.write("a3,b4".getBytes());
  TxnStore txnHandler = TxnUtils.getTxnStore(conf);
  ShowLocksResponse resp = txnHandler.showLocks(new ShowLocksRequest());
  Assert.assertEquals(1, resp.getLocksSize());
  Assert.assertEquals("streamingnobuckets", resp.getLocks().get(0).getTablename());
  Assert.assertEquals("default", resp.getLocks().get(0).getDbname());
  connection.commitTransaction();
  connection.beginTransaction();
  connection.write("a5,b6".getBytes());
  connection.write("a7,b8".getBytes());
  connection.commitTransaction();
  connection.close();
  // 536870912 == 0x20000000, the bucket property an unbucketed table gets for writer id 0
  Assert.assertEquals("Unexpected writer id", 0, BucketCodec.determineVersion(536870912).decodeWriterId(536870912));
  rs = queryTable(driver, "select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID");
  Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar"));
  Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000001_0000001_0000/bucket_00000_0"));
  Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2"));
  Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000"));
  Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4"));
  Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000"));
  Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6"));
  Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000"));
  Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8"));
  Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000"));
  queryTable(driver, "update default.streamingnobuckets set a=0, b=0 where a='a7'");
  queryTable(driver, "delete from default.streamingnobuckets where a='a1'");
  rs = queryTable(driver, "select a, b from default.streamingnobuckets order by a, b");
  int row = 0;
  Assert.assertEquals("at row=" + row, "0\t0", rs.get(row++));
  Assert.assertEquals("at row=" + row, "a3\tb4", rs.get(row++));
  Assert.assertEquals("at row=" + row, "a5\tb6", rs.get(row++));
  Assert.assertEquals("at row=" + row, "foo\tbar", rs.get(row++));
  queryTable(driver, "alter table default.streamingnobuckets compact 'major'");
  runWorker(conf);
  rs = queryTable(driver, "select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID");
  Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar"));
  Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000005_v0000024/bucket_00000"));
  Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4"));
  Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000005_v0000024/bucket_00000"));
  Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6"));
  Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000005_v0000024/bucket_00000"));
  Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t0\t0"));
  Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000005_v0000024/bucket_00000"));
}
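The magic constant 536870912 in the ROW__ID assertions is 0x20000000, i.e. 1 << 29: the top three bits of the bucket property carry the codec version (001 for V1), so a value of exactly 2^29 decodes to writer id 0 and statement id 0. A minimal sketch of that arithmetic, assuming the V1 bit layout (3 version bits, a reserved bit, a 12-bit writer id, and a 12-bit statement id):

public class BucketPropertyDemo {
  public static void main(String[] args) {
    int bucketProperty = 536870912;                    // 0x20000000 == 1 << 29
    int version = bucketProperty >>> 29;               // top 3 bits -> 1 (codec V1)
    int writerId = (bucketProperty >>> 16) & 0x0FFF;   // bits 27..16 -> 0
    int statementId = bucketProperty & 0x0FFF;         // low 12 bits -> 0
    System.out.printf("version=%d writerId=%d statementId=%d%n",
        version, writerId, statementId);               // version=1 writerId=0 statementId=0
  }
}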