Use of org.apache.hadoop.hive.metastore.txn.TxnStore in project hive by apache: the class TestCompactor, method minorCompactAfterAbort.
@Test
public void minorCompactAfterAbort() throws Exception {
  String agentInfo = "UT_" + Thread.currentThread().getName();
  String dbName = "default";
  String tblName = "cws";
  List<String> colNames = Arrays.asList("a", "b");
  String columnNamesProperty = "a,b";
  String columnTypesProperty = "int:string";
  executeStatementOnDriver("drop table if exists " + tblName, driver);
  executeStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " + //currently ACID requires table to be bucketed
      " CLUSTERED BY(a) INTO 1 BUCKETS" + " STORED AS ORC TBLPROPERTIES ('transactional'='true')", driver);
  HiveEndPoint endPt = new HiveEndPoint(null, dbName, tblName, null);
  DelimitedInputWriter writer = new DelimitedInputWriter(new String[] { "a", "b" }, ",", endPt);
  StreamingConnection connection = endPt.newConnection(false, "UT_" + Thread.currentThread().getName());
  try {
    // Write a couple of batches
    for (int i = 0; i < 2; i++) {
      writeBatch(connection, writer, false);
    }
    // Start a third batch, abort everything, don't properly close it
    TransactionBatch txnBatch = connection.fetchTransactionBatch(2, writer);
    txnBatch.beginNextTransaction();
    txnBatch.abort();
    txnBatch.beginNextTransaction();
    txnBatch.abort();
    // Now, compact
    TxnStore txnHandler = TxnUtils.getTxnStore(conf);
    txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MINOR));
    Worker t = new Worker();
    t.setThreadId((int) t.getId());
    t.setHiveConf(conf);
    AtomicBoolean stop = new AtomicBoolean(true);
    AtomicBoolean looped = new AtomicBoolean();
    t.init(stop, looped);
    t.run();
    // Find the location of the table
    IMetaStoreClient msClient = new HiveMetaStoreClient(conf);
    Table table = msClient.getTable(dbName, tblName);
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] stat = fs.listStatus(new Path(table.getSd().getLocation()), AcidUtils.deltaFileFilter);
    String[] names = new String[stat.length];
    Path resultDelta = null;
    for (int i = 0; i < names.length; i++) {
      names[i] = stat[i].getPath().getName();
      if (names[i].equals("delta_0000001_0000004")) {
        resultDelta = stat[i].getPath();
      }
    }
    Arrays.sort(names);
    String[] expected = new String[] { "delta_0000001_0000002", "delta_0000001_0000004", "delta_0000003_0000004" };
    if (!Arrays.deepEquals(expected, names)) {
      Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names));
    }
    checkExpectedTxnsPresent(null, new Path[] { resultDelta }, columnNamesProperty, columnTypesProperty, 0, 1L, 4L);
  } finally {
    connection.close();
  }
}
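The writeBatch helper called above is not part of this snippet. A minimal sketch of what such a helper could look like, built only from the streaming API already used in the test; the sample row contents and the number of writes per transaction are assumptions:

private void writeBatch(StreamingConnection connection, DelimitedInputWriter writer,
    boolean closeEarly) throws Exception {
  // Fetch a batch of two transactions from the streaming connection.
  TransactionBatch txnBatch = connection.fetchTransactionBatch(2, writer);
  txnBatch.beginNextTransaction();
  // Sample row matching the (a INT, b STRING) schema; actual test data is an assumption.
  txnBatch.write("50,Kiev".getBytes());
  txnBatch.commit();
  if (!closeEarly) {
    // Use up the second transaction and close the batch cleanly.
    txnBatch.beginNextTransaction();
    txnBatch.write("51,Boston".getBytes());
    txnBatch.commit();
    txnBatch.close();
  }
}

With closeEarly set, the batch would be left open, which other tests in this class presumably use to simulate a writer that died mid-batch.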
Use of org.apache.hadoop.hive.metastore.txn.TxnStore in project hive by apache: the class TestTxnCommands2, method writeBetweenWorkerAndCleanerForVariousTblProperties.
protected void writeBetweenWorkerAndCleanerForVariousTblProperties(String tblProperties) throws Exception {
  String tblName = "hive12352";
  runStatementOnDriver("drop table if exists " + tblName);
  runStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " + //currently ACID requires table to be bucketed
      " CLUSTERED BY(a) INTO 1 BUCKETS" + " STORED AS ORC TBLPROPERTIES ( " + tblProperties + " )");
  // create some data
  runStatementOnDriver("insert into " + tblName + " values(1, 'foo'),(2, 'bar'),(3, 'baz')");
  runStatementOnDriver("update " + tblName + " set b = 'blah' where a = 3");
  // run Worker to execute compaction
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MINOR));
  Worker t = new Worker();
  t.setThreadId((int) t.getId());
  t.setHiveConf(hiveConf);
  AtomicBoolean stop = new AtomicBoolean(true);
  AtomicBoolean looped = new AtomicBoolean();
  t.init(stop, looped);
  t.run();
  // delete something, but make sure the txn is rolled back
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true);
  runStatementOnDriver("delete from " + tblName + " where a = 1");
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false);
  List<String> expected = new ArrayList<>();
  expected.add("1\tfoo");
  expected.add("2\tbar");
  expected.add("3\tblah");
  Assert.assertEquals("", expected, runStatementOnDriver("select a,b from " + tblName + " order by a"));
  // run Cleaner
  Cleaner c = new Cleaner();
  c.setThreadId((int) c.getId());
  c.setHiveConf(hiveConf);
  c.init(stop, new AtomicBoolean());
  c.run();
  // this seems odd, but we want to make sure that CompactionTxnHandler.cleanEmptyAbortedTxns() gets run
  Initiator i = new Initiator();
  i.setThreadId((int) i.getId());
  i.setHiveConf(hiveConf);
  i.init(stop, new AtomicBoolean());
  i.run();
  // check that the aborted operation didn't become committed
  Assert.assertEquals("", expected, runStatementOnDriver("select a,b from " + tblName + " order by a"));
}
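The Worker, Cleaner, and Initiator are all driven the same way: set a thread id, set the conf, init with a stop flag already set to true so the thread exits after a single pass, then run. A hypothetical helper (not in the original tests) that factors this out, assuming the three classes share the CompactorThread base class with the setters used above:

// Hypothetical helper: run a compactor thread (Worker, Cleaner or Initiator)
// for exactly one pass, then return.
private static void runCompactorPass(CompactorThread thread, HiveConf conf) throws Exception {
  thread.setThreadId((int) thread.getId());
  thread.setHiveConf(conf);
  // stop == true makes the thread's loop exit after one iteration
  thread.init(new AtomicBoolean(true), new AtomicBoolean());
  thread.run();
}

With it, the sequence above would collapse to runCompactorPass(new Worker(), hiveConf), runCompactorPass(new Cleaner(), hiveConf), and so on.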
Use of org.apache.hadoop.hive.metastore.txn.TxnStore in project hive by apache: the class TestTxnCommands2, method testCompactWithDelete.
@Test
public void testCompactWithDelete() throws Exception {
  int[][] tableData = { { 1, 2 }, { 3, 4 } };
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData));
  runStatementOnDriver("alter table " + Table.ACIDTBL + " compact 'MAJOR'");
  Worker t = new Worker();
  t.setThreadId((int) t.getId());
  t.setHiveConf(hiveConf);
  AtomicBoolean stop = new AtomicBoolean();
  AtomicBoolean looped = new AtomicBoolean();
  stop.set(true);
  t.init(stop, looped);
  t.run();
  runStatementOnDriver("delete from " + Table.ACIDTBL + " where b = 4");
  runStatementOnDriver("update " + Table.ACIDTBL + " set b = -2 where b = 2");
  runStatementOnDriver("alter table " + Table.ACIDTBL + " compact 'MINOR'");
  t.run();
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 2, resp.getCompactsSize());
  Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
  Assert.assertEquals("Unexpected 1 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(1).getState());
}
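When the state assertions fail, it helps to see the whole compaction history first. A small diagnostic loop over the response, using only the Thrift-generated ShowCompactResponseElement accessors:

// Dump every compaction entry (db.table, type, state) before asserting.
for (ShowCompactResponseElement e : resp.getCompacts()) {
  System.out.println(e.getDbname() + "." + e.getTablename()
      + " type=" + e.getType() + " state=" + e.getState());
}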
Use of org.apache.hadoop.hive.metastore.txn.TxnStore in project hive by apache: the class TestTxnCommands2, method testOpenTxnsCounter.
@Test
public void testOpenTxnsCounter() throws Exception {
  hiveConf.setIntVar(HiveConf.ConfVars.HIVE_MAX_OPEN_TXNS, 3);
  hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_COUNT_OPEN_TXNS_INTERVAL, 10, TimeUnit.MILLISECONDS);
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  OpenTxnsResponse openTxnsResponse = txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost"));
  AcidOpenTxnsCounterService openTxnsCounterService = new AcidOpenTxnsCounterService();
  // will update the current number of open txns to 3
  runHouseKeeperService(openTxnsCounterService, hiveConf);
  MetaException exception = null;
  // this should fail once it finds out the threshold has been reached
  try {
    txnHandler.openTxns(new OpenTxnRequest(1, "you", "localhost"));
  } catch (MetaException e) {
    exception = e;
  }
  Assert.assertNotNull("Opening new transaction shouldn't be allowed", exception);
  Assert.assertTrue(exception.getMessage().equals("Maximum allowed number of open transactions has been reached. See hive.max.open.txns."));
  // commit the open transactions so that new ones are allowed again
  for (long txnid : openTxnsResponse.getTxn_ids()) {
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
  }
  // will update the current number of open txns back to 0
  runHouseKeeperService(openTxnsCounterService, hiveConf);
  exception = null;
  try {
    txnHandler.openTxns(new OpenTxnRequest(1, "him", "localhost"));
  } catch (MetaException e) {
    exception = e;
  }
  Assert.assertNull(exception);
}
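The try/catch-and-capture pattern around openTxns appears twice. A hypothetical helper (not in the original test) that wraps it, using only the TxnStore calls already shown:

// Hypothetical helper: try to open numTxns transactions; return the
// MetaException if the open-transaction limit rejects the request, else null.
private static MetaException tryOpenTxns(TxnStore txnHandler, int numTxns, String user) {
  try {
    txnHandler.openTxns(new OpenTxnRequest(numTxns, user, "localhost"));
    return null;
  } catch (MetaException e) {
    return e;
  }
}

The first check above would then read Assert.assertNotNull("Opening new transaction shouldn't be allowed", tryOpenTxns(txnHandler, 1, "you")), and the last one Assert.assertNull(tryOpenTxns(txnHandler, 1, "him")).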