Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in project hive by apache.
In class TestWorker, method majorPartitionWithBase:
@Test
public void majorPartitionWithBase() throws Exception {
  LOG.debug("Starting majorPartitionWithBase");
  Table t = newTable("default", "mapwb", true);
  Partition p = newPartition(t, "today");
  addBaseFile(t, p, 20L, 20);
  addDeltaFile(t, p, 21L, 22L, 2);
  addDeltaFile(t, p, 23L, 24L, 2);
  burnThroughTransactions("default", "mapwb", 25);
  CompactionRequest rqst = new CompactionRequest("default", "mapwb", CompactionType.MAJOR);
  rqst.setPartitionname("ds=today");
  txnHandler.compact(rqst);
  startWorker();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
  // There should still be four directories in the location.
  FileSystem fs = FileSystem.get(conf);
  FileStatus[] stat = fs.listStatus(new Path(p.getSd().getLocation()));
  Assert.assertEquals(4, stat.length);
  // Find the new base directory and make sure it has the right contents
  boolean sawNewBase = false;
  for (int i = 0; i < stat.length; i++) {
    if (stat[i].getPath().getName().equals("base_0000024")) {
      sawNewBase = true;
      FileStatus[] buckets = fs.listStatus(stat[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
      Assert.assertEquals(2, buckets.length);
      Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
      Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
      Assert.assertEquals(624L, buckets[0].getLen());
      Assert.assertEquals(624L, buckets[1].getLen());
    } else {
      LOG.debug("This is not the file you are looking for " + stat[i].getPath().getName());
    }
  }
  Assert.assertTrue(sawNewBase);
}
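The pattern above — submit an empty ShowCompactRequest and scan the returned elements for an expected state — recurs across these tests. A minimal sketch of how it could be factored into a helper, assuming the same TxnStore-style txnHandler the test uses; the helper name is illustrative, not part of Hive:

// Illustrative helper (not in TestWorker): true if any queued or completed
// compaction reported by the metastore is in the given state.
private static boolean anyCompactionInState(TxnStore txnHandler, String state) throws MetaException {
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  for (ShowCompactResponseElement e : rsp.getCompacts()) {
    if (state.equals(e.getState())) {
      return true;
    }
  }
  return false;
}

A test could then assert anyCompactionInState(txnHandler, "ready for cleaning") instead of repeating the size-and-state checks inline.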
Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in project hive by apache.
In class TestWorker, method majorPartitionWithBaseMissingBuckets:
@Test
public void majorPartitionWithBaseMissingBuckets() throws Exception {
  LOG.debug("Starting majorPartitionWithBaseMissingBuckets");
  Table t = newTable("default", "mapwbmb", true);
  Partition p = newPartition(t, "today");
  addBaseFile(t, p, 20L, 20, 2, false);
  addDeltaFile(t, p, 21L, 22L, 2, 2, false);
  addDeltaFile(t, p, 23L, 26L, 4);
  burnThroughTransactions("default", "mapwbmb", 27);
  CompactionRequest rqst = new CompactionRequest("default", "mapwbmb", CompactionType.MAJOR);
  rqst.setPartitionname("ds=today");
  txnHandler.compact(rqst);
  startWorker();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
  // There should still be four directories in the location.
  FileSystem fs = FileSystem.get(conf);
  FileStatus[] stat = fs.listStatus(new Path(p.getSd().getLocation()));
  Assert.assertEquals(4, stat.length);
  // Find the new base directory and make sure it has the right contents
  boolean sawNewBase = false;
  for (int i = 0; i < stat.length; i++) {
    if (stat[i].getPath().getName().equals("base_0000026")) {
      sawNewBase = true;
      FileStatus[] buckets = fs.listStatus(stat[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
      Assert.assertEquals(2, buckets.length);
      Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]"));
      Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]"));
      // Bucket 0 should be small and bucket 1 should be large; make sure that's the case.
      Assert.assertTrue(
          ("bucket_00000".equals(buckets[0].getPath().getName()) && 104L == buckets[0].getLen()
              && "bucket_00001".equals(buckets[1].getPath().getName()) && 676L == buckets[1].getLen())
          || ("bucket_00000".equals(buckets[1].getPath().getName()) && 104L == buckets[1].getLen()
              && "bucket_00001".equals(buckets[0].getPath().getName()) && 676L == buckets[0].getLen()));
    } else {
      LOG.debug("This is not the file you are looking for " + stat[i].getPath().getName());
    }
  }
  Assert.assertTrue(sawNewBase);
}
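Both tests locate the freshly written base directory with the same linear scan over the listStatus result. A possible extraction, sketched with illustrative names (this helper is not in the Hive source):

// Illustrative helper: returns the entry whose final path component equals
// dirName, or null if compaction did not produce that directory.
private static FileStatus findByName(FileStatus[] entries, String dirName) {
  for (FileStatus entry : entries) {
    if (entry.getPath().getName().equals(dirName)) {
      return entry;
    }
  }
  return null;
}

With this, the sawNewBase flag collapses to a null check on the returned FileStatus, and the bucket assertions run against that single entry.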
Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponse in project hive by apache.
In class TxnHandler, method showCompact:
@RetrySemantics.ReadOnly
public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException {
  ShowCompactResponse response = new ShowCompactResponse(new ArrayList<>());
  Connection dbConn = null;
  Statement stmt = null;
  try {
    try {
      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      stmt = dbConn.createStatement();
      // -1 because the 'null' literal doesn't work for all DBs...
      String s = "select cq_database, cq_table, cq_partition, cq_state, cq_type, cq_worker_id, " +
          "cq_start, -1 cc_end, cq_run_as, cq_hadoop_job_id, cq_id from COMPACTION_QUEUE union all " +
          "select cc_database, cc_table, cc_partition, cc_state, cc_type, cc_worker_id, " +
          "cc_start, cc_end, cc_run_as, cc_hadoop_job_id, cc_id from COMPLETED_COMPACTIONS";
      // What we want is "order by cc_end desc, cc_start asc" (but Derby has a bug,
      // https://issues.apache.org/jira/browse/DERBY-6013): sort so that currently running
      // jobs are at the end of the list (bottom of the screen) and are themselves sorted
      // by start time. Without an order by, the currently running compactions will likely
      // come first (the LHS of the union).
LOG.debug("Going to execute query <" + s + ">");
ResultSet rs = stmt.executeQuery(s);
while (rs.next()) {
ShowCompactResponseElement e = new ShowCompactResponseElement();
e.setDbname(rs.getString(1));
e.setTablename(rs.getString(2));
e.setPartitionname(rs.getString(3));
e.setState(compactorStateToResponse(rs.getString(4).charAt(0)));
switch(rs.getString(5).charAt(0)) {
case MAJOR_TYPE:
e.setType(CompactionType.MAJOR);
break;
case MINOR_TYPE:
e.setType(CompactionType.MINOR);
break;
default:
}
e.setWorkerid(rs.getString(6));
long start = rs.getLong(7);
if (!rs.wasNull()) {
e.setStart(start);
}
long endTime = rs.getLong(8);
if (endTime != -1) {
e.setEndTime(endTime);
}
e.setRunAs(rs.getString(9));
e.setHadoopJobId(rs.getString(10));
e.setId(rs.getLong(11));
response.addToCompacts(e);
}
LOG.debug("Going to rollback");
dbConn.rollback();
} catch (SQLException e) {
LOG.debug("Going to rollback");
rollbackDBConn(dbConn);
checkRetryable(dbConn, e, "showCompact(" + rqst + ")");
throw new MetaException("Unable to select from transaction database " + StringUtils.stringifyException(e));
} finally {
closeStmt(stmt);
closeDbConn(dbConn);
}
return response;
} catch (RetryException e) {
return showCompact(rqst);
}
}
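On the client side, the response assembled above is consumed through the element getters. A minimal sketch of a caller, assuming a txnHandler as in the tests; the output formatting is illustrative, not taken from the Hive source:

// Illustrative consumer of ShowCompactResponse; the getters are the Hive
// metastore API, the printf layout is an assumption for demonstration.
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
for (ShowCompactResponseElement e : rsp.getCompacts()) {
  System.out.printf("%s.%s (%s): type=%s state=%s id=%d%n",
      e.getDbname(), e.getTablename(), e.getPartitionname(),
      e.getType(), e.getState(), e.getId());
}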