Example 41 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache, from the class TestInitiator, method twoTxnsOnSamePartitionGenerateOneCompactionRequest.

@Test
public void twoTxnsOnSamePartitionGenerateOneCompactionRequest() throws Exception {
    Table t = newTable("default", "ttospgocr", true);
    Partition p = newPartition(t, "today");
    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 22L, 2);
    addDeltaFile(t, p, 23L, 24L, 2);
    burnThroughTransactions("default", "ttospgocr", 23);
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
    comp.setTablename("ttospgocr");
    comp.setPartitionname("ds=today");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    long writeid = allocateWriteId("default", "ttospgocr", txnid);
    Assert.assertEquals(24, writeid);
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
    txnid = openTxn();
    comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
    comp.setTablename("ttospgocr");
    comp.setPartitionname("ds=today");
    comp.setOperationType(DataOperationType.UPDATE);
    components = new ArrayList<LockComponent>(1);
    components.add(comp);
    req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    res = txnHandler.lock(req);
    writeid = allocateWriteId("default", "ttospgocr", txnid);
    Assert.assertEquals(25, writeid);
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
    startInitiator();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("initiated", compacts.get(0).getState());
    Assert.assertEquals("ttospgocr", compacts.get(0).getTablename());
    Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
    Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
Also used: CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest), Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent), ArrayList (java.util.ArrayList), LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse), ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest), LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest), ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement), Test (org.junit.Test)

Example 42 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache, from the class TestInitiator, method enoughDeltasNoBase.

@Test
public void enoughDeltasNoBase() throws Exception {
    Table t = newTable("default", "ednb", true);
    Partition p = newPartition(t, "today");
    addDeltaFile(t, p, 1L, 201L, 200);
    addDeltaFile(t, p, 202L, 202L, 1);
    addDeltaFile(t, p, 203L, 203L, 1);
    addDeltaFile(t, p, 204L, 204L, 1);
    addDeltaFile(t, p, 205L, 205L, 1);
    addDeltaFile(t, p, 206L, 206L, 1);
    addDeltaFile(t, p, 207L, 207L, 1);
    addDeltaFile(t, p, 208L, 208L, 1);
    addDeltaFile(t, p, 209L, 209L, 1);
    addDeltaFile(t, p, 210L, 210L, 1);
    addDeltaFile(t, p, 211L, 211L, 1);
    burnThroughTransactions("default", "ednb", 210);
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
    comp.setTablename("ednb");
    comp.setPartitionname("ds=today");
    comp.setOperationType(DataOperationType.DELETE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    long writeid = allocateWriteId("default", "ednb", txnid);
    Assert.assertEquals(211, writeid);
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
    startInitiator();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("initiated", compacts.get(0).getState());
    Assert.assertEquals("ednb", compacts.get(0).getTablename());
    Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
    Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
Also used: CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest), Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent), ArrayList (java.util.ArrayList), LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse), ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest), LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest), ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement), Test (org.junit.Test)

Example 43 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache, from the class TestInitiator, method chooseMajorOverMinorWhenBothValid.

@Test
public void chooseMajorOverMinorWhenBothValid() throws Exception {
    Table t = newTable("default", "cmomwbv", false);
    addBaseFile(t, null, 200L, 200);
    addDeltaFile(t, null, 201L, 211L, 11);
    addDeltaFile(t, null, 212L, 222L, 11);
    addDeltaFile(t, null, 223L, 233L, 11);
    addDeltaFile(t, null, 234L, 244L, 11);
    addDeltaFile(t, null, 245L, 255L, 11);
    addDeltaFile(t, null, 256L, 266L, 11);
    addDeltaFile(t, null, 267L, 277L, 11);
    addDeltaFile(t, null, 278L, 288L, 11);
    addDeltaFile(t, null, 289L, 299L, 11);
    addDeltaFile(t, null, 300L, 310L, 11);
    addDeltaFile(t, null, 311L, 321L, 11);
    burnThroughTransactions("default", "cmomwbv", 320);
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("cmomwbv");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    long writeid = allocateWriteId("default", "cmomwbv", txnid);
    Assert.assertEquals(321, writeid);
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
    startInitiator();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("initiated", compacts.get(0).getState());
    Assert.assertEquals("cmomwbv", compacts.get(0).getTablename());
    Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
Also used: CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest), Table (org.apache.hadoop.hive.metastore.api.Table), LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent), LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse), ArrayList (java.util.ArrayList), ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest), LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest), ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement), Test (org.junit.Test)

Example 44 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache, from the class TestCompactionMetrics, method testInitiatorDurationMeasuredCorrectly.

@Test
public void testInitiatorDurationMeasuredCorrectly() throws Exception {
    final String DEFAULT_DB = "default";
    final String TABLE_NAME = "x_table";
    final String PARTITION_NAME = "part";
    List<LockComponent> components = new ArrayList<>();
    Table table = newTable(DEFAULT_DB, TABLE_NAME, true);
    for (int i = 0; i < 10; i++) {
        String partitionName = PARTITION_NAME + i;
        Partition p = newPartition(table, partitionName);
        addBaseFile(table, p, 20L, 20);
        addDeltaFile(table, p, 21L, 22L, 2);
        addDeltaFile(table, p, 23L, 24L, 2);
        addDeltaFile(table, p, 21L, 24L, 4);
        LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, DEFAULT_DB);
        comp.setTablename(TABLE_NAME);
        comp.setPartitionname("ds=" + partitionName);
        comp.setOperationType(DataOperationType.UPDATE);
        components.add(comp);
    }
    burnThroughTransactions(DEFAULT_DB, TABLE_NAME, 25);
    long txnId = openTxn();
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnId);
    LockResponse res = txnHandler.lock(req);
    Assert.assertEquals(LockState.ACQUIRED, res.getState());
    allocateWriteId(DEFAULT_DB, TABLE_NAME, txnId);
    txnHandler.commitTxn(new CommitTxnRequest(txnId));
    long initiatorStart = System.currentTimeMillis();
    startInitiator();
    long durationUpperLimit = System.currentTimeMillis() - initiatorStart;
    int initiatorDurationFromMetric = Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_INITIATOR_CYCLE_DURATION).intValue();
    Assert.assertTrue("Initiator duration must be within the limits", (0 < initiatorDurationFromMetric) && (initiatorDurationFromMetric <= durationUpperLimit));
}
Also used: CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest), Partition (org.apache.hadoop.hive.metastore.api.Partition), LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent), Table (org.apache.hadoop.hive.metastore.api.Table), LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), ArrayList (java.util.ArrayList), LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest), Test (org.junit.Test)

Example 45 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache, from the class TestCompactionMetrics, method testInitiatorPerfMetricsEnabled.

@Test
public void testInitiatorPerfMetricsEnabled() throws Exception {
    Metrics.getOrCreateGauge(INITIATED_METRICS_KEY).set(0);
    long initiatorCycles = Objects.requireNonNull(Metrics.getOrCreateTimer(INITIATOR_CYCLE_KEY)).getCount();
    Table t = newTable("default", "ime", true);
    List<LockComponent> components = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        Partition p = newPartition(t, "part" + (i + 1));
        addBaseFile(t, p, 20L, 20);
        addDeltaFile(t, p, 21L, 22L, 2);
        addDeltaFile(t, p, 23L, 24L, 2);
        LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
        comp.setTablename("ime");
        comp.setPartitionname("ds=part" + (i + 1));
        comp.setOperationType(DataOperationType.UPDATE);
        components.add(comp);
    }
    burnThroughTransactions("default", "ime", 23);
    long txnid = openTxn();
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    Assert.assertEquals(LockState.ACQUIRED, res.getState());
    long writeid = allocateWriteId("default", "ime", txnid);
    Assert.assertEquals(24, writeid);
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
    startInitiator();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(10, compacts.size());
    Assert.assertEquals(initiatorCycles + 1, Objects.requireNonNull(Metrics.getOrCreateTimer(INITIATOR_CYCLE_KEY)).getCount());
    runAcidMetricService();
    Assert.assertEquals(10, Metrics.getOrCreateGauge(INITIATED_METRICS_KEY).intValue());
}
Also used: CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest), Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent), ArrayList (java.util.ArrayList), LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse), ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest), LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest), ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement), Test (org.junit.Test)

Aggregations

CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest): 46
Test (org.junit.Test): 41
ArrayList (java.util.ArrayList): 27
LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent): 27
LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest): 27
LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse): 27
Table (org.apache.hadoop.hive.metastore.api.Table): 26
ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest): 22
ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse): 22
Partition (org.apache.hadoop.hive.metastore.api.Partition): 16
ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement): 16
OpenTxnRequest (org.apache.hadoop.hive.metastore.api.OpenTxnRequest): 10
GetOpenTxnsResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse): 7
OpenTxnsResponse (org.apache.hadoop.hive.metastore.api.OpenTxnsResponse): 6
AbortTxnRequest (org.apache.hadoop.hive.metastore.api.AbortTxnRequest): 5
ValidTxnList (org.apache.hadoop.hive.common.ValidTxnList): 4
AllocateTableWriteIdsRequest (org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest): 4
AllocateTableWriteIdsResponse (org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse): 4
CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest): 3
GetOpenTxnsInfoResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse): 3
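
The aggregation above reflects the pattern that repeats through these examples: CommitTxnRequest is almost always paired with LockComponent, LockRequest, and a write-id allocation before the commit. Below is a minimal sketch of that recurring flow; it is not code from the Hive repository. It assumes the same test scaffolding as the examples above (the txnHandler field plus the openTxn and allocateWriteId helpers come from the compactor test base class), the table name is hypothetical, and the required imports match the "Also used" lists above (LockType, LockLevel, LockState, and DataOperationType also live in org.apache.hadoop.hive.metastore.api).

@Test
public void commitTxnFlowSketch() throws Exception {
    // Open a transaction (the helper wraps the metastore open-txns call).
    long txnid = openTxn();
    // Declare a SHARED_WRITE lock on the partition being updated.
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
    comp.setTablename("some_table");
    comp.setPartitionname("ds=today");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    Assert.assertEquals(LockState.ACQUIRED, res.getState());
    // Allocate a table write id for this transaction (the helper wraps the
    // allocate-table-write-ids call reflected in the aggregation above).
    long writeid = allocateWriteId("default", "some_table", txnid);
    // Commit the transaction; this is the CommitTxnRequest usage shared by
    // every example on this page. Once committed, the Initiator can consider
    // the new delta files for compaction.
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
}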