Search in sources:

Example 1 with NotificationEventResponse

Use of org.apache.hadoop.hive.metastore.api.NotificationEventResponse in project hive by apache.

From the class TestDbNotificationListener, method alterIndex.

@Test
public void alterIndex() throws Exception {
    String indexName = "alterIndex";
    String dbName = "default";
    String tableName = "alterIndexTable";
    String indexTableName = tableName + "__" + indexName + "__";
    int startTime = (int) (System.currentTimeMillis() / 1000);
    List<FieldSchema> cols = new ArrayList<FieldSchema>();
    cols.add(new FieldSchema("col1", "int", ""));
    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
    Map<String, String> params = new HashMap<String, String>();
    params.put("key", "value");
    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
    Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null);
    // Event 1
    msClient.createTable(table);
    Index oldIndex = new Index(indexName, null, "default", tableName, startTime, startTime, indexTableName, sd, emptyParameters, false);
    Table oldIndexTable = new Table(indexTableName, dbName, "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null);
    // Event 2, 3
    // creates index and index table
    msClient.createIndex(oldIndex, oldIndexTable);
    Index newIndex = new Index(indexName, null, "default", tableName, startTime, startTime + 1, indexTableName, sd, emptyParameters, false);
    // Event 4
    msClient.alter_index(dbName, tableName, indexName, newIndex);
    // Get notifications from metastore
    NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(4, rsp.getEventsSize());
    NotificationEvent event = rsp.getEvents().get(3);
    assertEquals(firstEventId + 4, event.getEventId());
    assertTrue(event.getEventTime() >= startTime);
    assertEquals(EventType.ALTER_INDEX.toString(), event.getEventType());
    assertEquals(dbName, event.getDbName());
    // Parse the message field
    AlterIndexMessage alterIdxMsg = md.getAlterIndexMessage(event.getMessage());
    Index indexObj = alterIdxMsg.getIndexObjAfter();
    assertEquals(dbName, indexObj.getDbName());
    assertEquals(indexName, indexObj.getIndexName());
    assertEquals(tableName, indexObj.getOrigTableName());
    assertEquals(indexTableName, indexObj.getIndexTableName());
    assertTrue(indexObj.getCreateTime() < indexObj.getLastAccessTime());
    // When hive.metastore.transactional.event.listeners is set,
    // a failed event should not create a new notification
    DummyRawStoreFailEvent.setEventSucceed(false);
    try {
        msClient.alter_index(dbName, tableName, indexName, newIndex);
        fail("Error: alter index should've failed");
    } catch (Exception ex) {
    // expected
    }
    rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(4, rsp.getEventsSize());
}
Also used: Order (org.apache.hadoop.hive.metastore.api.Order), Table (org.apache.hadoop.hive.metastore.api.Table), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), ArrayList (java.util.ArrayList), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), Index (org.apache.hadoop.hive.metastore.api.Index), NotificationEvent (org.apache.hadoop.hive.metastore.api.NotificationEvent), NotificationEventResponse (org.apache.hadoop.hive.metastore.api.NotificationEventResponse), AlterIndexMessage (org.apache.hadoop.hive.metastore.messaging.AlterIndexMessage), Test (org.junit.Test)
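
The pattern underlying all of these tests is: remember an event id, perform metastore operations, then poll getNextNotification for everything logged after that id and inspect the returned NotificationEventResponse. A minimal stand-alone sketch of that loop, outside the test harness, might look like the following; the client construction and the starting event id are illustrative assumptions, not part of the test above.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;

public class NotificationPollSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical client setup; real code would reuse an existing IMetaStoreClient.
        HiveMetaStoreClient msClient = new HiveMetaStoreClient(new HiveConf());
        // Start from the current head of the notification log.
        long lastEventId = msClient.getCurrentNotificationEventId().getEventId();

        // maxEvents = 0 and filter = null, exactly as in the tests above:
        // fetch every outstanding event with no filtering.
        NotificationEventResponse rsp = msClient.getNextNotification(lastEventId, 0, null);
        for (NotificationEvent event : rsp.getEvents()) {
            System.out.println(event.getEventId() + " " + event.getEventType() + " "
                + event.getDbName() + "." + event.getTableName());
            lastEventId = event.getEventId();
        }
        msClient.close();
    }
}

The test above goes one step further and decodes event.getMessage() with a MessageDeserializer (the md field), which is how alterIdxMsg is obtained.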

Example 2 with NotificationEventResponse

Use of org.apache.hadoop.hive.metastore.api.NotificationEventResponse in project hive by apache.

From the class TestDbNotificationListener, method dropIndex.

@Test
public void dropIndex() throws Exception {
    String indexName = "dropIndex";
    String dbName = "default";
    String tableName = "dropIndexTable";
    String indexTableName = tableName + "__" + indexName + "__";
    int startTime = (int) (System.currentTimeMillis() / 1000);
    List<FieldSchema> cols = new ArrayList<FieldSchema>();
    cols.add(new FieldSchema("col1", "int", ""));
    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
    Map<String, String> params = new HashMap<String, String>();
    params.put("key", "value");
    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
    Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null);
    // Event 1
    msClient.createTable(table);
    Index index = new Index(indexName, null, "default", tableName, startTime, startTime, indexTableName, sd, emptyParameters, false);
    Table indexTable = new Table(indexTableName, dbName, "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null);
    // Event 2, 3 (index table and index)
    msClient.createIndex(index, indexTable);
    // Event 4 (drops index and indexTable)
    msClient.dropIndex(dbName, tableName, indexName, true);
    // Get notifications from metastore
    NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(4, rsp.getEventsSize());
    NotificationEvent event = rsp.getEvents().get(3);
    assertEquals(firstEventId + 4, event.getEventId());
    assertTrue(event.getEventTime() >= startTime);
    assertEquals(EventType.DROP_INDEX.toString(), event.getEventType());
    assertEquals(dbName, event.getDbName());
    // Parse the message field
    DropIndexMessage dropIdxMsg = md.getDropIndexMessage(event.getMessage());
    assertEquals(dbName, dropIdxMsg.getDB());
    assertEquals(indexName.toLowerCase(), dropIdxMsg.getIndexName());
    assertEquals(indexTableName.toLowerCase(), dropIdxMsg.getIndexTableName());
    assertEquals(tableName.toLowerCase(), dropIdxMsg.getOrigTableName());
    // When hive.metastore.transactional.event.listeners is set,
    // a failed event should not create a new notification
    index = new Index("dropIndexTable2", null, "default", tableName, startTime, startTime, "dropIndexTable__dropIndexTable2__", sd, emptyParameters, false);
    Table indexTable2 = new Table("dropIndexTable__dropIndexTable2__", dbName, "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null);
    msClient.createIndex(index, indexTable2);
    DummyRawStoreFailEvent.setEventSucceed(false);
    try {
        // drops index and indexTable
        msClient.dropIndex(dbName, tableName, "dropIndex2", true);
        fail("Error: drop index should've failed");
    } catch (Exception ex) {
    // expected
    }
    rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(6, rsp.getEventsSize());
}
Also used: Order (org.apache.hadoop.hive.metastore.api.Order), Table (org.apache.hadoop.hive.metastore.api.Table), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), ArrayList (java.util.ArrayList), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), Index (org.apache.hadoop.hive.metastore.api.Index), NotificationEvent (org.apache.hadoop.hive.metastore.api.NotificationEvent), NotificationEventResponse (org.apache.hadoop.hive.metastore.api.NotificationEventResponse), DropIndexMessage (org.apache.hadoop.hive.metastore.messaging.DropIndexMessage), Test (org.junit.Test)
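
Every call above passes null as the third argument of getNextNotification. That parameter is an IMetaStoreClient.NotificationFilter, which lets the caller keep only the events it cares about instead of post-filtering the whole response. Below is a hedged sketch of a filter that keeps only DROP_INDEX events for one database; the helper class and its parameters are illustrative assumptions, not taken from the test.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;

public class DropIndexEventFetcher {
    // Fetch only DROP_INDEX events for the given database, starting after lastEventId.
    public static NotificationEventResponse fetchDropIndexEvents(
            IMetaStoreClient client, long lastEventId, final String dbName) throws Exception {
        IMetaStoreClient.NotificationFilter dropIndexOnly = new IMetaStoreClient.NotificationFilter() {
            @Override
            public boolean accept(NotificationEvent event) {
                // Events the filter rejects are dropped from the response before it is returned.
                return "DROP_INDEX".equalsIgnoreCase(event.getEventType())
                        && dbName.equalsIgnoreCase(event.getDbName());
            }
        };
        // maxEvents = 0 requests all outstanding events, as in the tests above.
        return client.getNextNotification(lastEventId, 0, dropIndexOnly);
    }
}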

Example 3 with NotificationEventResponse

Use of org.apache.hadoop.hive.metastore.api.NotificationEventResponse in project hive by apache.

From the class TestReplicationScenarios, method testEventTypesForDynamicAddPartitionByInsert.

@Test
public void testEventTypesForDynamicAddPartitionByInsert() throws IOException {
    String name = testName.getMethodName();
    final String dbName = createDB(name, driver);
    String replDbName = dbName + "_dupe";
    run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver);
    Tuple bootstrap = bootstrapLoadAndVerify(dbName, replDbName);
    String[] ptn_data = new String[] { "ten" };
    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data[0] + "')", driver);
    // Inject a behaviour that rejects the event list (returns null) if an INSERT event is found.
    // Since the partition is added dynamically through the INSERT INTO command, only an
    // ADD_PARTITION event should be generated, not an INSERT event.
    BehaviourInjection<NotificationEventResponse, NotificationEventResponse> eventTypeValidator = new BehaviourInjection<NotificationEventResponse, NotificationEventResponse>() {

        @Nullable
        @Override
        public NotificationEventResponse apply(@Nullable NotificationEventResponse eventsList) {
            if (null != eventsList) {
                List<NotificationEvent> events = eventsList.getEvents();
                for (int i = 0; i < events.size(); i++) {
                    NotificationEvent event = events.get(i);
                    // Skip all the events that belong to other DBs/tables.
                    if (event.getDbName().equalsIgnoreCase(dbName)) {
                        if (event.getEventType().equalsIgnoreCase("INSERT")) {
                            // If an insert event is found, then return null hence no event is dumped.
                            LOG.error("Encountered INSERT event when it was not expected to");
                            return null;
                        }
                    }
                }
                injectionPathCalled = true;
            }
            return eventsList;
        }
    };
    InjectableBehaviourObjectStore.setGetNextNotificationBehaviour(eventTypeValidator);
    try {
        incrementalLoadAndVerify(dbName, replDbName);
        eventTypeValidator.assertInjectionsPerformed(true, false);
    } finally {
        // reset the behaviour
        InjectableBehaviourObjectStore.resetGetNextNotificationBehaviour();
    }
    verifyRun("SELECT a from " + replDbName + ".ptned where (b=1)", ptn_data, driverMirror);
}
Also used: NotificationEventResponse (org.apache.hadoop.hive.metastore.api.NotificationEventResponse), BehaviourInjection (org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection), NotificationEvent (org.apache.hadoop.hive.metastore.api.NotificationEvent), Nullable (javax.annotation.Nullable), SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint), SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint), SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint), SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint), Test (org.junit.Test)
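
Based only on what this example shows, the InjectableBehaviourObjectStore hook works like this: subclass BehaviourInjection with NotificationEventResponse as both input and output, do the validation inside apply, set injectionPathCalled once the hook has actually run, install it with setGetNextNotificationBehaviour, and always reset it in a finally block. The skeleton below is a compressed, hypothetical sketch of that pattern; the helper class and the Runnable parameter are assumptions for illustration.

import javax.annotation.Nullable;
import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore;
import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection;
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;

public class NotificationHookSketch {
    // Run an arbitrary replication action with a pass-through hook installed on getNextNotification.
    public static void runWithPassThroughHook(Runnable replicationAction) {
        BehaviourInjection<NotificationEventResponse, NotificationEventResponse> passThrough =
                new BehaviourInjection<NotificationEventResponse, NotificationEventResponse>() {

                    @Nullable
                    @Override
                    public NotificationEventResponse apply(@Nullable NotificationEventResponse events) {
                        if (events != null) {
                            // Placeholder for per-event checks; record that the hook actually ran.
                            injectionPathCalled = true;
                        }
                        // Returning the response unchanged leaves replication behaviour intact.
                        return events;
                    }
                };
        InjectableBehaviourObjectStore.setGetNextNotificationBehaviour(passThrough);
        try {
            replicationAction.run();
            // Same assertion as in the test above: the injected path must have been taken.
            passThrough.assertInjectionsPerformed(true, false);
        } finally {
            // Always restore the default behaviour so later callers see real notifications.
            InjectableBehaviourObjectStore.resetGetNextNotificationBehaviour();
        }
    }
}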

Example 4 with NotificationEventResponse

Use of org.apache.hadoop.hive.metastore.api.NotificationEventResponse in project hive by apache.

From the class TestReplicationOptimisedBootstrap, method testTargetEventIdWithNotificationsExpired.

@Test
public void testTargetEventIdWithNotificationsExpired() throws Throwable {
    // Do a cycle of bootstrap dump & load.
    List<String> withClause = ReplicationTestUtils.includeExternalTableClause(true);
    withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'");
    // Do a bootstrap cycle (A->B).
    primary.dump(primaryDbName, withClause);
    replica.load(replicatedDbName, primaryDbName, withClause);
    // Add some table & do the first incremental dump.
    primary.run("use " + primaryDbName)
        .run("create external table tablei1 (id int)")
        .run("create table tablem1 (id int)")
        .run("insert into table tablei1 values(1),(2),(3),(4)")
        .run("insert into table tablem1 values(5),(10),(15),(20)")
        .dump(primaryDbName, withClause);
    // Do the incremental load, and check everything is intact.
    replica.load(replicatedDbName, primaryDbName, withClause)
        .run("use " + replicatedDbName)
        .run("select id from tablei1")
        .verifyResults(new String[] { "1", "2", "3", "4" })
        .run("select id from tablem1")
        .verifyResults(new String[] { "5", "10", "15", "20" });
    // Record the latest notification event id on the target, just after replication.
    CurrentNotificationEventId notificationIdAfterRepl = replica.getCurrentNotificationEventId();
    // Inject a behaviour where some events are missing from the notification_log table.
    // This ensures the incremental dump doesn't get all events for replication.
    InjectableBehaviourObjectStore.BehaviourInjection<NotificationEventResponse, NotificationEventResponse> eventIdSkipper = new InjectableBehaviourObjectStore.BehaviourInjection<NotificationEventResponse, NotificationEventResponse>() {

        @Nullable
        @Override
        public NotificationEventResponse apply(@Nullable NotificationEventResponse eventIdList) {
            if (null != eventIdList) {
                List<NotificationEvent> eventIds = eventIdList.getEvents();
                List<NotificationEvent> outEventIds = new ArrayList<>();
                for (NotificationEvent event : eventIds) {
                    // Skip any event that belongs to the replicated (target) database.
                    if (event.getDbName().equalsIgnoreCase(replicatedDbName)) {
                        injectionPathCalled = true;
                        continue;
                    }
                    outEventIds.add(event);
                }
                // Return the new list
                return new NotificationEventResponse(outEventIds);
            } else {
                return null;
            }
        }
    };
    try {
        InjectableBehaviourObjectStore.setGetNextNotificationBehaviour(eventIdSkipper);
        // Prepare for reverse replication.
        DistributedFileSystem replicaFs = replica.miniDFSCluster.getFileSystem();
        Path newReplDir = new Path(replica.repldDir + "reverse01");
        replicaFs.mkdirs(newReplDir);
        withClause = ReplicationTestUtils.includeExternalTableClause(true);
        withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'");
        try {
            replica.dump(replicatedDbName, withClause);
            fail("Expected the dump to fail since the notification event is missing.");
        } catch (Exception e) {
        // Expected due to missing notification log entry.
        }
        // Check if there is a non-recoverable error or not.
        Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(newReplDir, replicatedDbName, replica.hiveConf);
        assertTrue(replicaFs.exists(nonRecoverablePath));
    } finally {
        // reset the behaviour
        InjectableBehaviourObjectStore.resetGetNextNotificationBehaviour();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), InjectableBehaviourObjectStore (org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore), ArrayList (java.util.ArrayList), NotificationEvent (org.apache.hadoop.hive.metastore.api.NotificationEvent), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), CurrentNotificationEventId (org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId), NotificationEventResponse (org.apache.hadoop.hive.metastore.api.NotificationEventResponse), Nullable (javax.annotation.Nullable), Test (org.junit.Test)
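
The test records notificationIdAfterRepl, the target's CurrentNotificationEventId just after replication, and then relies on the fact that events missing from the notification log make the next dump fail. Outside the test, the same two APIs can be combined to check whether a consumer's checkpoint is still usable. The helper below is a hypothetical sketch of such a check; the method, its parameters, and the interpretation of an empty response are assumptions, not part of the Hive API contract.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;

public class NotificationCheckpointSketch {
    // Hypothetical check: has anything been logged since the checkpoint, and can we still read it?
    public static boolean hasReadableEventsSince(IMetaStoreClient client, long checkpointEventId)
            throws Exception {
        CurrentNotificationEventId current = client.getCurrentNotificationEventId();
        if (current.getEventId() <= checkpointEventId) {
            // Nothing new has been logged since the checkpoint.
            return false;
        }
        // Ask for everything after the checkpoint. If the cleaner has already expired those
        // entries, the response may come back empty (or shorter than the id delta suggests).
        NotificationEventResponse rsp = client.getNextNotification(checkpointEventId, 0, null);
        return rsp.getEventsSize() > 0;
    }
}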

Example 5 with NotificationEventResponse

Use of org.apache.hadoop.hive.metastore.api.NotificationEventResponse in project hive by apache.

From the class TestDbNotificationListener, method dropPartition.

@Test
public void dropPartition() throws Exception {
    String defaultDbName = "default";
    String tblName = "dropptn";
    String tblOwner = "me";
    String serdeLocation = testTempDir;
    FieldSchema col1 = new FieldSchema("col1", "int", "no comment");
    List<FieldSchema> cols = new ArrayList<FieldSchema>();
    cols.add(col1);
    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
    StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serde, null, null, emptyParameters);
    FieldSchema partCol1 = new FieldSchema("ds", "string", "no comment");
    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
    List<String> partCol1Vals = Arrays.asList("today");
    partCols.add(partCol1);
    Table table = new Table(tblName, defaultDbName, tblOwner, startTime, startTime, 0, sd, partCols, emptyParameters, null, null, null);
    // Event 1
    msClient.createTable(table);
    Partition partition = new Partition(partCol1Vals, defaultDbName, tblName, startTime, startTime, sd, emptyParameters);
    // Event 2
    msClient.add_partition(partition);
    // Event 3
    msClient.dropPartition(defaultDbName, tblName, partCol1Vals, false);
    // Get notifications from metastore
    NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(3, rsp.getEventsSize());
    NotificationEvent event = rsp.getEvents().get(2);
    assertEquals(firstEventId + 3, event.getEventId());
    assertTrue(event.getEventTime() >= startTime);
    assertEquals(EventType.DROP_PARTITION.toString(), event.getEventType());
    assertEquals(defaultDbName, event.getDbName());
    assertEquals(tblName, event.getTableName());
    // Parse the message field
    DropPartitionMessage dropPtnMsg = md.getDropPartitionMessage(event.getMessage());
    assertEquals(defaultDbName, dropPtnMsg.getDB());
    assertEquals(tblName, dropPtnMsg.getTable());
    Table tableObj = dropPtnMsg.getTableObj();
    assertEquals(table.getDbName(), tableObj.getDbName());
    assertEquals(table.getTableName(), tableObj.getTableName());
    assertEquals(table.getOwner(), tableObj.getOwner());
    assertEquals(TableType.MANAGED_TABLE.toString(), dropPtnMsg.getTableType());
    // Verify the eventID was passed to the non-transactional listener
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.DROP_PARTITION, firstEventId + 3);
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 2);
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
    // When hive.metastore.transactional.event.listeners is set,
    // a failed event should not create a new notification
    List<String> newpartCol1Vals = Arrays.asList("tomorrow");
    partition = new Partition(newpartCol1Vals, defaultDbName, tblName, startTime, startTime, sd, emptyParameters);
    msClient.add_partition(partition);
    DummyRawStoreFailEvent.setEventSucceed(false);
    try {
        msClient.dropPartition(defaultDbName, tblName, newpartCol1Vals, false);
        fail("Error: drop partition should've failed");
    } catch (Exception ex) {
    // expected
    }
    rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(4, rsp.getEventsSize());
    testEventCounts(defaultDbName, firstEventId, null, null, 4);
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), ArrayList (java.util.ArrayList), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), NotificationEvent (org.apache.hadoop.hive.metastore.api.NotificationEvent), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), NotificationEventResponse (org.apache.hadoop.hive.metastore.api.NotificationEventResponse), DropPartitionMessage (org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage), Test (org.junit.Test)
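
Every example on this page calls getNextNotification with maxEvents = 0, which in these tests returns all outstanding events in a single response. A consumer dealing with a large backlog would more likely page through the log in fixed-size batches. The loop below is a hypothetical sketch of that; the batch size, class name, and processing hook are illustrative assumptions.

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;

public class NotificationBatchReader {
    // Drain the notification log in batches of batchSize, returning the last event id seen.
    public static long drain(IMetaStoreClient client, long fromEventId, int batchSize)
            throws Exception {
        long lastSeen = fromEventId;
        while (true) {
            // Request at most batchSize events that were logged after lastSeen.
            NotificationEventResponse rsp = client.getNextNotification(lastSeen, batchSize, null);
            List<NotificationEvent> events = rsp.getEvents();
            if (events == null || events.isEmpty()) {
                break; // caught up with the log
            }
            for (NotificationEvent event : events) {
                // Process the event here, e.g. decode event.getMessage() with a MessageDeserializer.
                lastSeen = event.getEventId();
            }
        }
        return lastSeen;
    }
}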

Aggregations

NotificationEventResponse (org.apache.hadoop.hive.metastore.api.NotificationEventResponse): 44 uses
Test (org.junit.Test): 42 uses
NotificationEvent (org.apache.hadoop.hive.metastore.api.NotificationEvent): 34 uses
ArrayList (java.util.ArrayList): 15 uses
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 12 uses
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 11 uses
SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo): 11 uses
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor): 11 uses
Table (org.apache.hadoop.hive.metastore.api.Table): 11 uses
Database (org.apache.hadoop.hive.metastore.api.Database): 9 uses
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint): 5 uses
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint): 5 uses
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint): 5 uses
SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint): 5 uses
HashMap (java.util.HashMap): 4 uses
Nullable (javax.annotation.Nullable): 4 uses
CurrentNotificationEventId (org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId): 4 uses
LinkedHashMap (java.util.LinkedHashMap): 3 uses
Path (org.apache.hadoop.fs.Path): 3 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3 uses