Example 51 with SerDeInfo

Use of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project.

In the class TestDbNotificationListener, the method insertPartition:

@Test
public void insertPartition() throws Exception {
    String defaultDbName = "default";
    String tblName = "insertptn";
    String tblOwner = "me";
    String serdeLocation = "file:/tmp";
    String fileAdded = "/warehouse/mytable/b1";
    String checksumAdded = "1234";
    FieldSchema col1 = new FieldSchema("col1", "int", "no comment");
    List<FieldSchema> cols = new ArrayList<FieldSchema>();
    cols.add(col1);
    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
    StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serde, null, null, emptyParameters);
    FieldSchema partCol1 = new FieldSchema("ds", "string", "no comment");
    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
    List<String> partCol1Vals = Arrays.asList("today");
    List<String> partKeyVals = new ArrayList<String>();
    partKeyVals.add("today");
    partCols.add(partCol1);
    Table table = new Table(tblName, defaultDbName, tblOwner, startTime, startTime, 0, sd, partCols, emptyParameters, null, null, null);
    // Event 1
    msClient.createTable(table);
    Partition partition = new Partition(partCol1Vals, defaultDbName, tblName, startTime, startTime, sd, emptyParameters);
    // Event 2
    msClient.add_partition(partition);
    FireEventRequestData data = new FireEventRequestData();
    InsertEventRequestData insertData = new InsertEventRequestData();
    data.setInsertData(insertData);
    insertData.addToFilesAdded(fileAdded);
    insertData.addToFilesAddedChecksum(checksumAdded);
    FireEventRequest rqst = new FireEventRequest(true, data);
    rqst.setDbName(defaultDbName);
    rqst.setTableName(tblName);
    rqst.setPartitionVals(partCol1Vals);
    // Event 3
    msClient.fireListenerEvent(rqst);
    // Get notifications from metastore
    NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(3, rsp.getEventsSize());
    NotificationEvent event = rsp.getEvents().get(2);
    assertEquals(firstEventId + 3, event.getEventId());
    assertTrue(event.getEventTime() >= startTime);
    assertEquals(EventType.INSERT.toString(), event.getEventType());
    assertEquals(defaultDbName, event.getDbName());
    assertEquals(tblName, event.getTableName());
    // Parse the message field
    verifyInsert(event, defaultDbName, tblName);
    InsertMessage insertMessage = md.getInsertMessage(event.getMessage());
    List<String> ptnValues = insertMessage.getPtnObj().getValues();
    assertEquals(partKeyVals, ptnValues);
    // Verify the eventID was passed to the non-transactional listener
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.INSERT, firstEventId + 3);
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 2);
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), InsertMessage (org.apache.hadoop.hive.metastore.messaging.InsertMessage), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), ArrayList (java.util.ArrayList), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), NotificationEvent (org.apache.hadoop.hive.metastore.api.NotificationEvent), NotificationEventResponse (org.apache.hadoop.hive.metastore.api.NotificationEventResponse), FireEventRequestData (org.apache.hadoop.hive.metastore.api.FireEventRequestData), FireEventRequest (org.apache.hadoop.hive.metastore.api.FireEventRequest), InsertEventRequestData (org.apache.hadoop.hive.metastore.api.InsertEventRequestData), Test (org.junit.Test)
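
The three popAndVerifyLastEventId assertions above run in reverse order of the events (INSERT first, CREATE_TABLE last), which implies that MockMetaStoreEventListener records (event type, event id) pairs in LIFO order. A minimal sketch of that bookkeeping, assuming a stack; the class name, the EVENTS field, and the recordEvent helper are illustrative, not the actual Hive implementation:

import static org.junit.Assert.assertEquals;

import java.util.AbstractMap;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Map;

import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;

public class MockListenerSketch {
    private static final Deque<Map.Entry<EventType, Long>> EVENTS = new ArrayDeque<>();

    // Each listener callback pushes the (type, id) pair of the event it saw.
    static void recordEvent(EventType type, long eventId) {
        EVENTS.push(new AbstractMap.SimpleImmutableEntry<>(type, eventId));
    }

    // Tests pop in LIFO order, which is why INSERT is verified first and
    // CREATE_TABLE last in the example above.
    static void popAndVerifyLastEventId(EventType type, long expectedEventId) {
        Map.Entry<EventType, Long> last = EVENTS.pop();
        assertEquals(type, last.getKey());
        assertEquals(expectedEventId, last.getValue().longValue());
    }
}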

Example 52 with SerDeInfo

Use of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project.

In the class TestDbNotificationListener, the method addPartition:

@Test
public void addPartition() throws Exception {
    String defaultDbName = "default";
    String tblName = "addptn";
    String tblName2 = "addptn2";
    String tblOwner = "me";
    String serdeLocation = "file:/tmp";
    FieldSchema col1 = new FieldSchema("col1", "int", "no comment");
    List<FieldSchema> cols = new ArrayList<FieldSchema>();
    cols.add(col1);
    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
    StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serde, null, null, emptyParameters);
    FieldSchema partCol1 = new FieldSchema("ds", "string", "no comment");
    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
    List<String> partCol1Vals = Arrays.asList("today");
    partCols.add(partCol1);
    Table table = new Table(tblName, defaultDbName, tblOwner, startTime, startTime, 0, sd, partCols, emptyParameters, null, null, null);
    // Event 1
    msClient.createTable(table);
    Partition partition = new Partition(partCol1Vals, defaultDbName, tblName, startTime, startTime, sd, emptyParameters);
    // Event 2
    msClient.add_partition(partition);
    // Get notifications from metastore
    NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(2, rsp.getEventsSize());
    NotificationEvent event = rsp.getEvents().get(1);
    assertEquals(firstEventId + 2, event.getEventId());
    assertTrue(event.getEventTime() >= startTime);
    assertEquals(EventType.ADD_PARTITION.toString(), event.getEventType());
    assertEquals(defaultDbName, event.getDbName());
    assertEquals(tblName, event.getTableName());
    // Parse the message field
    AddPartitionMessage addPtnMsg = md.getAddPartitionMessage(event.getMessage());
    assertEquals(defaultDbName, addPtnMsg.getDB());
    assertEquals(tblName, addPtnMsg.getTable());
    Iterator<Partition> ptnIter = addPtnMsg.getPartitionObjs().iterator();
    assertTrue(ptnIter.hasNext());
    assertEquals(partition, ptnIter.next());
    assertEquals(TableType.MANAGED_TABLE.toString(), addPtnMsg.getTableType());
    // Verify the eventID was passed to the non-transactional listener
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 2);
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
    // When hive.metastore.transactional.event.listeners is set,
    // a failed event should not create a new notification
    partition = new Partition(Arrays.asList("tomorrow"), defaultDbName, tblName2, startTime, startTime, sd, emptyParameters);
    DummyRawStoreFailEvent.setEventSucceed(false);
    try {
        msClient.add_partition(partition);
        fail("Error: add partition should've failed");
    } catch (Exception ex) {
    // expected
    }
    rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(2, rsp.getEventsSize());
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), ArrayList (java.util.ArrayList), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), NotificationEvent (org.apache.hadoop.hive.metastore.api.NotificationEvent), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), NotificationEventResponse (org.apache.hadoop.hive.metastore.api.NotificationEventResponse), AddPartitionMessage (org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage), Test (org.junit.Test)
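
The failure-injection idiom at the end of this test (arm DummyRawStoreFailEvent, expect the metastore call to throw, then confirm the notification count did not grow) recurs in dropPartition below. A sketch of the idiom factored into a reusable helper; the helper name and the MetastoreCall interface are illustrative additions, while msClient and firstEventId are assumed to be the test fixture's fields:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;

// Illustrative functional interface for a metastore call that may throw.
interface MetastoreCall {
    void run() throws Exception;
}

// Arms the failing RawStore, expects the call to throw, restores the store,
// and verifies that no new notification event was recorded.
private void assertFailsWithoutNewNotification(MetastoreCall call, int expectedEvents)
        throws Exception {
    DummyRawStoreFailEvent.setEventSucceed(false);
    try {
        call.run();
        fail("Error: metastore call should've failed");
    } catch (Exception ex) {
        // expected: the injected failure aborts the operation
    } finally {
        DummyRawStoreFailEvent.setEventSucceed(true);
    }
    NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(expectedEvents, rsp.getEventsSize());
}

With it, the tail of this test becomes a single call: assertFailsWithoutNewNotification(() -> msClient.add_partition(partition), 2);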

Example 53 with SerDeInfo

Use of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project.

In the class TestDbNotificationListener, the method dropPartition:

@Test
public void dropPartition() throws Exception {
    String defaultDbName = "default";
    String tblName = "dropptn";
    String tblOwner = "me";
    String serdeLocation = "file:/tmp";
    FieldSchema col1 = new FieldSchema("col1", "int", "no comment");
    List<FieldSchema> cols = new ArrayList<FieldSchema>();
    cols.add(col1);
    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
    StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serde, null, null, emptyParameters);
    FieldSchema partCol1 = new FieldSchema("ds", "string", "no comment");
    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
    List<String> partCol1Vals = Arrays.asList("today");
    partCols.add(partCol1);
    Table table = new Table(tblName, defaultDbName, tblOwner, startTime, startTime, 0, sd, partCols, emptyParameters, null, null, null);
    // Event 1
    msClient.createTable(table);
    Partition partition = new Partition(partCol1Vals, defaultDbName, tblName, startTime, startTime, sd, emptyParameters);
    // Event 2
    msClient.add_partition(partition);
    // Event 3
    msClient.dropPartition(defaultDbName, tblName, partCol1Vals, false);
    // Get notifications from metastore
    NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(3, rsp.getEventsSize());
    NotificationEvent event = rsp.getEvents().get(2);
    assertEquals(firstEventId + 3, event.getEventId());
    assertTrue(event.getEventTime() >= startTime);
    assertEquals(EventType.DROP_PARTITION.toString(), event.getEventType());
    assertEquals(defaultDbName, event.getDbName());
    assertEquals(tblName, event.getTableName());
    // Parse the message field
    DropPartitionMessage dropPtnMsg = md.getDropPartitionMessage(event.getMessage());
    assertEquals(defaultDbName, dropPtnMsg.getDB());
    assertEquals(tblName, dropPtnMsg.getTable());
    Table tableObj = dropPtnMsg.getTableObj();
    assertEquals(table.getDbName(), tableObj.getDbName());
    assertEquals(table.getTableName(), tableObj.getTableName());
    assertEquals(table.getOwner(), tableObj.getOwner());
    assertEquals(TableType.MANAGED_TABLE.toString(), dropPtnMsg.getTableType());
    // Verify the eventID was passed to the non-transactional listener
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.DROP_PARTITION, firstEventId + 3);
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 2);
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
    // When hive.metastore.transactional.event.listeners is set,
    // a failed event should not create a new notification
    List<String> newpartCol1Vals = Arrays.asList("tomorrow");
    partition = new Partition(newpartCol1Vals, defaultDbName, tblName, startTime, startTime, sd, emptyParameters);
    msClient.add_partition(partition);
    DummyRawStoreFailEvent.setEventSucceed(false);
    try {
        msClient.dropPartition(defaultDbName, tblName, newpartCol1Vals, false);
        fail("Error: drop partition should've failed");
    } catch (Exception ex) {
    // expected
    }
    rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(4, rsp.getEventsSize());
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), ArrayList (java.util.ArrayList), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), NotificationEvent (org.apache.hadoop.hive.metastore.api.NotificationEvent), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), NotificationEventResponse (org.apache.hadoop.hive.metastore.api.NotificationEventResponse), DropPartitionMessage (org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage), Test (org.junit.Test)
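
Examples 51 through 53 build SerDeInfo with its three-argument constructor (name, serialization library, parameters), while Example 54 below fills the same Thrift fields through setters. A minimal sketch, reusing the values from the examples above, showing that the two styles are interchangeable:

import static org.junit.Assert.assertEquals;

import java.util.HashMap;

import org.apache.hadoop.hive.metastore.api.SerDeInfo;

SerDeInfo viaConstructor = new SerDeInfo("serde", "seriallib", new HashMap<String, String>());

SerDeInfo viaSetters = new SerDeInfo();
viaSetters.setName("serde");
viaSetters.setSerializationLib("seriallib");
viaSetters.setParameters(new HashMap<String, String>());

// Thrift-generated classes implement value-based equals(), so both styles
// yield equal objects.
assertEquals(viaConstructor, viaSetters);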

Example 54 with SerDeInfo

Use of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project.

In the class TestHiveMetaTool, the method setUp:

@Override
protected void setUp() throws Exception {
    super.setUp();
    try {
        HiveConf hiveConf = new HiveConf(HiveMetaTool.class);
        client = new HiveMetaStoreClient(hiveConf);
        // Setup output stream to redirect output to
        os = new ByteArrayOutputStream();
        ps = new PrintStream(os);
        // create a dummy database and a couple of dummy tables
        Database db = new Database();
        db.setName(dbName);
        client.dropTable(dbName, tblName);
        client.dropTable(dbName, badTblName);
        dropDatabase(dbName);
        client.createDatabase(db);
        locationUri = db.getLocationUri();
        String avroUri = "hdfs://nn.example.com/warehouse/hive/ab.avsc";
        String badAvroUri = "hdfs:/hive";
        client.dropType(typeName);
        Type typ1 = new Type();
        typ1.setName(typeName);
        typ1.setFields(new ArrayList<FieldSchema>(2));
        typ1.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
        typ1.getFields().add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
        client.createType(typ1);
        Table tbl = new Table();
        tbl.setDbName(dbName);
        tbl.setTableName(tblName);
        Map<String, String> parameters = new HashMap<>();
        parameters.put(AvroSerdeUtils.SCHEMA_URL, avroUri);
        tbl.setParameters(parameters);
        StorageDescriptor sd = new StorageDescriptor();
        tbl.setSd(sd);
        sd.setCols(typ1.getFields());
        sd.setCompressed(false);
        sd.setNumBuckets(1);
        sd.setParameters(new HashMap<String, String>());
        sd.getParameters().put("test_param_1", "Use this for comments etc");
        sd.setBucketCols(new ArrayList<String>(2));
        sd.getBucketCols().add("name");
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName(tbl.getTableName());
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
        sd.getParameters().put(AvroSerdeUtils.SCHEMA_URL, avroUri);
        sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.avro.AvroSerDe.class.getName());
        sd.setInputFormat(AvroContainerInputFormat.class.getName());
        sd.setOutputFormat(AvroContainerOutputFormat.class.getName());
        tbl.setPartitionKeys(new ArrayList<FieldSchema>());
        client.createTable(tbl);
        // create a table with bad avro uri
        tbl = new Table();
        tbl.setDbName(dbName);
        tbl.setTableName(badTblName);
        sd = new StorageDescriptor();
        tbl.setSd(sd);
        sd.setCols(typ1.getFields());
        sd.setCompressed(false);
        sd.setNumBuckets(1);
        sd.setParameters(new HashMap<String, String>());
        sd.getParameters().put("test_param_1", "Use this for comments etc");
        sd.setBucketCols(new ArrayList<String>(2));
        sd.getBucketCols().add("name");
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName(tbl.getTableName());
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
        sd.getParameters().put(AvroSerdeUtils.SCHEMA_URL, badAvroUri);
        sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.avro.AvroSerDe.class.getName());
        sd.setInputFormat(AvroContainerInputFormat.class.getName());
        sd.setOutputFormat(AvroContainerOutputFormat.class.getName());
        tbl.setPartitionKeys(new ArrayList<FieldSchema>());
        client.createTable(tbl);
        client.close();
    } catch (Exception e) {
        System.err.println("Unable to setup the hive metatool test");
        System.err.println(StringUtils.stringifyException(e));
        throw new Exception(e);
    }
}
Also used: PrintStream (java.io.PrintStream), Table (org.apache.hadoop.hive.metastore.api.Table), HashMap (java.util.HashMap), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), ByteArrayOutputStream (java.io.ByteArrayOutputStream), AvroContainerInputFormat (org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat), InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException), Type (org.apache.hadoop.hive.metastore.api.Type), AvroContainerOutputFormat (org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat), Database (org.apache.hadoop.hive.metastore.api.Database), HiveConf (org.apache.hadoop.hive.conf.HiveConf)
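
setUp above builds two nearly identical Avro-backed tables that differ only in table name and schema URL. A sketch of how the shared construction could be factored out; the newAvroTable helper is illustrative and not part of TestHiveMetaTool:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;

// Illustrative helper: builds an Avro-backed table with the storage-descriptor
// settings that setUp above repeats for both the good and the bad table.
private Table newAvroTable(String dbName, String tblName,
        List<FieldSchema> cols, String schemaUrl) {
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(cols);
    sd.setCompressed(false);
    sd.setNumBuckets(1);
    sd.setParameters(new HashMap<String, String>());
    sd.getParameters().put("test_param_1", "Use this for comments etc");
    sd.getParameters().put(AvroSerdeUtils.SCHEMA_URL, schemaUrl);
    sd.setBucketCols(new ArrayList<String>(Arrays.asList("name")));
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tblName);
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
    sd.getSerdeInfo().setSerializationLib(AvroSerDe.class.getName());
    sd.setInputFormat(AvroContainerInputFormat.class.getName());
    sd.setOutputFormat(AvroContainerOutputFormat.class.getName());

    Table tbl = new Table();
    tbl.setDbName(dbName);
    tbl.setTableName(tblName);
    tbl.setSd(sd);
    tbl.setPartitionKeys(new ArrayList<FieldSchema>());
    tbl.setParameters(new HashMap<String, String>());
    tbl.getParameters().put(AvroSerdeUtils.SCHEMA_URL, schemaUrl);
    return tbl;
}

The two createTable calls then reduce to client.createTable(newAvroTable(dbName, tblName, typ1.getFields(), avroUri)) and client.createTable(newAvroTable(dbName, badTblName, typ1.getFields(), badAvroUri)).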

Example 55 with SerDeInfo

Use of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project.

In the class TestReplChangeManager, the method testRecyclePartTable:

@Test
public void testRecyclePartTable() throws Exception {
    // Create db1/t1/dt=20160101/part
    //              /dt=20160102/part
    //              /dt=20160103/part
    // Test: recycle single file (dt=20160101/part)
    //       recycle single partition (dt=20160102)
    //       recycle table t1
    String dbName = "db1";
    client.dropDatabase(dbName, true, true);
    Database db = new Database();
    db.setName(dbName);
    client.createDatabase(db);
    String tblName = "t1";
    List<FieldSchema> columns = new ArrayList<FieldSchema>();
    columns.add(new FieldSchema("foo", "string", ""));
    columns.add(new FieldSchema("bar", "string", ""));
    List<FieldSchema> partColumns = new ArrayList<FieldSchema>();
    partColumns.add(new FieldSchema("dt", "string", ""));
    SerDeInfo serdeInfo = new SerDeInfo("LBCSerDe", LazyBinaryColumnarSerDe.class.getCanonicalName(), new HashMap<String, String>());
    StorageDescriptor sd = new StorageDescriptor(columns, null, "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat", "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat", false, 0, serdeInfo, null, null, null);
    Map<String, String> tableParameters = new HashMap<String, String>();
    Table tbl = new Table(tblName, dbName, "", 0, 0, 0, sd, partColumns, tableParameters, "", "", "");
    client.createTable(tbl);
    List<String> values = Arrays.asList("20160101");
    Partition part1 = createPartition(dbName, tblName, columns, values, serdeInfo);
    client.add_partition(part1);
    values = Arrays.asList("20160102");
    Partition part2 = createPartition(dbName, tblName, columns, values, serdeInfo);
    client.add_partition(part2);
    values = Arrays.asList("20160103");
    Partition part3 = createPartition(dbName, tblName, columns, values, serdeInfo);
    client.add_partition(part3);
    Path part1Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160101")), "part");
    createFile(part1Path, "p1");
    String path1Chksum = ReplChangeManager.checksumFor(part1Path, fs);
    Path part2Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160102")), "part");
    createFile(part2Path, "p2");
    String path2Chksum = ReplChangeManager.checksumFor(part2Path, fs);
    Path part3Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160103")), "part");
    createFile(part3Path, "p3");
    String path3Chksum = ReplChangeManager.checksumFor(part3Path, fs);
    assertTrue(part1Path.getFileSystem(hiveConf).exists(part1Path));
    assertTrue(part2Path.getFileSystem(hiveConf).exists(part2Path));
    assertTrue(part3Path.getFileSystem(hiveConf).exists(part3Path));
    ReplChangeManager cm = ReplChangeManager.getInstance(hiveConf);
    // verify cm.recycle(db, table, part) api moves file to cmroot dir
    int ret = cm.recycle(part1Path, RecycleType.MOVE, false);
    Assert.assertEquals(1, ret);
    Path cmPart1Path = ReplChangeManager.getCMPath(hiveConf, part1Path.getName(), path1Chksum);
    assertTrue(cmPart1Path.getFileSystem(hiveConf).exists(cmPart1Path));
    // Verify dropPartition recycle part files
    client.dropPartition(dbName, tblName, Arrays.asList("20160102"));
    assertFalse(part2Path.getFileSystem(hiveConf).exists(part2Path));
    Path cmPart2Path = ReplChangeManager.getCMPath(hiveConf, part2Path.getName(), path2Chksum);
    assertTrue(cmPart2Path.getFileSystem(hiveConf).exists(cmPart2Path));
    // Verify dropTable recycle partition files
    client.dropTable(dbName, tblName);
    assertFalse(part3Path.getFileSystem(hiveConf).exists(part3Path));
    Path cmPart3Path = ReplChangeManager.getCMPath(hiveConf, part3Path.getName(), path3Chksum);
    assertTrue(cmPart3Path.getFileSystem(hiveConf).exists(cmPart3Path));
    client.dropDatabase(dbName, true, true);
}
Also used: Path (org.apache.hadoop.fs.Path), Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), HashMap (java.util.HashMap), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), ArrayList (java.util.ArrayList), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), Database (org.apache.hadoop.hive.metastore.api.Database), LazyBinaryColumnarSerDe (org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe), Test (org.junit.Test)
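
createPartition is called several times above but its body lies outside this excerpt. A plausible reconstruction, assuming it reuses the ORC input/output formats from the table's StorageDescriptor and the Thrift Partition constructor seen in Examples 51 through 53; the real TestReplChangeManager helper may differ:

import java.util.HashMap;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

// Plausible sketch of the createPartition helper used above (an assumption,
// not the actual Hive source).
private Partition createPartition(String dbName, String tblName,
        List<FieldSchema> columns, List<String> values, SerDeInfo serdeInfo) {
    StorageDescriptor sd = new StorageDescriptor(columns, null,
            "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
            "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat",
            false, 0, serdeInfo, null, null, null);
    // Constructor order, as in Examples 51-53: (values, dbName, tableName,
    // createTime, lastAccessTime, sd, parameters)
    return new Partition(values, dbName, tblName, 0, 0, sd,
            new HashMap<String, String>());
}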

Aggregations

Usage counts of classes that co-occur with SerDeInfo across the examples:

SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo): 152
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor): 137
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 115
Table (org.apache.hadoop.hive.metastore.api.Table): 114
ArrayList (java.util.ArrayList): 112
Test (org.junit.Test): 105
Partition (org.apache.hadoop.hive.metastore.api.Partition): 65
HashMap (java.util.HashMap): 44
ColumnStatistics (org.apache.hadoop.hive.metastore.api.ColumnStatistics): 31
ColumnStatisticsData (org.apache.hadoop.hive.metastore.api.ColumnStatisticsData): 31
ColumnStatisticsDesc (org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc): 31
ColumnStatisticsObj (org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj): 31
AggrStats (org.apache.hadoop.hive.metastore.api.AggrStats): 30
List (java.util.List): 26
Order (org.apache.hadoop.hive.metastore.api.Order): 25
Database (org.apache.hadoop.hive.metastore.api.Database): 22
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 14
MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest): 13
LongColumnStatsData (org.apache.hadoop.hive.metastore.api.LongColumnStatsData): 13
NotificationEvent (org.apache.hadoop.hive.metastore.api.NotificationEvent): 12