Usage of org.apache.hadoop.hive.metastore.api.NotificationEventResponse in project hive by apache: class TestReplicationOptimisedBootstrap, method testTargetEventIdGeneration.
@Test
public void testTargetEventIdGeneration() throws Throwable {
    // Run one full bootstrap dump & load cycle (A -> B) with external tables enabled.
    List<String> withClause = ReplicationTestUtils.includeExternalTableClause(true);
    withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'");
    primary.dump(primaryDbName, withClause);
    replica.load(replicatedDbName, primaryDbName, withClause);

    // Create a mix of external & managed tables, then take the first incremental dump.
    primary.run("use " + primaryDbName)
        .run("create external table tablei1 (id int)")
        .run("create external table tablei2 (id int)")
        .run("create table tablem1 (id int)")
        .run("create table tablem2 (id int)")
        .run("insert into table tablei1 values(1),(2),(3),(4)")
        .run("insert into table tablei2 values(10),(20),(30),(40)")
        .run("insert into table tablem1 values(5),(10),(15),(20)")
        .run("insert into table tablem2 values(6),(12),(18),(24)")
        .dump(primaryDbName, withClause);

    // Incremental load, then verify all four tables replicated intact.
    replica.load(replicatedDbName, primaryDbName, withClause)
        .run("use " + replicatedDbName)
        .run("select id from tablei1")
        .verifyResults(new String[] { "1", "2", "3", "4" })
        .run("select id from tablei2")
        .verifyResults(new String[] { "10", "20", "30", "40" })
        .run("select id from tablem1")
        .verifyResults(new String[] { "5", "10", "15", "20" })
        .run("select id from tablem2")
        .verifyResults(new String[] { "6", "12", "18", "24" });

    // More modifications on the source, followed by a second incremental dump.
    WarehouseInstance.Tuple incrementalTuple = primary.run("use " + primaryDbName)
        .run("create external table table1 (id int)")
        .run("insert into table table1 values (25),(35),(82)")
        .run("create table table1_managed (name string)")
        .run("insert into table table1_managed values ('CAD'),('DAS'),('MSA')")
        .run("insert into table tablei1 values(15),(62),(25),(62)")
        .run("insert into table tablei2 values(10),(22),(11),(22)")
        .run("insert into table tablem1 values(5),(10),(15),(20)")
        .run("alter table table1 set TBLPROPERTIES('comment'='abc')")
        .dump(primaryDbName, withClause);

    // Load the second incremental dump on the target.
    replica.load(replicatedDbName, primaryDbName, withClause);

    // Capture the target's latest notification id immediately after replication.
    CurrentNotificationEventId postReplEventId = replica.getCurrentNotificationEventId();

    // Verify repl status and the newly replicated tables on the target.
    replica.run("repl status " + replicatedDbName)
        .verifyResult(incrementalTuple.lastReplicationId)
        .run("use " + replicatedDbName)
        .run("select id from table1")
        .verifyResults(new String[] { "25", "35", "82" })
        .run("select name from table1_managed")
        .verifyResults(new String[] { "CAD", "DAS", "MSA" })
        .verifyReplTargetProperty(replicatedDbName);

    // Source-side changes so that the table diff has some entries.
    primary.run("use " + primaryDbName)
        .run("create table table2_managed (id string)")
        .run("insert into table table1_managed values ('AAA'),('BBB')")
        .run("insert into table table2_managed values ('A1'),('B1'),('C2')");

    // Touch an unrelated database too, so there are events after the last load
    // which the target-event-id lookup must filter out.
    primary.run("create database " + extraPrimaryDb)
        .run("use " + extraPrimaryDb)
        .run("create external table table1 (id int)")
        .run("insert into table table1 values (15),(1),(96)")
        .run("create table table1_managed (id string)")
        .run("insert into table table1_managed values ('SAA'),('PSA')");

    // Modifications on the target database itself generate events on the target side.
    replica.run("use " + replicatedDbName)
        .run("alter database " + replicatedDbName + " set DBPROPERTIES ('repl1'='value1')")
        .run("alter database " + replicatedDbName + " set DBPROPERTIES ('repl2'='value2')");

    // The target's current notification id must have moved past the captured one.
    assertNotEquals(replica.getCurrentNotificationEventId().getEventId(), postReplEventId.getEventId());

    // Prepare for reverse replication (B -> A): a fresh repl dir on the replica.
    DistributedFileSystem replicaFs = replica.miniDFSCluster.getFileSystem();
    Path newReplDir = new Path(replica.repldDir + "reverse01");
    replicaFs.mkdirs(newReplDir);
    withClause = ReplicationTestUtils.includeExternalTableClause(true);
    withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'");

    WarehouseInstance.Tuple reverseTuple = replica.dump(replicatedDbName, withClause);

    // The event ack file must have been created in the reverse dump location.
    Path eventAckFile = new Path(reverseTuple.dumpLocation, EVENT_ACK_FILE);
    assertTrue(eventAckFile.toString() + " doesn't exist", replicaFs.exists(eventAckFile));

    // Read the target event id recorded in the dump, then fetch events past it,
    // filtered to the replicated database only.
    long targetEventId = Long.parseLong(getEventIdFromFile(new Path(reverseTuple.dumpLocation), conf)[1]);
    NotificationEventResponse events = new HiveMetaStoreClient(replica.hiveConf)
        .getNextNotification(targetEventId, 10, new DatabaseAndTableFilter(replicatedDbName, null));
    assertEquals(1, events.getEvents().size());
}
Usage of org.apache.hadoop.hive.metastore.api.NotificationEventResponse in project hive by apache: class TestReplicationOptimisedBootstrap, method testTargetEventIdGenerationAfterFirstIncremental.
@Test
public void testTargetEventIdGenerationAfterFirstIncremental() throws Throwable {
    // Bootstrap dump & load cycle (A -> B) with external tables enabled.
    List<String> withClause = ReplicationTestUtils.includeExternalTableClause(true);
    withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'");
    primary.dump(primaryDbName, withClause);
    replica.load(replicatedDbName, primaryDbName, withClause);

    // Add some tables & do an incremental dump.
    WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName)
        .run("create external table table1 (id int)")
        .run("insert into table table1 values (100)")
        .run("create table table1_managed (name string)")
        .run("insert into table table1_managed values ('ABC')")
        .dump(primaryDbName, withClause);

    // Do an incremental load.
    replica.load(replicatedDbName, primaryDbName, withClause);

    // Get the latest notification from the notification log for the target database,
    // just after replication.
    CurrentNotificationEventId notificationIdAfterRepl = replica.getCurrentNotificationEventId();

    // Check the tables are there post incremental load.
    replica.run("repl status " + replicatedDbName)
        .verifyResult(tuple.lastReplicationId)
        .run("use " + replicatedDbName)
        .run("select id from table1")
        .verifyResult("100")
        .run("select name from table1_managed")
        .verifyResult("ABC")
        .verifyReplTargetProperty(replicatedDbName);

    // Do some modifications on the source cluster, so we have some entries in the table diff.
    primary.run("use " + primaryDbName)
        .run("create table table2_managed (id string)")
        .run("insert into table table1_managed values ('SDC')")
        .run("insert into table table2_managed values ('A'),('B'),('C')");

    // Modifications in another database create unrelated events after the last load,
    // which the target-event-id lookup must filter out.
    primary.run("create database " + extraPrimaryDb)
        .run("use " + extraPrimaryDb)
        .run("create external table t1 (id int)")
        .run("insert into table t1 values (15),(1),(96)")
        .run("create table t1_managed (id string)")
        .run("insert into table t1_managed values ('SA'),('PS')");

    // Do some modifications on the target database.
    replica.run("use " + replicatedDbName)
        .run("alter database " + replicatedDbName + " set DBPROPERTIES ('key1'='value1')")
        .run("alter database " + replicatedDbName + " set DBPROPERTIES ('key2'='value2')");

    // Validate the current replication id on the original target has changed now.
    assertNotEquals(replica.getCurrentNotificationEventId().getEventId(), notificationIdAfterRepl.getEventId());

    // Prepare for reverse replication: fresh repl dir on the replica's file system.
    DistributedFileSystem replicaFs = replica.miniDFSCluster.getFileSystem();
    Path newReplDir = new Path(replica.repldDir + "reverse1");
    replicaFs.mkdirs(newReplDir);
    withClause = ReplicationTestUtils.includeExternalTableClause(true);
    withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'");

    // FIX: pass the freshly built withClause so the reverse dump actually goes to
    // newReplDir. Previously this called dump(replicatedDbName) without the clause,
    // leaving newReplDir unused (the sibling test testTargetEventIdGeneration passes it).
    tuple = replica.dump(replicatedDbName, withClause);

    // Check event ack file should get created.
    assertTrue(new Path(tuple.dumpLocation, EVENT_ACK_FILE).toString() + " doesn't exist",
        replicaFs.exists(new Path(tuple.dumpLocation, EVENT_ACK_FILE)));

    // Fetch all events (-1 = no max) past the recorded target event id, filtered to
    // the replicated database.
    NotificationEventResponse nl = new HiveMetaStoreClient(replica.hiveConf)
        .getNextNotification(Long.parseLong(getEventIdFromFile(new Path(tuple.dumpLocation), conf)[1]),
            -1, new DatabaseAndTableFilter(replicatedDbName, null));

    // There should be 4 events, one for alter db, second to remove first incremental pending
    // and then two custom alter operations.
    assertEquals(4, nl.getEvents().size());
}
Usage of org.apache.hadoop.hive.metastore.api.NotificationEventResponse in project hive by apache: class TestDbNotificationListener, method createTable.
@Test
public void createTable() throws Exception {
    String defaultDbName = "default";
    String tblName = "createtable";
    String tblName2 = "createtable2";
    String tblOwner = "me";
    String serdeLocation = testTempDir;

    // Build a minimal single-column managed table.
    List<FieldSchema> columns = new ArrayList<>();
    columns.add(new FieldSchema("col1", "int", "no comment"));
    SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", null);
    StorageDescriptor storageDescriptor = new StorageDescriptor(columns, serdeLocation,
        "input", "output", false, 0, serdeInfo, null, null, emptyParameters);
    Table table = new Table(tblName, defaultDbName, tblOwner, startTime, startTime, 0,
        storageDescriptor, null, emptyParameters, null, null, TableType.MANAGED_TABLE.toString());
    msClient.createTable(table);

    // Exactly one CREATE_TABLE notification should have been logged in the metastore.
    NotificationEventResponse response = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(1, response.getEventsSize());
    NotificationEvent event = response.getEvents().get(0);
    assertEquals(firstEventId + 1, event.getEventId());
    assertTrue(event.getEventTime() >= startTime);
    assertEquals(EventType.CREATE_TABLE.toString(), event.getEventType());
    assertEquals(defaultDbName, event.getDbName());
    assertEquals(tblName, event.getTableName());

    // Parse the message field and check it round-trips the table metadata.
    CreateTableMessage createTblMsg = md.getCreateTableMessage(event.getMessage());
    assertEquals(defaultDbName, createTblMsg.getDB());
    assertEquals(tblName, createTblMsg.getTable());
    assertEquals(table, createTblMsg.getTableObj());
    assertEquals(TableType.MANAGED_TABLE.toString(), createTblMsg.getTableType());

    // Verify the eventID was passed to the non-transactional listener.
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);

    // When hive.metastore.transactional.event.listeners is set, a failed create
    // must not add a new notification.
    table = new Table(tblName2, defaultDbName, tblOwner, startTime, startTime, 0,
        storageDescriptor, null, emptyParameters, null, null, null);
    DummyRawStoreFailEvent.setEventSucceed(false);
    try {
        msClient.createTable(table);
        fail("Error: create table should've failed");
    } catch (Exception ex) {
        // expected
    }
    response = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(1, response.getEventsSize());
    testEventCounts(defaultDbName, firstEventId, null, null, 1);
}
Usage of org.apache.hadoop.hive.metastore.api.NotificationEventResponse in project hive by apache: class TestDbNotificationListener, method alterPartition.
@Test
public void alterPartition() throws Exception {
    String defaultDbName = "default";
    String tblName = "alterptn";
    String tblOwner = "me";
    String serdeLocation = testTempDir;

    // Build a single-column table partitioned by a string column "ds".
    List<FieldSchema> columns = new ArrayList<>();
    columns.add(new FieldSchema("col1", "int", "no comment"));
    SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", null);
    StorageDescriptor storageDescriptor = new StorageDescriptor(columns, serdeLocation,
        "input", "output", false, 0, serdeInfo, null, null, emptyParameters);
    List<FieldSchema> partCols = new ArrayList<>();
    partCols.add(new FieldSchema("ds", "string", "no comment"));
    List<String> partCol1Vals = Arrays.asList("today");
    Table table = new Table(tblName, defaultDbName, tblOwner, startTime, startTime, 0,
        storageDescriptor, partCols, emptyParameters, null, null, null);

    // Event 1: create table.
    msClient.createTable(table);
    Partition partition = new Partition(partCol1Vals, defaultDbName, tblName, startTime,
        startTime, storageDescriptor, emptyParameters);
    // Event 2: add partition.
    msClient.add_partition(partition);
    Partition newPart = new Partition(Arrays.asList("today"), defaultDbName, tblName,
        startTime, startTime + 1, storageDescriptor, emptyParameters);
    // Event 3: alter partition.
    msClient.alter_partition(defaultDbName, tblName, newPart, null);

    // Three notifications expected; the last one is the ALTER_PARTITION event.
    NotificationEventResponse response = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(3, response.getEventsSize());
    NotificationEvent event = response.getEvents().get(2);
    assertEquals(firstEventId + 3, event.getEventId());
    assertTrue(event.getEventTime() >= startTime);
    assertEquals(EventType.ALTER_PARTITION.toString(), event.getEventType());
    assertEquals(defaultDbName, event.getDbName());
    assertEquals(tblName, event.getTableName());

    // Parse the message field and check the altered partition round-trips.
    AlterPartitionMessage alterPtnMsg = md.getAlterPartitionMessage(event.getMessage());
    assertEquals(defaultDbName, alterPtnMsg.getDB());
    assertEquals(tblName, alterPtnMsg.getTable());
    assertEquals(newPart, alterPtnMsg.getPtnObjAfter());
    assertEquals(TableType.MANAGED_TABLE.toString(), alterPtnMsg.getTableType());

    // Verify the eventIDs were passed to the non-transactional listener.
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 2);
    MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);

    // When hive.metastore.transactional.event.listeners is set, a failed alter
    // must not add a new notification.
    DummyRawStoreFailEvent.setEventSucceed(false);
    try {
        msClient.alter_partition(defaultDbName, tblName, newPart, null);
        fail("Error: alter partition should've failed");
    } catch (Exception ex) {
        // expected
    }
    response = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(3, response.getEventsSize());
    testEventCounts(defaultDbName, firstEventId, null, null, 3);
}
Usage of org.apache.hadoop.hive.metastore.api.NotificationEventResponse in project hive by apache: class TestDbNotificationListener, method filterWithMax.
the class TestDbNotificationListener method filterWithMax.
@Test
public void filterWithMax() throws Exception {
    // Generate three events: CREATE_DATABASE(f10), CREATE_DATABASE(f11), DROP_DATABASE(f11).
    msClient.createDatabase(new Database("f10", "no description", testTempDir, emptyParameters));
    msClient.createDatabase(new Database("f11", "no description", testTempDir, emptyParameters));
    msClient.dropDatabase("f11");

    // Accept only CREATE_DATABASE events (NotificationFilter is a single-method interface).
    IMetaStoreClient.NotificationFilter createDbOnly =
        event -> event.getEventType().equals(EventType.CREATE_DATABASE.toString());

    // With maxEvents = 1 only the first matching event is returned.
    NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 1, createDbOnly);
    assertEquals(1, rsp.getEventsSize());
    assertEquals(firstEventId + 1, rsp.getEvents().get(0).getEventId());
}
Aggregations