Use of org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage in the Apache Hive project: class TestDbNotificationListener, method dropPartition.
@Test
public void dropPartition() throws Exception {
  String defaultDbName = "default";
  String tblName = "dropptn";
  String tblOwner = "me";
  String serdeLocation = testTempDir;
  FieldSchema col1 = new FieldSchema("col1", "int", "no comment");
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(col1);
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serde, null, null, emptyParameters);
  FieldSchema partCol1 = new FieldSchema("ds", "string", "no comment");
  List<FieldSchema> partCols = new ArrayList<FieldSchema>();
  List<String> partCol1Vals = Arrays.asList("today");
  partCols.add(partCol1);
  Table table = new Table(tblName, defaultDbName, tblOwner, startTime, startTime, 0, sd, partCols, emptyParameters, null, null, null);
  // Event 1
  msClient.createTable(table);
  Partition partition = new Partition(partCol1Vals, defaultDbName, tblName, startTime, startTime, sd, emptyParameters);
  // Event 2
  msClient.add_partition(partition);
  // Event 3
  msClient.dropPartition(defaultDbName, tblName, partCol1Vals, false);
  // Get notifications from metastore
  NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
  assertEquals(3, rsp.getEventsSize());
  NotificationEvent event = rsp.getEvents().get(2);
  assertEquals(firstEventId + 3, event.getEventId());
  assertTrue(event.getEventTime() >= startTime);
  assertEquals(EventType.DROP_PARTITION.toString(), event.getEventType());
  assertEquals(defaultDbName, event.getDbName());
  assertEquals(tblName, event.getTableName());
  // Parse the message field
  DropPartitionMessage dropPtnMsg = md.getDropPartitionMessage(event.getMessage());
  assertEquals(defaultDbName, dropPtnMsg.getDB());
  assertEquals(tblName, dropPtnMsg.getTable());
  Table tableObj = dropPtnMsg.getTableObj();
  assertEquals(table.getDbName(), tableObj.getDbName());
  assertEquals(table.getTableName(), tableObj.getTableName());
  assertEquals(table.getOwner(), tableObj.getOwner());
  assertEquals(TableType.MANAGED_TABLE.toString(), dropPtnMsg.getTableType());
  // Verify the eventID was passed to the non-transactional listener
  MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.DROP_PARTITION, firstEventId + 3);
  MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 2);
  MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
  // When hive.metastore.transactional.event.listeners is set,
  // a failed event should not create a new notification
  List<String> newpartCol1Vals = Arrays.asList("tomorrow");
  partition = new Partition(newpartCol1Vals, defaultDbName, tblName, startTime, startTime, sd, emptyParameters);
  msClient.add_partition(partition);
  DummyRawStoreFailEvent.setEventSucceed(false);
  try {
    msClient.dropPartition(defaultDbName, tblName, newpartCol1Vals, false);
    fail("Error: drop partition should've failed");
  } catch (Exception ex) {
    // expected
  }
  rsp = msClient.getNextNotification(firstEventId, 0, null);
  assertEquals(4, rsp.getEventsSize());
  testEventCounts(defaultDbName, firstEventId, null, null, 4);
}
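The test above reads the DROP_PARTITION event back through the deserializer `md` and checks the fields carried in the message. As a hedged illustration of the same consumer-side pattern outside a test, the sketch below is a minimal, hypothetical helper (the class and method names are made up) that relies only on the accessors exercised above: `getEventType()`, `getDropPartitionMessage()`, `getDB()`, `getTable()`, and `getPartitions()`.

import java.util.Map;

import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage;
import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;

// Hypothetical helper: not part of the test, only illustrates how the
// DROP_PARTITION payload seen above can be unpacked by a consumer.
public class DropPartitionEventPrinter {

  // Prints the database, table, and partition key/value maps carried by a
  // DROP_PARTITION notification; other event types are ignored.
  public static void printIfDropPartition(NotificationEvent event, MessageDeserializer deserializer) {
    if (!EventType.DROP_PARTITION.toString().equals(event.getEventType())) {
      return;
    }
    DropPartitionMessage msg = deserializer.getDropPartitionMessage(event.getMessage());
    System.out.println("Dropped from " + msg.getDB() + "." + msg.getTable());
    // getPartitions() yields one map of partition column -> value per dropped partition
    for (Map<String, String> partSpec : msg.getPartitions()) {
      System.out.println("  partition spec: " + partSpec);
    }
  }
}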
Use of org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage in the Apache Hive project: class TestDbNotificationListener, method exchangePartition.
@Test
public void exchangePartition() throws Exception {
  String dbName = "default";
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(new FieldSchema("col1", "int", "nocomment"));
  List<FieldSchema> partCols = new ArrayList<FieldSchema>();
  partCols.add(new FieldSchema("part", "int", ""));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd1 = new StorageDescriptor(cols, Paths.get(testTempDir, "1").toString(), "input", "output", false, 0, serde, null, null, emptyParameters);
  Table tab1 = new Table("tab1", dbName, "me", startTime, startTime, 0, sd1, partCols, emptyParameters, null, null, null);
  msClient.createTable(tab1);
  NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
  // add_table
  assertEquals(1, rsp.getEventsSize());
  StorageDescriptor sd2 = new StorageDescriptor(cols, Paths.get(testTempDir, "2").toString(), "input", "output", false, 0, serde, null, null, emptyParameters);
  Table tab2 = new Table("tab2", dbName, "me", startTime, startTime, 0, sd2, partCols, emptyParameters, null, null, null);
  msClient.createTable(tab2);
  rsp = msClient.getNextNotification(firstEventId + 1, 0, null);
  // add_table
  assertEquals(1, rsp.getEventsSize());
  StorageDescriptor sd1part = new StorageDescriptor(cols, Paths.get(testTempDir, "1", "part=1").toString(), "input", "output", false, 0, serde, null, null, emptyParameters);
  StorageDescriptor sd2part = new StorageDescriptor(cols, Paths.get(testTempDir, "1", "part=2").toString(), "input", "output", false, 0, serde, null, null, emptyParameters);
  StorageDescriptor sd3part = new StorageDescriptor(cols, Paths.get(testTempDir, "1", "part=3").toString(), "input", "output", false, 0, serde, null, null, emptyParameters);
  Partition part1 = new Partition(Arrays.asList("1"), "default", tab1.getTableName(), startTime, startTime, sd1part, emptyParameters);
  Partition part2 = new Partition(Arrays.asList("2"), "default", tab1.getTableName(), startTime, startTime, sd2part, emptyParameters);
  Partition part3 = new Partition(Arrays.asList("3"), "default", tab1.getTableName(), startTime, startTime, sd3part, emptyParameters);
  msClient.add_partitions(Arrays.asList(part1, part2, part3));
  rsp = msClient.getNextNotification(firstEventId + 2, 0, null);
  // add_partition
  assertEquals(1, rsp.getEventsSize());
  msClient.exchange_partition(ImmutableMap.of("part", "1"), dbName, tab1.getTableName(), dbName, tab2.getTableName());
  rsp = msClient.getNextNotification(firstEventId + 3, 0, null);
  assertEquals(2, rsp.getEventsSize());
  NotificationEvent event = rsp.getEvents().get(0);
  assertEquals(firstEventId + 4, event.getEventId());
  assertTrue(event.getEventTime() >= startTime);
  assertEquals(EventType.ADD_PARTITION.toString(), event.getEventType());
  assertEquals(dbName, event.getDbName());
  assertEquals(tab2.getTableName(), event.getTableName());
  // Parse the message field
  AddPartitionMessage addPtnMsg = md.getAddPartitionMessage(event.getMessage());
  assertEquals(dbName, addPtnMsg.getDB());
  assertEquals(tab2.getTableName(), addPtnMsg.getTable());
  Iterator<Partition> ptnIter = addPtnMsg.getPartitionObjs().iterator();
  assertEquals(TableType.MANAGED_TABLE.toString(), addPtnMsg.getTableType());
  assertTrue(ptnIter.hasNext());
  Partition msgPart = ptnIter.next();
  assertEquals(part1.getValues(), msgPart.getValues());
  assertEquals(dbName, msgPart.getDbName());
  assertEquals(tab2.getTableName(), msgPart.getTableName());
  event = rsp.getEvents().get(1);
  assertEquals(firstEventId + 5, event.getEventId());
  assertTrue(event.getEventTime() >= startTime);
  assertEquals(EventType.DROP_PARTITION.toString(), event.getEventType());
  assertEquals(dbName, event.getDbName());
  assertEquals(tab1.getTableName(), event.getTableName());
  // Parse the message field
  DropPartitionMessage dropPtnMsg = md.getDropPartitionMessage(event.getMessage());
  assertEquals(dbName, dropPtnMsg.getDB());
  assertEquals(tab1.getTableName(), dropPtnMsg.getTable());
  assertEquals(TableType.MANAGED_TABLE.toString(), dropPtnMsg.getTableType());
  Iterator<Map<String, String>> parts = dropPtnMsg.getPartitions().iterator();
  assertTrue(parts.hasNext());
  assertEquals(part1.getValues(), Lists.newArrayList(parts.next().values()));
  // Verify the eventID was passed to the non-transactional listener
  MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.DROP_PARTITION, firstEventId + 5);
  MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 4);
  MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.ADD_PARTITION, firstEventId + 3);
  MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 2);
  MockMetaStoreEventListener.popAndVerifyLastEventId(EventType.CREATE_TABLE, firstEventId + 1);
  testEventCounts(dbName, firstEventId, null, null, 5);
}
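As the test verifies, a single exchange_partition call surfaces as two notifications: an ADD_PARTITION on the destination table followed by a DROP_PARTITION on the source table. As a hedged sketch (the class name and println reporting are invented for illustration), a consumer could dispatch on the event type and reuse the same deserializer calls the test exercises:

import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage;
import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage;
import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;

// Hypothetical consumer loop: dispatches each notification produced by an
// exchange_partition call to the matching message parser.
public class ExchangeEventScanner {

  public static void scan(NotificationEventResponse rsp, MessageDeserializer deserializer) {
    for (NotificationEvent event : rsp.getEvents()) {
      String type = event.getEventType();
      if (EventType.ADD_PARTITION.toString().equals(type)) {
        AddPartitionMessage add = deserializer.getAddPartitionMessage(event.getMessage());
        System.out.println("Partition(s) added to " + add.getDB() + "." + add.getTable());
      } else if (EventType.DROP_PARTITION.toString().equals(type)) {
        DropPartitionMessage drop = deserializer.getDropPartitionMessage(event.getMessage());
        System.out.println("Partition(s) dropped from " + drop.getDB() + "." + drop.getTable());
      }
    }
  }
}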
Use of org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage in the Apache Hive project: class DropPartitionHandler, method handle.
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  try {
    DropPartitionMessage msg = deserializer.getDropPartitionMessage(context.dmd.getPayload());
    String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
    String actualTblName = msg.getTable();
    Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs = ReplUtils.genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions());
    if (partSpecs.size() > 0) {
      AlterTableDropPartitionDesc dropPtnDesc = new AlterTableDropPartitionDesc(HiveTableName.ofNullable(actualTblName, actualDbName), partSpecs, true, context.eventOnlyReplicationSpec());
      Task<DDLWork> dropPtnTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
      context.log.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(), dropPtnDesc.getTableName(), msg.getPartitions());
      updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
      return Collections.singletonList(dropPtnTask);
    } else {
      throw new SemanticException("DROP PARTITION EVENT does not return any part descs for event message :" + context.dmd.getPayload());
    }
  } catch (Exception e) {
    throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
  }
}
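The replication handler feeds the message's partition key/value maps into ReplUtils.genPartSpecs to build the drop expressions. As a hedged illustration of the shape of that input only (not of the handler's actual conversion), the hypothetical formatter below renders each map from getPartitions() as a human-readable PARTITION clause.

import java.util.List;
import java.util.Map;
import java.util.StringJoiner;

// Hypothetical formatter, only to show the data the handler consumes:
// DropPartitionMessage.getPartitions() returns one column->value map per
// dropped partition, e.g. {ds=today}.
public class PartitionSpecFormatter {

  // Renders a single partition spec map as PARTITION (k='v', ...).
  public static String toPartitionClause(Map<String, String> partSpec) {
    StringJoiner joiner = new StringJoiner(", ", "PARTITION (", ")");
    for (Map.Entry<String, String> entry : partSpec.entrySet()) {
      joiner.add(entry.getKey() + "='" + entry.getValue() + "'");
    }
    return joiner.toString();
  }

  public static void print(List<Map<String, String>> partitions) {
    for (Map<String, String> partSpec : partitions) {
      System.out.println(toPartitionClause(partSpec));
    }
  }
}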
Use of org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage in the Apache Hive project: class DbNotificationListener, method onDropPartition.
/**
 * @param partitionEvent partition event
 * @throws MetaException
 */
@Override
public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaException {
  Table t = partitionEvent.getTable();
  DropPartitionMessage msg = MessageBuilder.getInstance().buildDropPartitionMessage(t, partitionEvent.getPartitionIterator());
  NotificationEvent event = new NotificationEvent(0, now(), EventType.DROP_PARTITION.toString(), msgEncoder.getSerializer().serialize(msg));
  event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  process(event, partitionEvent);
}
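DbNotificationListener is the transactional listener that serializes the DropPartitionMessage into a notification row. A non-transactional listener registered via hive.metastore.event.listeners can observe the same event directly. The sketch below is a minimal, hypothetical example (class name and logging are illustrative, and it assumes the Configuration-based MetaStoreEventListener constructor used in recent Hive versions); it relies only on the DropPartitionEvent accessors shown above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;

import java.util.Iterator;

// Hypothetical non-transactional listener: logs every dropped partition using
// the same Table and partition iterator that DbNotificationListener serializes.
public class LoggingDropPartitionListener extends MetaStoreEventListener {

  public LoggingDropPartitionListener(Configuration config) {
    super(config);
  }

  @Override
  public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaException {
    Table t = partitionEvent.getTable();
    Iterator<Partition> it = partitionEvent.getPartitionIterator();
    while (it.hasNext()) {
      Partition p = it.next();
      System.out.println("Dropped partition " + p.getValues() + " from " + t.getDbName() + "." + t.getTableName());
    }
  }
}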