Example usage of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project, from the class TestAccumuloStorageHandler, method testNonExternalExistentTable.
@Test(expected = MetaException.class)
public void testNonExternalExistentTable() throws Exception {
  String tableName = "table";
  // Back the test with a MockInstance whose Accumulo table already exists.
  MockInstance instance = new MockInstance(test.getMethodName());
  Connector connector = instance.getConnector("root", new PasswordToken(""));
  connector.tableOperations().create(tableName);

  // SerDe parameters carried by the mocked Hive table definition.
  Map<String, String> serdeParams = new HashMap<String, String>();
  serdeParams.put(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq");

  AccumuloConnectionParameters connectionParams = Mockito.mock(AccumuloConnectionParameters.class);
  AccumuloStorageHandler handler = Mockito.mock(AccumuloStorageHandler.class);
  StorageDescriptor descriptor = Mockito.mock(StorageDescriptor.class);
  Table tbl = Mockito.mock(Table.class);
  SerDeInfo serDe = Mockito.mock(SerDeInfo.class);

  // Only preCreateTable runs its real logic; every collaborator is stubbed.
  Mockito.doCallRealMethod().when(handler).preCreateTable(tbl);
  Mockito.when(handler.getTableName(tbl)).thenReturn(tableName);
  // Managed (non-EXTERNAL) table over an already-existing Accumulo table.
  Mockito.when(handler.isExternalTable(tbl)).thenReturn(false);
  Mockito.when(tbl.getSd()).thenReturn(descriptor);
  // AccumuloStorageHandler expects no location on the storage descriptor.
  Mockito.when(descriptor.getLocation()).thenReturn(null);
  Mockito.when(descriptor.getSerdeInfo()).thenReturn(serDe);
  Mockito.when(serDe.getParameters()).thenReturn(serdeParams);
  // Hand the handler the MockInstance-backed Connector.
  Mockito.when(connectionParams.getConnector()).thenReturn(connector);
  handler.connectionParams = connectionParams;

  // Should throw MetaException: a managed table may not target an existing Accumulo table.
  handler.preCreateTable(tbl);
}
Example usage of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project, from the class TestHiveMetaStore, method createStorageDescriptor.
/**
 * Builds a StorageDescriptor for test tables: one "name" bucket column, no sort
 * columns, LazySimpleSerDe with serialization.format=1, and the standard Hive
 * input/output formats.
 *
 * @param tableName   used as the SerDeInfo name
 * @param cols        column schemas for the descriptor
 * @param params      storage-descriptor parameters
 * @param serdParams  SerDe parameters; SERIALIZATION_FORMAT is added to this map
 * @return the populated descriptor
 */
private StorageDescriptor createStorageDescriptor(String tableName, List<FieldSchema> cols, Map<String, String> params, Map<String, String> serdParams) {
  // Configure the SerDe first, then attach it to the descriptor.
  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setName(tableName);
  serdeInfo.setParameters(serdParams);
  serdeInfo.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  serdeInfo.setSerializationLib(LazySimpleSerDe.class.getName());

  StorageDescriptor descriptor = new StorageDescriptor();
  descriptor.setCols(cols);
  descriptor.setCompressed(false);
  descriptor.setNumBuckets(1);
  descriptor.setParameters(params);
  List<String> bucketCols = new ArrayList<String>(2);
  bucketCols.add("name");
  descriptor.setBucketCols(bucketCols);
  descriptor.setSortCols(new ArrayList<Order>());
  descriptor.setSerdeInfo(serdeInfo);
  descriptor.setInputFormat(HiveInputFormat.class.getName());
  descriptor.setOutputFormat(HiveOutputFormat.class.getName());
  return descriptor;
}
Example usage of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project, from the class TestDbNotificationListener, method alterIndex.
@Test
public void alterIndex() throws Exception {
  String indexName = "alterIndex";
  String dbName = "default";
  String tableName = "alterIndexTable";
  String indexTableName = tableName + "__" + indexName + "__";
  int startTime = (int) (System.currentTimeMillis() / 1000);

  // A minimal table definition to hang the index off of.
  List<FieldSchema> columns = new ArrayList<FieldSchema>();
  columns.add(new FieldSchema("col1", "int", ""));
  SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", null);
  Map<String, String> sdParams = new HashMap<String, String>();
  sdParams.put("key", "value");
  StorageDescriptor descriptor = new StorageDescriptor(columns, "file:/tmp", "input", "output", false, 17, serdeInfo, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), sdParams);
  Table baseTable = new Table(tableName, dbName, "me", startTime, startTime, 0, descriptor, null, emptyParameters, null, null, null);

  // Event 1: base table creation
  msClient.createTable(baseTable);
  Index originalIndex = new Index(indexName, null, "default", tableName, startTime, startTime, indexTableName, descriptor, emptyParameters, false);
  Table originalIndexTable = new Table(indexTableName, dbName, "me", startTime, startTime, 0, descriptor, null, emptyParameters, null, null, null);
  // Events 2 and 3: index plus its backing index table
  msClient.createIndex(originalIndex, originalIndexTable);
  // Same index with a bumped last-access time, for the alter call.
  Index updatedIndex = new Index(indexName, null, "default", tableName, startTime, startTime + 1, indexTableName, descriptor, emptyParameters, false);
  // Event 4: the alter itself
  msClient.alter_index(dbName, tableName, indexName, updatedIndex);

  // Pull all notifications emitted so far and inspect the ALTER_INDEX one.
  NotificationEventResponse response = msClient.getNextNotification(firstEventId, 0, null);
  assertEquals(4, response.getEventsSize());
  NotificationEvent alterEvent = response.getEvents().get(3);
  assertEquals(firstEventId + 4, alterEvent.getEventId());
  assertTrue(alterEvent.getEventTime() >= startTime);
  assertEquals(EventType.ALTER_INDEX.toString(), alterEvent.getEventType());
  assertEquals(dbName, alterEvent.getDbName());

  // The message payload must round-trip the post-alter index object.
  AlterIndexMessage message = md.getAlterIndexMessage(alterEvent.getMessage());
  Index afterAlter = message.getIndexObjAfter();
  assertEquals(dbName, afterAlter.getDbName());
  assertEquals(indexName, afterAlter.getIndexName());
  assertEquals(tableName, afterAlter.getOrigTableName());
  assertEquals(indexTableName, afterAlter.getIndexTableName());
  assertTrue(afterAlter.getCreateTime() < afterAlter.getLastAccessTime());

  // When hive.metastore.transactional.event.listeners is set,
  // a failed event must not generate a new notification.
  DummyRawStoreFailEvent.setEventSucceed(false);
  try {
    msClient.alter_index(dbName, tableName, indexName, updatedIndex);
    fail("Error: alter index should've failed");
  } catch (Exception ex) {
    // expected
  }
  response = msClient.getNextNotification(firstEventId, 0, null);
  assertEquals(4, response.getEventsSize());
}
Example usage of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project, from the class TestDbNotificationListener, method dropIndex.
@Test
public void dropIndex() throws Exception {
  String indexName = "dropIndex";
  String dbName = "default";
  String tableName = "dropIndexTable";
  String indexTableName = tableName + "__" + indexName + "__";
  int startTime = (int) (System.currentTimeMillis() / 1000);

  // A minimal table definition to hang the index off of.
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(new FieldSchema("col1", "int", ""));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  Map<String, String> params = new HashMap<String, String>();
  params.put("key", "value");
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
  Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null);

  // Event 1: base table creation
  msClient.createTable(table);
  Index index = new Index(indexName, null, "default", tableName, startTime, startTime, indexTableName, sd, emptyParameters, false);
  Table indexTable = new Table(indexTableName, dbName, "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null);
  // Events 2, 3: index table and index
  msClient.createIndex(index, indexTable);
  // Event 4: drops both the index and its index table
  msClient.dropIndex(dbName, tableName, indexName, true);

  // Fetch the notifications and verify the DROP_INDEX event.
  NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
  assertEquals(4, rsp.getEventsSize());
  NotificationEvent event = rsp.getEvents().get(3);
  assertEquals(firstEventId + 4, event.getEventId());
  assertTrue(event.getEventTime() >= startTime);
  assertEquals(EventType.DROP_INDEX.toString(), event.getEventType());
  assertEquals(dbName, event.getDbName());

  // Parse the message field; names are lower-cased by the metastore.
  DropIndexMessage dropIdxMsg = md.getDropIndexMessage(event.getMessage());
  assertEquals(dbName, dropIdxMsg.getDB());
  assertEquals(indexName.toLowerCase(), dropIdxMsg.getIndexName());
  assertEquals(indexTableName.toLowerCase(), dropIdxMsg.getIndexTableName());
  assertEquals(tableName.toLowerCase(), dropIdxMsg.getOrigTableName());

  // When hive.metastore.transactional.event.listeners is set,
  // a failed event should not create a new notification.
  String secondIndexName = "dropIndexTable2";
  index = new Index(secondIndexName, null, "default", tableName, startTime, startTime, "dropIndexTable__dropIndexTable2__", sd, emptyParameters, false);
  Table indexTable2 = new Table("dropIndexTable__dropIndexTable2__", dbName, "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null);
  msClient.createIndex(index, indexTable2);
  DummyRawStoreFailEvent.setEventSucceed(false);
  try {
    // BUGFIX: drop the index by its actual name ("dropIndexTable2", not
    // "dropIndex2") so the call reaches the event listener and fails there,
    // exercising the transactional-listener failure path this test targets.
    msClient.dropIndex(dbName, tableName, secondIndexName, true);
    fail("Error: drop index should've failed");
  } catch (Exception ex) {
    // expected
  }
  // The two createIndex events (5, 6) are visible; the failed drop added none.
  rsp = msClient.getNextNotification(firstEventId, 0, null);
  assertEquals(6, rsp.getEventsSize());
}
Example usage of org.apache.hadoop.hive.metastore.api.SerDeInfo in the Apache Hive project, from the class HBaseUtils, method deserializeStorageDescriptor.
/**
 * Rebuilds a thrift StorageDescriptor from its protobuf serialization.
 * Optional proto fields (input/output format, serde info, skewed info,
 * stored-as-subdirectories) are copied only when present; absent serde
 * name/serialization-lib fields are mapped to null.
 *
 * @param serialized protobuf-encoded HbaseMetastoreProto.StorageDescriptor bytes
 * @return the reconstructed thrift StorageDescriptor
 * @throws InvalidProtocolBufferException if the bytes are not a valid message
 */
static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) throws InvalidProtocolBufferException {
  HbaseMetastoreProto.StorageDescriptor proto = HbaseMetastoreProto.StorageDescriptor.parseFrom(serialized);
  StorageDescriptor descriptor = new StorageDescriptor();
  descriptor.setCols(convertFieldSchemaListFromProto(proto.getColsList()));
  if (proto.hasInputFormat()) {
    descriptor.setInputFormat(proto.getInputFormat());
  }
  if (proto.hasOutputFormat()) {
    descriptor.setOutputFormat(proto.getOutputFormat());
  }
  descriptor.setCompressed(proto.getIsCompressed());
  descriptor.setNumBuckets(proto.getNumBuckets());
  if (proto.hasSerdeInfo()) {
    SerDeInfo serdeInfo = new SerDeInfo();
    serdeInfo.setName(proto.getSerdeInfo().hasName() ? proto.getSerdeInfo().getName() : null);
    serdeInfo.setSerializationLib(proto.getSerdeInfo().hasSerializationLib() ? proto.getSerdeInfo().getSerializationLib() : null);
    serdeInfo.setParameters(buildParameters(proto.getSerdeInfo().getParameters()));
    descriptor.setSerdeInfo(serdeInfo);
  }
  descriptor.setBucketCols(new ArrayList<>(proto.getBucketColsList()));
  // Convert the proto sort-column entries into thrift Order objects.
  List<Order> orderCols = new ArrayList<>();
  for (HbaseMetastoreProto.StorageDescriptor.Order order : proto.getSortColsList()) {
    orderCols.add(new Order(order.getColumnName(), order.getOrder()));
  }
  descriptor.setSortCols(orderCols);
  if (proto.hasSkewedInfo()) {
    SkewedInfo skewedInfo = new SkewedInfo();
    skewedInfo.setSkewedColNames(new ArrayList<>(proto.getSkewedInfo().getSkewedColNamesList()));
    // Each proto value list becomes one skewed-column value tuple.
    for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList valueList : proto.getSkewedInfo().getSkewedColValuesList()) {
      skewedInfo.addToSkewedColValues(new ArrayList<>(valueList.getSkewedColValueList()));
    }
    // Rebuild the value-tuple -> location map.
    Map<List<String>, String> locationMap = new HashMap<>();
    for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap entry : proto.getSkewedInfo().getSkewedColValueLocationMapsList()) {
      locationMap.put(new ArrayList<>(entry.getKeyList()), entry.getValue());
    }
    skewedInfo.setSkewedColValueLocationMaps(locationMap);
    descriptor.setSkewedInfo(skewedInfo);
  }
  if (proto.hasStoredAsSubDirectories()) {
    descriptor.setStoredAsSubDirectories(proto.getStoredAsSubDirectories());
  }
  return descriptor;
}
Aggregations