Use of org.apache.hadoop.hive.metastore.api.SerDeInfo in project hive by apache.
Class TestHBaseAggregateStatsNDVUniformDist, method TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusLong:
@Test
public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusLong() throws Exception {
  String dbName = "default";
  String tableName = "snp";
  long now = System.currentTimeMillis();
  List<FieldSchema> cols = new ArrayList<>();
  cols.add(new FieldSchema("col5_long", "long", "nocomment"));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
      serde, null, null, Collections.<String, String>emptyMap());
  List<FieldSchema> partCols = new ArrayList<>();
  partCols.add(new FieldSchema("ds", "string", ""));
  Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols,
      Collections.<String, String>emptyMap(), null, null, null);
  store.createTable(table);
  List<List<String>> partVals = new ArrayList<>();
  for (int i = 0; i < 10; i++) {
    List<String> partVal = Arrays.asList("" + i);
    partVals.add(partVal);
    StorageDescriptor psd = new StorageDescriptor(sd);
    psd.setLocation("file:/tmp/default/hit/ds=" + partVal);
    Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd,
        Collections.<String, String>emptyMap());
    store.addPartition(part);
    // Write column statistics for only 6 of the 10 partitions: both ends and the middle.
    if (i == 0 || i == 2 || i == 3 || i == 5 || i == 6 || i == 8) {
      ColumnStatistics cs = new ColumnStatistics();
      ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName);
      desc.setLastAnalyzed(now);
      desc.setPartName("ds=" + partVal);
      cs.setStatsDesc(desc);
      ColumnStatisticsObj obj = new ColumnStatisticsObj();
      obj.setColName("col5_long");
      obj.setColType("long");
      ColumnStatisticsData data = new ColumnStatisticsData();
      LongColumnStatsData dcsd = new LongColumnStatsData();
      dcsd.setHighValue(1000 + i);
      dcsd.setLowValue(-1000 - i);
      dcsd.setNumNulls(i);
      dcsd.setNumDVs(10 * i + 1);
      dcsd.setBitVectors(bitVectors[i / 5]);
      data.setLongStats(dcsd);
      obj.setStatsData(data);
      cs.addToStatsObj(obj);
      store.updatePartitionColumnStatistics(cs, partVal);
    }
  }
  Checker statChecker = new Checker() {

    @Override
    public void checkStats(AggrStats aggrStats) throws Exception {
      Assert.assertEquals(6, aggrStats.getPartsFound());
      Assert.assertEquals(1, aggrStats.getColStatsSize());
      ColumnStatisticsObj cso = aggrStats.getColStats().get(0);
      Assert.assertEquals("col5_long", cso.getColName());
      Assert.assertEquals("long", cso.getColType());
      LongColumnStatsData lcsd = cso.getStatsData().getLongStats();
      Assert.assertEquals(1010, lcsd.getHighValue(), 0.01);
      Assert.assertEquals(-1010, lcsd.getLowValue(), 0.01);
      // Null count and NDV are extrapolated to all 10 partitions from the 6 with stats.
      Assert.assertEquals(40, lcsd.getNumNulls());
      Assert.assertEquals(12, lcsd.getNumDVs());
    }
  };
  List<String> partNames = new ArrayList<>();
  for (int i = 0; i < 10; i++) {
    partNames.add("ds=" + i);
  }
  AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames,
      Arrays.asList("col5_long"));
  statChecker.checkStats(aggrStats);
}
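The Checker used above is a small callback defined by the test class itself; its body is not part of this snippet. A minimal sketch of what it presumably looks like, inferred from the override rather than copied from the source:

// Inferred sketch of the test-local callback interface; the real definition
// lives in the test class and may differ in modifiers or documentation.
interface Checker {
  void checkStats(AggrStats aggrStats) throws Exception;
}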
Use of org.apache.hadoop.hive.metastore.api.SerDeInfo in project hive by apache.
Class TestHBaseStore, method hashSd:
@Test
public void hashSd() throws Exception {
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(new FieldSchema("col1", "int", ""));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", true, 0,
      serde, null, null, emptyParameters);
  Map<List<String>, String> map = new HashMap<List<String>, String>();
  map.put(Arrays.asList("col3"), "col4");
  SkewedInfo skew = new SkewedInfo(Arrays.asList("col1"), Arrays.asList(Arrays.asList("col2")), map);
  sd.setSkewedInfo(skew);
  MessageDigest md = MessageDigest.getInstance("MD5");
  byte[] baseHash = HBaseUtils.hashStorageDescriptor(sd, md);
  // Changing the schema must change the hash...
  StorageDescriptor changeSchema = new StorageDescriptor(sd);
  changeSchema.getCols().add(new FieldSchema("col2", "varchar(32)", "a comment"));
  byte[] schemaHash = HBaseUtils.hashStorageDescriptor(changeSchema, md);
  Assert.assertFalse(Arrays.equals(baseHash, schemaHash));
  // ...while changing only the location must not.
  StorageDescriptor changeLocation = new StorageDescriptor(sd);
  changeLocation.setLocation("file:/somewhere/else");
  byte[] locationHash = HBaseUtils.hashStorageDescriptor(changeLocation, md);
  Assert.assertArrayEquals(baseHash, locationHash);
}
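The two assertions document the contract being tested: HBaseUtils.hashStorageDescriptor covers the schema and related metadata but excludes the location, so storage descriptors that differ only in where their data lives hash to the same value.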
Use of org.apache.hadoop.hive.metastore.api.SerDeInfo in project hive by apache.
Class TestHBaseStore, method skewInfo:
@Test
public void skewInfo() throws Exception {
  String tableName = "mytable";
  int startTime = (int) (System.currentTimeMillis() / 1000);
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(new FieldSchema("col1", "int", ""));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", true, 0,
      serde, null, null, emptyParameters);
  Map<List<String>, String> map = new HashMap<List<String>, String>();
  map.put(Arrays.asList("col3"), "col4");
  SkewedInfo skew = new SkewedInfo(Arrays.asList("col1"), Arrays.asList(Arrays.asList("col2")), map);
  sd.setSkewedInfo(skew);
  Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null,
      emptyParameters, null, null, null);
  store.createTable(table);
  // Read the table back and verify every storage-descriptor field survived the round trip.
  Table t = store.getTable("default", tableName);
  Assert.assertEquals(1, t.getSd().getColsSize());
  Assert.assertEquals("col1", t.getSd().getCols().get(0).getName());
  Assert.assertEquals("int", t.getSd().getCols().get(0).getType());
  Assert.assertEquals("", t.getSd().getCols().get(0).getComment());
  Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName());
  Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib());
  Assert.assertEquals("file:/tmp", t.getSd().getLocation());
  Assert.assertEquals("input", t.getSd().getInputFormat());
  Assert.assertEquals("output", t.getSd().getOutputFormat());
  Assert.assertTrue(t.getSd().isCompressed());
  Assert.assertEquals(0, t.getSd().getNumBuckets());
  Assert.assertEquals(0, t.getSd().getSortColsSize());
  Assert.assertEquals("me", t.getOwner());
  Assert.assertEquals("default", t.getDbName());
  Assert.assertEquals(tableName, t.getTableName());
  Assert.assertEquals(0, t.getParametersSize());
  // The skew info should come back intact as well.
  skew = t.getSd().getSkewedInfo();
  Assert.assertNotNull(skew);
  Assert.assertEquals(1, skew.getSkewedColNamesSize());
  Assert.assertEquals("col1", skew.getSkewedColNames().get(0));
  Assert.assertEquals(1, skew.getSkewedColValuesSize());
  Assert.assertEquals("col2", skew.getSkewedColValues().get(0).get(0));
  Assert.assertEquals(1, skew.getSkewedColValueLocationMapsSize());
  Assert.assertEquals("col4", skew.getSkewedColValueLocationMaps().get(Arrays.asList("col3")));
}
Use of org.apache.hadoop.hive.metastore.api.SerDeInfo in project hive by apache.
Class TestHBaseStore, method createMultiColumnTable:
private Table createMultiColumnTable(String tblName, String... types) throws Exception {
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  // One column per requested type, named col0, col1, ...
  for (int i = 0; i < types.length; i++) {
    cols.add(new FieldSchema("col" + i, types[i], ""));
  }
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  Map<String, String> params = new HashMap<String, String>();
  params.put("key", "value");
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
      serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
  int currentTime = (int) (System.currentTimeMillis() / 1000);
  Table table = new Table(tblName, DB, "me", currentTime, currentTime, 0, sd, cols,
      emptyParameters, null, null, null);
  store.createTable(table);
  return table;
}
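A hypothetical call site for this helper, to show how the varargs types map onto the generated column names (the table name and type list here are illustrative, not taken from the source):

// Creates a table with columns col0 bigint, col1 string, col2 double (illustrative values).
Table statsTable = createMultiColumnTable("statstable", "bigint", "string", "double");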
Use of org.apache.hadoop.hive.metastore.api.SerDeInfo in project hive by apache.
Class DDLTask, method showCreateTable:
private int showCreateTable(Hive db, DataOutputStream outStream, String tableName) throws HiveException {
  final String EXTERNAL = "external";
  final String TEMPORARY = "temporary";
  final String LIST_COLUMNS = "columns";
  final String TBL_COMMENT = "tbl_comment";
  final String LIST_PARTITIONS = "partitions";
  final String SORT_BUCKET = "sort_bucket";
  final String SKEWED_INFO = "tbl_skewedinfo";
  final String ROW_FORMAT = "row_format";
  final String TBL_LOCATION = "tbl_location";
  final String TBL_PROPERTIES = "tbl_properties";
  boolean needsLocation = true;
  StringBuilder createTab_str = new StringBuilder();
  Table tbl = db.getTable(tableName, false);
  List<String> duplicateProps = new ArrayList<String>();
  try {
    needsLocation = doesTableNeedLocation(tbl);
    if (tbl.isView()) {
      String createTab_stmt = "CREATE VIEW `" + tableName + "` AS " + tbl.getViewExpandedText();
      outStream.write(createTab_stmt.getBytes(StandardCharsets.UTF_8));
      return 0;
    }
    createTab_str.append("CREATE <" + TEMPORARY + "><" + EXTERNAL + ">TABLE `");
    createTab_str.append(tableName + "`(\n");
    createTab_str.append("<" + LIST_COLUMNS + ">)\n");
    createTab_str.append("<" + TBL_COMMENT + ">\n");
    createTab_str.append("<" + LIST_PARTITIONS + ">\n");
    createTab_str.append("<" + SORT_BUCKET + ">\n");
    createTab_str.append("<" + SKEWED_INFO + ">\n");
    createTab_str.append("<" + ROW_FORMAT + ">\n");
    if (needsLocation) {
      createTab_str.append("LOCATION\n");
      createTab_str.append("<" + TBL_LOCATION + ">\n");
    }
    createTab_str.append("TBLPROPERTIES (\n");
    createTab_str.append("<" + TBL_PROPERTIES + ">)\n");
    ST createTab_stmt = new ST(createTab_str.toString());
    // For cases where the table is temporary
    String tbl_temp = "";
    if (tbl.isTemporary()) {
      duplicateProps.add("TEMPORARY");
      tbl_temp = "TEMPORARY ";
    }
    // For cases where the table is external
    String tbl_external = "";
    if (tbl.getTableType() == TableType.EXTERNAL_TABLE) {
      duplicateProps.add("EXTERNAL");
      tbl_external = "EXTERNAL ";
    }
    // Columns
    String tbl_columns = "";
    List<FieldSchema> cols = tbl.getCols();
    List<String> columns = new ArrayList<String>();
    for (FieldSchema col : cols) {
      String columnDesc = " `" + col.getName() + "` " + col.getType();
      if (col.getComment() != null) {
        columnDesc = columnDesc + " COMMENT '" + HiveStringUtils.escapeHiveCommand(col.getComment()) + "'";
      }
      columns.add(columnDesc);
    }
    tbl_columns = StringUtils.join(columns, ", \n");
    // Table comment
    String tbl_comment = "";
    String tabComment = tbl.getProperty("comment");
    if (tabComment != null) {
      duplicateProps.add("comment");
      tbl_comment = "COMMENT '" + HiveStringUtils.escapeHiveCommand(tabComment) + "'";
    }
    // Partitions
    String tbl_partitions = "";
    List<FieldSchema> partKeys = tbl.getPartitionKeys();
    if (partKeys.size() > 0) {
      tbl_partitions += "PARTITIONED BY ( \n";
      List<String> partCols = new ArrayList<String>();
      for (FieldSchema partKey : partKeys) {
        String partColDesc = " `" + partKey.getName() + "` " + partKey.getType();
        if (partKey.getComment() != null) {
          partColDesc = partColDesc + " COMMENT '" + HiveStringUtils.escapeHiveCommand(partKey.getComment()) + "'";
        }
        partCols.add(partColDesc);
      }
      tbl_partitions += StringUtils.join(partCols, ", \n");
      tbl_partitions += ")";
    }
    // Clusters (Buckets)
    String tbl_sort_bucket = "";
    List<String> buckCols = tbl.getBucketCols();
    if (buckCols.size() > 0) {
      duplicateProps.add("SORTBUCKETCOLSPREFIX");
      tbl_sort_bucket += "CLUSTERED BY ( \n ";
      tbl_sort_bucket += StringUtils.join(buckCols, ", \n ");
      tbl_sort_bucket += ") \n";
      List<Order> sortCols = tbl.getSortCols();
      if (sortCols.size() > 0) {
        tbl_sort_bucket += "SORTED BY ( \n";
        // Order
        List<String> sortKeys = new ArrayList<String>();
        for (Order sortCol : sortCols) {
          String sortKeyDesc = " " + sortCol.getCol() + " ";
          if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
            sortKeyDesc = sortKeyDesc + "ASC";
          } else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) {
            sortKeyDesc = sortKeyDesc + "DESC";
          }
          sortKeys.add(sortKeyDesc);
        }
        tbl_sort_bucket += StringUtils.join(sortKeys, ", \n");
        tbl_sort_bucket += ") \n";
      }
      tbl_sort_bucket += "INTO " + tbl.getNumBuckets() + " BUCKETS";
    }
    // Skewed Info
    StringBuilder tbl_skewedinfo = new StringBuilder();
    SkewedInfo skewedInfo = tbl.getSkewedInfo();
    if (skewedInfo != null && !skewedInfo.getSkewedColNames().isEmpty()) {
      tbl_skewedinfo.append("SKEWED BY (" + StringUtils.join(skewedInfo.getSkewedColNames(), ",") + ")\n");
      tbl_skewedinfo.append(" ON (");
      List<String> colValueList = new ArrayList<String>();
      for (List<String> colValues : skewedInfo.getSkewedColValues()) {
        colValueList.add("('" + StringUtils.join(colValues, "','") + "')");
      }
      tbl_skewedinfo.append(StringUtils.join(colValueList, ",") + ")");
      if (tbl.isStoredAsSubDirectories()) {
        tbl_skewedinfo.append("\n STORED AS DIRECTORIES");
      }
    }
    // Row format (SerDe)
    StringBuilder tbl_row_format = new StringBuilder();
    StorageDescriptor sd = tbl.getTTable().getSd();
    SerDeInfo serdeInfo = sd.getSerdeInfo();
    Map<String, String> serdeParams = serdeInfo.getParameters();
    tbl_row_format.append("ROW FORMAT SERDE \n");
    tbl_row_format.append(" '" + HiveStringUtils.escapeHiveCommand(serdeInfo.getSerializationLib()) + "' \n");
    if (tbl.getStorageHandler() == null) {
      // SERDE properties
      if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(serdeConstants.SERIALIZATION_FORMAT))) {
        serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT);
      }
      if (!serdeParams.isEmpty()) {
        appendSerdeParams(tbl_row_format, serdeParams).append(" \n");
      }
      tbl_row_format.append("STORED AS INPUTFORMAT \n '" + HiveStringUtils.escapeHiveCommand(sd.getInputFormat()) + "' \n");
      tbl_row_format.append("OUTPUTFORMAT \n '" + HiveStringUtils.escapeHiveCommand(sd.getOutputFormat()) + "'");
    } else {
      duplicateProps.add(META_TABLE_STORAGE);
      tbl_row_format.append("STORED BY \n '" + HiveStringUtils.escapeHiveCommand(tbl.getParameters().get(META_TABLE_STORAGE)) + "' \n");
      // SerDe Properties
      if (!serdeParams.isEmpty()) {
        appendSerdeParams(tbl_row_format, serdeInfo.getParameters());
      }
    }
    String tbl_location = " '" + HiveStringUtils.escapeHiveCommand(sd.getLocation()) + "'";
    // Table properties
    duplicateProps.addAll(Arrays.asList(StatsSetupConst.TABLE_PARAMS_STATS_KEYS));
    String tbl_properties = propertiesToString(tbl.getParameters(), duplicateProps);
    createTab_stmt.add(TEMPORARY, tbl_temp);
    createTab_stmt.add(EXTERNAL, tbl_external);
    createTab_stmt.add(LIST_COLUMNS, tbl_columns);
    createTab_stmt.add(TBL_COMMENT, tbl_comment);
    createTab_stmt.add(LIST_PARTITIONS, tbl_partitions);
    createTab_stmt.add(SORT_BUCKET, tbl_sort_bucket);
    createTab_stmt.add(SKEWED_INFO, tbl_skewedinfo);
    createTab_stmt.add(ROW_FORMAT, tbl_row_format);
    // Table location should not be printed with hbase backed tables
    if (needsLocation) {
      createTab_stmt.add(TBL_LOCATION, tbl_location);
    }
    createTab_stmt.add(TBL_PROPERTIES, tbl_properties);
    outStream.write(createTab_stmt.render().getBytes(StandardCharsets.UTF_8));
  } catch (IOException e) {
    LOG.info("show create table: ", e);
    return 1;
  }
  return 0;
}
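showCreateTable assembles the statement with StringTemplate's ST class: each angle-bracket placeholder in the template string is bound with add() and expanded by render(). A minimal standalone sketch of that pattern, using an illustrative template rather than the one built above:

import org.stringtemplate.v4.ST;

public class ShowCreateSketch {
  public static void main(String[] args) {
    // Angle-bracket placeholders mirror the <temporary>, <external>, ... slots built above.
    ST stmt = new ST("CREATE <temporary><external>TABLE `<name>`");
    stmt.add("temporary", "");          // an empty string renders as nothing
    stmt.add("external", "EXTERNAL ");
    stmt.add("name", "t1");
    System.out.println(stmt.render()); // prints: CREATE EXTERNAL TABLE `t1`
  }
}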