Use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
From the class AlterTableUpdateColumnsOperation, method doAlteration:
@Override
protected void doAlteration(Table table, Partition partition) throws HiveException {
  String serializationLib = table.getSd().getSerdeInfo().getSerializationLib();
  Collection<String> serdes = MetastoreConf.getStringCollection(context.getConf(),
      MetastoreConf.ConfVars.SERDES_USING_METASTORE_FOR_SCHEMA);
  if (serdes.contains(serializationLib)) {
    throw new HiveException(table.getTableName() + " has serde " + serializationLib
        + " for which schema is already handled by HMS.");
  }
  Deserializer deserializer = table.getDeserializer(true);
  try {
    LOG.info("Updating metastore columns for table: {}", table.getTableName());
    List<FieldSchema> fields =
        HiveMetaStoreUtils.getFieldsFromDeserializer(table.getTableName(), deserializer);
    StorageDescriptor sd = getStorageDescriptor(table, partition);
    sd.setCols(fields);
  } catch (org.apache.hadoop.hive.serde2.SerDeException | MetaException e) {
    // Pass the exception as the last argument (no placeholder) so SLF4J logs the stack trace
    LOG.error("alter table update columns", e);
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
  }
}
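The guard at the top is worth isolating: serdes listed under MetastoreConf.ConfVars.SERDES_USING_METASTORE_FOR_SCHEMA already have their schema managed by HMS, so overwriting their columns from the deserializer would be wrong. A minimal sketch of the same check against a plain Hadoop Configuration (the property key shown is an assumption for illustration; the real key comes from the ConfVars enum):

import java.util.Collection;
import org.apache.hadoop.conf.Configuration;

public class SchemaManagedSerdeCheck {
  // Throws if the serde's schema is already owned by the metastore.
  public static void requireSelfDescribingSerde(Configuration conf, String serializationLib) {
    // Comma-separated list of serde class names; key name is illustrative only
    Collection<String> managed = conf.getStringCollection("metastore.serdes.using.metastore.for.schema");
    if (managed.contains(serializationLib)) {
      throw new IllegalStateException("Schema for " + serializationLib + " is already handled by HMS");
    }
  }
}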
Use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
From the class AlterTableAddColumnsOperation, method doAlteration:
@Override
protected void doAlteration(Table table, Partition partition) throws HiveException {
  StorageDescriptor sd = getStorageDescriptor(table, partition);
  String serializationLib = sd.getSerdeInfo().getSerializationLib();
  AvroSerdeUtils.handleAlterTableForAvro(context.getConf(), serializationLib,
      table.getTTable().getParameters());
  List<FieldSchema> oldColumns =
      (partition == null ? table.getColsForMetastore() : partition.getColsForMetastore());
  List<FieldSchema> newColumns = desc.getNewColumns();
  if ("org.apache.hadoop.hive.serde.thrift.columnsetSerDe".equals(serializationLib)) {
    context.getConsole().printInfo(
        "Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
    sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
    sd.setCols(newColumns);
  } else {
    // make sure the new columns do not already exist
    for (FieldSchema newColumn : newColumns) {
      for (FieldSchema oldColumn : oldColumns) {
        if (oldColumn.getName().equalsIgnoreCase(newColumn.getName())) {
          throw new HiveException(ErrorMsg.DUPLICATE_COLUMN_NAMES, newColumn.getName());
        }
      }
      oldColumns.add(newColumn);
    }
    sd.setCols(oldColumns);
  }
}
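The nested loop above is O(old × new). Where column lists get large, a case-insensitive set gives the same duplicate check in linear time; a hedged standalone sketch (helper class and exception type are ours for illustration, not Hive's):

import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.hive.metastore.api.FieldSchema;

public class ColumnMergeSketch {
  // Appends newColumns to oldColumns, rejecting case-insensitive duplicate names.
  public static void addColumns(List<FieldSchema> oldColumns, List<FieldSchema> newColumns) {
    Set<String> seen = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
    for (FieldSchema existing : oldColumns) {
      seen.add(existing.getName());
    }
    for (FieldSchema added : newColumns) {
      if (!seen.add(added.getName())) {
        throw new IllegalArgumentException("Duplicate column name: " + added.getName());
      }
      oldColumns.add(added);
    }
  }
}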
Use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
From the class DDLPlanUtils, method getRowFormat:
private String getRowFormat(Table table) {
  StringBuilder rowFormat = new StringBuilder();
  StorageDescriptor sd = table.getTTable().getSd();
  SerDeInfo serdeInfo = sd.getSerdeInfo();
  rowFormat.append("ROW FORMAT SERDE \n")
      .append(" '" + HiveStringUtils.escapeHiveCommand(serdeInfo.getSerializationLib()) + "' \n");
  Map<String, String> serdeParams = serdeInfo.getParameters();
  if (table.getStorageHandler() == null) {
    // If the serialization.format property has the default value, it will not be
    // included in the SERDE properties
    if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(serdeConstants.SERIALIZATION_FORMAT))) {
      serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT);
    }
    if (!serdeParams.isEmpty()) {
      appendSerdeParams(rowFormat, serdeParams);
      rowFormat.append(" \n");
    }
    rowFormat.append("STORED AS INPUTFORMAT \n '"
        + HiveStringUtils.escapeHiveCommand(sd.getInputFormat()) + "' \n")
        .append("OUTPUTFORMAT \n '" + HiveStringUtils.escapeHiveCommand(sd.getOutputFormat()) + "'");
  } else {
    String metaTableStorage = table.getParameters().get(META_TABLE_STORAGE);
    rowFormat.append("STORED BY \n '" + HiveStringUtils.escapeHiveCommand(metaTableStorage) + "' \n");
    if (!serdeParams.isEmpty()) {
      appendSerdeParams(rowFormat, serdeInfo.getParameters());
    }
  }
  return rowFormat.toString();
}
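To make the output shape concrete, here is an illustrative driver for the non-storage-handler branch, using stock Hive class names for a plain text table (the rendered clause in the comments is what the StringBuilder appends produce; exact whitespace follows the string literals in the method):

StorageDescriptor sd = new StorageDescriptor();
sd.setSerdeInfo(new SerDeInfo());
sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
// Expected rendering:
// ROW FORMAT SERDE
//  'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
// STORED AS INPUTFORMAT
//  'org.apache.hadoop.mapred.TextInputFormat'
// OUTPUTFORMAT
//  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'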
Use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
From the class DDLPlanUtils, method getLocationBlock:
private String getLocationBlock(Table table) {
  if (!CreateTableOperation.doesTableNeedLocation(table)) {
    return "";
  }
  ST locationBlock = new ST(CREATE_TABLE_TEMPLATE_LOCATION);
  StorageDescriptor sd = table.getTTable().getSd();
  locationBlock.add(LOCATION, " '" + HiveStringUtils.escapeHiveCommand(sd.getLocation()) + "'");
  return locationBlock.render();
}
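CREATE_TABLE_TEMPLATE_LOCATION is a StringTemplate, so the rendering mechanics reduce to an add/render pair. A minimal sketch with an inline template (the template text and the path are assumptions for illustration, not Hive's actual constant):

import org.stringtemplate.v4.ST;

ST block = new ST("LOCATION\n<location>");
block.add("location", " 'hdfs://nameservice/warehouse/t1'");
System.out.println(block.render());
// LOCATION
//  'hdfs://nameservice/warehouse/t1'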
Use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
From the class TestHCatOutputFormat, method initTable:
private void initTable() throws Exception {
  try {
    client.dropTable(dbName, tblName);
  } catch (Exception e) {
    // ignore: the table may not exist yet
  }
  try {
    client.dropDatabase(dbName);
  } catch (Exception e) {
    // ignore: the database may not exist yet
  }
  client.createDatabase(new Database(dbName, "", null, null));
  assertNotNull(client.getDatabase(dbName).getLocationUri());
  List<FieldSchema> fields = new ArrayList<FieldSchema>();
  fields.add(new FieldSchema("colname", serdeConstants.STRING_TYPE_NAME, ""));
  Table tbl = new Table();
  tbl.setDbName(dbName);
  tbl.setTableName(tblName);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(Lists.newArrayList(new FieldSchema("data_column", serdeConstants.STRING_TYPE_NAME, "")));
  tbl.setSd(sd);
  sd.setInputFormat(RCFileInputFormat.class.getName());
  sd.setOutputFormat(RCFileOutputFormat.class.getName());
  sd.setParameters(new HashMap<String, String>());
  sd.getParameters().put("test_param_1", "Use this for comments etc");
  // sd.setBucketCols(new ArrayList<String>(2));
  // sd.getBucketCols().add("name");
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
  tbl.setPartitionKeys(fields);
  Map<String, String> tableParams = new HashMap<String, String>();
  tableParams.put("hcat.testarg", "testArgValue");
  tbl.setParameters(tableParams);
  client.createTable(tbl);
  Path tblPath = new Path(client.getTable(dbName, tblName).getSd().getLocation());
  assertTrue(tblPath.getFileSystem(hiveConf).mkdirs(new Path(tblPath, "colname=p1")));
}
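A natural follow-up assertion, sketched under the same test fixtures (client, dbName, and tblName from above): read the table back through the metastore client and confirm the StorageDescriptor round-tripped.

Table created = client.getTable(dbName, tblName);
assertEquals(RCFileInputFormat.class.getName(), created.getSd().getInputFormat());
assertEquals(RCFileOutputFormat.class.getName(), created.getSd().getOutputFormat());
assertEquals("data_column", created.getSd().getCols().get(0).getName());
assertEquals("testArgValue", created.getParameters().get("hcat.testarg"));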