Use of com.netflix.metacat.common.dto.FieldDto in project metacat by Netflix: class ConverterUtil, method toTableDto.
/**
 * Converts from TableInfo to TableDto.
 *
 * @param tableInfo connector table info
 * @return table dto
 */
public TableDto toTableDto(final TableInfo tableInfo) {
    final TableDto result = this.mapper.map(tableInfo, TableDto.class);
    //TODO: Add this logic in the mapping
    final List<FieldDto> fields = result.getFields();
    if (fields != null) {
        int index = 0;
        for (final FieldDto field : fields) {
            field.setPos(index++);
        }
    }
    return result;
}
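The loop back-fills each field's ordinal position because the underlying object mapper does not carry it over (hence the TODO). A minimal, self-contained sketch of the same back-fill pattern, using a hypothetical Field class in place of metacat's FieldDto:

import java.util.Arrays;
import java.util.List;

final class Field {
    final String name;
    int pos; // ordinal position, assigned after mapping
    Field(final String name) {
        this.name = name;
    }
}

public final class PositionBackfill {
    public static void main(final String[] args) {
        final List<Field> fields = Arrays.asList(new Field("id"), new Field("name"));
        int index = 0;
        for (final Field field : fields) {
            field.pos = index++; // same pattern as toTableDto
        }
        fields.forEach(f -> System.out.println(f.name + " -> " + f.pos)); // id -> 0, name -> 1
    }
}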
Use of com.netflix.metacat.common.dto.FieldDto in project metacat by Netflix: class HiveConvertersImpl, method metacatToHiveTable.
/**
 * {@inheritDoc}
 */
@Override
public Table metacatToHiveTable(final TableDto dto) {
    final Table table = new Table();
    String tableName = "";
    String databaseName = "";
    final QualifiedName name = dto.getName();
    if (name != null) {
        tableName = name.getTableName();
        databaseName = name.getDatabaseName();
    }
    table.setTableName(tableName);
    table.setDbName(databaseName);
    final StorageDto storageDto = dto.getSerde();
    String owner = "";
    if (storageDto != null && storageDto.getOwner() != null) {
        owner = storageDto.getOwner();
    }
    table.setOwner(owner);
    final AuditDto auditDto = dto.getAudit();
    if (auditDto != null && auditDto.getCreatedDate() != null) {
        table.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
    }
    Map<String, String> params = new HashMap<>();
    if (dto.getMetadata() != null) {
        params = dto.getMetadata();
    }
    table.setParameters(params);
    // TODO get this
    table.setTableType("EXTERNAL_TABLE");
    table.setSd(fromStorageDto(storageDto));
    final StorageDescriptor sd = table.getSd();
    final List<FieldDto> fields = dto.getFields();
    if (fields == null) {
        table.setPartitionKeys(Collections.emptyList());
        sd.setCols(Collections.emptyList());
    } else {
        final List<FieldSchema> nonPartitionFields = Lists.newArrayListWithCapacity(fields.size());
        final List<FieldSchema> partitionFields = Lists.newArrayListWithCapacity(fields.size());
        for (final FieldDto fieldDto : fields) {
            final FieldSchema f = metacatToHiveField(fieldDto);
            if (fieldDto.isPartition_key()) {
                partitionFields.add(f);
            } else {
                nonPartitionFields.add(f);
            }
        }
        table.setPartitionKeys(partitionFields);
        sd.setCols(nonPartitionFields);
    }
    return table;
}
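A hedged usage fragment for the converter above (not a complete program; converters and dto are illustrative names for an injected HiveConvertersImpl and a populated TableDto):

// Hypothetical caller; both variable names are assumptions.
final Table hiveTable = converters.metacatToHiveTable(dto);
// Partition keys land on the Table itself; all other columns on its StorageDescriptor.
final List<FieldSchema> partitionKeys = hiveTable.getPartitionKeys();
final List<FieldSchema> dataColumns = hiveTable.getSd().getCols();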
Use of com.netflix.metacat.common.dto.FieldDto in project metacat by Netflix: class HiveConvertersImpl, method hiveToMetacatField.
private FieldDto hiveToMetacatField(final FieldSchema field, final boolean isPartitionKey) {
    final FieldDto dto = new FieldDto();
    dto.setName(field.getName());
    dto.setType(field.getType());
    dto.setSource_type(field.getType());
    dto.setComment(field.getComment());
    dto.setPartition_key(isPartitionKey);
    return dto;
}
Use of com.netflix.metacat.common.dto.FieldDto in project metacat by Netflix: class HiveConvertersImpl, method hiveToMetacatTable.
/**
 * {@inheritDoc}
 */
@Override
public TableDto hiveToMetacatTable(final QualifiedName name, final Table table) {
    final TableDto dto = new TableDto();
    dto.setSerde(toStorageDto(table.getSd(), table.getOwner()));
    dto.setAudit(new AuditDto());
    dto.setName(name);
    if (table.isSetCreateTime()) {
        dto.getAudit().setCreatedDate(epochSecondsToDate(table.getCreateTime()));
    }
    dto.setMetadata(table.getParameters());
    final List<FieldSchema> nonPartitionColumns = table.getSd().getCols();
    final List<FieldSchema> partitionColumns = table.getPartitionKeys();
    final List<FieldDto> allFields =
        Lists.newArrayListWithCapacity(nonPartitionColumns.size() + partitionColumns.size());
    nonPartitionColumns.stream()
        .map(field -> this.hiveToMetacatField(field, false))
        .forEachOrdered(allFields::add);
    partitionColumns.stream()
        .map(field -> this.hiveToMetacatField(field, true))
        .forEachOrdered(allFields::add);
    dto.setFields(allFields);
    return dto;
}
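Note the ordering contract here: non-partition columns are appended first and partition keys last, with forEachOrdered preserving each source list's order. A minimal, self-contained sketch of the same merge pattern on plain strings (all names are illustrative):

import com.google.common.collect.Lists;
import java.util.Arrays;
import java.util.List;

public final class OrderedMerge {
    public static void main(final String[] args) {
        final List<String> dataColumns = Arrays.asList("id", "name");
        final List<String> partitionColumns = Arrays.asList("dateint");
        final List<String> allColumns =
            Lists.newArrayListWithCapacity(dataColumns.size() + partitionColumns.size());
        dataColumns.stream().map(c -> c + ":data").forEachOrdered(allColumns::add);
        partitionColumns.stream().map(c -> c + ":partition").forEachOrdered(allColumns::add);
        System.out.println(allColumns); // [id:data, name:data, dateint:partition]
    }
}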
Use of com.netflix.metacat.common.dto.FieldDto in project metacat by Netflix: class HiveConvertersImpl, method metacatToHivePartition.
/**
 * {@inheritDoc}
 */
@Override
public Partition metacatToHivePartition(final PartitionDto partitionDto, @Nullable final TableDto tableDto) {
    final Partition result = new Partition();
    final QualifiedName name = partitionDto.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    String databaseName = "";
    String tableName = "";
    if (name != null) {
        if (name.getPartitionName() != null) {
            for (final String partialPartName : SLASH_SPLITTER.split(partitionDto.getName().getPartitionName())) {
                final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
                if (nameValues.size() != 2) {
                    throw new IllegalStateException("Unrecognized partition name: " + partitionDto.getName());
                }
                final String value = nameValues.get(1);
                values.add(value);
            }
        }
        if (name.getDatabaseName() != null) {
            databaseName = name.getDatabaseName();
        }
        if (name.getTableName() != null) {
            tableName = name.getTableName();
        }
    }
    result.setValues(values);
    result.setDbName(databaseName);
    result.setTableName(tableName);
    Map<String, String> metadata = partitionDto.getMetadata();
    if (metadata == null) {
        metadata = Maps.newHashMap();
    }
    result.setParameters(metadata);
    result.setSd(fromStorageDto(partitionDto.getSerde()));
    final StorageDescriptor sd = result.getSd();
    if (tableDto != null) {
        if (sd.getSerdeInfo() != null && tableDto.getSerde() != null
            && Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
            sd.getSerdeInfo().setSerializationLib(tableDto.getSerde().getSerializationLib());
        }
        final List<FieldDto> fields = tableDto.getFields();
        if (fields == null) {
            sd.setCols(Collections.emptyList());
        } else {
            sd.setCols(fields.stream()
                .filter(field -> !field.isPartition_key())
                .map(this::metacatToHiveField)
                .collect(Collectors.toList()));
        }
    }
    final AuditDto auditDto = partitionDto.getAudit();
    if (auditDto != null) {
        if (auditDto.getCreatedDate() != null) {
            result.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
        }
        if (auditDto.getLastModifiedDate() != null) {
            result.setLastAccessTime(dateToEpochSeconds(auditDto.getLastModifiedDate()));
        }
    }
    return result;
}
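The values list is built by parsing the Hive-style partition name, so "dateint=20240101/hour=00" yields ["20240101", "00"]. A self-contained sketch of that parsing, assuming SLASH_SPLITTER and EQUAL_SPLITTER are the usual Guava Splitter.on('/') and Splitter.on('='); their actual definitions are not shown in this excerpt:

import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import java.util.List;

public final class PartitionNameParser {
    // Assumed definitions; the real constants live elsewhere in HiveConvertersImpl.
    private static final Splitter SLASH_SPLITTER = Splitter.on('/');
    private static final Splitter EQUAL_SPLITTER = Splitter.on('=');

    public static void main(final String[] args) {
        final String partitionName = "dateint=20240101/hour=00";
        final List<String> values = Lists.newArrayList();
        for (final String part : SLASH_SPLITTER.split(partitionName)) {
            final List<String> nameValue = ImmutableList.copyOf(EQUAL_SPLITTER.split(part));
            if (nameValue.size() != 2) {
                throw new IllegalStateException("Unrecognized partition name: " + partitionName);
            }
            values.add(nameValue.get(1)); // keep only the value, as metacatToHivePartition does
        }
        System.out.println(values); // [20240101, 00]
    }
}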