Search in sources:

Example 1 with FieldDto

Use of com.netflix.metacat.common.dto.FieldDto in the project metacat by Netflix.

From class ConverterUtil, method toTableDto:

/**
 * Converts from TableInfo to TableDto.
 *
 * @param tableInfo connector table info
 * @return table dto
 */
public TableDto toTableDto(final TableInfo tableInfo) {
    final TableDto dto = this.mapper.map(tableInfo, TableDto.class);
    // TODO: Add this logic in the mapping
    // Assign each field its ordinal position within the table's column list.
    final List<FieldDto> columns = dto.getFields();
    if (columns != null) {
        for (int position = 0; position < columns.size(); position++) {
            columns.get(position).setPos(position);
        }
    }
    return dto;
}
Also used : TableDto(com.netflix.metacat.common.dto.TableDto) FieldDto(com.netflix.metacat.common.dto.FieldDto)

Example 2 with FieldDto

Use of com.netflix.metacat.common.dto.FieldDto in the project metacat by Netflix.

From class HiveConvertersImpl, method metacatToHiveTable:

/**
 * {@inheritDoc}
 */
@Override
public Table metacatToHiveTable(final TableDto dto) {
    final Table table = new Table();

    // Name components default to "" only when the qualified name itself is absent;
    // a null component inside a non-null name is passed through unchanged.
    final QualifiedName qualifiedName = dto.getName();
    table.setTableName(qualifiedName == null ? "" : qualifiedName.getTableName());
    table.setDbName(qualifiedName == null ? "" : qualifiedName.getDatabaseName());

    // Owner falls back to the empty string when serde info or owner is missing.
    final StorageDto serde = dto.getSerde();
    String owner = serde == null ? null : serde.getOwner();
    if (owner == null) {
        owner = "";
    }
    table.setOwner(owner);

    // Creation time is only set when the audit record carries a created date.
    final AuditDto audit = dto.getAudit();
    if (audit != null && audit.getCreatedDate() != null) {
        table.setCreateTime(dateToEpochSeconds(audit.getCreatedDate()));
    }

    // Reuse the dto's metadata map when present, otherwise an empty mutable map.
    final Map<String, String> metadata = dto.getMetadata();
    table.setParameters(metadata != null ? metadata : new HashMap<>());

    // TODO get this
    table.setTableType("EXTERNAL_TABLE");

    table.setSd(fromStorageDto(serde));
    final StorageDescriptor sd = table.getSd();

    // Split the dto fields into partition keys and ordinary columns.
    final List<FieldDto> fields = dto.getFields();
    if (fields == null) {
        table.setPartitionKeys(Collections.emptyList());
        sd.setCols(Collections.emptyList());
    } else {
        final List<FieldSchema> columns = Lists.newArrayListWithCapacity(fields.size());
        final List<FieldSchema> partitionKeys = Lists.newArrayListWithCapacity(fields.size());
        for (final FieldDto fieldDto : fields) {
            final FieldSchema schema = metacatToHiveField(fieldDto);
            if (fieldDto.isPartition_key()) {
                partitionKeys.add(schema);
            } else {
                columns.add(schema);
            }
        }
        table.setPartitionKeys(partitionKeys);
        sd.setCols(columns);
    }
    return table;
}
Also used : Table(org.apache.hadoop.hive.metastore.api.Table) AuditDto(com.netflix.metacat.common.dto.AuditDto) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) QualifiedName(com.netflix.metacat.common.QualifiedName) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) StorageDto(com.netflix.metacat.common.dto.StorageDto) FieldDto(com.netflix.metacat.common.dto.FieldDto)

Example 3 with FieldDto

Use of com.netflix.metacat.common.dto.FieldDto in the project metacat by Netflix.

From class HiveConvertersImpl, method hiveToMetacatField:

/**
 * Converts a Hive {@code FieldSchema} into a metacat {@code FieldDto},
 * marking it as a partition key when requested. The Hive type string is
 * copied into both the type and source-type of the dto.
 */
private FieldDto hiveToMetacatField(final FieldSchema field, final boolean isPartitionKey) {
    final FieldDto result = new FieldDto();
    result.setName(field.getName());
    result.setComment(field.getComment());
    result.setPartition_key(isPartitionKey);
    result.setType(field.getType());
    result.setSource_type(field.getType());
    return result;
}
Also used : FieldDto(com.netflix.metacat.common.dto.FieldDto)

Example 4 with FieldDto

Use of com.netflix.metacat.common.dto.FieldDto in the project metacat by Netflix.

From class HiveConvertersImpl, method hiveToMetacatTable:

/**
 * {@inheritDoc}
 */
@Override
public TableDto hiveToMetacatTable(final QualifiedName name, final Table table) {
    final TableDto dto = new TableDto();
    dto.setName(name);
    dto.setSerde(toStorageDto(table.getSd(), table.getOwner()));
    dto.setMetadata(table.getParameters());

    // Audit info always present; created date only when Hive recorded one.
    final AuditDto audit = new AuditDto();
    dto.setAudit(audit);
    if (table.isSetCreateTime()) {
        audit.setCreatedDate(epochSecondsToDate(table.getCreateTime()));
    }

    // Flatten columns followed by partition keys into a single field list,
    // tagging each entry with whether it is a partition key.
    final List<FieldSchema> columns = table.getSd().getCols();
    final List<FieldSchema> partitionKeys = table.getPartitionKeys();
    final List<FieldDto> fields = Lists.newArrayListWithCapacity(columns.size() + partitionKeys.size());
    for (final FieldSchema column : columns) {
        fields.add(this.hiveToMetacatField(column, false));
    }
    for (final FieldSchema partitionKey : partitionKeys) {
        fields.add(this.hiveToMetacatField(partitionKey, true));
    }
    dto.setFields(fields);
    return dto;
}
Also used : TableDto(com.netflix.metacat.common.dto.TableDto) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) Date(java.util.Date) HashMap(java.util.HashMap) DatabaseDto(com.netflix.metacat.common.dto.DatabaseDto) StorageDto(com.netflix.metacat.common.dto.StorageDto) SerDeInfo(org.apache.hadoop.hive.metastore.api.SerDeInfo) Partition(org.apache.hadoop.hive.metastore.api.Partition) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) LinkedHashMap(java.util.LinkedHashMap) Strings(com.google.common.base.Strings) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) Map(java.util.Map) AuditDto(com.netflix.metacat.common.dto.AuditDto) Splitter(com.google.common.base.Splitter) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) Nullable(javax.annotation.Nullable) QualifiedName(com.netflix.metacat.common.QualifiedName) FieldDto(com.netflix.metacat.common.dto.FieldDto) Instant(java.time.Instant) Maps(com.google.common.collect.Maps) Collectors(java.util.stream.Collectors) Table(org.apache.hadoop.hive.metastore.api.Table) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) List(java.util.List) PartitionDto(com.netflix.metacat.common.dto.PartitionDto) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Database(org.apache.hadoop.hive.metastore.api.Database) Collections(java.util.Collections) AuditDto(com.netflix.metacat.common.dto.AuditDto) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) TableDto(com.netflix.metacat.common.dto.TableDto) FieldDto(com.netflix.metacat.common.dto.FieldDto)

Example 5 with FieldDto

Use of com.netflix.metacat.common.dto.FieldDto in the project metacat by Netflix.

From class HiveConvertersImpl, method metacatToHivePartition:

/**
 * {@inheritDoc}
 */
@Override
public Partition metacatToHivePartition(final PartitionDto partitionDto, @Nullable final TableDto tableDto) {
    final Partition partition = new Partition();

    // Parse the partition values out of the "k1=v1/k2=v2" partition name.
    final QualifiedName name = partitionDto.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    String databaseName = "";
    String tableName = "";
    if (name != null) {
        final String partitionName = name.getPartitionName();
        if (partitionName != null) {
            for (final String segment : SLASH_SPLITTER.split(partitionName)) {
                final List<String> pair = ImmutableList.copyOf(EQUAL_SPLITTER.split(segment));
                if (pair.size() != 2) {
                    throw new IllegalStateException("Unrecognized partition name: " + partitionDto.getName());
                }
                values.add(pair.get(1));
            }
        }
        if (name.getDatabaseName() != null) {
            databaseName = name.getDatabaseName();
        }
        if (name.getTableName() != null) {
            tableName = name.getTableName();
        }
    }
    partition.setValues(values);
    partition.setDbName(databaseName);
    partition.setTableName(tableName);

    // Reuse the dto's metadata map when present, otherwise an empty mutable map.
    final Map<String, String> metadata = partitionDto.getMetadata();
    partition.setParameters(metadata == null ? Maps.newHashMap() : metadata);

    partition.setSd(fromStorageDto(partitionDto.getSerde()));
    final StorageDescriptor sd = partition.getSd();
    if (tableDto != null) {
        // Inherit the serialization lib from the table when the partition lacks one.
        if (sd.getSerdeInfo() != null && tableDto.getSerde() != null
            && Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
            sd.getSerdeInfo().setSerializationLib(tableDto.getSerde().getSerializationLib());
        }
        // Partition columns are the table's non-partition-key fields.
        final List<FieldDto> fields = tableDto.getFields();
        if (fields == null) {
            sd.setCols(Collections.emptyList());
        } else {
            final List<FieldSchema> columns = Lists.newArrayListWithCapacity(fields.size());
            for (final FieldDto field : fields) {
                if (!field.isPartition_key()) {
                    columns.add(this.metacatToHiveField(field));
                }
            }
            sd.setCols(columns);
        }
    }

    // Map audit timestamps onto Hive's create/last-access epoch seconds.
    final AuditDto audit = partitionDto.getAudit();
    if (audit != null) {
        if (audit.getCreatedDate() != null) {
            partition.setCreateTime(dateToEpochSeconds(audit.getCreatedDate()));
        }
        if (audit.getLastModifiedDate() != null) {
            partition.setLastAccessTime(dateToEpochSeconds(audit.getLastModifiedDate()));
        }
    }
    return partition;
}
Also used : TableDto(com.netflix.metacat.common.dto.TableDto) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) Date(java.util.Date) HashMap(java.util.HashMap) DatabaseDto(com.netflix.metacat.common.dto.DatabaseDto) StorageDto(com.netflix.metacat.common.dto.StorageDto) SerDeInfo(org.apache.hadoop.hive.metastore.api.SerDeInfo) Partition(org.apache.hadoop.hive.metastore.api.Partition) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) LinkedHashMap(java.util.LinkedHashMap) Strings(com.google.common.base.Strings) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) Map(java.util.Map) AuditDto(com.netflix.metacat.common.dto.AuditDto) Splitter(com.google.common.base.Splitter) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) Nullable(javax.annotation.Nullable) QualifiedName(com.netflix.metacat.common.QualifiedName) FieldDto(com.netflix.metacat.common.dto.FieldDto) Instant(java.time.Instant) Maps(com.google.common.collect.Maps) Collectors(java.util.stream.Collectors) Table(org.apache.hadoop.hive.metastore.api.Table) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) List(java.util.List) PartitionDto(com.netflix.metacat.common.dto.PartitionDto) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Database(org.apache.hadoop.hive.metastore.api.Database) Collections(java.util.Collections) Partition(org.apache.hadoop.hive.metastore.api.Partition) AuditDto(com.netflix.metacat.common.dto.AuditDto) QualifiedName(com.netflix.metacat.common.QualifiedName) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) FieldDto(com.netflix.metacat.common.dto.FieldDto)

Aggregations

FieldDto (com.netflix.metacat.common.dto.FieldDto)6 QualifiedName (com.netflix.metacat.common.QualifiedName)3 AuditDto (com.netflix.metacat.common.dto.AuditDto)3 StorageDto (com.netflix.metacat.common.dto.StorageDto)3 TableDto (com.netflix.metacat.common.dto.TableDto)3 HashMap (java.util.HashMap)3 LinkedHashMap (java.util.LinkedHashMap)3 FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema)3 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)3 StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor)3 Table (org.apache.hadoop.hive.metastore.api.Table)3 VisibleForTesting (com.google.common.annotations.VisibleForTesting)2 Splitter (com.google.common.base.Splitter)2 Strings (com.google.common.base.Strings)2 ImmutableList (com.google.common.collect.ImmutableList)2 Lists (com.google.common.collect.Lists)2 Maps (com.google.common.collect.Maps)2 DatabaseDto (com.netflix.metacat.common.dto.DatabaseDto)2 PartitionDto (com.netflix.metacat.common.dto.PartitionDto)2 Instant (java.time.Instant)2