Use of com.netflix.metacat.common.dto.TableDto in project metacat by Netflix.
The class CatalogThriftHiveMetastore, method get_partitions_by_filter:
/**
 * {@inheritDoc}
 */
@Override
public List<Partition> get_partitions_by_filter(final String dbName, final String tblName,
    final String filter, final short maxParts) throws TException {
    return requestWrapper("get_partitions_by_filter", new Object[] {dbName, tblName, filter, maxParts}, () -> {
        final String databaseName = normalizeIdentifier(dbName);
        final String tableName = normalizeIdentifier(tblName);
        final TableDto tableDto = v1.getTable(catalogName, databaseName, tableName, true, false, false);
        // Hive passes the cap as a signed short; a non-positive value means "no limit".
        final Integer maxValues = maxParts > 0 ? Short.toUnsignedInt(maxParts) : null;
        final List<PartitionDto> metacatPartitions =
            partV1.getPartitions(catalogName, dbName, tblName, filter, null, null, null, maxValues, false);
        // Convert each Metacat partition back into a Hive Thrift Partition.
        final List<Partition> result = Lists.newArrayListWithCapacity(metacatPartitions.size());
        for (final PartitionDto partition : metacatPartitions) {
            result.add(hiveConverters.metacatToHivePartition(partition, tableDto));
        }
        return result;
    });
}
Use of com.netflix.metacat.common.dto.TableDto in project metacat by Netflix.
The class CatalogThriftHiveMetastore, method rename_partition:
/**
 * {@inheritDoc}
 */
@Override
public void rename_partition(final String dbName, final String tblName, final List<String> partVals,
    final Partition newPart) throws TException {
    requestWrapper("rename_partition", new Object[] {dbName, tblName, partVals}, () -> {
        final TableDto tableDto = getTableDto(dbName, tblName);
        // Resolve the old partition's canonical name from the supplied partition values.
        final String partName = hiveConverters.getNameFromPartVals(tableDto, partVals);
        final PartitionsSaveRequestDto partitionsSaveRequestDto = new PartitionsSaveRequestDto();
        final PartitionDto partitionDto = hiveConverters.hiveToMetacatPartition(tableDto, newPart);
        // Save the partition under its new name and delete the old name in one request.
        partitionsSaveRequestDto.setPartitions(Lists.newArrayList(partitionDto));
        partitionsSaveRequestDto.setPartitionIdsForDeletes(Lists.newArrayList(partName));
        partV1.savePartitions(catalogName, dbName, tblName, partitionsSaveRequestDto);
        return null;
    });
}
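Note that the rename is expressed as a single save request: the partition under its new name goes into setPartitions, and the old name goes into setPartitionIdsForDeletes, so the add and the delete travel in one Metacat call rather than a drop followed by an add. A hedged sketch of just the request construction, compilable against the metacat and Guava dependencies (the helper class and method names are ours):

import com.google.common.collect.Lists;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;

public final class RenameRequestExample {

    // Builds the one-shot rename request: register the partition under its new
    // name and schedule the old partition id for deletion in the same call.
    static PartitionsSaveRequestDto renameRequest(final PartitionDto renamed, final String oldPartName) {
        final PartitionsSaveRequestDto request = new PartitionsSaveRequestDto();
        request.setPartitions(Lists.newArrayList(renamed));
        request.setPartitionIdsForDeletes(Lists.newArrayList(oldPartName));
        return request;
    }
}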
Use of com.netflix.metacat.common.dto.TableDto in project metacat by Netflix.
The class CatalogThriftHiveMetastore, method append_partition_with_environment_context:
/**
 * {@inheritDoc}
 */
@Override
public Partition append_partition_with_environment_context(final String dbName, final String tblName,
    final List<String> partVals, @Nullable final EnvironmentContext environmentContext) throws TException {
    return requestWrapper("append_partition_by_name_with_environment_context",
        new Object[] {dbName, tblName, partVals}, () -> {
            final TableDto tableDto = getTableDto(dbName, tblName);
            final String partName = hiveConverters.getNameFromPartVals(tableDto, partVals);
            appendPartitionsCore(dbName, tblName, partName);
            return hiveConverters.metacatToHivePartition(getPartitionDtoByName(tableDto, partName), tableDto);
        });
}
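getNameFromPartVals is not shown on this page; presumably it zips the table's partition keys, in declaration order, with the bare values from the Thrift call into Hive's canonical key=value/key=value form. A self-contained, illustrative sketch under that assumption (all names here are ours):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public final class PartNameExample {

    // Pairs each partition key with its value and joins the pairs with '/'.
    static String toPartName(final List<String> partitionKeys, final List<String> partVals) {
        if (partitionKeys.size() != partVals.size()) {
            throw new IllegalArgumentException("Expected one value per partition key");
        }
        return IntStream.range(0, partitionKeys.size())
            .mapToObj(i -> partitionKeys.get(i) + "=" + partVals.get(i))
            .collect(Collectors.joining("/"));
    }

    public static void main(final String[] args) {
        // keys [dateint, hour] + values [20170101, 12] -> "dateint=20170101/hour=12"
        System.out.println(toPartName(Arrays.asList("dateint", "hour"), Arrays.asList("20170101", "12")));
    }
}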
Use of com.netflix.metacat.common.dto.TableDto in project metacat by Netflix.
The class HiveConvertersImpl, method metacatToHivePartition:
/**
 * {@inheritDoc}
 */
@Override
public Partition metacatToHivePartition(final PartitionDto partitionDto, @Nullable final TableDto tableDto) {
    final Partition result = new Partition();
    final QualifiedName name = partitionDto.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    String databaseName = "";
    String tableName = "";
    if (name != null) {
        // Hive partition values are recovered from the key=value/key=value name.
        if (name.getPartitionName() != null) {
            for (final String partialPartName : SLASH_SPLITTER.split(partitionDto.getName().getPartitionName())) {
                final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
                if (nameValues.size() != 2) {
                    throw new IllegalStateException("Unrecognized partition name: " + partitionDto.getName());
                }
                final String value = nameValues.get(1);
                values.add(value);
            }
        }
        if (name.getDatabaseName() != null) {
            databaseName = name.getDatabaseName();
        }
        if (name.getTableName() != null) {
            tableName = name.getTableName();
        }
    }
    result.setValues(values);
    result.setDbName(databaseName);
    result.setTableName(tableName);
    Map<String, String> metadata = partitionDto.getMetadata();
    if (metadata == null) {
        metadata = Maps.newHashMap();
    }
    result.setParameters(metadata);
    result.setSd(fromStorageDto(partitionDto.getSerde()));
    final StorageDescriptor sd = result.getSd();
    // Backfill the serialization lib and column list from the table when the
    // partition's own storage descriptor omits them.
    if (tableDto != null) {
        if (sd.getSerdeInfo() != null && tableDto.getSerde() != null
            && Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
            sd.getSerdeInfo().setSerializationLib(tableDto.getSerde().getSerializationLib());
        }
        final List<FieldDto> fields = tableDto.getFields();
        if (fields == null) {
            sd.setCols(Collections.emptyList());
        } else {
            sd.setCols(fields.stream()
                .filter(field -> !field.isPartition_key())
                .map(this::metacatToHiveField)
                .collect(Collectors.toList()));
        }
    }
    final AuditDto auditDto = partitionDto.getAudit();
    if (auditDto != null) {
        if (auditDto.getCreatedDate() != null) {
            result.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
        }
        if (auditDto.getLastModifiedDate() != null) {
            result.setLastAccessTime(dateToEpochSeconds(auditDto.getLastModifiedDate()));
        }
    }
    return result;
}
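The name-parsing loop above walks slash-separated key=value pieces and keeps only the values, in key order, which is exactly what Hive's Partition.values expects. A standalone sketch of that step, assuming SLASH_SPLITTER and EQUAL_SPLITTER are plain Guava splitters on '/' and '=' (the exact splitter configuration in HiveConvertersImpl may differ):

import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.List;

public final class PartitionValuesExample {

    private static final Splitter SLASH = Splitter.on('/').omitEmptyStrings();
    private static final Splitter EQUAL = Splitter.on('=');

    // Extracts the value side of each key=value piece of a partition name.
    static List<String> valuesOf(final String partitionName) {
        final List<String> values = new ArrayList<>();
        for (final String piece : SLASH.split(partitionName)) {
            final List<String> kv = ImmutableList.copyOf(EQUAL.split(piece));
            if (kv.size() != 2) {
                throw new IllegalStateException("Unrecognized partition name: " + partitionName);
            }
            values.add(kv.get(1));
        }
        return values;
    }

    public static void main(final String[] args) {
        System.out.println(valuesOf("dateint=20170101/hour=12")); // [20170101, 12]
    }
}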
Use of com.netflix.metacat.common.dto.TableDto in project metacat by Netflix.
The class HiveConvertersImpl, method hiveToMetacatTable:
/**
 * {@inheritDoc}
 */
@Override
public TableDto hiveToMetacatTable(final QualifiedName name, final Table table) {
    final TableDto dto = new TableDto();
    dto.setSerde(toStorageDto(table.getSd(), table.getOwner()));
    dto.setAudit(new AuditDto());
    dto.setName(name);
    if (table.isSetCreateTime()) {
        dto.getAudit().setCreatedDate(epochSecondsToDate(table.getCreateTime()));
    }
    dto.setMetadata(table.getParameters());
    final List<FieldSchema> nonPartitionColumns = table.getSd().getCols();
    final List<FieldSchema> partitionColumns = table.getPartitionKeys();
    final List<FieldDto> allFields =
        Lists.newArrayListWithCapacity(nonPartitionColumns.size() + partitionColumns.size());
    nonPartitionColumns.stream().map(field -> this.hiveToMetacatField(field, false)).forEachOrdered(allFields::add);
    partitionColumns.stream().map(field -> this.hiveToMetacatField(field, true)).forEachOrdered(allFields::add);
    dto.setFields(allFields);
    dto.setView(new ViewDto(table.getViewOriginalText(), table.getViewExpandedText()));
    return dto;
}
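One conversion detail: Hive's Thrift Table stores createTime as epoch seconds in an int, while AuditDto takes a java.util.Date, so epochSecondsToDate presumably multiplies by 1000 before wrapping. A tiny sketch under that assumption (the class name is ours, and the real metacat helper may differ):

import java.util.Date;

public final class EpochExample {

    // java.util.Date counts milliseconds; Hive's createTime counts seconds.
    static Date epochSecondsToDate(final long seconds) {
        return new Date(seconds * 1000L);
    }

    public static void main(final String[] args) {
        // 1483228800 is 2017-01-01T00:00:00Z (printed in the local time zone).
        System.out.println(epochSecondsToDate(1_483_228_800L));
    }
}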