Use of com.netflix.metacat.common.QualifiedName in project metacat by Netflix.
From the class MysqlUserMetadataService, method deleteMetadatas.
@Override
public void deleteMetadatas(final String userId, final List<HasMetadata> holders) {
    try {
        final Connection conn = poolingDataSource.getConnection();
        try {
            final List<List<HasMetadata>> subLists =
                Lists.partition(holders, config.getUserMetadataMaxInClauseItems());
            for (List<HasMetadata> hasMetadatas : subLists) {
                final List<QualifiedName> names = hasMetadatas.stream()
                    .filter(m -> m instanceof HasDefinitionMetadata)
                    .map(m -> ((HasDefinitionMetadata) m).getDefinitionName())
                    .collect(Collectors.toList());
                if (!names.isEmpty()) {
                    _deleteDefinitionMetadatas(conn, names);
                }
                if (config.canSoftDeleteDataMetadata()) {
                    final List<String> uris = hasMetadatas.stream()
                        .filter(m -> m instanceof HasDataMetadata && ((HasDataMetadata) m).isDataExternal())
                        .map(m -> ((HasDataMetadata) m).getDataUri())
                        .collect(Collectors.toList());
                    if (!uris.isEmpty()) {
                        _softDeleteDataMetadatas(conn, userId, uris);
                    }
                }
            }
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();
            throw e;
        } finally {
            conn.close();
        }
    } catch (SQLException e) {
        log.error("Sql exception", e);
        throw new UserMetadataServiceException("Failed deleting data metadata", e);
    }
}
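The method bounds each generated SQL IN clause by partitioning the input into sub-lists of at most config.getUserMetadataMaxInClauseItems() elements. A minimal sketch of that pattern, assuming an illustrative definition_metadata table, a batch size of 3, and plain string keys in place of QualifiedName:

// Illustrative sketch, not Metacat code: partition a key list into bounded
// batches so each DELETE ... IN (...) statement stays small.
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;

import java.util.Collections;
import java.util.List;

public class InClausePartitionSketch {
    public static void main(String[] args) {
        final List<String> names = List.of(
            "prodhive/db1/table_a", "prodhive/db1/table_b",
            "prodhive/db2/table_c", "prodhive/db2/table_d",
            "prodhive/db3/table_e");
        final int maxInClauseItems = 3; // stands in for config.getUserMetadataMaxInClauseItems()
        for (List<String> batch : Lists.partition(names, maxInClauseItems)) {
            // One "?" placeholder per key in this batch.
            final String placeholders = Joiner.on(',').join(Collections.nCopies(batch.size(), "?"));
            final String sql = "DELETE FROM definition_metadata WHERE name IN (" + placeholders + ")";
            System.out.println(sql + "  <- " + batch);
        }
    }
}

In the real method the placeholders would be bound through a PreparedStatement on the shared connection, so all batches commit or roll back as a single transaction, as the commit/rollback handling above shows.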
Use of com.netflix.metacat.common.QualifiedName in project metacat by Netflix.
From the class HiveConvertersImpl, method hiveToMetacatTable.
/**
 * {@inheritDoc}
 */
@Override
public TableDto hiveToMetacatTable(final QualifiedName name, final Table table) {
    final TableDto dto = new TableDto();
    dto.setSerde(toStorageDto(table.getSd(), table.getOwner()));
    dto.setAudit(new AuditDto());
    dto.setName(name);
    if (table.isSetCreateTime()) {
        dto.getAudit().setCreatedDate(epochSecondsToDate(table.getCreateTime()));
    }
    dto.setMetadata(table.getParameters());
    final List<FieldSchema> nonPartitionColumns = table.getSd().getCols();
    final List<FieldSchema> partitionColumns = table.getPartitionKeys();
    final List<FieldDto> allFields =
        Lists.newArrayListWithCapacity(nonPartitionColumns.size() + partitionColumns.size());
    nonPartitionColumns.stream()
        .map(field -> this.hiveToMetacatField(field, false))
        .forEachOrdered(allFields::add);
    partitionColumns.stream()
        .map(field -> this.hiveToMetacatField(field, true))
        .forEachOrdered(allFields::add);
    dto.setFields(allFields);
    return dto;
}
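The created date above comes from Hive's createTime, which is stored as seconds since the epoch. A minimal sketch of the conversion, assuming epochSecondsToDate (not shown in this snippet) simply scales to the milliseconds java.util.Date expects:

// Illustrative sketch of the epoch-seconds to Date conversion; the helper name
// epochSecondsToDate matches the call above, but this body is an assumption.
import java.util.Date;

public final class EpochSecondsSketch {
    static Date epochSecondsToDate(final long seconds) {
        return new Date(seconds * 1000L);
    }

    public static void main(String[] args) {
        // 1_500_000_000 seconds after the epoch is 2017-07-14 02:40:00 UTC,
        // printed here in the JVM's local time zone.
        System.out.println(epochSecondsToDate(1_500_000_000L));
    }
}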
Use of com.netflix.metacat.common.QualifiedName in project metacat by Netflix.
From the class HiveConvertersImpl, method hiveToMetacatPartition.
/**
 * {@inheritDoc}
 */
@Override
public PartitionDto hiveToMetacatPartition(final TableDto tableDto, final Partition partition) {
    final QualifiedName tableName = tableDto.getName();
    final QualifiedName partitionName = QualifiedName.ofPartition(
        tableName.getCatalogName(),
        tableName.getDatabaseName(),
        tableName.getTableName(),
        getNameFromPartVals(tableDto, partition.getValues()));
    final PartitionDto result = new PartitionDto();
    String owner = "";
    if (tableDto.getSerde() != null) {
        owner = tableDto.getSerde().getOwner();
    }
    result.setSerde(toStorageDto(partition.getSd(), owner));
    result.setMetadata(partition.getParameters());
    final AuditDto auditDto = new AuditDto();
    auditDto.setCreatedDate(epochSecondsToDate(partition.getCreateTime()));
    auditDto.setLastModifiedDate(epochSecondsToDate(partition.getLastAccessTime()));
    result.setAudit(auditDto);
    result.setName(partitionName);
    return result;
}
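QualifiedName.ofPartition receives a partition name derived from the partition's value list via getNameFromPartVals. A minimal sketch, assuming the conventional Hive form key1=value1/key2=value2 built by pairing the table's partition keys with the partition values; the real helper is not shown in this snippet:

// Illustrative sketch of building a Hive-style partition name from keys and values.
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public final class PartitionNameSketch {
    static String nameFromPartVals(final List<String> partitionKeys, final List<String> partVals) {
        if (partitionKeys.size() != partVals.size()) {
            throw new IllegalArgumentException("Partition key/value count mismatch");
        }
        return IntStream.range(0, partitionKeys.size())
            .mapToObj(i -> partitionKeys.get(i) + "=" + partVals.get(i))
            .collect(Collectors.joining("/"));
    }

    public static void main(String[] args) {
        // prints: dateint=20240101/hour=00
        System.out.println(nameFromPartVals(List.of("dateint", "hour"), List.of("20240101", "00")));
    }
}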
Use of com.netflix.metacat.common.QualifiedName in project metacat by Netflix.
From the class HiveConvertersImpl, method metacatToHivePartition.
/**
 * {@inheritDoc}
 */
@Override
public Partition metacatToHivePartition(final PartitionDto partitionDto, @Nullable final TableDto tableDto) {
    final Partition result = new Partition();
    final QualifiedName name = partitionDto.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    String databaseName = "";
    String tableName = "";
    if (name != null) {
        if (name.getPartitionName() != null) {
            for (String partialPartName : SLASH_SPLITTER.split(partitionDto.getName().getPartitionName())) {
                final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
                if (nameValues.size() != 2) {
                    throw new IllegalStateException("Unrecognized partition name: " + partitionDto.getName());
                }
                final String value = nameValues.get(1);
                values.add(value);
            }
        }
        if (name.getDatabaseName() != null) {
            databaseName = name.getDatabaseName();
        }
        if (name.getTableName() != null) {
            tableName = name.getTableName();
        }
    }
    result.setValues(values);
    result.setDbName(databaseName);
    result.setTableName(tableName);
    Map<String, String> metadata = partitionDto.getMetadata();
    if (metadata == null) {
        metadata = Maps.newHashMap();
    }
    result.setParameters(metadata);
    result.setSd(fromStorageDto(partitionDto.getSerde()));
    final StorageDescriptor sd = result.getSd();
    if (tableDto != null) {
        if (sd.getSerdeInfo() != null && tableDto.getSerde() != null
            && Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
            sd.getSerdeInfo().setSerializationLib(tableDto.getSerde().getSerializationLib());
        }
        final List<FieldDto> fields = tableDto.getFields();
        if (fields == null) {
            sd.setCols(Collections.emptyList());
        } else {
            sd.setCols(fields.stream()
                .filter(field -> !field.isPartition_key())
                .map(this::metacatToHiveField)
                .collect(Collectors.toList()));
        }
    }
    final AuditDto auditDto = partitionDto.getAudit();
    if (auditDto != null) {
        if (auditDto.getCreatedDate() != null) {
            result.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
        }
        if (auditDto.getLastModifiedDate() != null) {
            result.setLastAccessTime(dateToEpochSeconds(auditDto.getLastModifiedDate()));
        }
    }
    return result;
}
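The loop at the top of this method reverses the partition-naming convention: SLASH_SPLITTER breaks the name into key=value segments and EQUAL_SPLITTER keeps only the value of each pair. A self-contained sketch of the same parsing, assuming Guava splitters configured as shown below (the limit(2) is an illustrative choice, not necessarily the class's actual configuration):

// Illustrative sketch of splitter-based partition-value parsing.
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;

import java.util.ArrayList;
import java.util.List;

public final class PartitionValueParseSketch {
    private static final Splitter SLASH_SPLITTER = Splitter.on('/');
    private static final Splitter EQUAL_SPLITTER = Splitter.on('=').limit(2);

    static List<String> partitionValues(final String partitionName) {
        final List<String> values = new ArrayList<>();
        for (String part : SLASH_SPLITTER.split(partitionName)) {
            final List<String> kv = ImmutableList.copyOf(EQUAL_SPLITTER.split(part));
            if (kv.size() != 2) {
                throw new IllegalStateException("Unrecognized partition name: " + partitionName);
            }
            values.add(kv.get(1)); // keep the value, drop the key
        }
        return values;
    }

    public static void main(String[] args) {
        // prints: [20240101, 00]
        System.out.println(partitionValues("dateint=20240101/hour=00"));
    }
}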
Use of com.netflix.metacat.common.QualifiedName in project metacat by Netflix.
From the class CatalogThriftHiveMetastore, method drop_partition_with_environment_context.
/**
 * {@inheritDoc}
 */
@Override
public boolean drop_partition_with_environment_context(
    final String dbName, final String tblName, final List<String> partVals,
    final boolean deleteData, @Nullable final EnvironmentContext environmentContext) throws TException {
    return requestWrapper("drop_partition_with_environment_context",
        new Object[] {dbName, tblName, partVals, deleteData, environmentContext}, () -> {
            final TableDto tableDto = getTableDto(dbName, tblName);
            final String partName = hiveConverters.getNameFromPartVals(tableDto, partVals);
            final QualifiedName partitionName = getPartitionDtoByName(tableDto, partName).getName();
            if (deleteData) {
                log.warn("Ignoring command to delete data for {}/{}/{}/{}",
                    catalogName, dbName, tblName, partName);
            }
            partV1.deletePartitions(catalogName, tableDto.getName().getDatabaseName(),
                tableDto.getName().getTableName(), ImmutableList.of(partitionName.getPartitionName()));
            return true;
        });
}
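requestWrapper itself is not shown in this snippet. A minimal sketch of the kind of wrapper its usage suggests, assuming (as an illustration, not the Metacat implementation) that it times the call and translates unexpected exceptions into TException for the Thrift layer:

// Illustrative sketch of a request wrapper; names and behavior are assumptions.
import org.apache.thrift.TException;

import java.util.Arrays;
import java.util.concurrent.Callable;

public final class RequestWrapperSketch {
    static <R> R requestWrapper(final String methodName, final Object[] args, final Callable<R> action)
        throws TException {
        final long start = System.nanoTime();
        try {
            return action.call();
        } catch (TException e) {
            throw e; // already a Thrift-visible error
        } catch (Exception e) {
            // surface anything unexpected as a checked Thrift exception
            throw new TException(methodName + " failed for args " + Arrays.toString(args), e);
        } finally {
            System.out.printf("%s took %d ms%n", methodName, (System.nanoTime() - start) / 1_000_000L);
        }
    }

    public static void main(String[] args) throws TException {
        final boolean result = requestWrapper("drop_partition_with_environment_context",
            new Object[] {"db", "tbl"}, () -> true);
        System.out.println(result);
    }
}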