Usage of com.netflix.metacat.common.dto.HasMetadata in project metacat by Netflix: class PartitionServiceImpl, method save.
@Override
public PartitionsSaveResponseDto save(@Nonnull final QualifiedName name, final PartitionsSaveRequestDto dto) {
    final MetacatRequestContext requestContext = MetacatContextManager.getContext();
    final ConnectorContext connectorContext = converterUtil.toConnectorContext(requestContext);
    final ConnectorPartitionService service = connectorManager.getPartitionService(name.getCatalogName());
    final List<PartitionDto> partitionDtos = dto.getPartitions();
    // Nothing to do when the request carries no partitions.
    if (partitionDtos == null || partitionDtos.isEmpty()) {
        return new PartitionsSaveResponseDto();
    }
    final List<String> partitionIdsForDeletes = dto.getPartitionIdsForDeletes();
    registry.gauge(this.partitionAddedCountId.withTags(new HashMap<>(name.parts())), partitionDtos.size());
    if (!tableService.exists(name)) {
        throw new TableNotFoundException(name);
    }
    final boolean hasDeletes = partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty();
    List<HasMetadata> deletePartitions = Lists.newArrayList();
    if (hasDeletes) {
        eventBus.postSync(new MetacatDeleteTablePartitionPreEvent(name, requestContext, this, dto));
        registry.gauge(this.partitionDeletedCountId.withTags(new HashMap<>(name.parts())), partitionIdsForDeletes.size());
        // Look up the partitions slated for deletion so their user metadata can be removed later.
        final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto();
        requestDto.setIncludePartitionDetails(false);
        requestDto.setPartitionNames(partitionIdsForDeletes);
        final List<PartitionInfo> deletePartitionInfos = service.getPartitions(connectorContext, name, converterUtil.toPartitionListRequest(requestDto, null, null));
        if (deletePartitionInfos != null) {
            deletePartitions = deletePartitionInfos.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
        }
    }
    //
    // Save all the new and updated partitions
    //
    eventBus.postSync(new MetacatSaveTablePartitionPreEvent(name, requestContext, this, dto));
    log.info("Saving partitions({}) for {}", partitionDtos.size(), name);
    final PartitionsSaveResponseDto result = converterUtil.toPartitionsSaveResponseDto(service.savePartitions(connectorContext, name, converterUtil.toPartitionsSaveRequest(dto)));
    // Persist user metadata for the saved partitions; first drop metadata of deleted ones.
    log.info("Saving user metadata for partitions for {}", name);
    if (!deletePartitions.isEmpty()) {
        log.info("Deleting user metadata for partitions with names {} for {}", partitionIdsForDeletes, name);
        deleteMetadatas(requestContext.getUserName(), deletePartitions);
    }
    userMetadataService.saveMetadatas(requestContext.getUserName(), partitionDtos, true);
    eventBus.postAsync(new MetacatSaveTablePartitionPostEvent(name, requestContext, this, partitionDtos, result));
    if (hasDeletes) {
        eventBus.postAsync(new MetacatDeleteTablePartitionPostEvent(name, requestContext, this, partitionIdsForDeletes));
    }
    return result;
}
Usage of com.netflix.metacat.common.dto.HasMetadata in project metacat by Netflix: class PartitionServiceImpl, method delete.
@Override
public void delete(final QualifiedName name, final List<String> partitionIds) {
    final MetacatRequestContext requestContext = MetacatContextManager.getContext();
    registry.gauge(this.partitionDeletedCountId.withTags(new HashMap<>(name.parts())), partitionIds.size());
    if (!tableService.exists(name)) {
        throw new TableNotFoundException(name);
    }
    // Nothing to delete for an empty id list.
    if (partitionIds.isEmpty()) {
        return;
    }
    final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
    dto.setPartitionIdsForDeletes(partitionIds);
    eventBus.postSync(new MetacatDeleteTablePartitionPreEvent(name, requestContext, this, dto));
    final ConnectorPartitionService service = connectorManager.getPartitionService(name.getCatalogName());
    // Fetch the partitions before deleting them so their user metadata can be cleaned up afterwards.
    final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto();
    requestDto.setIncludePartitionDetails(false);
    requestDto.setPartitionNames(partitionIds);
    final ConnectorContext connectorContext = converterUtil.toConnectorContext(requestContext);
    final List<PartitionInfo> partitionInfos = service.getPartitions(connectorContext, name, converterUtil.toPartitionListRequest(requestDto, null, null));
    final List<HasMetadata> partitions = partitionInfos == null
        ? Lists.newArrayList()
        : partitionInfos.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
    log.info("Deleting partitions with names {} for {}", partitionIds, name);
    service.deletePartitions(connectorContext, name, partitionIds);
    // Remove the user metadata of the partitions that were just dropped.
    log.info("Deleting user metadata for partitions with names {} for {}", partitionIds, name);
    if (!partitions.isEmpty()) {
        deleteMetadatas(requestContext.getUserName(), partitions);
    }
    eventBus.postAsync(new MetacatDeleteTablePartitionPostEvent(name, requestContext, this, partitionIds));
}
Usage of com.netflix.metacat.common.dto.HasMetadata in project metacat by Netflix: class MysqlUserMetadataService, method saveMetadatas.
/**
 * Persists definition and data metadata for the given holders in batches within a single
 * transaction. Existing records are JSON-merged with the incoming metadata and updated;
 * missing ones are inserted.
 *
 * @param user      user name recorded as creator/modifier of the metadata rows
 * @param metadatas holders whose definition/data metadata should be saved
 * @param merge     NOTE(review): this flag is not consulted in the visible body — existing
 *                  records are always merged; confirm whether that is intended
 * @throws UserMetadataServiceException if any SQL operation fails (transaction is rolled back)
 */
@Override
public void saveMetadatas(final String user, final List<? extends HasMetadata> metadatas, final boolean merge) {
    try {
        final Connection conn = poolingDataSource.getConnection();
        try {
            // Chunk the input so lookups built from these keys stay within the configured IN-clause limit.
            @SuppressWarnings("unchecked") final List<List<HasMetadata>> subLists = Lists.partition((List<HasMetadata>) metadatas, config.getUserMetadataMaxInClauseItems());
            for (List<HasMetadata> hasMetadatas : subLists) {
                final List<String> uris = Lists.newArrayList();
                final List<QualifiedName> names = Lists.newArrayList();
                // Split the batch into definition-metadata and data-metadata holders and
                // collect the lookup keys (qualified names and data uris).
                final List<HasDefinitionMetadata> definitionMetadatas = Lists.newArrayList();
                final List<HasDataMetadata> dataMetadatas = Lists.newArrayList();
                hasMetadatas.forEach(hasMetadata -> {
                    if (hasMetadata instanceof HasDefinitionMetadata) {
                        final HasDefinitionMetadata oDef = (HasDefinitionMetadata) hasMetadata;
                        names.add(oDef.getDefinitionName());
                        if (oDef.getDefinitionMetadata() != null) {
                            definitionMetadatas.add(oDef);
                        }
                    }
                    if (hasMetadata instanceof HasDataMetadata) {
                        final HasDataMetadata oData = (HasDataMetadata) hasMetadata;
                        // Only external data with non-empty metadata is persisted.
                        if (oData.isDataExternal() && oData.getDataMetadata() != null && oData.getDataMetadata().size() > 0) {
                            uris.add(oData.getDataUri());
                            dataMetadatas.add(oData);
                        }
                    }
                });
                if (!definitionMetadatas.isEmpty() || !dataMetadatas.isEmpty()) {
                    // Existing records, keyed by name/uri, decide insert vs. update per holder.
                    final Map<String, ObjectNode> definitionMap = getDefinitionMetadataMap(names);
                    final Map<String, ObjectNode> dataMap = getDataMetadataMap(uris);
                    // Curate the parameter lists for the four batched statements.
                    final List<Object[]> insertDefinitionMetadatas = Lists.newArrayList();
                    final List<Object[]> updateDefinitionMetadatas = Lists.newArrayList();
                    final List<Object[]> insertDataMetadatas = Lists.newArrayList();
                    final List<Object[]> updateDataMetadatas = Lists.newArrayList();
                    definitionMetadatas.forEach(oDef -> {
                        final QualifiedName qualifiedName = oDef.getDefinitionName();
                        if (qualifiedName != null && oDef.getDefinitionMetadata() != null && oDef.getDefinitionMetadata().size() != 0) {
                            final String name = qualifiedName.toString();
                            final ObjectNode oNode = definitionMap.get(name);
                            if (oNode == null) {
                                insertDefinitionMetadatas.add(new Object[] { metacatJson.toJsonString(oDef.getDefinitionMetadata()), user, user, name });
                            } else {
                                // Merge the new metadata into the stored node before updating.
                                metacatJson.mergeIntoPrimary(oNode, oDef.getDefinitionMetadata());
                                updateDefinitionMetadatas.add(new Object[] { metacatJson.toJsonString(oNode), user, name });
                            }
                        }
                    });
                    dataMetadatas.forEach(oData -> {
                        final String uri = oData.getDataUri();
                        final ObjectNode oNode = dataMap.get(uri);
                        if (oData.getDataMetadata() != null && oData.getDataMetadata().size() != 0) {
                            if (oNode == null) {
                                insertDataMetadatas.add(new Object[] { metacatJson.toJsonString(oData.getDataMetadata()), user, user, uri });
                            } else {
                                metacatJson.mergeIntoPrimary(oNode, oData.getDataMetadata());
                                updateDataMetadatas.add(new Object[] { metacatJson.toJsonString(oNode), user, uri });
                            }
                        }
                    });
                    // Execute the batched statements; empty batches are skipped.
                    // toArray(new Object[0][]) lets toArray allocate the exact-size array instead
                    // of pre-building n throwaway Object[k] rows that are immediately replaced.
                    final QueryRunner runner = new QueryRunner();
                    if (!insertDefinitionMetadatas.isEmpty()) {
                        runner.batch(conn, SQL.INSERT_DEFINITION_METADATA, insertDefinitionMetadatas.toArray(new Object[0][]));
                    }
                    if (!updateDefinitionMetadatas.isEmpty()) {
                        runner.batch(conn, SQL.UPDATE_DEFINITION_METADATA, updateDefinitionMetadatas.toArray(new Object[0][]));
                    }
                    if (!insertDataMetadatas.isEmpty()) {
                        runner.batch(conn, SQL.INSERT_DATA_METADATA, insertDataMetadatas.toArray(new Object[0][]));
                    }
                    if (!updateDataMetadatas.isEmpty()) {
                        runner.batch(conn, SQL.UPDATE_DATA_METADATA, updateDataMetadatas.toArray(new Object[0][]));
                    }
                }
            }
            conn.commit();
        } catch (SQLException e) {
            // Undo any partial writes from this call before propagating.
            conn.rollback();
            throw e;
        } finally {
            conn.close();
        }
    } catch (SQLException e) {
        log.error("Sql exception", e);
        throw new UserMetadataServiceException("Failed to save metadata", e);
    }
}
Usage of com.netflix.metacat.common.dto.HasMetadata in project metacat by Netflix: class MysqlUserMetadataService, method deleteMetadatas.
@Override
public void deleteMetadatas(final String userId, final List<HasMetadata> holders) {
    try {
        final Connection connection = poolingDataSource.getConnection();
        try {
            // Work in chunks so the generated IN clauses stay within the configured limit.
            for (final List<HasMetadata> batch : Lists.partition(holders, config.getUserMetadataMaxInClauseItems())) {
                final List<QualifiedName> definitionNames = batch.stream()
                    .filter(m -> m instanceof HasDefinitionMetadata)
                    .map(m -> ((HasDefinitionMetadata) m).getDefinitionName())
                    .collect(Collectors.toList());
                if (!definitionNames.isEmpty()) {
                    _deleteDefinitionMetadatas(connection, definitionNames);
                }
                if (config.canSoftDeleteDataMetadata()) {
                    // Data metadata is only soft-deleted, and only for external data.
                    final List<String> dataUris = batch.stream()
                        .filter(m -> m instanceof HasDataMetadata && ((HasDataMetadata) m).isDataExternal())
                        .map(m -> ((HasDataMetadata) m).getDataUri())
                        .collect(Collectors.toList());
                    if (!dataUris.isEmpty()) {
                        _softDeleteDataMetadatas(connection, userId, dataUris);
                    }
                }
            }
            connection.commit();
        } catch (SQLException e) {
            // Undo any partial deletions from this call before propagating.
            connection.rollback();
            throw e;
        } finally {
            connection.close();
        }
    } catch (SQLException e) {
        log.error("Sql exception", e);
        throw new UserMetadataServiceException("Failed deleting data metadata", e);
    }
}
Aggregations