Use of com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest in project metacat by Netflix.
The class HiveConnectorPartitionService, method savePartitions.
/**
* {@inheritDoc}.
*/
@Override
public PartitionsSaveResponse savePartitions(@Nonnull @NonNull final ConnectorContext requestContext,
                                             @Nonnull @NonNull final QualifiedName tableName,
                                             @Nonnull @NonNull final PartitionsSaveRequest partitionsSaveRequest) {
    final String databasename = tableName.getDatabaseName();
    final String tablename = tableName.getTableName();
    // New partitions
    final List<Partition> hivePartitions = Lists.newArrayList();
    try {
        final Table table = metacatHiveClient.getTableByName(databasename, tablename);
        final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
        // New partition ids
        final List<String> addedPartitionIds = Lists.newArrayList();
        // Updated partition ids
        final List<String> existingPartitionIds = Lists.newArrayList();
        // Existing partitions
        final List<Partition> existingHivePartitions = Lists.newArrayList();
        // Existing partition map
        Map<String, Partition> existingPartitionMap = Collections.emptyMap();
        if (partitionsSaveRequest.getCheckIfExists()) {
            final List<String> partitionNames = partitionInfos.stream().map(partition -> {
                final String partitionName = partition.getName().getPartitionName();
                PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
                return partitionName;
            }).collect(Collectors.toList());
            existingPartitionMap = getPartitionsByNames(table, partitionNames);
        }
        final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableName, table);
        for (PartitionInfo partitionInfo : partitionInfos) {
            final String partitionName = partitionInfo.getName().getPartitionName();
            final Partition hivePartition = existingPartitionMap.get(partitionName);
            if (hivePartition == null) {
                addedPartitionIds.add(partitionName);
                hivePartitions.add(hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo));
            } else {
                // Update the existing partition only when alterIfExists is set
                if (partitionsSaveRequest.getAlterIfExists()) {
                    final Partition existingPartition = hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo);
                    existingPartitionIds.add(partitionName);
                    existingPartition.setParameters(hivePartition.getParameters());
                    existingPartition.setCreateTime(hivePartition.getCreateTime());
                    existingPartition.setLastAccessTime(hivePartition.getLastAccessTime());
                    existingHivePartitions.add(existingPartition);
                }
            }
        }
        final Set<String> deletePartitionIds = Sets.newHashSet();
        if (!partitionsSaveRequest.getAlterIfExists()) {
            deletePartitionIds.addAll(existingPartitionIds);
        }
        if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
            deletePartitionIds.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
        }
        if (partitionsSaveRequest.getAlterIfExists() && !existingHivePartitions.isEmpty()) {
            copyTableSdToPartitionSd(existingHivePartitions, table);
            metacatHiveClient.alterPartitions(databasename, tablename, existingHivePartitions);
        }
        copyTableSdToPartitionSd(hivePartitions, table);
        metacatHiveClient.addDropPartitions(databasename, tablename, hivePartitions,
            Lists.newArrayList(deletePartitionIds));
        final PartitionsSaveResponse result = new PartitionsSaveResponse();
        result.setAdded(addedPartitionIds);
        result.setUpdated(existingPartitionIds);
        return result;
    } catch (NoSuchObjectException exception) {
        if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
            throw new PartitionNotFoundException(tableName, "", exception);
        } else {
            throw new TableNotFoundException(tableName, exception);
        }
    } catch (MetaException | InvalidObjectException exception) {
        throw new InvalidMetaException("One or more partitions are invalid.", exception);
    } catch (AlreadyExistsException e) {
        final List<String> ids = getFakePartitionName(hivePartitions);
        throw new PartitionAlreadyExistsException(tableName, ids, e);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed savePartitions hive table %s", tableName), exception);
    }
}
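For orientation, here is a minimal caller-side sketch of driving this method. PartitionsSaveRequest is a plain bean, so the setter names below are assumed from the getters used in the method body rather than confirmed API, and the partitionService variable and the partition id passed for deletion are purely illustrative.

// Hypothetical caller sketch; setter names are inferred from the getters
// (getPartitions, getCheckIfExists, ...) used in savePartitions above.
final PartitionsSaveRequest request = new PartitionsSaveRequest();
request.setPartitions(partitionInfos);        // partitions to add or update
request.setCheckIfExists(true);               // look up existing partitions first
request.setAlterIfExists(false);              // when true, existing partitions are altered in place
request.setPartitionIdsForDeletes(Lists.newArrayList("dateint=20170101"));  // illustrative id
final PartitionsSaveResponse response =
    partitionService.savePartitions(requestContext, tableName, request);
// response.getAdded() and response.getUpdated() report the affected partition names.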
Use of com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest in project metacat by Netflix.
The class S3ConnectorPartitionService, method savePartitions.
@Override
public PartitionsSaveResponse savePartitions(@Nonnull final ConnectorContext context,
                                             @Nonnull final QualifiedName tableName,
                                             @Nonnull final PartitionsSaveRequest partitionsSaveRequest) {
    log.debug("Start: Save partitions for table {}", tableName);
    // Table
    final Table table = getTable(tableName);
    // New partition ids
    final List<String> addedPartitionIds = Lists.newArrayList();
    // Updated partition ids
    final List<String> existingPartitionIds = Lists.newArrayList();
    // Existing partition map
    Map<String, Partition> existingPartitionMap = Maps.newHashMap();
    if (partitionsSaveRequest.getCheckIfExists()) {
        final List<String> partitionNames = partitionsSaveRequest.getPartitions().stream().map(partition -> {
            final String partitionName = partition.getName().getPartitionName();
            PartitionUtil.validatePartitionName(partitionName, infoConverter.partitionKeys(table));
            return partitionName;
        }).collect(Collectors.toList());
        existingPartitionMap = getPartitionsByNames(table.getId(), partitionNames);
    }
    // New partitions
    final List<Partition> s3Partitions = Lists.newArrayList();
    for (PartitionInfo partition : partitionsSaveRequest.getPartitions()) {
        final String partitionName = partition.getName().getPartitionName();
        final Partition s3Partition = existingPartitionMap.get(partitionName);
        if (s3Partition == null) {
            addedPartitionIds.add(partitionName);
            s3Partitions.add(infoConverter.toPartition(table, partition));
        } else {
            final String partitionUri = infoConverter.getUri(partition);
            final String s3PartitionUri = s3Partition.getUri();
            // Rewrite the stored partition only when its URI has changed
            if (partitionUri != null && !partitionUri.equals(s3PartitionUri)) {
                s3Partition.setUri(partitionUri);
                existingPartitionIds.add(partitionName);
                s3Partitions.add(s3Partition);
            }
        }
    }
    final List<String> partitionIdsForDeletes = partitionsSaveRequest.getPartitionIdsForDeletes();
    if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
        partitionDao.deleteByNames(catalogName, tableName.getDatabaseName(), tableName.getTableName(),
            partitionIdsForDeletes);
    }
    partitionDao.save(s3Partitions);
    log.debug("End: Save partitions for table {}", tableName);
    return PartitionsSaveResponse.builder().added(addedPartitionIds).updated(existingPartitionIds).build();
}
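The update branch above rewrites a stored partition only when the request carries a different, non-null URI; partitions whose URI is unchanged (or absent) are left untouched and are reported in neither added nor updated. Restated as a standalone predicate (names here are illustrative, not part of the project):

// Illustrative restatement of the URI-comparison rule used above.
static boolean needsUriUpdate(final String requestUri, final String storedUri) {
    return requestUri != null && !requestUri.equals(storedUri);
}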
Use of com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest in project metacat by Netflix.
The class S3ConnectorPartitionService, method savePartitions.
@Override
public PartitionsSaveResponse savePartitions(@Nonnull final ConnectorRequestContext context,
                                             @Nonnull final QualifiedName tableName,
                                             @Nonnull final PartitionsSaveRequest partitionsSaveRequest) {
    log.debug("Start: Save partitions for table {}", tableName);
    // Table
    final Table table = getTable(tableName);
    // New partition ids
    final List<String> addedPartitionIds = Lists.newArrayList();
    // Updated partition ids
    final List<String> existingPartitionIds = Lists.newArrayList();
    // Existing partition map
    Map<String, Partition> existingPartitionMap = Maps.newHashMap();
    if (partitionsSaveRequest.getCheckIfExists()) {
        final List<String> partitionNames = partitionsSaveRequest.getPartitions().stream().map(partition -> {
            final String partitionName = partition.getName().getPartitionName();
            PartitionUtil.validatePartitionName(partitionName, infoConverter.partitionKeys(table));
            return partitionName;
        }).collect(Collectors.toList());
        existingPartitionMap = getPartitionsByNames(table.getId(), partitionNames);
    }
    // New partitions
    final List<Partition> s3Partitions = Lists.newArrayList();
    for (PartitionInfo partition : partitionsSaveRequest.getPartitions()) {
        final String partitionName = partition.getName().getPartitionName();
        final Partition s3Partition = existingPartitionMap.get(partitionName);
        if (s3Partition == null) {
            addedPartitionIds.add(partitionName);
            s3Partitions.add(infoConverter.toPartition(table, partition));
        } else {
            final String partitionUri = infoConverter.getUri(partition);
            final String s3PartitionUri = s3Partition.getUri();
            // Rewrite the stored partition only when its URI has changed
            if (partitionUri != null && !partitionUri.equals(s3PartitionUri)) {
                s3Partition.setUri(partitionUri);
                existingPartitionIds.add(partitionName);
                s3Partitions.add(s3Partition);
            }
        }
    }
    final List<String> partitionIdsForDeletes = partitionsSaveRequest.getPartitionIdsForDeletes();
    if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
        partitionDao.deleteByNames(catalogName, tableName.getDatabaseName(), tableName.getTableName(),
            partitionIdsForDeletes);
    }
    partitionDao.save(s3Partitions);
    log.debug("End: Save partitions for table {}", tableName);
    return PartitionsSaveResponse.builder().added(addedPartitionIds).updated(existingPartitionIds).build();
}
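Both S3 variants depend on getPartitionsByNames(table.getId(), partitionNames) returning a map keyed by partition name. Its body is not shown on this page; a plausible sketch follows, in which the partitionDao.getPartitions call and the Partition#getName accessor are assumptions rather than confirmed project API.

// Hypothetical sketch of the name-to-partition lookup used above; the DAO
// method and accessor names are assumptions, not confirmed project API.
private Map<String, Partition> getPartitionsByNames(final Long tableId, final List<String> partitionNames) {
    return partitionDao.getPartitions(tableId, partitionNames).stream()
        .collect(Collectors.toMap(Partition::getName, Function.identity()));
}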
Use of com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest in project metacat by Netflix.
The class HiveConnectorPartitionService, method savePartitions.
/**
 * By default (checkIfExists=true and alterIfExists=false), this method adds the provided list of partitions.
 * If a partition already exists, it is dropped first before adding it.
 * If checkIfExists=false, the method adds the partitions to the table. If a partition already exists,
 * an AlreadyExistsException error is thrown.
 * If alterIfExists=true, the method updates existing partitions and adds non-existent partitions.
 * If a partition in the provided partition list has all the details, then it is used. If the details are missing,
 * then the table details are inherited. This is mostly for the storage information.
 */
@Override
public PartitionsSaveResponse savePartitions(final ConnectorRequestContext requestContext,
                                             final QualifiedName tableQName,
                                             final PartitionsSaveRequest partitionsSaveRequest) {
    final String databaseName = tableQName.getDatabaseName();
    final String tableName = tableQName.getTableName();
    final Table table;
    try {
        table = metacatHiveClient.getTableByName(databaseName, tableName);
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableQName, exception);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed getting hive table %s", tableQName), e);
    }
    // New partitions
    final List<PartitionInfo> addedPartitionInfos = Lists.newArrayList();
    final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
    final List<String> partitionNames = partitionInfos.stream().map(part -> {
        final String partitionName = part.getName().getPartitionName();
        PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
        return partitionName;
    }).collect(Collectors.toList());
    // New partition names
    final List<String> addedPartitionNames = Lists.newArrayList();
    // Updated partition names
    final List<String> existingPartitionNames = Lists.newArrayList();
    // Existing partitions
    final List<PartitionHolder> existingPartitionHolders = Lists.newArrayList();
    // Existing partition map
    Map<String, PartitionHolder> existingPartitionMap = Collections.emptyMap();
    // Fetch the existing partitions only when we need to check for or alter them
    if (partitionsSaveRequest.getCheckIfExists() || partitionsSaveRequest.getAlterIfExists()) {
        existingPartitionMap = getPartitionsByNames(table, partitionNames);
    }
    for (PartitionInfo partitionInfo : partitionInfos) {
        final String partitionName = partitionInfo.getName().getPartitionName();
        final PartitionHolder existingPartitionHolder = existingPartitionMap.get(partitionName);
        if (existingPartitionHolder == null) {
            addedPartitionNames.add(partitionName);
            addedPartitionInfos.add(partitionInfo);
        } else {
            final String partitionUri = partitionInfo.getSerde() != null ? partitionInfo.getSerde().getUri() : null;
            final String existingPartitionUri = getPartitionUri(existingPartitionHolder);
            if (partitionUri == null || !partitionUri.equals(existingPartitionUri)) {
                existingPartitionNames.add(partitionName);
                // Copy the existing partition's parameters and audit details into the new partition info
                if (partitionInfo.getSerde() == null) {
                    partitionInfo.setSerde(new StorageInfo());
                }
                if (partitionInfo.getAudit() == null) {
                    partitionInfo.setAudit(new AuditInfo());
                }
                if (StringUtils.isBlank(partitionUri)) {
                    partitionInfo.getSerde().setUri(existingPartitionUri);
                }
                // Alter the existing partition in place only when alterIfExists is set
                if (partitionsSaveRequest.getAlterIfExists()) {
                    if (existingPartitionHolder.getPartition() != null) {
                        final Partition existingPartition = existingPartitionHolder.getPartition();
                        partitionInfo.getSerde().setParameters(existingPartition.getParameters());
                        partitionInfo.getAudit().setCreatedDate(
                            HiveConnectorInfoConverter.epochSecondsToDate(existingPartition.getCreateTime()));
                        partitionInfo.getAudit().setLastModifiedDate(
                            HiveConnectorInfoConverter.epochSecondsToDate(existingPartition.getLastAccessTime()));
                    } else {
                        final PartitionInfo existingPartitionInfo = existingPartitionHolder.getPartitionInfo();
                        if (existingPartitionInfo.getSerde() != null) {
                            partitionInfo.getSerde().setParameters(existingPartitionInfo.getSerde().getParameters());
                        }
                        if (existingPartitionInfo.getAudit() != null) {
                            partitionInfo.getAudit().setCreatedDate(existingPartitionInfo.getAudit().getCreatedDate());
                            partitionInfo.getAudit().setLastModifiedDate(existingPartitionInfo.getAudit().getLastModifiedDate());
                        }
                    }
                    existingPartitionHolder.setPartitionInfo(partitionInfo);
                    existingPartitionHolders.add(existingPartitionHolder);
                } else {
                    addedPartitionInfos.add(partitionInfo);
                }
            }
        }
    }
    final Set<String> deletePartitionNames = Sets.newHashSet();
    if (!partitionsSaveRequest.getAlterIfExists()) {
        deletePartitionNames.addAll(existingPartitionNames);
    }
    if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
        deletePartitionNames.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
    }
    addUpdateDropPartitions(tableQName, table, partitionNames, addedPartitionInfos,
        existingPartitionHolders, deletePartitionNames);
    final PartitionsSaveResponse result = new PartitionsSaveResponse();
    result.setAdded(addedPartitionNames);
    result.setUpdated(existingPartitionNames);
    return result;
}
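The audit handling above converts Hive's epoch-second create and last-access times to dates via HiveConnectorInfoConverter.epochSecondsToDate. A minimal sketch of such a conversion, assumed rather than taken from the project source, is:

// Assumed shape of the epoch-seconds-to-Date conversion used for the
// audit fields above; the project's actual implementation may differ.
static Date epochSecondsToDate(final long seconds) {
    return new Date(TimeUnit.SECONDS.toMillis(seconds));
}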