Use of com.netflix.metacat.connector.s3.model.Partition in project metacat by Netflix.
Class S3ConnectorPartitionService, method update.
@Override
public void update(@Nonnull final ConnectorContext context, @Nonnull final PartitionInfo partitionInfo) {
    final QualifiedName name = partitionInfo.getName();
    log.debug("Start: Update partition {}", name);
    final QualifiedName tableName = QualifiedName.ofTable(catalogName, name.getDatabaseName(), name.getTableName());
    // Table
    final Table table = getTable(tableName);
    final List<Partition> partitions = partitionDao.getPartitions(table.getId(),
        Lists.newArrayList(name.getPartitionName()), null, null, null, null);
    if (partitions.isEmpty()) {
        throw new PartitionNotFoundException(tableName, name.getPartitionName());
    }
    partitionDao.save(infoConverter.fromPartitionInfo(partitionInfo));
    log.debug("End: Update partition {}", name);
}
Use of com.netflix.metacat.connector.s3.model.Partition in project metacat by Netflix.
Class S3ConnectorInfoConverter, method fromPartitionInfo.
/**
 * Converts from partition info to s3 partition object.
 * @param partitionInfo partition info
 * @return s3 partition
 */
Partition fromPartitionInfo(final PartitionInfo partitionInfo) {
    final Partition result = new Partition();
    result.setName(partitionInfo.getName().getPartitionName());
    result.setUri(partitionInfo.getSerde().getUri());
    final AuditInfo auditInfo = partitionInfo.getAudit();
    if (auditInfo != null) {
        result.setCreatedDate(auditInfo.getCreatedDate());
        result.setLastUpdatedDate(auditInfo.getLastModifiedDate());
    }
    return result;
}
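The only conditional in this converter is the audit block: name and URI are always copied, while the created and last-updated dates are copied only when audit information is present. Below is a minimal stand-alone sketch of that null-safe copy; the Audit and Target types are stand-ins for illustration, not the metacat AuditInfo or Partition classes.
import java.util.Date;

final class AuditCopySketch {
    // Stand-in audit holder; in the real converter this is the PartitionInfo audit block.
    static final class Audit {
        Date createdDate;
        Date lastModifiedDate;
    }

    // Stand-in target with the two date fields the converter fills in.
    static final class Target {
        Date createdDate;
        Date lastUpdatedDate;
    }

    static Target copyAudit(final Audit audit, final Target target) {
        // The audit block is optional; when it is absent the target dates stay null.
        if (audit != null) {
            target.createdDate = audit.createdDate;
            target.lastUpdatedDate = audit.lastModifiedDate;
        }
        return target;
    }
}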
Use of com.netflix.metacat.connector.s3.model.Partition in project metacat by Netflix.
Class S3ConnectorInfoConverter, method partitionKeys.
/**
 * Gets the partition keys for the given table.
 * @param table table info
 * @return list of keys
 */
public List<String> partitionKeys(final Table table) {
    List<String> result = Lists.newArrayList();
    if (table.getLocation() != null) {
        final Schema schema = table.getLocation().getSchema();
        if (schema != null) {
            final List<Field> fields = schema.getFields();
            result = fields.stream().filter(Field::isPartitionKey).map(Field::getName).collect(Collectors.toList());
        }
    }
    return result;
}
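The keys are derived by filtering the schema fields on the partition-key flag and collecting their names in schema order. A stand-alone sketch of that stream pipeline follows; the Field class here is a simplified stand-in, not the metacat model class.
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

final class PartitionKeysSketch {
    // Stand-in for the schema field model; only the two properties the pipeline needs.
    static final class Field {
        final String name;
        final boolean partitionKey;
        Field(final String name, final boolean partitionKey) {
            this.name = name;
            this.partitionKey = partitionKey;
        }
    }

    static List<String> partitionKeys(final List<Field> fields) {
        // Keep only partition-key fields and return their names in schema order.
        return fields.stream()
            .filter(f -> f.partitionKey)
            .map(f -> f.name)
            .collect(Collectors.toList());
    }

    public static void main(final String[] args) {
        final List<Field> fields = Arrays.asList(
            new Field("id", false), new Field("dateint", true), new Field("hour", true));
        System.out.println(partitionKeys(fields)); // prints [dateint, hour]
    }
}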
Use of com.netflix.metacat.connector.s3.model.Partition in project metacat by Netflix.
Class S3ConnectorPartitionService, method create.
@Override
public void create(@Nonnull final ConnectorRequestContext context, @Nonnull final PartitionInfo partitionInfo) {
    final QualifiedName name = partitionInfo.getName();
    log.debug("Start: Create partition {}", name);
    final QualifiedName tableName = QualifiedName.ofTable(catalogName, name.getDatabaseName(), name.getTableName());
    // Table
    final Table table = getTable(tableName);
    final List<Partition> partitions = partitionDao.getPartitions(table.getId(),
        Lists.newArrayList(name.getPartitionName()), null, null, null, null);
    if (!partitions.isEmpty()) {
        throw new PartitionAlreadyExistsException(tableName, name.getPartitionName());
    }
    partitionDao.save(infoConverter.toPartition(table, partitionInfo));
    log.debug("End: Create partition {}", name);
}
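create is the mirror image of update: both perform the same existence lookup, but update requires a hit while create requires a miss. A minimal stand-alone sketch of the two guards against an in-memory store is shown below; the map and the exception types are stand-ins, not the metacat DAO or its PartitionNotFoundException/PartitionAlreadyExistsException.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class PartitionGuardSketch {
    // Stand-in store: partition name -> partition URI.
    private final Map<String, String> store = new ConcurrentHashMap<>();

    void create(final String partitionName, final String uri) {
        // Guard: creating a partition that is already registered fails.
        if (store.containsKey(partitionName)) {
            throw new IllegalStateException("Partition already exists: " + partitionName);
        }
        store.put(partitionName, uri);
    }

    void update(final String partitionName, final String newUri) {
        // Guard: updating a partition that was never created fails.
        if (!store.containsKey(partitionName)) {
            throw new IllegalStateException("Partition not found: " + partitionName);
        }
        store.put(partitionName, newUri);
    }
}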
Use of com.netflix.metacat.connector.s3.model.Partition in project metacat by Netflix.
Class S3ConnectorPartitionService, method _getConnectorPartitions.
@SuppressWarnings("checkstyle:methodname")
private List<PartitionInfo> _getConnectorPartitions(final QualifiedName tableName, final String filterExpression,
    final List<String> partitionIds, final Sort sort, final Pageable pageable,
    final boolean includePartitionDetails) {
    // Does the filter expression reference the batch id field?
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    // Support for dateCreated
    final boolean hasDateCreated =
        !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    String dateCreatedSqlCriteria = null;
    if (hasDateCreated) {
        dateCreatedSqlCriteria = getDateCreatedSqlCriteria(filterExpression);
    }
    // Table
    final Table table = getTable(tableName);
    final Collection<String> singlePartitionExprs = getSinglePartitionExprs(filterExpression);
    final List<Partition> partitions = partitionDao.getPartitions(table.getId(), partitionIds, singlePartitionExprs,
        dateCreatedSqlCriteria, sort, Strings.isNullOrEmpty(filterExpression) ? pageable : null);
    final FilterPartition filter = new FilterPartition();
    List<PartitionInfo> result = partitions.stream().filter(partition -> {
        Map<String, String> values = null;
        if (hasDateCreated) {
            values = Maps.newHashMap();
            // dateCreated is evaluated in epoch seconds.
            values.put(FIELD_DATE_CREATED, (partition.getCreatedDate().getTime() / 1000) + "");
        }
        return Strings.isNullOrEmpty(filterExpression)
            || filter.evaluatePartitionExpression(filterExpression, partition.getName(), partition.getUri(),
            isBatched, values);
    }).map(partition -> infoConverter.toPartitionInfo(tableName, table, partition)).collect(Collectors.toList());
    // When a filter expression is present, paging was not pushed down to the DAO; trim the result in memory.
    if (pageable != null && pageable.isPageable() && !Strings.isNullOrEmpty(filterExpression)) {
        int limit = pageable.getOffset() + pageable.getLimit();
        if (result.size() < limit) {
            limit = result.size();
        }
        if (pageable.getOffset() > limit) {
            result = Lists.newArrayList();
        } else {
            result = result.subList(pageable.getOffset(), limit);
        }
    }
    return result;
}
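Because a filter expression forces the partitions to be filtered in memory, the offset/limit page is also applied in memory at the end of the method. Below is a stand-alone sketch of that windowing logic; the helper name and class are assumptions for illustration, not metacat methods.
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

final class PageWindowSketch {
    static <T> List<T> window(final List<T> items, final int offset, final int pageSize) {
        int end = offset + pageSize;            // requested upper bound of the page
        if (items.size() < end) {
            end = items.size();                 // clamp to what is actually available
        }
        if (offset > end) {
            return Collections.emptyList();     // offset beyond the results: empty page
        }
        return items.subList(offset, end);      // half-open window [offset, end)
    }

    public static void main(final String[] args) {
        final List<String> names = Arrays.asList("p1", "p2", "p3", "p4", "p5");
        System.out.println(window(names, 2, 2));  // [p3, p4]
        System.out.println(window(names, 4, 10)); // [p5]
        System.out.println(window(names, 9, 10)); // []
    }
}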