Use of io.hops.hopsworks.common.dao.kafka.TopicDTO in project hopsworks by logicalclocks.
The class KafkaServingHelper, method setupKafkaTopic:
private ProjectTopics setupKafkaTopic(Project project, ServingWrapper servingWrapper)
    throws KafkaException, UserException, ProjectException, InterruptedException, ExecutionException {
  TopicDTO kafkaTopicDTO = servingWrapper.getKafkaTopicDTO();
  try {
    // Check that the user is not trying to create a topic with more replicas than brokers.
    if (kafkaTopicDTO.getNumOfReplicas() != null
        && (kafkaTopicDTO.getNumOfReplicas() <= 0
            || kafkaTopicDTO.getNumOfReplicas() > kafkaBrokers.getBrokerEndpoints().size())) {
      throw new KafkaException(RESTCodes.KafkaErrorCode.TOPIC_REPLICATION_ERROR, Level.FINE);
    } else if (kafkaTopicDTO.getNumOfReplicas() == null) {
      // No replication factor requested: fall back to the cluster default.
      kafkaTopicDTO.setNumOfReplicas(settings.getKafkaDefaultNumReplicas());
    }
  } catch (IOException | KeeperException | InterruptedException e) {
    throw new KafkaException(RESTCodes.KafkaErrorCode.BROKER_METADATA_ERROR, Level.SEVERE, "", e.getMessage(), e);
  }
  // Check that the user is not trying to create a topic with zero or negative partitions.
  if (kafkaTopicDTO.getNumOfPartitions() != null && kafkaTopicDTO.getNumOfPartitions() <= 0) {
    throw new KafkaException(RESTCodes.KafkaErrorCode.BAD_NUM_PARTITION, Level.FINE, "less than 0");
  } else if (kafkaTopicDTO.getNumOfPartitions() == null) {
    // No partition count requested: fall back to the cluster default.
    kafkaTopicDTO.setNumOfPartitions(settings.getKafkaDefaultNumPartitions());
  }
  String servingTopicName = getServingTopicName(servingWrapper);
  TopicDTO topicDTO = new TopicDTO(servingTopicName, kafkaTopicDTO.getNumOfReplicas(),
      kafkaTopicDTO.getNumOfPartitions(), Settings.INFERENCE_SCHEMANAME, Settings.INFERENCE_SCHEMAVERSION);
  ProjectTopics pt = kafkaController.createTopicInProject(project, topicDTO);
  // Add the ACLs for this topic. By default, all project members should be able to do everything.
  for (ProjectTeam projectTeam : project.getProjectTeamCollection()) {
    AclDTO aclDto = new AclDTO(project.getName(), projectTeam.getUser().getEmail(), "allow",
        Settings.KAFKA_ACL_WILDCARD, Settings.KAFKA_ACL_WILDCARD, Settings.KAFKA_ACL_WILDCARD);
    kafkaController.addAclsToTopic(topicDTO.getName(), project.getId(), aclDto);
  }
  return pt;
}
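
The validation above reduces to a simple rule: an explicit replication factor must lie between 1 and the current broker count, and a missing value falls back to the cluster default (partitions follow the same pattern). A minimal standalone sketch of that rule; the class, method, and default values here are illustrative, not part of the hopsworks API:

public class ReplicaRuleSketch {

  // Mirrors the defaulting/validation logic in setupKafkaTopic above.
  static int resolveReplicas(Integer requested, int brokerCount, int defaultReplicas) {
    if (requested == null) {
      return defaultReplicas; // no explicit value: fall back to the cluster default
    }
    if (requested <= 0 || requested > brokerCount) {
      // corresponds to KafkaErrorCode.TOPIC_REPLICATION_ERROR
      throw new IllegalArgumentException("replication factor must be between 1 and " + brokerCount);
    }
    return requested;
  }

  public static void main(String[] args) {
    System.out.println(resolveReplicas(null, 3, 1)); // 1 (default applied)
    System.out.println(resolveReplicas(3, 3, 1));    // 3 (valid, one replica per broker)
    // resolveReplicas(4, 3, 1) would throw: more replicas than brokers.
  }
}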
Use of io.hops.hopsworks.common.dao.kafka.TopicDTO in project hopsworks by logicalclocks.
The class KafkaResource, method getTopics:
@ApiOperation(value = "Retrieve Kafka topics metadata.")
@GET
@Path("/topics")
@Produces(MediaType.APPLICATION_JSON)
@AllowedProjectRoles({ AllowedProjectRoles.DATA_OWNER, AllowedProjectRoles.DATA_SCIENTIST })
@JWTRequired(acceptedTokens = { Audience.API, Audience.JOB }, allowedUserRoles = { "HOPS_ADMIN", "HOPS_USER", "HOPS_SERVICE_USER" })
@ApiKeyRequired(acceptedScopes = { ApiScope.KAFKA }, allowedUserRoles = { "HOPS_ADMIN", "HOPS_USER", "HOPS_SERVICE_USER" })
public Response getTopics(@Context UriInfo uriInfo, @BeanParam Pagination pagination,
    @BeanParam TopicsBeanParam topicsBeanParam, @Context HttpServletRequest req, @Context SecurityContext sc) {
  ResourceRequest resourceRequest = new ResourceRequest(ResourceRequest.Name.KAFKA);
  resourceRequest.setOffset(pagination.getOffset());
  resourceRequest.setLimit(pagination.getLimit());
  resourceRequest.setSort(topicsBeanParam.getSortBySet());
  resourceRequest.setFilter(topicsBeanParam.getFilter());
  TopicDTO dto = topicsBuilder.build(uriInfo, resourceRequest, project);
  return Response.ok().entity(dto).build();
}
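
The Pagination and TopicsBeanParam bean params surface as query parameters on the request. Below is a hypothetical JAX-RS client call against this endpoint; the base URL, project id, and the sort_by/filter_by parameter values are assumptions for illustration, not verified against this codebase:

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.MediaType;

public class TopicsClientSketch {
  public static void main(String[] args) {
    Client client = ClientBuilder.newClient();
    // Hypothetical base URL and project id; sort_by/filter_by values are assumed examples.
    String json = client.target("https://hopsworks.example.com/hopsworks-api/api")
        .path("project").path("119").path("kafka").path("topics")
        .queryParam("offset", 0)
        .queryParam("limit", 10)
        .queryParam("sort_by", "name:asc")
        .queryParam("filter_by", "shared:false")
        .request(MediaType.APPLICATION_JSON)
        .header("Authorization", "ApiKey " + System.getenv("HOPSWORKS_API_KEY"))
        .get(String.class);
    System.out.println(json);
  }
}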
Use of io.hops.hopsworks.common.dao.kafka.TopicDTO in project hopsworks by logicalclocks.
The class TopicsBuilder, method sortTopics:
@Override
protected List<TopicDTO> sortTopics(List<TopicDTO> list, Set<? extends AbstractFacade.SortBy> sortBySet) {
  Iterator<? extends AbstractFacade.SortBy> it = sortBySet.iterator();
  Comparator<TopicDTO> comparator = null;
  while (it.hasNext()) {
    AbstractFacade.SortBy sort = it.next();
    Comparator<String> order = sort.getParam().getValue().equals("DESC")
        ? Comparator.reverseOrder() : Comparator.naturalOrder();
    switch (ProjectTopicsFacade.TopicsSorts.valueOf(sort.getValue())) {
      case NAME:
        comparator = comparator == null
            ? Comparator.comparing(TopicDTO::getName, order)
            : comparator.thenComparing(TopicDTO::getName, order);
        break;
      case SCHEMA_NAME:
        comparator = comparator == null
            ? Comparator.comparing(TopicDTO::getSchemaName, order)
            : comparator.thenComparing(TopicDTO::getSchemaName, order);
        break;
    }
  }
  if (comparator != null) {
    list.sort(comparator);
  }
  return list;
}
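
Each requested sort key contributes one comparator, and thenComparing makes later keys act only as tie-breakers for earlier ones. A self-contained sketch of the same chaining pattern, using a stand-in record instead of TopicDTO:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class ComparatorChainSketch {

  // Stand-in for TopicDTO with just the two sortable fields.
  record SimpleTopic(String name, String schemaName) { }

  public static void main(String[] args) {
    List<SimpleTopic> topics = new ArrayList<>(List.of(
        new SimpleTopic("b", "inferenceschema"),
        new SimpleTopic("a", "inferenceschema"),
        new SimpleTopic("a", "avroschema")));
    // Primary key: schema name descending; tie-breaker: topic name ascending.
    Comparator<SimpleTopic> cmp = Comparator
        .comparing(SimpleTopic::schemaName, Comparator.<String>reverseOrder())
        .thenComparing(SimpleTopic::name, Comparator.<String>naturalOrder());
    topics.sort(cmp);
    // Prints: a/inferenceschema, b/inferenceschema, a/avroschema
    topics.forEach(t -> System.out.println(t.name() + "/" + t.schemaName()));
  }
}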
Use of io.hops.hopsworks.common.dao.kafka.TopicDTO in project hopsworks by logicalclocks.
The class OnlineFeaturegroupController, method createFeatureGroupKafkaTopic:
// For ingesting data into the online feature store, we set up a new topic for each feature group.
// The topic schema is also registered so that it is available both to the hsfs library and to the collector.
public void createFeatureGroupKafkaTopic(Project project, String featureGroupEntityName, String topicName,
    List<FeatureGroupFeatureDTO> features)
    throws KafkaException, SchemaException, ProjectException, UserException, FeaturestoreException {
  String avroSchema = avroSchemaConstructorController.constructSchema(featureGroupEntityName,
      Utils.getFeaturestoreName(project), features);
  schemasController.validateSchema(project, avroSchema);
  SubjectDTO topicSubject = subjectsController.registerNewSubject(project, topicName, avroSchema, false);
  subjectsCompatibilityController.setSubjectCompatibility(project, topicName, SchemaCompatibility.NONE);
  // TODO(Fabio): Make Kafka topics configurable
  TopicDTO topicDTO = new TopicDTO(topicName, 1, settings.getOnlineFsThreadNumber(),
      topicSubject.getSubject(), topicSubject.getVersion());
  kafkaController.createTopic(project, topicDTO);
}
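
The schema registered here is an ordinary Avro record built from the feature group's features. Purely as an illustration (this is not the exact output of avroSchemaConstructorController), a feature group with features id and value might yield something like the schema below, which the Avro parser can validate:

import org.apache.avro.Schema;

public class FeatureGroupSchemaSketch {
  public static void main(String[] args) {
    // Hypothetical record schema: one field per feature, nullable unions for feature values.
    String avroSchema = "{"
        + "\"type\":\"record\",\"name\":\"fg1_1\",\"namespace\":\"demo_featurestore\","
        + "\"fields\":["
        + "{\"name\":\"id\",\"type\":[\"null\",\"long\"]},"
        + "{\"name\":\"value\",\"type\":[\"null\",\"double\"]}]}";
    Schema parsed = new Schema.Parser().parse(avroSchema);
    System.out.println(parsed.toString(true)); // pretty-print to confirm the schema is valid
  }
}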
Use of io.hops.hopsworks.common.dao.kafka.TopicDTO in project hopsworks by logicalclocks.
The class OnlineFeaturegroupController, method createOnlineKafkaTopic:
// For ingesting data into the online feature store, we set up a new topic for each feature group.
// The topic schema is also registered so that it is available both to the hsfs library and to the collector.
private void createOnlineKafkaTopic(Project project, Integer featureGroupId, String featureGroupEntityName,
    String avroSchema) throws KafkaException, SchemaException, ProjectException, UserException {
  String topicName = onlineFeatureGroupTopicName(project.getId(), featureGroupId, featureGroupEntityName);
  SubjectDTO topicSubject = subjectsController.registerNewSubject(project, topicName, avroSchema, false);
  subjectsCompatibilityController.setSubjectCompatibility(project, topicName, SchemaCompatibility.NONE);
  // TODO(Fabio): Make Kafka topics configurable
  TopicDTO topicDTO = new TopicDTO(topicName, 1, settings.getOnlineFsThreadNumber(),
      topicSubject.getSubject(), topicSubject.getVersion());
  kafkaController.createTopic(project, topicDTO);
}
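
The onlineFeatureGroupTopicName helper is not shown in this snippet, and its real format in hopsworks may differ. A purely hypothetical stand-in, only to illustrate which identifiers feed into the topic name:

// Hypothetical: the actual hopsworks naming scheme is not shown in this snippet.
private String onlineFeatureGroupTopicName(Integer projectId, Integer featureGroupId,
    String featureGroupEntityName) {
  return projectId + "_" + featureGroupId + "_" + featureGroupEntityName;
}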