Use of org.apache.pulsar.client.admin.Tenants in project starlight-for-kafka by datastax.
The class MetadataUtils, method createKafkaMetadataIfMissing.
/**
* This method creates the Kafka metadata tenant and namespace if they are not currently present.
* <ul>
* <li>If the cluster does not exist this method will throw a PulsarServerException.NotFoundException</li>
* <li>If the tenant does not exist it will be created</li>
* <li>If the tenant exists but the allowedClusters list does not include the cluster this method will
* add the cluster to the allowedClusters list</li>
* <li>If the namespace does not exist it will be created</li>
* <li>If the namespace exists but the replicationClusters list does not include the cluster this method
* will add the cluster to the replicationClusters list</li>
* <li>If the offset topic does not exist it will be created</li>
* <li>If the offset topic exists but some partitions are missing, the missing partitions will be created</li>
* </ul>
*/
private static void createKafkaMetadataIfMissing(String tenant, String namespace, PulsarAdmin pulsarAdmin, ClusterData clusterData, KafkaServiceConfiguration conf, KopTopic kopTopic, int partitionNum, boolean partitioned, boolean infiniteRetention) throws PulsarAdminException {
if (!conf.isKafkaManageSystemNamespaces()) {
log.info("Skipping initialization of topic {} for tenant {}", kopTopic.getFullName(), tenant);
return;
}
String cluster = conf.getClusterName();
String kafkaMetadataNamespace = tenant + "/" + namespace;
boolean clusterExists = false;
boolean tenantExists = false;
boolean namespaceExists = false;
boolean offsetsTopicExists = false;
try {
Clusters clusters = pulsarAdmin.clusters();
if (!clusters.getClusters().contains(cluster)) {
try {
pulsarAdmin.clusters().createCluster(cluster, clusterData);
} catch (PulsarAdminException e) {
if (e instanceof ConflictException) {
log.info("Attempted to create cluster {} however it was created concurrently.", cluster);
} else {
// Re-throw all other exceptions
throw e;
}
}
} else {
ClusterData configuredClusterData = clusters.getCluster(cluster);
log.info("Cluster {} found: {}", cluster, configuredClusterData);
}
clusterExists = true;
// Check if the metadata tenant exists and create it if not
Tenants tenants = pulsarAdmin.tenants();
createTenantIfMissing(tenant, conf, cluster, tenants);
tenantExists = true;
// Check if the metadata namespace exists and create it if not
Namespaces namespaces = pulsarAdmin.namespaces();
createNamespaceIfMissing(tenant, conf, cluster, kafkaMetadataNamespace, namespaces, true, infiniteRetention);
namespaceExists = true;
// Check if the offsets topic exists and create it if not
createTopicIfNotExist(conf, pulsarAdmin, kopTopic.getFullName(), partitionNum, partitioned);
offsetsTopicExists = true;
} catch (PulsarAdminException e) {
if (e instanceof ConflictException) {
log.info("Resources concurrent creating and cause e: ", e);
return;
}
log.error("Failed to successfully initialize Kafka Metadata {}", kafkaMetadataNamespace, e);
throw e;
} finally {
log.info("Current state of kafka metadata, cluster: {} exists: {}, tenant: {} exists: {}," + " namespace: {} exists: {}, topic: {} exists: {}", cluster, clusterExists, tenant, tenantExists, kafkaMetadataNamespace, namespaceExists, kopTopic.getOriginalName(), offsetsTopicExists);
}
}
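The helper createTenantIfMissing called above is not reproduced in this listing. Below is a minimal sketch of what such a helper can look like against the Pulsar admin Tenants API, matching the tenant behaviour described in the Javadoc (create the tenant if absent, otherwise add the cluster to its allowedClusters list); the class name and exact TenantInfo fields are illustrative assumptions, not the project's verbatim code.
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import org.apache.pulsar.client.admin.PulsarAdminException;
import org.apache.pulsar.client.admin.Tenants;
import org.apache.pulsar.common.policies.data.TenantInfo;

public class TenantBootstrapSketch {
    // Illustrative only: create the tenant if it is absent, otherwise make sure
    // the current cluster appears in its allowedClusters list.
    static void createTenantIfMissing(String tenant, Set<String> adminRoles, String cluster, Tenants tenants) throws PulsarAdminException {
        if (!tenants.getTenants().contains(tenant)) {
            TenantInfo info = TenantInfo.builder()
                    .adminRoles(adminRoles)
                    .allowedClusters(Collections.singleton(cluster))
                    .build();
            tenants.createTenant(tenant, info);
        } else {
            TenantInfo existing = tenants.getTenantInfo(tenant);
            if (!existing.getAllowedClusters().contains(cluster)) {
                // Tenant exists but is not yet allowed on this cluster: add it.
                Set<String> allowed = new HashSet<>(existing.getAllowedClusters());
                allowed.add(cluster);
                tenants.updateTenant(tenant, TenantInfo.builder()
                        .adminRoles(existing.getAdminRoles())
                        .allowedClusters(allowed)
                        .build());
            }
        }
    }
}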
Use of org.apache.pulsar.client.admin.Tenants in project pulsar by apache.
The class TestPulsarConnector, method setup.
@BeforeMethod
public void setup() throws Exception {
this.pulsarConnectorConfig = spy(PulsarConnectorConfig.class);
this.pulsarConnectorConfig.setMaxEntryReadBatchSize(1);
this.pulsarConnectorConfig.setMaxSplitEntryQueueSize(10);
this.pulsarConnectorConfig.setMaxSplitMessageQueueSize(100);
Tenants tenants = mock(Tenants.class);
doReturn(new LinkedList<>(topicNames.stream().map(TopicName::getTenant).collect(Collectors.toSet()))).when(tenants).getTenants();
Namespaces namespaces = mock(Namespaces.class);
when(namespaces.getNamespaces(anyString())).thenAnswer(new Answer<List<String>>() {
@Override
public List<String> answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
String tenant = (String) args[0];
List<String> ns = getNamespace(tenant);
if (ns.isEmpty()) {
ClientErrorException cee = new ClientErrorException(Response.status(404).build());
throw new PulsarAdminException(cee, cee.getMessage(), cee.getResponse().getStatus());
}
return ns;
}
});
Topics topics = mock(Topics.class);
when(topics.getList(anyString(), any())).thenAnswer(new Answer<List<String>>() {
@Override
public List<String> answer(InvocationOnMock invocationOnMock) throws Throwable {
Object[] args = invocationOnMock.getArguments();
String ns = (String) args[0];
List<String> topics = getTopics(ns);
if (topics.isEmpty()) {
ClientErrorException cee = new ClientErrorException(Response.status(404).build());
throw new PulsarAdminException(cee, cee.getMessage(), cee.getResponse().getStatus());
}
return topics;
}
});
when(topics.getPartitionedTopicList(anyString())).thenAnswer(new Answer<List<String>>() {
@Override
public List<String> answer(InvocationOnMock invocationOnMock) throws Throwable {
Object[] args = invocationOnMock.getArguments();
String ns = (String) args[0];
List<String> topics = getPartitionedTopics(ns);
if (topics.isEmpty()) {
ClientErrorException cee = new ClientErrorException(Response.status(404).build());
throw new PulsarAdminException(cee, cee.getMessage(), cee.getResponse().getStatus());
}
return topics;
}
});
when(topics.getPartitionedTopicMetadata(anyString())).thenAnswer(new Answer<PartitionedTopicMetadata>() {
@Override
public PartitionedTopicMetadata answer(InvocationOnMock invocationOnMock) throws Throwable {
Object[] args = invocationOnMock.getArguments();
String topic = (String) args[0];
int partitions = partitionedTopicsToPartitions.get(topic) == null ? 0 : partitionedTopicsToPartitions.get(topic);
return new PartitionedTopicMetadata(partitions);
}
});
schemas = mock(Schemas.class);
when(schemas.getSchemaInfo(anyString())).thenAnswer(new Answer<SchemaInfo>() {
@Override
public SchemaInfo answer(InvocationOnMock invocationOnMock) throws Throwable {
Object[] args = invocationOnMock.getArguments();
String topic = (String) args[0];
if (topicsToSchemas.get(topic) != null) {
return topicsToSchemas.get(topic);
} else {
ClientErrorException cee = new ClientErrorException(Response.status(404).build());
throw new PulsarAdminException(cee, cee.getMessage(), cee.getResponse().getStatus());
}
}
});
pulsarAdmin = mock(PulsarAdmin.class);
doReturn(tenants).when(pulsarAdmin).tenants();
doReturn(namespaces).when(pulsarAdmin).namespaces();
doReturn(topics).when(pulsarAdmin).topics();
doReturn(schemas).when(pulsarAdmin).schemas();
doReturn(pulsarAdmin).when(this.pulsarConnectorConfig).getPulsarAdmin();
this.pulsarMetadata = new PulsarMetadata(pulsarConnectorId, this.pulsarConnectorConfig, dispatchingRowDecoderFactory);
this.pulsarSplitManager = Mockito.spy(new PulsarSplitManager(pulsarConnectorId, this.pulsarConnectorConfig));
ManagedLedgerFactory managedLedgerFactory = mock(ManagedLedgerFactory.class);
when(managedLedgerFactory.openReadOnlyCursor(any(), any(), any())).then(new Answer<ReadOnlyCursor>() {
private Map<String, Integer> positions = new HashMap<>();
private int count = 0;
@Override
public ReadOnlyCursor answer(InvocationOnMock invocationOnMock) throws Throwable {
Object[] args = invocationOnMock.getArguments();
String topic = (String) args[0];
PositionImpl positionImpl = (PositionImpl) args[1];
int position = positionImpl.getEntryId() == -1 ? 0 : (int) positionImpl.getEntryId();
positions.put(topic, position);
String schemaName = TopicName.get(TopicName.get(topic.replaceAll("/persistent", "")).getPartitionedTopicName()).getSchemaName();
long entries = topicsToNumEntries.get(schemaName);
ReadOnlyCursorImpl readOnlyCursor = mock(ReadOnlyCursorImpl.class);
doReturn(entries).when(readOnlyCursor).getNumberOfEntries();
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
Integer skipEntries = (Integer) args[0];
positions.put(topic, positions.get(topic) + skipEntries);
return null;
}
}).when(readOnlyCursor).skipEntries(anyInt());
when(readOnlyCursor.getReadPosition()).thenAnswer(new Answer<PositionImpl>() {
@Override
public PositionImpl answer(InvocationOnMock invocationOnMock) throws Throwable {
return PositionImpl.get(0, positions.get(topic));
}
});
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
Object[] args = invocationOnMock.getArguments();
Integer readEntries = (Integer) args[0];
AsyncCallbacks.ReadEntriesCallback callback = (AsyncCallbacks.ReadEntriesCallback) args[2];
Object ctx = args[3];
new Thread(new Runnable() {
@Override
public void run() {
List<Entry> entries = new LinkedList<>();
for (int i = 0; i < readEntries; i++) {
TestPulsarConnector.Bar bar = new TestPulsarConnector.Bar();
bar.field1 = fooFunctions.get("bar.field1").apply(count) == null ? null : (int) fooFunctions.get("bar.field1").apply(count);
bar.field2 = fooFunctions.get("bar.field2").apply(count) == null ? null : (String) fooFunctions.get("bar.field2").apply(count);
bar.field3 = (float) fooFunctions.get("bar.field3").apply(count);
Foo foo = new Foo();
foo.field1 = (int) fooFunctions.get("field1").apply(count);
foo.field2 = (String) fooFunctions.get("field2").apply(count);
foo.field3 = (float) fooFunctions.get("field3").apply(count);
foo.field4 = (double) fooFunctions.get("field4").apply(count);
foo.field5 = (boolean) fooFunctions.get("field5").apply(count);
foo.field6 = (long) fooFunctions.get("field6").apply(count);
foo.timestamp = (long) fooFunctions.get("timestamp").apply(count);
foo.time = (int) fooFunctions.get("time").apply(count);
foo.date = (int) fooFunctions.get("date").apply(count);
foo.bar = bar;
foo.field7 = (Foo.TestEnum) fooFunctions.get("field7").apply(count);
MessageMetadata messageMetadata = new MessageMetadata().setProducerName("test-producer").setSequenceId(positions.get(topic)).setPublishTime(System.currentTimeMillis());
Schema schema = topicsToSchemas.get(schemaName).getType() == SchemaType.AVRO ? AvroSchema.of(Foo.class) : JSONSchema.of(Foo.class);
ByteBuf payload = io.netty.buffer.Unpooled.copiedBuffer(schema.encode(foo));
ByteBuf byteBuf = serializeMetadataAndPayload(Commands.ChecksumType.Crc32c, messageMetadata, payload);
completedBytes += byteBuf.readableBytes();
entries.add(EntryImpl.create(0, positions.get(topic), byteBuf));
positions.put(topic, positions.get(topic) + 1);
count++;
}
callback.readEntriesComplete(entries, ctx);
}
}).start();
return null;
}
}).when(readOnlyCursor).asyncReadEntries(anyInt(), anyLong(), any(), any(), any());
when(readOnlyCursor.hasMoreEntries()).thenAnswer(new Answer<Boolean>() {
@Override
public Boolean answer(InvocationOnMock invocationOnMock) throws Throwable {
return positions.get(topic) < entries;
}
});
when(readOnlyCursor.findNewestMatching(any(), any())).then(new Answer<Position>() {
@Override
public Position answer(InvocationOnMock invocationOnMock) throws Throwable {
Object[] args = invocationOnMock.getArguments();
com.google.common.base.Predicate<Entry> predicate = (com.google.common.base.Predicate<Entry>) args[1];
String schemaName = TopicName.get(TopicName.get(topic.replaceAll("/persistent", "")).getPartitionedTopicName()).getSchemaName();
List<Entry> entries = getTopicEntries(schemaName);
Integer target = null;
for (int i = entries.size() - 1; i >= 0; i--) {
Entry entry = entries.get(i);
if (predicate.apply(entry)) {
target = i;
break;
}
}
return target == null ? null : new PositionImpl(0, target);
}
});
when(readOnlyCursor.getNumberOfEntries(any())).then(new Answer<Long>() {
@Override
public Long answer(InvocationOnMock invocationOnMock) throws Throwable {
Object[] args = invocationOnMock.getArguments();
com.google.common.collect.Range<PositionImpl> range = (com.google.common.collect.Range<PositionImpl>) args[0];
return (range.upperEndpoint().getEntryId() + 1) - range.lowerEndpoint().getEntryId();
}
});
when(readOnlyCursor.getCurrentLedgerInfo()).thenReturn(MLDataFormats.ManagedLedgerInfo.LedgerInfo.newBuilder().setLedgerId(0).build());
return readOnlyCursor;
}
});
PulsarConnectorCache.instance = mock(PulsarConnectorCache.class);
when(PulsarConnectorCache.instance.getManagedLedgerFactory()).thenReturn(managedLedgerFactory);
for (Map.Entry<TopicName, PulsarSplit> split : splits.entrySet()) {
PulsarRecordCursor pulsarRecordCursor = spy(new PulsarRecordCursor(topicsToColumnHandles.get(split.getKey()), split.getValue(), pulsarConnectorConfig, managedLedgerFactory, new ManagedLedgerConfig(), new PulsarConnectorMetricsTracker(new NullStatsProvider()), dispatchingRowDecoderFactory));
this.pulsarRecordCursors.put(split.getKey(), pulsarRecordCursor);
}
}
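Much of the setup above boils down to stubbing the PulsarAdmin accessor chain so that metadata lookups never reach a real broker. Here is a condensed, self-contained sketch of that same Mockito pattern in isolation; the class and method names are illustrative and not part of the test.
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import java.util.Arrays;
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.Tenants;

public class FakeAdminSketch {
    // Build a PulsarAdmin whose tenants() handle answers from a fixed list,
    // mirroring how TestPulsarConnector wires its larger mock graph.
    static PulsarAdmin adminWithTenants(String... tenantNames) throws Exception {
        Tenants tenants = mock(Tenants.class);
        doReturn(Arrays.asList(tenantNames)).when(tenants).getTenants();
        PulsarAdmin admin = mock(PulsarAdmin.class);
        doReturn(tenants).when(admin).tenants();
        return admin;
    }

    public static void main(String[] args) throws Exception {
        PulsarAdmin admin = adminWithTenants("public", "pulsar");
        // Code under test that calls admin.tenants().getTenants() now sees the fixed list.
        System.out.println(admin.tenants().getTenants());
    }
}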
Use of org.apache.pulsar.client.admin.Tenants in project kop by streamnative.
The class MetadataUtils, method createKafkaMetadataIfMissing.
/**
* This method creates the Kafka metadata tenant and namespace if they are not currently present.
* <ul>
* <li>If the cluster does not exist this method will throw a PulsarServerException.NotFoundException</li>
* <li>If the tenant does not exist it will be created</li>
* <li>If the tenant exists but the allowedClusters list does not include the cluster this method will
* add the cluster to the allowedClusters list</li>
* <li>If the namespace does not exist it will be created</li>
* <li>If the namespace exists but the replicationClusters list does not include the cluster this method
* will add the cluster to the replicationClusters list</li>
* <li>If the offset topic does not exist it will be created</li>
* <li>If the offset topic exists but some partitions are missing, the missing partitions will be created</li>
* </ul>
*/
private static void createKafkaMetadataIfMissing(String tenant, PulsarAdmin pulsarAdmin, ClusterData clusterData, KafkaServiceConfiguration conf, KopTopic kopTopic, int partitionNum) throws PulsarAdminException {
if (!conf.isKafkaManageSystemNamespaces()) {
log.info("Skipping initialization of topic {} for tenant {}", kopTopic.getFullName(), tenant);
return;
}
String cluster = conf.getClusterName();
String kafkaMetadataNamespace = tenant + "/" + conf.getKafkaMetadataNamespace();
boolean clusterExists = false;
boolean tenantExists = false;
boolean namespaceExists = false;
boolean offsetsTopicExists = false;
try {
Clusters clusters = pulsarAdmin.clusters();
if (!clusters.getClusters().contains(cluster)) {
try {
pulsarAdmin.clusters().createCluster(cluster, clusterData);
} catch (PulsarAdminException e) {
if (e instanceof ConflictException) {
log.info("Attempted to create cluster {} however it was created concurrently.", cluster);
} else {
// Re-throw all other exceptions
throw e;
}
}
} else {
ClusterData configuredClusterData = clusters.getCluster(cluster);
log.info("Cluster {} found: {}", cluster, configuredClusterData);
}
clusterExists = true;
// Check if the metadata tenant exists and create it if not
Tenants tenants = pulsarAdmin.tenants();
createTenantIfMissing(tenant, conf, cluster, tenants);
tenantExists = true;
// Check if the metadata namespace exists and create it if not
Namespaces namespaces = pulsarAdmin.namespaces();
createNamespaceIfMissing(tenant, conf, cluster, kafkaMetadataNamespace, namespaces);
namespaceExists = true;
// Check if the offsets topic exists and create it if not
createTopicIfNotExist(pulsarAdmin, kopTopic.getFullName(), partitionNum);
offsetsTopicExists = true;
} catch (PulsarAdminException e) {
if (e instanceof ConflictException) {
log.info("Resources concurrent creating and cause e: ", e);
return;
}
log.error("Failed to successfully initialize Kafka Metadata {}", kafkaMetadataNamespace, e);
throw e;
} finally {
log.info("Current state of kafka metadata, cluster: {} exists: {}, tenant: {} exists: {}," + " namespace: {} exists: {}, topic: {} exists: {}", cluster, clusterExists, tenant, tenantExists, kafkaMetadataNamespace, namespaceExists, kopTopic.getOriginalName(), offsetsTopicExists);
}
}
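As in the first listing, the createTopicIfNotExist helper is referenced but not shown. The following is a hedged sketch of how the partitioned offsets topic could be created or repaired with the Pulsar admin Topics API, consistent with the Javadoc's "missing partitions will be created" behaviour; the real project's error handling and signature may differ.
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.PulsarAdminException;
import org.apache.pulsar.common.partition.PartitionedTopicMetadata;

public class TopicBootstrapSketch {
    // Illustrative only: create the partitioned topic when its metadata is absent,
    // otherwise ask the broker to create any partitions that are still missing.
    static void createTopicIfNotExist(PulsarAdmin admin, String topic, int numPartitions) throws PulsarAdminException {
        PartitionedTopicMetadata metadata = admin.topics().getPartitionedTopicMetadata(topic);
        if (metadata.partitions <= 0) {
            admin.topics().createPartitionedTopic(topic, numPartitions);
        } else {
            admin.topics().createMissedPartitions(topic);
        }
    }
}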
Use of org.apache.pulsar.client.admin.Tenants in project kop by streamnative.
The class MetadataUtilsTest, method testCreateKafkaMetadataIfMissing.
@Test(timeOut = 30000)
public void testCreateKafkaMetadataIfMissing() throws Exception {
String namespacePrefix = "public/default";
KafkaServiceConfiguration conf = new KafkaServiceConfiguration();
assertTrue(conf.isKafkaManageSystemNamespaces());
ClusterData clusterData = ClusterData.builder().build();
conf.setClusterName("test");
conf.setKafkaMetadataTenant("public");
conf.setKafkaMetadataNamespace("default");
conf.setSuperUserRoles(Sets.newHashSet("admin"));
conf.setOffsetsTopicNumPartitions(8);
final KopTopic offsetsTopic = new KopTopic(MetadataUtils.constructOffsetsTopicBaseName(conf.getKafkaMetadataTenant(), conf), namespacePrefix);
final KopTopic txnTopic = new KopTopic(MetadataUtils.constructTxnLogTopicBaseName(conf.getKafkaMetadataTenant(), conf), namespacePrefix);
List<String> emptyList = Lists.newArrayList();
List<String> existingClusters = Lists.newArrayList("test");
Clusters mockClusters = mock(Clusters.class);
doReturn(existingClusters).when(mockClusters).getClusters();
Tenants mockTenants = mock(Tenants.class);
doReturn(emptyList).when(mockTenants).getTenants();
Namespaces mockNamespaces = mock(Namespaces.class);
doReturn(emptyList).when(mockNamespaces).getNamespaces("public");
PartitionedTopicMetadata offsetTopicMetadata = new PartitionedTopicMetadata();
Topics mockTopics = mock(Topics.class);
doReturn(offsetTopicMetadata).when(mockTopics).getPartitionedTopicMetadata(eq(offsetsTopic.getFullName()));
doReturn(offsetTopicMetadata).when(mockTopics).getPartitionedTopicMetadata(eq(txnTopic.getFullName()));
PulsarAdmin mockPulsarAdmin = mock(PulsarAdmin.class);
doReturn(mockClusters).when(mockPulsarAdmin).clusters();
doReturn(mockTenants).when(mockPulsarAdmin).tenants();
doReturn(mockNamespaces).when(mockPulsarAdmin).namespaces();
doReturn(mockTopics).when(mockPulsarAdmin).topics();
TenantInfo partialTenant = TenantInfo.builder().build();
doReturn(partialTenant).when(mockTenants).getTenantInfo(eq(conf.getKafkaMetadataTenant()));
MetadataUtils.createOffsetMetadataIfMissing(conf.getKafkaMetadataTenant(), mockPulsarAdmin, clusterData, conf);
// After calling createOffsetMetadataIfMissing, these methods should return the expected data.
doReturn(Lists.newArrayList(conf.getKafkaMetadataTenant())).when(mockTenants).getTenants();
String namespace = conf.getKafkaMetadataTenant() + "/" + conf.getKafkaMetadataNamespace();
doReturn(Lists.newArrayList(namespace)).when(mockNamespaces).getNamespaces(conf.getKafkaMetadataTenant());
doReturn(Lists.newArrayList(conf.getClusterName())).when(mockNamespaces).getNamespaceReplicationClusters(eq(namespace));
MetadataUtils.createTxnMetadataIfMissing(conf.getKafkaMetadataTenant(), mockPulsarAdmin, clusterData, conf);
verify(mockTenants, times(1)).createTenant(eq(conf.getKafkaMetadataTenant()), any(TenantInfo.class));
verify(mockNamespaces, times(1)).createNamespace(eq(conf.getKafkaMetadataTenant() + "/" + conf.getKafkaMetadataNamespace()), any(Set.class));
verify(mockNamespaces, times(1)).setNamespaceReplicationClusters(eq(conf.getKafkaMetadataTenant() + "/" + conf.getKafkaMetadataNamespace()), any(Set.class));
verify(mockNamespaces, times(2)).setRetention(eq(conf.getKafkaMetadataTenant() + "/" + conf.getKafkaMetadataNamespace()), any(RetentionPolicies.class));
verify(mockNamespaces, times(2)).setNamespaceMessageTTL(eq(conf.getKafkaMetadataTenant() + "/" + conf.getKafkaMetadataNamespace()), any(Integer.class));
verify(mockTopics, times(1)).createPartitionedTopic(eq(offsetsTopic.getFullName()), eq(conf.getOffsetsTopicNumPartitions()));
verify(mockTopics, times(1)).createPartitionedTopic(eq(txnTopic.getFullName()), eq(conf.getKafkaTxnLogTopicNumPartitions()));
// Test that cluster is added to existing Tenant if missing
// Test that the cluster is added to the namespace replication cluster list if it is missing
// Test that missing offset topic partitions are created
reset(mockTenants);
reset(mockNamespaces);
reset(mockTopics);
doReturn(Lists.newArrayList("public")).when(mockTenants).getTenants();
partialTenant = TenantInfo.builder().adminRoles(conf.getSuperUserRoles()).allowedClusters(Sets.newHashSet("other-cluster")).build();
doReturn(partialTenant).when(mockTenants).getTenantInfo(eq(conf.getKafkaMetadataTenant()));
doReturn(Lists.newArrayList("test")).when(mockNamespaces).getNamespaces("public");
doReturn(emptyList).when(mockNamespaces).getNamespaceReplicationClusters(eq(conf.getKafkaMetadataTenant()));
List<String> incompletePartitionList = new ArrayList<String>(conf.getOffsetsTopicNumPartitions());
for (int i = 0; i < conf.getOffsetsTopicNumPartitions() - 2; i++) {
incompletePartitionList.add(offsetsTopic.getPartitionName(i));
}
for (int i = 0; i < conf.getKafkaTxnLogTopicNumPartitions() - 2; i++) {
incompletePartitionList.add(txnTopic.getPartitionName(i));
}
doReturn(new PartitionedTopicMetadata(8)).when(mockTopics).getPartitionedTopicMetadata(eq(offsetsTopic.getFullName()));
doReturn(new PartitionedTopicMetadata(8)).when(mockTopics).getPartitionedTopicMetadata(eq(txnTopic.getFullName()));
doReturn(incompletePartitionList).when(mockTopics).getList(eq(conf.getKafkaMetadataTenant() + "/" + conf.getKafkaMetadataNamespace()));
MetadataUtils.createOffsetMetadataIfMissing(conf.getKafkaMetadataTenant(), mockPulsarAdmin, clusterData, conf);
MetadataUtils.createTxnMetadataIfMissing(conf.getKafkaMetadataTenant(), mockPulsarAdmin, clusterData, conf);
verify(mockTenants, times(1)).updateTenant(eq(conf.getKafkaMetadataTenant()), any(TenantInfo.class));
verify(mockNamespaces, times(2)).setNamespaceReplicationClusters(eq(conf.getKafkaMetadataTenant() + "/" + conf.getKafkaMetadataNamespace()), any(Set.class));
verify(mockTopics, times(1)).createMissedPartitions(contains(offsetsTopic.getOriginalName()));
verify(mockTopics, times(1)).createMissedPartitions(contains(txnTopic.getOriginalName()));
}
Use of org.apache.pulsar.client.admin.Tenants in project pulsar by yahoo.
The class PulsarFunctionTlsTest, method setup.
@BeforeMethod
void setup(Method method) throws Exception {
log.info("--- Setting up method {} ---", method.getName());
// Start local bookkeeper ensemble
bkEnsemble = new LocalBookkeeperEnsemble(3, 0, () -> 0);
bkEnsemble.start();
config = spy(ServiceConfiguration.class);
config.setBrokerShutdownTimeoutMs(0L);
config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d));
config.setClusterName("use");
Set<String> superUsers = Sets.newHashSet("superUser", "admin");
config.setSuperUserRoles(superUsers);
config.setZookeeperServers("127.0.0.1" + ":" + bkEnsemble.getZookeeperPort());
Set<String> providers = new HashSet<>();
providers.add(AuthenticationProviderTls.class.getName());
config.setAuthenticationEnabled(true);
config.setAuthorizationEnabled(true);
config.setAuthenticationProviders(providers);
config.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH);
config.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH);
config.setTlsAllowInsecureConnection(true);
config.setAdvertisedAddress("localhost");
PulsarAdmin admin = mock(PulsarAdmin.class);
Tenants tenants = mock(Tenants.class);
when(admin.tenants()).thenReturn(tenants);
Set<String> admins = Sets.newHashSet("superUser", "admin");
TenantInfoImpl tenantInfo = new TenantInfoImpl(admins, null);
when(tenants.getTenantInfo(any())).thenReturn(tenantInfo);
Namespaces namespaces = mock(Namespaces.class);
when(admin.namespaces()).thenReturn(namespaces);
when(namespaces.getNamespaces(any())).thenReturn(namespaceList);
functionsWorkerService = spy(createPulsarFunctionWorker(config, admin));
doNothing().when(functionsWorkerService).initAsStandalone(any(WorkerConfig.class));
when(functionsWorkerService.getBrokerAdmin()).thenReturn(admin);
functionsWorkerService.init(workerConfig, null, false);
AuthenticationService authenticationService = new AuthenticationService(config);
AuthorizationService authorizationService = new AuthorizationService(config, mock(PulsarResources.class));
when(functionsWorkerService.getAuthenticationService()).thenReturn(authenticationService);
when(functionsWorkerService.getAuthorizationService()).thenReturn(authorizationService);
when(functionsWorkerService.isInitialized()).thenReturn(true);
// mock: once authentication passes, the function call should return the response "function already exists"
FunctionMetaDataManager dataManager = mock(FunctionMetaDataManager.class);
when(dataManager.containsFunction(any(), any(), any())).thenReturn(true);
when(functionsWorkerService.getFunctionMetaDataManager()).thenReturn(dataManager);
workerServer = new WorkerServer(functionsWorkerService, authenticationService);
workerServer.start();
Thread.sleep(2000);
String functionTlsUrl = String.format("https://%s:%s", functionsWorkerService.getWorkerConfig().getWorkerHostname(), workerServer.getListenPortHTTPS().get());
Map<String, String> authParams = new HashMap<>();
authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH);
authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH);
Authentication authTls = new AuthenticationTls();
authTls.configure(authParams);
functionAdmin = PulsarAdmin.builder().serviceHttpUrl(functionTlsUrl).tlsTrustCertsFilePath(TLS_CLIENT_CERT_FILE_PATH).allowTlsInsecureConnection(true).authentication(authTls).build();
Thread.sleep(100);
}
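The PulsarAdmin built at the end of this setup authenticates over TLS by configuring AuthenticationTls through a parameter map. An equivalent, slightly shorter sketch using the AuthenticationFactory.TLS convenience method is shown below; the helper class and argument names are assumptions for illustration.
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.api.AuthenticationFactory;

public class TlsAdminClientSketch {
    // Build a TLS-authenticated PulsarAdmin with the AuthenticationFactory shortcut
    // instead of configuring AuthenticationTls through a parameter map.
    static PulsarAdmin buildTlsAdmin(String httpsUrl, String certPath, String keyPath, String trustCertsPath) throws Exception {
        return PulsarAdmin.builder()
                .serviceHttpUrl(httpsUrl)
                .tlsTrustCertsFilePath(trustCertsPath)
                .allowTlsInsecureConnection(false)
                .authentication(AuthenticationFactory.TLS(certPath, keyPath))
                .build();
    }
}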