Use of org.apache.pulsar.client.api.PulsarClient in project incubator-pulsar by apache.
The class V1_ProducerConsumerTest, method testInvalidSequence.
@Test
public void testInvalidSequence() throws Exception {
    log.info("-- Starting {} test --", methodName);
    PulsarClient client1 = PulsarClient.create("http://127.0.0.1:" + BROKER_WEBSERVICE_PORT);
    client1.close();
    ConsumerConfiguration consumerConf = new ConsumerConfiguration();
    consumerConf.setSubscriptionType(SubscriptionType.Exclusive);
    try {
        client1.subscribe("persistent://my-property/use/my-ns/my-topic6", "my-subscriber-name", consumerConf);
        Assert.fail("Should fail");
    } catch (PulsarClientException e) {
        Assert.assertTrue(e instanceof PulsarClientException.AlreadyClosedException);
    }
    try {
        client1.createProducer("persistent://my-property/use/my-ns/my-topic6");
        Assert.fail("Should fail");
    } catch (PulsarClientException e) {
        Assert.assertTrue(e instanceof PulsarClientException.AlreadyClosedException);
    }
    Consumer consumer = pulsarClient.subscribe("persistent://my-property/use/my-ns/my-topic6", "my-subscriber-name", consumerConf);
    try {
        Message msg = MessageBuilder.create().setContent("InvalidMessage".getBytes()).build();
        consumer.acknowledge(msg);
    } catch (PulsarClientException.InvalidMessageException e) {
        // ok
    }
    consumer.close();
    try {
        consumer.receive();
        Assert.fail("Should fail");
    } catch (PulsarClientException.AlreadyClosedException e) {
        // ok
    }
    try {
        consumer.unsubscribe();
        Assert.fail("Should fail");
    } catch (PulsarClientException.AlreadyClosedException e) {
        // ok
    }
    Producer producer = pulsarClient.createProducer("persistent://my-property/use/my-ns/my-topic6");
    producer.close();
    try {
        producer.send("message".getBytes());
        Assert.fail("Should fail");
    } catch (PulsarClientException.AlreadyClosedException e) {
        // ok
    }
}
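The V1 API used above (PulsarClient.create, createProducer, ConsumerConfiguration) has a builder-based equivalent in the same client library. Below is a minimal, hypothetical sketch of the same already-closed behaviour with the builder API; the service URL is a placeholder and a locally running broker is assumed, so treat it as an illustration rather than part of the test.
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.PulsarClientException;

public class AlreadyClosedSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder service URL; point this at a running broker.
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .build();

        Producer<byte[]> producer = client.newProducer()
                .topic("persistent://my-property/use/my-ns/my-topic6")
                .create();

        producer.close();
        try {
            // Sending on a closed producer is expected to fail, as the test asserts.
            producer.send("message".getBytes());
        } catch (PulsarClientException.AlreadyClosedException e) {
            System.out.println("producer already closed, as expected");
        }

        client.close();
        try {
            // Likewise, a closed client rejects new producers and consumers.
            client.newProducer().topic("persistent://my-property/use/my-ns/my-topic6").create();
        } catch (PulsarClientException.AlreadyClosedException e) {
            System.out.println("client already closed, as expected");
        }
    }
}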
Use of org.apache.pulsar.client.api.PulsarClient in project incubator-pulsar by apache.
The class BrokerClientIntegrationTest, method testCloseConnectionOnInternalServerError.
/**
 * It verifies that the client closes the connection on an internal server error ("ServiceNotReady") from the broker side.
 *
 * @throws Exception
 */
@Test(timeOut = 5000)
public void testCloseConnectionOnInternalServerError() throws Exception {
    final PulsarClient pulsarClient;
    final String topicName = "persistent://prop/usw/my-ns/newTopic";
    String lookupUrl = new URI("pulsar://localhost:" + BROKER_PORT).toString();
    pulsarClient = PulsarClient.builder().serviceUrl(lookupUrl).statsInterval(0, TimeUnit.SECONDS).build();
    ProducerImpl<byte[]> producer = (ProducerImpl<byte[]>) pulsarClient.newProducer().topic(topicName).create();
    ClientCnx cnx = producer.cnx();
    assertTrue(cnx.channel().isActive());
    // Need the broker to throw InternalServerError, so make global-zk unavailable
    Field globalZkCacheField = PulsarService.class.getDeclaredField("globalZkCache");
    globalZkCacheField.setAccessible(true);
    globalZkCacheField.set(pulsar, null);
    try {
        pulsarClient.newProducer().topic(topicName).create();
        fail("it should have failed with a lookup exception");
    } catch (Exception e) {
        // ok
    }
    // connection must be closed
    assertFalse(cnx.channel().isActive());
    pulsarClient.close();
}
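The check on cnx.channel().isActive() goes through the client's implementation classes rather than the public API. A hedged standalone sketch of that inspection is shown below; ProducerImpl and ClientCnx are internal classes from org.apache.pulsar.client.impl, the URL and topic are placeholders, and a running broker is assumed, so this pattern really only belongs in tests like the one above.
import java.util.concurrent.TimeUnit;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.impl.ClientCnx;
import org.apache.pulsar.client.impl.ProducerImpl;

public class ConnectionStateSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; a running broker is assumed.
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .statsInterval(0, TimeUnit.SECONDS)
                .build();

        // Cast to the implementation type to reach the underlying ClientCnx,
        // exactly as the test does; this is not part of the public API.
        ProducerImpl<byte[]> producer = (ProducerImpl<byte[]>) client.newProducer()
                .topic("persistent://prop/usw/my-ns/newTopic")
                .create();

        ClientCnx cnx = producer.cnx();
        System.out.println("connection active: " + cnx.channel().isActive());

        client.close();
    }
}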
Use of org.apache.pulsar.client.api.PulsarClient in project incubator-pulsar by apache.
The class BrokerClientIntegrationTest, method testCleanProducer.
@Test
public void testCleanProducer() throws Exception {
    log.info("-- Starting {} test --", methodName);
    admin.clusters().createCluster("global", new ClusterData());
    admin.namespaces().createNamespace("my-property/global/lookup");
    final int operationTimeOut = 500;
    PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(lookupUrl.toString()).statsInterval(0, TimeUnit.SECONDS)
            .operationTimeout(operationTimeOut, TimeUnit.MILLISECONDS).build();
    CountDownLatch latch = new CountDownLatch(1);
    pulsarClient.newProducer().topic("persistent://my-property/global/lookup/my-topic1").createAsync().handle((producer, e) -> {
        latch.countDown();
        return null;
    });
    latch.await(operationTimeOut + 1000, TimeUnit.MILLISECONDS);
    Field prodField = PulsarClientImpl.class.getDeclaredField("producers");
    prodField.setAccessible(true);
    @SuppressWarnings("unchecked")
    IdentityHashMap<ProducerBase<byte[]>, Boolean> producers = (IdentityHashMap<ProducerBase<byte[]>, Boolean>) prodField.get(pulsarClient);
    assertTrue(producers.isEmpty());
    pulsarClient.close();
    log.info("-- Exiting {} test --", methodName);
}
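The cleanup check above relies on createAsync() completing, successfully or not, within the configured operation timeout before the producers map is inspected via reflection. A minimal sketch of that async-create pattern, without the reflection part, might look like the following; the service URL, topic, and timeout value are placeholders.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.pulsar.client.api.PulsarClient;

public class AsyncCreateSketch {
    public static void main(String[] args) throws Exception {
        final int operationTimeoutMs = 500; // placeholder timeout

        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")   // placeholder URL
                .statsInterval(0, TimeUnit.SECONDS)
                .operationTimeout(operationTimeoutMs, TimeUnit.MILLISECONDS)
                .build();

        CountDownLatch latch = new CountDownLatch(1);
        client.newProducer()
                .topic("persistent://my-property/global/lookup/my-topic1")
                .createAsync()
                .handle((producer, e) -> {
                    // Either outcome (success or failure) releases the latch,
                    // mirroring the test above.
                    if (e != null) {
                        System.err.println("create failed: " + e.getMessage());
                    }
                    latch.countDown();
                    return null;
                });

        // Wait a bit longer than the operation timeout before giving up.
        latch.await(operationTimeoutMs + 1000, TimeUnit.MILLISECONDS);
        client.close();
    }
}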
Use of org.apache.pulsar.client.api.PulsarClient in project incubator-pulsar by apache.
The class BrokerClientIntegrationTest, method testCloseConnectionOnBrokerRejectedRequest.
/**
 * <pre>
 * Verifies that the client connection (client-cnx) gets closed when the broker returns TooManyRequestsException within a certain time frame:
 * 1. Client1: has maxNumberOfRejectedRequestPerConnection=0
 * 2. Client2: has maxNumberOfRejectedRequestPerConnection=100
 * 3. Create multiple producers and make lookup requests simultaneously
 * 4. Client1 receives TooManyRequestsException and should close the connection
 * </pre>
 *
 * @throws Exception
 */
@Test(timeOut = 5000)
public void testCloseConnectionOnBrokerRejectedRequest() throws Exception {
    final PulsarClient pulsarClient;
    final PulsarClient pulsarClient2;
    final String topicName = "persistent://prop/usw/my-ns/newTopic";
    final int maxConcurrentLookupRequest = pulsar.getConfiguration().getMaxConcurrentLookupRequest();
    try {
        final int concurrentLookupRequests = 20;
        stopBroker();
        pulsar.getConfiguration().setMaxConcurrentLookupRequest(1);
        startBroker();
        String lookupUrl = new URI("pulsar://localhost:" + BROKER_PORT).toString();
        pulsarClient = PulsarClient.builder().serviceUrl(lookupUrl).statsInterval(0, TimeUnit.SECONDS)
                .maxNumberOfRejectedRequestPerConnection(0).build();
        pulsarClient2 = PulsarClient.builder().serviceUrl(lookupUrl).statsInterval(0, TimeUnit.SECONDS)
                .ioThreads(concurrentLookupRequests).connectionsPerBroker(20).build();
        ProducerImpl<byte[]> producer = (ProducerImpl<byte[]>) pulsarClient.newProducer().topic(topicName).create();
        ClientCnx cnx = producer.cnx();
        assertTrue(cnx.channel().isActive());
        ExecutorService executor = Executors.newFixedThreadPool(concurrentLookupRequests);
        final int totalProducer = 100;
        CountDownLatch latch = new CountDownLatch(totalProducer * 2);
        AtomicInteger failed = new AtomicInteger(0);
        for (int i = 0; i < totalProducer; i++) {
            executor.submit(() -> {
                pulsarClient2.newProducer().topic(topicName).createAsync().handle((ok, e) -> {
                    if (e != null) {
                        failed.set(1);
                    }
                    latch.countDown();
                    return null;
                });
                pulsarClient.newProducer().topic(topicName).createAsync().handle((ok, e) -> {
                    if (e != null) {
                        failed.set(1);
                    }
                    latch.countDown();
                    return null;
                });
            });
        }
        latch.await(10, TimeUnit.SECONDS);
        // connection must be closed
        assertTrue(failed.get() == 1);
        try {
            pulsarClient.close();
            pulsarClient2.close();
        } catch (Exception e) {
            // Ok
        }
    } finally {
        pulsar.getConfiguration().setMaxConcurrentLookupRequest(maxConcurrentLookupRequest);
    }
}
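The two clients in the test differ only in how they are built. The sketch below isolates those builder settings; the service URL is a placeholder and the numeric values are simply the ones used in the test, not recommendations.
import java.util.concurrent.TimeUnit;
import org.apache.pulsar.client.api.PulsarClient;

public class RejectedRequestTuningSketch {
    public static void main(String[] args) throws Exception {
        String serviceUrl = "pulsar://localhost:6650"; // placeholder URL

        // Client1: closes its connection as soon as the broker rejects a request.
        PulsarClient strictClient = PulsarClient.builder()
                .serviceUrl(serviceUrl)
                .statsInterval(0, TimeUnit.SECONDS)
                .maxNumberOfRejectedRequestPerConnection(0)
                .build();

        // Client2: built with more IO threads and connections per broker, as in
        // the test, and relies on the default rejected-request threshold.
        PulsarClient tolerantClient = PulsarClient.builder()
                .serviceUrl(serviceUrl)
                .statsInterval(0, TimeUnit.SECONDS)
                .ioThreads(20)
                .connectionsPerBroker(20)
                .build();

        strictClient.close();
        tolerantClient.close();
    }
}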
Use of org.apache.pulsar.client.api.PulsarClient in project incubator-pulsar by apache.
The class BrokerBkEnsemblesTests, method testCrashBrokerWithoutCursorLedgerLeak.
/**
 * It verifies that the broker deletes the cursor-ledger when the broker crashes without closing the topic gracefully.
 *
 * <pre>
 * 1. Create topic: publish and consume-ack msgs to update the new cursor-ledger
 * 2. Verify the cursor-ledger is created and its ledger-znode is present
 * 3. Broker crashes: remove the topic and managed-ledgers without closing
 * 4. Recreate topic: publish and consume-ack msgs to update the new cursor-ledger
 * 5. Topic is recovered from the old ledger and the broker deletes the old ledger
 * 6. Verify the znode of the old ledger is deleted
 * </pre>
 *
 * @throws Exception
 */
@Test
public void testCrashBrokerWithoutCursorLedgerLeak() throws Exception {
    ZooKeeper zk = bkEnsemble.getZkClient();
    PulsarClient client = PulsarClient.builder().serviceUrl(adminUrl.toString()).statsInterval(0, TimeUnit.SECONDS).build();
    final String ns1 = "prop/usc/crash-broker";
    admin.namespaces().createNamespace(ns1);
    final String topic1 = "persistent://" + ns1 + "/my-topic";
    // (1) create topic
    // publish and ack messages so the cursor can create a cursor-ledger and update metadata
    Consumer<byte[]> consumer = client.newConsumer().topic(topic1).subscriptionName("my-subscriber-name").subscribe();
    Producer<byte[]> producer = client.newProducer().topic(topic1).create();
    for (int i = 0; i < 10; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }
    Message<byte[]> msg = null;
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(1, TimeUnit.SECONDS);
        consumer.acknowledge(msg);
    }
    PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopic(topic1).get();
    ManagedCursorImpl cursor = (ManagedCursorImpl) topic.getManagedLedger().getCursors().iterator().next();
    retryStrategically((test) -> cursor.getState().equals("Open"), 5, 100);
    // (2) validate that the cursor ledger is created and its znode is present
    long cursorLedgerId = cursor.getCursorLedger();
    String ledgerPath = "/ledgers" + StringUtils.getHybridHierarchicalLedgerPath(cursorLedgerId);
    Assert.assertNotNull(zk.exists(ledgerPath, false));
    // (3) remove the topic and managed-ledger from the broker, i.e. the topic is not closed gracefully
    consumer.close();
    producer.close();
    pulsar.getBrokerService().removeTopicFromCache(topic1);
    ManagedLedgerFactoryImpl factory = (ManagedLedgerFactoryImpl) pulsar.getManagedLedgerFactory();
    Field field = ManagedLedgerFactoryImpl.class.getDeclaredField("ledgers");
    field.setAccessible(true);
    @SuppressWarnings("unchecked")
    ConcurrentHashMap<String, CompletableFuture<ManagedLedgerImpl>> ledgers = (ConcurrentHashMap<String, CompletableFuture<ManagedLedgerImpl>>) field.get(factory);
    ledgers.clear();
    // (4) Recreate the topic
    // publish and ack messages so the cursor can create a cursor-ledger and update metadata
    consumer = client.newConsumer().topic(topic1).subscriptionName("my-subscriber-name").subscribe();
    producer = client.newProducer().topic(topic1).create();
    for (int i = 0; i < 10; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(1, TimeUnit.SECONDS);
        consumer.acknowledge(msg);
    }
    // (5) Broker should create a new cursor-ledger and remove the old cursor-ledger
    topic = (PersistentTopic) pulsar.getBrokerService().getTopic(topic1).get();
    final ManagedCursorImpl cursor1 = (ManagedCursorImpl) topic.getManagedLedger().getCursors().iterator().next();
    retryStrategically((test) -> cursor1.getState().equals("Open"), 5, 100);
    long newCursorLedgerId = cursor1.getCursorLedger();
    Assert.assertNotEquals(newCursorLedgerId, -1);
    Assert.assertNotEquals(cursorLedgerId, newCursorLedgerId);
    // cursor node must be deleted
    Assert.assertNull(zk.exists(ledgerPath, false));
    producer.close();
    consumer.close();
    client.close();
}
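Steps (1) and (4) of the test reduce to a plain publish, receive, and acknowledge loop that advances the subscription cursor. A minimal standalone sketch of that loop with the builder API is shown below; the service URL and topic are placeholders and a running broker is assumed.
import java.util.concurrent.TimeUnit;
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;

public class PublishConsumeAckSketch {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")   // placeholder URL
                .statsInterval(0, TimeUnit.SECONDS)
                .build();

        String topic = "persistent://prop/usc/crash-broker/my-topic"; // placeholder topic

        Consumer<byte[]> consumer = client.newConsumer()
                .topic(topic)
                .subscriptionName("my-subscriber-name")
                .subscribe();
        Producer<byte[]> producer = client.newProducer()
                .topic(topic)
                .create();

        // Publish a handful of messages, then receive and acknowledge them so
        // the subscription's cursor is updated, as in steps (1) and (4) above.
        for (int i = 0; i < 10; i++) {
            producer.send(("my-message-" + i).getBytes());
        }
        for (int i = 0; i < 10; i++) {
            Message<byte[]> msg = consumer.receive(1, TimeUnit.SECONDS);
            if (msg != null) {
                consumer.acknowledge(msg);
            }
        }

        producer.close();
        consumer.close();
        client.close();
    }
}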