Use of org.apache.pulsar.client.impl.schema.JSONSchema in project pulsar by apache.
The class ProducerImpl, method connectionOpened.
@Override
public void connectionOpened(final ClientCnx cnx) {
    previousExceptions.clear();
    chunkMaxMessageSize = Math.min(chunkMaxMessageSize, ClientCnx.getMaxMessageSize());
    final long epoch;
    synchronized (this) {
        // Because the state could have been updated while getting the connection, we set it back to connecting,
        // as long as the change from the current state to connecting is a valid state change.
        if (!changeToConnecting()) {
            return;
        }
        // We set the cnx reference before registering the producer on the cnx, so if the cnx breaks before creating
        // the producer, it will try to grab a new cnx. We also increment and get the epoch value for the producer.
        epoch = connectionHandler.switchClientCnx(cnx);
    }
    cnx.registerProducer(producerId, this);
    log.info("[{}] [{}] Creating producer on cnx {}", topic, producerName, cnx.ctx().channel());
    long requestId = client.newRequestId();
    PRODUCER_DEADLINE_UPDATER.compareAndSet(this, 0,
            System.currentTimeMillis() + client.getConfiguration().getOperationTimeoutMs());
    SchemaInfo schemaInfo = null;
    if (schema != null) {
        if (schema.getSchemaInfo() != null) {
            if (schema.getSchemaInfo().getType() == SchemaType.JSON) {
                // JSONSchema originally generated a schema for the POJO based on the JSON schema standard,
                // but now we have standardized on every schema generating an Avro-based schema.
                if (Commands.peerSupportJsonSchemaAvroFormat(cnx.getRemoteEndpointProtocolVersion())) {
                    schemaInfo = schema.getSchemaInfo();
                } else if (schema instanceof JSONSchema) {
                    // For backwards compatibility with older brokers, fall back to the legacy JSON schema format.
                    JSONSchema jsonSchema = (JSONSchema) schema;
                    schemaInfo = jsonSchema.getBackwardsCompatibleJsonSchemaInfo();
                } else {
                    schemaInfo = schema.getSchemaInfo();
                }
            } else if (schema.getSchemaInfo().getType() == SchemaType.BYTES
                    || schema.getSchemaInfo().getType() == SchemaType.NONE) {
                // Don't set schema info for Schema.BYTES or Schema.NONE.
                schemaInfo = null;
            } else {
                schemaInfo = schema.getSchemaInfo();
            }
        }
    }
    cnx.sendRequestWithId(
            Commands.newProducer(topic, producerId, requestId, producerName, conf.isEncryptionEnabled(),
                    metadata, schemaInfo, epoch, userProvidedProducerName, conf.getAccessMode(), topicEpoch,
                    client.conf.isEnableTransaction(), conf.getInitialSubscriptionName()),
            requestId).thenAccept(response -> {
        String producerName = response.getProducerName();
        long lastSequenceId = response.getLastSequenceId();
        schemaVersion = Optional.ofNullable(response.getSchemaVersion());
        schemaVersion.ifPresent(v -> schemaCache.put(SchemaHash.of(schema), v));
        // set the cnx pointer so that new messages will be sent immediately
        synchronized (ProducerImpl.this) {
            if (getState() == State.Closing || getState() == State.Closed) {
                // Producer was closed while reconnecting, close the connection to make sure the broker
                // drops the producer on its side
                cnx.removeProducer(producerId);
                cnx.channel().close();
                return;
            }
            resetBackoff();
            log.info("[{}] [{}] Created producer on cnx {}", topic, producerName, cnx.ctx().channel());
            connectionId = cnx.ctx().channel().toString();
            connectedSince = DateFormatter.now();
            if (conf.getAccessMode() != ProducerAccessMode.Shared && !topicEpoch.isPresent()) {
                log.info("[{}] [{}] Producer epoch is {}", topic, producerName, response.getTopicEpoch());
            }
            topicEpoch = response.getTopicEpoch();
            if (this.producerName == null) {
                this.producerName = producerName;
            }
            if (this.msgIdGenerator == 0 && conf.getInitialSequenceId() == null) {
                // Only update the sequence id generator if it wasn't already modified. That means we only want
                // to update the id generator the first time the producer gets established, and ignore the
                // sequence id sent by the broker in subsequent producer reconnects
                this.lastSequenceIdPublished = lastSequenceId;
                this.msgIdGenerator = lastSequenceId + 1;
            }
            if (!producerCreatedFuture.isDone() && isBatchMessagingEnabled()) {
                // schedule the first batch message task
                batchTimerTask = cnx.ctx().executor().scheduleWithFixedDelay(catchingAndLoggingThrowables(() -> {
                    if (log.isTraceEnabled()) {
                        log.trace("[{}] [{}] Batching the messages from the batch container from timer thread",
                                topic, producerName);
                    }
                    // semaphore acquired when message was enqueued to container
                    synchronized (ProducerImpl.this) {
                        // schedule next timeout
                        if (getState() == State.Closing || getState() == State.Closed) {
                            return;
                        }
                        batchMessageAndSend();
                    }
                }), 0, conf.getBatchingMaxPublishDelayMicros(), TimeUnit.MICROSECONDS);
            }
            resendMessages(cnx, epoch);
        }
    }).exceptionally((e) -> {
        Throwable cause = e.getCause();
        cnx.removeProducer(producerId);
        if (getState() == State.Closing || getState() == State.Closed) {
            // Producer was closed while reconnecting, close the connection to make sure the broker
            // drops the producer on its side
            cnx.channel().close();
            return null;
        }
        if (cause instanceof TimeoutException) {
            // Creating the producer has timed out. We need to ensure the broker closes the producer
            // in case it was indeed created, otherwise it might prevent new create-producer operations,
            // since we are not necessarily closing the connection.
            long closeRequestId = client.newRequestId();
            ByteBuf cmd = Commands.newCloseProducer(producerId, closeRequestId);
            cnx.sendRequestWithId(cmd, closeRequestId);
        }
        if (cause instanceof PulsarClientException.ProducerFencedException) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Failed to create producer: {}", topic, producerName, cause.getMessage());
            }
        } else {
            log.error("[{}] [{}] Failed to create producer: {}", topic, producerName, cause.getMessage());
        }
        // Close the producer since the topic does not exist.
        if (cause instanceof PulsarClientException.TopicDoesNotExistException) {
            closeAsync().whenComplete((v, ex) -> {
                if (ex != null) {
                    log.error("Failed to close producer on TopicDoesNotExistException.", ex);
                }
                producerCreatedFuture.completeExceptionally(cause);
            });
            return null;
        }
        if (cause instanceof PulsarClientException.ProducerBlockedQuotaExceededException) {
            synchronized (this) {
                log.warn("[{}] [{}] Topic backlog quota exceeded. Throwing Exception on producer.",
                        topic, producerName);
                if (log.isDebugEnabled()) {
                    log.debug("[{}] [{}] Pending messages: {}", topic, producerName,
                            pendingMessages.messagesCount());
                }
                PulsarClientException bqe = new PulsarClientException.ProducerBlockedQuotaExceededException(
                        format("The backlog quota of the topic %s that the producer %s produces to is exceeded",
                                topic, producerName));
                failPendingMessages(cnx(), bqe);
            }
        } else if (cause instanceof PulsarClientException.ProducerBlockedQuotaExceededError) {
            log.warn("[{}] [{}] Producer is blocked on creation because backlog exceeded on topic.",
                    producerName, topic);
        }
        if (cause instanceof PulsarClientException.TopicTerminatedException) {
            setState(State.Terminated);
            synchronized (this) {
                failPendingMessages(cnx(), (PulsarClientException) cause);
            }
            producerCreatedFuture.completeExceptionally(cause);
            closeProducerTasks();
            client.cleanupProducer(this);
        } else if (cause instanceof PulsarClientException.ProducerFencedException) {
            setState(State.ProducerFenced);
            synchronized (this) {
                failPendingMessages(cnx(), (PulsarClientException) cause);
            }
            producerCreatedFuture.completeExceptionally(cause);
            closeProducerTasks();
            client.cleanupProducer(this);
        } else if (producerCreatedFuture.isDone()
                || (cause instanceof PulsarClientException && PulsarClientException.isRetriableError(cause)
                        && System.currentTimeMillis() < PRODUCER_DEADLINE_UPDATER.get(ProducerImpl.this))) {
            // Either we had already created the producer once (producerCreatedFuture.isDone()) or we are
            // still within the initial timeout budget and we are dealing with a retriable error
            reconnectLater(cause);
        } else {
            setState(State.Failed);
            producerCreatedFuture.completeExceptionally(cause);
            closeProducerTasks();
            client.cleanupProducer(this);
            Timeout timeout = sendTimeout;
            if (timeout != null) {
                timeout.cancel();
                sendTimeout = null;
            }
        }
        return null;
    });
}
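To see this code path from the caller's side, here is a minimal sketch of creating a producer with a JSONSchema, which is what leads connectionOpened to attach a SchemaInfo of type JSON to the CommandProducer request. The SensorReading POJO, topic name, and service URL are illustrative placeholders, not part of the Pulsar source above.

import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.impl.schema.JSONSchema;

public class JsonProducerExample {
    public static class SensorReading { // hypothetical POJO for illustration
        public String sensorId;
        public double value;
    }

    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650") // placeholder broker address
                .build();
        // Passing a JSONSchema here is what later makes connectionOpened()
        // send a JSON-typed SchemaInfo to the broker.
        Producer<SensorReading> producer = client
                .newProducer(JSONSchema.of(SensorReading.class))
                .topic("persistent://public/default/sensor-readings") // placeholder topic
                .create();
        SensorReading reading = new SensorReading();
        reading.sensorId = "sensor-1";
        reading.value = 23.5;
        producer.send(reading);
        producer.close();
        client.close();
    }
}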
Use of org.apache.pulsar.client.impl.schema.JSONSchema in project pulsar by apache.
The class TestJsonDecoder, method testCyclicDefinitionDetect.
@Test(singleThreaded = true)
public void testCyclicDefinitionDetect() {
    JSONSchema cyclicSchema = JSONSchema.of(DecoderTestMessage.CyclicFoo.class);
    PrestoException exception = expectThrows(PrestoException.class, () -> {
        decoderFactory.extractColumnMetadata(topicName, cyclicSchema.getSchemaInfo(),
                PulsarColumnHandle.HandleKeyValueType.NONE);
    });
    // The expected string matches the decoder's actual message verbatim
    // (including the "may contains" wording).
    assertEquals("Topic " + topicName.toString() + " schema may contains cyclic definitions.",
            exception.getMessage());
}
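The DecoderTestMessage.CyclicFoo fixture is not shown in the snippet; a self-referential POJO along the following lines would produce the cyclic schema the test expects. This is a guess at the fixture's shape, not the actual test class.

// Hypothetical self-referential POJO: a field whose type refers back to the
// enclosing class makes the generated JSON/Avro schema recursive, which the
// column-metadata extraction rejects as a cyclic definition.
public class CyclicFoo {
    public String field1;
    public CyclicFoo foo; // the cycle: CyclicFoo contains a CyclicFoo
}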
Use of org.apache.pulsar.client.impl.schema.JSONSchema in project pulsar-flink by streamnative.
The class FlinkPulsarTableITest, method testWriteThenRead.
@Test(timeout = 40 * 1000L)
public void testWriteThenRead() throws Exception {
    String tp = newTopic();
    String tableName = TopicName.get(tp).getLocalName();
    StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
    see.setParallelism(1);
    DataStreamSource ds = see.fromCollection(fooList);
    ds.addSink(new FlinkPulsarSink(serviceUrl, adminUrl, Optional.of(tp), getSinkProperties(),
            new PulsarSerializationSchemaWrapper.Builder<>((SerializationSchema<SchemaData.Foo>) element -> {
                JSONSchema<SchemaData.Foo> jsonSchema = JSONSchema.of(SchemaData.Foo.class);
                return jsonSchema.encode(element);
            }).usePojoMode(SchemaData.Foo.class, RecordSchemaType.JSON).build()));
    see.execute("write first");
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
    TableSchema tSchema = getTableSchema(tp);
    tEnv.executeSql(createTableSql(tableName, tp, tSchema, "json")).print();
    Table t = tEnv.sqlQuery("select i, f, bar from " + tableName);
    tEnv.toDataStream(t, SchemaData.Foo.class)
            .map(new FailingIdentityMapper<>(fooList.size()))
            .addSink(new SingletonStreamSink.StringSink<>())
            .setParallelism(1);
    TestUtils.tryExecute(env, "count elements from topics");
    SingletonStreamSink.compareWithList(
            fooList.subList(0, fooList.size() - 1).stream().map(Objects::toString).collect(Collectors.toList()));
}
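The SerializationSchema lambda above leans on JSONSchema's encode method; the decode side mirrors it. A minimal round-trip sketch, with a stand-in Foo class since SchemaData.Foo's fields are not shown in the snippet:

import org.apache.pulsar.client.impl.schema.JSONSchema;

public class JsonRoundTrip {
    public static class Foo { // stand-in for SchemaData.Foo; real fields not shown above
        public int i;
        public float f;
        public String bar;
    }

    public static void main(String[] args) {
        JSONSchema<Foo> schema = JSONSchema.of(Foo.class);
        Foo in = new Foo();
        in.i = 1;
        in.f = 2.0f;
        in.bar = "hello";
        byte[] payload = schema.encode(in); // what the sink lambda produces per element
        Foo out = schema.decode(payload);   // what a JSON-aware consumer recovers
        assert out.i == in.i && out.bar.equals(in.bar);
    }
}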
Use of org.apache.pulsar.client.impl.schema.JSONSchema in project incubator-pulsar by apache.
The class ProducerImpl, method connectionOpened.
@Override
public void connectionOpened(final ClientCnx cnx) {
    previousExceptions.clear();
    chunkMaxMessageSize = Math.min(chunkMaxMessageSize, ClientCnx.getMaxMessageSize());
    final long epoch;
    synchronized (this) {
        // Because the state could have been updated while getting the connection, we set it back to connecting,
        // as long as the change from the current state to connecting is a valid state change.
        if (!changeToConnecting()) {
            return;
        }
        // We set the cnx reference before registering the producer on the cnx, so if the cnx breaks before creating
        // the producer, it will try to grab a new cnx. We also increment and get the epoch value for the producer.
        epoch = connectionHandler.switchClientCnx(cnx);
    }
    cnx.registerProducer(producerId, this);
    log.info("[{}] [{}] Creating producer on cnx {}", topic, producerName, cnx.ctx().channel());
    long requestId = client.newRequestId();
    PRODUCER_DEADLINE_UPDATER.compareAndSet(this, 0,
            System.currentTimeMillis() + client.getConfiguration().getOperationTimeoutMs());
    SchemaInfo schemaInfo = null;
    if (schema != null) {
        if (schema.getSchemaInfo() != null) {
            if (schema.getSchemaInfo().getType() == SchemaType.JSON) {
                // JSONSchema originally generated a schema for the POJO based on the JSON schema standard,
                // but now we have standardized on every schema generating an Avro-based schema.
                if (Commands.peerSupportJsonSchemaAvroFormat(cnx.getRemoteEndpointProtocolVersion())) {
                    schemaInfo = schema.getSchemaInfo();
                } else if (schema instanceof JSONSchema) {
                    // For backwards compatibility with older brokers, fall back to the legacy JSON schema format.
                    JSONSchema jsonSchema = (JSONSchema) schema;
                    schemaInfo = jsonSchema.getBackwardsCompatibleJsonSchemaInfo();
                } else {
                    schemaInfo = schema.getSchemaInfo();
                }
            } else if (schema.getSchemaInfo().getType() == SchemaType.BYTES
                    || schema.getSchemaInfo().getType() == SchemaType.NONE) {
                // Don't set schema info for Schema.BYTES or Schema.NONE.
                schemaInfo = null;
            } else {
                schemaInfo = schema.getSchemaInfo();
            }
        }
    }
    cnx.sendRequestWithId(
            Commands.newProducer(topic, producerId, requestId, producerName, conf.isEncryptionEnabled(),
                    metadata, schemaInfo, epoch, userProvidedProducerName, conf.getAccessMode(), topicEpoch,
                    client.conf.isEnableTransaction(), conf.getInitialSubscriptionName()),
            requestId).thenAccept(response -> {
        String producerName = response.getProducerName();
        long lastSequenceId = response.getLastSequenceId();
        schemaVersion = Optional.ofNullable(response.getSchemaVersion());
        schemaVersion.ifPresent(v -> schemaCache.put(SchemaHash.of(schema), v));
        // set the cnx pointer so that new messages will be sent immediately
        synchronized (ProducerImpl.this) {
            if (getState() == State.Closing || getState() == State.Closed) {
                // Producer was closed while reconnecting, close the connection to make sure the broker
                // drops the producer on its side
                cnx.removeProducer(producerId);
                cnx.channel().close();
                return;
            }
            resetBackoff();
            log.info("[{}] [{}] Created producer on cnx {}", topic, producerName, cnx.ctx().channel());
            connectionId = cnx.ctx().channel().toString();
            connectedSince = DateFormatter.now();
            if (conf.getAccessMode() != ProducerAccessMode.Shared && !topicEpoch.isPresent()) {
                log.info("[{}] [{}] Producer epoch is {}", topic, producerName, response.getTopicEpoch());
            }
            topicEpoch = response.getTopicEpoch();
            if (this.producerName == null) {
                this.producerName = producerName;
            }
            if (this.msgIdGenerator == 0 && conf.getInitialSequenceId() == null) {
                // Only update the sequence id generator if it wasn't already modified. That means we only want
                // to update the id generator the first time the producer gets established, and ignore the
                // sequence id sent by the broker in subsequent producer reconnects
                this.lastSequenceIdPublished = lastSequenceId;
                this.msgIdGenerator = lastSequenceId + 1;
            }
            resendMessages(cnx, epoch);
        }
    }).exceptionally((e) -> {
        Throwable cause = e.getCause();
        cnx.removeProducer(producerId);
        if (getState() == State.Closing || getState() == State.Closed) {
            // Producer was closed while reconnecting, close the connection to make sure the broker
            // drops the producer on its side
            cnx.channel().close();
            return null;
        }
        if (cause instanceof TimeoutException) {
            // Creating the producer has timed out. We need to ensure the broker closes the producer
            // in case it was indeed created, otherwise it might prevent new create-producer operations,
            // since we are not necessarily closing the connection.
            long closeRequestId = client.newRequestId();
            ByteBuf cmd = Commands.newCloseProducer(producerId, closeRequestId);
            cnx.sendRequestWithId(cmd, closeRequestId);
        }
        if (cause instanceof PulsarClientException.ProducerFencedException) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Failed to create producer: {}", topic, producerName, cause.getMessage());
            }
        } else {
            log.error("[{}] [{}] Failed to create producer: {}", topic, producerName, cause.getMessage());
        }
        // Close the producer since the topic does not exist.
        if (cause instanceof PulsarClientException.TopicDoesNotExistException) {
            closeAsync().whenComplete((v, ex) -> {
                if (ex != null) {
                    log.error("Failed to close producer on TopicDoesNotExistException.", ex);
                }
                producerCreatedFuture.completeExceptionally(cause);
            });
            return null;
        }
        if (cause instanceof PulsarClientException.ProducerBlockedQuotaExceededException) {
            synchronized (this) {
                log.warn("[{}] [{}] Topic backlog quota exceeded. Throwing Exception on producer.",
                        topic, producerName);
                if (log.isDebugEnabled()) {
                    log.debug("[{}] [{}] Pending messages: {}", topic, producerName,
                            pendingMessages.messagesCount());
                }
                PulsarClientException bqe = new PulsarClientException.ProducerBlockedQuotaExceededException(
                        format("The backlog quota of the topic %s that the producer %s produces to is exceeded",
                                topic, producerName));
                failPendingMessages(cnx(), bqe);
            }
        } else if (cause instanceof PulsarClientException.ProducerBlockedQuotaExceededError) {
            log.warn("[{}] [{}] Producer is blocked on creation because backlog exceeded on topic.",
                    producerName, topic);
        }
        if (cause instanceof PulsarClientException.TopicTerminatedException) {
            setState(State.Terminated);
            synchronized (this) {
                failPendingMessages(cnx(), (PulsarClientException) cause);
            }
            producerCreatedFuture.completeExceptionally(cause);
            closeProducerTasks();
            client.cleanupProducer(this);
        } else if (cause instanceof PulsarClientException.ProducerFencedException) {
            setState(State.ProducerFenced);
            synchronized (this) {
                failPendingMessages(cnx(), (PulsarClientException) cause);
            }
            producerCreatedFuture.completeExceptionally(cause);
            closeProducerTasks();
            client.cleanupProducer(this);
        } else if (producerCreatedFuture.isDone()
                || (cause instanceof PulsarClientException && PulsarClientException.isRetriableError(cause)
                        && System.currentTimeMillis() < PRODUCER_DEADLINE_UPDATER.get(ProducerImpl.this))) {
            // Either we had already created the producer once (producerCreatedFuture.isDone()) or we are
            // still within the initial timeout budget and we are dealing with a retriable error
            reconnectLater(cause);
        } else {
            setState(State.Failed);
            producerCreatedFuture.completeExceptionally(cause);
            closeProducerTasks();
            client.cleanupProducer(this);
            Timeout timeout = sendTimeout;
            if (timeout != null) {
                timeout.cancel();
                sendTimeout = null;
            }
        }
        return null;
    });
}
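The JSON branch in connectionOpened chooses between two SchemaInfo flavours depending on the broker's protocol version, as checked via peerSupportJsonSchemaAvroFormat(). A small sketch contrasting the two, using an illustrative POJO that is not part of the source above:

import org.apache.pulsar.client.impl.schema.JSONSchema;
import org.apache.pulsar.common.schema.SchemaInfo;

public class SchemaInfoFlavours {
    public static class Foo { // illustrative POJO
        public String name;
    }

    public static void main(String[] args) {
        JSONSchema<Foo> schema = JSONSchema.of(Foo.class);
        // Modern form: an Avro-based schema definition, sent to brokers that support it.
        SchemaInfo avroBased = schema.getSchemaInfo();
        // Legacy form: the old JSON-schema-standard definition, for older brokers.
        SchemaInfo legacy = schema.getBackwardsCompatibleJsonSchemaInfo();
        System.out.println(new String(avroBased.getSchema()));
        System.out.println(new String(legacy.getSchema()));
    }
}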
Use of org.apache.pulsar.client.impl.schema.JSONSchema in project incubator-pulsar by apache.
The class TestJsonDecoder, method testCyclicDefinitionDetect.
@Test(singleThreaded = true)
public void testCyclicDefinitionDetect() {
    JSONSchema cyclicSchema = JSONSchema.of(DecoderTestMessage.CyclicFoo.class);
    PrestoException exception = expectThrows(PrestoException.class, () -> {
        decoderFactory.extractColumnMetadata(topicName, cyclicSchema.getSchemaInfo(),
                PulsarColumnHandle.HandleKeyValueType.NONE);
    });
    assertEquals("Topic " + topicName.toString() + " schema may contains cyclic definitions.",
            exception.getMessage());
}