Use of org.apache.pulsar.broker.service.BrokerServiceException.ServiceUnitNotReadyException in project incubator-pulsar by apache.
The class BrokerService, method checkTopicNsOwnership:
void checkTopicNsOwnership(final String topic) throws RuntimeException {
    TopicName topicName = TopicName.get(topic);
    boolean ownedByThisInstance;
    try {
        ownedByThisInstance = pulsar.getNamespaceService().isServiceUnitOwned(topicName);
    } catch (Exception e) {
        log.debug(String.format("Failed to check the ownership of the topic: %s", topicName), e);
        throw new RuntimeException(new ServerMetadataException(e));
    }

    if (!ownedByThisInstance) {
        String msg = String.format("Namespace not served by this instance. Please redo the lookup. "
                + "Request is denied: namespace=%s", topicName.getNamespace());
        log.warn(msg);
        throw new RuntimeException(new ServiceUnitNotReadyException(msg));
    }
}
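Since checkTopicNsOwnership wraps its causes in a RuntimeException, a caller that wants to react specifically to an ownership miss has to unwrap it. A minimal caller-side sketch; the handleOwnershipMiss helper is hypothetical:

// Caller-side sketch (handleOwnershipMiss is hypothetical): unwrap the
// RuntimeException to react specifically to an ownership miss.
try {
    checkTopicNsOwnership(topic);
} catch (RuntimeException e) {
    if (e.getCause() instanceof ServiceUnitNotReadyException) {
        // The namespace bundle is not owned here: the client should redo the lookup.
        handleOwnershipMiss(topic);
    } else {
        throw e; // metadata errors and anything else propagate unchanged
    }
}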
Use of org.apache.pulsar.broker.service.BrokerServiceException.ServiceUnitNotReadyException in project incubator-pulsar by apache.
The class BrokerService, method createPersistentTopic:
private void createPersistentTopic(final String topic, CompletableFuture<Topic> topicFuture) {
    final long topicCreateTimeMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
    TopicName topicName = TopicName.get(topic);
    if (!pulsar.getNamespaceService().isServiceUnitActive(topicName)) {
        // namespace is being unloaded
        String msg = String.format("Namespace is being unloaded, cannot add topic %s", topic);
        log.warn(msg);
        pulsar.getExecutor().submit(() -> topics.remove(topic, topicFuture));
        topicFuture.completeExceptionally(new ServiceUnitNotReadyException(msg));
        return;
    }

    getManagedLedgerConfig(topicName).thenAccept(managedLedgerConfig -> {
        // Once we have the configuration, we can proceed with the async open operation
        managedLedgerFactory.asyncOpen(topicName.getPersistenceNamingEncoding(), managedLedgerConfig,
                new OpenLedgerCallback() {
                    @Override
                    public void openLedgerComplete(ManagedLedger ledger, Object ctx) {
                        try {
                            PersistentTopic persistentTopic = new PersistentTopic(topic, ledger, BrokerService.this);
                            CompletableFuture<Void> replicationFuture = persistentTopic.checkReplication();
                            replicationFuture.thenCompose(v -> {
                                // Also check dedup status
                                return persistentTopic.checkDeduplicationStatus();
                            }).thenRun(() -> {
                                log.info("Created topic {} - dedup is {}", topic,
                                        persistentTopic.isDeduplicationEnabled() ? "enabled" : "disabled");
                                long topicLoadLatencyMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime())
                                        - topicCreateTimeMs;
                                pulsarStats.recordTopicLoadTimeValue(topic, topicLoadLatencyMs);
                                addTopicToStatsMaps(topicName, persistentTopic);
                                topicFuture.complete(persistentTopic);
                            }).exceptionally((ex) -> {
                                log.warn("Replication or dedup check failed. Removing topic from topics list {}",
                                        topic, ex);
                                persistentTopic.stopReplProducers().whenComplete((v, exception) -> {
                                    topics.remove(topic, topicFuture);
                                    topicFuture.completeExceptionally(ex);
                                });
                                return null;
                            });
                        } catch (NamingException e) {
                            log.warn("Failed to create topic {}: {}", topic, e.getMessage());
                            pulsar.getExecutor().submit(() -> topics.remove(topic, topicFuture));
                            topicFuture.completeExceptionally(e);
                        }
                    }

                    @Override
                    public void openLedgerFailed(ManagedLedgerException exception, Object ctx) {
                        log.warn("Failed to create topic {}", topic, exception);
                        pulsar.getExecutor().submit(() -> topics.remove(topic, topicFuture));
                        topicFuture.completeExceptionally(new PersistenceException(exception));
                    }
                }, null);
    }).exceptionally((exception) -> {
        log.warn("[{}] Failed to get topic configuration: {}", topic, exception.getMessage(), exception);
        // remove topic from topics-map in a different thread to avoid a possible deadlock
        // if the createPersistentTopic-thread alone tries to handle this future-result
        pulsar.getExecutor().submit(() -> topics.remove(topic, topicFuture));
        topicFuture.completeExceptionally(exception);
        return null;
    });
}
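When the namespace is being unloaded, the method completes topicFuture exceptionally with ServiceUnitNotReadyException, which callers can treat as retriable. A sketch of such a caller, using the service.getTopic entry point shown in the next example; useTopic and scheduleRetry are hypothetical helpers:

// Caller-side sketch: ServiceUnitNotReadyException means the bundle is moving,
// so the failure is retriable after a fresh lookup; anything else is fatal.
service.getTopic(topicName.toString()).whenComplete((topic, ex) -> {
    if (ex == null) {
        useTopic(topic); // hypothetical consumer of the created topic
        return;
    }
    Throwable cause = ex instanceof CompletionException ? ex.getCause() : ex;
    if (cause instanceof ServiceUnitNotReadyException) {
        scheduleRetry(topicName); // hypothetical retry hook
    } else {
        log.error("Topic creation failed permanently for {}", topicName, cause);
    }
});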
Use of org.apache.pulsar.broker.service.BrokerServiceException.ServiceUnitNotReadyException in project incubator-pulsar by apache.
The class ServerCnx, method handleProducer:
@Override
protected void handleProducer(final CommandProducer cmdProducer) {
    checkArgument(state == State.Connected);
    final long producerId = cmdProducer.getProducerId();
    final long requestId = cmdProducer.getRequestId();
    // Use producer name provided by client if present
    final String producerName = cmdProducer.hasProducerName() ? cmdProducer.getProducerName()
            : service.generateUniqueProducerName();
    final boolean isEncrypted = cmdProducer.getEncrypted();
    final Map<String, String> metadata = CommandUtils.metadataFromCommand(cmdProducer);

    TopicName topicName = validateTopicName(cmdProducer.getTopic(), requestId, cmdProducer);
    if (topicName == null) {
        return;
    }

    if (invalidOriginalPrincipal(originalPrincipal)) {
        final String msg = "Valid Proxy Client role should be provided while creating producer";
        log.warn("[{}] {} with role {} and proxyClientAuthRole {} on topic {}", remoteAddress, msg, authRole,
                originalPrincipal, topicName);
        ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, msg));
        return;
    }

    CompletableFuture<Boolean> isProxyAuthorizedFuture;
    if (service.isAuthorizationEnabled() && originalPrincipal != null) {
        isProxyAuthorizedFuture = service.getAuthorizationService().canProduceAsync(topicName, authRole,
                authenticationData);
    } else {
        isProxyAuthorizedFuture = CompletableFuture.completedFuture(true);
    }
    isProxyAuthorizedFuture.thenApply(isProxyAuthorized -> {
        if (isProxyAuthorized) {
            CompletableFuture<Boolean> authorizationFuture;
            if (service.isAuthorizationEnabled()) {
                authorizationFuture = service.getAuthorizationService().canProduceAsync(topicName,
                        originalPrincipal != null ? originalPrincipal : authRole, authenticationData);
            } else {
                authorizationFuture = CompletableFuture.completedFuture(true);
            }
            authorizationFuture.thenApply(isAuthorized -> {
                if (isAuthorized) {
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Client is authorized to Produce with role {}", remoteAddress, authRole);
                    }
                    CompletableFuture<Producer> producerFuture = new CompletableFuture<>();
                    CompletableFuture<Producer> existingProducerFuture = producers.putIfAbsent(producerId,
                            producerFuture);
                    if (existingProducerFuture != null) {
                        if (existingProducerFuture.isDone() && !existingProducerFuture.isCompletedExceptionally()) {
                            Producer producer = existingProducerFuture.getNow(null);
                            log.info("[{}] Producer with the same id is already created: {}", remoteAddress,
                                    producer);
                            ctx.writeAndFlush(Commands.newProducerSuccess(requestId, producer.getProducerName(),
                                    producer.getSchemaVersion()));
                            return null;
                        } else {
                            // There was an earlier request to create a producer with the same producerId.
                            // This can happen when the client timeout is lower than the broker timeout.
                            // We need to wait until the previous producer creation request either
                            // completes or fails.
                            ServerError error = !existingProducerFuture.isDone() ? ServerError.ServiceNotReady
                                    : getErrorCode(existingProducerFuture);
                            log.warn("[{}][{}] Producer is already present on the connection", remoteAddress,
                                    topicName);
                            ctx.writeAndFlush(Commands.newError(requestId, error,
                                    "Producer is already present on the connection"));
                            return null;
                        }
                    }

                    log.info("[{}][{}] Creating producer. producerId={}", remoteAddress, topicName, producerId);

                    service.getTopic(topicName.toString()).thenAccept((Topic topic) -> {
                        // Before creating the producer, check if the backlog quota is exceeded on the topic
                        if (topic.isBacklogQuotaExceeded(producerName)) {
                            IllegalStateException illegalStateException = new IllegalStateException(
                                    "Cannot create producer on topic with backlog quota exceeded");
                            BacklogQuota.RetentionPolicy retentionPolicy = topic.getBacklogQuota().getPolicy();
                            if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_request_hold) {
                                ctx.writeAndFlush(Commands.newError(requestId,
                                        ServerError.ProducerBlockedQuotaExceededError,
                                        illegalStateException.getMessage()));
                            } else if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_exception) {
                                ctx.writeAndFlush(Commands.newError(requestId,
                                        ServerError.ProducerBlockedQuotaExceededException,
                                        illegalStateException.getMessage()));
                            }
                            producerFuture.completeExceptionally(illegalStateException);
                            producers.remove(producerId, producerFuture);
                            return;
                        }

                        // Check whether the producer will publish encrypted messages or not
                        if (topic.isEncryptionRequired() && !isEncrypted) {
                            String msg = String.format("Encryption is required in %s", topicName);
                            log.warn("[{}] {}", remoteAddress, msg);
                            ctx.writeAndFlush(Commands.newError(requestId, ServerError.MetadataError, msg));
                            return;
                        }

                        disableTcpNoDelayIfNeeded(topicName.toString(), producerName);

                        CompletableFuture<SchemaVersion> schemaVersionFuture;
                        if (cmdProducer.hasSchema()) {
                            schemaVersionFuture = topic.addSchema(getSchema(cmdProducer.getSchema()));
                        } else {
                            schemaVersionFuture = CompletableFuture.completedFuture(SchemaVersion.Empty);
                        }

                        schemaVersionFuture.exceptionally(exception -> {
                            ctx.writeAndFlush(Commands.newError(requestId, ServerError.UnknownError,
                                    exception.getMessage()));
                            producers.remove(producerId, producerFuture);
                            return null;
                        });

                        schemaVersionFuture.thenAccept(schemaVersion -> {
                            Producer producer = new Producer(topic, ServerCnx.this, producerId, producerName,
                                    authRole, isEncrypted, metadata, schemaVersion);
                            try {
                                topic.addProducer(producer);
                                if (isActive()) {
                                    if (producerFuture.complete(producer)) {
                                        log.info("[{}] Created new producer: {}", remoteAddress, producer);
                                        ctx.writeAndFlush(Commands.newProducerSuccess(requestId, producerName,
                                                producer.getLastSequenceId(), producer.getSchemaVersion()));
                                        return;
                                    } else {
                                        // The producer's future was completed before by a close command
                                        producer.closeNow();
                                        log.info("[{}] Cleared producer created after timeout on client side {}",
                                                remoteAddress, producer);
                                    }
                                } else {
                                    producer.closeNow();
                                    log.info("[{}] Cleared producer created after connection was closed: {}",
                                            remoteAddress, producer);
                                    producerFuture.completeExceptionally(new IllegalStateException(
                                            "Producer created after connection was closed"));
                                }
                            } catch (BrokerServiceException ise) {
                                log.error("[{}] Failed to add producer to topic {}: {}", remoteAddress, topicName,
                                        ise.getMessage());
                                ctx.writeAndFlush(Commands.newError(requestId,
                                        BrokerServiceException.getClientErrorCode(ise), ise.getMessage()));
                                producerFuture.completeExceptionally(ise);
                            }
                            producers.remove(producerId, producerFuture);
                        });
                    }).exceptionally(exception -> {
                        Throwable cause = exception.getCause();
                        if (!(cause instanceof ServiceUnitNotReadyException)) {
                            // Do not print stack traces for expected exceptions
                            log.error("[{}] Failed to create topic {}", remoteAddress, topicName, exception);
                        }

                        // If the client timed out, the future would already be completed; send the error
                        // back to the client only if not completed already.
                        if (producerFuture.completeExceptionally(exception)) {
                            ctx.writeAndFlush(Commands.newError(requestId,
                                    BrokerServiceException.getClientErrorCode(cause), cause.getMessage()));
                        }
                        producers.remove(producerId, producerFuture);
                        return null;
                    });
                } else {
                    String msg = "Client is not authorized to Produce";
                    log.warn("[{}] {} with role {}", remoteAddress, msg, authRole);
                    ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, msg));
                }
                return null;
            }).exceptionally(e -> {
                String msg = String.format("[%s] %s with role %s", remoteAddress, e.getMessage(), authRole);
                log.warn(msg);
                ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, e.getMessage()));
                return null;
            });
        } else {
            final String msg = "Proxy Client is not authorized to Produce";
            log.warn("[{}] {} with role {} on topic {}", remoteAddress, msg, authRole, topicName);
            ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, msg));
        }
        return null;
    }).exceptionally(ex -> {
        String msg = String.format("[%s] %s with role %s", remoteAddress, ex.getMessage(), authRole);
        log.warn(msg);
        ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, ex.getMessage()));
        return null;
    });
}
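The getErrorCode(existingProducerFuture) helper used above is not shown in this snippet. A plausible sketch, assuming it extracts the cause from a future that has already completed exceptionally and maps it to a wire-level error code:

// Sketch of an assumed private helper; not part of the snippet above.
private static ServerError getErrorCode(CompletableFuture<Producer> future) {
    ServerError error = ServerError.UnknownError;
    try {
        // The future has completed exceptionally, so getNow throws a
        // CompletionException wrapping the original cause.
        future.getNow(null);
    } catch (Exception e) {
        if (e.getCause() instanceof BrokerServiceException) {
            error = BrokerServiceException.getClientErrorCode(e.getCause());
        }
    }
    return error;
}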
Use of org.apache.pulsar.broker.service.BrokerServiceException.ServiceUnitNotReadyException in project incubator-pulsar by apache.
The class NamespaceService, method splitAndOwnBundleOnceAndRetry:
void splitAndOwnBundleOnceAndRetry(NamespaceBundle bundle, boolean unload, AtomicInteger counter,
        CompletableFuture<Void> unloadFuture) {
    CompletableFuture<NamespaceBundles> updateFuture = new CompletableFuture<>();

    final Pair<NamespaceBundles, List<NamespaceBundle>> splittedBundles = bundleFactory.splitBundles(bundle, 2);

    // Split and updateNamespaceBundles. The update may fail because of a concurrent write to ZooKeeper.
    if (splittedBundles != null) {
        checkNotNull(splittedBundles.getLeft());
        checkNotNull(splittedBundles.getRight());
        checkArgument(splittedBundles.getRight().size() == 2, "bundle has to be split in two bundles");

        NamespaceName nsname = bundle.getNamespaceObject();
        if (LOG.isDebugEnabled()) {
            LOG.debug("[{}] splitAndOwnBundleOnce: {}, counter: {}, 2 bundles: {}, {}", nsname.toString(),
                    bundle.getBundleRange(), counter.get(), splittedBundles.getRight().get(0).getBundleRange(),
                    splittedBundles.getRight().get(1).getBundleRange());
        }
        try {
            // take ownership of the newly split bundles
            for (NamespaceBundle sBundle : splittedBundles.getRight()) {
                checkNotNull(ownershipCache.tryAcquiringOwnership(sBundle));
            }
            updateNamespaceBundles(nsname, splittedBundles.getLeft(), (rc, path, zkCtx, stat) -> {
                if (rc == Code.OK.intValue()) {
                    // invalidate the cache as zookeeper now has the split namespace bundle
                    bundleFactory.invalidateBundleCache(bundle.getNamespaceObject());
                    updateFuture.complete(splittedBundles.getLeft());
                } else if (rc == Code.BADVERSION.intValue()) {
                    KeeperException keeperException = KeeperException.create(KeeperException.Code.get(rc));
                    String msg = format("failed to update namespace policies [%s], NamespaceBundle: %s "
                            + "due to %s, counter: %d", nsname.toString(), bundle.getBundleRange(),
                            keeperException.getMessage(), counter.get());
                    LOG.warn(msg);
                    updateFuture.completeExceptionally(new ServerMetadataException(keeperException));
                } else {
                    String msg = format("failed to update namespace policies [%s], NamespaceBundle: %s due to %s",
                            nsname.toString(), bundle.getBundleRange(),
                            KeeperException.create(KeeperException.Code.get(rc)).getMessage());
                    LOG.warn(msg);
                    updateFuture.completeExceptionally(new ServiceUnitNotReadyException(msg));
                }
            });
        } catch (Exception e) {
            String msg = format("failed to acquire ownership of split bundle for namespace [%s], %s",
                    nsname.toString(), e.getMessage());
            LOG.warn(msg, e);
            updateFuture.completeExceptionally(new ServiceUnitNotReadyException(msg));
        }
    } else {
        String msg = format("bundle %s not found under namespace", bundle.toString());
        LOG.warn(msg);
        updateFuture.completeExceptionally(new ServiceUnitNotReadyException(msg));
    }

    // If updateNamespaceBundles succeeded, invalidate the bundle cache and unload.
    // Otherwise retry splitAndOwnBundleOnceAndRetry.
    updateFuture.whenCompleteAsync((r, t) -> {
        if (t != null) {
            // retry several times on BadVersion
            if ((t instanceof ServerMetadataException) && (counter.decrementAndGet() >= 0)) {
                pulsar.getOrderedExecutor()
                        .submit(() -> splitAndOwnBundleOnceAndRetry(bundle, unload, counter, unloadFuture));
            } else {
                // retries exhausted, or some other exception was hit
                String msg2 = format(" %s not success update nsBundles, counter %d, reason %s", bundle.toString(),
                        counter.get(), t.getMessage());
                LOG.warn(msg2);
                unloadFuture.completeExceptionally(new ServiceUnitNotReadyException(msg2));
            }
            return;
        }

        // updateNamespaceBundles succeeded
        try {
            // disable the old bundle in memory
            getOwnershipCache().updateBundleState(bundle, false);
            // update the bundled_topic cache for load-report generation
            pulsar.getBrokerService().refreshTopicToStatsMaps(bundle);
            loadManager.get().setLoadReportForceUpdateFlag();
            if (unload) {
                // unload the new split bundles
                r.getBundles().forEach(splitBundle -> {
                    try {
                        unloadNamespaceBundle(splitBundle);
                    } catch (Exception e) {
                        LOG.warn("Failed to unload split bundle {}", splitBundle, e);
                        throw new RuntimeException("Failed to unload split bundle " + splitBundle, e);
                    }
                });
            }
            unloadFuture.complete(null);
        } catch (Exception e) {
            String msg1 = format("failed to disable bundle %s under namespace [%s] with error %s",
                    bundle.getNamespaceObject().toString(), bundle.toString(), e.getMessage());
            LOG.warn(msg1, e);
            unloadFuture.completeExceptionally(new ServiceUnitNotReadyException(msg1));
        }
        return;
    }, pulsar.getOrderedExecutor());
}
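The retry logic above re-submits only on ServerMetadataException (the BadVersion case) and gives up once the counter is exhausted, surfacing everything else as ServiceUnitNotReadyException. The same pattern in isolation, as a minimal sketch with illustrative names (doOneAttempt stands in for the split-and-update step):

// Generic sketch of the bounded-retry pattern (doOneAttempt is hypothetical).
void attemptWithRetry(AtomicInteger budget, CompletableFuture<Void> result, Executor executor) {
    doOneAttempt().whenCompleteAsync((v, t) -> {
        if (t == null) {
            result.complete(null);
        } else if (t instanceof ServerMetadataException && budget.decrementAndGet() >= 0) {
            // A concurrent metadata write lost the race: re-run the whole attempt.
            executor.execute(() -> attemptWithRetry(budget, result, executor));
        } else {
            // Retries exhausted or a non-retriable failure: fail the overall operation.
            result.completeExceptionally(new ServiceUnitNotReadyException(t.getMessage()));
        }
    }, executor);
}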