
Example 1 with ClusterEnvironment

Use of com.couchbase.client.java.env.ClusterEnvironment in project couchbase-elasticsearch-connector by couchbase.

In class ElasticsearchConnector, method run:

public static void run(ConnectorConfig config, PanicButton panicButton, Duration startupQuietPeriod) throws Throwable {
    final Throwable fatalError;
    final Membership membership = config.group().staticMembership();
    LOGGER.info("Read configuration: {}", redactSystem(config));
    final ScheduledExecutorService checkpointExecutor = Executors.newSingleThreadScheduledExecutor();
    try (Slf4jReporter metricReporter = newSlf4jReporter(config.metrics().logInterval());
        HttpServer httpServer = new HttpServer(config.metrics().httpPort(), membership);
        RestHighLevelClient esClient = newElasticsearchClient(config.elasticsearch(), config.trustStore())) {
        DocumentLifecycle.setLogLevel(config.logging().logDocumentLifecycle() ? LogLevel.INFO : LogLevel.DEBUG);
        LogRedaction.setRedactionLevel(config.logging().redactionLevel());
        DcpHelper.setRedactionLevel(config.logging().redactionLevel());
        final ClusterEnvironment env = CouchbaseHelper.environmentBuilder(config.couchbase(), config.trustStore()).build();
        final Cluster cluster = CouchbaseHelper.createCluster(config.couchbase(), env);
        final Version elasticsearchVersion = waitForElasticsearchAndRequireVersion(esClient, new Version(2, 0, 0), new Version(5, 6, 16));
        LOGGER.info("Elasticsearch version {}", elasticsearchVersion);
        validateConfig(elasticsearchVersion, config.elasticsearch());
        // Wait for couchbase server to come online, then open the bucket.
        final Bucket bucket = CouchbaseHelper.waitForBucket(cluster, config.couchbase().bucket());
        final Set<SeedNode> kvNodes = CouchbaseHelper.getKvNodes(config.couchbase(), bucket);
        final boolean storeMetadataInSourceBucket = config.couchbase().metadataBucket().equals(config.couchbase().bucket());
        final Bucket metadataBucket = storeMetadataInSourceBucket ? bucket : CouchbaseHelper.waitForBucket(cluster, config.couchbase().metadataBucket());
        final Collection metadataCollection = CouchbaseHelper.getMetadataCollection(metadataBucket, config.couchbase());
        final CheckpointDao checkpointDao = new CouchbaseCheckpointDao(metadataCollection, config.group().name());
        // todo get this from dcp client
        final String bucketUuid = "";
        final CheckpointService checkpointService = new CheckpointService(bucketUuid, checkpointDao);
        final RequestFactory requestFactory = new RequestFactory(config.elasticsearch().types(), config.elasticsearch().docStructure(), config.elasticsearch().rejectLog());
        final ElasticsearchWorkerGroup workers = new ElasticsearchWorkerGroup(esClient, checkpointService, requestFactory, ErrorListener.NOOP, config.elasticsearch().bulkRequest());
        Metrics.gauge("write.queue", "Document events currently buffered in memory.", workers, ElasticsearchWorkerGroup::getQueueSize);
        // High value indicates the connector has stalled
        Metrics.gauge("es.wait.ms", null, workers, ElasticsearchWorkerGroup::getCurrentRequestMillis);
        // Same as "es.wait.ms" but normalized to seconds for Prometheus
        Metrics.gauge("es.wait.seconds", "Duration of in-flight Elasticsearch bulk request (including any retries). Long duration may indicate connector has stalled.", workers, value -> value.getCurrentRequestMillis() / (double) SECONDS.toMillis(1));
        final Client dcpClient = DcpHelper.newClient(config.group().name(), config.couchbase(), kvNodes, config.trustStore());
        initEventListener(dcpClient, panicButton, workers::submit);
        final Thread saveCheckpoints = new Thread(checkpointService::save, "save-checkpoints");
        try {
            try {
                dcpClient.connect().block(Duration.ofMillis(config.couchbase().dcp().connectTimeout().millis()));
            } catch (Throwable t) {
                panicButton.panic("Failed to establish initial DCP connection within " + config.couchbase().dcp().connectTimeout(), t);
            }
            final int numPartitions = dcpClient.numPartitions();
            LOGGER.info("Bucket has {} partitions. Membership = {}", numPartitions, membership);
            final Set<Integer> partitions = membership.getPartitions(numPartitions);
            if (partitions.isEmpty()) {
                // need to do this check, because if we started streaming with an empty list, the DCP client would open streams for *all* partitions
                throw new IllegalArgumentException("There are more workers than Couchbase vbuckets; this worker doesn't have any work to do.");
            }
            checkpointService.init(numPartitions, () -> DcpHelper.getCurrentSeqnosAsMap(dcpClient, partitions, Duration.ofSeconds(5)));
            dcpClient.initializeState(StreamFrom.BEGINNING, StreamTo.INFINITY).block();
            initSessionState(dcpClient, checkpointService, partitions);
            // Pause before streaming so peers can terminate first in case of unsafe scaling or other configuration problems.
            if (!startupQuietPeriod.isZero()) {
                LOGGER.info("Entering startup quiet period; sleeping for {} so peers can terminate in case of unsafe scaling.", startupQuietPeriod);
                MILLISECONDS.sleep(startupQuietPeriod.toMillis());
                LOGGER.info("Startup quiet period complete.");
            }
            checkpointExecutor.scheduleWithFixedDelay(checkpointService::save, 10, 10, SECONDS);
            RuntimeHelper.addShutdownHook(saveCheckpoints);
            // Unless shutdown is due to panic...
            panicButton.addPrePanicHook(() -> RuntimeHelper.removeShutdownHook(saveCheckpoints));
            try {
                LOGGER.debug("Opening DCP streams for partitions: {}", partitions);
                dcpClient.startStreaming(partitions).block();
            } catch (RuntimeException e) {
                ThrowableHelper.propagateCauseIfPossible(e, InterruptedException.class);
                throw e;
            }
            // Start HTTP server *after* other setup is complete, so the metrics endpoint
            // can be used as a "successful startup" probe.
            httpServer.start();
            if (config.metrics().httpPort() >= 0) {
                LOGGER.info("Prometheus metrics available at http://localhost:{}/metrics/prometheus", httpServer.getBoundPort());
                LOGGER.info("Dropwizard metrics available at http://localhost:{}/metrics/dropwizard?pretty", httpServer.getBoundPort());
            } else {
                LOGGER.info("Metrics HTTP server is disabled. Edit the [metrics] 'httpPort' config property to enable.");
            }
            LOGGER.info("Elasticsearch connector startup complete.");
            fatalError = workers.awaitFatalError();
            LOGGER.error("Terminating due to fatal error from worker", fatalError);
        } catch (InterruptedException shutdownRequest) {
            LOGGER.info("Graceful shutdown requested. Saving checkpoints and cleaning up.");
            checkpointService.save();
            throw shutdownRequest;
        } catch (Throwable t) {
            LOGGER.error("Terminating due to fatal error during setup", t);
            throw t;
        } finally {
            // If we get here it means there was a fatal exception, or the connector is running in distributed
            // or test mode and a graceful shutdown was requested. Don't need the shutdown hook for any of those cases.
            RuntimeHelper.removeShutdownHook(saveCheckpoints);
            checkpointExecutor.shutdown();
            metricReporter.stop();
            dcpClient.disconnect().block();
            // to avoid buffer leak, must close *after* dcp client stops feeding it events
            workers.close();
            checkpointExecutor.awaitTermination(10, SECONDS);
            cluster.disconnect();
            // can't reuse, because connector config might have different SSL settings next time
            env.shutdown();
        }
    }
    // give stdout a chance to quiet down so the stack trace on stderr isn't interleaved with stdout.
    MILLISECONDS.sleep(500);
    throw fatalError;
}
Also used :
VersionHelper.getVersionString(com.couchbase.connector.VersionHelper.getVersionString)
CouchbaseCheckpointDao(com.couchbase.connector.dcp.CouchbaseCheckpointDao)
CheckpointService(com.couchbase.connector.dcp.CheckpointService)
RequestFactory(com.couchbase.connector.elasticsearch.io.RequestFactory)
Version(com.couchbase.client.dcp.util.Version)
ElasticsearchHelper.waitForElasticsearchAndRequireVersion(com.couchbase.connector.elasticsearch.ElasticsearchHelper.waitForElasticsearchAndRequireVersion)
HttpServer(com.couchbase.connector.util.HttpServer)
Membership(com.couchbase.connector.cluster.Membership)
DefaultKubernetesClient(io.fabric8.kubernetes.client.DefaultKubernetesClient)
ElasticsearchHelper.newElasticsearchClient(com.couchbase.connector.elasticsearch.ElasticsearchHelper.newElasticsearchClient)
Client(com.couchbase.client.dcp.Client)
RestHighLevelClient(org.elasticsearch.client.RestHighLevelClient)
KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient)
ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService)
SeedNode(com.couchbase.client.core.env.SeedNode)
Cluster(com.couchbase.client.java.Cluster)
ClusterEnvironment(com.couchbase.client.java.env.ClusterEnvironment)
CheckpointDao(com.couchbase.connector.dcp.CheckpointDao)
Bucket(com.couchbase.client.java.Bucket)
Slf4jReporter(com.codahale.metrics.Slf4jReporter)
Collection(com.couchbase.client.java.Collection)
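
The key lifecycle detail in this example is that the connector builds its own ClusterEnvironment, so it has to disconnect the Cluster first and then shut the environment down itself (see the finally block above). Below is a minimal sketch of that caller-owned pattern using the plain SDK API rather than the connector's CouchbaseHelper wrappers; the host, credentials, and bucket name are placeholders.

import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.env.ClusterEnvironment;

import static com.couchbase.client.java.ClusterOptions.clusterOptions;

public class OwnedEnvironmentLifecycle {

    public static void main(String[] args) {
        // We build the environment ourselves, so we also own its shutdown.
        ClusterEnvironment env = ClusterEnvironment.builder().build();
        Cluster cluster = Cluster.connect("127.0.0.1",
            clusterOptions("Administrator", "password").environment(env));
        try {
            Bucket bucket = cluster.bucket("travel-sample");
            // ... stream / index documents here ...
        } finally {
            // Same ordering as the connector's finally block:
            // disconnect the cluster first, then shut down the environment we created.
            cluster.disconnect();
            env.shutdown();
        }
    }
}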

Example 2 with ClusterEnvironment

Use of com.couchbase.client.java.env.ClusterEnvironment in project couchbase-jvm-clients by couchbase.

In class RawManager, method callManagement:

private static Mono<RawManagerResponse> callManagement(final Cluster cluster, final RawManagerRequest request, final RawManagerOptions options) {
    final ClusterEnvironment environment = cluster.environment();
    final RawManagerOptions.Built opts = options.build();
    JsonSerializer serializer = opts.serializer() != null ? opts.serializer() : environment.jsonSerializer();
    Duration timeout = opts.timeout().orElse(environment.timeoutConfig().managementTimeout());
    RetryStrategy retryStrategy = opts.retryStrategy().orElse(environment.retryStrategy());
    final GenericManagerRequest req = new GenericManagerRequest(timeout, cluster.core().context(), retryStrategy, () -> {
        FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, request.method(), request.uri());
        for (Map.Entry<String, Object> e : opts.httpHeaders().entrySet()) {
            httpRequest.headers().set(e.getKey(), e.getValue());
        }
        return httpRequest;
    }, request.method().equals(HttpMethod.GET), null);
    cluster.core().send(req);
    return Reactor.wrap(req, req.response(), true).map(res -> new RawManagerResponse(request.serviceType(), serializer, res.httpStatus(), res.content()));
}
Also used :
FullHttpRequest(com.couchbase.client.core.deps.io.netty.handler.codec.http.FullHttpRequest)
DefaultFullHttpRequest(com.couchbase.client.core.deps.io.netty.handler.codec.http.DefaultFullHttpRequest)
Duration(java.time.Duration)
JsonSerializer(com.couchbase.client.java.codec.JsonSerializer)
ClusterEnvironment(com.couchbase.client.java.env.ClusterEnvironment)
GenericManagerRequest(com.couchbase.client.core.msg.manager.GenericManagerRequest)
RetryStrategy(com.couchbase.client.core.retry.RetryStrategy)
Map(java.util.Map)
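
callManagement is the private core of RawManager; note how every setting (serializer, timeout, retry strategy) falls back to the ClusterEnvironment default when the options don't override it. A rough usage sketch of the public wrapper follows. It assumes the reactive entry point is RawManager.callReactive, that RawManagerRequest.get(ServiceType, uri) builds the request, and that RawManagerOptions exposes a rawManagerOptions() factory and a timeout(Duration) setter, so treat those names as assumptions rather than confirmed API.

import java.time.Duration;

import com.couchbase.client.core.service.ServiceType;
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.manager.raw.RawManager;
import com.couchbase.client.java.manager.raw.RawManagerRequest;
import com.couchbase.client.java.manager.raw.RawManagerResponse;

import static com.couchbase.client.java.manager.raw.RawManagerOptions.rawManagerOptions;

public class RawManagerSketch {

    public static void main(String[] args) {
        Cluster cluster = Cluster.connect("127.0.0.1", "Administrator", "password");
        // Assumption: callReactive / RawManagerRequest.get are the public wrappers
        // around the callManagement method shown above.
        RawManagerResponse response = RawManager
            .callReactive(cluster,
                RawManagerRequest.get(ServiceType.MANAGER, "/pools"),
                // Overrides the environment's managementTimeout() default seen in callManagement.
                rawManagerOptions().timeout(Duration.ofSeconds(10)))
            .block();
        // Assumption: the response exposes the HTTP status via httpStatus().
        System.out.println("HTTP status: " + response.httpStatus());
        cluster.disconnect();
    }
}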

Example 3 with ClusterEnvironment

Use of com.couchbase.client.java.env.ClusterEnvironment in project couchbase-jvm-clients by couchbase.

In class AsyncCluster, method extractClusterEnvironment:

/**
 * Helper method to extract the cluster environment from the connection string and options.
 *
 * @param connectionString the connection string which is used to populate settings into it.
 * @param opts the cluster options.
 * @return the cluster environment, created if not passed in or the one supplied from the user.
 */
static Supplier<ClusterEnvironment> extractClusterEnvironment(final String connectionString, final ClusterOptions.Built opts) {
    ConnectionString connStr = ConnectionString.create(connectionString);
    Supplier<ClusterEnvironment> envSupplier;
    if (opts.environment() == null) {
        ClusterEnvironment.Builder builder = ClusterEnvironment.builder();
        if (opts.environmentCustomizer() != null) {
            opts.environmentCustomizer().accept(builder);
        }
        builder.load(new ConnectionStringPropertyLoader(connStr));
        envSupplier = new OwnedSupplier<>(builder.build());
    } else {
        envSupplier = opts::environment;
    }
    boolean ownsEnvironment = envSupplier instanceof OwnedSupplier;
    checkConnectionString(envSupplier.get(), ownsEnvironment, connStr);
    return envSupplier;
}
Also used :
OwnedSupplier(com.couchbase.client.core.env.OwnedSupplier)
ClusterEnvironment(com.couchbase.client.java.env.ClusterEnvironment)
ConnectionStringPropertyLoader(com.couchbase.client.core.env.ConnectionStringPropertyLoader)
ConnectionStringUtil.asConnectionString(com.couchbase.client.core.util.ConnectionStringUtil.asConnectionString)
ConnectionStringUtil.checkConnectionString(com.couchbase.client.core.util.ConnectionStringUtil.checkConnectionString)
ConnectionString(com.couchbase.client.core.util.ConnectionString)
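
From the caller's side, the two branches above correspond to the two ways ClusterOptions can carry an environment: a pre-built ClusterEnvironment (the SDK does not own it, so the caller must shut it down) or just a customizer callback (the SDK builds the environment itself, wraps it in an OwnedSupplier, and shuts it down on disconnect). A small sketch contrasting the two, with placeholder host and credentials:

import java.time.Duration;

import com.couchbase.client.core.env.TimeoutConfig;
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.env.ClusterEnvironment;

import static com.couchbase.client.java.ClusterOptions.clusterOptions;

public class EnvironmentOwnership {

    public static void main(String[] args) {
        // Path 1: pass a pre-built environment; extractClusterEnvironment returns it as-is,
        // so the caller must shut it down after disconnecting.
        ClusterEnvironment env = ClusterEnvironment.builder()
            .timeoutConfig(TimeoutConfig.kvTimeout(Duration.ofSeconds(2)))
            .build();
        Cluster callerOwned = Cluster.connect("127.0.0.1",
            clusterOptions("Administrator", "password").environment(env));
        callerOwned.disconnect();
        env.shutdown();

        // Path 2: pass only a customizer; extractClusterEnvironment builds the environment
        // itself (wrapped in an OwnedSupplier), so disconnect() also releases it.
        Cluster sdkOwned = Cluster.connect("127.0.0.1",
            clusterOptions("Administrator", "password")
                .environment(b -> b.timeoutConfig(TimeoutConfig.kvTimeout(Duration.ofSeconds(2)))));
        sdkOwned.disconnect();
    }
}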

Example 4 with ClusterEnvironment

Use of com.couchbase.client.java.env.ClusterEnvironment in project couchbase-jvm-clients by couchbase.

In class CustomEnvironment, method simpleEnvironmentConfiguration:

/**
 * If you only need one Cluster, or if you need multiple Clusters with
 * different configurations, customize the environment when creating
 * the Cluster(s) like in this example.
 */
public static void simpleEnvironmentConfiguration() {
    Cluster cluster = Cluster.connect("127.0.0.1", clusterOptions("Administrator", "password").environment(env -> env.timeoutConfig(TimeoutConfig.kvTimeout(Duration.ofSeconds(2)))));
    Bucket bucket = cluster.bucket("travel-sample");
    Collection collection = bucket.defaultCollection();
    // [amazing application code here]
    // When you're done with the cluster, disconnect it to release resources.
    // Because we didn't configure the cluster with a pre-built ClusterEnvironment,
    // the cluster owns its environment and will automatically shut it down.
    cluster.disconnect();
}
Also used :
Collection(com.couchbase.client.java.Collection)
Bucket(com.couchbase.client.java.Bucket)
ClusterOptions.clusterOptions(com.couchbase.client.java.ClusterOptions.clusterOptions)
Cluster(com.couchbase.client.java.Cluster)
Duration(java.time.Duration)
ClusterEnvironment(com.couchbase.client.java.env.ClusterEnvironment)
TimeoutConfig(com.couchbase.client.core.env.TimeoutConfig)

Example 5 with ClusterEnvironment

Use of com.couchbase.client.java.env.ClusterEnvironment in project couchbase-jvm-clients by couchbase.

In class CustomEnvironment, method shareEnvironmentBetweenClusters:

/**
 * In this example we're going to build a ClusterEnvironment ourselves
 * and share it between multiple clusters.
 * <p>
 * Because we are creating the shared environment, we are responsible for
 * shutting it down after all the clusters that use it have disconnected.
 * <p>
 * <b>CAUTION:</b> Failing to shut down the shared environment results
 * in a resource leak.
 */
public static void shareEnvironmentBetweenClusters() {
    ClusterEnvironment sharedEnvironment = ClusterEnvironment.builder().timeoutConfig(TimeoutConfig.kvTimeout(Duration.ofSeconds(2))).build();
    Cluster cluster1 = Cluster.connect("127.0.0.1", clusterOptions("Administrator", "password").environment(sharedEnvironment));
    Cluster cluster2 = Cluster.connect("127.0.0.1", clusterOptions("Administrator", "password").environment(sharedEnvironment));
    // From here on everything works the same.
    Bucket bucket1 = cluster1.bucket("travel-sample");
    Bucket bucket2 = cluster2.bucket("travel-sample");
    Collection collection1 = bucket1.defaultCollection();
    Collection collection2 = bucket2.defaultCollection();
    // [amazing application code here]
    /*
         * Do not forget to first shut down the clusters and then also the environment afterwards!
         */
    cluster1.disconnect();
    cluster2.disconnect();
    sharedEnvironment.shutdown();
}
Also used :
ClusterEnvironment(com.couchbase.client.java.env.ClusterEnvironment)
Bucket(com.couchbase.client.java.Bucket)
Cluster(com.couchbase.client.java.Cluster)
Collection(com.couchbase.client.java.Collection)
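
Because a leaked shared environment is easy to cause when an exception skips the shutdown calls, a defensive variation of the example above wraps the work in try/finally. This is just a sketch of that idea, not part of the original sample:

import java.time.Duration;

import com.couchbase.client.core.env.TimeoutConfig;
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.env.ClusterEnvironment;

import static com.couchbase.client.java.ClusterOptions.clusterOptions;

public class SharedEnvironmentWithFinally {

    public static void main(String[] args) {
        ClusterEnvironment sharedEnvironment = ClusterEnvironment.builder()
            .timeoutConfig(TimeoutConfig.kvTimeout(Duration.ofSeconds(2)))
            .build();
        Cluster cluster1 = null;
        Cluster cluster2 = null;
        try {
            cluster1 = Cluster.connect("127.0.0.1",
                clusterOptions("Administrator", "password").environment(sharedEnvironment));
            cluster2 = Cluster.connect("127.0.0.1",
                clusterOptions("Administrator", "password").environment(sharedEnvironment));
            // [amazing application code here]
        } finally {
            // Disconnect all clusters first, then shut the shared environment down.
            if (cluster1 != null) cluster1.disconnect();
            if (cluster2 != null) cluster2.disconnect();
            sharedEnvironment.shutdown();
        }
    }
}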

Aggregations

ClusterEnvironment (com.couchbase.client.java.env.ClusterEnvironment) 13
Cluster (com.couchbase.client.java.Cluster) 7
Bucket (com.couchbase.client.java.Bucket) 5
Collection (com.couchbase.client.java.Collection) 4
Test (org.junit.jupiter.api.Test) 4
JsonSerializer (com.couchbase.client.java.codec.JsonSerializer) 3
SeedNode (com.couchbase.client.core.env.SeedNode) 2
JacksonJsonSerializer (com.couchbase.client.java.codec.JacksonJsonSerializer) 2
CheckpointDao (com.couchbase.connector.dcp.CheckpointDao) 2
CouchbaseCheckpointDao (com.couchbase.connector.dcp.CouchbaseCheckpointDao) 2
Duration (java.time.Duration) 2
ComponentException (org.talend.sdk.component.api.exception.ComponentException) 2
Slf4jReporter (com.codahale.metrics.Slf4jReporter) 1
DefaultFullHttpRequest (com.couchbase.client.core.deps.io.netty.handler.codec.http.DefaultFullHttpRequest) 1
FullHttpRequest (com.couchbase.client.core.deps.io.netty.handler.codec.http.FullHttpRequest) 1
PingResult (com.couchbase.client.core.diagnostics.PingResult) 1
ConnectionStringPropertyLoader (com.couchbase.client.core.env.ConnectionStringPropertyLoader) 1
OwnedSupplier (com.couchbase.client.core.env.OwnedSupplier) 1
TimeoutConfig (com.couchbase.client.core.env.TimeoutConfig) 1
Builder (com.couchbase.client.core.env.TimeoutConfig.Builder) 1