Use of com.couchbase.client.dcp.Client in project couchbase-elasticsearch-connector by couchbase.
The class CheckpointClear, method setCheckpointToNow:
private static void setCheckpointToNow(ConnectorConfig config, Set<SeedNode> kvNodes, CheckpointDao checkpointDao) throws IOException {
  final Client dcpClient = DcpHelper.newClient(config.group().name(), config.couchbase(), kvNodes, config.trustStore());
  try {
    dcpClient.connect().block();
    final int numPartitions = dcpClient.numPartitions();
    final Set<Integer> allPartitions = new HashSet<>(allPartitions(numPartitions));

    // Populate the DCP session state with each partition's current sequence number.
    DcpHelper.getCurrentSeqnos(dcpClient, allPartitions);
    final SessionState sessionState = dcpClient.sessionState();

    // Build a checkpoint for every partition whose seqno and snapshot marker
    // both point at "now", then persist them all.
    final Map<Integer, Checkpoint> now = new HashMap<>();
    for (int i = 0; i < allPartitions.size(); i++) {
      PartitionState p = sessionState.get(i);
      final long seqno = p.getStartSeqno();
      now.put(i, new Checkpoint(p.getLastUuid(), seqno, new SnapshotMarker(seqno, seqno)));
    }
    checkpointDao.save("", now);
  } finally {
    dcpClient.disconnect().block();
  }
}
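The allPartitions helper referenced above isn't part of this snippet. A minimal sketch, assuming it simply enumerates every partition (vbucket) number from 0 through numPartitions - 1:

// Hypothetical helper, assumed to enumerate all partition numbers.
private static List<Integer> allPartitions(int numPartitions) {
  List<Integer> partitions = new ArrayList<>(numPartitions);
  for (int i = 0; i < numPartitions; i++) {
    partitions.add(i);
  }
  return partitions;
}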
Use of com.couchbase.client.dcp.Client in project couchbase-elasticsearch-connector by couchbase.
The class DcpHelper, method newClient:
public static Client newClient(String groupName, CouchbaseConfig config, Set<SeedNode> kvNodes, Supplier<KeyStore> trustStore) {
  // ES connector bootstraps using the Manager port, but the DCP client wants the KV port.
  // Get the KV ports from the bucket config!
  Set<String> seedNodes = kvNodes.stream()
      .map(node -> new HostAndPort(node.address(), node.kvPort().orElseThrow(() -> new AssertionError("seed node missing KV port"))))
      .map(HostAndPort::format)
      .collect(toSet());

  Set<String> collectionNames = config.collections().stream()
      .map(ScopeAndCollection::format)
      .collect(toSet());

  final Client.Builder builder = Client.builder()
      .meterRegistry(Metrics.registry())
      .userAgent("elasticsearch-connector", VersionHelper.getVersion(), groupName)
      .bootstrapTimeout(Duration.ofMillis(config.dcp().connectTimeout().millis()))
      .seedNodes(seedNodes)
      .networkResolution(NetworkResolution.valueOf(config.network().name()))
      .bucket(config.bucket())
      .authenticator(authenticator(config))
      .controlParam(DcpControl.Names.SET_NOOP_INTERVAL, 20)
      .compression(config.dcp().compression())
      .collectionsAware(true)
      .scopeName(config.scope())
      .collectionNames(collectionNames)
      .mitigateRollbacks(config.dcp().persistencePollingInterval().duration(), config.dcp().persistencePollingInterval().timeUnit())
      .flowControl(toSaturatedInt(config.dcp().flowControlBuffer().getBytes()))
      .bufferAckWatermark(60);

  if (config.secureConnection()) {
    builder.securityConfig(SecurityConfig.builder()
        .enableTls(true)
        .trustStore(trustStore.get())
        .enableHostnameVerification(config.hostnameVerification()));
  }

  return builder.build();
}
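A client built this way follows the same lifecycle shown in the CheckpointClear snippet above: connect, do work, and always disconnect in a finally block so the DCP connections are released. A minimal usage sketch, reusing the config, kvNodes, and trustStore arguments from the surrounding code:

Client dcpClient = DcpHelper.newClient(config.group().name(), config.couchbase(), kvNodes, config.trustStore());
try {
  dcpClient.connect().block();
  // ... stream events or inspect dcpClient.sessionState() ...
} finally {
  dcpClient.disconnect().block();
}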
Use of com.couchbase.client.dcp.Client in project couchbase-elasticsearch-connector by couchbase.
The class ElasticsearchConnector, method run:
public static void run(ConnectorConfig config, PanicButton panicButton, Duration startupQuietPeriod) throws Throwable {
  final Throwable fatalError;
  final Membership membership = config.group().staticMembership();
  LOGGER.info("Read configuration: {}", redactSystem(config));

  final ScheduledExecutorService checkpointExecutor = Executors.newSingleThreadScheduledExecutor();

  try (Slf4jReporter metricReporter = newSlf4jReporter(config.metrics().logInterval());
       HttpServer httpServer = new HttpServer(config.metrics().httpPort(), membership);
       RestHighLevelClient esClient = newElasticsearchClient(config.elasticsearch(), config.trustStore())) {

    DocumentLifecycle.setLogLevel(config.logging().logDocumentLifecycle() ? LogLevel.INFO : LogLevel.DEBUG);
    LogRedaction.setRedactionLevel(config.logging().redactionLevel());
    DcpHelper.setRedactionLevel(config.logging().redactionLevel());

    final ClusterEnvironment env = CouchbaseHelper.environmentBuilder(config.couchbase(), config.trustStore()).build();
    final Cluster cluster = CouchbaseHelper.createCluster(config.couchbase(), env);

    final Version elasticsearchVersion = waitForElasticsearchAndRequireVersion(esClient, new Version(2, 0, 0), new Version(5, 6, 16));
    LOGGER.info("Elasticsearch version {}", elasticsearchVersion);
    validateConfig(elasticsearchVersion, config.elasticsearch());

    // Wait for Couchbase Server to come online, then open the bucket.
    final Bucket bucket = CouchbaseHelper.waitForBucket(cluster, config.couchbase().bucket());
    final Set<SeedNode> kvNodes = CouchbaseHelper.getKvNodes(config.couchbase(), bucket);

    final boolean storeMetadataInSourceBucket = config.couchbase().metadataBucket().equals(config.couchbase().bucket());
    final Bucket metadataBucket = storeMetadataInSourceBucket ? bucket : CouchbaseHelper.waitForBucket(cluster, config.couchbase().metadataBucket());
    final Collection metadataCollection = CouchbaseHelper.getMetadataCollection(metadataBucket, config.couchbase());

    final CheckpointDao checkpointDao = new CouchbaseCheckpointDao(metadataCollection, config.group().name());

    final String bucketUuid = ""; // todo get this from dcp client
    final CheckpointService checkpointService = new CheckpointService(bucketUuid, checkpointDao);
    final RequestFactory requestFactory = new RequestFactory(config.elasticsearch().types(), config.elasticsearch().docStructure(), config.elasticsearch().rejectLog());

    final ElasticsearchWorkerGroup workers = new ElasticsearchWorkerGroup(esClient, checkpointService, requestFactory, ErrorListener.NOOP, config.elasticsearch().bulkRequest());

    Metrics.gauge("write.queue", "Document events currently buffered in memory.", workers, ElasticsearchWorkerGroup::getQueueSize);

    // High value indicates the connector has stalled.
    Metrics.gauge("es.wait.ms", null, workers, ElasticsearchWorkerGroup::getCurrentRequestMillis);

    // Same as "es.wait.ms" but normalized to seconds for Prometheus.
    Metrics.gauge("es.wait.seconds", "Duration of in-flight Elasticsearch bulk request (including any retries). Long duration may indicate connector has stalled.", workers, value -> value.getCurrentRequestMillis() / (double) SECONDS.toMillis(1));

    final Client dcpClient = DcpHelper.newClient(config.group().name(), config.couchbase(), kvNodes, config.trustStore());
    initEventListener(dcpClient, panicButton, workers::submit);

    final Thread saveCheckpoints = new Thread(checkpointService::save, "save-checkpoints");

    try {
      try {
        dcpClient.connect().block(Duration.ofMillis(config.couchbase().dcp().connectTimeout().millis()));
      } catch (Throwable t) {
        panicButton.panic("Failed to establish initial DCP connection within " + config.couchbase().dcp().connectTimeout(), t);
      }

      final int numPartitions = dcpClient.numPartitions();
      LOGGER.info("Bucket has {} partitions. Membership = {}", numPartitions, membership);
      final Set<Integer> partitions = membership.getPartitions(numPartitions);
      if (partitions.isEmpty()) {
        // Need to do this check, because if we started streaming with an empty list,
        // the DCP client would open streams for *all* partitions.
        throw new IllegalArgumentException("There are more workers than Couchbase vbuckets; this worker doesn't have any work to do.");
      }

      checkpointService.init(numPartitions, () -> DcpHelper.getCurrentSeqnosAsMap(dcpClient, partitions, Duration.ofSeconds(5)));

      dcpClient.initializeState(StreamFrom.BEGINNING, StreamTo.INFINITY).block();
      initSessionState(dcpClient, checkpointService, partitions);

      // Give peers a chance to terminate first in case of unsafe scaling or
      // configuration problems.
      if (!startupQuietPeriod.isZero()) {
        LOGGER.info("Entering startup quiet period; sleeping for {} so peers can terminate in case of unsafe scaling.", startupQuietPeriod);
        MILLISECONDS.sleep(startupQuietPeriod.toMillis());
        LOGGER.info("Startup quiet period complete.");
      }

      checkpointExecutor.scheduleWithFixedDelay(checkpointService::save, 10, 10, SECONDS);
      RuntimeHelper.addShutdownHook(saveCheckpoints);
      // Unless shutdown is due to panic...
      panicButton.addPrePanicHook(() -> RuntimeHelper.removeShutdownHook(saveCheckpoints));

      try {
        LOGGER.debug("Opening DCP streams for partitions: {}", partitions);
        dcpClient.startStreaming(partitions).block();
      } catch (RuntimeException e) {
        ThrowableHelper.propagateCauseIfPossible(e, InterruptedException.class);
        throw e;
      }

      // Start HTTP server *after* other setup is complete, so the metrics endpoint
      // can be used as a "successful startup" probe.
      httpServer.start();
      if (config.metrics().httpPort() >= 0) {
        LOGGER.info("Prometheus metrics available at http://localhost:{}/metrics/prometheus", httpServer.getBoundPort());
        LOGGER.info("Dropwizard metrics available at http://localhost:{}/metrics/dropwizard?pretty", httpServer.getBoundPort());
      } else {
        LOGGER.info("Metrics HTTP server is disabled. Edit the [metrics] 'httpPort' config property to enable.");
      }

      LOGGER.info("Elasticsearch connector startup complete.");
      fatalError = workers.awaitFatalError();
      LOGGER.error("Terminating due to fatal error from worker", fatalError);

    } catch (InterruptedException shutdownRequest) {
      LOGGER.info("Graceful shutdown requested. Saving checkpoints and cleaning up.");
      checkpointService.save();
      throw shutdownRequest;

    } catch (Throwable t) {
      LOGGER.error("Terminating due to fatal error during setup", t);
      throw t;

    } finally {
      // If we get here it means there was a fatal exception, or the connector is running in distributed
      // or test mode and a graceful shutdown was requested. Don't need the shutdown hook for any of those cases.
      RuntimeHelper.removeShutdownHook(saveCheckpoints);

      checkpointExecutor.shutdown();
      metricReporter.stop();
      dcpClient.disconnect().block();
      workers.close(); // to avoid buffer leak, must close *after* DCP client stops feeding it events
      checkpointExecutor.awaitTermination(10, SECONDS);
      cluster.disconnect();
      env.shutdown(); // can't reuse, because connector config might have different SSL settings next time
    }
  }

  // Give stdout a chance to quiet down so the stack trace on stderr isn't interleaved with stdout.
  MILLISECONDS.sleep(500);
  throw fatalError;
}
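Since run is public, it can also be invoked directly. A minimal sketch mirroring what main does below (the config file name is illustrative, and Duration.ZERO skips the startup quiet period):

ConnectorConfig config = ConnectorConfig.from(new File("example-connector.toml")); // file name is illustrative
PanicButton panicButton = new DefaultPanicButton();
ElasticsearchConnector.run(config, panicButton, Duration.ZERO);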
Use of com.couchbase.client.dcp.Client in project components by Talend.
The class CouchbaseStreamingConnectionTest, method setup:
@Before
public void setup() {
  PowerMockito.mockStatic(Client.class);

  // Stub the fluent builder so every setter returns the builder itself.
  Builder builder = Mockito.mock(Builder.class);
  Mockito.when(builder.connectTimeout(Mockito.anyLong())).thenReturn(builder);
  Mockito.when(builder.hostnames(Mockito.anyString())).thenReturn(builder);
  Mockito.when(builder.bucket(Mockito.anyString())).thenReturn(builder);
  Mockito.when(builder.password(Mockito.anyString())).thenReturn(builder);
  Mockito.when(builder.controlParam(Mockito.any(Names.class), Mockito.any())).thenReturn(builder);
  Mockito.when(builder.bufferAckWatermark(Mockito.anyInt())).thenReturn(builder);

  client = Mockito.mock(Client.class);
  PowerMockito.when(Client.configure()).thenReturn(builder);
  Mockito.when(builder.build()).thenReturn(client);

  streamingConnection = new CouchbaseStreamingConnection("localhost", "", "testPassword");
}
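For the static mock of Client.configure() to work, the test class presumably carries the standard PowerMock annotations; a sketch of the surrounding class declaration (field declarations assumed from the assignments in setup):

@RunWith(PowerMockRunner.class)
@PrepareForTest(Client.class) // required so PowerMockito.mockStatic(Client.class) can intercept Client.configure()
public class CouchbaseStreamingConnectionTest {
  private Client client;
  private CouchbaseStreamingConnection streamingConnection;
  // setup() as shown above
}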
Use of com.couchbase.client.dcp.Client in project couchbase-elasticsearch-connector by couchbase.
The class ElasticsearchConnector, method main:
public static void main(String... args) throws Throwable {
  LOGGER.info("Couchbase Elasticsearch Connector version {}", getVersionString());

  final OptionsParser parser = new OptionsParser();
  final OptionSet options = parser.parse(args);

  final File configFile = options.valueOf(parser.configFile);
  System.out.println("Reading connector configuration from " + configFile.getAbsoluteFile());
  ConnectorConfig config = ConnectorConfig.from(configFile);

  final PanicButton panicButton = new DefaultPanicButton();

  boolean watchK8sReplicas = "true".equals(System.getenv("CBES_K8S_WATCH_REPLICAS"));
  boolean getMemberNumberFromHostname = watchK8sReplicas || "true".equals(System.getenv("CBES_K8S_STATEFUL_SET"));

  if (getMemberNumberFromHostname) {
    int memberNumber = StatefulSetInfo.fromHostname().podOrdinal + 1;
    LOGGER.info("Getting group member number from Kubernetes pod hostname: {}", memberNumber);

    // This is a kludge. The Membership class validates its arguments, so you can't have a Membership
    // of "4 of 1", for example. If we plan to get the group size from the Kubernetes StatefulSet,
    // bypass this validation by temporarily setting the group size to the largest sane value (1024).
    // We'll dial it down to the actual size of the StatefulSet a bit later on.
    int clusterSize = watchK8sReplicas ? 1024 : config.group().staticMembership().getClusterSize();
    config = transformMembership(config, m -> Membership.of(memberNumber, clusterSize));
  }

  KubernetesClient k8sClient = null;
  try {
    if (watchK8sReplicas) {
      k8sClient = new DefaultKubernetesClient();
      LOGGER.info("Activating native Kubernetes integration; connector will use StatefulSet spec"
          + " to determine group size."
          + " This mode requires a Kubernetes service account with 'get', 'watch', and 'list'"
          + " permissions for the StatefulSet.");
      int k8sReplicas = ReplicaChangeWatcher.getReplicasAndPanicOnChange(k8sClient, panicButton);
      config = transformMembership(config, m -> Membership.of(m.getMemberNumber(), k8sReplicas));
    }

    if (watchK8sReplicas || getMemberNumberFromHostname) {
      LOGGER.info("Patched configuration with info from Kubernetes environment; membership = {}", config.group().staticMembership());
    }

    if (config.group().staticMembership().getClusterSize() > 1024) {
      panicButton.panic("Invalid group size configuration; totalMembers must be <= 1024."
          + " Did you forget to set the CBES_TOTAL_MEMBERS environment variable?");
    }

    Duration startupQuietPeriod = watchK8sReplicas ? ReplicaChangeWatcher.startupQuietPeriod() : Duration.ZERO;
    run(config, panicButton, startupQuietPeriod);
  } finally {
    if (k8sClient != null) {
      k8sClient.close(); // so client threads don't prevent app from exiting
    }
  }
}
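The transformMembership helper isn't shown here. From its call sites it takes the current config and a Function<Membership, Membership> and returns a config with the group membership replaced. A hypothetical sketch, assuming ConnectorConfig exposes a copy-with-modified-membership operation (withGroupMembership is invented for illustration):

private static ConnectorConfig transformMembership(ConnectorConfig config, Function<Membership, Membership> transformer) {
  Membership patched = transformer.apply(config.group().staticMembership());
  return config.withGroupMembership(patched); // hypothetical copy method; the real one depends on how ConnectorConfig is built
}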