Use of io.strimzi.api.kafka.model.InlineLogging in the Strimzi project — class LoggingChangeST, method testDynamicallySetUnknownKafkaLoggerValue.
@ParallelNamespaceTest
void testDynamicallySetUnknownKafkaLoggerValue(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1).build());

    // Snapshot the broker pods so we can later prove that none of them restarted.
    final Map<String, String> brokerPodsBefore = PodUtils.podSnapshot(namespaceName, kafkaSelector);

    // Configure the root logger with an invalid level ("PAPRIKA"); the operator is
    // expected to ignore it without rolling the Kafka pods.
    final InlineLogging invalidLevelLogging = new InlineLogging();
    invalidLevelLogging.setLoggers(Collections.singletonMap("kafka.root.logger.level", "PAPRIKA"));

    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName,
        k -> k.getSpec().getKafka().setLogging(invalidLevelLogging), namespaceName);

    // No rolling update must happen, and the pod snapshot must be unchanged.
    RollingUpdateUtils.waitForNoRollingUpdate(namespaceName, kafkaSelector, brokerPodsBefore);
    assertThat("Kafka pod should not roll",
        RollingUpdateUtils.componentHasRolled(namespaceName, kafkaSelector, brokerPodsBefore), is(false));
}
Use of io.strimzi.api.kafka.model.InlineLogging in the Strimzi project — class LoggingChangeST, method testLoggingHierarchy.
@ParallelNamespaceTest
void testLoggingHierarchy(ExtensionContext extensionContext) {
    TestStorage testStorage = new TestStorage(extensionContext);

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3).build());

    // Connect cluster with the file plugin; connectors are managed through KafkaConnector CRs.
    KafkaConnect connect = KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), testStorage.getClusterName(), 1)
        .editMetadata()
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .editOrNewSpec()
            .addToConfig("key.converter.schemas.enable", false)
            .addToConfig("value.converter.schemas.enable", false)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
        .endSpec()
        .build();

    resourceManager.createResource(extensionContext, connect,
        ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build(),
        KafkaConnectorTemplates.defaultKafkaConnector(testStorage.getClusterName(), testStorage.getClusterName(), 1).build());

    String connectorClassName = "org.apache.kafka.connect.file.FileStreamSourceConnector";
    final String scraperPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(testStorage.getNamespaceName(), testStorage.getScraperName()).get(0).getMetadata().getName();

    LOGGER.info("Changing rootLogger level in KafkaConnector to ERROR with inline logging");
    InlineLogging inlineError = new InlineLogging();
    inlineError.setLoggers(Collections.singletonMap("log4j.logger." + connectorClassName, "ERROR"));
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(testStorage.getClusterName(), kc -> kc.getSpec().setLogging(inlineError), testStorage.getNamespaceName());

    LOGGER.info("Waiting for Connect API loggers will contain desired settings");
    // Poll the Connect REST API until the connector's logger reports the new level.
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT,
        () -> cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPod(scraperPodName, "curl",
            "http://" + KafkaConnectResources.serviceName(testStorage.getClusterName()) + ":8083/admin/loggers/" + connectorClassName).out().contains("ERROR"));

    LOGGER.info("Restarting Kafka connector {} with class name {}", testStorage.getClusterName(), connectorClassName);
    cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPod(scraperPodName, "curl", "-X", "POST",
        "http://" + KafkaConnectResources.serviceName(testStorage.getClusterName()) + ":8083/connectors/" + testStorage.getClusterName() + "/restart");
    KafkaConnectorUtils.waitForConnectorWorkerStatus(testStorage.getNamespaceName(), scraperPodName, testStorage.getClusterName(), testStorage.getClusterName(), "RUNNING");

    LOGGER.info("Checking that logger is same for connector with class name {}", connectorClassName);
    // The explicit connector-level logger must survive the connector restart.
    String connectorLogger = cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPod(scraperPodName, "curl",
        "http://" + KafkaConnectResources.serviceName(testStorage.getClusterName()) + ":8083/admin/loggers/" + connectorClassName).out();
    assertTrue(connectorLogger.contains("ERROR"));

    LOGGER.info("Changing KafkaConnect's root logger to WARN, KafkaConnector: {} shouldn't inherit it", testStorage.getClusterName());
    InlineLogging inlineWarn = new InlineLogging();
    // FIX: the previous extra setLoggers(Collections.singletonMap(...)) call was dead code —
    // it was immediately overwritten by this call. Set both loggers in one map: WARN for the
    // root logger plus an explicit ERROR for the connector logger so it does not inherit WARN.
    inlineWarn.setLoggers(Map.of("connect.root.logger.level", "WARN", "log4j.logger." + connectorClassName, "ERROR"));
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(testStorage.getClusterName(), kc -> kc.getSpec().setLogging(inlineWarn), testStorage.getNamespaceName());

    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT,
        () -> cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPod(scraperPodName, "curl",
            "http://" + KafkaConnectResources.serviceName(testStorage.getClusterName()) + ":8083/admin/loggers/root").out().contains("WARN"));

    LOGGER.info("Checking if KafkaConnector {} doesn't inherit logger from KafkaConnect", connectorClassName);
    // The connector-level logger must stay at ERROR despite the root logger change.
    KafkaConnectorUtils.loggerStabilityWait(testStorage.getNamespaceName(), testStorage.getClusterName(), scraperPodName, "ERROR", connectorClassName);
}
Use of io.strimzi.api.kafka.model.InlineLogging in the Strimzi project — class LoggingChangeST, method testDynamicallySetUnknownKafkaLogger.
@ParallelNamespaceTest
void testDynamicallySetUnknownKafkaLogger(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1).build());

    // Remember the current broker pods — adding a previously unknown logger is
    // expected to trigger a rolling update of the Kafka cluster.
    final Map<String, String> brokerPodsBefore = PodUtils.podSnapshot(namespaceName, kafkaSelector);

    final InlineLogging unknownLoggerConfig = new InlineLogging();
    unknownLoggerConfig.setLoggers(Collections.singletonMap("log4j.logger.paprika", "INFO"));

    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName,
        k -> k.getSpec().getKafka().setLogging(unknownLoggerConfig), namespaceName);

    RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, brokerPodsBefore);

    // After the roll, broker 0 should report the new logger level via kafka-configs.sh.
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT,
        () -> cmdKubeClient().namespace(namespaceName)
            .execInPodContainer(Level.TRACE, KafkaResources.kafkaPodName(clusterName, 0), "kafka",
                "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type broker-loggers --entity-name 0")
            .out().contains("paprika=INFO"));
}
Use of io.strimzi.api.kafka.model.InlineLogging in the Strimzi project — class LogSettingST, method testCruiseControlLogChange.
@IsolatedTest("Updating shared Kafka")
// This test might be flaky, as it gets real logs from CruiseControl pod
void testCruiseControlLogChange(ExtensionContext extensionContext) {
    // " DEBUG " keeps the surrounding spaces so log-content matching hits the
    // level column of a log line rather than arbitrary substrings.
    final String debugMarker = " DEBUG ";
    String ccPodName = PodUtils.getPodNameByPrefix(namespace, LOG_SETTING_CLUSTER_NAME + "-" + Constants.CRUISE_CONTROL_CONTAINER_NAME);

    LOGGER.info("Check that default/actual root logging level is info");
    // Pull the value from the "rootlogger.level=<LEVEL>" line of the CC config file.
    String rootLevel = cmdKubeClient().namespace(namespace)
        .execInPod(ccPodName, "grep", "-i", "rootlogger.level", Constants.CRUISE_CONTROL_LOG_FILE_PATH)
        .out().trim().split("=")[1];
    assertThat(rootLevel.toUpperCase(Locale.ENGLISH), is(not(debugMarker.strip())));

    LOGGER.info("Check logs in CruiseControl - make sure no DEBUG is found there.");
    String recentLog = StUtils.getLogFromPodByTime(namespace, ccPodName, Constants.CRUISE_CONTROL_CONTAINER_NAME, "20s");
    assertThat(recentLog.toUpperCase(Locale.ENGLISH), not(containsString(debugMarker)));

    // Switch the Cruise Control root logger to DEBUG via inline logging in the Kafka CR.
    InlineLogging debugLogging = new InlineLogging();
    debugLogging.setLoggers(Collections.singletonMap("rootLogger.level", debugMarker.strip()));
    KafkaResource.replaceKafkaResourceInSpecificNamespace(LOG_SETTING_CLUSTER_NAME,
        kafka -> kafka.getSpec().getCruiseControl().setLogging(debugLogging), namespace);

    LOGGER.info("Wait for change of root logger in {}.", ccPodName);
    // First wait for the config file inside the pod to pick up the new level...
    TestUtils.waitFor("Waiting for log to be changed", CC_LOG_CONFIG_RELOAD, CO_OPERATION_TIMEOUT_MEDIUM, () -> {
        String configLine = StUtils.getLineFromPodContainer(namespace, ccPodName, null, Constants.CRUISE_CONTROL_LOG_FILE_PATH, "rootlogger.level");
        return configLine.toUpperCase(Locale.ENGLISH).contains(debugMarker.strip());
    });

    LOGGER.info("Check cruise control logs in pod {} and it's container {} .", ccPodName, Constants.CRUISE_CONTROL_CONTAINER_NAME);
    // ...then wait until an actual DEBUG line shows up in the container log.
    TestUtils.waitFor("Wait for debug log line to show in logs", CC_LOG_CONFIG_RELOAD, TIMEOUT_FOR_LOG, () -> {
        String podLog = StUtils.getLogFromPodByTime(namespace, ccPodName, Constants.CRUISE_CONTROL_CONTAINER_NAME, "20s");
        return podLog.toUpperCase(Locale.ENGLISH).contains(debugMarker);
    });
}
Use of io.strimzi.api.kafka.model.InlineLogging in the Strimzi project — class CruiseControl, method fromCrd.
/**
 * Creates an instance of the Cruise Control model from the custom resource. When Cruise Control is not enabled
 * (i.e. {@code spec.cruiseControl} is not set in the Kafka CR), this will return null.
 *
 * @param reconciliation Reconciliation marker used for logging
 * @param kafkaCr The Kafka custom resource
 * @param versions Supported Kafka versions
 * @param storage The actual storage configuration used by the cluster. This might differ from the storage
 * configuration configured by the user in the Kafka CR due to unallowed changes.
 *
 * @return Instance of the Cruise Control model, or null when {@code spec.cruiseControl} is absent
 */
@SuppressWarnings({ "checkstyle:NPathComplexity", "checkstyle:CyclomaticComplexity" })
public static CruiseControl fromCrd(Reconciliation reconciliation, Kafka kafkaCr, KafkaVersion.Lookup versions, Storage storage) {
    CruiseControlSpec ccSpec = kafkaCr.getSpec().getCruiseControl();
    KafkaClusterSpec kafkaClusterSpec = kafkaCr.getSpec().getKafka();
    if (ccSpec != null) {
        CruiseControl cruiseControl = new CruiseControl(reconciliation, kafkaCr);
        cruiseControl.setReplicas(DEFAULT_REPLICAS);
        // Image resolution order: CR spec -> STRIMZI_DEFAULT_CRUISE_CONTROL_IMAGE env var
        // -> version-specific Kafka image for the default Kafka version.
        String image = ccSpec.getImage();
        if (image == null) {
            image = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_CRUISE_CONTROL_IMAGE, versions.kafkaImage(kafkaClusterSpec.getImage(), versions.defaultVersion().version()));
        }
        cruiseControl.setImage(image);
        // updateConfiguration must run before getConfiguration() — it populates the
        // CruiseControlConfiguration that the SSL/auth flags below are read from.
        cruiseControl.updateConfiguration(ccSpec);
        CruiseControlConfiguration ccConfiguration = (CruiseControlConfiguration) cruiseControl.getConfiguration();
        cruiseControl.sslEnabled = ccConfiguration.isApiSslEnabled();
        cruiseControl.authEnabled = ccConfiguration.isApiAuthEnabled();
        // Mirror the Kafka cluster's min.insync.replicas setting, if configured.
        KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
        if (configuration.getConfigOption(MIN_INSYNC_REPLICAS) != null) {
            cruiseControl.minInsyncReplicas = configuration.getConfigOption(MIN_INSYNC_REPLICAS);
        }
        // To avoid illegal storage configurations provided by the user,
        // we rely on the storage configuration provided by the KafkaAssemblyOperator
        cruiseControl.capacity = new Capacity(kafkaCr.getSpec(), storage);
        // Parse different types of metrics configurations
        ModelUtils.parseMetrics(cruiseControl, ccSpec);
        // Probes are optional in the CR; keep the model defaults when not specified.
        if (ccSpec.getReadinessProbe() != null) {
            cruiseControl.setReadinessProbe(ccSpec.getReadinessProbe());
        }
        if (ccSpec.getLivenessProbe() != null) {
            cruiseControl.setLivenessProbe(ccSpec.getLivenessProbe());
        }
        // Fall back to an empty InlineLogging when no logging is configured in the CR.
        Logging logging = ccSpec.getLogging();
        cruiseControl.setLogging(logging == null ? new InlineLogging() : logging);
        cruiseControl.setGcLoggingEnabled(ccSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : ccSpec.getJvmOptions().isGcLoggingEnabled());
        cruiseControl.setJvmOptions(ccSpec.getJvmOptions());
        cruiseControl.setResources(ccSpec.getResources());
        cruiseControl.setOwnerReference(kafkaCr);
        // Apply optional template customizations (pod, service, deployment metadata,
        // container env/security context, service account, PDB).
        if (ccSpec.getTemplate() != null) {
            CruiseControlTemplate template = ccSpec.getTemplate();
            ModelUtils.parsePodTemplate(cruiseControl, template.getPod());
            ModelUtils.parseInternalServiceTemplate(cruiseControl, template.getApiService());
            if (template.getDeployment() != null && template.getDeployment().getMetadata() != null) {
                cruiseControl.templateDeploymentLabels = template.getDeployment().getMetadata().getLabels();
                cruiseControl.templateDeploymentAnnotations = template.getDeployment().getMetadata().getAnnotations();
            }
            if (template.getCruiseControlContainer() != null && template.getCruiseControlContainer().getEnv() != null) {
                cruiseControl.templateCruiseControlContainerEnvVars = template.getCruiseControlContainer().getEnv();
            }
            if (template.getCruiseControlContainer() != null && template.getCruiseControlContainer().getSecurityContext() != null) {
                cruiseControl.templateCruiseControlContainerSecurityContext = template.getCruiseControlContainer().getSecurityContext();
            }
            if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
                cruiseControl.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
                cruiseControl.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
            }
            ModelUtils.parsePodDisruptionBudgetTemplate(cruiseControl, template.getPodDisruptionBudget());
        }
        // Default pod labels are merged in on top of any template-provided labels.
        cruiseControl.templatePodLabels = Util.mergeLabelsOrAnnotations(cruiseControl.templatePodLabels, DEFAULT_POD_LABELS);

        return cruiseControl;
    } else {
        return null;
    }
}
Aggregations