Use of org.apache.kafka.common.config.ConfigDef in project kafka by apache.
The class SynchronizationTest, method testSimultaneousUpwardAndDownwardDelegating.
// If the test times out, there's a deadlock in the test, but not necessarily in the code under test
@Test(timeout = 15000L)
public void testSimultaneousUpwardAndDownwardDelegating() throws Exception {
    String t1Class = TestPlugins.SAMPLING_CONVERTER;
    // Grab a reference to the target PluginClassLoader before activating breakpoints
    ClassLoader connectorLoader = plugins.delegatingLoader().connectorLoader(t1Class);

    // THREAD 1: loads a class by delegating downward starting from the DelegatingClassLoader
    // The DelegatingClassLoader breakpoint will only trigger on this thread
    dclBreakpoint.set(t1Class::equals);
    Runnable thread1 = () -> {
        // Use the DelegatingClassLoader as the current context loader
        ClassLoader savedLoader = Plugins.compareAndSwapLoaders(plugins.delegatingLoader());
        // Load an isolated plugin from the delegating classloader, which will
        // 1. Enter the DelegatingClassLoader
        // 2. Wait on dclBreakpoint for the test to continue
        // 3. Enter the PluginClassLoader
        // 4. Load the isolated plugin class and return
        new AbstractConfig(
            new ConfigDef().define("a.class", Type.CLASS, Importance.HIGH, ""),
            Collections.singletonMap("a.class", t1Class));
        Plugins.compareAndSwapLoaders(savedLoader);
    };

    // THREAD 2: loads a class by delegating upward starting from the PluginClassLoader
    String t2Class = JsonConverter.class.getName();
    // The PluginClassLoader breakpoint will only trigger on this thread
    pclBreakpoint.set(t2Class::equals);
    Runnable thread2 = () -> {
        // Use the PluginClassLoader as the current context loader
        ClassLoader savedLoader = Plugins.compareAndSwapLoaders(connectorLoader);
        // Load a non-isolated class from the plugin classloader, which will
        // 1. Enter the PluginClassLoader
        // 2. Wait on pclBreakpoint for the test to continue
        // 3. Enter the DelegatingClassLoader
        // 4. Load the non-isolated class and return
        new AbstractConfig(
            new ConfigDef().define("a.class", Type.CLASS, Importance.HIGH, ""),
            Collections.singletonMap("a.class", t2Class));
        Plugins.compareAndSwapLoaders(savedLoader);
    };

    // STEP 1: Have T1 enter the DelegatingClassLoader and pause
    exec.submit(thread1);
    // T1 enters ConfigDef::parseType
    // T1 enters DelegatingClassLoader::loadClass
    dclBreakpoint.testAwait();
    dclBreakpoint.testAwait();
    // T1 exits DelegatingClassLoader::loadClass
    // T1 enters Class::forName
    // T1 enters DelegatingClassLoader::loadClass
    dclBreakpoint.testAwait();
    // T1 waits in the delegating classloader while we set up the other thread
    dumpThreads("step 1, T1 waiting in DelegatingClassLoader");

    // STEP 2: Have T2 enter the PluginClassLoader and delegate upward to the DelegatingClassLoader
    exec.submit(thread2);
    // T2 enters PluginClassLoader::loadClass
    pclBreakpoint.testAwait();
    // T2 falls through to ClassLoader::loadClass
    pclBreakpoint.testAwait();
    // T2 delegates upward to DelegatingClassLoader::loadClass
    // T2 enters ClassLoader::loadClass and loads the class from the parent (CLASSPATH)
    dumpThreads("step 2, T2 entered DelegatingClassLoader and is loading class from parent");

    // STEP 3: Resume T1 and have it enter the PluginClassLoader
    dclBreakpoint.testAwait();
    // T1 enters PluginClassLoader::loadClass
    dumpThreads("step 3, T1 entered PluginClassLoader and is/was loading class from isolated jar");

    // If the DelegatingClassLoader and the PluginClassLoader are both not parallel capable, this test will deadlock.
    // Otherwise, T1 should be able to complete its load from the PluginClassLoader concurrently with T2,
    // before releasing the DelegatingClassLoader and allowing T2 to complete.
    // As the DelegatingClassLoader is not parallel capable, it must be the case that the PluginClassLoader is.
    assertNoDeadlocks();
}
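
The assertion above hinges on parallel-capable classloading: a parallel-capable loader locks per class name rather than on the loader instance, so T1 and T2 can hold different locks at the same time. Below is a minimal sketch of how a loader opts in, using a hypothetical child-first loader (this is not Kafka's actual PluginClassLoader, whose delegation logic is more involved):

import java.net.URL;
import java.net.URLClassLoader;

// Hypothetical loader illustrating parallel-capable registration.
public class IsolatedPluginClassLoader extends URLClassLoader {

    static {
        // Opt in to per-class-name locking (Java 7+). Each subclass must
        // register itself, even if its superclass is already registered.
        ClassLoader.registerAsParallelCapable();
    }

    public IsolatedPluginClassLoader(URL[] urls, ClassLoader parent) {
        super(urls, parent);
    }

    @Override
    protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
        // For a parallel-capable loader, getClassLoadingLock returns a
        // per-name lock object instead of `this`.
        synchronized (getClassLoadingLock(name)) {
            Class<?> klass = findLoadedClass(name);
            if (klass == null) {
                try {
                    // Child-first: try the isolated jars before the parent
                    klass = findClass(name);
                } catch (ClassNotFoundException e) {
                    // Fall back to the normal parent delegation path
                    klass = super.loadClass(name, false);
                }
            }
            if (resolve) {
                resolveClass(klass);
            }
            return klass;
        }
    }
}

Without the registerAsParallelCapable() call in the static initializer, both loads would synchronize on the loader instance itself, reproducing exactly the cross-loader deadlock the test describes.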
Use of org.apache.kafka.common.config.ConfigDef in project kafka by apache.
The class PluginsTest, method newPluginShouldServiceLoadWithPluginClassLoader.
@Test
public void newPluginShouldServiceLoadWithPluginClassLoader() {
    TestPlugins.assertAvailable();
    Converter plugin = plugins.newPlugin(
        TestPlugins.SERVICE_LOADER,
        new AbstractConfig(new ConfigDef(), Collections.emptyMap()),
        Converter.class);
    assertInstanceOf(SamplingTestPlugin.class, plugin, "Cannot collect samples");
    Map<String, SamplingTestPlugin> samples = ((SamplingTestPlugin) plugin).flatten();
    // Assert that the service-loaded subclass is observed during both static and dynamic initialization
    assertTrue(samples.containsKey("ServiceLoadedSubclass.static"));
    assertTrue(samples.containsKey("ServiceLoadedSubclass.dynamic"));
    assertPluginClassLoaderAlwaysActive(samples);
}
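
The test verifies that ServiceLoader discovery runs with the PluginClassLoader active. A minimal sketch of that pattern, with a hypothetical service interface and helper (not the Plugins implementation itself):

import java.util.ServiceLoader;

// Sketch of service loading against a specific classloader, the mechanism
// this test exercises. The service interface is assumed to have provider
// entries under META-INF/services/ inside the plugin jar.
public final class ServiceLoadDemo {
    public static <T> T loadFirst(Class<T> service, ClassLoader pluginLoader) {
        ClassLoader saved = Thread.currentThread().getContextClassLoader();
        Thread.currentThread().setContextClassLoader(pluginLoader);
        try {
            // Passing the plugin loader explicitly makes provider lookup
            // see the isolated jar's META-INF/services entries.
            for (T provider : ServiceLoader.load(service, pluginLoader)) {
                return provider; // first provider wins in this sketch
            }
            throw new IllegalStateException("no provider for " + service.getName());
        } finally {
            Thread.currentThread().setContextClassLoader(saved);
        }
    }
}

Both steps matter: passing the loader to ServiceLoader.load drives provider lookup in the isolated jar, while swapping the context classloader ensures any reflective loading the providers do themselves also resolves against it.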
Use of org.apache.kafka.common.config.ConfigDef in project debezium by debezium.
The class PostgresConnectorIT, method validateFieldDef.
private void validateFieldDef(Field expected) {
    ConfigDef configDef = connector.config();
    assertThat(configDef.names()).contains(expected.name());
    ConfigDef.ConfigKey key = configDef.configKeys().get(expected.name());
    assertThat(key).isNotNull();
    assertThat(key.name).isEqualTo(expected.name());
    assertThat(key.displayName).isEqualTo(expected.displayName());
    assertThat(key.importance).isEqualTo(expected.importance());
    assertThat(key.documentation).isEqualTo(expected.description());
    assertThat(key.type).isEqualTo(expected.type());
    assertThat(key.defaultValue).isEqualTo(expected.defaultValue());
    assertThat(key.dependents).isEqualTo(expected.dependents());
    assertThat(key.width).isNotNull();
    assertThat(key.group).isNotNull();
    assertThat(key.orderInGroup).isGreaterThan(0);
    assertThat(key.validator).isNull();
    assertThat(key.recommender).isNull();
}
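
Every ConfigKey attribute asserted here is supplied at definition time through ConfigDef.define. A sketch using Kafka's ten-argument overload, with a made-up key name and values for illustration:

import java.util.Collections;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.ConfigDef.Width;

public final class FieldDefExample {
    public static void main(String[] args) {
        // This overload populates each attribute the test checks; validator
        // and recommender remain null because it does not accept them.
        ConfigDef configDef = new ConfigDef().define(
                "database.hostname",                 // key.name (hypothetical)
                Type.STRING,                         // key.type
                "localhost",                         // key.defaultValue
                Importance.HIGH,                     // key.importance
                "Hostname of the database server",   // key.documentation
                "MySQL",                             // key.group
                1,                                   // key.orderInGroup (> 0, as asserted)
                Width.MEDIUM,                        // key.width
                "Hostname",                          // key.displayName
                Collections.singletonList("database.port")); // key.dependents
        System.out.println(configDef.configKeys().get("database.hostname").displayName);
    }
}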
Use of org.apache.kafka.common.config.ConfigDef in project debezium by debezium.
The class MySqlConnectorConfig, method configDef.
protected static ConfigDef configDef() {
    ConfigDef config = new ConfigDef();
    Field.group(config, "MySQL",
            HOSTNAME, PORT, USER, PASSWORD, SERVER_NAME, SERVER_ID,
            SSL_MODE, SSL_KEYSTORE, SSL_KEYSTORE_PASSWORD,
            SSL_TRUSTSTORE, SSL_TRUSTSTORE_PASSWORD, JDBC_DRIVER);
    Field.group(config, "History Storage",
            KafkaDatabaseHistory.BOOTSTRAP_SERVERS, KafkaDatabaseHistory.TOPIC,
            KafkaDatabaseHistory.RECOVERY_POLL_ATTEMPTS, KafkaDatabaseHistory.RECOVERY_POLL_INTERVAL_MS,
            DATABASE_HISTORY, DatabaseHistory.SKIP_UNPARSEABLE_DDL_STATEMENTS,
            DatabaseHistory.DDL_FILTER, DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL);
    Field.group(config, "Events",
            INCLUDE_SCHEMA_CHANGES, TABLES_IGNORE_BUILTIN,
            DATABASE_WHITELIST, TABLE_WHITELIST, COLUMN_BLACKLIST, TABLE_BLACKLIST, DATABASE_BLACKLIST,
            GTID_SOURCE_INCLUDES, GTID_SOURCE_EXCLUDES, GTID_SOURCE_FILTER_DML_EVENTS,
            BUFFER_SIZE_FOR_BINLOG_READER,
            Heartbeat.HEARTBEAT_INTERVAL, Heartbeat.HEARTBEAT_TOPICS_PREFIX,
            EVENT_DESERIALIZATION_FAILURE_HANDLING_MODE, INCONSISTENT_SCHEMA_HANDLING_MODE,
            CommonConnectorConfig.TOMBSTONES_ON_DELETE);
    Field.group(config, "Connector",
            CONNECTION_TIMEOUT_MS, KEEP_ALIVE, KEEP_ALIVE_INTERVAL_MS,
            CommonConnectorConfig.MAX_QUEUE_SIZE, CommonConnectorConfig.MAX_BATCH_SIZE,
            CommonConnectorConfig.POLL_INTERVAL_MS,
            SNAPSHOT_MODE, SNAPSHOT_LOCKING_MODE, SNAPSHOT_MINIMAL_LOCKING,
            TIME_PRECISION_MODE, DECIMAL_HANDLING_MODE, BIGINT_UNSIGNED_HANDLING_MODE);
    return config;
}
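
Field.group is Debezium's helper for registering a batch of fields under one ConfigDef group. As a rough sketch of the idea only (not Debezium's actual implementation; this version takes plain names rather than Field objects, and the defaults are invented):

import java.util.Collections;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.ConfigDef.Width;

// Illustrative grouping helper: register each field under a shared group
// name with an increasing orderInGroup.
final class GroupingSketch {
    static void group(ConfigDef configDef, String groupName, String... fieldNames) {
        int order = 1;
        for (String name : fieldNames) {
            configDef.define(name, Type.STRING, "",
                    Importance.MEDIUM, "doc for " + name,
                    groupName, order++, Width.MEDIUM, name,
                    Collections.<String>emptyList());
        }
    }
}

Populating group and orderInGroup this way is what allows config-driven UIs, such as those built on Kafka Connect's config validation endpoint, to render related settings together in a stable order.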