
Example 16 with SnapshotRegistry

Use of org.apache.kafka.timeline.SnapshotRegistry in the apache/kafka project.

From the class ConfigurationControlManagerTest, method testIncrementalAlterConfigsWithPolicy.

@Test
public void testIncrementalAlterConfigsWithPolicy() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    // The mock policy expects MYTOPIC to be validated with an empty config map and
    // BROKER0 with foo.bar=123 and quux=456.
    MockAlterConfigsPolicy policy = new MockAlterConfigsPolicy(asList(
        new RequestMetadata(MYTOPIC, Collections.emptyMap()),
        new RequestMetadata(BROKER0, toMap(entry("foo.bar", "123"), entry("quux", "456")))));
    ConfigurationControlManager manager = new ConfigurationControlManager(new LogContext(),
        snapshotRegistry, CONFIGS, Optional.of(policy), ConfigurationValidator.NO_OP);
    // The broker change passes the policy and yields two ConfigRecords, while the topic
    // change is rejected with POLICY_VIOLATION because the policy saw foo.bar=123 where
    // it expected an empty config map.
    assertEquals(ControllerResult.atomicOf(asList(
        new ApiMessageAndVersion(new ConfigRecord().
            setResourceType(BROKER.id()).setResourceName("0").
            setName("foo.bar").setValue("123"), (short) 0),
        new ApiMessageAndVersion(new ConfigRecord().
            setResourceType(BROKER.id()).setResourceName("0").
            setName("quux").setValue("456"), (short) 0)),
        toMap(entry(MYTOPIC, new ApiError(Errors.POLICY_VIOLATION,
            "Expected: AlterConfigPolicy.RequestMetadata(resource=ConfigResource(" +
            "type=TOPIC, name='mytopic'), configs={}). Got: " +
            "AlterConfigPolicy.RequestMetadata(resource=ConfigResource(" +
            "type=TOPIC, name='mytopic'), configs={foo.bar=123})")),
            entry(BROKER0, ApiError.NONE))),
        manager.incrementalAlterConfigs(toMap(
            entry(MYTOPIC, toMap(entry("foo.bar", entry(SET, "123")))),
            entry(BROKER0, toMap(
                entry("foo.bar", entry(SET, "123")),
                entry("quux", entry(SET, "456"))))),
            NO_OP_EXISTENCE_CHECKER));
}
Also used : ConfigRecord(org.apache.kafka.common.metadata.ConfigRecord) SnapshotRegistry(org.apache.kafka.timeline.SnapshotRegistry) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) LogContext(org.apache.kafka.common.utils.LogContext) ApiError(org.apache.kafka.common.requests.ApiError) RequestMetadata(org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata) Test(org.junit.jupiter.api.Test)
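
The MockAlterConfigsPolicy above stands in for the pluggable org.apache.kafka.server.policy.AlterConfigPolicy interface, which the controller consults before applying each change. As a rough sketch of what a real policy might look like (the class name RejectFooBarPolicy and the rejected key are invented for illustration):

import java.util.Map;
import org.apache.kafka.common.errors.PolicyViolationException;
import org.apache.kafka.server.policy.AlterConfigPolicy;

public class RejectFooBarPolicy implements AlterConfigPolicy {
    @Override
    public void configure(Map<String, ?> configs) {
        // No configuration needed for this sketch.
    }

    @Override
    public void validate(RequestMetadata requestMetadata) throws PolicyViolationException {
        // Reject any request that tries to change "foo.bar"; everything else passes.
        if (requestMetadata.configs().containsKey("foo.bar")) {
            throw new PolicyViolationException("foo.bar may not be changed by clients");
        }
    }

    @Override
    public void close() {
        // Nothing to clean up.
    }
}

A policy like this is enabled via the alter.config.policy.class.name configuration.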

Example 17 with SnapshotRegistry

Use of org.apache.kafka.timeline.SnapshotRegistry in the apache/kafka project.

From the class ConfigurationControlManagerTest, method testIncrementalAlterConfigs.

@Test
public void testIncrementalAlterConfigs() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ConfigurationControlManager manager = new ConfigurationControlManager(new LogContext(),
        snapshotRegistry, CONFIGS, Optional.empty(), ConfigurationValidator.NO_OP);
    // SUBTRACT is only valid for LIST-typed configs, so the broker change fails with
    // INVALID_CONFIG; the APPEND to the topic config "abc" succeeds.
    ControllerResult<Map<ConfigResource, ApiError>> result = manager.incrementalAlterConfigs(toMap(
        entry(BROKER0, toMap(
            entry("baz", entry(SUBTRACT, "abc")),
            entry("quux", entry(SET, "abc")))),
        entry(MYTOPIC, toMap(entry("abc", entry(APPEND, "123"))))),
        NO_OP_EXISTENCE_CHECKER);
    assertEquals(ControllerResult.atomicOf(Collections.singletonList(
        new ApiMessageAndVersion(new ConfigRecord().
            setResourceType(TOPIC.id()).setResourceName("mytopic").
            setName("abc").setValue("123"), (short) 0)),
        toMap(entry(BROKER0, new ApiError(Errors.INVALID_CONFIG,
            "Can't SUBTRACT to key baz because its type is not LIST.")),
            entry(MYTOPIC, ApiError.NONE))), result);
    RecordTestUtils.replayAll(manager, result.records());
    // After the records are replayed, a DELETE removes the config by emitting a record
    // with a null value.
    assertEquals(ControllerResult.atomicOf(Collections.singletonList(
        new ApiMessageAndVersion(new ConfigRecord().
            setResourceType(TOPIC.id()).setResourceName("mytopic").
            setName("abc").setValue(null), (short) 0)),
        toMap(entry(MYTOPIC, ApiError.NONE))),
        manager.incrementalAlterConfigs(toMap(
            entry(MYTOPIC, toMap(entry("abc", entry(DELETE, "xyz"))))),
            NO_OP_EXISTENCE_CHECKER));
}
Also used : ConfigRecord(org.apache.kafka.common.metadata.ConfigRecord) SnapshotRegistry(org.apache.kafka.timeline.SnapshotRegistry) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) LogContext(org.apache.kafka.common.utils.LogContext) ApiError(org.apache.kafka.common.requests.ApiError) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) Test(org.junit.jupiter.api.Test)
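
The SET, APPEND, SUBTRACT, and DELETE operations exercised here are the same operation types a client submits through the public Admin API. A minimal client-side sketch, assuming a reachable cluster at localhost:9092 and a topic named "mytopic" (both illustrative), might look like this:

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class IncrementalAlterExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "mytopic");
            // APPEND adds a value to a LIST-typed config; DELETE clears a config entirely,
            // which mirrors the null-valued ConfigRecord the controller emits above.
            Collection<AlterConfigOp> ops = Arrays.asList(
                new AlterConfigOp(new ConfigEntry("cleanup.policy", "compact"),
                    AlterConfigOp.OpType.APPEND),
                new AlterConfigOp(new ConfigEntry("retention.ms", ""),
                    AlterConfigOp.OpType.DELETE));
            admin.incrementalAlterConfigs(Collections.singletonMap(topic, ops)).all().get();
        }
    }
}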

Example 18 with SnapshotRegistry

Use of org.apache.kafka.timeline.SnapshotRegistry in the apache/kafka project.

From the class ConfigurationControlManagerTest, method testLegacyAlterConfigs.

@Test
public void testLegacyAlterConfigs() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ConfigurationControlManager manager = new ConfigurationControlManager(new LogContext(),
        snapshotRegistry, CONFIGS, Optional.empty(), ConfigurationValidator.NO_OP);
    List<ApiMessageAndVersion> expectedRecords1 = asList(
        new ApiMessageAndVersion(new ConfigRecord().
            setResourceType(TOPIC.id()).setResourceName("mytopic").
            setName("abc").setValue("456"), (short) 0),
        new ApiMessageAndVersion(new ConfigRecord().
            setResourceType(TOPIC.id()).setResourceName("mytopic").
            setName("def").setValue("901"), (short) 0));
    assertEquals(ControllerResult.atomicOf(expectedRecords1, toMap(entry(MYTOPIC, ApiError.NONE))),
        manager.legacyAlterConfigs(toMap(entry(MYTOPIC,
            toMap(entry("abc", "456"), entry("def", "901")))), NO_OP_EXISTENCE_CHECKER));
    for (ApiMessageAndVersion message : expectedRecords1) {
        manager.replay((ConfigRecord) message.message());
    }
    // Legacy (non-incremental) alter configs replaces the whole config set, so resubmitting
    // only "def" implicitly deletes "abc" by writing a record with a null value.
    assertEquals(ControllerResult.atomicOf(asList(
        new ApiMessageAndVersion(new ConfigRecord().
            setResourceType(TOPIC.id()).setResourceName("mytopic").
            setName("abc").setValue(null), (short) 0)),
        toMap(entry(MYTOPIC, ApiError.NONE))),
        manager.legacyAlterConfigs(toMap(entry(MYTOPIC, toMap(entry("def", "901")))),
            NO_OP_EXISTENCE_CHECKER));
}
Also used : ConfigRecord(org.apache.kafka.common.metadata.ConfigRecord) SnapshotRegistry(org.apache.kafka.timeline.SnapshotRegistry) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) LogContext(org.apache.kafka.common.utils.LogContext) Test(org.junit.jupiter.api.Test)

Example 19 with SnapshotRegistry

Use of org.apache.kafka.timeline.SnapshotRegistry in the apache/kafka project.

From the class ConfigurationControlManagerTest, method testIsSplittable.

@Test
public void testIsSplittable() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ConfigurationControlManager manager = new ConfigurationControlManager(new LogContext(), snapshotRegistry, CONFIGS, Optional.empty(), ConfigurationValidator.NO_OP);
    assertTrue(manager.isSplittable(BROKER, "foo.bar"));
    assertFalse(manager.isSplittable(BROKER, "baz"));
    assertFalse(manager.isSplittable(BROKER, "foo.baz.quux"));
    assertFalse(manager.isSplittable(TOPIC, "baz"));
    assertTrue(manager.isSplittable(TOPIC, "abc"));
}
Also used : SnapshotRegistry(org.apache.kafka.timeline.SnapshotRegistry) LogContext(org.apache.kafka.common.utils.LogContext) Test(org.junit.jupiter.api.Test)
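
isSplittable reports whether a key is LIST-typed and can therefore be APPENDed to or SUBTRACTed from. The CONFIGS fixture itself is not shown on this page, but a ConfigDef consistent with the assertions above might be defined roughly like this (a sketch, not the test's actual definition):

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;

public class ExampleConfigDefs {
    // LIST-typed keys such as "foo.bar" and "abc" are splittable;
    // STRING-typed keys such as "baz" are not.
    public static final ConfigDef BROKER_CONFIGS = new ConfigDef().
        define("foo.bar", Type.LIST, Importance.HIGH, "a list config").
        define("baz", Type.STRING, Importance.HIGH, "a string config");

    public static final ConfigDef TOPIC_CONFIGS = new ConfigDef().
        define("abc", Type.LIST, Importance.HIGH, "a list config").
        define("baz", Type.STRING, Importance.HIGH, "a string config");
}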

Example 20 with SnapshotRegistry

Use of org.apache.kafka.timeline.SnapshotRegistry in the apache/kafka project.

From the class FeatureControlManagerTest, method testUpdateFeatures.

@Test
public void testUpdateFeatures() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    snapshotRegistry.getOrCreateSnapshot(-1);
    FeatureControlManager manager = new FeatureControlManager(rangeMap("foo", 1, 2), snapshotRegistry);
    assertEquals(new FeatureMapAndEpoch(new FeatureMap(Collections.emptyMap()), -1), manager.finalizedFeatures(-1));
    // Requesting a range (1..3) beyond what the controller supports fails with
    // INVALID_UPDATE_VERSION and produces no records.
    assertEquals(ControllerResult.atomicOf(Collections.emptyList(),
        Collections.singletonMap("foo", new ApiError(Errors.INVALID_UPDATE_VERSION,
            "The controller does not support the given feature range."))),
        manager.updateFeatures(rangeMap("foo", 1, 3),
            Collections.singleton("foo"), Collections.emptyMap()));
    // "foo" (1..2) is supported, but "bar" is unknown to the controller.
    ControllerResult<Map<String, ApiError>> result = manager.updateFeatures(
        rangeMap("foo", 1, 2, "bar", 1, 1), Collections.emptySet(), Collections.emptyMap());
    Map<String, ApiError> expectedMap = new HashMap<>();
    expectedMap.put("foo", ApiError.NONE);
    expectedMap.put("bar", new ApiError(Errors.INVALID_UPDATE_VERSION, "The controller does not support the given feature range."));
    assertEquals(expectedMap, result.response());
    List<ApiMessageAndVersion> expectedMessages = new ArrayList<>();
    expectedMessages.add(new ApiMessageAndVersion(new FeatureLevelRecord().
        setName("foo").setMinFeatureLevel((short) 1).setMaxFeatureLevel((short) 2), (short) 0));
    assertEquals(expectedMessages, result.records());
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) LogContext(org.apache.kafka.common.utils.LogContext) FeatureMapAndEpoch(org.apache.kafka.metadata.FeatureMapAndEpoch) FeatureLevelRecord(org.apache.kafka.common.metadata.FeatureLevelRecord) FeatureMap(org.apache.kafka.metadata.FeatureMap) SnapshotRegistry(org.apache.kafka.timeline.SnapshotRegistry) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) ApiError(org.apache.kafka.common.requests.ApiError) Map(java.util.Map) Test(org.junit.jupiter.api.Test)
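
All of the control managers above keep their state in timeline data structures registered with the SnapshotRegistry, which is what lets the controller snapshot committed state and roll back uncommitted changes. A minimal sketch of that pattern, using TimelineHashMap from the same org.apache.kafka.timeline package (the map contents are invented for illustration):

import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.timeline.SnapshotRegistry;
import org.apache.kafka.timeline.TimelineHashMap;

public class SnapshotRegistryExample {
    public static void main(String[] args) {
        SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
        TimelineHashMap<String, String> configs = new TimelineHashMap<>(snapshotRegistry, 16);

        configs.put("abc", "123");
        // Capture the current state at epoch 0.
        snapshotRegistry.getOrCreateSnapshot(0);

        configs.put("abc", "456");
        configs.put("def", "789");

        // Roll back to epoch 0, discarding the changes made after the snapshot.
        snapshotRegistry.revertToSnapshot(0);
        System.out.println(configs.get("abc")); // prints 123
        System.out.println(configs.get("def")); // prints null
    }
}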

Aggregations

LogContext (org.apache.kafka.common.utils.LogContext): 28
SnapshotRegistry (org.apache.kafka.timeline.SnapshotRegistry): 28
Test (org.junit.jupiter.api.Test): 24
ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion): 12
HashMap (java.util.HashMap): 6
MockTime (org.apache.kafka.common.utils.MockTime): 6
ArrayList (java.util.ArrayList): 5
Map (java.util.Map): 5
Random (java.util.Random): 5
RegisterBrokerRecord (org.apache.kafka.common.metadata.RegisterBrokerRecord): 5
FeatureMap (org.apache.kafka.metadata.FeatureMap): 5
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 5
Endpoint (org.apache.kafka.common.Endpoint): 4
ConfigRecord (org.apache.kafka.common.metadata.ConfigRecord): 4
FeatureLevelRecord (org.apache.kafka.common.metadata.FeatureLevelRecord): 4
BrokerEndpoint (org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpoint): 4
ClientQuotaEntity (org.apache.kafka.common.quota.ClientQuotaEntity): 4
ApiError (org.apache.kafka.common.requests.ApiError): 4
HashSet (java.util.HashSet): 3
List (java.util.List): 3