Search in sources:

Example 1 with FeatureLevelRecord

Use of org.apache.kafka.common.metadata.FeatureLevelRecord in the Apache Kafka project.

From the class FeatureControlManagerTest, method testReplay.

@Test
public void testReplay() {
    // A record finalizing feature "foo" over the level range [1, 2].
    FeatureLevelRecord record = new FeatureLevelRecord().
        setName("foo").
        setMinFeatureLevel((short) 1).
        setMaxFeatureLevel((short) 2);
    SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
    registry.getOrCreateSnapshot(-1);
    FeatureControlManager featureControl =
        new FeatureControlManager(rangeMap("foo", 1, 2), registry);
    featureControl.replay(record);
    registry.getOrCreateSnapshot(123);
    // Reading back at epoch 123 must reflect the replayed record.
    assertEquals(
        new FeatureMapAndEpoch(new FeatureMap(rangeMap("foo", 1, 2)), 123),
        featureControl.finalizedFeatures(123));
}
Also used : FeatureMap(org.apache.kafka.metadata.FeatureMap) SnapshotRegistry(org.apache.kafka.timeline.SnapshotRegistry) LogContext(org.apache.kafka.common.utils.LogContext) FeatureMapAndEpoch(org.apache.kafka.metadata.FeatureMapAndEpoch) FeatureLevelRecord(org.apache.kafka.common.metadata.FeatureLevelRecord) Test(org.junit.jupiter.api.Test)

Example 2 with FeatureLevelRecord

Use of org.apache.kafka.common.metadata.FeatureLevelRecord in the Apache Kafka project.

From the class FeaturesImage, method write.

public void write(Consumer<List<ApiMessageAndVersion>> out) {
    // Serialize every finalized feature as a FeatureLevelRecord and hand
    // them to the consumer as one batch.
    List<ApiMessageAndVersion> records = new ArrayList<>();
    finalizedVersions.forEach((featureName, range) ->
        records.add(new ApiMessageAndVersion(
            new FeatureLevelRecord().
                setName(featureName).
                setMinFeatureLevel(range.min()).
                setMaxFeatureLevel(range.max()),
            FEATURE_LEVEL_RECORD.highestSupportedVersion())));
    out.accept(records);
}
Also used : ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) ArrayList(java.util.ArrayList) VersionRange(org.apache.kafka.metadata.VersionRange) FeatureLevelRecord(org.apache.kafka.common.metadata.FeatureLevelRecord)

Example 3 with FeatureLevelRecord

Use of org.apache.kafka.common.metadata.FeatureLevelRecord in the Apache Kafka project.

From the class FeatureControlManager, method updateFeature.

private ApiError updateFeature(String featureName, VersionRange newRange, boolean downgradeable, Map<Integer, Map<String, VersionRange>> brokerFeatures, List<ApiMessageAndVersion> records) {
    // Feature levels are 1-based, so both bounds must be positive.
    if (newRange.min() <= 0) {
        return new ApiError(Errors.INVALID_UPDATE_VERSION,
            "The lower value for the new range cannot be less than 1.");
    }
    if (newRange.max() <= 0) {
        return new ApiError(Errors.INVALID_UPDATE_VERSION,
            "The upper value for the new range cannot be less than 1.");
    }
    // The controller itself must support the entire requested range.
    VersionRange controllerRange = supportedFeatures.get(featureName);
    if (controllerRange == null || !controllerRange.contains(newRange)) {
        return new ApiError(Errors.INVALID_UPDATE_VERSION,
            "The controller does not support the given feature range.");
    }
    // Every registered broker must support the entire requested range, too.
    for (Entry<Integer, Map<String, VersionRange>> entry : brokerFeatures.entrySet()) {
        VersionRange brokerRange = entry.getValue().get(featureName);
        if (brokerRange == null || !brokerRange.contains(newRange)) {
            return new ApiError(Errors.INVALID_UPDATE_VERSION,
                "Broker " + entry.getKey() + " does not support the given feature range.");
        }
    }
    // Lowering the finalized maximum is a downgrade and requires opt-in.
    VersionRange finalizedRange = finalizedVersions.get(featureName);
    if (finalizedRange != null && finalizedRange.max() > newRange.max() && !downgradeable) {
        return new ApiError(Errors.INVALID_UPDATE_VERSION,
            "Can't downgrade the maximum version of this feature without " +
            "setting downgradable to true.");
    }
    // All checks passed: append the record finalizing the new range.
    records.add(new ApiMessageAndVersion(
        new FeatureLevelRecord().
            setName(featureName).
            setMinFeatureLevel(newRange.min()).
            setMaxFeatureLevel(newRange.max()),
        FEATURE_LEVEL_RECORD.highestSupportedVersion()));
    return ApiError.NONE;
}
Also used : ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) VersionRange(org.apache.kafka.metadata.VersionRange) ApiError(org.apache.kafka.common.requests.ApiError) HashMap(java.util.HashMap) TimelineHashMap(org.apache.kafka.timeline.TimelineHashMap) FeatureMap(org.apache.kafka.metadata.FeatureMap) TreeMap(java.util.TreeMap) Map(java.util.Map) FeatureLevelRecord(org.apache.kafka.common.metadata.FeatureLevelRecord)

Example 4 with FeatureLevelRecord

Use of org.apache.kafka.common.metadata.FeatureLevelRecord in the Apache Kafka project.

From the class FeatureControlManagerTest, method testUpdateFeatures.

@Test
public void testUpdateFeatures() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    snapshotRegistry.getOrCreateSnapshot(-1);
    FeatureControlManager manager = new FeatureControlManager(rangeMap("foo", 1, 2), snapshotRegistry);
    assertEquals(new FeatureMapAndEpoch(new FeatureMap(Collections.emptyMap()), -1), manager.finalizedFeatures(-1));
    assertEquals(ControllerResult.atomicOf(Collections.emptyList(), Collections.singletonMap("foo", new ApiError(Errors.INVALID_UPDATE_VERSION, "The controller does not support the given feature range."))), manager.updateFeatures(rangeMap("foo", 1, 3), Collections.singleton("foo"), Collections.emptyMap()));
    ControllerResult<Map<String, ApiError>> result = manager.updateFeatures(rangeMap("foo", 1, 2, "bar", 1, 1), Collections.emptySet(), Collections.emptyMap());
    Map<String, ApiError> expectedMap = new HashMap<>();
    expectedMap.put("foo", ApiError.NONE);
    expectedMap.put("bar", new ApiError(Errors.INVALID_UPDATE_VERSION, "The controller does not support the given feature range."));
    assertEquals(expectedMap, result.response());
    List<ApiMessageAndVersion> expectedMessages = new ArrayList<>();
    expectedMessages.add(new ApiMessageAndVersion(new FeatureLevelRecord().setName("foo").setMinFeatureLevel((short) 1).setMaxFeatureLevel((short) 2), (short) 0));
    assertEquals(expectedMessages, result.records());
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) LogContext(org.apache.kafka.common.utils.LogContext) FeatureMapAndEpoch(org.apache.kafka.metadata.FeatureMapAndEpoch) FeatureLevelRecord(org.apache.kafka.common.metadata.FeatureLevelRecord) FeatureMap(org.apache.kafka.metadata.FeatureMap) SnapshotRegistry(org.apache.kafka.timeline.SnapshotRegistry) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) ApiError(org.apache.kafka.common.requests.ApiError) HashMap(java.util.HashMap) FeatureMap(org.apache.kafka.metadata.FeatureMap) Map(java.util.Map) Test(org.junit.jupiter.api.Test)

Example 5 with FeatureLevelRecord

Use of org.apache.kafka.common.metadata.FeatureLevelRecord in the Apache Kafka project.

From the class FeatureControlManagerTest, method testUpdateFeaturesErrorCases.

@Test
public void testUpdateFeaturesErrorCases() {
    SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
    FeatureControlManager featureControl =
        new FeatureControlManager(rangeMap("foo", 1, 5, "bar", 1, 2), registry);
    // Broker 5 advertises no features, so the update must be rejected.
    assertEquals(
        ControllerResult.atomicOf(
            Collections.emptyList(),
            Collections.singletonMap("foo", new ApiError(Errors.INVALID_UPDATE_VERSION,
                "Broker 5 does not support the given feature range."))),
        featureControl.updateFeatures(rangeMap("foo", 1, 3),
            Collections.singleton("foo"), Collections.singletonMap(5, rangeMap())));
    // Finalize foo at [1, 3] and replay the resulting record.
    ControllerResult<Map<String, ApiError>> result =
        featureControl.updateFeatures(rangeMap("foo", 1, 3),
            Collections.emptySet(), Collections.emptyMap());
    assertEquals(Collections.singletonMap("foo", ApiError.NONE), result.response());
    featureControl.replay((FeatureLevelRecord) result.records().get(0).message());
    registry.getOrCreateSnapshot(3);
    // Lowering the max from 3 to 2 without opting in to downgrade must fail.
    assertEquals(
        ControllerResult.atomicOf(
            Collections.emptyList(),
            Collections.singletonMap("foo", new ApiError(Errors.INVALID_UPDATE_VERSION,
                "Can't downgrade the maximum version of this feature without " +
                "setting downgradable to true."))),
        featureControl.updateFeatures(rangeMap("foo", 1, 2),
            Collections.emptySet(), Collections.emptyMap()));
    // The same downgrade succeeds once "foo" is marked downgradeable.
    assertEquals(
        ControllerResult.atomicOf(
            Collections.singletonList(new ApiMessageAndVersion(
                new FeatureLevelRecord().
                    setName("foo").
                    setMinFeatureLevel((short) 1).
                    setMaxFeatureLevel((short) 2),
                (short) 0)),
            Collections.singletonMap("foo", ApiError.NONE)),
        featureControl.updateFeatures(rangeMap("foo", 1, 2),
            Collections.singleton("foo"), Collections.emptyMap()));
}
Also used : SnapshotRegistry(org.apache.kafka.timeline.SnapshotRegistry) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) LogContext(org.apache.kafka.common.utils.LogContext) ApiError(org.apache.kafka.common.requests.ApiError) HashMap(java.util.HashMap) FeatureMap(org.apache.kafka.metadata.FeatureMap) Map(java.util.Map) FeatureLevelRecord(org.apache.kafka.common.metadata.FeatureLevelRecord) Test(org.junit.jupiter.api.Test)

Aggregations

FeatureLevelRecord (org.apache.kafka.common.metadata.FeatureLevelRecord)6 FeatureMap (org.apache.kafka.metadata.FeatureMap)5 ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion)5 HashMap (java.util.HashMap)4 Map (java.util.Map)4 LogContext (org.apache.kafka.common.utils.LogContext)4 SnapshotRegistry (org.apache.kafka.timeline.SnapshotRegistry)4 Test (org.junit.jupiter.api.Test)4 ApiError (org.apache.kafka.common.requests.ApiError)3 ArrayList (java.util.ArrayList)2 FeatureMapAndEpoch (org.apache.kafka.metadata.FeatureMapAndEpoch)2 VersionRange (org.apache.kafka.metadata.VersionRange)2 TreeMap (java.util.TreeMap)1 TimelineHashMap (org.apache.kafka.timeline.TimelineHashMap)1