Example usage of org.apache.kafka.timeline.SnapshotRegistry in the Apache Kafka project:
class FeatureControlManagerTest, method testUpdateFeaturesErrorCases.
@Test
public void testUpdateFeaturesErrorCases() {
    SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
    FeatureControlManager featureManager =
        new FeatureControlManager(rangeMap("foo", 1, 5, "bar", 1, 2), registry);

    // Updating "foo" must fail when the validated broker (id 5) reports an
    // empty supported range for it.
    ApiError unsupportedBroker = new ApiError(Errors.INVALID_UPDATE_VERSION,
        "Broker 5 does not support the given feature range.");
    assertEquals(
        ControllerResult.atomicOf(Collections.emptyList(),
            Collections.singletonMap("foo", unsupportedBroker)),
        featureManager.updateFeatures(rangeMap("foo", 1, 3),
            Collections.singleton("foo"),
            Collections.singletonMap(5, rangeMap())));

    // With no brokers to validate, finalizing "foo" at [1, 3] succeeds.
    ControllerResult<Map<String, ApiError>> finalizeResult =
        featureManager.updateFeatures(rangeMap("foo", 1, 3),
            Collections.emptySet(), Collections.emptyMap());
    assertEquals(Collections.singletonMap("foo", ApiError.NONE), finalizeResult.response());
    featureManager.replay((FeatureLevelRecord) finalizeResult.records().get(0).message());
    registry.getOrCreateSnapshot(3);

    // Lowering the max level is a downgrade; it is rejected unless the
    // feature is listed in the downgradables set.
    ApiError downgradeError = new ApiError(Errors.INVALID_UPDATE_VERSION,
        "Can't downgrade the maximum version of this feature without setting downgradable to true.");
    assertEquals(
        ControllerResult.atomicOf(Collections.emptyList(),
            Collections.singletonMap("foo", downgradeError)),
        featureManager.updateFeatures(rangeMap("foo", 1, 2),
            Collections.emptySet(), Collections.emptyMap()));

    // The same downgrade succeeds once "foo" is marked downgradable, and a
    // new FeatureLevelRecord with the reduced max level is produced.
    ApiMessageAndVersion downgradeRecord = new ApiMessageAndVersion(
        new FeatureLevelRecord().setName("foo")
            .setMinFeatureLevel((short) 1).setMaxFeatureLevel((short) 2),
        (short) 0);
    assertEquals(
        ControllerResult.atomicOf(Collections.singletonList(downgradeRecord),
            Collections.singletonMap("foo", ApiError.NONE)),
        featureManager.updateFeatures(rangeMap("foo", 1, 2),
            Collections.singleton("foo"), Collections.emptyMap()));
}
Example usage of org.apache.kafka.timeline.SnapshotRegistry in the Apache Kafka project:
class FeatureControlManagerTest, method testFeatureControlIterator.
@Test
public void testFeatureControlIterator() throws Exception {
    SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
    FeatureControlManager featureManager =
        new FeatureControlManager(rangeMap("foo", 1, 5, "bar", 1, 2), registry);

    // Finalize both features and apply the resulting records to the manager.
    ControllerResult<Map<String, ApiError>> updateResult =
        featureManager.updateFeatures(rangeMap("foo", 1, 5, "bar", 1, 1),
            Collections.emptySet(), Collections.emptyMap());
    RecordTestUtils.replayAll(featureManager, updateResult.records());

    // The snapshot iterator should emit one single-record batch per feature.
    ApiMessageAndVersion fooRecord = new ApiMessageAndVersion(
        new FeatureLevelRecord().setName("foo")
            .setMinFeatureLevel((short) 1).setMaxFeatureLevel((short) 5),
        (short) 0);
    ApiMessageAndVersion barRecord = new ApiMessageAndVersion(
        new FeatureLevelRecord().setName("bar")
            .setMinFeatureLevel((short) 1).setMaxFeatureLevel((short) 1),
        (short) 0);
    RecordTestUtils.assertBatchIteratorContains(
        Arrays.asList(Arrays.asList(fooRecord), Arrays.asList(barRecord)),
        featureManager.iterator(Long.MAX_VALUE));
}
Example usage of org.apache.kafka.timeline.SnapshotRegistry in the Apache Kafka project:
class AclControlManagerTest, method testLoadSnapshot.
@Test
public void testLoadSnapshot() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    // Take a snapshot of the empty state so we can revert back to it later.
    snapshotRegistry.getOrCreateSnapshot(0);
    AclControlManager manager = new AclControlManager(snapshotRegistry, Optional.empty());

    // Load TEST_ACLS into the AclControlManager.
    Set<ApiMessageAndVersion> loadedAcls = new HashSet<>();
    for (StandardAclWithId acl : TEST_ACLS) {
        AccessControlEntryRecord record = acl.toRecord();
        assertTrue(loadedAcls.add(new ApiMessageAndVersion(record, (short) 0)));
        // Replay the record we already built rather than calling toRecord()
        // a second time, so the replayed record is the one in loadedAcls.
        manager.replay(record, Optional.empty());
    }

    // Verify that the ACLs stored in the AclControlManager match the ones we expect.
    Set<ApiMessageAndVersion> foundAcls = new HashSet<>();
    for (Iterator<List<ApiMessageAndVersion>> iterator = manager.iterator(Long.MAX_VALUE);
            iterator.hasNext(); ) {
        for (ApiMessageAndVersion apiMessageAndVersion : iterator.next()) {
            assertTrue(foundAcls.add(apiMessageAndVersion));
        }
    }
    assertEquals(loadedAcls, foundAcls);

    // Once we complete the snapshot load, the ACLs should be reflected in the authorizer.
    MockClusterMetadataAuthorizer authorizer = new MockClusterMetadataAuthorizer();
    authorizer.loadSnapshot(manager.idToAcl());
    assertEquals(new HashSet<>(StandardAclTest.TEST_ACLS), new HashSet<>(authorizer.acls.values()));

    // Test reverting to an empty state and then completing the snapshot load without
    // setting an authorizer. This simulates the case where the user didn't configure
    // a cluster metadata authorizer.
    snapshotRegistry.revertToSnapshot(0);
    authorizer.loadSnapshot(manager.idToAcl());
    assertFalse(manager.iterator(Long.MAX_VALUE).hasNext());
}
Example usage of org.apache.kafka.timeline.SnapshotRegistry in the Apache Kafka project:
class AclControlManagerTest, method testCreateAclDeleteAcl.
@Test
public void testCreateAclDeleteAcl() {
// End-to-end exercise of AclControlManager.createAcls / deleteAcls:
// create a mix of valid and invalid bindings, replay the records, then
// delete by filter and verify the per-filter results and remaining state.
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
AclControlManager manager = new AclControlManager(snapshotRegistry, Optional.empty());
MockClusterMetadataAuthorizer authorizer = new MockClusterMetadataAuthorizer();
authorizer.loadSnapshot(manager.idToAcl());
// Three valid bindings from the shared TEST_ACLS fixture...
List<AclBinding> toCreate = new ArrayList<>();
for (int i = 0; i < 3; i++) {
toCreate.add(TEST_ACLS.get(i).toBinding());
}
// ...plus one invalid binding (PatternType.UNKNOWN) that must be rejected.
toCreate.add(new AclBinding(new ResourcePattern(TOPIC, "*", PatternType.UNKNOWN), new AccessControlEntry("User:*", "*", ALTER, ALLOW)));
ControllerResult<List<AclCreateResult>> createResult = manager.createAcls(toCreate);
// Expected results are positional: three successes, then the UNKNOWN failure.
List<AclCreateResult> expectedResults = new ArrayList<>();
for (int i = 0; i < 3; i++) {
expectedResults.add(AclCreateResult.SUCCESS);
}
expectedResults.add(new AclCreateResult(new InvalidRequestException("Invalid patternType UNKNOWN")));
// Compare by exception message because AclCreateResult wraps distinct
// exception instances that are not equal by reference.
for (int i = 0; i < expectedResults.size(); i++) {
AclCreateResult expectedResult = expectedResults.get(i);
if (expectedResult.exception().isPresent()) {
assertEquals(expectedResult.exception().get().getMessage(), createResult.response().get(i).exception().get().getMessage());
} else {
assertFalse(createResult.response().get(i).exception().isPresent());
}
}
// Apply the create records so the manager actually holds the ACLs.
RecordTestUtils.replayAll(manager, createResult.records());
assertTrue(manager.iterator(Long.MAX_VALUE).hasNext());
// Delete with two filters: a valid ANY/LITERAL filter, and a filter with
// ResourceType.UNKNOWN which should produce a per-filter error.
ControllerResult<List<AclDeleteResult>> deleteResult = manager.deleteAcls(Arrays.asList(new AclBindingFilter(new ResourcePatternFilter(ResourceType.ANY, null, LITERAL), AccessControlEntryFilter.ANY), new AclBindingFilter(new ResourcePatternFilter(ResourceType.UNKNOWN, null, LITERAL), AccessControlEntryFilter.ANY)));
// One AclDeleteResult per input filter.
assertEquals(2, deleteResult.response().size());
// The first filter's deletions should all succeed; collect what was removed.
Set<AclBinding> deleted = new HashSet<>();
for (AclDeleteResult.AclBindingDeleteResult result : deleteResult.response().get(0).aclBindingDeleteResults()) {
assertEquals(Optional.empty(), result.exception());
deleted.add(result.aclBinding());
}
// TEST_ACLS[0] and TEST_ACLS[2] match the LITERAL filter; TEST_ACLS[1]
// presumably has a different pattern type and survives — see fixture.
assertEquals(new HashSet<>(Arrays.asList(TEST_ACLS.get(0).toBinding(), TEST_ACLS.get(2).toBinding())), deleted);
// The UNKNOWN-resource-type filter fails as a whole with InvalidRequestException.
assertEquals(InvalidRequestException.class, deleteResult.response().get(1).exception().get().getClass());
// Apply the deletions; exactly one ACL (TEST_ACLS[1]) should remain.
RecordTestUtils.replayAll(manager, deleteResult.records());
Iterator<List<ApiMessageAndVersion>> iterator = manager.iterator(Long.MAX_VALUE);
assertTrue(iterator.hasNext());
List<ApiMessageAndVersion> list = iterator.next();
assertEquals(1, list.size());
assertEquals(TEST_ACLS.get(1).toBinding(), StandardAcl.fromRecord((AccessControlEntryRecord) list.get(0).message()).toBinding());
assertFalse(iterator.hasNext());
}
Example usage of org.apache.kafka.timeline.SnapshotRegistry in the Apache Kafka project:
class BrokersToIsrsTest, method testLeadersOnlyIterator.
@Test
public void testLeadersOnlyIterator() {
    SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
    BrokersToIsrs tracker = new BrokersToIsrs(registry);

    // Register two partitions; the trailing int pair is presumably the
    // previous/next leader id (-1 = none) — confirm against BrokersToIsrs.update.
    tracker.update(UUIDS[0], 0, null, new int[] {1, 2, 3}, -1, 1);
    tracker.update(UUIDS[1], 1, null, new int[] {2, 3, 4}, -1, 4);

    // With leadersOnly=true, each broker only reports partitions it leads.
    assertEquals(toSet(new TopicIdPartition(UUIDS[0], 0)), toSet(tracker.iterator(1, true)));
    assertEquals(toSet(), toSet(tracker.iterator(2, true)));
    assertEquals(toSet(new TopicIdPartition(UUIDS[1], 1)), toSet(tracker.iterator(4, true)));

    // Hand leadership of the first partition from broker 1 to broker 2;
    // the leaders-only view must follow the change.
    tracker.update(UUIDS[0], 0, new int[] {1, 2, 3}, new int[] {1, 2, 3}, 1, 2);
    assertEquals(toSet(), toSet(tracker.iterator(1, true)));
    assertEquals(toSet(new TopicIdPartition(UUIDS[0], 0)), toSet(tracker.iterator(2, true)));
}
Aggregations