Example 36 with ApiMessageAndVersion

Use of org.apache.kafka.server.common.ApiMessageAndVersion in the Apache Kafka project.

From the class ConfigurationControlManager, the method legacyAlterConfigResource:

private void legacyAlterConfigResource(ConfigResource configResource,
                                       Map<String, String> newConfigs,
                                       Consumer<ConfigResource> existenceChecker,
                                       List<ApiMessageAndVersion> outputRecords,
                                       Map<ConfigResource, ApiError> outputResults) {
    List<ApiMessageAndVersion> newRecords = new ArrayList<>();
    Map<String, String> currentConfigs = configData.get(configResource);
    if (currentConfigs == null) {
        currentConfigs = Collections.emptyMap();
    }
    for (Entry<String, String> entry : newConfigs.entrySet()) {
        String key = entry.getKey();
        String newValue = entry.getValue();
        String currentValue = currentConfigs.get(key);
        if (!Objects.equals(newValue, currentValue)) {
            newRecords.add(new ApiMessageAndVersion(new ConfigRecord().
                setResourceType(configResource.type().id()).
                setResourceName(configResource.name()).
                setName(key).
                setValue(newValue), CONFIG_RECORD.highestSupportedVersion()));
        }
    }
    for (String key : currentConfigs.keySet()) {
        if (!newConfigs.containsKey(key)) {
            newRecords.add(new ApiMessageAndVersion(new ConfigRecord().
                setResourceType(configResource.type().id()).
                setResourceName(configResource.name()).
                setName(key).
                setValue(null), CONFIG_RECORD.highestSupportedVersion()));
        }
    }
    ApiError error = validateAlterConfig(configResource, newRecords, existenceChecker);
    if (error.isFailure()) {
        outputResults.put(configResource, error);
        return;
    }
    outputRecords.addAll(newRecords);
    outputResults.put(configResource, ApiError.NONE);
}
Also used: ConfigRecord(org.apache.kafka.common.metadata.ConfigRecord) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) ArrayList(java.util.ArrayList) ApiError(org.apache.kafka.common.requests.ApiError)
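
The method above turns a legacy (non-incremental) alter into a diff of ConfigRecords: keys whose value changed produce a record with the new value, and keys absent from the request produce a record with a null value, which removes the key on replay. Below is a minimal standalone sketch of those two record shapes; the resource, key names and values are made up, while ConfigResource, ConfigRecord and the static CONFIG_RECORD constant from MetadataRecordType are the same classes used in the method above.

ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
// "set" record: the key gets the new value when the record is replayed
ApiMessageAndVersion setRecord = new ApiMessageAndVersion(new ConfigRecord().
    setResourceType(resource.type().id()).
    setResourceName(resource.name()).
    setName("retention.ms").
    setValue("86400000"), CONFIG_RECORD.highestSupportedVersion());
// "delete" record: a null value removes the key when the record is replayed
ApiMessageAndVersion deleteRecord = new ApiMessageAndVersion(new ConfigRecord().
    setResourceType(resource.type().id()).
    setResourceName(resource.name()).
    setName("cleanup.policy").
    setValue(null), CONFIG_RECORD.highestSupportedVersion());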

Example 37 with ApiMessageAndVersion

Use of org.apache.kafka.server.common.ApiMessageAndVersion in the Apache Kafka project.

From the class AbstractApiMessageSerde, the method read:

@Override
public ApiMessageAndVersion read(Readable input, int size) {
    short frameVersion = unsignedIntToShort(input, "frame version");
    if (frameVersion == 0) {
        throw new MetadataParseException("Could not deserialize metadata record with frame version 0. " + "Note that upgrades from the preview release of KRaft in 2.8 to newer versions are not supported.");
    } else if (frameVersion != DEFAULT_FRAME_VERSION) {
        throw new MetadataParseException("Could not deserialize metadata record due to unknown frame version " + frameVersion + "(only frame version " + DEFAULT_FRAME_VERSION + " is supported)");
    }
    short apiKey = unsignedIntToShort(input, "type");
    short version = unsignedIntToShort(input, "version");
    ApiMessage record;
    try {
        record = apiMessageFor(apiKey);
    } catch (Exception e) {
        throw new MetadataParseException(e);
    }
    try {
        record.read(input, version);
    } catch (Exception e) {
        throw new MetadataParseException("Failed to deserialize record with type " + apiKey, e);
    }
    if (input.remaining() > 0) {
        throw new MetadataParseException("Found " + input.remaining() + " byte(s) of garbage after " + apiKey);
    }
    return new ApiMessageAndVersion(record, version);
}
Also used: ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) ApiMessage(org.apache.kafka.common.protocol.ApiMessage)
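
read() expects a record framed as a frame version, a record type and a record version, followed by the record body, and wraps every failure in MetadataParseException. The sketch below is a hedged round trip through MetadataRecordSerde, the concrete subclass of AbstractApiMessageSerde in the same project; the TopicRecord payload is illustrative, and ObjectSerializationCache, ByteBufferAccessor and java.nio.ByteBuffer are assumed to be available as elsewhere in the project.

// Serialize one record, then read it back through the serde (sketch only).
MetadataRecordSerde serde = new MetadataRecordSerde();
ApiMessageAndVersion in = new ApiMessageAndVersion(
    new TopicRecord().setName("foo").setTopicId(Uuid.randomUuid()), (short) 0);
ObjectSerializationCache cache = new ObjectSerializationCache();
int size = serde.recordSize(in, cache);
ByteBuffer buffer = ByteBuffer.allocate(size);
serde.write(in, cache, new ByteBufferAccessor(buffer));
buffer.flip();
// read() consumes the frame version, type and version before the record body.
ApiMessageAndVersion out = serde.read(new ByteBufferAccessor(buffer), size);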

Example 38 with ApiMessageAndVersion

Use of org.apache.kafka.server.common.ApiMessageAndVersion in the Apache Kafka project.

From the class LocalLogManagerTest, the method testCommits:

/**
 * Test that all the log managers see all the commits.
 */
@Test
public void testCommits() throws Exception {
    try (LocalLogManagerTestEnv env = LocalLogManagerTestEnv.createWithMockListeners(3, Optional.empty())) {
        LeaderAndEpoch leaderInfo = env.waitForLeader();
        int leaderId = leaderInfo.leaderId().orElseThrow(() -> new AssertionError("Current leader is undefined"));
        LocalLogManager activeLogManager = env.logManagers().get(leaderId);
        int epoch = activeLogManager.leaderAndEpoch().epoch();
        List<ApiMessageAndVersion> messages = Arrays.asList(
            new ApiMessageAndVersion(new RegisterBrokerRecord().setBrokerId(0), (short) 0),
            new ApiMessageAndVersion(new RegisterBrokerRecord().setBrokerId(1), (short) 0),
            new ApiMessageAndVersion(new RegisterBrokerRecord().setBrokerId(2), (short) 0));
        assertEquals(3, activeLogManager.scheduleAppend(epoch, messages));
        for (LocalLogManager logManager : env.logManagers()) {
            waitForLastCommittedOffset(3, logManager);
        }
        List<MockMetaLogManagerListener> listeners = env.logManagers().stream().
            map(m -> (MockMetaLogManagerListener) m.listeners().get(0)).
            collect(Collectors.toList());
        env.close();
        for (MockMetaLogManagerListener listener : listeners) {
            List<String> events = listener.serializedEvents();
            assertEquals(SHUTDOWN, events.get(events.size() - 1));
            int foundIndex = 0;
            for (String event : events) {
                if (event.startsWith(COMMIT)) {
                    assertEquals(messages.get(foundIndex).message().toString(), event.substring(COMMIT.length() + 1));
                    foundIndex++;
                }
            }
            assertEquals(messages.size(), foundIndex);
        }
    }
}
Also used: Arrays(java.util.Arrays) TestUtils(org.apache.kafka.test.TestUtils) LAST_COMMITTED_OFFSET(org.apache.kafka.metalog.MockMetaLogManagerListener.LAST_COMMITTED_OFFSET) OptionalInt(java.util.OptionalInt) Collectors(java.util.stream.Collectors) SHUTDOWN(org.apache.kafka.metalog.MockMetaLogManagerListener.SHUTDOWN) Test(org.junit.jupiter.api.Test) List(java.util.List) RegisterBrokerRecord(org.apache.kafka.common.metadata.RegisterBrokerRecord) COMMIT(org.apache.kafka.metalog.MockMetaLogManagerListener.COMMIT) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) Optional(java.util.Optional) LeaderAndEpoch(org.apache.kafka.raft.LeaderAndEpoch) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) Timeout(org.junit.jupiter.api.Timeout)
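
The three RegisterBrokerRecord messages could also be built programmatically; a small equivalent sketch using IntStream is below (broker ids 0 to 2 mirror the Arrays.asList call in the test).

// Same message list as in testCommits, built with java.util.stream.IntStream.
List<ApiMessageAndVersion> messages = IntStream.range(0, 3)
    .mapToObj(brokerId -> new ApiMessageAndVersion(
        new RegisterBrokerRecord().setBrokerId(brokerId), (short) 0))
    .collect(Collectors.toList());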

Example 39 with ApiMessageAndVersion

Use of org.apache.kafka.server.common.ApiMessageAndVersion in the Apache Kafka project.

From the class MockMetaLogManagerListener, the method handleCommit:

@Override
public synchronized void handleCommit(BatchReader<ApiMessageAndVersion> reader) {
    try {
        while (reader.hasNext()) {
            Batch<ApiMessageAndVersion> batch = reader.next();
            long lastCommittedOffset = batch.lastOffset();
            for (ApiMessageAndVersion messageAndVersion : batch.records()) {
                ApiMessage message = messageAndVersion.message();
                StringBuilder bld = new StringBuilder();
                bld.append(COMMIT).append(" ").append(message.toString());
                serializedEvents.add(bld.toString());
            }
            StringBuilder bld = new StringBuilder();
            bld.append(LAST_COMMITTED_OFFSET).append(" ").append(lastCommittedOffset);
            serializedEvents.add(bld.toString());
        }
    } finally {
        reader.close();
    }
}
Also used: ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) ApiMessage(org.apache.kafka.common.protocol.ApiMessage)

Example 40 with ApiMessageAndVersion

Use of org.apache.kafka.server.common.ApiMessageAndVersion in the Apache Kafka project.

From the class MockMetaLogManagerListener, the method handleSnapshot:

@Override
public synchronized void handleSnapshot(SnapshotReader<ApiMessageAndVersion> reader) {
    long lastCommittedOffset = reader.lastContainedLogOffset();
    try {
        while (reader.hasNext()) {
            Batch<ApiMessageAndVersion> batch = reader.next();
            for (ApiMessageAndVersion messageAndVersion : batch.records()) {
                ApiMessage message = messageAndVersion.message();
                StringBuilder bld = new StringBuilder();
                bld.append(SNAPSHOT).append(" ").append(message.toString());
                serializedEvents.add(bld.toString());
            }
            StringBuilder bld = new StringBuilder();
            bld.append(LAST_COMMITTED_OFFSET).append(" ").append(lastCommittedOffset);
            serializedEvents.add(bld.toString());
        }
    } finally {
        reader.close();
    }
}
Also used: ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) ApiMessage(org.apache.kafka.common.protocol.ApiMessage)
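
Both handlers follow the same shape: iterate the reader batch by batch, process every ApiMessageAndVersion in the batch, and close the reader in a finally block. Below is a stripped-down sketch of that pattern which only counts committed records; committedRecords and lastCommittedOffset are hypothetical fields, not part of MockMetaLogManagerListener.

public synchronized void handleCommit(BatchReader<ApiMessageAndVersion> reader) {
    try {
        while (reader.hasNext()) {
            Batch<ApiMessageAndVersion> batch = reader.next();
            // tally the records and remember how far the log has been applied
            committedRecords += batch.records().size();
            lastCommittedOffset = batch.lastOffset();
        }
    } finally {
        // the reader owns underlying resources and must always be closed
        reader.close();
    }
}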

Aggregations

ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion): 84
ArrayList (java.util.ArrayList): 38
Test (org.junit.jupiter.api.Test): 35
Uuid (org.apache.kafka.common.Uuid): 23
ApiError (org.apache.kafka.common.requests.ApiError): 20
LogContext (org.apache.kafka.common.utils.LogContext): 17
HashMap (java.util.HashMap): 16
SnapshotRegistry (org.apache.kafka.timeline.SnapshotRegistry): 15
List (java.util.List): 12
Map (java.util.Map): 12
PartitionChangeRecord (org.apache.kafka.common.metadata.PartitionChangeRecord): 12
PartitionRegistration (org.apache.kafka.metadata.PartitionRegistration): 11
TopicRecord (org.apache.kafka.common.metadata.TopicRecord): 8
UnknownTopicOrPartitionException (org.apache.kafka.common.errors.UnknownTopicOrPartitionException): 7
AlterIsrRequestData (org.apache.kafka.common.message.AlterIsrRequestData): 7
Collections (java.util.Collections): 6
Iterator (java.util.Iterator): 6
Entry (java.util.Map.Entry): 6
NoSuchElementException (java.util.NoSuchElementException): 6
Optional (java.util.Optional): 6