Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class ConfigurationControlManager, method legacyAlterConfigResource.
private void legacyAlterConfigResource(ConfigResource configResource,
                                       Map<String, String> newConfigs,
                                       Consumer<ConfigResource> existenceChecker,
                                       List<ApiMessageAndVersion> outputRecords,
                                       Map<ConfigResource, ApiError> outputResults) {
    List<ApiMessageAndVersion> newRecords = new ArrayList<>();
    Map<String, String> currentConfigs = configData.get(configResource);
    if (currentConfigs == null) {
        currentConfigs = Collections.emptyMap();
    }
    // Emit a ConfigRecord for every key whose value differs from the current one.
    for (Entry<String, String> entry : newConfigs.entrySet()) {
        String key = entry.getKey();
        String newValue = entry.getValue();
        String currentValue = currentConfigs.get(key);
        if (!Objects.equals(newValue, currentValue)) {
            newRecords.add(new ApiMessageAndVersion(new ConfigRecord()
                .setResourceType(configResource.type().id())
                .setResourceName(configResource.name())
                .setName(key)
                .setValue(newValue), CONFIG_RECORD.highestSupportedVersion()));
        }
    }
    // Legacy alter replaces the whole config set: any existing key missing from
    // newConfigs is deleted by writing a ConfigRecord with a null value.
    for (String key : currentConfigs.keySet()) {
        if (!newConfigs.containsKey(key)) {
            newRecords.add(new ApiMessageAndVersion(new ConfigRecord()
                .setResourceType(configResource.type().id())
                .setResourceName(configResource.name())
                .setName(key)
                .setValue(null), CONFIG_RECORD.highestSupportedVersion()));
        }
    }
    ApiError error = validateAlterConfig(configResource, newRecords, existenceChecker);
    if (error.isFailure()) {
        outputResults.put(configResource, error);
        return;
    }
    outputRecords.addAll(newRecords);
    outputResults.put(configResource, ApiError.NONE);
}
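The two loops implement replace-the-whole-config semantics: changed or added keys become set records, and any existing key absent from the new map becomes a delete record (a null value). A minimal standalone sketch of that diff rule over plain maps; the LegacyConfigDiffSketch class and diffLegacyConfigs helper are hypothetical names, not the controller's code:

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public final class LegacyConfigDiffSketch {
    // Returns key -> new value, where a null value means "delete this key".
    static Map<String, String> diffLegacyConfigs(Map<String, String> current,
                                                 Map<String, String> desired) {
        Map<String, String> out = new HashMap<>();
        // Changed or newly added keys become set operations.
        desired.forEach((key, value) -> {
            if (!Objects.equals(value, current.get(key))) {
                out.put(key, value);
            }
        });
        // Keys absent from the desired map become deletes, encoded as null values.
        current.keySet().forEach(key -> {
            if (!desired.containsKey(key)) {
                out.put(key, null);
            }
        });
        return out;
    }
}

Each resulting entry would correspond to one ConfigRecord wrapped in an ApiMessageAndVersion, as in the method above.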
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class AbstractApiMessageSerde, method read.
@Override
public ApiMessageAndVersion read(Readable input, int size) {
    short frameVersion = unsignedIntToShort(input, "frame version");
    if (frameVersion == 0) {
        throw new MetadataParseException("Could not deserialize metadata record with frame version 0. " +
            "Note that upgrades from the preview release of KRaft in 2.8 to newer versions are not supported.");
    } else if (frameVersion != DEFAULT_FRAME_VERSION) {
        throw new MetadataParseException("Could not deserialize metadata record due to unknown frame version " +
            frameVersion + " (only frame version " + DEFAULT_FRAME_VERSION + " is supported)");
    }
    short apiKey = unsignedIntToShort(input, "type");
    short version = unsignedIntToShort(input, "version");
    ApiMessage record;
    try {
        record = apiMessageFor(apiKey);
    } catch (Exception e) {
        throw new MetadataParseException(e);
    }
    try {
        record.read(input, version);
    } catch (Exception e) {
        throw new MetadataParseException("Failed to deserialize record with type " + apiKey, e);
    }
    if (input.remaining() > 0) {
        throw new MetadataParseException("Found " + input.remaining() + " byte(s) of garbage after " + apiKey);
    }
    return new ApiMessageAndVersion(record, version);
}
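read() expects a small header ahead of the record payload: a frame version (which must equal DEFAULT_FRAME_VERSION), the record's api key, and the record version, followed by the serialized record itself. Assuming those header fields are unsigned varints (as the unsignedIntToShort helper suggests), a sketch of producing that layout with a plain ByteBuffer could look like the following; MetadataFrameSketch, writeUnsignedVarint, and frame are illustrative names, not Kafka's serializer:

import java.nio.ByteBuffer;

public final class MetadataFrameSketch {
    // Hypothetical helper: standard unsigned-varint encoding, 7 bits per byte.
    static void writeUnsignedVarint(ByteBuffer buf, int value) {
        while ((value & 0xFFFFFF80) != 0) {
            buf.put((byte) ((value & 0x7F) | 0x80));
            value >>>= 7;
        }
        buf.put((byte) value);
    }

    // Lays out [frame version][api key][record version][record payload].
    static ByteBuffer frame(short frameVersion, short apiKey, short version, byte[] payload) {
        ByteBuffer buf = ByteBuffer.allocate(16 + payload.length);
        writeUnsignedVarint(buf, frameVersion); // must match DEFAULT_FRAME_VERSION for read() above
        writeUnsignedVarint(buf, apiKey);       // selects which ApiMessage apiMessageFor() builds
        writeUnsignedVarint(buf, version);      // the version passed to record.read()
        buf.put(payload);                       // the serialized record itself
        buf.flip();
        return buf;
    }
}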
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class LocalLogManagerTest, method testCommits.
/**
* Test that all the log managers see all the commits.
*/
@Test
public void testCommits() throws Exception {
    try (LocalLogManagerTestEnv env = LocalLogManagerTestEnv.createWithMockListeners(3, Optional.empty())) {
        LeaderAndEpoch leaderInfo = env.waitForLeader();
        int leaderId = leaderInfo.leaderId().orElseThrow(
            () -> new AssertionError("Current leader is undefined"));
        LocalLogManager activeLogManager = env.logManagers().get(leaderId);
        int epoch = activeLogManager.leaderAndEpoch().epoch();
        List<ApiMessageAndVersion> messages = Arrays.asList(
            new ApiMessageAndVersion(new RegisterBrokerRecord().setBrokerId(0), (short) 0),
            new ApiMessageAndVersion(new RegisterBrokerRecord().setBrokerId(1), (short) 0),
            new ApiMessageAndVersion(new RegisterBrokerRecord().setBrokerId(2), (short) 0));
        assertEquals(3, activeLogManager.scheduleAppend(epoch, messages));
        for (LocalLogManager logManager : env.logManagers()) {
            waitForLastCommittedOffset(3, logManager);
        }
        List<MockMetaLogManagerListener> listeners = env.logManagers().stream()
            .map(m -> (MockMetaLogManagerListener) m.listeners().get(0))
            .collect(Collectors.toList());
        env.close();
        for (MockMetaLogManagerListener listener : listeners) {
            List<String> events = listener.serializedEvents();
            assertEquals(SHUTDOWN, events.get(events.size() - 1));
            int foundIndex = 0;
            for (String event : events) {
                if (event.startsWith(COMMIT)) {
                    assertEquals(messages.get(foundIndex).message().toString(),
                        event.substring(COMMIT.length() + 1));
                    foundIndex++;
                }
            }
            assertEquals(messages.size(), foundIndex);
        }
    }
}
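The three RegisterBrokerRecord messages could equally be built with a stream; this is only an alternative sketch (using java.util.stream.IntStream), not how the test is written:

List<ApiMessageAndVersion> messages = IntStream.range(0, 3)
    .mapToObj(id -> new ApiMessageAndVersion(
        new RegisterBrokerRecord().setBrokerId(id), (short) 0))
    .collect(Collectors.toList());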
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class MockMetaLogManagerListener, method handleCommit.
@Override
public synchronized void handleCommit(BatchReader<ApiMessageAndVersion> reader) {
    try {
        while (reader.hasNext()) {
            Batch<ApiMessageAndVersion> batch = reader.next();
            long lastCommittedOffset = batch.lastOffset();
            for (ApiMessageAndVersion messageAndVersion : batch.records()) {
                ApiMessage message = messageAndVersion.message();
                StringBuilder bld = new StringBuilder();
                bld.append(COMMIT).append(" ").append(message.toString());
                serializedEvents.add(bld.toString());
            }
            StringBuilder bld = new StringBuilder();
            bld.append(LAST_COMMITTED_OFFSET).append(" ").append(lastCommittedOffset);
            serializedEvents.add(bld.toString());
        }
    } finally {
        reader.close();
    }
}
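Assuming BatchReader implements AutoCloseable with an unchecked close() (which the explicit reader.close() in a finally block suggests), the same listener could be written with try-with-resources and plain string concatenation; this is an equivalent sketch, not the test code as it appears in Kafka:

@Override
public synchronized void handleCommit(BatchReader<ApiMessageAndVersion> reader) {
    try (BatchReader<ApiMessageAndVersion> closing = reader) {
        while (closing.hasNext()) {
            Batch<ApiMessageAndVersion> batch = closing.next();
            for (ApiMessageAndVersion messageAndVersion : batch.records()) {
                // Record each committed message as "COMMIT <record>".
                serializedEvents.add(COMMIT + " " + messageAndVersion.message());
            }
            // Record the batch boundary as "LAST_COMMITTED_OFFSET <offset>".
            serializedEvents.add(LAST_COMMITTED_OFFSET + " " + batch.lastOffset());
        }
    }
}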
Use of org.apache.kafka.server.common.ApiMessageAndVersion in project kafka by apache.
The class MockMetaLogManagerListener, method handleSnapshot.
@Override
public synchronized void handleSnapshot(SnapshotReader<ApiMessageAndVersion> reader) {
    long lastCommittedOffset = reader.lastContainedLogOffset();
    try {
        while (reader.hasNext()) {
            Batch<ApiMessageAndVersion> batch = reader.next();
            for (ApiMessageAndVersion messageAndVersion : batch.records()) {
                ApiMessage message = messageAndVersion.message();
                StringBuilder bld = new StringBuilder();
                bld.append(SNAPSHOT).append(" ").append(message.toString());
                serializedEvents.add(bld.toString());
            }
            StringBuilder bld = new StringBuilder();
            bld.append(LAST_COMMITTED_OFFSET).append(" ").append(lastCommittedOffset);
            serializedEvents.add(bld.toString());
        }
    } finally {
        reader.close();
    }
}
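One difference from handleCommit is worth noting: handleCommit tags each batch with that batch's own lastOffset(), while handleSnapshot tags every batch with the reader's single lastContainedLogOffset(), presumably because all batches in a snapshot describe state up to the same contained log offset. handleSnapshot also prefixes events with SNAPSHOT rather than COMMIT, which is what lets tests such as testCommits filter events by prefix.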