Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
Class FileRecords, method writeTo:
@Override
public long writeTo(GatheringByteChannel destChannel, long offset, int length) throws IOException {
    long newSize = Math.min(channel.size(), end) - start;
    int oldSize = sizeInBytes();
    // Guard against the underlying file having shrunk between the size check and the transfer.
    if (newSize < oldSize)
        throw new KafkaException(String.format("Size of FileRecords %s has been truncated during write: old size %d, new size %d",
                file.getAbsolutePath(), oldSize, newSize));

    long position = start + offset;
    int count = Math.min(length, oldSize);
    final long bytesTransferred;
    if (destChannel instanceof TransportLayer) {
        // Let the transport layer perform the transfer itself (e.g. so an SSL transport can encrypt the data).
        TransportLayer tl = (TransportLayer) destChannel;
        bytesTransferred = tl.transferFrom(channel, position, count);
    } else {
        bytesTransferred = channel.transferTo(position, count, destChannel);
    }
    return bytesTransferred;
}
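The transferTo path above is standard java.nio. Below is a minimal, self-contained sketch of the same pattern using only the JDK; the file names and the writeSlice helper are hypothetical illustrations, not part of FileRecords.

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.GatheringByteChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class TransferToSketch {

    // Hypothetical helper mirroring writeTo: copy up to `length` bytes starting
    // at `start + offset` from `src` into `dest`, failing if `src` shrank below
    // the expected size.
    static long writeSlice(FileChannel src, long start, long end,
                           GatheringByteChannel dest, long offset, int length) throws IOException {
        long currentSize = Math.min(src.size(), end) - start;
        long expectedSize = end - start;
        if (currentSize < expectedSize)
            throw new IllegalStateException(String.format(
                    "Source truncated during write: expected %d bytes, found %d", expectedSize, currentSize));
        // transferTo may transfer fewer bytes than requested; real callers loop until done.
        return src.transferTo(start + offset, Math.min(length, expectedSize - offset), dest);
    }

    public static void main(String[] args) throws IOException {
        // Hypothetical file names, for illustration only.
        try (FileChannel src = FileChannel.open(Path.of("segment.log"), StandardOpenOption.READ);
             FileChannel dest = FileChannel.open(Path.of("segment-copy.log"),
                     StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            long transferred = writeSlice(src, 0, src.size(), dest, 0, 4096);
            System.out.println("Transferred " + transferred + " bytes");
        }
    }
}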
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
Class DefaultRecordBatch, method compressedIterator:
private CloseableIterator<Record> compressedIterator(BufferSupplier bufferSupplier) {
    final ByteBuffer buffer = this.buffer.duplicate();
    buffer.position(RECORDS_OFFSET);
    final DataInputStream inputStream = new DataInputStream(
            compressionType().wrapForInput(buffer, magic(), bufferSupplier));

    return new RecordIterator() {
        @Override
        protected Record readNext(long baseOffset, long firstTimestamp, int baseSequence, Long logAppendTime) {
            try {
                return DefaultRecord.readFrom(inputStream, baseOffset, firstTimestamp, baseSequence, logAppendTime);
            } catch (EOFException e) {
                throw new InvalidRecordException("Incorrect declared batch size, premature EOF reached");
            } catch (IOException e) {
                throw new KafkaException("Failed to decompress record stream", e);
            }
        }

        @Override
        protected boolean ensureNoneRemaining() {
            try {
                return inputStream.read() == -1;
            } catch (IOException e) {
                throw new KafkaException("Error checking for remaining bytes after reading batch", e);
            }
        }

        @Override
        public void close() {
            try {
                inputStream.close();
            } catch (IOException e) {
                throw new KafkaException("Failed to close record stream", e);
            }
        }
    };
}
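As a rough illustration of the same wrap-a-buffer-for-decompression idea, here is a self-contained JDK-only sketch that uses GZIP in place of Kafka's CompressionType and translates EOF and IO errors the way compressedIterator does. The length-prefixed payload framing is invented for the example.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class CompressedReadSketch {

    // Reads length-prefixed payloads from a compressed stream.
    static void readAll(byte[] compressed) {
        try (DataInputStream in = new DataInputStream(
                new GZIPInputStream(new ByteArrayInputStream(compressed)))) {
            while (true) {
                int length;
                try {
                    length = in.readInt();      // declared payload size
                } catch (EOFException e) {
                    return;                     // clean end of stream
                }
                byte[] payload = new byte[length];
                try {
                    in.readFully(payload);      // premature EOF here means a bad declared size
                } catch (EOFException e) {
                    throw new IllegalStateException("Incorrect declared size, premature EOF reached", e);
                }
                System.out.println(new String(payload));
            }
        } catch (IOException e) {
            throw new RuntimeException("Failed to decompress record stream", e);
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream raw = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(new GZIPOutputStream(raw))) {
            for (String s : new String[] { "first", "second" }) {
                byte[] bytes = s.getBytes();
                out.writeInt(bytes.length);
                out.write(bytes);
            }
        }
        readAll(raw.toByteArray());
    }
}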
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
Class LegacyRecord, method write:
private static void write(ByteBuffer buffer, byte magic, long timestamp, ByteBuffer key, ByteBuffer value,
                          CompressionType compressionType, TimestampType timestampType) {
    try {
        DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));
        write(out, magic, timestamp, key, value, compressionType, timestampType);
    } catch (IOException e) {
        throw new KafkaException(e);
    }
}
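The ByteBufferOutputStream adapter is what lets the IOException-declaring DataOutputStream API write into an in-memory buffer, with the checked exception rewrapped unchecked at the boundary. A minimal stand-in, assuming a fixed-size buffer (Kafka's real class also grows the buffer on demand); the class and method names here are hypothetical:

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

public class BufferWriteSketch {

    // Minimal stand-in for Kafka's ByteBufferOutputStream: adapts a ByteBuffer
    // so stream-oriented writers can target it.
    static final class ByteBufferBackedOutputStream extends OutputStream {
        private final ByteBuffer buffer;

        ByteBufferBackedOutputStream(ByteBuffer buffer) {
            this.buffer = buffer;
        }

        @Override
        public void write(int b) {
            buffer.put((byte) b);
        }

        @Override
        public void write(byte[] bytes, int off, int len) {
            buffer.put(bytes, off, len);
        }
    }

    // Invented record layout, for illustration only.
    static void writeRecord(ByteBuffer buffer, long timestamp, byte[] key, byte[] value) {
        try {
            DataOutputStream out = new DataOutputStream(new ByteBufferBackedOutputStream(buffer));
            out.writeLong(timestamp);
            out.writeInt(key.length);
            out.write(key);
            out.writeInt(value.length);
            out.write(value);
            out.flush();
        } catch (IOException e) {
            // In-memory writes cannot actually fail with IO, but the stream API declares it.
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);
        writeRecord(buf, System.currentTimeMillis(), "k".getBytes(), "v".getBytes());
        System.out.println("Wrote " + buf.position() + " bytes");
    }
}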
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
Class PlaintextChannelBuilder, method buildChannel:
@Override
public KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize, MemoryPool memoryPool) throws KafkaException {
    try {
        PlaintextTransportLayer transportLayer = new PlaintextTransportLayer(key);
        PlaintextAuthenticator authenticator = new PlaintextAuthenticator(configs, transportLayer);
        return new KafkaChannel(id, transportLayer, authenticator, maxReceiveSize,
                memoryPool != null ? memoryPool : MemoryPool.NONE);
    } catch (Exception e) {
        log.warn("Failed to create channel", e);
        throw new KafkaException(e);
    }
}
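The shape here is a factory that converts any checked construction failure into a single unchecked domain exception. A generic sketch of that idea, with hypothetical names (ChannelBuildException, buildOrWrap) and a plain Socket standing in for the Kafka channel; the host and port are invented:

import java.io.IOException;
import java.net.Socket;
import java.util.concurrent.Callable;

public class ChannelBuildSketch {

    static class ChannelBuildException extends RuntimeException {
        ChannelBuildException(Throwable cause) {
            super(cause);
        }
    }

    // Runs a factory that may throw checked exceptions and rewraps any failure
    // unchecked, mirroring buildChannel's catch-all.
    static <T> T buildOrWrap(Callable<T> factory) {
        try {
            return factory.call();
        } catch (Exception e) {
            System.err.println("Failed to create channel: " + e);
            throw new ChannelBuildException(e);
        }
    }

    public static void main(String[] args) throws IOException {
        // Hypothetical host and port, for illustration only.
        try (Socket socket = buildOrWrap(() -> new Socket("localhost", 9092))) {
            System.out.println("Connected: " + socket.getRemoteSocketAddress());
        }
    }
}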
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
Class SaslChannelBuilder, method configure:
@Override
public void configure(Map<String, ?> configs) throws KafkaException {
    try {
        this.configs = configs;
        boolean hasKerberos = jaasContexts.containsKey(SaslConfigs.GSSAPI_MECHANISM);
        if (hasKerberos) {
            String defaultRealm;
            try {
                defaultRealm = defaultKerberosRealm();
            } catch (Exception ke) {
                defaultRealm = "";
            }
            @SuppressWarnings("unchecked")
            List<String> principalToLocalRules = (List<String>) configs.get(BrokerSecurityConfigs.SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONFIG);
            if (principalToLocalRules != null)
                kerberosShortNamer = KerberosShortNamer.fromUnparsedRules(defaultRealm, principalToLocalRules);
        }
        for (Map.Entry<String, JaasContext> entry : jaasContexts.entrySet()) {
            String mechanism = entry.getKey();
            // With static JAAS configuration, use KerberosLogin if Kerberos is enabled. With dynamic JAAS
            // configuration, use KerberosLogin only for the LoginContext corresponding to GSSAPI.
            LoginManager loginManager = LoginManager.acquireLoginManager(entry.getValue(), mechanism, hasKerberos, configs);
            loginManagers.put(mechanism, loginManager);
            subjects.put(mechanism, loginManager.subject());
        }
        if (this.securityProtocol == SecurityProtocol.SASL_SSL) {
            // Disable SSL client authentication as we are using SASL authentication.
            this.sslFactory = new SslFactory(mode, "none", isInterBrokerListener);
            this.sslFactory.configure(configs);
        }
    } catch (Exception e) {
        close();
        throw new KafkaException(e);
    }
}
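Note the catch block: configure acquires a LoginManager per mechanism, and on any failure it calls close() so partially acquired resources are released before rethrowing. A stripped-down sketch of that acquire-or-release-all shape, with hypothetical Login and acquire names standing in for LoginManager and its factory:

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class ConfigureSketch {

    interface Login extends AutoCloseable {
        @Override
        void close(); // narrowed so close() needs no try/catch
    }

    private final Map<String, Login> loginManagers = new LinkedHashMap<>();

    // Acquire one Login per mechanism; if any acquisition fails midway,
    // release everything acquired so far, then rethrow unchecked.
    void configure(List<String> mechanisms, Function<String, Login> acquire) {
        try {
            for (String mechanism : mechanisms)
                loginManagers.put(mechanism, acquire.apply(mechanism));
        } catch (Exception e) {
            close();
            throw new RuntimeException(e);
        }
    }

    void close() {
        loginManagers.values().forEach(Login::close);
        loginManagers.clear();
    }

    public static void main(String[] args) {
        ConfigureSketch sketch = new ConfigureSketch();
        sketch.configure(List.of("PLAIN", "GSSAPI"),
                mechanism -> () -> System.out.println("closing login for " + mechanism));
        sketch.close();
    }
}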