Use of org.apache.kafka.common.utils.ByteBufferInputStream in project apache-kafka-on-k8s by banzaicloud: the class AssignmentInfo, method decode.
/**
 * @throws TaskAssignmentException if method fails to decode the data or if the data version is unknown
 */
public static AssignmentInfo decode(final ByteBuffer data) {
    // ensure we are at the beginning of the ByteBuffer
    data.rewind();
    try (final DataInputStream in = new DataInputStream(new ByteBufferInputStream(data))) {
        // decode used version
        final int usedVersion = in.readInt();
        final AssignmentInfo assignmentInfo = new AssignmentInfo(usedVersion);
        switch (usedVersion) {
            case 1:
                decodeVersionOneData(assignmentInfo, in);
                break;
            case 2:
                decodeVersionTwoData(assignmentInfo, in);
                break;
            default:
                final TaskAssignmentException fatalException = new TaskAssignmentException("Unable to decode subscription data: " +
                    "used version: " + usedVersion + "; latest supported version: " + LATEST_SUPPORTED_VERSION);
                log.error(fatalException.getMessage(), fatalException);
                throw fatalException;
        }
        return assignmentInfo;
    } catch (final IOException ex) {
        throw new TaskAssignmentException("Failed to decode AssignmentInfo", ex);
    }
}
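For context, a minimal sketch of the wrapping pattern the snippet above relies on: ByteBufferInputStream adapts a ByteBuffer to the InputStream API so that a DataInputStream can decode primitives straight out of the buffer. The two-field format and class name below are invented for illustration; only the wrapper classes come from the JDK and Kafka.

import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.kafka.common.utils.ByteBufferInputStream;

public class ByteBufferInputStreamSketch {
    public static void main(String[] args) throws IOException {
        // Encode a version int followed by a long payload field, mimicking a tiny wire format.
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(baos)) {
            out.writeInt(1);
            out.writeLong(42L);
        }
        final ByteBuffer data = ByteBuffer.wrap(baos.toByteArray());

        // Reading through the stream advances the buffer's position, hence the rewind() in decode().
        data.rewind();
        try (DataInputStream in = new DataInputStream(new ByteBufferInputStream(data))) {
            final int version = in.readInt();
            final long payload = in.readLong();
            System.out.println("version=" + version + ", payload=" + payload);
        }
    }
}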
Use of org.apache.kafka.common.utils.ByteBufferInputStream in project kafka by apache: the class DefaultRecordTest, method testInvalidValueSizePartial.
@Test
public void testInvalidValueSizePartial() throws IOException {
    byte attributes = 0;
    long timestampDelta = 2;
    int offsetDelta = 1;
    int sizeOfBodyInBytes = 100;
    // use a value size larger than the full message
    int valueSize = 105;
    ByteBuffer buf = ByteBuffer.allocate(sizeOfBodyInBytes + ByteUtils.sizeOfVarint(sizeOfBodyInBytes));
    ByteUtils.writeVarint(sizeOfBodyInBytes, buf);
    buf.put(attributes);
    ByteUtils.writeVarlong(timestampDelta, buf);
    ByteUtils.writeVarint(offsetDelta, buf);
    // null key
    ByteUtils.writeVarint(-1, buf);
    ByteUtils.writeVarint(valueSize, buf);
    buf.position(buf.limit());
    buf.flip();
    DataInputStream inputStream = new DataInputStream(new ByteBufferInputStream(buf));
    assertThrows(InvalidRecordException.class,
        () -> DefaultRecord.readPartiallyFrom(inputStream, skipArray, 0L, 0L, RecordBatch.NO_SEQUENCE, null));
}
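The test passes because the record declares its own body length up front (sizeOfBodyInBytes), so a declared value size of 105 cannot fit inside a 100-byte body and readPartiallyFrom rejects the record. Below is a sketch of that bounds check, paraphrased rather than copied from DefaultRecord; the class name and the simplified comparison are assumptions.

import java.nio.ByteBuffer;
import org.apache.kafka.common.utils.ByteUtils;

public class ValueSizeBoundsSketch {
    public static void main(String[] args) {
        final int sizeOfBodyInBytes = 100;
        final ByteBuffer buf = ByteBuffer.allocate(16);
        // Declare a value size that is larger than the whole record body.
        ByteUtils.writeVarint(105, buf);
        buf.flip();

        // Re-read the declared size the way a decoder would.
        final int declaredValueSize = ByteUtils.readVarint(buf);

        // The real validation inside DefaultRecord is stricter; this captures the idea.
        if (declaredValueSize > sizeOfBodyInBytes) {
            System.out.println("invalid record: value size " + declaredValueSize
                + " exceeds the declared body of " + sizeOfBodyInBytes + " bytes");
        }
    }
}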
Use of org.apache.kafka.common.utils.ByteBufferInputStream in project kafka by apache: the class DefaultRecordTest, method testInvalidKeySizePartial.
@Test
public void testInvalidKeySizePartial() {
    byte attributes = 0;
    long timestampDelta = 2;
    int offsetDelta = 1;
    int sizeOfBodyInBytes = 100;
    // use a key size larger than the full message
    int keySize = 105;
    ByteBuffer buf = ByteBuffer.allocate(sizeOfBodyInBytes + ByteUtils.sizeOfVarint(sizeOfBodyInBytes));
    ByteUtils.writeVarint(sizeOfBodyInBytes, buf);
    buf.put(attributes);
    ByteUtils.writeVarlong(timestampDelta, buf);
    ByteUtils.writeVarint(offsetDelta, buf);
    ByteUtils.writeVarint(keySize, buf);
    buf.position(buf.limit());
    buf.flip();
    DataInputStream inputStream = new DataInputStream(new ByteBufferInputStream(buf));
    assertThrows(InvalidRecordException.class,
        () -> DefaultRecord.readPartiallyFrom(inputStream, skipArray, 0L, 0L, RecordBatch.NO_SEQUENCE, null));
}
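A side note on the buffer allocation both tests share: the varint length prefix is written in front of the body, so the frame is allocated as prefix size plus body size. A quick check of that arithmetic, using only ByteUtils calls that already appear above (the class name is invented):

import org.apache.kafka.common.utils.ByteUtils;

public class AllocationSketch {
    public static void main(String[] args) {
        final int sizeOfBodyInBytes = 100;
        // The length prefix itself is varint-encoded, so the full frame is prefix + body.
        final int prefixBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes);
        final int total = prefixBytes + sizeOfBodyInBytes;
        System.out.println("prefix=" + prefixBytes + " body=" + sizeOfBodyInBytes + " total=" + total);
    }
}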
Use of org.apache.kafka.common.utils.ByteBufferInputStream in project kafka by apache: the class AssignmentInfo, method decode.
/**
 * @throws TaskAssignmentException if method fails to decode the data or if the data version is unknown
 */
public static AssignmentInfo decode(final ByteBuffer data) {
    // ensure we are at the beginning of the ByteBuffer
    data.rewind();
    try (final DataInputStream in = new DataInputStream(new ByteBufferInputStream(data))) {
        final AssignmentInfo assignmentInfo;
        final int usedVersion = in.readInt();
        final int commonlySupportedVersion;
        switch (usedVersion) {
            case 1:
                // version 1: active and standby tasks only; no host metadata on the wire
                assignmentInfo = new AssignmentInfo(usedVersion, UNKNOWN);
                decodeActiveTasks(assignmentInfo, in);
                decodeStandbyTasks(assignmentInfo, in);
                assignmentInfo.partitionsByHost = new HashMap<>();
                break;
            case 2:
                // version 2 adds the partitions-by-host mapping
                assignmentInfo = new AssignmentInfo(usedVersion, UNKNOWN);
                decodeActiveTasks(assignmentInfo, in);
                decodeStandbyTasks(assignmentInfo, in);
                decodePartitionsByHost(assignmentInfo, in);
                break;
            case 3:
                // versions 3+ also carry the version commonly supported across the group
                commonlySupportedVersion = in.readInt();
                assignmentInfo = new AssignmentInfo(usedVersion, commonlySupportedVersion);
                decodeActiveTasks(assignmentInfo, in);
                decodeStandbyTasks(assignmentInfo, in);
                decodePartitionsByHost(assignmentInfo, in);
                break;
            case 4:
                // version 4 adds the error code
                commonlySupportedVersion = in.readInt();
                assignmentInfo = new AssignmentInfo(usedVersion, commonlySupportedVersion);
                decodeActiveTasks(assignmentInfo, in);
                decodeStandbyTasks(assignmentInfo, in);
                decodePartitionsByHost(assignmentInfo, in);
                assignmentInfo.errCode = in.readInt();
                break;
            case 5:
                // version 5 dictionary-encodes the partitions-by-host mapping
                commonlySupportedVersion = in.readInt();
                assignmentInfo = new AssignmentInfo(usedVersion, commonlySupportedVersion);
                decodeActiveTasks(assignmentInfo, in);
                decodeStandbyTasks(assignmentInfo, in);
                decodePartitionsByHostUsingDictionary(assignmentInfo, in);
                assignmentInfo.errCode = in.readInt();
                break;
            case 6:
                // version 6 encodes active and standby host partitions separately
                commonlySupportedVersion = in.readInt();
                assignmentInfo = new AssignmentInfo(usedVersion, commonlySupportedVersion);
                decodeActiveTasks(assignmentInfo, in);
                decodeStandbyTasks(assignmentInfo, in);
                decodeActiveAndStandbyHostPartitions(assignmentInfo, in);
                assignmentInfo.errCode = in.readInt();
                break;
            case 7:
            case 8:
            case 9:
            case 10:
                // versions 7+ add the next scheduled rebalance time
                commonlySupportedVersion = in.readInt();
                assignmentInfo = new AssignmentInfo(usedVersion, commonlySupportedVersion);
                decodeActiveTasks(assignmentInfo, in);
                decodeStandbyTasks(assignmentInfo, in);
                decodeActiveAndStandbyHostPartitions(assignmentInfo, in);
                assignmentInfo.errCode = in.readInt();
                assignmentInfo.nextRebalanceMs = in.readLong();
                break;
            default:
                final TaskAssignmentException fatalException = new TaskAssignmentException("Unable to decode assignment data: " +
                    "used version: " + usedVersion + "; latest supported version: " + LATEST_SUPPORTED_VERSION);
                log.error(fatalException.getMessage(), fatalException);
                throw fatalException;
        }
        return assignmentInfo;
    } catch (final IOException ex) {
        throw new TaskAssignmentException("Failed to decode AssignmentInfo", ex);
    }
}
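The structure above is the usual evolvable wire-format idiom: read the version int first, then branch on it, with each newer version appending fields after the ones it shares with its predecessors. Below is a toy analogue of that idiom, not the actual Streams layout; the ToyInfo fields, the version cut-offs, and the class names are invented for illustration.

import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.kafka.common.utils.ByteBufferInputStream;

public class VersionedDecodeSketch {

    static final class ToyInfo {
        final int version;
        int errCode;          // present from toy version 2 onward
        long nextRebalanceMs; // present from toy version 3 onward
        ToyInfo(final int version) { this.version = version; }
    }

    // Toy decoder: read the version first, then branch; newer versions only append fields.
    static ToyInfo decode(final ByteBuffer data) {
        data.rewind();
        try (DataInputStream in = new DataInputStream(new ByteBufferInputStream(data))) {
            final int version = in.readInt();
            final ToyInfo info = new ToyInfo(version);
            if (version >= 2) info.errCode = in.readInt();
            if (version >= 3) info.nextRebalanceMs = in.readLong();
            return info;
        } catch (final IOException e) {
            throw new RuntimeException("failed to decode", e);
        }
    }

    public static void main(String[] args) throws IOException {
        // Encode a toy version-3 payload and decode it back.
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(baos)) {
            out.writeInt(3);
            out.writeInt(0);       // errCode
            out.writeLong(1000L);  // nextRebalanceMs
        }
        final ToyInfo info = decode(ByteBuffer.wrap(baos.toByteArray()));
        System.out.println("version=" + info.version + " errCode=" + info.errCode
            + " nextRebalanceMs=" + info.nextRebalanceMs);
    }
}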
Use of org.apache.kafka.common.utils.ByteBufferInputStream in project kafka by apache: the class DefaultRecordTest, method testNullHeaderKeyPartial.
@Test
public void testNullHeaderKeyPartial() {
    byte attributes = 0;
    long timestampDelta = 2;
    int offsetDelta = 1;
    int sizeOfBodyInBytes = 100;
    ByteBuffer buf = ByteBuffer.allocate(sizeOfBodyInBytes + ByteUtils.sizeOfVarint(sizeOfBodyInBytes));
    ByteUtils.writeVarint(sizeOfBodyInBytes, buf);
    buf.put(attributes);
    ByteUtils.writeVarlong(timestampDelta, buf);
    ByteUtils.writeVarint(offsetDelta, buf);
    // null key
    ByteUtils.writeVarint(-1, buf);
    // null value
    ByteUtils.writeVarint(-1, buf);
    ByteUtils.writeVarint(1, buf);
    // null header key not allowed
    ByteUtils.writeVarint(-1, buf);
    buf.position(buf.limit());
    buf.flip();
    DataInputStream inputStream = new DataInputStream(new ByteBufferInputStream(buf));
    assertThrows(InvalidRecordException.class,
        () -> DefaultRecord.readPartiallyFrom(inputStream, skipArray, 0L, 0L, RecordBatch.NO_SEQUENCE, null));
}
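Record keys and values may legitimately be null and are encoded as a length of -1, while a header key must not be null; the decoder treats -1 there as invalid, which is exactly what this test asserts. The -1 marker is cheap on the wire because Kafka's varints are zigzag-encoded. A small sketch, assuming only ByteUtils methods already used above (the class name is invented):

import java.nio.ByteBuffer;
import org.apache.kafka.common.utils.ByteUtils;

public class NullMarkerVarintSketch {
    public static void main(String[] args) {
        final ByteBuffer buf = ByteBuffer.allocate(8);
        // Zigzag encoding maps -1 to 1, so the "null" marker costs a single byte.
        ByteUtils.writeVarint(-1, buf);
        System.out.println("bytes used for -1: " + buf.position());
        System.out.println("sizeOfVarint(-1): " + ByteUtils.sizeOfVarint(-1));
        buf.flip();
        System.out.println("decoded: " + ByteUtils.readVarint(buf));
    }
}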