use of org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit in project flink by apache.
the class PulsarSourceEnumStateSerializer method deserialize.
@Override
public PulsarSourceEnumState deserialize(int version, byte[] serialized) throws IOException {
    // VERSION 0 deserialization
    try (ByteArrayInputStream bais = new ByteArrayInputStream(serialized);
            DataInputStream in = new DataInputStream(bais)) {
        // Read the state fields back in the order they were written.
        Set<TopicPartition> partitions = deserializeSet(in, deserializePartition(version));
        Set<PulsarPartitionSplit> splits = deserializeSet(in, deserializeSplit(version));
        // Per-reader pending shared splits and per-reader names of splits already assigned.
        Map<Integer, Set<PulsarPartitionSplit>> sharedSplits =
                deserializeMap(in, DataInput::readInt, i -> deserializeSet(i, deserializeSplit(version)));
        Map<Integer, Set<String>> mapping =
                deserializeMap(in, DataInput::readInt, i -> deserializeSet(i, DataInput::readUTF));
        boolean initialized = in.readBoolean();
        return new PulsarSourceEnumState(partitions, splits, sharedSplits, mapping, initialized);
    }
}
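The deserializer leans on length-prefixed collection helpers (deserializeSet, deserializeMap) that pair a DataInput with an element-level reader function such as DataInput::readUTF. As a rough, self-contained sketch of that pattern, a set round-trip could look like the following; the interface names and bodies here are illustrative assumptions, not Flink's actual serde utilities.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

public final class SerdeSketch {

    /** Reader function that may throw IOException, e.g. DataInput::readUTF. */
    @FunctionalInterface
    interface IoReader<T> {
        T read(DataInput in) throws IOException;
    }

    /** Writer function that may throw IOException. */
    @FunctionalInterface
    interface IoWriter<T> {
        void write(DataOutput out, T value) throws IOException;
    }

    /** Write the set size followed by every element. */
    static <T> void serializeSet(DataOutput out, Set<T> set, IoWriter<T> writer) throws IOException {
        out.writeInt(set.size());
        for (T element : set) {
            writer.write(out, element);
        }
    }

    /** Read an int count, then that many elements, into a HashSet. */
    static <T> Set<T> deserializeSet(DataInput in, IoReader<T> reader) throws IOException {
        int size = in.readInt();
        Set<T> set = new HashSet<>();
        for (int i = 0; i < size; i++) {
            set.add(reader.read(in));
        }
        return set;
    }
}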
use of org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit in project flink by apache.
the class SplitsAssignmentState method assignSharedSplits.
/**
 * Every split is shared among all the available readers.
 */
private Map<Integer, List<PulsarPartitionSplit>> assignSharedSplits(List<Integer> pendingReaders) {
    Map<Integer, List<PulsarPartitionSplit>> assignMap = new HashMap<>();

    // Drain the splits from the shared pending list.
    for (Integer reader : pendingReaders) {
        Set<PulsarPartitionSplit> pendingSplits = sharedPendingPartitionSplits.remove(reader);
        if (pendingSplits == null) {
            pendingSplits = new HashSet<>();
        }

        // Add every appended partition this reader has not been assigned yet.
        Set<String> assignedSplits =
                readerAssignedSplits.computeIfAbsent(reader, r -> new HashSet<>());
        for (TopicPartition partition : appendedPartitions) {
            String partitionName = partition.toString();
            if (!assignedSplits.contains(partitionName)) {
                pendingSplits.add(createSplit(partition));
                assignedSplits.add(partitionName);
            }
        }

        if (!pendingSplits.isEmpty()) {
            assignMap.put(reader, new ArrayList<>(pendingSplits));
        }
    }

    return assignMap;
}
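The key invariant is the de-duplication bookkeeping: readerAssignedSplits remembers which partitions a reader has already received, so repeated assignment rounds never hand the same partition to the same reader twice. Below is a stand-alone sketch of that bookkeeping, using plain strings instead of TopicPartition/PulsarPartitionSplit; all names are illustrative only.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class SharedAssignSketch {
    public static void main(String[] args) {
        // Partitions discovered so far (appendedPartitions in the class above).
        Set<String> appendedPartitions = new LinkedHashSet<>(Arrays.asList("topic-0", "topic-1"));
        // reader id -> names of splits the reader has already been given.
        Map<Integer, Set<String>> readerAssignedSplits = new HashMap<>();
        List<Integer> pendingReaders = Arrays.asList(0, 1);

        for (int round = 1; round <= 2; round++) {
            Map<Integer, List<String>> assignMap = new HashMap<>();
            for (Integer reader : pendingReaders) {
                Set<String> assigned =
                        readerAssignedSplits.computeIfAbsent(reader, r -> new HashSet<>());
                List<String> pending = new ArrayList<>();
                for (String partition : appendedPartitions) {
                    if (assigned.add(partition)) { // true only the first time per reader
                        pending.add(partition);
                    }
                }
                if (!pending.isEmpty()) {
                    assignMap.put(reader, pending);
                }
            }
            // Round 1 assigns both partitions to both readers; round 2 assigns nothing new.
            System.out.println("round " + round + ": " + assignMap);
        }
    }
}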
use of org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit in project flink by apache.
the class SplitsAssignmentState method assignNormalSplits.
// ----------------- private methods -------------------
/**
 * These splits are not shared; each split is assigned to exactly one reader.
 */
private Map<Integer, List<PulsarPartitionSplit>> assignNormalSplits(List<Integer> pendingReaders) {
    Map<Integer, List<PulsarPartitionSplit>> assignMap = new HashMap<>();

    // Drain a list of splits.
    List<PulsarPartitionSplit> pendingSplits = drainPendingPartitionsSplits();
    for (int i = 0; i < pendingSplits.size(); i++) {
        PulsarPartitionSplit split = pendingSplits.get(i);
        int readerId = pendingReaders.get(i % pendingReaders.size());
        assignMap.computeIfAbsent(readerId, id -> new ArrayList<>()).add(split);
    }

    return assignMap;
}
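The distribution is a plain round robin over the pending readers via the index modulo. A tiny stand-alone sketch of the same arithmetic, with strings standing in for PulsarPartitionSplit (names illustrative only): five splits over two readers end up as reader 0 -> [s0, s2, s4] and reader 1 -> [s1, s3].

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinSketch {
    public static void main(String[] args) {
        List<String> pendingSplits = Arrays.asList("s0", "s1", "s2", "s3", "s4");
        List<Integer> pendingReaders = Arrays.asList(0, 1);

        Map<Integer, List<String>> assignMap = new HashMap<>();
        for (int i = 0; i < pendingSplits.size(); i++) {
            // Split i goes to reader (i mod number-of-readers).
            int readerId = pendingReaders.get(i % pendingReaders.size());
            assignMap.computeIfAbsent(readerId, id -> new ArrayList<>()).add(pendingSplits.get(i));
        }
        // reader 0 -> [s0, s2, s4], reader 1 -> [s1, s3]
        System.out.println(assignMap);
    }
}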
use of org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit in project flink by apache.
the class PulsarFetcherManagerBase method addSplits.
/**
 * Overridden to support multi-threaded fetching, with one fetcher thread per split.
 */
@Override
public void addSplits(List<PulsarPartitionSplit> splitsToAdd) {
    for (PulsarPartitionSplit split : splitsToAdd) {
        SplitFetcher<PulsarMessage<T>, PulsarPartitionSplit> fetcher =
                getOrCreateFetcher(split.splitId());
        fetcher.addSplits(singletonList(split));

        // This method could be executed multiple times.
        startFetcher(fetcher);
    }
}
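getOrCreateFetcher implies a per-split-id cache of fetchers that is populated lazily on first use. A minimal sketch of that caching pattern, with a placeholder Fetcher type standing in for Flink's SplitFetcher (the class and names below are illustrative assumptions):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class FetcherCacheSketch {

    /** Placeholder for a fetch loop bound to a single split; not Flink's SplitFetcher. */
    static final class Fetcher {
        final String splitId;

        Fetcher(String splitId) {
            this.splitId = splitId;
        }
    }

    private final Map<String, Fetcher> fetchers = new ConcurrentHashMap<>();

    /** Return the fetcher already registered for this split id, creating one on first use. */
    Fetcher getOrCreateFetcher(String splitId) {
        return fetchers.computeIfAbsent(splitId, Fetcher::new);
    }
}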
use of org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit in project flink by apache.
the class PulsarSourceReaderTestBase method assigningEmptySplits.
@TestTemplate
void assigningEmptySplits(
        PulsarSourceReaderBase<Integer> reader, Boundedness boundedness, String topicName)
        throws Exception {
    final PulsarPartitionSplit emptySplit =
            createPartitionSplit(topicName, 0, Boundedness.CONTINUOUS_UNBOUNDED, MessageId.latest);
    reader.addSplits(Collections.singletonList(emptySplit));

    // The split is empty, so the reader has nothing to emit.
    TestingReaderOutput<Integer> output = new TestingReaderOutput<>();
    InputStatus status = reader.pollNext(output);
    assertThat(status).isEqualTo(InputStatus.NOTHING_AVAILABLE);

    reader.close();
}
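For splits that do contain data, a test usually polls in a loop until the reader momentarily runs out of records. A small helper in that spirit, assuming Flink's generic SourceReader/ReaderOutput interfaces; the helper itself is an illustrative sketch, not part of the Flink test base.

import org.apache.flink.api.connector.source.ReaderOutput;
import org.apache.flink.api.connector.source.SourceReader;
import org.apache.flink.core.io.InputStatus;

public final class PollUtil {

    /** Poll until the reader momentarily runs out of data or reaches the end of input. */
    static <T> InputStatus pollUntilIdle(SourceReader<T, ?> reader, ReaderOutput<T> output)
            throws Exception {
        InputStatus status;
        do {
            status = reader.pollNext(output);
        } while (status == InputStatus.MORE_AVAILABLE);
        return status;
    }
}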