Example usage of org.apache.flink.connector.file.table.ContinuousPartitionFetcher in the Apache Flink project.
Source: the preparePartitionMonitor method of the PartitionMonitorTest class.
/**
 * Builds the {@code partitionMonitor} under test: a {@link
 * ContinuousHiveSplitEnumerator.PartitionMonitor} starting at offset 0 with an empty
 * seen-partition list, wired to a stub fetcher that serves partitions from
 * {@code testPartitionWithOffset} and a context that only implements partition conversion.
 */
private void preparePartitionMonitor() {
    List<List<String>> alreadySeenPartitions = new ArrayList<>();
    ObjectPath monitoredTable = new ObjectPath("testDb", "testTable");
    JobConf hiveJobConf = new JobConf();
    Configuration flinkConf = new Configuration();
    // The monitor should discover partitions ordered by their create time.
    flinkConf.setString("streaming-source.consume-order", "create-time");

    // Minimal context stub: only toHiveTablePartition is exercised by the monitor in
    // these tests, so every other callback returns null / empty or is a no-op.
    HiveContinuousPartitionContext<Partition, Long> partitionContext =
            new HiveContinuousPartitionContext<Partition, Long>() {
                @Override
                public HiveTablePartition toHiveTablePartition(Partition partition) {
                    // Each entry in getValues() is expected to be a "key=value" pair;
                    // anything else is a malformed test fixture and fails fast.
                    Map<String, String> partitionSpec = new HashMap<>();
                    for (String keyValue : partition.getValues()) {
                        String[] pieces = keyValue.split("=");
                        Asserts.check(pieces.length == 2, "partition string should be key=value format");
                        partitionSpec.put(pieces[0], pieces[1]);
                    }
                    return new HiveTablePartition(partition.getSd(), partitionSpec, new Properties());
                }

                @Override
                public ObjectPath getTablePath() {
                    return null;
                }

                @Override
                public TypeSerializer<Long> getTypeSerializer() {
                    return null;
                }

                @Override
                public Long getConsumeStartOffset() {
                    return null;
                }

                @Override
                public void open() throws Exception {
                }

                @Override
                public Optional<Partition> getPartition(List<String> partValues) throws Exception {
                    return Optional.empty();
                }

                @Override
                public List<ComparablePartitionValue> getComparablePartitionValueList() throws Exception {
                    return null;
                }

                @Override
                public void close() throws Exception {
                }
            };

    ContinuousPartitionFetcher<Partition, Long> fetcher =
            new ContinuousPartitionFetcher<Partition, Long>() {
                private static final long serialVersionUID = 1L;

                @Override
                public List<Tuple2<Partition, Long>> fetchPartitions(
                        Context<Partition, Long> context, Long previousOffset) throws Exception {
                    // Serve every fixture partition created at or after the previous
                    // offset, paired with its create time as the new offset.
                    List<Tuple2<Partition, Long>> discovered = new ArrayList<>();
                    for (Partition partition : testPartitionWithOffset) {
                        long createTime = partition.getCreateTime();
                        if (createTime >= previousOffset) {
                            discovered.add(Tuple2.of(partition, createTime));
                        }
                    }
                    return discovered;
                }

                @Override
                public List<Partition> fetch(PartitionFetcher.Context<Partition> context) throws Exception {
                    // Bounded fetch is not exercised by this test.
                    return null;
                }
            };

    partitionMonitor =
            new ContinuousHiveSplitEnumerator.PartitionMonitor<>(
                    0L,
                    alreadySeenPartitions,
                    monitoredTable,
                    flinkConf,
                    hiveJobConf,
                    fetcher,
                    partitionContext);
}
Aggregations