Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
The class StateChangeFormat, method read.
@Override
public CloseableIterator<StateChange> read(StreamStateHandle handle, long offset) throws IOException {
    FSDataInputStream stream = handle.openInputStream();
    DataInputViewStreamWrapper input = wrap(stream);
    if (stream.getPos() != offset) {
        LOG.debug("seek from {} to {}", stream.getPos(), offset);
        // a freshly opened stream is at position 0, so skipping `offset` bytes
        // moves the stream to the requested offset
        input.skipBytesToRead((int) offset);
    }
    return new CloseableIterator<StateChange>() {
        // header: total number of key groups, then per group the number of
        // changes in that group and the key group id
        int numUnreadGroups = input.readInt();
        int numLeftInGroup = numUnreadGroups-- == 0 ? 0 : input.readInt();
        int keyGroup = numLeftInGroup == 0 ? 0 : input.readInt();

        @Override
        public boolean hasNext() {
            advance();
            return numLeftInGroup > 0;
        }

        // move on to the next key group once the current one is exhausted
        private void advance() {
            if (numLeftInGroup == 0 && numUnreadGroups > 0) {
                numUnreadGroups--;
                try {
                    numLeftInGroup = input.readInt();
                    keyGroup = input.readInt();
                } catch (IOException e) {
                    ExceptionUtils.rethrow(e);
                }
            }
        }

        @Override
        public StateChange next() {
            advance();
            if (numLeftInGroup == 0) {
                throw new NoSuchElementException();
            }
            numLeftInGroup--;
            try {
                return readChange();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }

        // each change is stored as a length-prefixed byte array
        private StateChange readChange() throws IOException {
            int size = input.readInt();
            byte[] bytes = new byte[size];
            IOUtils.readFully(input, bytes, 0, size);
            return new StateChange(keyGroup, bytes);
        }

        @Override
        public void close() throws Exception {
            LOG.trace("close {}", stream);
            stream.close();
        }
    };
}
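For context, a minimal consumer sketch (not from the Flink sources): it assumes an already-obtained CloseableIterator<StateChange> such as the one returned by read() above, and that StateChange exposes the key group id and serialized payload via getKeyGroup() and getChange().

import org.apache.flink.runtime.state.changelog.StateChange;
import org.apache.flink.util.CloseableIterator;

final class StateChangeConsumerSketch {

    // Drain the iterator and close it afterwards; close() also closes the
    // underlying FSDataInputStream opened by read() above.
    static void applyAll(CloseableIterator<StateChange> changes) throws Exception {
        try {
            while (changes.hasNext()) {
                StateChange change = changes.next();
                // getKeyGroup()/getChange() are assumed accessors for the key
                // group id and the length-prefixed payload read above
                apply(change.getKeyGroup(), change.getChange());
            }
        } finally {
            changes.close();
        }
    }

    private static void apply(int keyGroup, byte[] payload) {
        // placeholder: hand the serialized change to the state backend
    }
}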
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
The class BinaryInputFormat, method createStatistics.
/**
* Fill in the statistics. The last modification time and the total input size are prefilled.
*
* @param files The files that are associated with this block input format.
* @param stats The pre-filled statistics.
*/
protected SequentialStatistics createStatistics(List<FileStatus> files, FileBaseStatistics stats) throws IOException {
    if (files.isEmpty()) {
        return null;
    }
    BlockInfo blockInfo = new BlockInfo();
    long totalCount = 0;
    for (FileStatus file : files) {
        // skip files that are too short to even hold a block info trailer
        if (file.getLen() < blockInfo.getInfoSize()) {
            continue;
        }
        FileSystem fs = file.getPath().getFileSystem();
        try (FSDataInputStream fdis = fs.open(file.getPath(), blockInfo.getInfoSize())) {
            // the block info is stored at the very end of the file
            fdis.seek(file.getLen() - blockInfo.getInfoSize());
            blockInfo.read(new DataInputViewStreamWrapper(fdis));
            totalCount += blockInfo.getAccumulatedRecordCount();
        }
    }
    final float avgWidth = totalCount == 0 ? 0 : ((float) stats.getTotalInputSize() / totalCount);
    return new SequentialStatistics(stats.getLastModificationTime(), stats.getTotalInputSize(), avgWidth, totalCount);
}
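The same seek-to-the-trailer pattern works for any fixed-size footer. A standalone sketch of that pattern follows; the footer layout (a single long holding a record count) is made up purely for illustration.

import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.core.fs.FileStatus;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;

import java.io.IOException;

final class FooterReadSketch {

    // Read a footer of `footerSize` bytes from the end of `path`, mirroring how
    // createStatistics() reads the BlockInfo trailer of each file.
    static long readRecordCountFromFooter(Path path, int footerSize) throws IOException {
        FileSystem fs = path.getFileSystem();
        FileStatus status = fs.getFileStatus(path);
        if (status.getLen() < footerSize) {
            throw new IOException("File too short to contain a footer: " + path);
        }
        try (FSDataInputStream in = fs.open(path)) {
            in.seek(status.getLen() - footerSize);
            DataInputViewStreamWrapper view = new DataInputViewStreamWrapper(in);
            // hypothetical footer layout: a single long holding the record count
            return view.readLong();
        }
    }
}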
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
The class AvroParquetRecordFormatTest, method restoreReader.
private <T> StreamFormat.Reader<T> restoreReader(AvroParquetRecordFormat<T> format, Configuration config, Path filePath, long restoredOffset, long splitOffset, long splitLength) throws IOException {
    final FileSystem fileSystem = filePath.getFileSystem();
    final FileStatus fileStatus = fileSystem.getFileStatus(filePath);
    final FSDataInputStream inputStream = fileSystem.open(filePath);
    if (format.isSplittable()) {
        inputStream.seek(splitOffset);
    } else {
        inputStream.seek(0);
        checkArgument(splitLength == fileStatus.getLen());
    }
    return format.restoreReader(config, inputStream, restoredOffset, fileStatus.getLen(), splitOffset + splitLength);
}
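A reader obtained this way is typically drained until read() returns null and then closed. A small sketch of such a loop (the record handling is a placeholder):

import org.apache.flink.connector.file.src.reader.StreamFormat;

import java.io.IOException;

final class ReaderDrainSketch {

    // Pull records from a StreamFormat.Reader until it is exhausted, then close it.
    static <T> long countRecords(StreamFormat.Reader<T> reader) throws IOException {
        long count = 0;
        try {
            T record;
            while ((record = reader.read()) != null) {
                // a real test would assert on the record here
                count++;
            }
        } finally {
            reader.close();
        }
        return count;
    }
}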
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
The class FullSnapshotRestoreOperation, method restoreKeyGroupsInStateHandle.
private SavepointRestoreResult restoreKeyGroupsInStateHandle(@Nonnull KeyGroupsStateHandle keyedStateHandle) throws IOException, StateMigrationException {
    FSDataInputStream currentStateHandleInStream = keyedStateHandle.openInputStream();
    KeyedBackendSerializationProxy<K> serializationProxy =
            readMetaData(new DataInputViewStreamWrapper(currentStateHandleInStream));
    KeyGroupsIterator groupsIterator =
            new KeyGroupsIterator(
                    keyGroupRange,
                    keyedStateHandle,
                    currentStateHandleInStream,
                    serializationProxy.isUsingKeyGroupCompression()
                            ? SnappyStreamCompressionDecorator.INSTANCE
                            : UncompressedStreamCompressionDecorator.INSTANCE);
    return new SavepointRestoreResult(serializationProxy.getStateMetaInfoSnapshots(), groupsIterator);
}
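Under the hood, iterating key groups amounts to seeking the same FSDataInputStream to per-group offsets recorded in the handle. A simplified sketch of that idea, assuming KeyGroupsStateHandle.getOffsetForKeyGroup(int) and an iterable KeyGroupRange (it is not the actual KeyGroupsIterator implementation):

import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.runtime.state.KeyGroupRange;
import org.apache.flink.runtime.state.KeyGroupsStateHandle;

import java.io.IOException;

final class KeyGroupSeekSketch {

    // Visit every key group in `range` by seeking the handle's stream to the
    // offset recorded for that group; reading the group's contents is left out.
    static void seekThroughKeyGroups(KeyGroupsStateHandle handle, KeyGroupRange range) throws IOException {
        try (FSDataInputStream in = handle.openInputStream()) {
            for (int keyGroup : range) {
                long offset = handle.getOffsetForKeyGroup(keyGroup);
                in.seek(offset);
                // key-group data would be deserialized from `in` here,
                // optionally wrapped in a decompressing stream
            }
        }
    }
}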
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
The class AzureFileSystemBehaviorITCase, method testSimpleFileWriteAndRead.
@Test
public void testSimpleFileWriteAndRead() throws Exception {
    // allow up to 30 seconds for the (eventually consistent) store to catch up
    final long deadline = System.nanoTime() + 30_000_000_000L;
    final String testLine = "Hello Upload!";
    final Path path = new Path(getBasePath() + "/test.txt");
    final FileSystem fs = path.getFileSystem();
    try {
        try (FSDataOutputStream out = fs.create(path, FileSystem.WriteMode.OVERWRITE);
                OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
            writer.write(testLine);
        }
        // just in case, wait for the path to exist
        checkPathEventualExistence(fs, path, true, deadline);
        try (FSDataInputStream in = fs.open(path);
                InputStreamReader ir = new InputStreamReader(in, StandardCharsets.UTF_8);
                BufferedReader reader = new BufferedReader(ir)) {
            String line = reader.readLine();
            assertEquals(testLine, line);
        }
    } finally {
        fs.delete(path, false);
    }
    // now the file must be gone
    checkPathEventualExistence(fs, path, false, deadline);
}
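checkPathEventualExistence is a helper from the test base class and is not shown here; one possible shape of such a poll-until-deadline check (a sketch, not the actual Flink implementation) is:

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;

final class EventualExistenceSketch {

    // Poll fs.exists(path) until it matches `expectedExists` or the deadline
    // (a System.nanoTime() timestamp) passes.
    static void checkPathEventualExistence(
            FileSystem fs, Path path, boolean expectedExists, long deadline) throws Exception {
        boolean exists = fs.exists(path);
        while (exists != expectedExists && System.nanoTime() < deadline) {
            Thread.sleep(100L);
            exists = fs.exists(path);
        }
        if (exists != expectedExists) {
            throw new AssertionError(
                    "Path " + path + (expectedExists ? " does not exist" : " still exists"));
        }
    }
}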