Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
The class AbstractFsStateSnapshot, method deserialize.
@Override
@SuppressWarnings("unchecked")
public StateTable<K, N, SV> deserialize(String stateName, HeapKeyedStateBackend<K> stateBackend) throws IOException {
    final FileSystem fs = getFilePath().getFileSystem();
    try (FSDataInputStream inStream = fs.open(getFilePath())) {
        final DataInputViewStreamWrapper inView = new DataInputViewStreamWrapper(inStream);
        AbstractMigrationRestoreStrategy<K, N, SV> restoreStrategy =
                new AbstractMigrationRestoreStrategy<K, N, SV>(keySerializer, namespaceSerializer, stateSerializer) {

                    @Override
                    protected DataInputView openDataInputView() throws IOException {
                        return inView;
                    }
                };
        return restoreStrategy.deserialize(stateName, stateBackend);
    }
}
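The snippet above combines two recurring idioms: opening an FSDataInputStream through the Path's FileSystem, and wrapping it in a DataInputViewStreamWrapper so Flink's serialization layer can read from it. Below is a minimal, self-contained sketch of that pattern; the file path and the long value read back are assumptions for illustration, not part of the Flink source.

import java.io.IOException;
import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;

public class ReadWithDataInputView {

    public static void main(String[] args) throws IOException {
        // hypothetical path; any Flink-supported scheme (file://, hdfs://, ...) works
        Path path = new Path("file:///tmp/example-state");
        FileSystem fs = path.getFileSystem();
        // try-with-resources closes the FSDataInputStream, as in the snippet above
        try (FSDataInputStream in = fs.open(path)) {
            DataInputViewStreamWrapper view = new DataInputViewStreamWrapper(in);
            // DataInputView extends java.io.DataInput, so primitive reads are available
            long firstValue = view.readLong();
            System.out.println("first long in file: " + firstValue);
        }
    }
}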
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
The class BlobClient, method uploadJarFiles.
/**
 * Uploads the JAR files to a {@link BlobServer} at the given address.
 *
 * @param serverAddress Server address of the {@link BlobServer}
 * @param clientConfig Any additional configuration for the blob client
 * @param jars List of JAR files to upload
 * @return The list of BLOB keys identifying the uploaded JAR files
 * @throws IOException Thrown if the upload fails
 */
public static List<BlobKey> uploadJarFiles(InetSocketAddress serverAddress, Configuration clientConfig, List<Path> jars) throws IOException {
    if (jars.isEmpty()) {
        return Collections.emptyList();
    } else {
        List<BlobKey> blobKeys = new ArrayList<>();
        try (BlobClient blobClient = new BlobClient(serverAddress, clientConfig)) {
            for (final Path jar : jars) {
                final FileSystem fs = jar.getFileSystem();
                FSDataInputStream is = null;
                try {
                    is = fs.open(jar);
                    final BlobKey key = blobClient.put(is);
                    blobKeys.add(key);
                } finally {
                    if (is != null) {
                        is.close();
                    }
                }
            }
        }
        return blobKeys;
    }
}
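Because uploadJarFiles is a static helper, a caller only needs the BlobServer's address, a client configuration, and the jar paths. A hedged usage sketch follows, matching the signature shown above; the host, port, and jar location are assumptions for illustration.

import java.net.InetSocketAddress;
import java.util.Collections;
import java.util.List;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.blob.BlobClient;
import org.apache.flink.runtime.blob.BlobKey;

public class UploadJarsExample {

    public static void main(String[] args) throws Exception {
        // hypothetical BlobServer endpoint and jar location
        InetSocketAddress serverAddress = new InetSocketAddress("localhost", 50100);
        List<Path> jars = Collections.singletonList(new Path("file:///tmp/job.jar"));
        List<BlobKey> keys = BlobClient.uploadJarFiles(serverAddress, new Configuration(), jars);
        // each key identifies one uploaded jar on the BlobServer
        for (BlobKey key : keys) {
            System.out.println("uploaded blob: " + key);
        }
    }
}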
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
The class JobGraph, method uploadRequiredJarFiles.
/**
 * Uploads the previously added user jar files to the job manager through the job manager's BLOB server.
 *
 * @param serverAddress
 *        the network address of the BLOB server
 * @param blobClientConfig
 *        the blob client configuration
 * @throws IOException
 *         thrown if an I/O error occurs during the upload
 */
public void uploadRequiredJarFiles(InetSocketAddress serverAddress, Configuration blobClientConfig) throws IOException {
    if (this.userJars.isEmpty()) {
        return;
    }
    BlobClient bc = null;
    try {
        bc = new BlobClient(serverAddress, blobClientConfig);
        for (final Path jar : this.userJars) {
            final FileSystem fs = jar.getFileSystem();
            FSDataInputStream is = null;
            try {
                is = fs.open(jar);
                final BlobKey key = bc.put(is);
                this.userJarBlobKeys.add(key);
            } finally {
                if (is != null) {
                    is.close();
                }
            }
        }
    } finally {
        if (bc != null) {
            bc.close();
        }
    }
}
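This method manages both the BlobClient and the FSDataInputStream with explicit null checks and finally blocks. Since FSDataInputStream extends java.io.InputStream (and BlobClient is Closeable, as the previous example's try-with-resources already relies on), the same logic can be written more compactly. The following is a sketch of an equivalent body, assuming the same userJars and userJarBlobKeys fields and the BlobClient API shown above; it is not the actual Flink source.

// equivalent sketch using try-with-resources, under the assumptions stated above
public void uploadRequiredJarFiles(InetSocketAddress serverAddress, Configuration blobClientConfig) throws IOException {
    if (this.userJars.isEmpty()) {
        return;
    }
    try (BlobClient bc = new BlobClient(serverAddress, blobClientConfig)) {
        for (final Path jar : this.userJars) {
            final FileSystem fs = jar.getFileSystem();
            // FSDataInputStream extends java.io.InputStream, so it is AutoCloseable
            try (FSDataInputStream is = fs.open(jar)) {
                this.userJarBlobKeys.add(bc.put(is));
            }
        }
    }
}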
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
The class AbstractStreamOperator, method restoreStreamCheckpointed.
@Deprecated
private void restoreStreamCheckpointed(OperatorStateHandles stateHandles) throws Exception {
    StreamStateHandle state = stateHandles.getLegacyOperatorState();
    if (null != state) {
        if (this instanceof CheckpointedRestoringOperator) {
            LOG.debug("Restore state of task {} in chain ({}).", stateHandles.getOperatorChainIndex(), getContainingTask().getName());
            FSDataInputStream is = state.openInputStream();
            try {
                getContainingTask().getCancelables().registerClosable(is);
                ((CheckpointedRestoringOperator) this).restoreState(is);
            } finally {
                getContainingTask().getCancelables().unregisterClosable(is);
                is.close();
            }
        } else {
            throw new Exception("Found legacy operator state for operator that does not implement StreamCheckpointedOperator.");
        }
    }
}
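What distinguishes FSDataInputStream from a plain java.io.InputStream is that it is seekable, which is what makes it suitable for restore paths like the one above: state can be re-read from an arbitrary byte offset. A minimal sketch of seek and getPos follows; the file path and offset are assumptions for illustration.

import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;

public class SeekExample {

    public static void main(String[] args) throws Exception {
        Path path = new Path("file:///tmp/example-state"); // hypothetical file
        FileSystem fs = path.getFileSystem();
        try (FSDataInputStream in = fs.open(path)) {
            in.seek(16); // jump to byte offset 16
            System.out.println("now at offset: " + in.getPos());
        }
    }
}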
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
The class GenericWriteAheadSink, method notifyOfCompletedCheckpoint.
@Override
public void notifyOfCompletedCheckpoint(long checkpointId) throws Exception {
    super.notifyOfCompletedCheckpoint(checkpointId);
    synchronized (pendingCheckpoints) {
        Iterator<PendingCheckpoint> pendingCheckpointIt = pendingCheckpoints.iterator();
        while (pendingCheckpointIt.hasNext()) {
            PendingCheckpoint pendingCheckpoint = pendingCheckpointIt.next();
            long pastCheckpointId = pendingCheckpoint.checkpointId;
            int subtaskId = pendingCheckpoint.subtaskId;
            long timestamp = pendingCheckpoint.timestamp;
            StreamStateHandle streamHandle = pendingCheckpoint.stateHandle;
            if (pastCheckpointId <= checkpointId) {
                try {
                    if (!committer.isCheckpointCommitted(subtaskId, pastCheckpointId)) {
                        try (FSDataInputStream in = streamHandle.openInputStream()) {
                            boolean success = sendValues(
                                    new ReusingMutableToRegularIteratorWrapper<>(
                                            new InputViewIterator<>(new DataInputViewStreamWrapper(in), serializer),
                                            serializer),
                                    timestamp);
                            if (success) {
                                // in case the checkpoint was successfully committed,
                                // discard its state from the backend and mark it for removal;
                                // in case it failed, we retry on the next checkpoint
                                committer.commitCheckpoint(subtaskId, pastCheckpointId);
                                streamHandle.discardState();
                                pendingCheckpointIt.remove();
                            }
                        }
                    } else {
                        streamHandle.discardState();
                        pendingCheckpointIt.remove();
                    }
                } catch (Exception e) {
                    // we have to break here to prevent a new (later) checkpoint
                    // from being committed before this one
                    LOG.error("Could not commit checkpoint.", e);
                    break;
                }
            }
        }
    }
}
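The sendValues call above replays checkpointed elements by deserializing them from the handle's FSDataInputStream. Stripped of the iterator wrappers, the mechanics reduce to reading records off a DataInputViewStreamWrapper with a TypeSerializer until the stream is exhausted. The following is a simplified sketch of that decoding loop; using LongSerializer is an assumption for illustration — the sink uses the serializer of its actual element type.

import java.io.EOFException;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;

public class ReplayValues {

    // reads records until the stream is exhausted, as the write-ahead sink does on commit
    static void replay(FSDataInputStream in) throws Exception {
        TypeSerializer<Long> serializer = LongSerializer.INSTANCE;
        DataInputViewStreamWrapper view = new DataInputViewStreamWrapper(in);
        try {
            while (true) {
                Long value = serializer.deserialize(view);
                System.out.println("replayed: " + value);
            }
        } catch (EOFException e) {
            // end of the checkpointed data
        }
    }
}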