Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class StreamContextEnvironment, method serializeConfig.
private static byte[] serializeConfig(Serializable config) {
    try (final ByteArrayOutputStream bos = new ByteArrayOutputStream();
            final ObjectOutputStream oos = new ObjectOutputStream(bos)) {
        oos.writeObject(config);
        oos.flush();
        return bos.toByteArray();
    } catch (IOException e) {
        throw new FlinkRuntimeException("Cannot serialize configuration.", e);
    }
}
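Reading the bytes back is symmetric. A minimal sketch of the matching deserialization step, assuming the bytes were produced by serializeConfig above (the method name deserializeConfig is hypothetical, not part of StreamContextEnvironment):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import org.apache.flink.util.FlinkRuntimeException;

private static Serializable deserializeConfig(byte[] bytes) {
    // Hypothetical counterpart to serializeConfig: reads back the object graph
    // written by ObjectOutputStream#writeObject.
    try (final ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
        return (Serializable) ois.readObject();
    } catch (IOException | ClassNotFoundException e) {
        throw new FlinkRuntimeException("Cannot deserialize configuration.", e);
    }
}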
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class FileSystemLookupFunction, method checkCacheReload.
private void checkCacheReload() {
    if (nextLoadTime > System.currentTimeMillis()) {
        return;
    }
    if (nextLoadTime > 0) {
        LOG.info("Lookup join cache has expired after {} minute(s), reloading", reloadInterval.toMinutes());
    } else {
        LOG.info("Populating lookup join cache");
    }
    int numRetry = 0;
    while (true) {
        cache.clear();
        try {
            long count = 0;
            GenericRowData reuse = new GenericRowData(rowType.getFieldCount());
            partitionReader.open(partitionFetcher.fetch(fetcherContext));
            RowData row;
            while ((row = partitionReader.read(reuse)) != null) {
                count++;
                RowData rowData = serializer.copy(row);
                RowData key = extractLookupKey(rowData);
                List<RowData> rows = cache.computeIfAbsent(key, k -> new ArrayList<>());
                rows.add(rowData);
            }
            partitionReader.close();
            nextLoadTime = System.currentTimeMillis() + reloadInterval.toMillis();
            LOG.info("Loaded {} row(s) into lookup join cache", count);
            return;
        } catch (Exception e) {
            if (numRetry >= MAX_RETRIES) {
                throw new FlinkRuntimeException(String.format("Failed to load table into cache after %d retries", numRetry), e);
            }
            numRetry++;
            long toSleep = numRetry * RETRY_INTERVAL.toMillis();
            LOG.warn(String.format("Failed to load table into cache, will retry in %d seconds", toSleep / 1000), e);
            try {
                Thread.sleep(toSleep);
            } catch (InterruptedException ex) {
                LOG.warn("Interrupted while waiting to retry failed cache load, aborting");
                throw new FlinkRuntimeException(ex);
            }
        }
    }
}
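The loop above is a retry-with-linear-backoff pattern. A standalone sketch of the same pattern, assuming a Callable-based task and illustrative maxRetries/retryInterval parameters (none of these names are part of FileSystemLookupFunction):

import java.time.Duration;
import java.util.concurrent.Callable;
import org.apache.flink.util.FlinkRuntimeException;

static <T> T runWithRetry(Callable<T> task, int maxRetries, Duration retryInterval) {
    int numRetry = 0;
    while (true) {
        try {
            return task.call();
        } catch (Exception e) {
            if (numRetry >= maxRetries) {
                throw new FlinkRuntimeException(
                        String.format("Task failed after %d retries", numRetry), e);
            }
            numRetry++;
            try {
                // Linear backoff: sleep 1x, 2x, 3x ... the base interval.
                Thread.sleep(numRetry * retryInterval.toMillis());
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                throw new FlinkRuntimeException(ex);
            }
        }
    }
}

Note that checkCacheReload additionally clears the cache at the top of every attempt, so a failed load never leaves a partially populated cache behind.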
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class AvroFactory, method getSpecificDataForClass.
/**
 * Creates a {@link SpecificData} object for a given class. Possibly uses the specific data
 * from the generated class with logical conversions applied (avro >= 1.9.x).
 *
 * <p>Copied over from {@code SpecificData#getForClass(Class<T> c)}. We do not use that method
 * directly, because we want to stay API backwards compatible with older Avro versions which
 * did not have it.
 */
public static <T extends SpecificData> SpecificData getSpecificDataForClass(Class<T> type, ClassLoader cl) {
    try {
        Field specificDataField = type.getDeclaredField("MODEL$");
        specificDataField.setAccessible(true);
        return (SpecificData) specificDataField.get((Object) null);
    } catch (IllegalAccessException e) {
        throw new FlinkRuntimeException("Could not access the MODEL$ field of avro record", e);
    } catch (NoSuchFieldException e) {
        return new SpecificData(cl);
    }
}
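The method is an instance of a reflect-with-fallback pattern: read a static field if the class declares it, otherwise construct a default. A generalized sketch of that pattern (readStaticFieldOrDefault and its parameters are hypothetical, for illustration only):

import java.lang.reflect.Field;
import java.util.function.Supplier;
import org.apache.flink.util.FlinkRuntimeException;

static <T> T readStaticFieldOrDefault(Class<?> owner, String fieldName, Supplier<T> fallback) {
    try {
        Field field = owner.getDeclaredField(fieldName);
        field.setAccessible(true);
        // Passing null is fine for static fields; the receiver is ignored.
        @SuppressWarnings("unchecked")
        T value = (T) field.get(null);
        return value;
    } catch (NoSuchFieldException e) {
        // The class was generated without the field (e.g. older Avro): use the default.
        return fallback.get();
    } catch (IllegalAccessException e) {
        throw new FlinkRuntimeException("Could not access field " + fieldName, e);
    }
}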
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class KeyGroupPartitionedPriorityQueue, method getSubsetForKeyGroup.
@Nonnull
@Override
public Set<T> getSubsetForKeyGroup(int keyGroupId) {
    HashSet<T> result = new HashSet<>();
    PQ partitionQueue = keyGroupedHeaps[globalKeyGroupToLocalIndex(keyGroupId)];
    try (CloseableIterator<T> iterator = partitionQueue.iterator()) {
        while (iterator.hasNext()) {
            result.add(iterator.next());
        }
    } catch (Exception e) {
        throw new FlinkRuntimeException("Exception while iterating key group.", e);
    }
    return result;
}
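The drain-and-close shape can be factored out on its own. A minimal sketch, assuming a hypothetical helper drainToSet (try-with-resources guarantees the iterator's close() runs even when iteration throws; close() itself may also throw, which is why the catch covers Exception rather than only iteration failures):

import java.util.HashSet;
import java.util.Set;
import org.apache.flink.util.CloseableIterator;
import org.apache.flink.util.FlinkRuntimeException;

static <T> Set<T> drainToSet(CloseableIterator<T> iterator) {
    Set<T> result = new HashSet<>();
    // The iterator is closed on every exit path, successful or not.
    try (CloseableIterator<T> it = iterator) {
        while (it.hasNext()) {
            result.add(it.next());
        }
    } catch (Exception e) {
        throw new FlinkRuntimeException("Exception while draining iterator.", e);
    }
    return result;
}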
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class FileSourceSplitState, method toFileSourceSplit.
/**
 * Use the current row count as the starting row count to create a new FileSourceSplit.
 */
@SuppressWarnings("unchecked")
public SplitT toFileSourceSplit() {
    final CheckpointedPosition position =
            (offset == CheckpointedPosition.NO_OFFSET && recordsToSkipAfterOffset == 0)
                    ? null
                    : new CheckpointedPosition(offset, recordsToSkipAfterOffset);
    final FileSourceSplit updatedSplit = split.updateWithCheckpointedPosition(position);
    // some sanity checks to avoid surprises and not accidentally lose split information
    if (updatedSplit == null) {
        throw new FlinkRuntimeException(
                "Split returned 'null' in updateWithCheckpointedPosition(): " + split);
    }
    if (updatedSplit.getClass() != split.getClass()) {
        throw new FlinkRuntimeException(
                String.format(
                        "Split returned different type in updateWithCheckpointedPosition(). "
                                + "Split type is %s, returned type is %s",
                        split.getClass().getName(),
                        updatedSplit.getClass().getName()));
    }
    return (SplitT) updatedSplit;
}
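The two checks encode a contract for FileSourceSplit subclasses: updateWithCheckpointedPosition must return a non-null split of exactly the same runtime class as the original. A generic restatement of that check (checkSameClassNonNull is a hypothetical helper, not Flink API):

import org.apache.flink.util.FlinkRuntimeException;

static <T> T checkSameClassNonNull(T original, T updated, String methodName) {
    if (updated == null) {
        throw new FlinkRuntimeException(
                "Object returned 'null' in " + methodName + "(): " + original);
    }
    if (updated.getClass() != original.getClass()) {
        // Comparing runtime classes catches subclasses that silently downgrade
        // the split type and would lose subclass-specific information.
        throw new FlinkRuntimeException(
                String.format(
                        "%s() returned a different type. Original type is %s, returned type is %s",
                        methodName,
                        original.getClass().getName(),
                        updated.getClass().getName()));
    }
    return updated;
}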