Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class ParquetVectorizedInputFormat, method clipParquetSchema.
/**
 * Clips `parquetSchema` according to `projectedFields`.
 */
private MessageType clipParquetSchema(GroupType parquetSchema) {
    Type[] types = new Type[projectedFields.length];
    if (isCaseSensitive) {
        for (int i = 0; i < projectedFields.length; ++i) {
            String fieldName = projectedFields[i];
            if (!parquetSchema.containsField(fieldName)) {
                LOG.warn("{} does not exist in {}, will fill the field with null.", fieldName, parquetSchema);
                types[i] = ParquetSchemaConverter.convertToParquetType(fieldName, projectedTypes[i]);
                unknownFieldsIndices.add(i);
            } else {
                types[i] = parquetSchema.getType(fieldName);
            }
        }
    } else {
        Map<String, Type> caseInsensitiveFieldMap = new HashMap<>();
        for (Type type : parquetSchema.getFields()) {
            caseInsensitiveFieldMap.compute(type.getName().toLowerCase(Locale.ROOT), (key, previousType) -> {
                if (previousType != null) {
                    throw new FlinkRuntimeException("Parquet with case insensitive mode should have no duplicate key: " + key);
                }
                return type;
            });
        }
        for (int i = 0; i < projectedFields.length; ++i) {
            Type type = caseInsensitiveFieldMap.get(projectedFields[i].toLowerCase(Locale.ROOT));
            if (type == null) {
                LOG.warn("{} does not exist in {}, will fill the field with null.", projectedFields[i], parquetSchema);
                type = ParquetSchemaConverter.convertToParquetType(projectedFields[i].toLowerCase(Locale.ROOT), projectedTypes[i]);
                unknownFieldsIndices.add(i);
            }
            // TODO: clip for array, map, and row types.
            types[i] = type;
        }
    }
    return Types.buildMessage().addFields(types).named("flink-parquet");
}
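Note how compute() doubles as a duplicate detector while the case-insensitive lookup map is built: the remapping function receives the previous value stored under the lower-cased key and can fail fast on a collision. A minimal, self-contained sketch of that idiom (a plain RuntimeException stands in for FlinkRuntimeException; all names are illustrative):

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

public class CaseInsensitiveIndexDemo {

    /**
     * Builds a lower-cased-name -> original-name index, failing fast on
     * names that collide once case is ignored, mirroring the compute()
     * idiom in clipParquetSchema above.
     */
    static Map<String, String> indexCaseInsensitive(String... names) {
        Map<String, String> index = new HashMap<>();
        for (String name : names) {
            index.compute(name.toLowerCase(Locale.ROOT), (key, previous) -> {
                if (previous != null) {
                    throw new RuntimeException("duplicate key after lower-casing: " + key);
                }
                return name;
            });
        }
        return index;
    }

    public static void main(String[] args) {
        System.out.println(indexCaseInsensitive("id", "Name")); // {id=id, name=Name}
        indexCaseInsensitive("id", "ID");                       // throws: duplicate key after lower-casing: id
    }
}

A putIfAbsent() plus null check would work too; compute() simply keeps the collision check and the insertion in one expression.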
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class ParquetColumnarRowSplitReader, method clipParquetSchema.
/**
 * Clips `parquetSchema` according to `fieldNames`.
 */
private static MessageType clipParquetSchema(GroupType parquetSchema, String[] fieldNames, boolean caseSensitive) {
    Type[] types = new Type[fieldNames.length];
    if (caseSensitive) {
        for (int i = 0; i < fieldNames.length; ++i) {
            String fieldName = fieldNames[i];
            if (parquetSchema.getFieldIndex(fieldName) < 0) {
                throw new IllegalArgumentException(fieldName + " does not exist");
            }
            types[i] = parquetSchema.getType(fieldName);
        }
    } else {
        Map<String, Type> caseInsensitiveFieldMap = new HashMap<>();
        for (Type type : parquetSchema.getFields()) {
            caseInsensitiveFieldMap.compute(type.getName().toLowerCase(Locale.ROOT), (key, previousType) -> {
                if (previousType != null) {
                    throw new FlinkRuntimeException("Parquet with case insensitive mode should have no duplicate key: " + key);
                }
                return type;
            });
        }
        for (int i = 0; i < fieldNames.length; ++i) {
            Type type = caseInsensitiveFieldMap.get(fieldNames[i].toLowerCase(Locale.ROOT));
            if (type == null) {
                throw new IllegalArgumentException(fieldNames[i] + " does not exist");
            }
            // TODO: clip for array, map, and row types.
            types[i] = type;
        }
    }
    return Types.buildMessage().addFields(types).named("flink-parquet");
}
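Unlike the vectorized variant above, this reader treats a missing projected field as a hard error instead of null-padding it. The clipping itself is just a projection over the file schema, rebuilt with the parquet-column Types builder. A small runnable sketch of that mechanic (the schema and field names here are made up for illustration):

import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
import org.apache.parquet.schema.Type;
import org.apache.parquet.schema.Types;

public class ClipSchemaDemo {

    /** Case-sensitive clipping as in the reader above: unknown fields are an error. */
    static MessageType clip(GroupType fileSchema, String... fieldNames) {
        Type[] types = new Type[fieldNames.length];
        for (int i = 0; i < fieldNames.length; ++i) {
            if (!fileSchema.containsField(fieldNames[i])) {
                throw new IllegalArgumentException(fieldNames[i] + " does not exist");
            }
            types[i] = fileSchema.getType(fieldNames[i]);
        }
        return Types.buildMessage().addFields(types).named("flink-parquet");
    }

    public static void main(String[] args) {
        MessageType fileSchema = Types.buildMessage()
                .addFields(
                        Types.required(PrimitiveTypeName.INT64).named("id"),
                        Types.optional(PrimitiveTypeName.BINARY).named("name"),
                        Types.optional(PrimitiveTypeName.DOUBLE).named("score"))
                .named("file-schema");
        // Keep only the projected columns, in projection order.
        System.out.println(clip(fileSchema, "score", "id"));
        // clip(fileSchema, "missing") would throw IllegalArgumentException.
    }
}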
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class KubernetesStateHandleStoreTest, method testReplaceFailedAndDiscardState.
@Test
public void testReplaceFailedAndDiscardState() throws Exception {
    final FlinkRuntimeException updateException = new FlinkRuntimeException("Failed to update");
    new Context() {
        {
            runTest(() -> {
                leaderCallbackGrantLeadership();
                final KubernetesStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> store =
                        new KubernetesStateHandleStore<>(
                                flinkKubeClient, LEADER_CONFIGMAP_NAME, longStateStorage, filter, LOCK_IDENTITY);
                store.addAndLock(key, state);
                final FlinkKubeClient anotherFlinkKubeClient =
                        createFlinkKubeClientBuilder()
                                .setCheckAndUpdateConfigMapFunction(
                                        (configMapName, function) -> {
                                            throw updateException;
                                        })
                                .build();
                final KubernetesStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> anotherStore =
                        new KubernetesStateHandleStore<>(
                                anotherFlinkKubeClient, LEADER_CONFIGMAP_NAME, longStateStorage, filter, LOCK_IDENTITY);
                final TestingLongStateHandleHelper.LongStateHandle newState =
                        new TestingLongStateHandleHelper.LongStateHandle(23456L);
                final StringResourceVersion resourceVersion = anotherStore.exists(key);
                assertThat(resourceVersion.isExisting(), is(true));
                try {
                    anotherStore.replace(key, resourceVersion, newState);
                    fail("We should get an exception when the kube client fails to update.");
                } catch (Exception ex) {
                    assertThat(ex, FlinkMatchers.containsCause(updateException));
                }
                assertThat(anotherStore.getAllAndLock().size(), is(1));
                // The stored state does not change.
                assertThat(anotherStore.getAndLock(key).retrieveState(), is(state));
                assertThat(TestingLongStateHandleHelper.getGlobalStorageSize(), is(2));
                assertThat(TestingLongStateHandleHelper.getDiscardCallCountForStateHandleByIndex(0), is(0));
                assertThat(TestingLongStateHandleHelper.getDiscardCallCountForStateHandleByIndex(1), is(1));
            });
        }
    };
}
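The final assertions capture the contract under test: when replace() fails, the store must discard the new, uncommitted handle (index 1) while leaving the original handle (index 0) untouched. The try/fail/catch block is the classic JUnit 4 idiom for asserting that an exception surfaces with the expected cause; FlinkMatchers.containsCause walks the cause chain rather than comparing only the top-level exception. A self-contained sketch of the same check written with assertThrows (assumes JUnit 4.13+; the wrapping method is an illustrative stand-in, not Flink's API):

import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertThrows;

import org.junit.Test;

public class WrappedCauseTest {

    /** Stand-in for a store operation that wraps the client's failure, as replace() does above. */
    static void replaceThatWraps(RuntimeException clientFailure) throws Exception {
        // The store wraps the client's failure instead of letting it escape directly.
        throw new Exception("Could not update the ConfigMap", clientFailure);
    }

    @Test
    public void wrappedCauseIsPreserved() {
        RuntimeException updateException = new RuntimeException("Failed to update");
        Exception thrown = assertThrows(Exception.class, () -> replaceThatWraps(updateException));
        // Equivalent of FlinkMatchers.containsCause: walk the cause chain until we hit the expected cause.
        Throwable cause = thrown;
        while (cause != null && cause != updateException) {
            cause = cause.getCause();
        }
        assertSame(updateException, cause);
    }
}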
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class NFA, method createDecisionGraph.
private OutgoingEdges<T> createDecisionGraph(ConditionContext context, ComputationState computationState, T event) {
    State<T> state = getState(computationState);
    final OutgoingEdges<T> outgoingEdges = new OutgoingEdges<>(state);
    final Stack<State<T>> states = new Stack<>();
    states.push(state);
    // First create all outgoing edges, so as to be able to reason about the Dewey version.
    while (!states.isEmpty()) {
        State<T> currentState = states.pop();
        Collection<StateTransition<T>> stateTransitions = currentState.getStateTransitions();
        // Check all state transitions for each state.
        for (StateTransition<T> stateTransition : stateTransitions) {
            try {
                if (checkFilterCondition(context, stateTransition.getCondition(), event)) {
                    // The filter condition is true.
                    switch (stateTransition.getAction()) {
                        case PROCEED:
                            // Simply advance the computation state, but apply the current event to it.
                            // PROCEED is equivalent to an epsilon transition.
                            states.push(stateTransition.getTargetState());
                            break;
                        case IGNORE:
                        case TAKE:
                            outgoingEdges.add(stateTransition);
                            break;
                    }
                }
            } catch (Exception e) {
                throw new FlinkRuntimeException("Failure happened in filter function.", e);
            }
        }
    }
    return outgoingEdges;
}
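The traversal uses an explicit stack to flatten PROCEED (epsilon) transitions: instead of recursing, states reachable for free are pushed and expanded in place, while IGNORE/TAKE transitions are the edges actually collected. A stripped-down, runnable sketch of the same walk on a toy transition table (all names illustrative; it assumes PROCEED edges form no cycles, as in compiled NFA graphs):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class EpsilonClosureDemo {

    enum Action { PROCEED, IGNORE, TAKE }

    static final class Edge {
        final String target;
        final Action action;
        Edge(String target, Action action) { this.target = target; this.action = action; }
        @Override public String toString() { return action + "->" + target; }
    }

    /** Collects IGNORE/TAKE edges reachable from start, following PROCEED edges like epsilon transitions. */
    static List<Edge> outgoingEdges(String start, Map<String, List<Edge>> transitions) {
        List<Edge> result = new ArrayList<>();
        Deque<String> states = new ArrayDeque<>();
        states.push(start);
        while (!states.isEmpty()) {
            String current = states.pop();
            for (Edge edge : transitions.getOrDefault(current, List.of())) {
                if (edge.action == Action.PROCEED) {
                    states.push(edge.target); // epsilon transition: keep expanding
                } else {
                    result.add(edge); // a real outgoing edge
                }
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, List<Edge>> transitions = new HashMap<>();
        transitions.put("A", List.of(new Edge("A", Action.IGNORE), new Edge("B", Action.PROCEED)));
        transitions.put("B", List.of(new Edge("C", Action.TAKE)));
        System.out.println(outgoingEdges("A", transitions)); // [IGNORE->A, TAKE->C]
    }
}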
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class KubernetesUtilsTest, method testParsePortRange.
@Test
public void testParsePortRange() {
    final Configuration cfg = new Configuration();
    cfg.set(BlobServerOptions.PORT, "50100-50200");
    try {
        KubernetesUtils.parsePort(cfg, BlobServerOptions.PORT);
        fail("Should fail with an exception.");
    } catch (FlinkRuntimeException e) {
        assertThat(
                e.getMessage(),
                containsString(
                        BlobServerOptions.PORT.key()
                                + " should be specified to a fixed port. Do not support a range of ports."));
    }
}
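The test pins down that a port range like "50100-50200" is rejected for options that must name exactly one port on Kubernetes. As a rough sketch of the behavior being asserted, assuming parsePort simply fails on anything that is not a single fixed port (this is not Flink's implementation; RuntimeException and the key string stand in for FlinkRuntimeException and BlobServerOptions.PORT.key()):

public class FixedPortDemo {

    /** Accepts only a single fixed port; a range or any other non-integer value is an error. */
    static int parseFixedPort(String key, String value) {
        try {
            return Integer.parseInt(value);
        } catch (NumberFormatException e) {
            throw new RuntimeException(
                    key + " should be specified to a fixed port. Do not support a range of ports.", e);
        }
    }

    public static void main(String[] args) {
        System.out.println(parseFixedPort("blob.server.port", "50100")); // 50100
        parseFixedPort("blob.server.port", "50100-50200");               // throws RuntimeException
    }
}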