Use of java.util.List in project camel by apache.
The class LinkedInExceptionResponseFilter, method filter.
@Override
public void filter(ClientRequestContext requestContext, ClientResponseContext responseContext) throws IOException {
    if (responseContext.getStatus() != Response.Status.OK.getStatusCode() && responseContext.hasEntity()) {
        try {
            final Unmarshaller unmarshaller = jaxbContext.createUnmarshaller();
            final Error error = (Error) unmarshaller.unmarshal(responseContext.getEntityStream());
            final Response.ResponseBuilder builder = Response.status(responseContext.getStatusInfo());
            builder.entity(error);
            // copy response headers
            for (Map.Entry<String, List<String>> header : responseContext.getHeaders().entrySet()) {
                builder.header(header.getKey(), header.getValue());
            }
            throw new LinkedInException(error, builder.build());
        } catch (JAXBException e) {
            // log and ignore
            LOG.warn("Unable to parse LinkedIn error: " + e.getMessage(), e);
        }
    }
}
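Since the filter only inspects responses, wiring it into a client is a single registration call. A minimal sketch follows, assuming the filter can be constructed with no arguments (the real camel-linkedin filter may require constructor arguments for its JAXB setup):

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;

public class LinkedInClientSketch {
    public static Client newLinkedInClient() {
        // Hedged: the no-arg construction of LinkedInExceptionResponseFilter
        // is an assumption for illustration only.
        return ClientBuilder.newClient().register(new LinkedInExceptionResponseFilter());
        // From here on, any non-200 response carrying an entity is unmarshalled
        // into an Error and surfaced as a LinkedInException by the filter.
    }
}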
Use of java.util.List in project camel by apache.
The class KubernetesServicesProducerTest, method createAndDeleteService.
@Test
public void createAndDeleteService() throws Exception {
    if (ObjectHelper.isEmpty(authToken)) {
        return;
    }
    Exchange ex = template.request("direct:createService", new Processor() {
        @Override
        public void process(Exchange exchange) throws Exception {
            exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, "default");
            exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_SERVICE_NAME, "test");
            Map<String, String> labels = new HashMap<String, String>();
            labels.put("this", "rocks");
            exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_SERVICE_LABELS, labels);
            ServiceSpec serviceSpec = new ServiceSpec();
            List<ServicePort> lsp = new ArrayList<ServicePort>();
            ServicePort sp = new ServicePort();
            sp.setPort(8080);
            sp.setTargetPort(new IntOrString(8080));
            sp.setProtocol("TCP");
            lsp.add(sp);
            serviceSpec.setPorts(lsp);
            Map<String, String> selectorMap = new HashMap<String, String>();
            selectorMap.put("container", "test");
            serviceSpec.setSelector(selectorMap);
            exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_SERVICE_SPEC, serviceSpec);
        }
    });
    Service serv = ex.getOut().getBody(Service.class);
    assertEquals("test", serv.getMetadata().getName());
    ex = template.request("direct:deleteService", new Processor() {
        @Override
        public void process(Exchange exchange) throws Exception {
            exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, "default");
            exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_SERVICE_NAME, "test");
        }
    });
    boolean servDeleted = ex.getOut().getBody(Boolean.class);
    assertTrue(servDeleted);
}
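The test presumes two direct routes that hand these headers to a camel-kubernetes endpoint. A hedged sketch of what those routes could look like; the master URL, the option names, and the token lookup are assumptions based on the component's documented URI format, not the test's actual configuration:

import org.apache.camel.builder.RouteBuilder;

public class KubernetesServiceRoutesSketch extends RouteBuilder {
    // Hypothetical token lookup; the real test obtains authToken elsewhere.
    private final String authToken = System.getProperty("kubernetes.auth.token", "");

    @Override
    public void configure() throws Exception {
        from("direct:createService")
            .to("kubernetes://https://localhost:8443?oauthToken=" + authToken
                + "&category=services&operation=createService");
        from("direct:deleteService")
            .to("kubernetes://https://localhost:8443?oauthToken=" + authToken
                + "&category=services&operation=deleteService");
    }
}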
Use of java.util.List in project flink by apache.
The class FlinkAggregateExpandDistinctAggregatesRule, method onMatch.
//~ Methods ----------------------------------------------------------------
public void onMatch(RelOptRuleCall call) {
    final Aggregate aggregate = call.rel(0);
    if (!aggregate.containsDistinctCall()) {
        return;
    }
    // Find all of the agg expressions. We use a LinkedHashSet to ensure
    // determinism.
    int nonDistinctCount = 0;
    int distinctCount = 0;
    int filterCount = 0;
    int unsupportedAggCount = 0;
    final Set<Pair<List<Integer>, Integer>> argLists = new LinkedHashSet<>();
    for (AggregateCall aggCall : aggregate.getAggCallList()) {
        if (aggCall.filterArg >= 0) {
            ++filterCount;
        }
        if (!aggCall.isDistinct()) {
            ++nonDistinctCount;
            if (!(aggCall.getAggregation() instanceof SqlCountAggFunction
                    || aggCall.getAggregation() instanceof SqlSumAggFunction
                    || aggCall.getAggregation() instanceof SqlMinMaxAggFunction)) {
                ++unsupportedAggCount;
            }
            continue;
        }
        ++distinctCount;
        argLists.add(Pair.of(aggCall.getArgList(), aggCall.filterArg));
    }
    Preconditions.checkState(argLists.size() > 0, "containsDistinctCall lied");
    // If all of the agg expressions are distinct and have the same
    // arguments then we can use a more efficient form.
    if (nonDistinctCount == 0 && argLists.size() == 1) {
        final Pair<List<Integer>, Integer> pair = Iterables.getOnlyElement(argLists);
        final RelBuilder relBuilder = call.builder();
        convertMonopole(relBuilder, aggregate, pair.left, pair.right);
        call.transformTo(relBuilder.build());
        return;
    }
    if (useGroupingSets) {
        rewriteUsingGroupingSets(call, aggregate, argLists);
        return;
    }
    // If there is one distinct aggregate, no filters, and one or more
    // supported non-distinct aggregates, we can generate multi-phase aggregates.
    if (distinctCount == 1 // one distinct aggregate
            && filterCount == 0 // no filter
            && unsupportedAggCount == 0 // sum/min/max/count in non-distinct aggregate
            && nonDistinctCount > 0) { // one or more non-distinct aggregates
        final RelBuilder relBuilder = call.builder();
        convertSingletonDistinct(relBuilder, aggregate, argLists);
        call.transformTo(relBuilder.build());
        return;
    }
    // Create a list of the expressions which will yield the final result.
    // Initially, the expressions point to the input field.
    final List<RelDataTypeField> aggFields = aggregate.getRowType().getFieldList();
    final List<RexInputRef> refs = new ArrayList<>();
    final List<String> fieldNames = aggregate.getRowType().getFieldNames();
    final ImmutableBitSet groupSet = aggregate.getGroupSet();
    final int groupAndIndicatorCount = aggregate.getGroupCount() + aggregate.getIndicatorCount();
    for (int i : Util.range(groupAndIndicatorCount)) {
        refs.add(RexInputRef.of(i, aggFields));
    }
    // Aggregate the original relation, including any non-distinct aggregates.
    final List<AggregateCall> newAggCallList = new ArrayList<>();
    int i = -1;
    for (AggregateCall aggCall : aggregate.getAggCallList()) {
        ++i;
        if (aggCall.isDistinct()) {
            refs.add(null);
            continue;
        }
        refs.add(new RexInputRef(groupAndIndicatorCount + newAggCallList.size(),
                aggFields.get(groupAndIndicatorCount + i).getType()));
        newAggCallList.add(aggCall);
    }
    // In the case where there are no non-distinct aggregates (regardless of
    // whether there are group bys), there's no need to generate the
    // extra aggregate and join.
    final RelBuilder relBuilder = call.builder();
    relBuilder.push(aggregate.getInput());
    int n = 0;
    if (!newAggCallList.isEmpty()) {
        final RelBuilder.GroupKey groupKey = relBuilder.groupKey(groupSet, aggregate.indicator, aggregate.getGroupSets());
        relBuilder.aggregate(groupKey, newAggCallList);
        ++n;
    }
    // For each set of operands, find and rewrite all calls which have that
    // set of operands.
    for (Pair<List<Integer>, Integer> argList : argLists) {
        doRewrite(relBuilder, aggregate, n++, argList.left, argList.right, refs);
    }
    relBuilder.project(refs, fieldNames);
    call.transformTo(relBuilder.build());
}
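To see the rule fire, it can be run through a standard Calcite HepPlanner pass. A minimal sketch follows, assuming the rule exposes a singleton INSTANCE field (that field name is an assumption; the HepPlanner API itself is standard Calcite):

import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgram;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;

public class DistinctRewriteSketch {
    public static RelNode expandDistinct(RelNode plan) {
        HepProgram program = new HepProgramBuilder()
            // INSTANCE is an assumed accessor for the rule shown above.
            .addRuleInstance(FlinkAggregateExpandDistinctAggregatesRule.INSTANCE)
            .build();
        HepPlanner planner = new HepPlanner(program);
        planner.setRoot(plan);
        // onMatch runs for each Aggregate that contains a DISTINCT call.
        return planner.findBestExp();
    }
}

For instance, a plan for SELECT SUM(sal), COUNT(DISTINCT sal) FROM emp GROUP BY deptno would take the multi-phase branch above: one distinct call, no filters, and a supported non-distinct SUM.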
Use of java.util.List in project flink by apache.
The class StackTraceSampleCoordinatorTest, method testTriggerStackTraceSample.
/** Tests simple trigger and collect of stack trace samples. */
@Test
public void testTriggerStackTraceSample() throws Exception {
    ExecutionVertex[] vertices = new ExecutionVertex[] {
        mockExecutionVertex(new ExecutionAttemptID(), ExecutionState.RUNNING, true),
        mockExecutionVertex(new ExecutionAttemptID(), ExecutionState.RUNNING, true),
        mockExecutionVertex(new ExecutionAttemptID(), ExecutionState.RUNNING, true),
        mockExecutionVertex(new ExecutionAttemptID(), ExecutionState.RUNNING, true)
    };
    int numSamples = 1;
    Time delayBetweenSamples = Time.milliseconds(100L);
    int maxStackTraceDepth = 0;
    Future<StackTraceSample> sampleFuture = coord.triggerStackTraceSample(vertices, numSamples, delayBetweenSamples, maxStackTraceDepth);
    // Verify messages have been sent
    for (ExecutionVertex vertex : vertices) {
        ExecutionAttemptID expectedExecutionId = vertex.getCurrentExecutionAttempt().getAttemptId();
        TriggerStackTraceSample expectedMsg = new TriggerStackTraceSample(0, expectedExecutionId, numSamples, delayBetweenSamples, maxStackTraceDepth);
        verify(vertex.getCurrentExecutionAttempt()).requestStackTraceSample(eq(0), eq(numSamples), eq(delayBetweenSamples), eq(maxStackTraceDepth), any(Time.class));
    }
    assertFalse(sampleFuture.isDone());
    StackTraceElement[] stackTraceSample = Thread.currentThread().getStackTrace();
    List<StackTraceElement[]> traces = new ArrayList<>();
    traces.add(stackTraceSample);
    traces.add(stackTraceSample);
    traces.add(stackTraceSample);
    // Collect stack traces
    for (int i = 0; i < vertices.length; i++) {
        ExecutionAttemptID executionId = vertices[i].getCurrentExecutionAttempt().getAttemptId();
        coord.collectStackTraces(0, executionId, traces);
        if (i == vertices.length - 1) {
            assertTrue(sampleFuture.isDone());
        } else {
            assertFalse(sampleFuture.isDone());
        }
    }
    // Verify completed stack trace sample
    StackTraceSample sample = sampleFuture.get();
    assertEquals(0, sample.getSampleId());
    assertTrue(sample.getEndTime() >= sample.getStartTime());
    Map<ExecutionAttemptID, List<StackTraceElement[]>> tracesByTask = sample.getStackTraces();
    for (ExecutionVertex vertex : vertices) {
        ExecutionAttemptID executionId = vertex.getCurrentExecutionAttempt().getAttemptId();
        List<StackTraceElement[]> sampleTraces = tracesByTask.get(executionId);
        assertNotNull("Task not found", sampleTraces);
        assertEquals(traces, sampleTraces);
    }
    // Verify no more pending sample
    assertEquals(0, coord.getNumberOfPendingSamples());
    // Verify no error on late collect
    coord.collectStackTraces(0, vertices[0].getCurrentExecutionAttempt().getAttemptId(), traces);
}
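The mockExecutionVertex helper is not part of this excerpt. A hedged Mockito sketch of what it plausibly does; the real helper in Flink's test class may stub more behavior, and the requestStackTraceSample stubbing is deliberately elided:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

private static ExecutionVertex mockExecutionVertex(
        ExecutionAttemptID attemptId,
        ExecutionState state,
        boolean successfulTrigger) {
    // Stub the current execution attempt so the coordinator can address it by ID.
    Execution execution = mock(Execution.class);
    when(execution.getAttemptId()).thenReturn(attemptId);
    when(execution.getState()).thenReturn(state);
    ExecutionVertex vertex = mock(ExecutionVertex.class);
    when(vertex.getCurrentExecutionAttempt()).thenReturn(execution);
    // successfulTrigger would additionally stub requestStackTraceSample(...)
    // to acknowledge the trigger message; that part is omitted in this sketch.
    return vertex;
}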
Use of java.util.List in project flink by apache.
The class SerializedCheckpointData, method toDeque.
// ------------------------------------------------------------------------
//  De-Serialize from Checkpoint
// ------------------------------------------------------------------------
/**
 * De-serializes an array of SerializedCheckpointData back into an ArrayDeque of element checkpoints.
 *
 * @param data The data to be deserialized.
 * @param serializer The serializer used to deserialize the data.
 * @param <T> The type of the elements.
 * @return An ArrayDeque of element checkpoints.
 *
 * @throws IOException Thrown, if the deserialization fails.
 */
public static <T> ArrayDeque<Tuple2<Long, List<T>>> toDeque(SerializedCheckpointData[] data, TypeSerializer<T> serializer) throws IOException {
    ArrayDeque<Tuple2<Long, List<T>>> deque = new ArrayDeque<>(data.length);
    DataInputDeserializer deser = null;
    for (SerializedCheckpointData checkpoint : data) {
        byte[] serializedData = checkpoint.getSerializedData();
        if (deser == null) {
            deser = new DataInputDeserializer(serializedData, 0, serializedData.length);
        } else {
            deser.setBuffer(serializedData, 0, serializedData.length);
        }
        final List<T> ids = new ArrayList<>(checkpoint.getNumIds());
        final int numIds = checkpoint.getNumIds();
        for (int i = 0; i < numIds; i++) {
            ids.add(serializer.deserialize(deser));
        }
        deque.addLast(new Tuple2<Long, List<T>>(checkpoint.checkpointId, ids));
    }
    return deque;
}
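A round-trip usage sketch; a companion fromDeque method with the symmetric signature is an assumption suggested by the class's naming, and its exact signature in Flink may differ:

import java.io.IOException;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.List;
import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.api.java.tuple.Tuple2;

public static void roundTrip() throws IOException {
    ArrayDeque<Tuple2<Long, List<String>>> pending = new ArrayDeque<>();
    pending.addLast(new Tuple2<>(1L, Arrays.asList("id-1", "id-2")));
    // fromDeque(...) is assumed to be the serializing counterpart of toDeque(...).
    SerializedCheckpointData[] snapshot = SerializedCheckpointData.fromDeque(pending, StringSerializer.INSTANCE);
    ArrayDeque<Tuple2<Long, List<String>>> restored = SerializedCheckpointData.toDeque(snapshot, StringSerializer.INSTANCE);
    // The restored deque carries the same checkpoint ID and element list.
    assert restored.peekFirst().f0 == 1L;
}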