Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project xwiki-platform by xwiki.
The class SolrDocumentIteratorTest, method iterate():
@Test
public void iterate() throws Exception {
    SolrDocumentList firstResults = new SolrDocumentList();
    firstResults.add(createSolrDocument("chess", Arrays.asList("A", "B"), "C", "", "1.3"));
    firstResults.add(createSolrDocument("chess", Arrays.asList("M"), "N", "en", "2.4"));

    QueryResponse firstResponse = mock(QueryResponse.class);
    when(firstResponse.getNextCursorMark()).thenReturn("foo");
    when(firstResponse.getResults()).thenReturn(firstResults);

    SolrDocumentList secondResults = new SolrDocumentList();
    secondResults.add(createSolrDocument("tennis", Arrays.asList("X", "Y", "Z"), "V", "fr", "1.1"));

    QueryResponse secondResponse = mock(QueryResponse.class);
    when(secondResponse.getNextCursorMark()).thenReturn("bar");
    when(secondResponse.getResults()).thenReturn(secondResults);

    when(solr.query(any(SolrQuery.class))).thenReturn(firstResponse, secondResponse, secondResponse);

    DocumentIterator<String> iterator = mocker.getComponentUnderTest();

    WikiReference rootReference = new WikiReference("wiki");
    iterator.setRootReference(rootReference);

    List<Pair<DocumentReference, String>> actualResult = new ArrayList<Pair<DocumentReference, String>>();
    while (iterator.hasNext()) {
        actualResult.add(iterator.next());
    }

    SolrReferenceResolver resolver = mocker.getInstance(SolrReferenceResolver.class);
    verify(resolver).getQuery(rootReference);

    List<Pair<DocumentReference, String>> expectedResult = new ArrayList<Pair<DocumentReference, String>>();
    DocumentReference documentReference = new DocumentReference("chess", Arrays.asList("A", "B"), "C");
    expectedResult.add(new ImmutablePair<DocumentReference, String>(documentReference, "1.3"));
    documentReference = new DocumentReference("chess", Arrays.asList("M"), "N", Locale.ENGLISH);
    expectedResult.add(new ImmutablePair<DocumentReference, String>(documentReference, "2.4"));
    documentReference = new DocumentReference("tennis", Arrays.asList("X", "Y", "Z"), "V", Locale.FRENCH);
    expectedResult.add(new ImmutablePair<DocumentReference, String>(documentReference, "1.1"));

    assertEquals(expectedResult, actualResult);
}
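A note on why the final assertEquals holds: commons-lang3 pairs use value-based equality, so an ImmutablePair equals any other Pair with the same left and right values. A minimal standalone sketch of that behaviour (plain Java with the plain commons-lang3 artifact rather than the Beam-repackaged one, and no XWiki types):

import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class PairEqualityExample {
    public static void main(String[] args) {
        List<Pair<String, String>> expected = new ArrayList<>();
        expected.add(new ImmutablePair<>("chess:A.B.C", "1.3"));

        List<Pair<String, String>> actual = new ArrayList<>();
        // Pair.of(...) returns an ImmutablePair; equality only looks at the left/right values.
        actual.add(Pair.of("chess:A.B.C", "1.3"));

        // Prints true, which is why assertEquals(expectedResult, actualResult) can compare
        // two lists of pairs directly in the test above.
        System.out.println(expected.equals(actual));
    }
}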
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project xwiki-platform by xwiki.
The class DatabaseDocumentIteratorTest, method iterateOneWiki():
@Test
public void iterateOneWiki() throws Exception {
    DocumentReference rootReference = createDocumentReference("gang", Arrays.asList("A", "B"), "C", null);

    Query emptyQuery = mock(Query.class);
    when(emptyQuery.execute()).thenReturn(Collections.emptyList());

    Query query = mock(Query.class);
    when(query.setLimit(anyInt())).thenReturn(query);
    when(query.setWiki(rootReference.getWikiReference().getName())).thenReturn(query);
    when(query.setOffset(0)).thenReturn(query);
    when(query.setOffset(100)).thenReturn(emptyQuery);
    when(query.execute()).thenReturn(Collections.<Object>singletonList(new Object[] { "A.B", "C", "de", "3.1" }));

    Map<String, Object> namedParameters = new HashMap<String, Object>();
    namedParameters.put("space", "A.B");
    namedParameters.put("name", "C");
    when(query.getNamedParameters()).thenReturn(namedParameters);

    Query countQuery = mock(Query.class);
    when(countQuery.addFilter(mocker.<QueryFilter>getInstance(QueryFilter.class, "count"))).thenReturn(countQuery);

    QueryManager queryManager = mocker.getInstance(QueryManager.class);
    String whereClause = " where doc.space = :space and doc.name = :name";
    when(queryManager.createQuery("select doc.space, doc.name, doc.language, doc.version from XWikiDocument doc"
        + whereClause + " order by doc.space, doc.name, doc.language", Query.HQL)).thenReturn(query);
    when(queryManager.createQuery(whereClause, Query.HQL)).thenReturn(countQuery);

    DocumentIterator<String> iterator = mocker.getComponentUnderTest();
    iterator.setRootReference(rootReference);

    List<Pair<DocumentReference, String>> actualResults = new ArrayList<Pair<DocumentReference, String>>();
    while (iterator.hasNext()) {
        actualResults.add(iterator.next());
    }

    List<Pair<DocumentReference, String>> expectedResults = new ArrayList<Pair<DocumentReference, String>>();
    expectedResults.add(new ImmutablePair<DocumentReference, String>(new DocumentReference(rootReference, Locale.GERMAN), "3.1"));

    assertEquals(expectedResults, actualResults);

    verify(query).bindValue("space", "A.B");
    verify(query).bindValue("name", "C");
    verify(countQuery).bindValue("space", "A.B");
    verify(countQuery).bindValue("name", "C");
}
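The test calls a createDocumentReference helper that is not shown in this snippet. A plausible minimal version of that method, sketched purely for illustration under the assumption that it forwards to the DocumentReference constructors already used in these tests (the actual helper in xwiki-platform may differ):

private DocumentReference createDocumentReference(String wiki, List<String> spaces, String name, Locale locale) {
    // No locale given: fall back to the locale-less constructor, as in the Solr test above.
    if (locale == null) {
        return new DocumentReference(wiki, spaces, name);
    }
    return new DocumentReference(wiki, spaces, name, locale);
}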
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project twister2 by DSC-SPIDAL.
The class MPIDataFlowOperation, method writeToMemoryManager():
/**
 * extracts the data from the message and writes to the memory manager using the key
 *
 * @param currentMessage message to be parsed
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
private void writeToMemoryManager(MPIMessage currentMessage, int id) {
    Object data = messageDeSerializer.get(id).getDataBuffers(currentMessage, edge);
    // TODO: make sure to add the data length for non primitive types when adding to byte buffer
    // Object data = messageDeSerializer.get(id).build(currentMessage,
    //     currentMessage.getHeader().getEdge());
    int sourceID = currentMessage.getHeader().getSourceId();
    int noOfMessages = 1;
    boolean isList = false;
    if (data instanceof List) {
        noOfMessages = ((List) data).size();
        isList = true;
    }
    ByteBuffer tempData;
    ByteBuffer tempKey;
    if (isList) {
        List objectList = (List) data;
        for (Object message : objectList) {
            if (isKeyed) {
                ImmutablePair<byte[], byte[]> tempPair = (ImmutablePair<byte[], byte[]>) message;
                setupThreadLocalBuffers(tempPair.getKey().length, tempPair.getValue().length, currentMessage.getType());
                tempData = threadLocalDataBuffer.get();
                tempKey = threadLocalKeyBuffer.get();
                tempKey.put(tempPair.getKey());
                tempData.putInt(tempPair.getValue().length);
                tempData.put(tempPair.getValue());
                operationMemoryManager.put(tempKey, tempData);
            } else {
                byte[] dataBytes = (byte[]) message;
                setupThreadLocalBuffers(4, dataBytes.length, currentMessage.getType());
                tempData = threadLocalDataBuffer.get();
                tempKey = threadLocalKeyBuffer.get();
                tempKey.putInt(sourceID);
                tempData.put(dataBytes);
                operationMemoryManager.put(tempKey, tempData);
            }
        }
    } else {
        if (isKeyed) {
            Pair<byte[], byte[]> tempPair = (Pair<byte[], byte[]>) data;
            setupThreadLocalBuffers(tempPair.getKey().length, tempPair.getValue().length, currentMessage.getType());
            tempData = threadLocalDataBuffer.get();
            tempKey = threadLocalKeyBuffer.get();
            tempKey.put(tempPair.getKey());
            tempData.putInt(tempPair.getValue().length);
            tempData.put(tempPair.getValue());
            operationMemoryManager.put(tempKey, tempData);
        } else {
            byte[] dataBytes = (byte[]) data;
            setupThreadLocalBuffers(4, dataBytes.length, currentMessage.getType());
            tempData = threadLocalDataBuffer.get();
            tempKey = threadLocalKeyBuffer.get();
            tempKey.putInt(sourceID);
            tempData.put(dataBytes);
            operationMemoryManager.put(tempKey, tempData);
        }
    }
}
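In the keyed branches, the key buffer receives the raw key bytes while the data buffer is length-prefixed: a 4-byte length followed by the value bytes. The following self-contained sketch shows just that packing step with standard JDK ByteBuffers and the plain commons-lang3 Pair, leaving out twister2's thread-local buffer pooling and the operation memory manager:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.commons.lang3.tuple.Pair;

public class KeyedBufferPacking {
    public static void main(String[] args) {
        Pair<byte[], byte[]> keyValue = Pair.of(
            "chess".getBytes(StandardCharsets.UTF_8),
            "some serialized value".getBytes(StandardCharsets.UTF_8));

        // Key buffer: exactly the key bytes.
        ByteBuffer keyBuffer = ByteBuffer.allocate(keyValue.getKey().length);
        keyBuffer.put(keyValue.getKey());

        // Data buffer: 4-byte length prefix followed by the value bytes,
        // mirroring tempData.putInt(...) and tempData.put(...) above.
        ByteBuffer dataBuffer = ByteBuffer.allocate(4 + keyValue.getValue().length);
        dataBuffer.putInt(keyValue.getValue().length);
        dataBuffer.put(keyValue.getValue());

        keyBuffer.flip();
        dataBuffer.flip();
        System.out.println("key bytes: " + keyBuffer.remaining()
            + ", data bytes: " + dataBuffer.remaining());
    }
}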
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project twister2 by DSC-SPIDAL.
The class MPIDataFlowPartition, method init():
/**
 * Initialize
 */
public void init(Config cfg, MessageType t, TaskPlan taskPlan, int edge) {
    this.thisSources = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
    LOG.info(String.format("%d setup loadbalance routing %s", taskPlan.getThisExecutor(), thisSources));
    this.thisTasks = taskPlan.getTasksOfThisExecutor();
    this.router = new PartitionRouter(taskPlan, sources, destinations);
    Map<Integer, Set<Integer>> internal = router.getInternalSendTasks(0);
    Map<Integer, Set<Integer>> external = router.getExternalSendTasks(0);
    this.instancePlan = taskPlan;
    this.type = t;
    LOG.log(Level.FINE, String.format("%d adding internal/external routing", taskPlan.getThisExecutor()));

    for (int s : thisSources) {
        Set<Integer> integerSetMap = internal.get(s);
        if (integerSetMap != null) {
            this.dests.internal.addAll(integerSetMap);
        }
        Set<Integer> integerSetMap1 = external.get(s);
        if (integerSetMap1 != null) {
            this.dests.external.addAll(integerSetMap1);
        }
        LOG.fine(String.format("%d adding internal/external routing %d", taskPlan.getThisExecutor(), s));
        break;
    }
    LOG.log(Level.FINE, String.format("%d done adding internal/external routing", taskPlan.getThisExecutor()));

    // TODO : Does this send the correct receiveExpectedTaskIds for partition communication
    if (this.finalReceiver != null && isLastReceiver()) {
        this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
    }

    Map<Integer, ArrayBlockingQueue<Pair<Object, MPISendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
    Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
    Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();

    Set<Integer> srcs = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
    for (int s : srcs) {
        // later look at how not to allocate pairs for this each time
        ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages =
            new ArrayBlockingQueue<Pair<Object, MPISendMessage>>(MPIContext.sendPendingMax(cfg));
        pendingSendMessagesPerSource.put(s, pendingSendMessages);
        serializerMap.put(s, new MPIMessageSerializer(new KryoSerializer()));
    }

    int maxReceiveBuffers = MPIContext.receiveBufferCount(cfg);
    int receiveExecutorsSize = receivingExecutors().size();
    if (receiveExecutorsSize == 0) {
        receiveExecutorsSize = 1;
    }
    Set<Integer> execs = router.receivingExecutors();
    for (int e : execs) {
        int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
        Queue<Pair<Object, MPIMessage>> pendingReceiveMessages = new ArrayBlockingQueue<Pair<Object, MPIMessage>>(capacity);
        pendingReceiveMessagesPerSource.put(e, pendingReceiveMessages);
        pendingReceiveDeSerializations.put(e, new ArrayBlockingQueue<MPIMessage>(capacity));
        deSerializerMap.put(e, new MPIMessageDeSerializer(new KryoSerializer()));
    }

    for (int src : srcs) {
        for (int dest : destinations) {
            sendRoutingParameters(src, dest);
        }
    }

    delegete.setCompletionListener(completionListener);
    delegete.init(cfg, t, taskPlan, edge, router.receivingExecutors(), router.isLastReceiver(), this,
        pendingSendMessagesPerSource, pendingReceiveMessagesPerSource, pendingReceiveDeSerializations,
        serializerMap, deSerializerMap, isKeyed);
    delegete.setKeyType(keyType);
}
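The pending send queues created above are bounded per source, so a full queue refuses further entries instead of growing without limit. A simplified sketch of that pattern; SendMessage is a hypothetical placeholder for MPISendMessage, and the fixed capacity stands in for MPIContext.sendPendingMax(cfg):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import org.apache.commons.lang3.tuple.Pair;

public class PendingSendQueues {
    // Hypothetical stand-in for MPISendMessage.
    static class SendMessage {
    }

    public static void main(String[] args) {
        int sendPendingMax = 4; // in the real code this value comes from the configuration
        int[] sourcesOnThisExecutor = { 0, 1 };

        Map<Integer, ArrayBlockingQueue<Pair<Object, SendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
        for (int s : sourcesOnThisExecutor) {
            pendingSendMessagesPerSource.put(s, new ArrayBlockingQueue<>(sendPendingMax));
        }

        // offer(...) returns false once the bounded queue is full, which is how this
        // sketch models backpressure on a sending task.
        ArrayBlockingQueue<Pair<Object, SendMessage>> queue = pendingSendMessagesPerSource.get(0);
        for (int i = 0; i < sendPendingMax + 1; i++) {
            boolean accepted = queue.offer(Pair.<Object, SendMessage>of("payload-" + i, new SendMessage()));
            System.out.println("message " + i + " accepted: " + accepted);
        }
    }
}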
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project twister2 by DSC-SPIDAL.
The class MPIDirectDataFlowCommunication, method init():
/**
 * Initialize
 *
 * @param cfg the configuration
 * @param t the type of messages sent over this operation
 * @param taskPlan the task plan of this executor
 * @param edge the edge identifier used by this operation
 */
public void init(Config cfg, MessageType t, TaskPlan taskPlan, int edge) {
    this.router = new DirectRouter(taskPlan, sources, destination);
    if (this.finalReceiver != null && isLastReceiver()) {
        this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
    }

    Map<Integer, ArrayBlockingQueue<Pair<Object, MPISendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
    Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
    Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();

    Set<Integer> srcs = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
    for (int s : srcs) {
        // later look at how not to allocate pairs for this each time
        ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages =
            new ArrayBlockingQueue<Pair<Object, MPISendMessage>>(MPIContext.sendPendingMax(cfg));
        pendingSendMessagesPerSource.put(s, pendingSendMessages);
        pendingReceiveDeSerializations.put(s, new ArrayBlockingQueue<MPIMessage>(MPIContext.sendPendingMax(cfg)));
        serializerMap.put(s, new MPIMessageSerializer(new KryoSerializer()));
    }

    MessageDeSerializer messageDeSerializer = new MPIMessageDeSerializer(new KryoSerializer());
    deSerializerMap.put(destination, messageDeSerializer);

    delegete.init(cfg, t, taskPlan, edge, router.receivingExecutors(), isLastReceiver(), this,
        pendingSendMessagesPerSource, pendingReceiveMessagesPerSource, pendingReceiveDeSerializations,
        serializerMap, deSerializerMap, false);
}
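Both init methods queue receive-side work as Pair<Object, MPIMessage>, keeping the deserialized payload and the originating message together in one entry. A minimal sketch of consuming such a queue, with a hypothetical Message class standing in for MPIMessage and no claim about the exact progression logic in twister2:

import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;
import org.apache.commons.lang3.tuple.Pair;

public class PendingReceiveDrain {
    // Hypothetical stand-in for MPIMessage.
    static class Message {
        final int edge;

        Message(int edge) {
            this.edge = edge;
        }
    }

    public static void main(String[] args) {
        Queue<Pair<Object, Message>> pendingReceiveMessages = new ArrayBlockingQueue<>(8);
        pendingReceiveMessages.offer(Pair.<Object, Message>of("deserialized payload", new Message(42)));

        // Each entry carries the payload and the raw message together, so a consumer
        // can use both halves of the pair in one step.
        Pair<Object, Message> entry;
        while ((entry = pendingReceiveMessages.poll()) != null) {
            Object payload = entry.getLeft();
            Message raw = entry.getRight();
            System.out.println("payload=" + payload + ", edge=" + raw.edge);
        }
    }
}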