Use of org.apache.hyracks.dataflow.std.connectors.MToNPartitioningWithMessageConnectorDescriptor in project asterixdb by apache.
The class ConnectorDescriptorWithMessagingTest, method testEmptyFrames.
@Test
public void testEmptyFrames() throws Exception {
    try {
        List<Integer> routing = Arrays.asList(0, 1, 2, 3, 4);
        IConnectorDescriptorRegistry connDescRegistry = Mockito.mock(IConnectorDescriptorRegistry.class);
        ITuplePartitionComputerFactory partitionComputerFactory = new TestPartitionComputerFactory(routing);
        MToNPartitioningWithMessageConnectorDescriptor connector =
                new MToNPartitioningWithMessageConnectorDescriptor(connDescRegistry, partitionComputerFactory);
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        VSizeFrame message = new VSizeFrame(ctx);
        VSizeFrame tempBuffer = new VSizeFrame(ctx);
        // Register the message frame in the task's shared map so the partitioner can append it to outgoing frames
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, message, ctx);
        message.getBuffer().clear();
        message.getBuffer().put(MessagingFrameTupleAppender.NULL_FEED_MESSAGE);
        message.getBuffer().flip();
        ISerializerDeserializer<?>[] serdes = new ISerializerDeserializer<?>[] {
                Integer64SerializerDeserializer.INSTANCE, DoubleSerializerDeserializer.INSTANCE,
                BooleanSerializerDeserializer.INSTANCE, new UTF8StringSerializerDeserializer() };
        RecordDescriptor rDesc = new RecordDescriptor(serdes);
        TestPartitionWriterFactory partitionWriterFactory = new TestPartitionWriterFactory();
        IFrameWriter partitioner = connector.createPartitioner(ctx, rDesc, partitionWriterFactory, CURRENT_PRODUCER,
                NUMBER_OF_CONSUMERS, NUMBER_OF_CONSUMERS);
        List<TestFrameWriter> recipients = new ArrayList<>();
        try {
            partitioner.open();
            FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
            for (IFrameWriter writer : partitionWriterFactory.getWriters().values()) {
                recipients.add((TestFrameWriter) writer);
            }
            // Flush with no data tuples: every consumer still receives one frame carrying only the NULL message
            partitioner.flush();
            for (TestFrameWriter writer : recipients) {
                Assert.assertEquals(writer.nextFrameCount(), 1);
                fta.reset(writer.getLastFrame());
                Assert.assertEquals(fta.getTupleCount(), 1);
                FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
                Assert.assertEquals(MessagingFrameTupleAppender.NULL_FEED_MESSAGE,
                        MessagingFrameTupleAppender.getMessageType(tempBuffer));
            }
            message.getBuffer().clear();
            message.getBuffer().put(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE);
            message.getBuffer().flip();
            partitioner.flush();
            for (TestFrameWriter writer : recipients) {
                Assert.assertEquals(writer.nextFrameCount(), 2);
                fta.reset(writer.getLastFrame());
                Assert.assertEquals(fta.getTupleCount(), 1);
                FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
                Assert.assertEquals(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE,
                        MessagingFrameTupleAppender.getMessageType(tempBuffer));
            }
            message.getBuffer().clear();
            message.getBuffer().put(MessagingFrameTupleAppender.NULL_FEED_MESSAGE);
            message.getBuffer().flip();
            partitioner.flush();
            for (TestFrameWriter writer : recipients) {
                Assert.assertEquals(writer.nextFrameCount(), 3);
                fta.reset(writer.getLastFrame());
                Assert.assertEquals(fta.getTupleCount(), 1);
                FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
                Assert.assertEquals(MessagingFrameTupleAppender.NULL_FEED_MESSAGE,
                        MessagingFrameTupleAppender.getMessageType(tempBuffer));
            }
        } catch (Throwable t) {
            partitioner.fail();
            throw t;
        } finally {
            partitioner.close();
        }
        // close() delivers one more message frame, so each consumer ends with four frames and a single close call
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 4);
            Assert.assertEquals(writer.closeCount(), 1);
        }
    } catch (Throwable th) {
        th.printStackTrace();
        throw th;
    }
}
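Each flush in the test above is preceded by the same three-step preparation of the shared message frame: clear the buffer, write a one-byte message type, and flip it for reading. A minimal helper capturing that idiom could look like the sketch below; the helper name is hypothetical, while every call it makes appears verbatim in the test.

// Hypothetical convenience helper (not part of the project): stages a one-byte message type
// in the shared message frame, mirroring the clear/put/flip sequence used before each flush.
private static void stageMessage(VSizeFrame message, byte messageType) {
    message.getBuffer().clear();
    message.getBuffer().put(messageType);
    message.getBuffer().flip();
}

With such a helper, each preparation block would reduce to a single call such as stageMessage(message, MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE).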
Use of org.apache.hyracks.dataflow.std.connectors.MToNPartitioningWithMessageConnectorDescriptor in project asterixdb by apache.
The class ConnectorDescriptorWithMessagingTest, method testMessageFitsWithTuples.
@Test
public void testMessageFitsWithTuples() throws Exception {
    try {
        // Routing will be round robin
        List<Integer> routing = Arrays.asList(0, 1, 2, 3, 4);
        IConnectorDescriptorRegistry connDescRegistry = Mockito.mock(IConnectorDescriptorRegistry.class);
        ITuplePartitionComputerFactory partitionComputerFactory = new TestPartitionComputerFactory(routing);
        MToNPartitioningWithMessageConnectorDescriptor connector =
                new MToNPartitioningWithMessageConnectorDescriptor(connDescRegistry, partitionComputerFactory);
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        VSizeFrame message = new VSizeFrame(ctx);
        VSizeFrame tempBuffer = new VSizeFrame(ctx);
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, message, ctx);
        message.getBuffer().clear();
        message.getBuffer().put(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE);
        message.getBuffer().flip();
        ISerializerDeserializer<?>[] serdes = new ISerializerDeserializer<?>[] {
                Integer64SerializerDeserializer.INSTANCE, DoubleSerializerDeserializer.INSTANCE,
                BooleanSerializerDeserializer.INSTANCE, new UTF8StringSerializerDeserializer() };
        FieldType[] types = { FieldType.Integer64, FieldType.Double, FieldType.Boolean, FieldType.String };
        RecordDescriptor rDesc = new RecordDescriptor(serdes);
        TestPartitionWriterFactory partitionWriterFactory = new TestPartitionWriterFactory();
        PartitionWithMessageDataWriter partitioner =
                (PartitionWithMessageDataWriter) connector.createPartitioner(ctx, rDesc, partitionWriterFactory,
                        CURRENT_PRODUCER, NUMBER_OF_CONSUMERS, NUMBER_OF_CONSUMERS);
        partitioner.open();
        FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
        List<TestFrameWriter> recipients = new ArrayList<>();
        for (int i = 0; i < partitionWriterFactory.getWriters().values().size(); i++) {
            recipients.add(partitionWriterFactory.getWriters().get(i));
        }
        // Generate one tuple per consumer; the round-robin routing sends exactly one tuple to each partition
        TestTupleGenerator ttg = new TestTupleGenerator(types, STRING_FIELD_SIZES, true);
        VSizeFrame frame = new VSizeFrame(ctx);
        FrameTupleAppender appender = new FrameTupleAppender(frame);
        for (int count = 0; count < NUMBER_OF_CONSUMERS; count++) {
            ITupleReference tuple = ttg.next();
            appender.append(tuple);
        }
        partitioner.nextFrame(frame.getBuffer());
        partitioner.flush();
        Assert.assertEquals(partitionWriterFactory.getWriters().get(0).nextFrameCount(), 1);
        Assert.assertEquals(partitionWriterFactory.getWriters().get(1).nextFrameCount(), 1);
        Assert.assertEquals(partitionWriterFactory.getWriters().get(2).nextFrameCount(), 1);
        Assert.assertEquals(partitionWriterFactory.getWriters().get(3).nextFrameCount(), 1);
        Assert.assertEquals(partitionWriterFactory.getWriters().get(4).nextFrameCount(), 1);
        // Each delivered frame carries two tuples: the data tuple plus the appended ACK_REQ message
        for (TestFrameWriter writer : recipients) {
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 2);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE,
                    MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        partitioner.close();
    } catch (Throwable th) {
        th.printStackTrace();
        throw th;
    }
}
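The tests rely on TestPartitionComputerFactory, a fixture that maps successive tuples to the partitions listed in the routing list; its source is not shown on this page. As a rough illustration of the idea only, a routing-list-driven partition computer might look like the sketch below. The class name is made up, and the no-argument createPartitioner() signature is an assumption about the Hyracks version in use; this is not the project's actual fixture.

// Illustrative sketch only: cycles through a fixed routing list, ignoring tuple contents,
// which yields the deterministic routing the tests above depend on.
class FixedRoutingPartitionComputerFactory implements ITuplePartitionComputerFactory {
    private static final long serialVersionUID = 1L;
    private final List<Integer> routing;

    FixedRoutingPartitionComputerFactory(List<Integer> routing) {
        this.routing = routing;
    }

    @Override
    public ITuplePartitionComputer createPartitioner() { // no-argument signature assumed
        return new ITuplePartitionComputer() {
            private int next = 0;

            @Override
            public int partition(IFrameTupleAccessor accessor, int tIndex, int nParts) {
                // Ignore the tuple itself and hand out the next partition from the routing list
                int chosen = routing.get(next);
                next = (next + 1) % routing.size();
                return chosen;
            }
        };
    }
}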
Use of org.apache.hyracks.dataflow.std.connectors.MToNPartitioningWithMessageConnectorDescriptor in project asterixdb by apache.
The class FeedOperations, method combineIntakeCollectJobs.
private static JobSpecification combineIntakeCollectJobs(MetadataProvider metadataProvider, Feed feed,
        JobSpecification intakeJob, List<JobSpecification> jobsList, List<FeedConnection> feedConnections,
        String[] intakeLocations) throws AlgebricksException, HyracksDataException {
    JobSpecification jobSpec = new JobSpecification(intakeJob.getFrameSize());
    // copy ingestor
    FeedIntakeOperatorDescriptor firstOp =
            (FeedIntakeOperatorDescriptor) intakeJob.getOperatorMap().get(new OperatorDescriptorId(0));
    FeedIntakeOperatorDescriptor ingestionOp;
    if (firstOp.getAdaptorFactory() == null) {
        ingestionOp = new FeedIntakeOperatorDescriptor(jobSpec, feed, firstOp.getAdaptorLibraryName(),
                firstOp.getAdaptorFactoryClassName(), firstOp.getAdapterOutputType(), firstOp.getPolicyAccessor(),
                firstOp.getOutputRecordDescriptors()[0]);
    } else {
        ingestionOp = new FeedIntakeOperatorDescriptor(jobSpec, feed, firstOp.getAdaptorFactory(),
                firstOp.getAdapterOutputType(), firstOp.getPolicyAccessor(), firstOp.getOutputRecordDescriptors()[0]);
    }
    // create replicator
    ReplicateOperatorDescriptor replicateOp =
            new ReplicateOperatorDescriptor(jobSpec, ingestionOp.getOutputRecordDescriptors()[0], jobsList.size());
    jobSpec.connect(new OneToOneConnectorDescriptor(jobSpec), ingestionOp, 0, replicateOp, 0);
    PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, ingestionOp, intakeLocations);
    PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, replicateOp, intakeLocations);
    // Loop over the jobs to copy operators and connections
    Map<OperatorDescriptorId, OperatorDescriptorId> operatorIdMapping = new HashMap<>();
    Map<ConnectorDescriptorId, ConnectorDescriptorId> connectorIdMapping = new HashMap<>();
    Map<OperatorDescriptorId, List<LocationConstraint>> operatorLocations = new HashMap<>();
    Map<OperatorDescriptorId, Integer> operatorCounts = new HashMap<>();
    List<JobId> jobIds = new ArrayList<>();
    FeedMetaOperatorDescriptor metaOp;
    for (int iter1 = 0; iter1 < jobsList.size(); iter1++) {
        FeedConnection curFeedConnection = feedConnections.get(iter1);
        JobSpecification subJob = jobsList.get(iter1);
        operatorIdMapping.clear();
        Map<OperatorDescriptorId, IOperatorDescriptor> operatorsMap = subJob.getOperatorMap();
        String datasetName = feedConnections.get(iter1).getDatasetName();
        FeedConnectionId feedConnectionId = new FeedConnectionId(ingestionOp.getEntityId(), datasetName);
        FeedPolicyEntity feedPolicyEntity = FeedMetadataUtil.validateIfPolicyExists(
                curFeedConnection.getDataverseName(), curFeedConnection.getPolicyName(),
                metadataProvider.getMetadataTxnContext());
        for (Map.Entry<OperatorDescriptorId, IOperatorDescriptor> entry : operatorsMap.entrySet()) {
            IOperatorDescriptor opDesc = entry.getValue();
            OperatorDescriptorId oldId = opDesc.getOperatorId();
            OperatorDescriptorId opId = null;
            if (opDesc instanceof LSMTreeInsertDeleteOperatorDescriptor
                    && ((LSMTreeInsertDeleteOperatorDescriptor) opDesc).isPrimary()) {
                metaOp = new FeedMetaOperatorDescriptor(jobSpec, feedConnectionId, opDesc,
                        feedPolicyEntity.getProperties(), FeedRuntimeType.STORE);
                opId = metaOp.getOperatorId();
                opDesc.setOperatorId(opId);
            } else {
                if (opDesc instanceof AlgebricksMetaOperatorDescriptor) {
                    AlgebricksMetaOperatorDescriptor algOp = (AlgebricksMetaOperatorDescriptor) opDesc;
                    IPushRuntimeFactory[] runtimeFactories = algOp.getPipeline().getRuntimeFactories();
                    // Tweak AssignOp to work with messages
                    if (runtimeFactories[0] instanceof AssignRuntimeFactory && runtimeFactories.length > 1) {
                        IConnectorDescriptor connectorDesc =
                                subJob.getOperatorInputMap().get(opDesc.getOperatorId()).get(0);
                        // anything on the network interface needs to be message compatible
                        if (connectorDesc instanceof MToNPartitioningConnectorDescriptor) {
                            metaOp = new FeedMetaOperatorDescriptor(jobSpec, feedConnectionId, opDesc,
                                    feedPolicyEntity.getProperties(), FeedRuntimeType.COMPUTE);
                            opId = metaOp.getOperatorId();
                            opDesc.setOperatorId(opId);
                        }
                    }
                }
                if (opId == null) {
                    opId = jobSpec.createOperatorDescriptorId(opDesc);
                }
            }
            operatorIdMapping.put(oldId, opId);
        }
        // copy connectors
        connectorIdMapping.clear();
        for (Entry<ConnectorDescriptorId, IConnectorDescriptor> entry : subJob.getConnectorMap().entrySet()) {
            IConnectorDescriptor connDesc = entry.getValue();
            ConnectorDescriptorId newConnId;
            if (connDesc instanceof MToNPartitioningConnectorDescriptor) {
                MToNPartitioningConnectorDescriptor m2nConn = (MToNPartitioningConnectorDescriptor) connDesc;
                connDesc = new MToNPartitioningWithMessageConnectorDescriptor(jobSpec,
                        m2nConn.getTuplePartitionComputerFactory());
                newConnId = connDesc.getConnectorId();
            } else {
                newConnId = jobSpec.createConnectorDescriptor(connDesc);
            }
            connectorIdMapping.put(entry.getKey(), newConnId);
        }
        // make connections between operators
        for (Entry<ConnectorDescriptorId, Pair<Pair<IOperatorDescriptor, Integer>, Pair<IOperatorDescriptor, Integer>>> entry : subJob
                .getConnectorOperatorMap().entrySet()) {
            ConnectorDescriptorId newId = connectorIdMapping.get(entry.getKey());
            IConnectorDescriptor connDesc = jobSpec.getConnectorMap().get(newId);
            Pair<IOperatorDescriptor, Integer> leftOp = entry.getValue().getLeft();
            Pair<IOperatorDescriptor, Integer> rightOp = entry.getValue().getRight();
            IOperatorDescriptor leftOpDesc = jobSpec.getOperatorMap().get(leftOp.getLeft().getOperatorId());
            IOperatorDescriptor rightOpDesc = jobSpec.getOperatorMap().get(rightOp.getLeft().getOperatorId());
            if (leftOp.getLeft() instanceof FeedCollectOperatorDescriptor) {
                jobSpec.connect(new OneToOneConnectorDescriptor(jobSpec), replicateOp, iter1, leftOpDesc,
                        leftOp.getRight());
            }
            jobSpec.connect(connDesc, leftOpDesc, leftOp.getRight(), rightOpDesc, rightOp.getRight());
        }
        // prepare for setting partition constraints
        operatorLocations.clear();
        operatorCounts.clear();
        for (Constraint constraint : subJob.getUserConstraints()) {
            LValueConstraintExpression lexpr = constraint.getLValue();
            ConstraintExpression cexpr = constraint.getRValue();
            OperatorDescriptorId opId;
            switch (lexpr.getTag()) {
                case PARTITION_COUNT:
                    opId = ((PartitionCountExpression) lexpr).getOperatorDescriptorId();
                    operatorCounts.put(operatorIdMapping.get(opId), (int) ((ConstantExpression) cexpr).getValue());
                    break;
                case PARTITION_LOCATION:
                    opId = ((PartitionLocationExpression) lexpr).getOperatorDescriptorId();
                    IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(operatorIdMapping.get(opId));
                    List<LocationConstraint> locations = operatorLocations.get(opDesc.getOperatorId());
                    if (locations == null) {
                        locations = new ArrayList<>();
                        operatorLocations.put(opDesc.getOperatorId(), locations);
                    }
                    String location = (String) ((ConstantExpression) cexpr).getValue();
                    LocationConstraint lc =
                            new LocationConstraint(location, ((PartitionLocationExpression) lexpr).getPartition());
                    locations.add(lc);
                    break;
                default:
                    break;
            }
        }
        // set absolute location constraints
        for (Entry<OperatorDescriptorId, List<LocationConstraint>> entry : operatorLocations.entrySet()) {
            IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(entry.getKey());
            // why do we need to sort?
            Collections.sort(entry.getValue(), (LocationConstraint o1, LocationConstraint o2) -> {
                return o1.partition - o2.partition;
            });
            String[] locations = new String[entry.getValue().size()];
            for (int j = 0; j < locations.length; ++j) {
                locations[j] = entry.getValue().get(j).location;
            }
            PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, opDesc, locations);
        }
        // set count constraints
        for (Entry<OperatorDescriptorId, Integer> entry : operatorCounts.entrySet()) {
            IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(entry.getKey());
            if (!operatorLocations.keySet().contains(entry.getKey())) {
                PartitionConstraintHelper.addPartitionCountConstraint(jobSpec, opDesc, entry.getValue());
            }
        }
        // roots
        for (OperatorDescriptorId root : subJob.getRoots()) {
            jobSpec.addRoot(jobSpec.getOperatorMap().get(operatorIdMapping.get(root)));
        }
        jobIds.add(((JobEventListenerFactory) subJob.getJobletEventListenerFactory()).getJobId());
    }
    // jobEventListenerFactory
    jobSpec.setJobletEventListenerFactory(new MultiTransactionJobletEventListenerFactory(jobIds, true));
    // useConnectorSchedulingPolicy
    jobSpec.setUseConnectorPolicyForScheduling(jobsList.get(0).isUseConnectorPolicyForScheduling());
    // connectorAssignmentPolicy
    jobSpec.setConnectorPolicyAssignmentPolicy(jobsList.get(0).getConnectorPolicyAssignmentPolicy());
    return jobSpec;
}
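Outside the job-rewriting loop above, attaching this connector between two operators uses the same two calls the method already relies on: construct the descriptor against the target JobSpecification and hand it to connect. A minimal sketch, where producerOp, consumerOp, and tpcf are hypothetical placeholders rather than code from the project:

// Hypothetical sketch: producerOp, consumerOp, and tpcf are assumed to be defined elsewhere.
private static void connectWithMessaging(JobSpecification spec, IOperatorDescriptor producerOp,
        IOperatorDescriptor consumerOp, ITuplePartitionComputerFactory tpcf) {
    // The message-aware connector is a drop-in replacement for MToNPartitioningConnectorDescriptor
    IConnectorDescriptor conn = new MToNPartitioningWithMessageConnectorDescriptor(spec, tpcf);
    spec.connect(conn, producerOp, 0, consumerOp, 0);
}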
Use of org.apache.hyracks.dataflow.std.connectors.MToNPartitioningWithMessageConnectorDescriptor in project asterixdb by apache.
The class ConnectorDescriptorWithMessagingTest, method testMessageLargerThanEmptyFrame.
@Test
public void testMessageLargerThanEmptyFrame() throws Exception {
    try {
        List<Integer> routing = Arrays.asList(0, 1, 2, 3, 4);
        IConnectorDescriptorRegistry connDescRegistry = Mockito.mock(IConnectorDescriptorRegistry.class);
        ITuplePartitionComputerFactory partitionComputerFactory = new TestPartitionComputerFactory(routing);
        MToNPartitioningWithMessageConnectorDescriptor connector =
                new MToNPartitioningWithMessageConnectorDescriptor(connDescRegistry, partitionComputerFactory);
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        VSizeFrame message = new VSizeFrame(ctx);
        VSizeFrame tempBuffer = new VSizeFrame(ctx);
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, message, ctx);
        // The marker message is written with size DEFAULT_FRAME_SIZE + 1, so it cannot fit in an empty default-size frame
        writeRandomMessage(message, MessagingFrameTupleAppender.MARKER_MESSAGE, DEFAULT_FRAME_SIZE + 1);
        ISerializerDeserializer<?>[] serdes = new ISerializerDeserializer<?>[] {
                Integer64SerializerDeserializer.INSTANCE, DoubleSerializerDeserializer.INSTANCE,
                BooleanSerializerDeserializer.INSTANCE, new UTF8StringSerializerDeserializer() };
        RecordDescriptor rDesc = new RecordDescriptor(serdes);
        TestPartitionWriterFactory partitionWriterFactory = new TestPartitionWriterFactory();
        IFrameWriter partitioner = connector.createPartitioner(ctx, rDesc, partitionWriterFactory, CURRENT_PRODUCER,
                NUMBER_OF_CONSUMERS, NUMBER_OF_CONSUMERS);
        partitioner.open();
        FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
        List<TestFrameWriter> recipients = new ArrayList<>();
        for (IFrameWriter writer : partitionWriterFactory.getWriters().values()) {
            recipients.add((TestFrameWriter) writer);
        }
        partitioner.flush();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 1);
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.MARKER_MESSAGE,
                    MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        message.getBuffer().clear();
        message.getBuffer().put(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE);
        message.getBuffer().flip();
        partitioner.flush();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 2);
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE,
                    MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        message.getBuffer().clear();
        message.getBuffer().put(MessagingFrameTupleAppender.NULL_FEED_MESSAGE);
        message.getBuffer().flip();
        partitioner.flush();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 3);
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.NULL_FEED_MESSAGE,
                    MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        partitioner.close();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 4);
            Assert.assertEquals(writer.closeCount(), 1);
        }
    } catch (Throwable th) {
        th.printStackTrace();
        throw th;
    }
}
Use of org.apache.hyracks.dataflow.std.connectors.MToNPartitioningWithMessageConnectorDescriptor in project asterixdb by apache.
The class ConnectorDescriptorWithMessagingTest, method testMessageLargerThanSome.
@Test
public void testMessageLargerThanSome() throws Exception {
    try {
        // Routing will be to 1, 3, and 4 only. 0 and 2 will receive no tuples
        List<Integer> routing = Arrays.asList(1, 3, 4);
        IConnectorDescriptorRegistry connDescRegistry = Mockito.mock(IConnectorDescriptorRegistry.class);
        ITuplePartitionComputerFactory partitionComputerFactory = new TestPartitionComputerFactory(routing);
        MToNPartitioningWithMessageConnectorDescriptor connector =
                new MToNPartitioningWithMessageConnectorDescriptor(connDescRegistry, partitionComputerFactory);
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        VSizeFrame message = new VSizeFrame(ctx);
        VSizeFrame tempBuffer = new VSizeFrame(ctx);
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, message, ctx);
        message.getBuffer().clear();
        writeRandomMessage(message, MessagingFrameTupleAppender.MARKER_MESSAGE, DEFAULT_FRAME_SIZE);
        ISerializerDeserializer<?>[] serdes = new ISerializerDeserializer<?>[] {
                Integer64SerializerDeserializer.INSTANCE, DoubleSerializerDeserializer.INSTANCE,
                BooleanSerializerDeserializer.INSTANCE, new UTF8StringSerializerDeserializer() };
        FieldType[] types = { FieldType.Integer64, FieldType.Double, FieldType.Boolean, FieldType.String };
        RecordDescriptor rDesc = new RecordDescriptor(serdes);
        TestPartitionWriterFactory partitionWriterFactory = new TestPartitionWriterFactory();
        PartitionWithMessageDataWriter partitioner =
                (PartitionWithMessageDataWriter) connector.createPartitioner(ctx, rDesc, partitionWriterFactory,
                        CURRENT_PRODUCER, NUMBER_OF_CONSUMERS, NUMBER_OF_CONSUMERS);
        partitioner.open();
        FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
        List<TestFrameWriter> recipients = new ArrayList<>();
        for (int i = 0; i < partitionWriterFactory.getWriters().values().size(); i++) {
            recipients.add(partitionWriterFactory.getWriters().get(i));
        }
        // Fill a whole frame with tuples; they are routed to partitions 1, 3, and 4 only
        TestTupleGenerator ttg = new TestTupleGenerator(types, STRING_FIELD_SIZES, true);
        VSizeFrame frame = new VSizeFrame(ctx);
        FrameTupleAppender appender = new FrameTupleAppender(frame);
        ITupleReference tuple = ttg.next();
        while (appender.append(tuple)) {
            tuple = ttg.next();
        }
        partitioner.nextFrame(frame.getBuffer());
        partitioner.flush();
        // Partitions 1, 3, and 4 received tuples and end up with two frames; 0 and 2 get only the frame carrying the marker message
        Assert.assertEquals(1, partitionWriterFactory.getWriters().get(0).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(1).nextFrameCount());
        Assert.assertEquals(1, partitionWriterFactory.getWriters().get(2).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(3).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(4).nextFrameCount());
        for (TestFrameWriter writer : recipients) {
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.MARKER_MESSAGE,
                    MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        partitioner.close();
    } catch (Throwable th) {
        th.printStackTrace();
        throw th;
    }
}