Use of org.apache.hyracks.dataflow.common.comm.io.FrameTupleAccessor in project asterixdb by apache.
The class LimitOperatorDescriptor, method createPushRuntime:
@Override
public IOperatorNodePushable createPushRuntime(final IHyracksTaskContext ctx,
        final IRecordDescriptorProvider recordDescProvider, int partition, int nPartitions)
        throws HyracksDataException {
    return new AbstractUnaryInputUnaryOutputOperatorNodePushable() {

        private FrameTupleAccessor fta;

        private int currentSize;

        private boolean finished;

        @Override
        public void open() throws HyracksDataException {
            fta = new FrameTupleAccessor(outRecDescs[0]);
            currentSize = 0;
            finished = false;
            writer.open();
        }

        @Override
        public void nextFrame(ByteBuffer buffer) throws HyracksDataException {
            if (!finished) {
                fta.reset(buffer);
                int count = fta.getTupleCount();
                if ((currentSize + count) > outputLimit) {
                    // Only part of this frame fits under the limit: copy the remaining
                    // tuples one by one, then stop consuming further frames.
                    FrameTupleAppender partialAppender = new FrameTupleAppender(new VSizeFrame(ctx));
                    int copyCount = outputLimit - currentSize;
                    for (int i = 0; i < copyCount; i++) {
                        FrameUtils.appendToWriter(writer, partialAppender, fta, i);
                        currentSize++;
                    }
                    partialAppender.write(writer, false);
                    finished = true;
                } else {
                    // The whole frame fits under the limit: forward it unchanged.
                    FrameUtils.flushFrame(buffer, writer);
                    currentSize += count;
                }
            }
        }

        @Override
        public void fail() throws HyracksDataException {
            writer.fail();
        }

        @Override
        public void close() throws HyracksDataException {
            writer.close();
        }

        @Override
        public void flush() throws HyracksDataException {
            writer.flush();
        }
    };
}
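The interesting part of this runtime is the accessor/appender copy loop in nextFrame. Below is a minimal sketch of that pattern in isolation, using only the calls that already appear above; the helper name and its parameter list are hypothetical.

// Hypothetical helper: copy at most 'remaining' tuples from an incoming frame
// to 'writer', returning how many tuples were actually copied.
private static int copyAtMost(IHyracksTaskContext ctx, RecordDescriptor recDesc, ByteBuffer incoming,
        IFrameWriter writer, int remaining) throws HyracksDataException {
    FrameTupleAccessor fta = new FrameTupleAccessor(recDesc);
    fta.reset(incoming);
    FrameTupleAppender appender = new FrameTupleAppender(new VSizeFrame(ctx));
    int copied = 0;
    int count = fta.getTupleCount();
    while (copied < remaining && copied < count) {
        // appendToWriter flushes the appender to 'writer' whenever the frame fills up.
        FrameUtils.appendToWriter(writer, appender, fta, copied);
        copied++;
    }
    // Push whatever is still buffered in the appender.
    appender.write(writer, false);
    return copied;
}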
Use of org.apache.hyracks.dataflow.common.comm.io.FrameTupleAccessor in project asterixdb by apache.
The class IndexInsertUpdateDeleteOperatorNodePushable, method open:
@Override
public void open() throws HyracksDataException {
    accessor = new FrameTupleAccessor(inputRecDesc);
    writeBuffer = new VSizeFrame(ctx);
    indexHelper.open();
    index = indexHelper.getIndexInstance();
    try {
        writer.open();
        LocalResource resource = indexHelper.getResource();
        modCallback = modOpCallbackFactory.createModificationOperationCallback(resource, ctx, this);
        indexAccessor = index.createAccessor(modCallback, NoOpOperationCallback.INSTANCE);
        if (tupleFilterFactory != null) {
            tupleFilter = tupleFilterFactory.createTupleFilter(ctx);
            frameTuple = new FrameTupleReference();
        }
    } catch (Exception e) {
        throw new HyracksDataException(e);
    }
}
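This open() only wires up the state; the accessor and the FrameTupleReference are consumed frame by frame. As a rough illustration, and not the actual class code (the real operator also projects key fields through a permuting tuple reference and dispatches on the operation kind), the matching nextFrame loop looks roughly like this for the insert path:

@Override
public void nextFrame(ByteBuffer buffer) throws HyracksDataException {
    accessor.reset(buffer);
    // Reusable reference handed to the index; hypothetical simplification of the real field-permuting reference.
    FrameTupleReference tuple = new FrameTupleReference();
    int tupleCount = accessor.getTupleCount();
    for (int i = 0; i < tupleCount; i++) {
        if (tupleFilter != null) {
            // Point the filter's reusable reference at tuple i and let the filter decide.
            frameTuple.reset(accessor, i);
            if (!tupleFilter.accept(frameTuple)) {
                continue;
            }
        }
        tuple.reset(accessor, i);
        indexAccessor.insert(tuple); // insert-only for brevity
    }
    // Forward the incoming frame to the downstream writer.
    FrameUtils.flushFrame(buffer, writer);
}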
Use of org.apache.hyracks.dataflow.common.comm.io.FrameTupleAccessor in project asterixdb by apache.
The class ConnectorDescriptorWithMessagingTest, method testMessageLargerThanEmptyFrame:
@Test
public void testMessageLargerThanEmptyFrame() throws Exception {
    try {
        List<Integer> routing = Arrays.asList(0, 1, 2, 3, 4);
        IConnectorDescriptorRegistry connDescRegistry = Mockito.mock(IConnectorDescriptorRegistry.class);
        ITuplePartitionComputerFactory partitionComputerFactory = new TestPartitionComputerFactory(routing);
        MToNPartitioningWithMessageConnectorDescriptor connector =
                new MToNPartitioningWithMessageConnectorDescriptor(connDescRegistry, partitionComputerFactory);
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        VSizeFrame message = new VSizeFrame(ctx);
        VSizeFrame tempBuffer = new VSizeFrame(ctx);
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, message, ctx);
        // Write a marker message that is one byte larger than the default frame size.
        writeRandomMessage(message, MessagingFrameTupleAppender.MARKER_MESSAGE, DEFAULT_FRAME_SIZE + 1);
        ISerializerDeserializer<?>[] serdes = new ISerializerDeserializer<?>[] {
                Integer64SerializerDeserializer.INSTANCE, DoubleSerializerDeserializer.INSTANCE,
                BooleanSerializerDeserializer.INSTANCE, new UTF8StringSerializerDeserializer() };
        RecordDescriptor rDesc = new RecordDescriptor(serdes);
        TestPartitionWriterFactory partitionWriterFactory = new TestPartitionWriterFactory();
        IFrameWriter partitioner = connector.createPartitioner(ctx, rDesc, partitionWriterFactory,
                CURRENT_PRODUCER, NUMBER_OF_CONSUMERS, NUMBER_OF_CONSUMERS);
        partitioner.open();
        FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
        List<TestFrameWriter> recipients = new ArrayList<>();
        for (IFrameWriter writer : partitionWriterFactory.getWriters().values()) {
            recipients.add((TestFrameWriter) writer);
        }
        // First flush: every consumer should receive one frame carrying the marker message.
        partitioner.flush();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 1);
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.MARKER_MESSAGE,
                    MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        // Second flush: replace the shared message with an ACK request and verify delivery.
        message.getBuffer().clear();
        message.getBuffer().put(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE);
        message.getBuffer().flip();
        partitioner.flush();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 2);
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE,
                    MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        // Third flush: a null feed message should also reach every consumer.
        message.getBuffer().clear();
        message.getBuffer().put(MessagingFrameTupleAppender.NULL_FEED_MESSAGE);
        message.getBuffer().flip();
        partitioner.flush();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 3);
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.NULL_FEED_MESSAGE,
                    MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        // Closing the partitioner pushes one final frame and closes each consumer exactly once.
        partitioner.close();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 4);
            Assert.assertEquals(writer.closeCount(), 1);
        }
    } catch (Throwable th) {
        th.printStackTrace();
        throw th;
    }
}
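The three verification loops above repeat the same four checks. A hedged refactoring sketch, using only calls already present in the test; the helper itself is hypothetical and not part of the test class:

// Hypothetical assertion helper: every recipient should now report 'expectedFrameCount'
// delivered frames, and its last frame should carry exactly one tuple whose
// message type matches 'expectedType'.
private void verifyMessageFrames(List<TestFrameWriter> recipients, FrameTupleAccessor fta,
        VSizeFrame tempBuffer, int expectedFrameCount, byte expectedType) throws HyracksDataException {
    for (TestFrameWriter writer : recipients) {
        Assert.assertEquals(expectedFrameCount, writer.nextFrameCount());
        fta.reset(writer.getLastFrame());
        Assert.assertEquals(1, fta.getTupleCount());
        FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
        Assert.assertEquals(expectedType, MessagingFrameTupleAppender.getMessageType(tempBuffer));
    }
}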
Use of org.apache.hyracks.dataflow.common.comm.io.FrameTupleAccessor in project asterixdb by apache.
The class ConnectorDescriptorWithMessagingTest, method testMessageLargerThanSome:
@Test
public void testMessageLargerThanSome() throws Exception {
    try {
        // Routing will be to 1, 3, and 4 only. 0 and 2 will receive no tuples.
        List<Integer> routing = Arrays.asList(1, 3, 4);
        IConnectorDescriptorRegistry connDescRegistry = Mockito.mock(IConnectorDescriptorRegistry.class);
        ITuplePartitionComputerFactory partitionComputerFactory = new TestPartitionComputerFactory(routing);
        MToNPartitioningWithMessageConnectorDescriptor connector =
                new MToNPartitioningWithMessageConnectorDescriptor(connDescRegistry, partitionComputerFactory);
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        VSizeFrame message = new VSizeFrame(ctx);
        VSizeFrame tempBuffer = new VSizeFrame(ctx);
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, message, ctx);
        message.getBuffer().clear();
        // Write a marker message that fills an entire default-size frame.
        writeRandomMessage(message, MessagingFrameTupleAppender.MARKER_MESSAGE, DEFAULT_FRAME_SIZE);
        ISerializerDeserializer<?>[] serdes = new ISerializerDeserializer<?>[] {
                Integer64SerializerDeserializer.INSTANCE, DoubleSerializerDeserializer.INSTANCE,
                BooleanSerializerDeserializer.INSTANCE, new UTF8StringSerializerDeserializer() };
        FieldType[] types = { FieldType.Integer64, FieldType.Double, FieldType.Boolean, FieldType.String };
        RecordDescriptor rDesc = new RecordDescriptor(serdes);
        TestPartitionWriterFactory partitionWriterFactory = new TestPartitionWriterFactory();
        PartitionWithMessageDataWriter partitioner =
                (PartitionWithMessageDataWriter) connector.createPartitioner(ctx, rDesc, partitionWriterFactory,
                        CURRENT_PRODUCER, NUMBER_OF_CONSUMERS, NUMBER_OF_CONSUMERS);
        partitioner.open();
        FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
        List<TestFrameWriter> recipients = new ArrayList<>();
        for (int i = 0; i < partitionWriterFactory.getWriters().values().size(); i++) {
            recipients.add(partitionWriterFactory.getWriters().get(i));
        }
        // Fill one frame with generated tuples, then push it through the partitioner.
        TestTupleGenerator ttg = new TestTupleGenerator(types, STRING_FIELD_SIZES, true);
        VSizeFrame frame = new VSizeFrame(ctx);
        FrameTupleAppender appender = new FrameTupleAppender(frame);
        ITupleReference tuple = ttg.next();
        while (appender.append(tuple)) {
            tuple = ttg.next();
        }
        partitioner.nextFrame(frame.getBuffer());
        partitioner.flush();
        // Consumers 1, 3, and 4 get a data frame plus the message frame; 0 and 2 get only the message frame.
        Assert.assertEquals(1, partitionWriterFactory.getWriters().get(0).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(1).nextFrameCount());
        Assert.assertEquals(1, partitionWriterFactory.getWriters().get(2).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(3).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(4).nextFrameCount());
        for (TestFrameWriter writer : recipients) {
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.MARKER_MESSAGE,
                    MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        partitioner.close();
    } catch (Throwable th) {
        th.printStackTrace();
        throw th;
    }
}
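TestTupleGenerator fills the frame with opaque tuples, and the test only inspects the message frames. For completeness, here is a hedged sketch of reading the generated tuples back field by field with the same FrameTupleAccessor and serdes; the helper is hypothetical, assumes a heap-backed frame buffer, and uses the usual tuple-start plus field-slot offset arithmetic. ByteArrayInputStream and DataInputStream are plain java.io classes.

// Hypothetical helper: deserialize every field of every tuple in a frame.
private static void readBackTuples(ByteBuffer frameBuffer, RecordDescriptor rDesc) throws HyracksDataException {
    FrameTupleAccessor reader = new FrameTupleAccessor(rDesc);
    reader.reset(frameBuffer);
    for (int t = 0; t < reader.getTupleCount(); t++) {
        int tupleStart = reader.getTupleStartOffset(t);
        int slots = reader.getFieldSlotsLength();
        for (int f = 0; f < reader.getFieldCount(); f++) {
            // Field data begins after the tuple's field-offset slots.
            int start = tupleStart + slots + reader.getFieldStartOffset(t, f);
            int length = reader.getFieldEndOffset(t, f) - reader.getFieldStartOffset(t, f);
            DataInputStream in = new DataInputStream(
                    new ByteArrayInputStream(reader.getBuffer().array(), start, length));
            Object value = rDesc.getFields()[f].deserialize(in);
            // ... assert on 'value' as needed
        }
    }
}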
Use of org.apache.hyracks.dataflow.common.comm.io.FrameTupleAccessor in project asterixdb by apache.
The class LSMInsertDeleteOperatorNodePushable, method open:
@Override
public void open() throws HyracksDataException {
    accessor = new FrameTupleAccessor(inputRecDesc);
    writeBuffer = new VSizeFrame(ctx);
    appender = new FrameTupleAppender(writeBuffer);
    indexHelper.open();
    lsmIndex = (AbstractLSMIndex) indexHelper.getIndexInstance();
    try {
        if (isPrimary && ctx.getSharedObject() != null) {
            PrimaryIndexLogMarkerCallback callback = new PrimaryIndexLogMarkerCallback(lsmIndex);
            TaskUtil.putInSharedMap(ILogMarkerCallback.KEY_MARKER_CALLBACK, callback, ctx);
        }
        writer.open();
        modCallback = modOpCallbackFactory.createModificationOperationCallback(indexHelper.getResource(), ctx, this);
        indexAccessor = lsmIndex.createAccessor(modCallback, NoOpOperationCallback.INSTANCE);
        if (tupleFilterFactory != null) {
            tupleFilter = tupleFilterFactory.createTupleFilter(ctx);
            frameTuple = new FrameTupleReference();
        }
        INcApplicationContext runtimeCtx =
                (INcApplicationContext) ctx.getJobletContext().getServiceContext().getApplicationContext();
        LSMIndexUtil.checkAndSetFirstLSN(lsmIndex, runtimeCtx.getTransactionSubsystem().getLogManager());
    } catch (Throwable th) {
        throw new HyracksDataException(th);
    }
}
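Here too, open() only prepares the accessor, writeBuffer, and appender; they come into play once frames arrive. The sketch below is an illustration, not the class's actual nextFrame: it shows how the writeBuffer allocated above is typically used to forward a copy of each processed frame downstream, assuming the FrameUtils.copyAndFlip and IFrame.ensureFrameSize utilities.

// Hedged sketch: copy the incoming frame into the writeBuffer from open() and
// push that copy to the downstream writer, leaving the original buffer free
// for reuse by the upstream operator.
private void forwardFrameCopy(ByteBuffer incoming) throws HyracksDataException {
    writeBuffer.ensureFrameSize(incoming.capacity()); // grow the VSizeFrame if needed
    FrameUtils.copyAndFlip(incoming, writeBuffer.getBuffer());
    FrameUtils.flushFrame(writeBuffer.getBuffer(), writer);
}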