
Example 31 with VSizeFrame

Use of org.apache.hyracks.api.comm.VSizeFrame in project asterixdb by Apache.

Class ResultPrinter, method print.

public void print(ResultReader resultReader) throws HyracksDataException {
    printPrefix();
    final IFrameTupleAccessor fta = resultReader.getFrameTupleAccessor();
    final IFrame frame = new VSizeFrame(resultDisplayFrameMgr);
    // Read result frames until the reader is exhausted; the VSizeFrame is reused across reads.
    while (resultReader.read(frame) > 0) {
        final ByteBuffer frameBuffer = frame.getBuffer();
        final byte[] frameBytes = frameBuffer.array();
        fta.reset(frameBuffer);
        final int last = fta.getTupleCount();
        for (int tIndex = 0; tIndex < last; tIndex++) {
            final int start = fta.getTupleStartOffset(tIndex);
            int length = fta.getTupleEndOffset(tIndex) - start;
            // In CSV output, strip a trailing newline from the record before printing.
            if (conf.fmt() == SessionConfig.OutputFormat.CSV && ((length > 0) && (frameBytes[start + length - 1] == '\n'))) {
                length--;
            }
            String result = new String(frameBytes, start, length, UTF_8);
            if (wrapArray && notFirst) {
                output.out().print(", ");
            }
            notFirst = true;
            displayRecord(result);
        }
        frameBuffer.clear();
    }
    printPostfix();
}
Also used: IFrame (org.apache.hyracks.api.comm.IFrame), IFrameTupleAccessor (org.apache.hyracks.api.comm.IFrameTupleAccessor), ByteBuffer (java.nio.ByteBuffer), VSizeFrame (org.apache.hyracks.api.comm.VSizeFrame)
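
For contrast with the read loop above, here is a minimal, hedged sketch (not taken from the asterixdb sources) of the write-side pattern that most of the later examples rely on: tuples are serialized into an ArrayTupleBuilder, appended to a VSizeFrame through a FrameTupleAppender, and flushed to a downstream IFrameWriter whenever the frame fills up. The class name, the (int, string) field layout, and the writer/count parameters are illustrative assumptions.

import java.io.DataOutput;

import org.apache.hyracks.api.comm.IFrameWriter;
import org.apache.hyracks.api.comm.VSizeFrame;
import org.apache.hyracks.api.context.IHyracksTaskContext;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.dataflow.common.comm.io.ArrayTupleBuilder;
import org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender;
import org.apache.hyracks.dataflow.common.data.marshalling.IntegerSerializerDeserializer;
import org.apache.hyracks.dataflow.common.data.marshalling.UTF8StringSerializerDeserializer;

public final class FrameWriteSketch {
    private FrameWriteSketch() {
    }

    // Serializes `count` (int, string) tuples and forwards them frame by frame to `writer`.
    public static void writeTuples(IHyracksTaskContext ctx, IFrameWriter writer, int count)
            throws HyracksDataException {
        VSizeFrame frame = new VSizeFrame(ctx);
        FrameTupleAppender appender = new FrameTupleAppender(frame);
        ArrayTupleBuilder tb = new ArrayTupleBuilder(2);
        UTF8StringSerializerDeserializer stringSerde = new UTF8StringSerializerDeserializer();
        DataOutput dos = tb.getDataOutput();
        for (int i = 0; i < count; i++) {
            tb.reset();
            IntegerSerializerDeserializer.INSTANCE.serialize(i, dos);
            tb.addFieldEndOffset();
            stringSerde.serialize("record-" + i, dos);
            tb.addFieldEndOffset();
            // append() returns false when the frame is full: write it out, clear it, and retry.
            if (!appender.append(tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize())) {
                appender.write(writer, true);
                if (!appender.append(tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize())) {
                    throw new HyracksDataException("tuple does not fit into an empty frame");
                }
            }
        }
        // Flush whatever is left in the last, partially filled frame.
        if (appender.getTupleCount() > 0) {
            appender.write(writer, true);
        }
    }
}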

Example 32 with VSizeFrame

Use of org.apache.hyracks.api.comm.VSizeFrame in project asterixdb by Apache.

Class TestNodeController, method createTestContext.

public IHyracksTaskContext createTestContext(boolean withMessaging) throws HyracksDataException {
    IHyracksTaskContext ctx = TestUtils.create(KB32);
    if (withMessaging) {
        // Register an empty message frame under KEY_MESSAGE so message-aware operators can find it.
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, new VSizeFrame(ctx), ctx);
    }
    // Spy on the context so the joblet context and IO manager can be stubbed below.
    ctx = Mockito.spy(ctx);
    Mockito.when(ctx.getJobletContext()).thenReturn(jobletCtx);
    Mockito.when(ctx.getIoManager()).thenReturn(ExecutionTestUtil.integrationUtil.ncs[0].getIoManager());
    return ctx;
}
Also used: IHyracksTaskContext (org.apache.hyracks.api.context.IHyracksTaskContext), VSizeFrame (org.apache.hyracks.api.comm.VSizeFrame)
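
A hypothetical usage sketch of createTestContext(true): a test boots a TestNodeController, asks for a 32 KB task context with the messaging frame pre-registered, and builds frames against it. Only calls shown in the surrounding examples are used; the class name and the configPath parameter are illustrative.

import org.apache.asterix.app.bootstrap.TestNodeController;
import org.apache.hyracks.api.comm.VSizeFrame;
import org.apache.hyracks.api.context.IHyracksTaskContext;
import org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender;

public final class TestContextUsageSketch {
    private TestContextUsageSketch() {
    }

    // configPath points at a test configuration file such as TEST_CONFIG_FILE_PATH in Example 35.
    public static void run(String configPath) throws Exception {
        TestNodeController nc = new TestNodeController(configPath, false);
        nc.init();
        try {
            IHyracksTaskContext ctx = nc.createTestContext(true); // messaging frame registered
            VSizeFrame frame = new VSizeFrame(ctx);               // can grow beyond the initial size if needed
            FrameTupleAppender appender = new FrameTupleAppender(frame);
            // ... serialize tuples into the appender and feed frame.getBuffer() to an operator ...
        } finally {
            nc.deInit();
        }
    }
}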

Example 33 with VSizeFrame

Use of org.apache.hyracks.api.comm.VSizeFrame in project asterixdb by Apache.

Class ConnectorDescriptorWithMessagingTest, method testMessageLargerThanEmptyFrame.

@Test
public void testMessageLargerThanEmptyFrame() throws Exception {
    try {
        List<Integer> routing = Arrays.asList(0, 1, 2, 3, 4);
        IConnectorDescriptorRegistry connDescRegistry = Mockito.mock(IConnectorDescriptorRegistry.class);
        ITuplePartitionComputerFactory partitionComputerFactory = new TestPartitionComputerFactory(routing);
        MToNPartitioningWithMessageConnectorDescriptor connector = new MToNPartitioningWithMessageConnectorDescriptor(connDescRegistry, partitionComputerFactory);
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        VSizeFrame message = new VSizeFrame(ctx);
        VSizeFrame tempBuffer = new VSizeFrame(ctx);
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, message, ctx);
        // Write a marker message that is one byte larger than the default frame size.
        writeRandomMessage(message, MessagingFrameTupleAppender.MARKER_MESSAGE, DEFAULT_FRAME_SIZE + 1);
        ISerializerDeserializer<?>[] serdes = new ISerializerDeserializer<?>[] { Integer64SerializerDeserializer.INSTANCE, DoubleSerializerDeserializer.INSTANCE, BooleanSerializerDeserializer.INSTANCE, new UTF8StringSerializerDeserializer() };
        RecordDescriptor rDesc = new RecordDescriptor(serdes);
        TestPartitionWriterFactory partitionWriterFactory = new TestPartitionWriterFactory();
        IFrameWriter partitioner = connector.createPartitioner(ctx, rDesc, partitionWriterFactory, CURRENT_PRODUCER, NUMBER_OF_CONSUMERS, NUMBER_OF_CONSUMERS);
        partitioner.open();
        FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
        List<TestFrameWriter> recipients = new ArrayList<>();
        for (IFrameWriter writer : partitionWriterFactory.getWriters().values()) {
            recipients.add((TestFrameWriter) writer);
        }
        // No data frames were pushed; flushing should still deliver the message to every consumer.
        partitioner.flush();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 1);
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.MARKER_MESSAGE, MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        // Replace the shared message with a one-byte ACK-request control message and flush again.
        message.getBuffer().clear();
        message.getBuffer().put(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE);
        message.getBuffer().flip();
        partitioner.flush();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 2);
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE, MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        message.getBuffer().clear();
        message.getBuffer().put(MessagingFrameTupleAppender.NULL_FEED_MESSAGE);
        message.getBuffer().flip();
        partitioner.flush();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 3);
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.NULL_FEED_MESSAGE, MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        partitioner.close();
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 4);
            Assert.assertEquals(writer.closeCount(), 1);
        }
    } catch (Throwable th) {
        th.printStackTrace();
        throw th;
    }
}
Also used: ITuplePartitionComputerFactory (org.apache.hyracks.api.dataflow.value.ITuplePartitionComputerFactory), MToNPartitioningWithMessageConnectorDescriptor (org.apache.hyracks.dataflow.std.connectors.MToNPartitioningWithMessageConnectorDescriptor), IFrameWriter (org.apache.hyracks.api.comm.IFrameWriter), RecordDescriptor (org.apache.hyracks.api.dataflow.value.RecordDescriptor), TestFrameWriter (org.apache.hyracks.api.test.TestFrameWriter), ArrayList (java.util.ArrayList), UTF8StringSerializerDeserializer (org.apache.hyracks.dataflow.common.data.marshalling.UTF8StringSerializerDeserializer), VSizeFrame (org.apache.hyracks.api.comm.VSizeFrame), ISerializerDeserializer (org.apache.hyracks.api.dataflow.value.ISerializerDeserializer), IConnectorDescriptorRegistry (org.apache.hyracks.api.job.IConnectorDescriptorRegistry), IHyracksTaskContext (org.apache.hyracks.api.context.IHyracksTaskContext), FrameTupleAccessor (org.apache.hyracks.dataflow.common.comm.io.FrameTupleAccessor), Test (org.junit.Test)
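
The clear/put/flip sequence used above to swap the shared message between flushes is easy to get wrong; the following hypothetical helper (not part of the test class) captures it with the ByteBuffer state spelled out.

import org.apache.hyracks.api.comm.VSizeFrame;

final class MessageBufferSketch {
    private MessageBufferSketch() {
    }

    // Overwrites the shared message frame with a single control byte, e.g.
    // MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE or NULL_FEED_MESSAGE.
    static void setControlMessage(VSizeFrame message, byte controlByte) {
        message.getBuffer().clear();          // position = 0, limit = capacity
        message.getBuffer().put(controlByte); // write the one-byte payload
        message.getBuffer().flip();           // position = 0, limit = 1: ready to be read
    }
}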

Example 34 with VSizeFrame

Use of org.apache.hyracks.api.comm.VSizeFrame in project asterixdb by Apache.

Class ConnectorDescriptorWithMessagingTest, method testMessageLargerThanSome.

@Test
public void testMessageLargerThanSome() throws Exception {
    try {
        // Routing will be to 1, 3, and 4 only. 0 and 2 will receive no tuples
        List<Integer> routing = Arrays.asList(1, 3, 4);
        IConnectorDescriptorRegistry connDescRegistry = Mockito.mock(IConnectorDescriptorRegistry.class);
        ITuplePartitionComputerFactory partitionComputerFactory = new TestPartitionComputerFactory(routing);
        MToNPartitioningWithMessageConnectorDescriptor connector = new MToNPartitioningWithMessageConnectorDescriptor(connDescRegistry, partitionComputerFactory);
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        VSizeFrame message = new VSizeFrame(ctx);
        VSizeFrame tempBuffer = new VSizeFrame(ctx);
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, message, ctx);
        message.getBuffer().clear();
        writeRandomMessage(message, MessagingFrameTupleAppender.MARKER_MESSAGE, DEFAULT_FRAME_SIZE);
        ISerializerDeserializer<?>[] serdes = new ISerializerDeserializer<?>[] { Integer64SerializerDeserializer.INSTANCE, DoubleSerializerDeserializer.INSTANCE, BooleanSerializerDeserializer.INSTANCE, new UTF8StringSerializerDeserializer() };
        FieldType[] types = { FieldType.Integer64, FieldType.Double, FieldType.Boolean, FieldType.String };
        RecordDescriptor rDesc = new RecordDescriptor(serdes);
        TestPartitionWriterFactory partitionWriterFactory = new TestPartitionWriterFactory();
        PartitionWithMessageDataWriter partitioner = (PartitionWithMessageDataWriter) connector.createPartitioner(ctx, rDesc, partitionWriterFactory, CURRENT_PRODUCER, NUMBER_OF_CONSUMERS, NUMBER_OF_CONSUMERS);
        partitioner.open();
        FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
        List<TestFrameWriter> recipients = new ArrayList<>();
        for (int i = 0; i < partitionWriterFactory.getWriters().values().size(); i++) {
            recipients.add(partitionWriterFactory.getWriters().get(i));
        }
        TestTupleGenerator ttg = new TestTupleGenerator(types, STRING_FIELD_SIZES, true);
        VSizeFrame frame = new VSizeFrame(ctx);
        FrameTupleAppender appender = new FrameTupleAppender(frame);
        ITupleReference tuple = ttg.next();
        while (appender.append(tuple)) {
            tuple = ttg.next();
        }
        // Push the filled data frame, then flush so the pending marker message reaches every consumer.
        partitioner.nextFrame(frame.getBuffer());
        partitioner.flush();
        Assert.assertEquals(1, partitionWriterFactory.getWriters().get(0).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(1).nextFrameCount());
        Assert.assertEquals(1, partitionWriterFactory.getWriters().get(2).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(3).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(4).nextFrameCount());
        for (TestFrameWriter writer : recipients) {
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.MARKER_MESSAGE, MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        partitioner.close();
    } catch (Throwable th) {
        th.printStackTrace();
        throw th;
    }
}
Also used: PartitionWithMessageDataWriter (org.apache.hyracks.dataflow.std.connectors.PartitionWithMessageDataWriter), RecordDescriptor (org.apache.hyracks.api.dataflow.value.RecordDescriptor), TestFrameWriter (org.apache.hyracks.api.test.TestFrameWriter), ArrayList (java.util.ArrayList), TestTupleGenerator (org.apache.asterix.test.common.TestTupleGenerator), UTF8StringSerializerDeserializer (org.apache.hyracks.dataflow.common.data.marshalling.UTF8StringSerializerDeserializer), IConnectorDescriptorRegistry (org.apache.hyracks.api.job.IConnectorDescriptorRegistry), MessagingFrameTupleAppender (org.apache.hyracks.dataflow.common.io.MessagingFrameTupleAppender), FrameTupleAppender (org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender), ITuplePartitionComputerFactory (org.apache.hyracks.api.dataflow.value.ITuplePartitionComputerFactory), MToNPartitioningWithMessageConnectorDescriptor (org.apache.hyracks.dataflow.std.connectors.MToNPartitioningWithMessageConnectorDescriptor), VSizeFrame (org.apache.hyracks.api.comm.VSizeFrame), ISerializerDeserializer (org.apache.hyracks.api.dataflow.value.ISerializerDeserializer), FieldType (org.apache.asterix.test.common.TestTupleGenerator.FieldType), IHyracksTaskContext (org.apache.hyracks.api.context.IHyracksTaskContext), ITupleReference (org.apache.hyracks.dataflow.common.data.accessors.ITupleReference), FrameTupleAccessor (org.apache.hyracks.dataflow.common.comm.io.FrameTupleAccessor), Test (org.junit.Test)
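
The fill-until-full loop above is a recurring pattern. As a hedged sketch, it could be factored into a helper like the following (hypothetical, using only the types already shown in this test: FrameTupleAppender, TestTupleGenerator, ITupleReference).

    static ITupleReference fillFrame(FrameTupleAppender appender, TestTupleGenerator ttg) throws Exception {
        ITupleReference tuple = ttg.next();
        // append() returns false once the next tuple no longer fits in the current frame.
        while (appender.append(tuple)) {
            tuple = ttg.next();
        }
        // Return the first tuple that did not fit so a caller could start the next frame with it.
        return tuple;
    }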

Example 35 with VSizeFrame

Use of org.apache.hyracks.api.comm.VSizeFrame in project asterixdb by Apache.

Class CheckpointingTest, method testDeleteOldLogFiles.

@Test
public void testDeleteOldLogFiles() {
    try {
        TestNodeController nc = new TestNodeController(new File(TEST_CONFIG_FILE_PATH).getAbsolutePath(), false);
        StorageComponentProvider storageManager = new StorageComponentProvider();
        nc.init();
        List<List<String>> partitioningKeys = new ArrayList<>();
        partitioningKeys.add(Collections.singletonList("key"));
        Dataset dataset = new Dataset(DATAVERSE_NAME, DATASET_NAME, DATAVERSE_NAME, DATA_TYPE_NAME, NODE_GROUP_NAME, null, null, new InternalDatasetDetails(null, PartitioningStrategy.HASH, partitioningKeys, null, null, null, false, null, false), null, DatasetType.INTERNAL, DATASET_ID, 0);
        try {
            nc.createPrimaryIndex(dataset, KEY_TYPES, RECORD_TYPE, META_TYPE, new NoMergePolicyFactory(), null, null, storageManager, KEY_INDEXES, KEY_INDICATOR_LIST);
            IHyracksTaskContext ctx = nc.createTestContext(false);
            nc.newJobId();
            ITransactionContext txnCtx = nc.getTransactionManager().getTransactionContext(nc.getTxnJobId(), true);
            // Prepare insert operation
            LSMInsertDeleteOperatorNodePushable insertOp = nc.getInsertPipeline(ctx, dataset, KEY_TYPES, RECORD_TYPE, META_TYPE, new NoMergePolicyFactory(), null, null, KEY_INDEXES, KEY_INDICATOR_LIST, storageManager).getLeft();
            insertOp.open();
            TupleGenerator tupleGenerator = new TupleGenerator(RECORD_TYPE, META_TYPE, KEY_INDEXES, KEY_INDICATOR, RECORD_GEN_FUNCTION, UNIQUE_RECORD_FIELDS, META_GEN_FUNCTION, UNIQUE_META_FIELDS);
            VSizeFrame frame = new VSizeFrame(ctx);
            FrameTupleAppender tupleAppender = new FrameTupleAppender(frame);
            IRecoveryManager recoveryManager = nc.getTransactionSubsystem().getRecoveryManager();
            ICheckpointManager checkpointManager = nc.getTransactionSubsystem().getCheckpointManager();
            LogManager logManager = (LogManager) nc.getTransactionSubsystem().getLogManager();
            // Number of log files after node startup should be one
            int numberOfLogFiles = logManager.getLogFileIds().size();
            Assert.assertEquals(1, numberOfLogFiles);
            // Low-water mark LSN
            long lowWaterMarkLSN = recoveryManager.getMinFirstLSN();
            // Low-water mark log file id
            long initialLowWaterMarkFileId = logManager.getLogFileId(lowWaterMarkLSN);
            // Initial Low-water mark should be in the only available log file
            Assert.assertEquals(initialLowWaterMarkFileId, logManager.getLogFileIds().get(0).longValue());
            // Insert records until a new log file is created
            while (logManager.getLogFileIds().size() == 1) {
                ITupleReference tuple = tupleGenerator.next();
                DataflowUtils.addTupleToFrame(tupleAppender, tuple, insertOp);
            }
            // Check if the new low-water mark is still in the initial low-water mark log file
            lowWaterMarkLSN = recoveryManager.getMinFirstLSN();
            long currentLowWaterMarkLogFileId = logManager.getLogFileId(lowWaterMarkLSN);
            if (currentLowWaterMarkLogFileId == initialLowWaterMarkFileId) {
                /*
                     * Make sure checkpoint will not delete the initial log file since
                     * the low-water mark is still in it (i.e. it is still required for
                     * recovery)
                     */
                int numberOfLogFilesBeforeCheckpoint = logManager.getLogFileIds().size();
                checkpointManager.tryCheckpoint(logManager.getAppendLSN());
                int numberOfLogFilesAfterCheckpoint = logManager.getLogFileIds().size();
                Assert.assertEquals(numberOfLogFilesBeforeCheckpoint, numberOfLogFilesAfterCheckpoint);
                /*
                     * Insert records until the low-water mark is not in the initialLowWaterMarkFileId
                     * either because of the asynchronous flush caused by the previous checkpoint or a flush
                     * due to the dataset memory budget getting full.
                     */
                while (currentLowWaterMarkLogFileId == initialLowWaterMarkFileId) {
                    ITupleReference tuple = tupleGenerator.next();
                    DataflowUtils.addTupleToFrame(tupleAppender, tuple, insertOp);
                    lowWaterMarkLSN = recoveryManager.getMinFirstLSN();
                    currentLowWaterMarkLogFileId = logManager.getLogFileId(lowWaterMarkLSN);
                }
            }
            /*
                 * At this point, the low-water mark is not in the initialLowWaterMarkFileId, so
                 * a checkpoint should delete it.
                 */
            checkpointManager.tryCheckpoint(recoveryManager.getMinFirstLSN());
            // Validate initialLowWaterMarkFileId was deleted
            for (Long fileId : logManager.getLogFileIds()) {
                Assert.assertNotEquals(initialLowWaterMarkFileId, fileId.longValue());
            }
            // Flush any partially filled frame before closing the insert operator.
            if (tupleAppender.getTupleCount() > 0) {
                tupleAppender.write(insertOp, true);
            }
            insertOp.close();
            nc.getTransactionManager().completedTransaction(txnCtx, DatasetId.NULL, -1, true);
        } finally {
            nc.deInit();
        }
    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
}
Also used: LSMInsertDeleteOperatorNodePushable (org.apache.asterix.common.dataflow.LSMInsertDeleteOperatorNodePushable), ICheckpointManager (org.apache.asterix.common.transactions.ICheckpointManager), Dataset (org.apache.asterix.metadata.entities.Dataset), InternalDatasetDetails (org.apache.asterix.metadata.entities.InternalDatasetDetails), ITransactionContext (org.apache.asterix.common.transactions.ITransactionContext), ArrayList (java.util.ArrayList), TupleGenerator (org.apache.asterix.app.data.gen.TupleGenerator), StorageComponentProvider (org.apache.asterix.file.StorageComponentProvider), VSizeFrame (org.apache.hyracks.api.comm.VSizeFrame), NoMergePolicyFactory (org.apache.hyracks.storage.am.lsm.common.impls.NoMergePolicyFactory), IRecoveryManager (org.apache.asterix.common.transactions.IRecoveryManager), IHyracksTaskContext (org.apache.hyracks.api.context.IHyracksTaskContext), TestNodeController (org.apache.asterix.app.bootstrap.TestNodeController), FrameTupleAppender (org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender), ITupleReference (org.apache.hyracks.dataflow.common.data.accessors.ITupleReference), List (java.util.List), File (java.io.File), LogManager (org.apache.asterix.transaction.management.service.logging.LogManager), Test (org.junit.Test)
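
DataflowUtils.addTupleToFrame is an asterixdb test helper whose body is not shown here. As an assumption, it presumably follows the standard append-or-flush idiom sketched below; this is illustrative only, not the actual implementation.

    // Hypothetical sketch of an append-or-flush helper (assumed behavior, not the real DataflowUtils code).
    static void addTupleToFrame(FrameTupleAppender appender, ITupleReference tuple, IFrameWriter writer)
            throws HyracksDataException {
        if (!appender.append(tuple)) {
            // The current frame is full: forward it downstream and clear it for reuse.
            appender.write(writer, true);
            if (!appender.append(tuple)) {
                // If the tuple still does not fit into an empty frame, give up.
                throw new HyracksDataException("Failed to append tuple to an empty frame");
            }
        }
    }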

Aggregations

VSizeFrame (org.apache.hyracks.api.comm.VSizeFrame): 63 uses
FrameTupleAppender (org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender): 32 uses
HyracksDataException (org.apache.hyracks.api.exceptions.HyracksDataException): 22 uses
FrameTupleAccessor (org.apache.hyracks.dataflow.common.comm.io.FrameTupleAccessor): 18 uses
IHyracksTaskContext (org.apache.hyracks.api.context.IHyracksTaskContext): 17 uses
ArrayTupleBuilder (org.apache.hyracks.dataflow.common.comm.io.ArrayTupleBuilder): 16 uses
Test (org.junit.Test): 16 uses
IFrame (org.apache.hyracks.api.comm.IFrame): 13 uses
ArrayList (java.util.ArrayList): 11 uses
DataOutput (java.io.DataOutput): 10 uses
IFrameTupleAccessor (org.apache.hyracks.api.comm.IFrameTupleAccessor): 9 uses
FrameTupleReference (org.apache.hyracks.dataflow.common.data.accessors.FrameTupleReference): 9 uses
IOException (java.io.IOException): 8 uses
IFrameWriter (org.apache.hyracks.api.comm.IFrameWriter): 8 uses
RecordDescriptor (org.apache.hyracks.api.dataflow.value.RecordDescriptor): 8 uses
ConcurrentFramePool (org.apache.asterix.common.memory.ConcurrentFramePool): 7 uses
FeedRuntimeInputHandler (org.apache.asterix.external.feed.dataflow.FeedRuntimeInputHandler): 7 uses
FeedPolicyAccessor (org.apache.asterix.external.feed.policy.FeedPolicyAccessor): 7 uses
TestFrameWriter (org.apache.hyracks.api.test.TestFrameWriter): 7 uses
ISerializerDeserializer (org.apache.hyracks.api.dataflow.value.ISerializerDeserializer): 6 uses
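
To tie the most frequently co-occurring classes above together, here is a minimal consumer-side sketch: a frame is scanned with a FrameTupleAccessor and a field is re-read through a FrameTupleReference and its serde. The class name, the println output, and the assumption that field 0 is a long serialized with Integer64SerializerDeserializer are all illustrative.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;

import org.apache.hyracks.api.comm.IFrame;
import org.apache.hyracks.api.dataflow.value.RecordDescriptor;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.dataflow.common.comm.io.FrameTupleAccessor;
import org.apache.hyracks.dataflow.common.data.accessors.FrameTupleReference;
import org.apache.hyracks.dataflow.common.data.marshalling.Integer64SerializerDeserializer;

public final class FrameReadSketch {
    private FrameReadSketch() {
    }

    // Scans every tuple in the frame and re-reads field 0 through its serde.
    // Assumes field 0 of rDesc was serialized with Integer64SerializerDeserializer.
    public static void dumpFirstField(IFrame frame, RecordDescriptor rDesc) throws HyracksDataException {
        FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
        FrameTupleReference tuple = new FrameTupleReference();
        fta.reset(frame.getBuffer());
        for (int i = 0; i < fta.getTupleCount(); i++) {
            tuple.reset(fta, i);
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(
                    tuple.getFieldData(0), tuple.getFieldStart(0), tuple.getFieldLength(0)));
            Long key = Integer64SerializerDeserializer.INSTANCE.deserialize(in);
            System.out.println("tuple " + i + ": field0=" + key);
        }
    }
}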