Use of org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender in project asterixdb by apache.
From the class TreeIndexDiskOrderScanOperatorNodePushable, method initialize():
@Override
public void initialize() throws HyracksDataException {
    treeIndexHelper.open();
    ITreeIndex treeIndex = (ITreeIndex) treeIndexHelper.getIndexInstance();
    try {
        ITreeIndexFrame cursorFrame = treeIndex.getLeafFrameFactory().createFrame();
        ITreeIndexCursor cursor = new TreeIndexDiskOrderScanCursor(cursorFrame);
        LocalResource resource = treeIndexHelper.getResource();
        ISearchOperationCallback searchCallback =
                searchCallbackFactory.createSearchOperationCallback(resource.getId(), ctx, null);
        ITreeIndexAccessor indexAccessor =
                (ITreeIndexAccessor) treeIndex.createAccessor(NoOpOperationCallback.INSTANCE, searchCallback);
        try {
            writer.open();
            indexAccessor.diskOrderScan(cursor);
            int fieldCount = treeIndex.getFieldCount();
            FrameTupleAppender appender = new FrameTupleAppender(new VSizeFrame(ctx));
            ArrayTupleBuilder tb = new ArrayTupleBuilder(fieldCount);
            DataOutput dos = tb.getDataOutput();
            while (cursor.hasNext()) {
                tb.reset();
                cursor.next();
                // Copy every field of the current tuple into the tuple builder.
                ITupleReference frameTuple = cursor.getTuple();
                for (int i = 0; i < frameTuple.getFieldCount(); i++) {
                    dos.write(frameTuple.getFieldData(i), frameTuple.getFieldStart(i), frameTuple.getFieldLength(i));
                    tb.addFieldEndOffset();
                }
                // Appends to the current frame, flushing it to the writer first if it is full.
                FrameUtils.appendToWriter(writer, appender, tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize());
            }
            // Flush the last, possibly partial, frame.
            appender.write(writer, true);
        } catch (Throwable th) {
            writer.fail();
            throw new HyracksDataException(th);
        } finally {
            try {
                cursor.close();
            } catch (Exception cursorCloseException) {
                throw new IllegalStateException(cursorCloseException);
            } finally {
                writer.close();
            }
        }
    } catch (Throwable th) {
        treeIndexHelper.close();
        throw new HyracksDataException(th);
    }
}
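The FrameUtils.appendToWriter call above is the standard flush-on-full idiom: it tries to append the built tuple to the current frame and, if the frame is full, pushes the frame to the writer first and then retries. A minimal, self-contained sketch of the same forwarding pattern follows; it is not from the asterixdb source, and the single integer field, class name, and helper name are illustrative assumptions.

import java.io.DataOutput;
import org.apache.hyracks.api.comm.IFrameWriter;
import org.apache.hyracks.api.comm.VSizeFrame;
import org.apache.hyracks.api.context.IHyracksTaskContext;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.dataflow.common.comm.io.ArrayTupleBuilder;
import org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender;
import org.apache.hyracks.dataflow.common.comm.util.FrameUtils;
import org.apache.hyracks.dataflow.common.data.marshalling.IntegerSerializerDeserializer;

public final class ScanForwardSketch {
    // Hypothetical helper: forwards `count` single-field integer tuples to `writer`.
    public static void forwardInts(IHyracksTaskContext ctx, IFrameWriter writer, int count)
            throws HyracksDataException {
        FrameTupleAppender appender = new FrameTupleAppender(new VSizeFrame(ctx));
        ArrayTupleBuilder tb = new ArrayTupleBuilder(1);
        DataOutput dos = tb.getDataOutput();
        for (int i = 0; i < count; i++) {
            tb.reset();
            IntegerSerializerDeserializer.INSTANCE.serialize(i, dos);
            tb.addFieldEndOffset();
            // Appends to the current frame; when full, flushes it to the writer and retries.
            FrameUtils.appendToWriter(writer, appender, tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize());
        }
        if (appender.getTupleCount() > 0) {
            appender.write(writer, true); // flush the trailing partial frame
        }
    }
}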
Use of org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender in project asterixdb by apache.
From the class TreeIndexStatsOperatorNodePushable, method initialize():
@Override
public void initialize() throws HyracksDataException {
    treeIndexHelper.open();
    ITreeIndex treeIndex = (ITreeIndex) treeIndexHelper.getIndexInstance();
    try {
        writer.open();
        IBufferCache bufferCache = storageManager.getBufferCache(ctx.getJobletContext().getServiceContext());
        IFileMapProvider fileMapProvider = storageManager.getFileMapProvider(ctx.getJobletContext().getServiceContext());
        LocalResource resource = treeIndexHelper.getResource();
        IIOManager ioManager = ctx.getIoManager();
        FileReference fileRef = ioManager.resolve(resource.getPath());
        int indexFileId = fileMapProvider.lookupFileId(fileRef);
        TreeIndexStatsGatherer statsGatherer =
                new TreeIndexStatsGatherer(bufferCache, treeIndex.getPageManager(), indexFileId, treeIndex.getRootPageId());
        TreeIndexStats stats = statsGatherer.gatherStats(treeIndex.getLeafFrameFactory().createFrame(),
                treeIndex.getInteriorFrameFactory().createFrame(), treeIndex.getPageManager().createMetadataFrame());
        // Write the stats output as a single string field.
        FrameTupleAppender appender = new FrameTupleAppender(new VSizeFrame(ctx));
        ArrayTupleBuilder tb = new ArrayTupleBuilder(1);
        DataOutput dos = tb.getDataOutput();
        tb.reset();
        utf8SerDer.serialize(stats.toString(), dos);
        tb.addFieldEndOffset();
        // Only one tuple is produced, so a failed append can only mean the record exceeds the frame size.
        if (!appender.append(tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize())) {
            throw new HyracksDataException(
                    "Record size (" + tb.getSize() + ") larger than frame size (" + appender.getBuffer().capacity() + ")");
        }
        appender.write(writer, false);
    } catch (Exception e) {
        writer.fail();
        throw new HyracksDataException(e);
    } finally {
        try {
            writer.close();
        } finally {
            treeIndexHelper.close();
        }
    }
}
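A consumer reading that frame back would reverse the serialization with a FrameTupleAccessor. The following is a hypothetical sketch, not part of the operator; it assumes a heap-backed frame buffer and a one-field UTF-8 record descriptor, and the class and method names are made up for illustration.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.nio.ByteBuffer;
import org.apache.hyracks.api.dataflow.value.ISerializerDeserializer;
import org.apache.hyracks.api.dataflow.value.RecordDescriptor;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.dataflow.common.comm.io.FrameTupleAccessor;
import org.apache.hyracks.dataflow.common.data.marshalling.UTF8StringSerializerDeserializer;

public final class StatsFrameReader {
    // Reads the single string field of tuple 0 out of a received frame.
    public static String readStatsString(ByteBuffer frame) throws HyracksDataException {
        RecordDescriptor rDesc =
                new RecordDescriptor(new ISerializerDeserializer<?>[] { new UTF8StringSerializerDeserializer() });
        FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
        fta.reset(frame);
        // Absolute offset of field 0 of tuple 0 = tuple start + field-slot header + relative field start.
        int start = fta.getTupleStartOffset(0) + fta.getFieldSlotsLength() + fta.getFieldStartOffset(0, 0);
        int length = fta.getFieldLength(0, 0);
        // array() requires a heap buffer, which is what test frames use.
        ByteArrayInputStream bais = new ByteArrayInputStream(fta.getBuffer().array(), start, length);
        return new UTF8StringSerializerDeserializer().deserialize(new DataInputStream(bais));
    }
}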
Use of org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender in project asterixdb by apache.
From the class ConnectorDescriptorWithMessagingTest, method testMessageLargerThanSome():
@Test
public void testMessageLargerThanSome() throws Exception {
    try {
        // Routing will be to 1, 3, and 4 only. 0 and 2 will receive no tuples.
        List<Integer> routing = Arrays.asList(1, 3, 4);
        IConnectorDescriptorRegistry connDescRegistry = Mockito.mock(IConnectorDescriptorRegistry.class);
        ITuplePartitionComputerFactory partitionComputerFactory = new TestPartitionComputerFactory(routing);
        MToNPartitioningWithMessageConnectorDescriptor connector =
                new MToNPartitioningWithMessageConnectorDescriptor(connDescRegistry, partitionComputerFactory);
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        VSizeFrame message = new VSizeFrame(ctx);
        VSizeFrame tempBuffer = new VSizeFrame(ctx);
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, message, ctx);
        message.getBuffer().clear();
        writeRandomMessage(message, MessagingFrameTupleAppender.MARKER_MESSAGE, DEFAULT_FRAME_SIZE);
        ISerializerDeserializer<?>[] serdes = new ISerializerDeserializer<?>[] {
                Integer64SerializerDeserializer.INSTANCE, DoubleSerializerDeserializer.INSTANCE,
                BooleanSerializerDeserializer.INSTANCE, new UTF8StringSerializerDeserializer() };
        FieldType[] types = { FieldType.Integer64, FieldType.Double, FieldType.Boolean, FieldType.String };
        RecordDescriptor rDesc = new RecordDescriptor(serdes);
        TestPartitionWriterFactory partitionWriterFactory = new TestPartitionWriterFactory();
        PartitionWithMessageDataWriter partitioner = (PartitionWithMessageDataWriter) connector.createPartitioner(ctx,
                rDesc, partitionWriterFactory, CURRENT_PRODUCER, NUMBER_OF_CONSUMERS, NUMBER_OF_CONSUMERS);
        partitioner.open();
        FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
        List<TestFrameWriter> recipients = new ArrayList<>();
        for (int i = 0; i < partitionWriterFactory.getWriters().values().size(); i++) {
            recipients.add(partitionWriterFactory.getWriters().get(i));
        }
        TestTupleGenerator ttg = new TestTupleGenerator(types, STRING_FIELD_SIZES, true);
        VSizeFrame frame = new VSizeFrame(ctx);
        FrameTupleAppender appender = new FrameTupleAppender(frame);
        // Fill the frame to capacity; the first tuple that no longer fits is discarded.
        ITupleReference tuple = ttg.next();
        while (appender.append(tuple)) {
            tuple = ttg.next();
        }
        partitioner.nextFrame(frame.getBuffer());
        partitioner.flush();
        // Partitions 1, 3, and 4 receive a data frame plus the message frame produced by
        // flush(); partitions 0 and 2 receive only the message frame.
        Assert.assertEquals(1, partitionWriterFactory.getWriters().get(0).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(1).nextFrameCount());
        Assert.assertEquals(1, partitionWriterFactory.getWriters().get(2).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(3).nextFrameCount());
        Assert.assertEquals(2, partitionWriterFactory.getWriters().get(4).nextFrameCount());
        for (TestFrameWriter writer : recipients) {
            fta.reset(writer.getLastFrame());
            Assert.assertEquals(fta.getTupleCount(), 1);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.MARKER_MESSAGE,
                    MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        partitioner.close();
    } catch (Throwable th) {
        th.printStackTrace();
        throw th;
    }
}
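TestPartitionComputerFactory is test-local and not shown on this page. Given the routing semantics the test relies on (each appended tuple goes to the next partition in the list, cycling), a plausible minimal implementation could look like the sketch below; the class name and cycling behavior are assumptions, not the actual test code.

import java.util.List;
import org.apache.hyracks.api.comm.IFrameTupleAccessor;
import org.apache.hyracks.api.dataflow.value.ITuplePartitionComputer;
import org.apache.hyracks.api.dataflow.value.ITuplePartitionComputerFactory;
import org.apache.hyracks.api.exceptions.HyracksDataException;

public class RoutingPartitionComputerFactory implements ITuplePartitionComputerFactory {
    private static final long serialVersionUID = 1L;
    private final List<Integer> routing;

    public RoutingPartitionComputerFactory(List<Integer> routing) {
        this.routing = routing;
    }

    @Override
    public ITuplePartitionComputer createPartitioner() {
        return new ITuplePartitionComputer() {
            private int next = 0;

            @Override
            public int partition(IFrameTupleAccessor accessor, int tIndex, int nParts)
                    throws HyracksDataException {
                // Ignore the tuple contents; route round-robin over the fixed list.
                int p = routing.get(next);
                next = (next + 1) % routing.size();
                return p;
            }
        };
    }
}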
Use of org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender in project asterixdb by apache.
From the class CheckpointingTest, method testDeleteOldLogFiles():
@Test
public void testDeleteOldLogFiles() {
    try {
        TestNodeController nc = new TestNodeController(new File(TEST_CONFIG_FILE_PATH).getAbsolutePath(), false);
        StorageComponentProvider storageManager = new StorageComponentProvider();
        nc.init();
        List<List<String>> partitioningKeys = new ArrayList<>();
        partitioningKeys.add(Collections.singletonList("key"));
        Dataset dataset = new Dataset(DATAVERSE_NAME, DATASET_NAME, DATAVERSE_NAME, DATA_TYPE_NAME, NODE_GROUP_NAME,
                null, null, new InternalDatasetDetails(null, PartitioningStrategy.HASH, partitioningKeys, null, null,
                        null, false, null, false), null, DatasetType.INTERNAL, DATASET_ID, 0);
        try {
            nc.createPrimaryIndex(dataset, KEY_TYPES, RECORD_TYPE, META_TYPE, new NoMergePolicyFactory(), null, null,
                    storageManager, KEY_INDEXES, KEY_INDICATOR_LIST);
            IHyracksTaskContext ctx = nc.createTestContext(false);
            nc.newJobId();
            ITransactionContext txnCtx = nc.getTransactionManager().getTransactionContext(nc.getTxnJobId(), true);
            // Prepare insert operation
            LSMInsertDeleteOperatorNodePushable insertOp = nc.getInsertPipeline(ctx, dataset, KEY_TYPES, RECORD_TYPE,
                    META_TYPE, new NoMergePolicyFactory(), null, null, KEY_INDEXES, KEY_INDICATOR_LIST, storageManager)
                    .getLeft();
            insertOp.open();
            TupleGenerator tupleGenerator = new TupleGenerator(RECORD_TYPE, META_TYPE, KEY_INDEXES, KEY_INDICATOR,
                    RECORD_GEN_FUNCTION, UNIQUE_RECORD_FIELDS, META_GEN_FUNCTION, UNIQUE_META_FIELDS);
            VSizeFrame frame = new VSizeFrame(ctx);
            FrameTupleAppender tupleAppender = new FrameTupleAppender(frame);
            IRecoveryManager recoveryManager = nc.getTransactionSubsystem().getRecoveryManager();
            ICheckpointManager checkpointManager = nc.getTransactionSubsystem().getCheckpointManager();
            LogManager logManager = (LogManager) nc.getTransactionSubsystem().getLogManager();
            // Number of log files after node startup should be one
            int numberOfLogFiles = logManager.getLogFileIds().size();
            Assert.assertEquals(1, numberOfLogFiles);
            // Low-water mark LSN
            long lowWaterMarkLSN = recoveryManager.getMinFirstLSN();
            // Low-water mark log file id
            long initialLowWaterMarkFileId = logManager.getLogFileId(lowWaterMarkLSN);
            // Initial low-water mark should be in the only available log file
            Assert.assertEquals(initialLowWaterMarkFileId, logManager.getLogFileIds().get(0).longValue());
            // Insert records until a new log file is created
            while (logManager.getLogFileIds().size() == 1) {
                ITupleReference tuple = tupleGenerator.next();
                DataflowUtils.addTupleToFrame(tupleAppender, tuple, insertOp);
            }
            // Check if the new low-water mark is still in the initial low-water mark log file
            lowWaterMarkLSN = recoveryManager.getMinFirstLSN();
            long currentLowWaterMarkLogFileId = logManager.getLogFileId(lowWaterMarkLSN);
            if (currentLowWaterMarkLogFileId == initialLowWaterMarkFileId) {
                /*
                 * Make sure checkpoint will not delete the initial log file since
                 * the low-water mark is still in it (i.e. it is still required for
                 * recovery)
                 */
                int numberOfLogFilesBeforeCheckpoint = logManager.getLogFileIds().size();
                checkpointManager.tryCheckpoint(logManager.getAppendLSN());
                int numberOfLogFilesAfterCheckpoint = logManager.getLogFileIds().size();
                Assert.assertEquals(numberOfLogFilesBeforeCheckpoint, numberOfLogFilesAfterCheckpoint);
                /*
                 * Insert records until the low-water mark leaves initialLowWaterMarkFileId,
                 * either because of the asynchronous flush caused by the previous checkpoint
                 * or a flush due to the dataset memory budget getting full.
                 */
                while (currentLowWaterMarkLogFileId == initialLowWaterMarkFileId) {
                    ITupleReference tuple = tupleGenerator.next();
                    DataflowUtils.addTupleToFrame(tupleAppender, tuple, insertOp);
                    lowWaterMarkLSN = recoveryManager.getMinFirstLSN();
                    currentLowWaterMarkLogFileId = logManager.getLogFileId(lowWaterMarkLSN);
                }
            }
            /*
             * At this point, the low-water mark is not in the initialLowWaterMarkFileId, so
             * a checkpoint should delete it.
             */
            checkpointManager.tryCheckpoint(recoveryManager.getMinFirstLSN());
            // Validate initialLowWaterMarkFileId was deleted
            for (Long fileId : logManager.getLogFileIds()) {
                Assert.assertNotEquals(initialLowWaterMarkFileId, fileId.longValue());
            }
            if (tupleAppender.getTupleCount() > 0) {
                tupleAppender.write(insertOp, true);
            }
            insertOp.close();
            nc.getTransactionManager().completedTransaction(txnCtx, DatasetId.NULL, -1, true);
        } finally {
            nc.deInit();
        }
    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
}
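DataflowUtils.addTupleToFrame comes from the asterixdb test support code. Based on the appender API used throughout these examples, a minimal equivalent would append the tuple, hand the frame to the operator when it fills up, and retry; the following is a sketch under that assumption, not the actual implementation.

import org.apache.hyracks.api.comm.IFrameWriter;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;

public final class DataflowUtilsSketch {
    // Hypothetical equivalent of DataflowUtils.addTupleToFrame.
    public static void addTupleToFrame(FrameTupleAppender appender, ITupleReference tuple, IFrameWriter writer)
            throws HyracksDataException {
        if (!appender.append(tuple)) {
            appender.write(writer, true); // frame full: push it downstream, then clear it
            if (!appender.append(tuple)) { // retry on the now-empty frame
                throw new HyracksDataException("The tuple is larger than the frame size");
            }
        }
    }
}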
Use of org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender in project asterixdb by apache.
From the class LSMInsertDeleteOperatorNodePushable, method open():
@Override
public void open() throws HyracksDataException {
    accessor = new FrameTupleAccessor(inputRecDesc);
    writeBuffer = new VSizeFrame(ctx);
    appender = new FrameTupleAppender(writeBuffer);
    indexHelper.open();
    lsmIndex = (AbstractLSMIndex) indexHelper.getIndexInstance();
    try {
        if (isPrimary && ctx.getSharedObject() != null) {
            // Primary indexes register a callback so log records can carry frame markers.
            PrimaryIndexLogMarkerCallback callback = new PrimaryIndexLogMarkerCallback(lsmIndex);
            TaskUtil.putInSharedMap(ILogMarkerCallback.KEY_MARKER_CALLBACK, callback, ctx);
        }
        writer.open();
        modCallback = modOpCallbackFactory.createModificationOperationCallback(indexHelper.getResource(), ctx, this);
        indexAccessor = lsmIndex.createAccessor(modCallback, NoOpOperationCallback.INSTANCE);
        if (tupleFilterFactory != null) {
            tupleFilter = tupleFilterFactory.createTupleFilter(ctx);
            frameTuple = new FrameTupleReference();
        }
        INcApplicationContext runtimeCtx =
                (INcApplicationContext) ctx.getJobletContext().getServiceContext().getApplicationContext();
        LSMIndexUtil.checkAndSetFirstLSN(lsmIndex, runtimeCtx.getTransactionSubsystem().getLogManager());
    } catch (Throwable th) {
        throw new HyracksDataException(th);
    }
}
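open() only wires things up; the tuples are consumed in nextFrame(). The real method also dispatches on the operation kind (insert vs. delete) and reports failed tuples, but a simplified, hypothetical counterpart using the fields initialized above (accessor, tupleFilter, frameTuple, indexAccessor, writer) might look like this sketch:

@Override
public void nextFrame(ByteBuffer buffer) throws HyracksDataException {
    accessor.reset(buffer);
    FrameTupleReference tupleRef = new FrameTupleReference();
    int tupleCount = accessor.getTupleCount();
    for (int i = 0; i < tupleCount; i++) {
        if (tupleFilter != null) {
            frameTuple.reset(accessor, i);
            if (!tupleFilter.accept(frameTuple)) {
                continue; // skip tuples rejected by the filter
            }
        }
        tupleRef.reset(accessor, i);
        indexAccessor.insert(tupleRef); // INSERT case only; the real code also handles DELETE
    }
    // Forward the frame unchanged to the downstream operator.
    FrameUtils.flushFrame(buffer, writer);
}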