Use of org.apache.hyracks.api.context.IHyracksTaskContext in project asterixdb by apache.
The class TopKRunGeneratorTest, method testAlreadySortedDataShouldNotGenerateAnyRuns:
@Test
public void testAlreadySortedDataShouldNotGenerateAnyRuns() throws HyracksDataException {
    int topK = SORT_FRAME_LIMIT;
    IHyracksTaskContext ctx = AbstractRunGeneratorTest.testUtils.create(PAGE_SIZE);
    HeapSortRunGenerator sorter = new HeapSortRunGenerator(ctx, SORT_FRAME_LIMIT, topK, SortFields, null,
            ComparatorFactories, RecordDesc);
    testInMemoryOnly(ctx, topK, ORDER.INORDER, sorter);
}
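testInMemoryOnly is a private helper of this test class and its body is not shown here. A minimal sketch of the lifecycle it presumably drives, assuming only the standard IFrameWriter contract of the run generator and IRunGenerator's getRuns() accessor (the frame name is invented for illustration):

    // Sketch (assumptions noted above): feed already-sorted frames and
    // verify nothing spilled to disk.
    sorter.open();
    sorter.nextFrame(sortedFrame.getBuffer()); // sortedFrame: pre-sorted input frame
    sorter.close();
    // The heap sorter flushes a run file only when memory is exhausted, so
    // topK tuples that fit in SORT_FRAME_LIMIT frames produce no runs.
    Assert.assertTrue(sorter.getRuns().isEmpty());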
Use of org.apache.hyracks.api.context.IHyracksTaskContext in project asterixdb by apache.
The class ConnectorDescriptorWithMessagingTest, method testEmptyFrames:
@Test
public void testEmptyFrames() throws Exception {
    try {
        List<Integer> routing = Arrays.asList(0, 1, 2, 3, 4);
        IConnectorDescriptorRegistry connDescRegistry = Mockito.mock(IConnectorDescriptorRegistry.class);
        ITuplePartitionComputerFactory partitionComputerFactory = new TestPartitionComputerFactory(routing);
        MToNPartitioningWithMessageConnectorDescriptor connector =
                new MToNPartitioningWithMessageConnectorDescriptor(connDescRegistry, partitionComputerFactory);
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        VSizeFrame message = new VSizeFrame(ctx);
        VSizeFrame tempBuffer = new VSizeFrame(ctx);
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, message, ctx);
        message.getBuffer().clear();
        message.getBuffer().put(MessagingFrameTupleAppender.NULL_FEED_MESSAGE);
        message.getBuffer().flip();
        ISerializerDeserializer<?>[] serdes = new ISerializerDeserializer<?>[] {
                Integer64SerializerDeserializer.INSTANCE, DoubleSerializerDeserializer.INSTANCE,
                BooleanSerializerDeserializer.INSTANCE, new UTF8StringSerializerDeserializer() };
        RecordDescriptor rDesc = new RecordDescriptor(serdes);
        TestPartitionWriterFactory partitionWriterFactory = new TestPartitionWriterFactory();
        IFrameWriter partitioner = connector.createPartitioner(ctx, rDesc, partitionWriterFactory,
                CURRENT_PRODUCER, NUMBER_OF_CONSUMERS, NUMBER_OF_CONSUMERS);
        List<TestFrameWriter> recipients = new ArrayList<>();
        try {
            partitioner.open();
            FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
            for (IFrameWriter writer : partitionWriterFactory.getWriters().values()) {
                recipients.add((TestFrameWriter) writer);
            }
            // Even with no data tuples, a flush delivers one frame carrying the
            // current message to every consumer.
            partitioner.flush();
            for (TestFrameWriter writer : recipients) {
                Assert.assertEquals(writer.nextFrameCount(), 1);
                fta.reset(writer.getLastFrame());
                Assert.assertEquals(fta.getTupleCount(), 1);
                FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
                Assert.assertEquals(MessagingFrameTupleAppender.NULL_FEED_MESSAGE,
                        MessagingFrameTupleAppender.getMessageType(tempBuffer));
            }
            message.getBuffer().clear();
            message.getBuffer().put(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE);
            message.getBuffer().flip();
            partitioner.flush();
            for (TestFrameWriter writer : recipients) {
                Assert.assertEquals(writer.nextFrameCount(), 2);
                fta.reset(writer.getLastFrame());
                Assert.assertEquals(fta.getTupleCount(), 1);
                FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
                Assert.assertEquals(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE,
                        MessagingFrameTupleAppender.getMessageType(tempBuffer));
            }
            message.getBuffer().clear();
            message.getBuffer().put(MessagingFrameTupleAppender.NULL_FEED_MESSAGE);
            message.getBuffer().flip();
            partitioner.flush();
            for (TestFrameWriter writer : recipients) {
                Assert.assertEquals(writer.nextFrameCount(), 3);
                fta.reset(writer.getLastFrame());
                Assert.assertEquals(fta.getTupleCount(), 1);
                FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
                Assert.assertEquals(MessagingFrameTupleAppender.NULL_FEED_MESSAGE,
                        MessagingFrameTupleAppender.getMessageType(tempBuffer));
            }
        } catch (Throwable t) {
            partitioner.fail();
            throw t;
        } finally {
            partitioner.close();
        }
        // close() pushes one final (fourth) frame to each consumer.
        for (TestFrameWriter writer : recipients) {
            Assert.assertEquals(writer.nextFrameCount(), 4);
            Assert.assertEquals(writer.closeCount(), 1);
        }
    } catch (Throwable th) {
        th.printStackTrace();
        throw th;
    }
}
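The producer side stages the message in the task's shared map; consumers read it back the same way. A minimal sketch of that read path, assuming TaskUtil's generic get accessor:

    // Sketch: retrieve the piggybacked message in a downstream operator.
    VSizeFrame msgFrame = TaskUtil.get(HyracksConstants.KEY_MESSAGE, ctx);
    if (msgFrame != null) {
        byte type = MessagingFrameTupleAppender.getMessageType(msgFrame);
        // type is one of NULL_FEED_MESSAGE, ACK_REQ_FEED_MESSAGE, MARKER_MESSAGE, ...
    }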
Use of org.apache.hyracks.api.context.IHyracksTaskContext in project asterixdb by apache.
The class ConnectorDescriptorWithMessagingTest, method testMessageFitsWithTuples:
@Test
public void testMessageFitsWithTuples() throws Exception {
    try {
        // Routing will be round robin
        List<Integer> routing = Arrays.asList(0, 1, 2, 3, 4);
        IConnectorDescriptorRegistry connDescRegistry = Mockito.mock(IConnectorDescriptorRegistry.class);
        ITuplePartitionComputerFactory partitionComputerFactory = new TestPartitionComputerFactory(routing);
        MToNPartitioningWithMessageConnectorDescriptor connector =
                new MToNPartitioningWithMessageConnectorDescriptor(connDescRegistry, partitionComputerFactory);
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        VSizeFrame message = new VSizeFrame(ctx);
        VSizeFrame tempBuffer = new VSizeFrame(ctx);
        TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, message, ctx);
        message.getBuffer().clear();
        message.getBuffer().put(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE);
        message.getBuffer().flip();
        ISerializerDeserializer<?>[] serdes = new ISerializerDeserializer<?>[] {
                Integer64SerializerDeserializer.INSTANCE, DoubleSerializerDeserializer.INSTANCE,
                BooleanSerializerDeserializer.INSTANCE, new UTF8StringSerializerDeserializer() };
        FieldType[] types = { FieldType.Integer64, FieldType.Double, FieldType.Boolean, FieldType.String };
        RecordDescriptor rDesc = new RecordDescriptor(serdes);
        TestPartitionWriterFactory partitionWriterFactory = new TestPartitionWriterFactory();
        PartitionWithMessageDataWriter partitioner =
                (PartitionWithMessageDataWriter) connector.createPartitioner(ctx, rDesc,
                        partitionWriterFactory, CURRENT_PRODUCER, NUMBER_OF_CONSUMERS, NUMBER_OF_CONSUMERS);
        partitioner.open();
        FrameTupleAccessor fta = new FrameTupleAccessor(rDesc);
        List<TestFrameWriter> recipients = new ArrayList<>();
        for (int i = 0; i < partitionWriterFactory.getWriters().values().size(); i++) {
            recipients.add(partitionWriterFactory.getWriters().get(i));
        }
        TestTupleGenerator ttg = new TestTupleGenerator(types, STRING_FIELD_SIZES, true);
        VSizeFrame frame = new VSizeFrame(ctx);
        FrameTupleAppender appender = new FrameTupleAppender(frame);
        // One tuple per consumer; the round-robin computer routes tuple i to consumer i.
        for (int count = 0; count < NUMBER_OF_CONSUMERS; count++) {
            ITupleReference tuple = ttg.next();
            appender.append(tuple);
        }
        partitioner.nextFrame(frame.getBuffer());
        partitioner.flush();
        Assert.assertEquals(partitionWriterFactory.getWriters().get(0).nextFrameCount(), 1);
        Assert.assertEquals(partitionWriterFactory.getWriters().get(1).nextFrameCount(), 1);
        Assert.assertEquals(partitionWriterFactory.getWriters().get(2).nextFrameCount(), 1);
        Assert.assertEquals(partitionWriterFactory.getWriters().get(3).nextFrameCount(), 1);
        Assert.assertEquals(partitionWriterFactory.getWriters().get(4).nextFrameCount(), 1);
        for (TestFrameWriter writer : recipients) {
            fta.reset(writer.getLastFrame());
            // Each consumer sees two tuples: its data tuple plus the appended message tuple.
            Assert.assertEquals(fta.getTupleCount(), 2);
            FeedUtils.processFeedMessage(writer.getLastFrame(), tempBuffer, fta);
            Assert.assertEquals(MessagingFrameTupleAppender.ACK_REQ_FEED_MESSAGE,
                    MessagingFrameTupleAppender.getMessageType(tempBuffer));
        }
        partitioner.close();
    } catch (Throwable th) {
        th.printStackTrace();
        throw th;
    }
}
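TestTupleGenerator is a test-support helper; the same frame could be populated with stock Hyracks classes. A minimal sketch matching rDesc above (the field values are invented for illustration):

    // Sketch: build one tuple matching rDesc and append it to a frame.
    ArrayTupleBuilder tb = new ArrayTupleBuilder(4);
    tb.addField(Integer64SerializerDeserializer.INSTANCE, 42L);
    tb.addField(DoubleSerializerDeserializer.INSTANCE, 3.14);
    tb.addField(BooleanSerializerDeserializer.INSTANCE, true);
    tb.addField(new UTF8StringSerializerDeserializer(), "hello");
    FrameTupleAppender appender = new FrameTupleAppender(new VSizeFrame(ctx));
    appender.append(tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize());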
Use of org.apache.hyracks.api.context.IHyracksTaskContext in project asterixdb by apache.
The class LogMarkerTest, method testInsertWithSnapshot:
@Test
public void testInsertWithSnapshot() {
    try {
        TestNodeController nc = new TestNodeController(null, false);
        nc.init();
        StorageComponentProvider storageManager = new StorageComponentProvider();
        List<List<String>> partitioningKeys = new ArrayList<>();
        partitioningKeys.add(Collections.singletonList("key"));
        Dataset dataset = new Dataset(DATAVERSE_NAME, DATASET_NAME, DATAVERSE_NAME, DATA_TYPE_NAME,
                NODE_GROUP_NAME, null, null, new InternalDatasetDetails(null, PartitioningStrategy.HASH,
                        partitioningKeys, null, null, null, false, null, false),
                null, DatasetType.INTERNAL, DATASET_ID, 0);
        try {
            nc.createPrimaryIndex(dataset, KEY_TYPES, RECORD_TYPE, META_TYPE, new NoMergePolicyFactory(),
                    null, null, storageManager, KEY_INDEXES, KEY_INDICATORS_LIST);
            IHyracksTaskContext ctx = nc.createTestContext(true);
            nc.newJobId();
            ITransactionContext txnCtx =
                    nc.getTransactionManager().getTransactionContext(nc.getTxnJobId(), true);
            LSMInsertDeleteOperatorNodePushable insertOp = nc.getInsertPipeline(ctx, dataset, KEY_TYPES,
                    RECORD_TYPE, META_TYPE, new NoMergePolicyFactory(), null, null, KEY_INDEXES,
                    KEY_INDICATORS_LIST, storageManager).getLeft();
            insertOp.open();
            TupleGenerator tupleGenerator = new TupleGenerator(RECORD_TYPE, META_TYPE, KEY_INDEXES,
                    KEY_INDICATORS, RECORD_GEN_FUNCTION, UNIQUE_RECORD_FIELDS, META_GEN_FUNCTION,
                    UNIQUE_META_FIELDS);
            VSizeFrame frame = new VSizeFrame(ctx);
            VSizeFrame marker = new VSizeFrame(ctx);
            FrameTupleAppender tupleAppender = new FrameTupleAppender(frame);
            long markerId = 0L;
            for (int j = 0; j < NUM_OF_RECORDS; j++) {
                if (j % SNAPSHOT_SIZE == 0) {
                    // Every SNAPSHOT_SIZE records, stage a marker message in the
                    // shared map and flush so the insert pipeline logs it.
                    marker.reset();
                    marker.getBuffer().put(MessagingFrameTupleAppender.MARKER_MESSAGE);
                    marker.getBuffer().putLong(markerId);
                    marker.getBuffer().flip();
                    markerId++;
                    TaskUtil.putInSharedMap(HyracksConstants.KEY_MESSAGE, marker, ctx);
                    tupleAppender.flush(insertOp);
                }
                ITupleReference tuple = tupleGenerator.next();
                DataflowUtils.addTupleToFrame(tupleAppender, tuple, insertOp);
            }
            if (tupleAppender.getTupleCount() > 0) {
                tupleAppender.write(insertOp, true);
            }
            insertOp.close();
            nc.getTransactionManager().completedTransaction(txnCtx, DatasetId.NULL, -1, true);
            IIndexDataflowHelper dataflowHelper = nc.getPrimaryIndexDataflowHelper(dataset, KEY_TYPES,
                    RECORD_TYPE, META_TYPE, new NoMergePolicyFactory(), null, null, storageManager,
                    KEY_INDEXES, KEY_INDICATORS_LIST);
            dataflowHelper.open();
            LSMBTree btree = (LSMBTree) dataflowHelper.getIndexInstance();
            LongPointable longPointable = LongPointable.FACTORY.createPointable();
            // The component metadata stores the LSN of the most recent marker log record.
            ComponentMetadataUtil.get(btree, ComponentMetadataUtil.MARKER_LSN_KEY, longPointable);
            long lsn = longPointable.getLong();
            int numOfMarkers = 0;
            LogReader logReader = (LogReader) nc.getTransactionSubsystem().getLogManager().getLogReader(false);
            long expectedMarkerId = markerId - 1;
            // Walk the marker chain backwards: each marker log record points to
            // the LSN of the previous marker.
            while (lsn >= 0) {
                numOfMarkers++;
                ILogRecord logRecord = logReader.read(lsn);
                lsn = logRecord.getPreviousMarkerLSN();
                long logMarkerId = logRecord.getMarker().getLong();
                Assert.assertEquals(expectedMarkerId, logMarkerId);
                expectedMarkerId--;
            }
            logReader.close();
            dataflowHelper.close();
            Assert.assertEquals(markerId, numOfMarkers);
            nc.newJobId();
            TestTupleCounterFrameWriter countOp = create(nc.getSearchOutputDesc(KEY_TYPES, RECORD_TYPE,
                    META_TYPE), Collections.emptyList(), Collections.emptyList(), false);
            IPushRuntime emptyTupleOp = nc.getFullScanPipeline(countOp, ctx, dataset, KEY_TYPES,
                    RECORD_TYPE, META_TYPE, new NoMergePolicyFactory(), null, null, KEY_INDEXES,
                    KEY_INDICATORS_LIST, storageManager);
            emptyTupleOp.open();
            emptyTupleOp.close();
            // A full scan must still see every inserted record.
            Assert.assertEquals(NUM_OF_RECORDS, countOp.getCount());
        } finally {
            nc.deInit();
        }
    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
}
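The final marker assertion is arithmetic over the insert loop: a marker is staged whenever j % SNAPSHOT_SIZE == 0, so markerId ends at ceil(NUM_OF_RECORDS / SNAPSHOT_SIZE), and the backward walk over previousMarkerLSN must visit exactly that many log records, newest first. For illustration (assumed values, not necessarily the test's actual constants): with NUM_OF_RECORDS = 1000 and SNAPSHOT_SIZE = 250, markers are logged at j = 0, 250, 500, 750, markerId finishes at 4, and the walk reads marker ids 3, 2, 1, 0 before the previous-marker LSN goes negative.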
Use of org.apache.hyracks.api.context.IHyracksTaskContext in project asterixdb by apache.
The class AInt8ConstructorDescriptor, method createEvaluatorFactory:
@Override
public IScalarEvaluatorFactory createEvaluatorFactory(final IScalarEvaluatorFactory[] args) {
    return new IScalarEvaluatorFactory() {
        private static final long serialVersionUID = 1L;

        @Override
        public IScalarEvaluator createScalarEvaluator(IHyracksTaskContext ctx) throws HyracksDataException {
            return new IScalarEvaluator() {
                private ArrayBackedValueStorage resultStorage = new ArrayBackedValueStorage();
                private DataOutput out = resultStorage.getDataOutput();
                private IPointable inputArg = new VoidPointable();
                private IScalarEvaluator eval = args[0].createScalarEvaluator(ctx);
                private byte value;
                private int offset;
                private boolean positive;
                private AMutableInt8 aInt8 = new AMutableInt8((byte) 0);
                @SuppressWarnings("unchecked")
                private ISerializerDeserializer<AInt8> int8Serde =
                        SerializerDeserializerProvider.INSTANCE.getSerializerDeserializer(BuiltinType.AINT8);
                private final UTF8StringPointable utf8Ptr = new UTF8StringPointable();

                @Override
                public void evaluate(IFrameTupleReference tuple, IPointable result) throws HyracksDataException {
                    try {
                        resultStorage.reset();
                        eval.evaluate(tuple, inputArg);
                        byte[] serString = inputArg.getByteArray();
                        int startOffset = inputArg.getStartOffset();
                        int len = inputArg.getLength();
                        if (serString[startOffset] == ATypeTag.SERIALIZED_STRING_TYPE_TAG) {
                            utf8Ptr.set(serString, startOffset + 1, len - 1);
                            offset = utf8Ptr.getCharStartOffset();
                            // Accumulate the value in the negative domain;
                            // otherwise Byte.MIN_VALUE = -(Byte.MAX_VALUE + 1) would overflow.
                            value = 0;
                            positive = true;
                            byte limit = -Byte.MAX_VALUE;
                            if (serString[offset] == '+') {
                                offset++;
                            } else if (serString[offset] == '-') {
                                offset++;
                                positive = false;
                                limit = Byte.MIN_VALUE;
                            }
                            int end = startOffset + len;
                            for (; offset < end; offset++) {
                                int digit;
                                if (serString[offset] >= '0' && serString[offset] <= '9') {
                                    // Reject inputs whose next digit would silently wrap the
                                    // byte before the (value < limit + digit) range check runs.
                                    if (value < limit / 10) {
                                        throw new InvalidDataFormatException(getIdentifier(),
                                                ATypeTag.SERIALIZED_INT8_TYPE_TAG);
                                    }
                                    value = (byte) (value * 10);
                                    digit = serString[offset] - '0';
                                } else if (serString[offset] == 'i' && serString[offset + 1] == '8'
                                        && offset + 2 == end) {
                                    // An optional "i8" type suffix terminates the literal.
                                    break;
                                } else {
                                    throw new InvalidDataFormatException(getIdentifier(),
                                            ATypeTag.SERIALIZED_INT8_TYPE_TAG);
                                }
                                if (value < limit + digit) {
                                    throw new InvalidDataFormatException(getIdentifier(),
                                            ATypeTag.SERIALIZED_INT8_TYPE_TAG);
                                }
                                value = (byte) (value - digit);
                            }
                            if (value > 0) {
                                throw new InvalidDataFormatException(getIdentifier(),
                                        ATypeTag.SERIALIZED_INT8_TYPE_TAG);
                            }
                            if (value < 0 && positive) {
                                value *= -1;
                            }
                            aInt8.setValue(value);
                            int8Serde.serialize(aInt8, out);
                        } else {
                            throw new InvalidDataFormatException(getIdentifier(),
                                    ATypeTag.SERIALIZED_INT8_TYPE_TAG);
                        }
                        result.set(resultStorage);
                    } catch (IOException e1) {
                        throw new InvalidDataFormatException(getIdentifier(), e1,
                                ATypeTag.SERIALIZED_INT8_TYPE_TAG);
                    }
                }
            };
        }
    };
}
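The negative-domain trick is easiest to see on the boundary literals. A worked trace of the loop above (reasoning only, not project code):

    // Parsing "127"  (positive = true, limit = -127):
    //   '1' -> value = 0*10 - 1 = -1
    //   '2' -> value = -1*10 - 2 = -12
    //   '7' -> value = -12*10 - 7 = -127   (range check -120 < -120 does not fire)
    //   positive, so the final result is -(-127) = 127.
    // Parsing "-128" (positive = false, limit = -128):
    //   digits accumulate straight to -128; the equivalent positive
    //   accumulation would need +128, which does not fit in a byte.
    // Parsing "128"  (positive = true, limit = -127):
    //   at '8': value = -120, and -120 < limit + digit = -119 -> reject.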