Use of org.apache.hyracks.dataflow.std.connectors.OneToOneConnectorDescriptor in project asterixdb by Apache.
The class RTreeSecondaryIndexStatsOperatorTest, method showPrimaryIndexStats:
@Test
public void showPrimaryIndexStats() throws Exception {
    JobSpecification spec = new JobSpecification();
    // Collect stats over the secondary R-tree index.
    TreeIndexStatsOperatorDescriptor secondaryStatsOp =
            new TreeIndexStatsOperatorDescriptor(spec, storageManager, secondaryHelperFactory);
    PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, secondaryStatsOp, NC1_ID);
    // Write the stats frames to a plain text file on NC1.
    IFileSplitProvider outSplits = new ConstantFileSplitProvider(new FileSplit[] { createFile(nc1) });
    IOperatorDescriptor printer = new PlainFileWriterOperatorDescriptor(spec, outSplits, ",");
    PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, printer, NC1_ID);
    // Pipe the stats operator into the printer one-to-one, then run the job.
    spec.connect(new OneToOneConnectorDescriptor(spec), secondaryStatsOp, 0, printer, 0);
    spec.addRoot(printer);
    runTest(spec);
}
Use of org.apache.hyracks.dataflow.std.connectors.OneToOneConnectorDescriptor in project asterixdb by Apache.
The class JobFailureTest, method execTest:
private void execTest() throws Exception {
    JobSpecification spec = new JobSpecification();
    // Source operator that deliberately throws while creating the push
    // runtime on partition 4, so the job fails on one node.
    AbstractSingleActivityOperatorDescriptor sourceOpDesc =
            new ExceptionOnCreatePushRuntimeOperatorDescriptor(spec, 0, 1, new int[] { 4 }, true);
    PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, sourceOpDesc, ASTERIX_IDS);
    SinkOperatorDescriptor sinkOpDesc = new SinkOperatorDescriptor(spec, 1);
    PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, sinkOpDesc, ASTERIX_IDS);
    IConnectorDescriptor conn = new OneToOneConnectorDescriptor(spec);
    spec.connect(conn, sourceOpDesc, 0, sinkOpDesc, 0);
    spec.addRoot(sinkOpDesc);
    try {
        runTest(spec);
    } catch (Exception e) {
        // Surface the stack trace for diagnosis, then rethrow to fail the test.
        e.printStackTrace();
        throw e;
    }
    // Even though the job fails by design, cleanup must still run: succeed()
    // reports that, and stats() carries the counters for the failure message.
    Assert.assertTrue(ExceptionOnCreatePushRuntimeOperatorDescriptor.stats()
            + ExceptionOnCreatePushRuntimeOperatorDescriptor.succeed(),
            ExceptionOnCreatePushRuntimeOperatorDescriptor.succeed());
    // should also check the content of the different ncs
}
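The assertion leans on stats() and succeed() from ExceptionOnCreatePushRuntimeOperatorDescriptor. A plausible sketch of that bookkeeping, assuming the descriptor simply balances open and close calls (the field and method bodies here are illustrative, not the real implementation):

import java.util.concurrent.atomic.AtomicInteger;

// Illustrative bookkeeping only; the real descriptor's fields may differ.
private static final AtomicInteger openCount = new AtomicInteger();
private static final AtomicInteger closeCount = new AtomicInteger();

// Human-readable counters for the assertion message.
static String stats() {
    return "opened: " + openCount.get() + ", closed: " + closeCount.get();
}

// The job fails by design; "success" means cleanup still ran everywhere,
// i.e. every runtime that was opened was also closed.
static boolean succeed() {
    return openCount.get() == closeCount.get();
}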
Use of org.apache.hyracks.dataflow.std.connectors.OneToOneConnectorDescriptor in project asterixdb by Apache.
The class SecondaryInvertedIndexOperationsHelper, method buildLoadingJobSpec:
@Override
public JobSpecification buildLoadingJobSpec() throws AlgebricksException {
    JobSpecification spec = RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
    JobId jobId = IndexUtil.bindJobEventListener(spec, metadataProvider);
    // Create dummy key provider for feeding the primary index scan.
    IOperatorDescriptor keyProviderOp = DatasetUtil.createDummyKeyProviderOp(spec, dataset, metadataProvider);
    // Create primary index scan op.
    IOperatorDescriptor primaryScanOp = DatasetUtil.createPrimaryIndexScanOp(spec, metadataProvider, dataset, jobId);
    IOperatorDescriptor sourceOp = primaryScanOp;
    boolean isEnforcingKeyTypes = index.isEnforcingKeyFileds();
    int numSecondaryKeys = index.getKeyFieldNames().size();
    // If the index enforces key types, cast records before the assign op.
    if (isEnforcingKeyTypes && !enforcedItemType.equals(itemType)) {
        sourceOp = createCastOp(spec, dataset.getDatasetType());
        spec.connect(new OneToOneConnectorDescriptor(spec), primaryScanOp, 0, sourceOp, 0);
    }
    AlgebricksMetaOperatorDescriptor asterixAssignOp = createAssignOp(spec, numSecondaryKeys, secondaryRecDesc);
    // If any of the secondary fields are nullable, then add a select op
    // that filters nulls.
    AlgebricksMetaOperatorDescriptor selectOp = null;
    if (anySecondaryKeyIsNullable || isEnforcingKeyTypes) {
        selectOp = createFilterNullsSelectOp(spec, numSecondaryKeys, secondaryRecDesc);
    }
    // Create a tokenizer op.
    AbstractOperatorDescriptor tokenizerOp = createTokenizerOp(spec);
    // Sort by token + primary keys.
    ExternalSortOperatorDescriptor sortOp = createSortOp(spec, tokenKeyPairComparatorFactories, tokenKeyPairRecDesc);
    // Create secondary inverted index bulk load op.
    AbstractSingleActivityOperatorDescriptor invIndexBulkLoadOp = createInvertedIndexBulkLoadOp(spec);
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, invIndexBulkLoadOp,
            secondaryPartitionConstraint);
    // Sink meta operator that terminates the pipeline.
    AlgebricksMetaOperatorDescriptor metaOp = new AlgebricksMetaOperatorDescriptor(spec, 1, 0,
            new IPushRuntimeFactory[] { new SinkRuntimeFactory() }, new RecordDescriptor[] {});
    // Connect the operators.
    spec.connect(new OneToOneConnectorDescriptor(spec), keyProviderOp, 0, primaryScanOp, 0);
    spec.connect(new OneToOneConnectorDescriptor(spec), sourceOp, 0, asterixAssignOp, 0);
    if (anySecondaryKeyIsNullable || isEnforcingKeyTypes) {
        spec.connect(new OneToOneConnectorDescriptor(spec), asterixAssignOp, 0, selectOp, 0);
        spec.connect(new OneToOneConnectorDescriptor(spec), selectOp, 0, tokenizerOp, 0);
    } else {
        spec.connect(new OneToOneConnectorDescriptor(spec), asterixAssignOp, 0, tokenizerOp, 0);
    }
    spec.connect(new OneToOneConnectorDescriptor(spec), tokenizerOp, 0, sortOp, 0);
    spec.connect(new OneToOneConnectorDescriptor(spec), sortOp, 0, invIndexBulkLoadOp, 0);
    spec.connect(new OneToOneConnectorDescriptor(spec), invIndexBulkLoadOp, 0, metaOp, 0);
    spec.addRoot(metaOp);
    spec.setConnectorPolicyAssignmentPolicy(new ConnectorPolicyAssignmentPolicy());
    return spec;
}
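Every hop in this pipeline (key provider → primary scan → optional cast → assign → optional select → tokenizer → sort → bulk load → sink) is a fresh OneToOneConnectorDescriptor joining output port 0 to input port 0. For the unconditional, linear stretches, that repetition could be folded into a loop; a hypothetical convenience method, not part of AsterixDB:

// Hypothetical helper: connect a linear chain of operators one-to-one,
// in order, using output port 0 and input port 0 at every hop.
static void chain(JobSpecification spec, IOperatorDescriptor... ops) {
    for (int i = 0; i + 1 < ops.length; i++) {
        spec.connect(new OneToOneConnectorDescriptor(spec), ops[i], 0, ops[i + 1], 0);
    }
}

With it, the unconditional tail of the method would read chain(spec, tokenizerOp, sortOp, invIndexBulkLoadOp, metaOp).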