Use of com.yahoo.searchlib.rankingexpression.Reference in project vespa by vespa-engine.
From the class RankProfile, method typeContext.
/**
* Creates a context containing the type information of all constants, attributes and query profiles
* referable from this rank profile.
*/
public TypeContext<Reference> typeContext(QueryProfileRegistry queryProfiles) {
    MapEvaluationTypeContext context = new MapEvaluationTypeContext(getMacros().values().stream()
                                                                               .map(Macro::asExpressionFunction)
                                                                               .collect(Collectors.toList()));

    // Add small and large constants, respectively
    getConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.type()));
    getSearch().getRankingConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.getTensorType()));

    // Add attributes
    getSearch().allFields().forEach(field -> addAttributeFeatureTypes(field, context));
    getSearch().allImportedFields().forEach(field -> addAttributeFeatureTypes(field, context));

    // Add query features from rank profile types reached from the "default" profile
    for (QueryProfileType queryProfileType : queryProfiles.getTypeRegistry().allComponents()) {
        for (FieldDescription field : queryProfileType.declaredFields().values()) {
            TensorType type = field.getType().asTensorType();
            Optional<Reference> feature = Reference.simple(field.getName());
            if (!feature.isPresent() || !feature.get().name().equals("query"))
                continue;
            TensorType existingType = context.getType(feature.get());
            if (!Objects.equals(existingType, context.defaultTypeOf(feature.get())))
                type = existingType.dimensionwiseGeneralizationWith(type).orElseThrow(() ->
                        new IllegalArgumentException(queryProfileType + " contains query feature " + feature.get() +
                                                     " with type " + field.getType().asTensorType() +
                                                     ", but this is already defined in another query profile" +
                                                     " with type " + context.getType(feature.get())));
            context.setType(feature.get(), type);
        }
    }
    return context;
}
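For orientation, here is a minimal usage sketch (not part of the Vespa sources; the helper name and its parameters are assumptions): it builds the context and then resolves the type an expression would produce against it, the same pattern reduceBatchDimensions uses below.

// Illustrative sketch only; assumes the same imports as the RankProfile class above.
private static TensorType resolveExpressionType(RankProfile profile,
                                                QueryProfileRegistry queryProfiles,
                                                RankingExpression expression) {
    // Build a context containing the types of all constants, attributes and query features ...
    TypeContext<Reference> context = profile.typeContext(queryProfiles);
    // ... then let the expression tree compute the type it produces given those inputs.
    return expression.getRoot().type(context);
}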
Use of com.yahoo.searchlib.rankingexpression.Reference in project vespa by vespa-engine.
From the class TensorFlowFeatureConverter, method reduceBatchDimensions.
/**
* Check if batch dimensions of inputs can be reduced out. If the input
* macro specifies that a single exemplar should be evaluated, we can
* reduce the batch dimension out.
*/
private void reduceBatchDimensions(RankingExpression expression, TensorFlowModel model,
                                   RankProfile profile, QueryProfileRegistry queryProfiles) {
    TypeContext<Reference> typeContext = profile.typeContext(queryProfiles);
    TensorType typeBeforeReducing = expression.getRoot().type(typeContext);

    // Check generated macros for inputs to reduce
    Set<String> macroNames = new HashSet<>();
    addMacroNamesIn(expression.getRoot(), macroNames, model);
    for (String macroName : macroNames) {
        if (!model.macros().containsKey(macroName)) {
            continue;
        }
        RankProfile.Macro macro = profile.getMacros().get(macroName);
        if (macro == null) {
            throw new IllegalArgumentException("Model refers to generated macro '" + macroName +
                                               "' but this macro is not present in " + profile);
        }
        RankingExpression macroExpression = macro.getRankingExpression();
        macroExpression.setRoot(reduceBatchDimensionsAtInput(macroExpression.getRoot(), model, typeContext));
    }

    // Check expression for inputs to reduce
    ExpressionNode root = expression.getRoot();
    root = reduceBatchDimensionsAtInput(root, model, typeContext);
    TensorType typeAfterReducing = root.type(typeContext);
    root = expandBatchDimensionsAtOutput(root, typeBeforeReducing, typeAfterReducing);
    expression.setRoot(root);
}
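To make the transformation concrete, here is a small sketch of its type-level effect (illustrative only; the dimension names d0 and d1 and their sizes are assumptions, not taken from the converter): a size-1 batch dimension at an input is reduced away before evaluation and expanded again at the output, so callers still see the originally declared type.

// Illustrative sketch only, not part of TensorFlowFeatureConverter.
// Assumes: import com.yahoo.tensor.TensorType;
private static void illustrateBatchDimensionReduction() {
    // A typical placeholder type with a size-1 batch dimension (d0): tensor(d0[1],d1[784])
    TensorType typeBeforeReducing = new TensorType.Builder()
            .indexed("d0", 1)        // batch dimension holding a single exemplar
            .indexed("d1", 784)      // the actual input dimension
            .build();

    // After reduceBatchDimensionsAtInput, evaluation sees the batch dimension removed: tensor(d1[784])
    TensorType typeAfterReducing = new TensorType.Builder()
            .indexed("d1", 784)
            .build();

    // expandBatchDimensionsAtOutput restores the batched form on the output,
    // so the expression still presents typeBeforeReducing to its callers.
    System.out.println(typeBeforeReducing + " -> " + typeAfterReducing);
}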