use of org.apache.asterix.om.types.ARecordType in project asterixdb by apache.
the class LangExpressionToPlanTranslator method translateUpsert.
private ILogicalOperator translateUpsert(DatasetDataSource targetDatasource, Mutable<ILogicalExpression> varRef,
        List<Mutable<ILogicalExpression>> varRefsForLoading,
        List<Mutable<ILogicalExpression>> additionalFilteringExpressions, ILogicalOperator assign,
        List<String> additionalFilteringField, LogicalVariable unnestVar, ILogicalOperator topOp,
        List<Mutable<ILogicalExpression>> exprs, LogicalVariable resVar, AssignOperator additionalFilteringAssign,
        ICompiledDmlStatement stmt) throws AlgebricksException {
    if (!targetDatasource.getDataset().allow(topOp, DatasetUtil.OP_UPSERT)) {
        throw new AlgebricksException(targetDatasource.getDataset().getDatasetName()
                + ": upsert into dataset is not supported on Datasets with Meta records");
    }
    ProjectOperator project = (ProjectOperator) topOp;
    CompiledUpsertStatement compiledUpsert = (CompiledUpsertStatement) stmt;
    Expression returnExpression = compiledUpsert.getReturnExpression();
    InsertDeleteUpsertOperator upsertOp;
    ILogicalOperator rootOperator;
    if (targetDatasource.getDataset().hasMetaPart()) {
        if (returnExpression != null) {
            throw new AlgebricksException("Returning not allowed on datasets with Meta records");
        }
        AssignOperator metaAndKeysAssign;
        List<LogicalVariable> metaAndKeysVars;
        List<Mutable<ILogicalExpression>> metaAndKeysExprs;
        List<Mutable<ILogicalExpression>> metaExpSingletonList;
        metaAndKeysVars = new ArrayList<>();
        metaAndKeysExprs = new ArrayList<>();
        // add the meta function
        IFunctionInfo finfoMeta = FunctionUtil.getFunctionInfo(BuiltinFunctions.META);
        ScalarFunctionCallExpression metaFunction = new ScalarFunctionCallExpression(finfoMeta,
                new MutableObject<>(new VariableReferenceExpression(unnestVar)));
        // create assign for the meta part
        LogicalVariable metaVar = context.newVar();
        metaExpSingletonList = new ArrayList<>(1);
        metaExpSingletonList.add(new MutableObject<>(new VariableReferenceExpression(metaVar)));
        metaAndKeysVars.add(metaVar);
        metaAndKeysExprs.add(new MutableObject<>(metaFunction));
        project.getVariables().add(metaVar);
        varRefsForLoading.clear();
        for (Mutable<ILogicalExpression> assignExpr : exprs) {
            if (assignExpr.getValue().getExpressionTag() == LogicalExpressionTag.FUNCTION_CALL) {
                AbstractFunctionCallExpression funcCall = (AbstractFunctionCallExpression) assignExpr.getValue();
                funcCall.substituteVar(resVar, unnestVar);
                LogicalVariable pkVar = context.newVar();
                metaAndKeysVars.add(pkVar);
                metaAndKeysExprs.add(new MutableObject<>(assignExpr.getValue()));
                project.getVariables().add(pkVar);
                varRefsForLoading.add(new MutableObject<>(new VariableReferenceExpression(pkVar)));
            }
        }
        // A change feed, we don't need the assign to access PKs
        upsertOp = new InsertDeleteUpsertOperator(targetDatasource, varRef, varRefsForLoading, metaExpSingletonList,
                InsertDeleteUpsertOperator.Kind.UPSERT, false);
        // Create and add a new variable used for representing the original record
        upsertOp.setPrevRecordVar(context.newVar());
        upsertOp.setPrevRecordType(targetDatasource.getItemType());
        if (targetDatasource.getDataset().hasMetaPart()) {
            List<LogicalVariable> metaVars = new ArrayList<>();
            metaVars.add(context.newVar());
            upsertOp.setPrevAdditionalNonFilteringVars(metaVars);
            List<Object> metaTypes = new ArrayList<>();
            metaTypes.add(targetDatasource.getMetaItemType());
            upsertOp.setPrevAdditionalNonFilteringTypes(metaTypes);
        }
        if (additionalFilteringField != null) {
            upsertOp.setPrevFilterVar(context.newVar());
            upsertOp.setPrevFilterType(
                    ((ARecordType) targetDatasource.getItemType()).getFieldType(additionalFilteringField.get(0)));
            additionalFilteringAssign.getInputs().clear();
            additionalFilteringAssign.getInputs().add(assign.getInputs().get(0));
            upsertOp.getInputs().add(new MutableObject<>(additionalFilteringAssign));
        } else {
            upsertOp.getInputs().add(assign.getInputs().get(0));
        }
        metaAndKeysAssign = new AssignOperator(metaAndKeysVars, metaAndKeysExprs);
        metaAndKeysAssign.getInputs().add(topOp.getInputs().get(0));
        topOp.getInputs().set(0, new MutableObject<>(metaAndKeysAssign));
        upsertOp.setAdditionalFilteringExpressions(additionalFilteringExpressions);
    } else {
        upsertOp = new InsertDeleteUpsertOperator(targetDatasource, varRef, varRefsForLoading,
                InsertDeleteUpsertOperator.Kind.UPSERT, false);
        upsertOp.setAdditionalFilteringExpressions(additionalFilteringExpressions);
        upsertOp.getInputs().add(new MutableObject<>(assign));
        // Create and add a new variable used for representing the original record
        ARecordType recordType = (ARecordType) targetDatasource.getItemType();
        upsertOp.setPrevRecordVar(context.newVar());
        upsertOp.setPrevRecordType(recordType);
        if (additionalFilteringField != null) {
            upsertOp.setPrevFilterVar(context.newVar());
            upsertOp.setPrevFilterType(recordType.getFieldType(additionalFilteringField.get(0)));
        }
    }
    rootOperator = new DelegateOperator(new CommitOperator(returnExpression == null));
    rootOperator.getInputs().add(new MutableObject<>(upsertOp));
    // Compiles the return expression.
    return processReturningExpression(rootOperator, upsertOp, compiledUpsert);
}
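The filter handling above reads the declared type of the filter field straight out of the dataset's ARecordType (setPrevFilterType). A minimal standalone sketch of that lookup, using only the parallel name/type arrays ARecordType exposes; the helper name fieldTypeOf is ours, not the translator's:

static IAType fieldTypeOf(ARecordType recType, String fieldName) {
    String[] names = recType.getFieldNames();
    for (int i = 0; i < names.length; i++) {
        if (names[i].equals(fieldName)) {
            return recType.getFieldTypes()[i]; // an AUnionType if the field was declared optional
        }
    }
    return null; // not declared; the field may still appear at runtime if the record type is open
}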
use of org.apache.asterix.om.types.ARecordType in project asterixdb by apache.
the class TypeTranslator method computeRecordType.
private static ARecordType computeRecordType(TypeSignature typeSignature, RecordTypeDefinition rtd,
        Map<TypeSignature, IAType> typeMap, Map<String, Map<ARecordType, List<Integer>>> incompleteFieldTypes,
        Map<TypeSignature, List<AbstractCollectionType>> incompleteItemTypes, String defaultDataverse)
        throws AsterixException {
    List<String> names = rtd.getFieldNames();
    int n = names.size();
    String[] fldNames = new String[n];
    IAType[] fldTypes = new IAType[n];
    int i = 0;
    for (String s : names) {
        fldNames[i++] = s;
    }
    boolean isOpen = rtd.getRecordKind() == RecordKind.OPEN;
    ARecordType recType =
            new ARecordType(typeSignature == null ? null : typeSignature.getName(), fldNames, fldTypes, isOpen);
    List<IRecordFieldDataGen> fieldDataGen = rtd.getFieldDataGen();
    if (fieldDataGen.size() == n) {
        IRecordFieldDataGen[] rfdg = new IRecordFieldDataGen[n];
        rfdg = fieldDataGen.toArray(rfdg);
        recType.getAnnotations().add(new RecordDataGenAnnotation(rfdg, rtd.getUndeclaredFieldsDataGen()));
    }
    for (int j = 0; j < n; j++) {
        TypeExpression texpr = rtd.getFieldTypes().get(j);
        switch (texpr.getTypeKind()) {
            case TYPEREFERENCE: {
                TypeReferenceExpression tre = (TypeReferenceExpression) texpr;
                TypeSignature signature = new TypeSignature(
                        tre.getIdent().first == null ? defaultDataverse : tre.getIdent().first.getValue(),
                        tre.getIdent().second.getValue());
                IAType tref = solveTypeReference(signature, typeMap);
                if (tref != null) {
                    if (!rtd.getOptionableFields().get(j)) {
                        // not nullable
                        fldTypes[j] = tref;
                    } else {
                        // optional
                        fldTypes[j] = AUnionType.createUnknownableType(tref);
                    }
                } else {
                    addIncompleteFieldTypeReference(recType, j, tre, incompleteFieldTypes);
                    if (rtd.getOptionableFields().get(j)) {
                        fldTypes[j] = AUnionType.createUnknownableType(null);
                    }
                }
                break;
            }
            case RECORD: {
                RecordTypeDefinition recTypeDef2 = (RecordTypeDefinition) texpr;
                IAType t2 = computeRecordType(null, recTypeDef2, typeMap, incompleteFieldTypes, incompleteItemTypes,
                        defaultDataverse);
                if (!rtd.getOptionableFields().get(j)) {
                    // not nullable
                    fldTypes[j] = t2;
                } else {
                    // nullable
                    fldTypes[j] = AUnionType.createUnknownableType(t2);
                }
                break;
            }
            case ORDEREDLIST: {
                OrderedListTypeDefinition oltd = (OrderedListTypeDefinition) texpr;
                IAType t2 = computeOrderedListType(null, oltd, typeMap, incompleteItemTypes, incompleteFieldTypes,
                        defaultDataverse);
                fldTypes[j] = rtd.getOptionableFields().get(j) ? AUnionType.createUnknownableType(t2) : t2;
                break;
            }
            case UNORDEREDLIST: {
                UnorderedListTypeDefinition ultd = (UnorderedListTypeDefinition) texpr;
                IAType t2 = computeUnorderedListType(null, ultd, typeMap, incompleteItemTypes, incompleteFieldTypes,
                        defaultDataverse);
                fldTypes[j] = rtd.getOptionableFields().get(j) ? AUnionType.createUnknownableType(t2) : t2;
                break;
            }
            default: {
                throw new IllegalStateException();
            }
        }
    }
    return recType;
}
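computeRecordType bottoms out in the four-argument ARecordType constructor over parallel name/type arrays, with optional fields wrapped via AUnionType.createUnknownableType. A hedged sketch of the fully resolved end state, using the BuiltinType singletons from org.apache.asterix.om.types (the type and field names here are purely illustrative, not from the sources):

// Hypothetical "UserType": required int64 id, optional string name, open record.
String[] fldNames = { "id", "name" };
IAType[] fldTypes = { BuiltinType.AINT64, AUnionType.createUnknownableType(BuiltinType.ASTRING) };
ARecordType userType = new ARecordType("UserType", fldNames, fldTypes, true);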
use of org.apache.asterix.om.types.ARecordType in project asterixdb by apache.
the class FilePartition method get.
@Override
protected void get(IServletRequest request, IServletResponse response) {
    response.setStatus(HttpResponseStatus.OK);
    try {
        HttpUtil.setContentType(response, HttpUtil.ContentType.APPLICATION_JSON, HttpUtil.Encoding.UTF8);
    } catch (IOException e) {
        LOGGER.log(Level.WARNING, "Failure setting content type", e);
        response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
        response.writer().write(e.toString());
        return;
    }
    PrintWriter out = response.writer();
    try {
        ObjectMapper om = new ObjectMapper();
        ObjectNode jsonResponse = om.createObjectNode();
        String dataverseName = request.getParameter("dataverseName");
        String datasetName = request.getParameter("datasetName");
        if (dataverseName == null || datasetName == null) {
jsonResponse.put("error", "Parameter dataverseName or datasetName is null,");
            out.write(jsonResponse.toString());
            return;
        }
        IHyracksClientConnection hcc = (IHyracksClientConnection) ctx.get(HYRACKS_CONNECTION_ATTR);
        // Metadata transaction begins.
        MetadataManager.INSTANCE.init();
        MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        // Retrieves file splits of the dataset.
        MetadataProvider metadataProvider = new MetadataProvider(appCtx, null, new StorageComponentProvider());
        try {
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            Dataset dataset = metadataProvider.findDataset(dataverseName, datasetName);
            if (dataset == null) {
                jsonResponse.put("error",
                        "Dataset " + datasetName + " does not exist in " + "dataverse " + dataverseName);
                out.write(jsonResponse.toString());
                out.flush();
                return;
            }
            boolean temp = dataset.getDatasetDetails().isTemp();
            FileSplit[] fileSplits = metadataProvider.splitsForIndex(mdTxnCtx, dataset, datasetName);
            ARecordType recordType = (ARecordType) metadataProvider.findType(dataset.getItemTypeDataverseName(),
                    dataset.getItemTypeName());
            List<List<String>> primaryKeys = dataset.getPrimaryKeys();
            StringBuilder pkStrBuf = new StringBuilder();
            for (List<String> keys : primaryKeys) {
                for (String key : keys) {
                    pkStrBuf.append(key).append(",");
                }
            }
            pkStrBuf.delete(pkStrBuf.length() - 1, pkStrBuf.length());
            // Constructs the returned json object.
            formResponseObject(jsonResponse, fileSplits, recordType, pkStrBuf.toString(), temp,
                    hcc.getNodeControllerInfos());
            // Flush the cached contents of the dataset to file system.
            FlushDatasetUtil.flushDataset(hcc, metadataProvider, dataverseName, datasetName, datasetName);
            // Metadata transaction commits.
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            // Writes file splits.
            out.write(jsonResponse.toString());
        } finally {
            metadataProvider.getLocks().unlock();
        }
    } catch (Exception e) {
        LOGGER.log(Level.WARNING, "Failure handling a request", e);
        response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
        out.write(e.toString());
    } finally {
        out.flush();
    }
}
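As an aside, the primary-key CSV assembled above (append plus trailing-comma trim) can be written with a joining collector, which also behaves sensibly when the key list is empty; the helper name primaryKeyCsv is ours:

static String primaryKeyCsv(java.util.List<java.util.List<String>> primaryKeys) {
    // Flatten nested key paths and join with commas; an empty input yields "".
    return primaryKeys.stream().flatMap(java.util.List::stream)
            .collect(java.util.stream.Collectors.joining(","));
}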
use of org.apache.asterix.om.types.ARecordType in project asterixdb by apache.
the class NCQueryServiceServlet method executeStatement.
@Override
protected void executeStatement(String statementsText, SessionOutput sessionOutput,
        IStatementExecutor.ResultDelivery delivery, IStatementExecutor.Stats stats, RequestParameters param,
        String handleUrl, long[] outExecStartEnd) throws Exception {
    // Running on NC -> send 'execute' message to CC
    INCServiceContext ncCtx = (INCServiceContext) serviceCtx;
    INCMessageBroker ncMb = (INCMessageBroker) ncCtx.getMessageBroker();
    IStatementExecutor.ResultDelivery ccDelivery = delivery == IStatementExecutor.ResultDelivery.IMMEDIATE
            ? IStatementExecutor.ResultDelivery.DEFERRED : delivery;
    ExecuteStatementResponseMessage responseMsg;
    MessageFuture responseFuture = ncMb.registerMessageFuture();
    try {
        ExecuteStatementRequestMessage requestMsg = new ExecuteStatementRequestMessage(ncCtx.getNodeId(),
                responseFuture.getFutureId(), queryLanguage, statementsText, sessionOutput.config(), ccDelivery,
                param.clientContextID, handleUrl);
        outExecStartEnd[0] = System.nanoTime();
        ncMb.sendMessageToCC(requestMsg);
        responseMsg = (ExecuteStatementResponseMessage) responseFuture.get(
                ExecuteStatementResponseMessage.DEFAULT_TIMEOUT_MILLIS, java.util.concurrent.TimeUnit.MILLISECONDS);
        outExecStartEnd[1] = System.nanoTime();
    } finally {
        ncMb.deregisterMessageFuture(responseFuture.getFutureId());
    }
    Throwable err = responseMsg.getError();
    if (err != null) {
        if (err instanceof Error) {
            throw (Error) err;
        } else if (err instanceof Exception) {
            throw (Exception) err;
        } else {
            throw new Exception(err.toString(), err);
        }
    }
    IStatementExecutor.ResultMetadata resultMetadata = responseMsg.getMetadata();
    if (delivery == IStatementExecutor.ResultDelivery.IMMEDIATE && !resultMetadata.getResultSets().isEmpty()) {
        for (Triple<JobId, ResultSetId, ARecordType> rsmd : resultMetadata.getResultSets()) {
            ResultReader resultReader = new ResultReader(getHyracksDataset(), rsmd.getLeft(), rsmd.getMiddle());
            ResultUtil.printResults(appCtx, resultReader, sessionOutput, stats, rsmd.getRight());
        }
    } else {
        sessionOutput.out().append(responseMsg.getResult());
    }
}
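The error propagation above is a generic pattern for rethrowing a Throwable that crossed a message boundary; pulled out as a hedged standalone helper (the name rethrow is ours):

// Rethrow a remote Throwable as its concrete kind; anything that is neither
// an Error nor an Exception gets wrapped in a checked Exception.
static void rethrow(Throwable err) throws Exception {
    if (err instanceof Error) {
        throw (Error) err;
    }
    if (err instanceof Exception) {
        throw (Exception) err;
    }
    throw new Exception(err.toString(), err);
}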
use of org.apache.asterix.om.types.ARecordType in project asterixdb by apache.
the class TypeTranslator method secondPass.
private static void secondPass(MetadataTransactionContext mdTxnCtx, Map<TypeSignature, IAType> typeMap,
        Map<String, Map<ARecordType, List<Integer>>> incompleteFieldTypes,
        Map<TypeSignature, List<AbstractCollectionType>> incompleteItemTypes,
        Map<TypeSignature, List<TypeSignature>> incompleteTopLevelTypeReferences, String typeDataverse)
        throws AlgebricksException {
    // solve remaining top level references
    for (TypeSignature typeSignature : incompleteTopLevelTypeReferences.keySet()) {
        IAType t;
        Datatype dt = MetadataManager.INSTANCE.getDatatype(mdTxnCtx, typeSignature.getNamespace(),
                typeSignature.getName());
        if (dt == null) {
            throw new AlgebricksException("Could not resolve type " + typeSignature);
        } else {
            t = dt.getDatatype();
        }
        for (TypeSignature sign : incompleteTopLevelTypeReferences.get(typeSignature)) {
            typeMap.put(sign, t);
        }
    }
    // solve remaining field type references
    for (String trefName : incompleteFieldTypes.keySet()) {
        IAType t;
        Datatype dt = MetadataManager.INSTANCE.getDatatype(mdTxnCtx, typeDataverse, trefName);
        if (dt == null) {
            dt = MetadataManager.INSTANCE.getDatatype(mdTxnCtx, MetadataConstants.METADATA_DATAVERSE_NAME, trefName);
        }
        if (dt == null) {
            throw new AlgebricksException("Could not resolve type " + trefName);
        } else {
            t = dt.getDatatype();
        }
        Map<ARecordType, List<Integer>> fieldsToFix = incompleteFieldTypes.get(trefName);
        for (ARecordType recType : fieldsToFix.keySet()) {
            List<Integer> positions = fieldsToFix.get(recType);
            IAType[] fldTypes = recType.getFieldTypes();
            for (Integer pos : positions) {
                if (fldTypes[pos] == null) {
                    fldTypes[pos] = t;
                } else {
                    // nullable
                    AUnionType nullableUnion = (AUnionType) fldTypes[pos];
                    nullableUnion.setActualType(t);
                }
            }
        }
    }
    // solve remaining item type references
    for (TypeSignature typeSignature : incompleteItemTypes.keySet()) {
        IAType t;
        Datatype dt;
        if (MetadataManager.INSTANCE != null) {
            dt = MetadataManager.INSTANCE.getDatatype(mdTxnCtx, typeSignature.getNamespace(),
                    typeSignature.getName());
            if (dt == null) {
                throw new AlgebricksException("Could not resolve type " + typeSignature);
            }
            t = dt.getDatatype();
        } else {
            t = typeMap.get(typeSignature);
        }
        for (AbstractCollectionType act : incompleteItemTypes.get(typeSignature)) {
            act.setItemType(t);
        }
    }
}
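The field fix-up above depends on computeRecordType having pre-seeded optional incomplete fields with AUnionType.createUnknownableType(null). Isolated as a sketch (the helper name patchFieldType is ours):

// Write a resolved type into one incomplete slot of a record type: a null slot
// is a plain field; a non-null slot is the optional-field union whose actual
// type is still missing.
static void patchFieldType(ARecordType recType, int pos, IAType resolved) {
    IAType[] fldTypes = recType.getFieldTypes();
    if (fldTypes[pos] == null) {
        fldTypes[pos] = resolved;
    } else {
        ((AUnionType) fldTypes[pos]).setActualType(resolved);
    }
}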