Use of org.apache.asterix.metadata.declared.MetadataProvider in project asterixdb by apache.
The class IntroduceLSMComponentFilterRule, method rewritePost.
@Override
public boolean rewritePost(Mutable<ILogicalOperator> opRef, IOptimizationContext context) throws AlgebricksException {
    if (!checkIfRuleIsApplicable(opRef, context)) {
        return false;
    }
    AbstractLogicalOperator op = (AbstractLogicalOperator) opRef.getValue();
    typeEnvironment = context.getOutputTypeEnvironment(op);
    ILogicalExpression condExpr = ((SelectOperator) op).getCondition().getValue();
    AccessMethodAnalysisContext analysisCtx = analyzeCondition(condExpr, context, typeEnvironment);
    if (analysisCtx.getMatchedFuncExprs().isEmpty()) {
        return false;
    }
    Dataset dataset = getDataset(op, context);
    List<String> filterFieldName = null;
    ARecordType recType = null;
    if (dataset != null && dataset.getDatasetType() == DatasetType.INTERNAL) {
        filterFieldName = DatasetUtil.getFilterField(dataset);
        IAType itemType = ((MetadataProvider) context.getMetadataProvider()).findType(dataset.getItemTypeDataverseName(), dataset.getItemTypeName());
        if (itemType.getTypeTag() == ATypeTag.OBJECT) {
            recType = (ARecordType) itemType;
        }
    }
    if (filterFieldName == null || recType == null) {
        return false;
    }
    List<Index> datasetIndexes = ((MetadataProvider) context.getMetadataProvider()).getDatasetIndexes(dataset.getDataverseName(), dataset.getDatasetName());
    List<IOptimizableFuncExpr> optFuncExprs = new ArrayList<>();
    for (int i = 0; i < analysisCtx.getMatchedFuncExprs().size(); i++) {
        IOptimizableFuncExpr optFuncExpr = analysisCtx.getMatchedFuncExpr(i);
        boolean found = findMacthedExprFieldName(optFuncExpr, op, dataset, recType, datasetIndexes, context);
        if (found && optFuncExpr.getFieldName(0).equals(filterFieldName)) {
            optFuncExprs.add(optFuncExpr);
        }
    }
    if (optFuncExprs.isEmpty()) {
        return false;
    }
    changePlan(optFuncExprs, op, dataset, context);
    OperatorPropertiesUtil.typeOpRec(opRef, context);
    context.addToDontApplySet(this, op);
    return true;
}
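The only MetadataProvider calls the rule makes are resolving the dataset's item type and listing its indexes. Below is a minimal sketch of that lookup pattern, assuming the provider is bound to an active metadata transaction; the class and method names (FilterMetadataLookup, resolveRecordType, listIndexes) are illustrative, not part of the rule.
import java.util.List;
import org.apache.asterix.metadata.declared.MetadataProvider;
import org.apache.asterix.metadata.entities.Dataset;
import org.apache.asterix.metadata.entities.Index;
import org.apache.asterix.om.types.ARecordType;
import org.apache.asterix.om.types.ATypeTag;
import org.apache.asterix.om.types.IAType;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;

public final class FilterMetadataLookup {

    // Resolves the dataset's record type; returns null when the item type is not
    // an object, mirroring the guard in rewritePost. The provider is assumed to
    // hold an active metadata transaction.
    static ARecordType resolveRecordType(MetadataProvider mp, Dataset dataset) throws AlgebricksException {
        IAType itemType = mp.findType(dataset.getItemTypeDataverseName(), dataset.getItemTypeName());
        return itemType.getTypeTag() == ATypeTag.OBJECT ? (ARecordType) itemType : null;
    }

    // Lists the dataset's indexes, which the rule scans when matching filter field names.
    static List<Index> listIndexes(MetadataProvider mp, Dataset dataset) throws AlgebricksException {
        return mp.getDatasetIndexes(dataset.getDataverseName(), dataset.getDatasetName());
    }
}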
Use of org.apache.asterix.metadata.declared.MetadataProvider in project asterixdb by apache.
The class FilePartition, method get.
@Override
protected void get(IServletRequest request, IServletResponse response) {
    response.setStatus(HttpResponseStatus.OK);
    try {
        HttpUtil.setContentType(response, HttpUtil.ContentType.APPLICATION_JSON, HttpUtil.Encoding.UTF8);
    } catch (IOException e) {
        LOGGER.log(Level.WARNING, "Failure setting content type", e);
        response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
        response.writer().write(e.toString());
        return;
    }
    PrintWriter out = response.writer();
    try {
        ObjectMapper om = new ObjectMapper();
        ObjectNode jsonResponse = om.createObjectNode();
        String dataverseName = request.getParameter("dataverseName");
        String datasetName = request.getParameter("datasetName");
        if (dataverseName == null || datasetName == null) {
            jsonResponse.put("error", "Parameter dataverseName or datasetName is null.");
            out.write(jsonResponse.toString());
            return;
        }
        IHyracksClientConnection hcc = (IHyracksClientConnection) ctx.get(HYRACKS_CONNECTION_ATTR);
        // Metadata transaction begins.
        MetadataManager.INSTANCE.init();
        MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        // Retrieves file splits of the dataset.
        MetadataProvider metadataProvider = new MetadataProvider(appCtx, null, new StorageComponentProvider());
        try {
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            Dataset dataset = metadataProvider.findDataset(dataverseName, datasetName);
            if (dataset == null) {
                jsonResponse.put("error", "Dataset " + datasetName + " does not exist in dataverse " + dataverseName);
                out.write(jsonResponse.toString());
                out.flush();
                return;
            }
            boolean temp = dataset.getDatasetDetails().isTemp();
            FileSplit[] fileSplits = metadataProvider.splitsForIndex(mdTxnCtx, dataset, datasetName);
            ARecordType recordType = (ARecordType) metadataProvider.findType(dataset.getItemTypeDataverseName(), dataset.getItemTypeName());
            List<List<String>> primaryKeys = dataset.getPrimaryKeys();
            StringBuilder pkStrBuf = new StringBuilder();
            for (List<String> keys : primaryKeys) {
                for (String key : keys) {
                    pkStrBuf.append(key).append(",");
                }
            }
            pkStrBuf.delete(pkStrBuf.length() - 1, pkStrBuf.length());
            // Constructs the returned json object.
            formResponseObject(jsonResponse, fileSplits, recordType, pkStrBuf.toString(), temp, hcc.getNodeControllerInfos());
            // Flush the cached contents of the dataset to the file system.
            FlushDatasetUtil.flushDataset(hcc, metadataProvider, dataverseName, datasetName, datasetName);
            // Metadata transaction commits.
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            // Writes file splits.
            out.write(jsonResponse.toString());
        } finally {
            metadataProvider.getLocks().unlock();
        }
    } catch (Exception e) {
        LOGGER.log(Level.WARNING, "Failure handling a request", e);
        response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
        out.write(e.toString());
    } finally {
        out.flush();
    }
}
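Stripped of the response-building details, the servlet shows the canonical MetadataProvider access pattern: begin a metadata transaction, bind it to a fresh provider, commit on success, and always release the provider's locks in a finally block. A condensed sketch of that skeleton follows; the doLookups helper is an illustrative placeholder for the dataset-specific work.
MetadataManager.INSTANCE.init();
MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
MetadataProvider metadataProvider = new MetadataProvider(appCtx, null, new StorageComponentProvider());
try {
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    doLookups(metadataProvider, mdTxnCtx); // hypothetical helper: entity lookups and response building
    MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
} finally {
    // Locks acquired through the provider must be released even when the lookups fail.
    metadataProvider.getLocks().unlock();
}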
Use of org.apache.asterix.metadata.declared.MetadataProvider in project asterixdb by apache.
The class RebalanceApiServlet, method rebalanceDataset.
// Rebalances a given dataset.
private void rebalanceDataset(String dataverseName, String datasetName, String[] targetNodes) throws Exception {
    IHyracksClientConnection hcc = (IHyracksClientConnection) ctx.get(HYRACKS_CONNECTION_ATTR);
    MetadataProvider metadataProvider = new MetadataProvider(appCtx, null, new StorageComponentProvider());
    RebalanceUtil.rebalance(dataverseName, datasetName, new LinkedHashSet<>(Arrays.asList(targetNodes)), metadataProvider, hcc);
}
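The target node set is wrapped in a LinkedHashSet, so duplicate node names collapse while the caller's ordering survives. A hypothetical invocation, assuming the servlet has already parsed these values from request parameters (the dataverse, dataset, and node names are illustrative):
// Hypothetical call site: move the "Orders" dataset of dataverse "tpch" onto nodes nc1 and nc2.
rebalanceDataset("tpch", "Orders", new String[] { "nc1", "nc2" });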
Use of org.apache.asterix.metadata.declared.MetadataProvider in project asterixdb by apache.
The class QueryTranslator, method handleDataverseDropStatement.
protected void handleDataverseDropStatement(MetadataProvider metadataProvider, Statement stmt, IHyracksClientConnection hcc) throws Exception {
    DataverseDropStatement stmtDelete = (DataverseDropStatement) stmt;
    String dataverseName = stmtDelete.getDataverseName().getValue();
    if (dataverseName.equals(MetadataBuiltinEntities.DEFAULT_DATAVERSE_NAME)) {
        throw new HyracksDataException(MetadataBuiltinEntities.DEFAULT_DATAVERSE_NAME + " dataverse can't be dropped");
    }
    ProgressState progress = ProgressState.NO_PROGRESS;
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    List<JobSpecification> jobsToExecute = new ArrayList<>();
    MetadataLockManager.INSTANCE.acquireDataverseWriteLock(metadataProvider.getLocks(), dataverseName);
    try {
        Dataverse dv = MetadataManager.INSTANCE.getDataverse(mdTxnCtx, dataverseName);
        if (dv == null) {
            if (stmtDelete.getIfExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("There is no dataverse with this name " + dataverseName + ".");
            }
        }
        // #. disconnect all feeds from any datasets in the dataverse.
        ActiveLifecycleListener activeListener = (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
        ActiveJobNotificationHandler activeEventHandler = activeListener.getNotificationHandler();
        IActiveEntityEventsListener[] activeListeners = activeEventHandler.getEventListeners();
        Identifier dvId = new Identifier(dataverseName);
        MetadataProvider tempMdProvider = new MetadataProvider(appCtx, metadataProvider.getDefaultDataverse(), metadataProvider.getStorageComponentProvider());
        tempMdProvider.setConfig(metadataProvider.getConfig());
        for (IActiveEntityEventsListener listener : activeListeners) {
            EntityId activeEntityId = listener.getEntityId();
            if (activeEntityId.getExtensionName().equals(Feed.EXTENSION_NAME) && activeEntityId.getDataverse().equals(dataverseName)) {
                tempMdProvider.getLocks().reset();
                stopFeedBeforeDelete(new Pair<>(dvId, new Identifier(activeEntityId.getEntityName())), tempMdProvider);
                // Prepare a job to remove the feed log storage.
                jobsToExecute.add(FeedOperations.buildRemoveFeedStorageJob(metadataProvider, MetadataManager.INSTANCE.getFeed(mdTxnCtx, dataverseName, activeEntityId.getEntityName())));
            }
        }
        // #. prepare jobs which will drop the corresponding datasets with their indexes.
        List<Dataset> datasets = MetadataManager.INSTANCE.getDataverseDatasets(mdTxnCtx, dataverseName);
        for (Dataset dataset : datasets) {
            String datasetName = dataset.getDatasetName();
            DatasetType dsType = dataset.getDatasetType();
            if (dsType == DatasetType.INTERNAL) {
                List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName, datasetName);
                for (Index index : indexes) {
                    jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(index, metadataProvider, dataset));
                }
            } else {
                // External dataset.
                List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName, datasetName);
                for (int k = 0; k < indexes.size(); k++) {
                    if (ExternalIndexingOperations.isFileIndex(indexes.get(k))) {
                        jobsToExecute.add(ExternalIndexingOperations.buildDropFilesIndexJobSpec(metadataProvider, dataset));
                    } else {
                        jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(indexes.get(k), metadataProvider, dataset));
                    }
                }
                ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(dataset);
            }
        }
        jobsToExecute.add(DataverseUtil.dropDataverseJobSpec(dv, metadataProvider));
        // #. mark PendingDropOp on the dataverse record by
        // first, deleting the dataverse record from the DATAVERSE_DATASET,
        // second, inserting the dataverse record with the PendingDropOp value into the DATAVERSE_DATASET.
        MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
        MetadataManager.INSTANCE.addDataverse(mdTxnCtx, new Dataverse(dataverseName, dv.getDataFormat(), MetadataUtil.PENDING_DROP_OP));
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
        bActiveTxn = false;
        progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;
        for (JobSpecification jobSpec : jobsToExecute) {
            JobUtils.runJob(hcc, jobSpec, true);
        }
        mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        bActiveTxn = true;
        metadataProvider.setMetadataTxnContext(mdTxnCtx);
        // #. finally, delete the dataverse.
        MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
        // Drop all node groups that are no longer needed.
        for (Dataset dataset : datasets) {
            String nodeGroup = dataset.getNodeGroupName();
            MetadataLockManager.INSTANCE.acquireNodeGroupWriteLock(metadataProvider.getLocks(), nodeGroup);
            if (MetadataManager.INSTANCE.getNodegroup(mdTxnCtx, nodeGroup) != null) {
                MetadataManager.INSTANCE.dropNodegroup(mdTxnCtx, nodeGroup, true);
            }
        }
        if (activeDataverse != null && activeDataverse.getDataverseName().equals(dataverseName)) {
            activeDataverse = null;
        }
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }
        if (progress == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            if (activeDataverse != null && activeDataverse.getDataverseName().equals(dataverseName)) {
                activeDataverse = null;
            }
            // Remove all the indexes in the NCs.
            try {
                for (JobSpecification jobSpec : jobsToExecute) {
                    JobUtils.runJob(hcc, jobSpec, true);
                }
            } catch (Exception e2) {
                // Do not throw the exception, since the metadata still needs to be compensated.
                e.addSuppressed(e2);
            }
            // Remove the record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            try {
                MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException("System is in an inconsistent state: pending dataverse(" + dataverseName + ") couldn't be removed from the metadata", e);
            }
        }
        throw e;
    } finally {
        metadataProvider.getLocks().unlock();
        ExternalDatasetsRegistry.INSTANCE.releaseAcquiredLocks(metadataProvider);
    }
}
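The method implements a two-phase drop: metadata is first rewritten with a PENDING_DROP_OP marker and committed, the physical drop jobs then run outside any metadata transaction, and a second transaction removes the record for good; the catch block compensates by finishing both phases whenever the pending marker was already committed. A minimal sketch of that protocol, assuming dataverseName, dv, jobsToExecute, and hcc are in scope as in the method above, with the entity-specific preparation reduced to comments:
MetadataTransactionContext txn = MetadataManager.INSTANCE.beginTransaction();
boolean active = true;
try {
    // Phase 1: replace the record with a PENDING_DROP_OP copy and commit, so a
    // crash between the phases leaves a visible, compensatable marker behind.
    MetadataManager.INSTANCE.dropDataverse(txn, dataverseName);
    MetadataManager.INSTANCE.addDataverse(txn, new Dataverse(dataverseName, dv.getDataFormat(), MetadataUtil.PENDING_DROP_OP));
    MetadataManager.INSTANCE.commitTransaction(txn);
    active = false;
    // Run the physical drop jobs outside any metadata transaction.
    for (JobSpecification jobSpec : jobsToExecute) {
        JobUtils.runJob(hcc, jobSpec, true);
    }
    // Phase 2: delete the pending record in a fresh transaction.
    txn = MetadataManager.INSTANCE.beginTransaction();
    active = true;
    MetadataManager.INSTANCE.dropDataverse(txn, dataverseName);
    MetadataManager.INSTANCE.commitTransaction(txn);
} catch (Exception e) {
    if (active) {
        abort(e, e, txn);
    }
    // If phase 1 committed, compensation re-runs the jobs and drops the pending record.
    throw e;
}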
Use of org.apache.asterix.metadata.declared.MetadataProvider in project asterixdb by apache.
The class QueryTranslator, method compileAndExecute.
@Override
public void compileAndExecute(IHyracksClientConnection hcc, IHyracksDataset hdc, ResultDelivery resultDelivery, ResultMetadata outMetadata, Stats stats, String clientContextId, IStatementExecutorContext ctx) throws Exception {
    int resultSetIdCounter = 0;
    FileSplit outputFile = null;
    IAWriterFactory writerFactory = PrinterBasedWriterFactory.INSTANCE;
    IResultSerializerFactoryProvider resultSerializerFactoryProvider = ResultSerializerFactoryProvider.INSTANCE;
    Map<String, String> config = new HashMap<>();
    /* Since the system runs a large number of threads, when HTTP requests don't return, it becomes difficult to
     * find the thread running the request to determine where it has stopped.
     * Setting the thread name helps make that easier.
     */
    String threadName = Thread.currentThread().getName();
    Thread.currentThread().setName(QueryTranslator.class.getSimpleName());
    try {
        for (Statement stmt : statements) {
            if (sessionConfig.is(SessionConfig.FORMAT_HTML)) {
                sessionOutput.out().println(ApiServlet.HTML_STATEMENT_SEPARATOR);
            }
            validateOperation(appCtx, activeDataverse, stmt);
            // Rewrite the statement's AST.
            rewriteStatement(stmt);
            MetadataProvider metadataProvider = new MetadataProvider(appCtx, activeDataverse, componentProvider);
            metadataProvider.setWriterFactory(writerFactory);
            metadataProvider.setResultSerializerFactoryProvider(resultSerializerFactoryProvider);
            metadataProvider.setOutputFile(outputFile);
            metadataProvider.setConfig(config);
            switch (stmt.getKind()) {
                case Statement.Kind.SET:
                    handleSetStatement(stmt, config);
                    break;
                case Statement.Kind.DATAVERSE_DECL:
                    activeDataverse = handleUseDataverseStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CREATE_DATAVERSE:
                    handleCreateDataverseStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DATASET_DECL:
                    handleCreateDatasetStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.CREATE_INDEX:
                    handleCreateIndexStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.TYPE_DECL:
                    handleCreateTypeStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.NODEGROUP_DECL:
                    handleCreateNodeGroupStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DATAVERSE_DROP:
                    handleDataverseDropStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.DATASET_DROP:
                    handleDatasetDropStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.INDEX_DROP:
                    handleIndexDropStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.TYPE_DROP:
                    handleTypeDropStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.NODEGROUP_DROP:
                    handleNodegroupDropStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CREATE_FUNCTION:
                    handleCreateFunctionStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.FUNCTION_DROP:
                    handleFunctionDropStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.LOAD:
                    handleLoadStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.INSERT:
                case Statement.Kind.UPSERT:
                    if (((InsertStatement) stmt).getReturnExpression() != null) {
                        metadataProvider.setResultSetId(new ResultSetId(resultSetIdCounter++));
                        metadataProvider.setResultAsyncMode(resultDelivery == ResultDelivery.ASYNC || resultDelivery == ResultDelivery.DEFERRED);
                    }
                    handleInsertUpsertStatement(metadataProvider, stmt, hcc, hdc, resultDelivery, outMetadata, stats, false, clientContextId, ctx);
                    break;
                case Statement.Kind.DELETE:
                    handleDeleteStatement(metadataProvider, stmt, hcc, false);
                    break;
                case Statement.Kind.CREATE_FEED:
                    handleCreateFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DROP_FEED:
                    handleDropFeedStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.DROP_FEED_POLICY:
                    handleDropFeedPolicyStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CONNECT_FEED:
                    handleConnectFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DISCONNECT_FEED:
                    handleDisconnectFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.START_FEED:
                    handleStartFeedStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.STOP_FEED:
                    handleStopFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CREATE_FEED_POLICY:
                    handleCreateFeedPolicyStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.QUERY:
                    metadataProvider.setResultSetId(new ResultSetId(resultSetIdCounter++));
                    metadataProvider.setResultAsyncMode(resultDelivery == ResultDelivery.ASYNC || resultDelivery == ResultDelivery.DEFERRED);
                    handleQuery(metadataProvider, (Query) stmt, hcc, hdc, resultDelivery, outMetadata, stats, clientContextId, ctx);
                    break;
                case Statement.Kind.COMPACT:
                    handleCompactStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.EXTERNAL_DATASET_REFRESH:
                    handleExternalDatasetRefreshStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.WRITE:
                    Pair<IAWriterFactory, FileSplit> result = handleWriteStatement(stmt);
                    writerFactory = (result.first != null) ? result.first : writerFactory;
                    outputFile = result.second;
                    break;
                case Statement.Kind.RUN:
                    handleRunStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.FUNCTION_DECL:
                    // No op.
                    break;
                case Statement.Kind.EXTENSION:
                    ((IExtensionStatement) stmt).handle(this, metadataProvider, hcc, hdc, resultDelivery, stats, resultSetIdCounter);
                    break;
                default:
                    throw new CompilationException("Unknown statement kind: " + stmt.getKind());
            }
        }
    } finally {
        Thread.currentThread().setName(threadName);
    }
}
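Result-producing statements (QUERY, and INSERT/UPSERT with a return expression) share the same provider setup: assign a fresh ResultSetId and mark the result as asynchronous for ASYNC or DEFERRED delivery. A small sketch of that shared fragment, assuming the surrounding variables from compileAndExecute are in scope:
// Shared setup for statements that return a result set; resultSetIdCounter,
// resultDelivery, and metadataProvider are as declared in compileAndExecute.
metadataProvider.setResultSetId(new ResultSetId(resultSetIdCounter++));
metadataProvider.setResultAsyncMode(resultDelivery == ResultDelivery.ASYNC || resultDelivery == ResultDelivery.DEFERRED);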