Use of org.apache.asterix.file.StorageComponentProvider in project asterixdb by apache.
The class FilePartition, method get:
@Override
protected void get(IServletRequest request, IServletResponse response) {
    response.setStatus(HttpResponseStatus.OK);
    try {
        HttpUtil.setContentType(response, HttpUtil.ContentType.APPLICATION_JSON, HttpUtil.Encoding.UTF8);
    } catch (IOException e) {
        LOGGER.log(Level.WARNING, "Failure setting content type", e);
        response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
        response.writer().write(e.toString());
        return;
    }
    PrintWriter out = response.writer();
    try {
        ObjectMapper om = new ObjectMapper();
        ObjectNode jsonResponse = om.createObjectNode();
        String dataverseName = request.getParameter("dataverseName");
        String datasetName = request.getParameter("datasetName");
        if (dataverseName == null || datasetName == null) {
            jsonResponse.put("error", "Parameter dataverseName or datasetName is null.");
            out.write(jsonResponse.toString());
            return;
        }
        IHyracksClientConnection hcc = (IHyracksClientConnection) ctx.get(HYRACKS_CONNECTION_ATTR);
        // Metadata transaction begins.
        MetadataManager.INSTANCE.init();
        MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        // Retrieves file splits of the dataset.
        MetadataProvider metadataProvider = new MetadataProvider(appCtx, null, new StorageComponentProvider());
        try {
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            Dataset dataset = metadataProvider.findDataset(dataverseName, datasetName);
            if (dataset == null) {
                jsonResponse.put("error",
                        "Dataset " + datasetName + " does not exist in dataverse " + dataverseName);
                out.write(jsonResponse.toString());
                out.flush();
                return;
            }
            boolean temp = dataset.getDatasetDetails().isTemp();
            FileSplit[] fileSplits = metadataProvider.splitsForIndex(mdTxnCtx, dataset, datasetName);
            ARecordType recordType = (ARecordType) metadataProvider.findType(dataset.getItemTypeDataverseName(),
                    dataset.getItemTypeName());
            List<List<String>> primaryKeys = dataset.getPrimaryKeys();
            // Joins the primary-key fields into a comma-separated string.
            StringBuilder pkStrBuf = new StringBuilder();
            for (List<String> keys : primaryKeys) {
                for (String key : keys) {
                    pkStrBuf.append(key).append(",");
                }
            }
            pkStrBuf.delete(pkStrBuf.length() - 1, pkStrBuf.length());
            // Constructs the returned json object.
            formResponseObject(jsonResponse, fileSplits, recordType, pkStrBuf.toString(), temp,
                    hcc.getNodeControllerInfos());
            // Flush the cached contents of the dataset to the file system.
            FlushDatasetUtil.flushDataset(hcc, metadataProvider, dataverseName, datasetName, datasetName);
            // Metadata transaction commits.
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            // Writes file splits.
            out.write(jsonResponse.toString());
        } finally {
            metadataProvider.getLocks().unlock();
        }
    } catch (Exception e) {
        LOGGER.log(Level.WARNING, "Failure handling a request", e);
        response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
        out.write(e.toString());
    } finally {
        out.flush();
    }
}
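For context, a minimal client-side sketch of how this connector-style endpoint could be queried over HTTP. The host, port, and /connector path are assumptions rather than values taken from the excerpt; the response carries either an "error" field or the file-split description built by formResponseObject.

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ConnectorClientSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical endpoint and parameter values; adjust host, port, and path to the actual deployment.
        String query = "dataverseName=" + URLEncoder.encode("Default", StandardCharsets.UTF_8.name())
                + "&datasetName=" + URLEncoder.encode("Customers", StandardCharsets.UTF_8.name());
        URL url = new URL("http://localhost:19002/connector?" + query);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        try (InputStream in = conn.getInputStream()) {
            JsonNode json = new ObjectMapper().readTree(in);
            if (json.has("error")) {
                // The servlet reports missing parameters or an unknown dataset in an "error" field.
                System.err.println("Request failed: " + json.get("error").asText());
            } else {
                System.out.println("File splits: " + json.toString());
            }
        } finally {
            conn.disconnect();
        }
    }
}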
Use of org.apache.asterix.file.StorageComponentProvider in project asterixdb by apache.
The class RebalanceApiServlet, method rebalanceDataset:
// Rebalances a given dataset.
private void rebalanceDataset(String dataverseName, String datasetName, String[] targetNodes) throws Exception {
    IHyracksClientConnection hcc = (IHyracksClientConnection) ctx.get(HYRACKS_CONNECTION_ATTR);
    MetadataProvider metadataProvider = new MetadataProvider(appCtx, null, new StorageComponentProvider());
    RebalanceUtil.rebalance(dataverseName, datasetName, new LinkedHashSet<>(Arrays.asList(targetNodes)),
            metadataProvider, hcc);
}
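One detail worth noting: the target-node array is wrapped in a LinkedHashSet before being passed to RebalanceUtil.rebalance, so duplicate node names are dropped while the caller-specified order is preserved. A tiny self-contained sketch of that behavior:

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class TargetNodeSetSketch {
    public static void main(String[] args) {
        // Duplicates collapse; the first-seen order of the node names is kept.
        String[] targetNodes = { "nc2", "nc1", "nc2", "nc3" };
        Set<String> nodes = new LinkedHashSet<>(Arrays.asList(targetNodes));
        System.out.println(nodes); // prints [nc2, nc1, nc3]
    }
}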
Use of org.apache.asterix.file.StorageComponentProvider in project asterixdb by apache.
The class AsterixCLI, method main:
public static void main(String[] args) throws Exception {
    Options options = new Options();
    CmdLineParser parser = new CmdLineParser(options);
    parser.parseArgument(args);
    ILangCompilationProvider compilationProvider = new AqlCompilationProvider();
    setUp(options);
    try {
        for (String queryFile : options.args) {
            Reader in = new FileReader(queryFile);
            AsterixJavaClient ajc = new AsterixJavaClient(
                    (ICcApplicationContext) integrationUtil.cc.getApplicationContext(),
                    integrationUtil.getHyracksClientConnection(), in, compilationProvider,
                    new DefaultStatementExecutorFactory(), new StorageComponentProvider());
            try {
                ajc.compile(true, false, false, false, false, true, false);
            } finally {
                in.close();
            }
            ajc.execute();
        }
    } finally {
        tearDown();
    }
    System.exit(0);
}
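For reference, a minimal sketch of driving this CLI programmatically; each positional argument is treated as a query file that is compiled and executed in order. The file paths below are illustrative assumptions, and note that main ends with System.exit(0), so it terminates the calling JVM when it finishes.

public class AsterixCLIDriverSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative file paths; AsterixCLI.main exits the JVM with status 0 on success.
        AsterixCLI.main(new String[] { "queries/load.aql", "queries/select.aql" });
    }
}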
Use of org.apache.asterix.file.StorageComponentProvider in project asterixdb by apache.
The class AsterixClientDriver, method compileQuery:
private static AsterixJavaClient compileQuery(IHyracksClientConnection hcc, String filename, boolean optimize,
        boolean onlyPhysical, boolean createBinaryRuntime) throws Exception {
    ILangCompilationProvider compilationProvider = new AqlCompilationProvider();
    FileReader reader = new FileReader(filename);
    AsterixJavaClient q = new AsterixJavaClient(null, hcc, reader, compilationProvider,
            new DefaultStatementExecutorFactory(), new StorageComponentProvider());
    q.compile(optimize, true, true, true, onlyPhysical, createBinaryRuntime, createBinaryRuntime);
    return q;
}
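A short sketch of how the returned client might then be used, following the compile-then-execute pattern shown in the AsterixCLI excerpt above; it assumes an IHyracksClientConnection hcc is in scope, and the query file path and flag values are illustrative.

// Compile the query from a file (illustrative path), then run the generated job.
AsterixJavaClient client = compileQuery(hcc, "queries/example.aql", true, false, true);
client.execute();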
Use of org.apache.asterix.file.StorageComponentProvider in project asterixdb by apache.
The class QueryTranslator, method handleStartFeedStatement:
private void handleStartFeedStatement(MetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws Exception {
    StartFeedStatement sfs = (StartFeedStatement) stmt;
    String dataverseName = getActiveDataverse(sfs.getDataverseName());
    String feedName = sfs.getFeedName().getValue();
    // Transaction handler
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    // Runtime handler
    EntityId entityId = new EntityId(Feed.EXTENSION_NAME, dataverseName, feedName);
    // Feed & Feed Connections
    Feed feed = FeedMetadataUtil.validateIfFeedExists(dataverseName, feedName,
            metadataProvider.getMetadataTxnContext());
    List<FeedConnection> feedConnections = MetadataManager.INSTANCE
            .getFeedConections(metadataProvider.getMetadataTxnContext(), dataverseName, feedName);
    ILangCompilationProvider compilationProvider = new AqlCompilationProvider();
    IStorageComponentProvider storageComponentProvider = new StorageComponentProvider();
    DefaultStatementExecutorFactory qtFactory = new DefaultStatementExecutorFactory();
    ActiveLifecycleListener activeListener = (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
    ActiveJobNotificationHandler activeEventHandler = activeListener.getNotificationHandler();
    FeedEventsListener listener = (FeedEventsListener) activeEventHandler.getActiveEntityListener(entityId);
    if (listener != null) {
        throw new AlgebricksException("Feed " + feedName + " is started already.");
    }
    // Start
    MetadataLockManager.INSTANCE.startFeedBegin(metadataProvider.getLocks(), dataverseName,
            dataverseName + "." + feedName, feedConnections);
    try {
        // Prepare policy
        List<IDataset> datasets = new ArrayList<>();
        for (FeedConnection connection : feedConnections) {
            Dataset ds = metadataProvider.findDataset(connection.getDataverseName(), connection.getDatasetName());
            datasets.add(ds);
        }
        org.apache.commons.lang3.tuple.Pair<JobSpecification, AlgebricksAbsolutePartitionConstraint> jobInfo =
                FeedOperations.buildStartFeedJob(sessionOutput, metadataProvider, feed, feedConnections,
                        compilationProvider, storageComponentProvider, qtFactory, hcc);
        JobSpecification feedJob = jobInfo.getLeft();
        listener = new FeedEventsListener(appCtx, entityId, datasets, jobInfo.getRight().getLocations());
        activeEventHandler.registerListener(listener);
        IActiveEventSubscriber eventSubscriber = listener.subscribe(ActivityState.STARTED);
        feedJob.setProperty(ActiveJobNotificationHandler.ACTIVE_ENTITY_PROPERTY_NAME, entityId);
        JobUtils.runJob(hcc, feedJob,
                Boolean.valueOf(metadataProvider.getConfig().get(StartFeedStatement.WAIT_FOR_COMPLETION)));
        eventSubscriber.sync();
        LOGGER.log(Level.INFO, "Submitted");
    } catch (Exception e) {
        abort(e, e, mdTxnCtx);
        if (listener != null) {
            activeEventHandler.unregisterListener(listener);
        }
        throw e;
    } finally {
        metadataProvider.getLocks().unlock();
    }
}
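One subtlety in the job submission above: the wait-for-completion flag is read with Boolean.valueOf on a config string, so a missing (null) or non-"true" value silently means the call does not wait for the feed job to complete. A tiny self-contained sketch of that behavior:

public class WaitFlagSketch {
    public static void main(String[] args) {
        // Boolean.valueOf(String) is null-safe: only "true" (case-insensitive) yields true,
        // so an absent config entry for WAIT_FOR_COMPLETION means "do not wait".
        System.out.println(Boolean.valueOf((String) null)); // false
        System.out.println(Boolean.valueOf("TRUE"));        // true
        System.out.println(Boolean.valueOf("yes"));         // false
    }
}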