Example usage of co.cask.cdap.data2.dataset2.lib.timeseries.Fact in the project cdap by caskdata:
the delete method of the DefaultCube class.
/**
 * Deletes from the fact table all entries that match the given delete query.
 * <p>
 * NOTE: this may be very inefficient and it is better to use TTL; this method exists
 * only to support existing old functionality.
 */
@Override
public void delete(CubeDeleteQuery query) {
  // The target fact table depends only on the query resolution, so resolve it once
  // up front instead of re-fetching it inside the loop (it is loop-invariant).
  FactTable factTable = resolutionToFactTable.get(query.getResolution());
  // Reused across iterations; cleared before each aggregation is processed.
  List<DimensionValue> dimensionValues = Lists.newArrayList();
  // Use the dimension values of each matching aggregation to delete entries in the fact table.
  for (Aggregation agg : aggregations.values()) {
    // Only aggregations that contain every dimension referenced by the query are affected.
    if (agg.getDimensionNames().containsAll(query.getDimensionValues().keySet())) {
      dimensionValues.clear();
      for (String dimensionName : agg.getDimensionNames()) {
        // Map.get returns null for dimensions the query does not constrain; presumably
        // a null value acts as a wildcard in the scan — confirm against FactScan semantics.
        dimensionValues.add(new DimensionValue(dimensionName, query.getDimensionValues().get(dimensionName)));
      }
      FactScan scan = new FactScan(query.getStartTs(), query.getEndTs(), query.getMeasureNames(), dimensionValues);
      factTable.delete(scan);
    }
  }
}
Example usage of co.cask.cdap.data2.dataset2.lib.timeseries.Fact in the project cdap by caskdata:
the createModuleConsumer method of the DatasetTypeService class.
/**
 * Creates a body consumer that receives an uploaded dataset-module jar, stages it in a
 * namespace-local temp directory, moves it to the namespace archive location, registers
 * the module with the type manager, and grants privileges to the given principal.
 *
 * @param datasetModuleId id of the dataset module being deployed
 * @param className fully qualified class name of the module; may be null, in which case
 *                  the upload is rejected with 400 once the body is fully received
 * @param forceUpdate whether to force-update an already registered module
 * @param principal principal that is granted privileges after a successful deploy
 * @throws IOException if resolving the namespace home location or creating the temp
 *                     directory/file fails
 * @throws NotFoundException if the namespace home directory does not exist
 */
private AbstractBodyConsumer createModuleConsumer(final DatasetModuleId datasetModuleId, final String className, final boolean forceUpdate, final Principal principal) throws IOException, NotFoundException {
final NamespaceId namespaceId = datasetModuleId.getParent();
final Location namespaceHomeLocation;
// Resolve the namespace home location while impersonating the namespace's configured user.
try {
namespaceHomeLocation = impersonator.doAs(namespaceId, new Callable<Location>() {
@Override
public Location call() throws Exception {
return namespacedLocationFactory.get(namespaceId);
}
});
} catch (Exception e) {
// the only checked exception that the callable throws is IOException
Throwables.propagateIfInstanceOf(e, IOException.class);
throw Throwables.propagate(e);
}
// verify namespace directory exists
if (!namespaceHomeLocation.exists()) {
String msg = String.format("Home directory %s for namespace %s not found", namespaceHomeLocation, namespaceId);
LOG.debug(msg);
throw new NotFoundException(msg);
}
// Store uploaded content to a local temp file under
// <local-data-dir>/<namespaces-dir>/<namespace>/<temp-dir>.
String namespacesDir = cConf.get(Constants.Namespace.NAMESPACES_DIR);
File localDataDir = new File(cConf.get(Constants.CFG_LOCAL_DATA_DIR));
File namespaceBase = new File(localDataDir, namespacesDir);
File tempDir = new File(new File(namespaceBase, datasetModuleId.getNamespace()), cConf.get(Constants.AppFabric.TEMP_DIR)).getAbsoluteFile();
if (!DirUtils.mkdirs(tempDir)) {
throw new IOException("Could not create temporary directory at: " + tempDir);
}
// onFinish runs only after the full request body has been written to the temp file.
return new AbstractBodyConsumer(File.createTempFile("dataset-", ".jar", tempDir)) {
@Override
protected void onFinish(HttpResponder responder, File uploadedFile) throws Exception {
if (className == null) {
// We have to delay until body upload is completed due to the fact that not all client is
// requesting with "Expect: 100-continue" header and the client library we have cannot handle
// connection close, and yet be able to read response reliably.
// In longer term we should fix the client, as well as the netty-http server. However, since
// this handler will be gone in near future, it's ok to have this workaround.
responder.sendString(HttpResponseStatus.BAD_REQUEST, "Required header 'class-name' is absent.");
return;
}
LOG.debug("Adding module {}, class name: {}", datasetModuleId, className);
String dataFabricDir = cConf.get(Constants.Dataset.Manager.OUTPUT_DIR);
String moduleName = datasetModuleId.getModule();
// Final archive path: <namespace-home>/<data-fabric-dir>/<module>/<archive-dir>/<module>.jar
Location archiveDir = namespaceHomeLocation.append(dataFabricDir).append(moduleName).append(Constants.ARCHIVE_DIR);
String archiveName = moduleName + ".jar";
Location archive = archiveDir.append(archiveName);
// Copy uploaded content to a temporary location, then rename into place so the final
// archive never exists in a partially-written state.
Location tmpLocation = archive.getTempFile(".tmp");
try {
Locations.mkdirsIfNotExists(archiveDir);
LOG.debug("Copy from {} to {}", uploadedFile, tmpLocation);
Files.copy(uploadedFile, Locations.newOutputSupplier(tmpLocation));
// Finally, move archive to final location
LOG.debug("Storing module {} jar at {}", datasetModuleId, archive);
// renameTo returns null on failure rather than throwing — NOTE(review): looks like
// Location.renameTo's contract; confirm against the Location API in use.
if (tmpLocation.renameTo(archive) == null) {
throw new IOException(String.format("Could not move archive from location: %s, to location: %s", tmpLocation, archive));
}
typeManager.addModule(datasetModuleId, className, archive, forceUpdate);
// todo: response with DatasetModuleMeta of just added module (and log this info)
// Ideally this should have been done before, but we cannot grant privileges on types until they've been
// added to the type MDS. First revoke any orphaned privileges for types left behind by past failed revokes
revokeAllPrivilegesOnModule(datasetModuleId);
grantAllPrivilegesOnModule(datasetModuleId, principal);
LOG.info("Added module {}", datasetModuleId);
responder.sendStatus(HttpResponseStatus.OK);
} catch (Exception e) {
// There was a problem in deploying the dataset module. so revoke the privileges.
revokeAllPrivilegesOnModule(datasetModuleId);
// In case copy to temporary file failed, or rename failed
try {
tmpLocation.delete();
} catch (IOException ex) {
LOG.warn("Failed to cleanup temporary location {}", tmpLocation);
}
// A module conflict is an expected client error (409); anything else propagates to the
// framework's error handling.
if (e instanceof DatasetModuleConflictException) {
responder.sendString(HttpResponseStatus.CONFLICT, e.getMessage());
} else {
throw e;
}
}
}
};
}
Aggregations