Example usage of co.cask.cdap.proto.NamespaceMeta in project cdap by caskdata: class HBaseTableUtil, method getHBaseToCDAPNamespaceMap.
/**
 * Builds the reverse (HBase namespace -> CDAP namespace) mapping. A separate metrics service that
 * scans HBase tables uses this to translate an HBase namespace back into the CDAP namespace so the
 * reported metrics make sense from the CDAP perspective. The upgrade step also relies on it to
 * construct the correct {@link DatasetAdmin} for each dataset.
 *
 * @return immutable map from HBase namespace to CDAP namespace
 * @throws IOException if the {@link NamespaceMeta} of all namespaces could not be listed, or if no
 *         {@code NamespaceQueryAdmin} was configured
 */
public Map<String, String> getHBaseToCDAPNamespaceMap() throws IOException {
  // Without a query admin there is nothing to enumerate; fail fast.
  if (namespaceQueryAdmin == null) {
    throw new IOException("NamespaceQueryAdmin is not set and a reverseLookupMap was requested.");
  }
  Map<String, String> hbaseToCdap = new HashMap<>();
  try {
    for (NamespaceMeta meta : namespaceQueryAdmin.list()) {
      // Later entries overwrite earlier ones on a duplicate HBase namespace (HashMap semantics).
      hbaseToCdap.put(getHBaseNamespace(meta), meta.getName());
    }
  } catch (Exception ex) {
    // Preserve the cause; callers only see IOException per the method contract.
    throw new IOException("NamespaceQueryAdmin lookup to list all NamespaceMetas failed", ex);
  }
  return ImmutableMap.copyOf(hbaseToCdap);
}
Example usage of co.cask.cdap.proto.NamespaceMeta in project cdap by caskdata: class AuthorizationTest, method testCrossNSDatasetAccessWithAuthSpark.
/**
 * Verifies that a Spark program reading from and writing to datasets in other namespaces fails
 * until the running user (BOB) has been granted READ on the input dataset AND WRITE on the output
 * dataset. The user is switched via {@link SecurityRequestContext} before each attempt; ALICE is
 * the admin who creates/inspects the namespaces. Statement order is significant throughout.
 */
private void testCrossNSDatasetAccessWithAuthSpark(SparkManager sparkManager) throws Exception {
NamespaceMeta inputDatasetNSMeta = new NamespaceMeta.Builder().setName("inputDatasetNS").build();
NamespaceMeta outputDatasetNSMeta = new NamespaceMeta.Builder().setName("outputDatasetNS").build();
getNamespaceAdmin().create(inputDatasetNSMeta);
getNamespaceAdmin().create(outputDatasetNSMeta);
addDatasetInstance(inputDatasetNSMeta.getNamespaceId().dataset("input"), "keyValueTable").create();
addDatasetInstance(outputDatasetNSMeta.getNamespaceId().dataset("output"), "keyValueTable").create();
// write sample data into the input dataset (as the current, privileged user)
addDummyData(inputDatasetNSMeta.getNamespaceId(), "input");
// Switch to Bob and run the spark program. This will fail because Bob has access to neither the
// input nor the output dataset.
SecurityRequestContext.setUserId(BOB.getName());
Map<String, String> args = ImmutableMap.of(TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.INPUT_DATASET_NAMESPACE, inputDatasetNSMeta.getNamespaceId().getNamespace(), TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.INPUT_DATASET_NAME, "input", TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.OUTPUT_DATASET_NAMESPACE, outputDatasetNSMeta.getNamespaceId().getNamespace(), TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.OUTPUT_DATASET_NAME, "output");
assertProgramFailure(args, sparkManager);
SecurityRequestContext.setUserId(ALICE.getName());
// Verify nothing was written to the output dataset
assertDatasetIsEmpty(outputDatasetNSMeta.getNamespaceId(), "output");
// give READ privilege to BOB on the input dataset only
grantAndAssertSuccess(inputDatasetNSMeta.getNamespaceId().dataset("input"), BOB, EnumSet.of(Action.READ));
// switch back to Bob and try running again. This still fails since Bob lacks access to the
// output dataset.
SecurityRequestContext.setUserId(BOB.getName());
assertProgramFailure(args, sparkManager);
// Switch back to Alice
SecurityRequestContext.setUserId(ALICE.getName());
// Verify nothing was written to the output dataset
assertDatasetIsEmpty(outputDatasetNSMeta.getNamespaceId(), "output");
// give WRITE privilege to BOB on the output dataset; Bob now has all required privileges
grantAndAssertSuccess(outputDatasetNSMeta.getNamespaceId().dataset("output"), BOB, EnumSet.of(Action.WRITE));
// switch back to BOB and run spark again. This should work.
SecurityRequestContext.setUserId(BOB.getName());
sparkManager.start(args);
sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 120, TimeUnit.SECONDS);
// Verify the results as Alice
SecurityRequestContext.setUserId(ALICE.getName());
verifyDummyData(outputDatasetNSMeta.getNamespaceId(), "output");
getNamespaceAdmin().delete(inputDatasetNSMeta.getNamespaceId());
getNamespaceAdmin().delete(outputDatasetNSMeta.getNamespaceId());
}
Example usage of co.cask.cdap.proto.NamespaceMeta in project cdap by caskdata: class SparkStreamIntegrationTestRun, method testSparkCrossNS.
@Test
public void testSparkCrossNS() throws Exception {
// Test for reading a stream cross-namespace, plus reading and writing to datasets cross-namespace:
// - TestSparkStreamIntegrationApp is deployed in the default namespace; it reads a stream from
//   streamNS and writes to a dataset ("result") in its own namespace (default).
// - TestSparkCrossNSDatasetApp is deployed in crossNSDatasetAppNS; it reads the "result" dataset
//   from default and writes to the "finalDataset" dataset in outputDatasetNS.
NamespaceMeta streamNSMeta = new NamespaceMeta.Builder().setName("streamNS").build();
NamespaceMeta crossNSDatasetAppNS = new NamespaceMeta.Builder().setName("crossNSDatasetAppNS").build();
NamespaceMeta outputDatasetNS = new NamespaceMeta.Builder().setName("outputDatasetNS").build();
getNamespaceAdmin().create(streamNSMeta);
getNamespaceAdmin().create(crossNSDatasetAppNS);
getNamespaceAdmin().create(outputDatasetNS);
addDatasetInstance(outputDatasetNS.getNamespaceId().dataset("finalDataset"), "keyValueTable");
StreamManager streamManager = getStreamManager(streamNSMeta.getNamespaceId().stream("testStream"));
streamManager.createStream();
// seed the stream with 50 events for the first Spark program to consume
for (int i = 0; i < 50; i++) {
streamManager.send(String.valueOf(i));
}
// deploy TestSparkStreamIntegrationApp in the default namespace
ApplicationManager spark1 = deployApplication(TestSparkStreamIntegrationApp.class);
Map<String, String> args = ImmutableMap.of(TestSparkStreamIntegrationApp.SparkStreamProgram.INPUT_STREAM_NAMESPACE, streamNSMeta.getNamespaceId().getNamespace(), TestSparkStreamIntegrationApp.SparkStreamProgram.INPUT_STREAM_NAME, "testStream");
SparkManager sparkManager = spark1.getSparkManager("SparkStreamProgram").start(args);
sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 120, TimeUnit.SECONDS);
// Verify the results written into the default namespace by spark1
DataSetManager<KeyValueTable> datasetManager = getDataset("result");
verifyDatasetResult(datasetManager);
// deploy the cross-namespace dataset app in the crossNSDatasetAppNS namespace
ApplicationManager spark2 = deployApplication(crossNSDatasetAppNS.getNamespaceId(), TestSparkCrossNSDatasetApp.class);
args = ImmutableMap.of(TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.INPUT_DATASET_NAMESPACE, NamespaceId.DEFAULT.getNamespace(), TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.INPUT_DATASET_NAME, "result", TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.OUTPUT_DATASET_NAMESPACE, outputDatasetNS.getNamespaceId().getNamespace(), TestSparkCrossNSDatasetApp.SparkCrossNSDatasetProgram.OUTPUT_DATASET_NAME, "finalDataset");
sparkManager = spark2.getSparkManager("SparkCrossNSDatasetProgram").start(args);
sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 120, TimeUnit.SECONDS);
// Verify the results written into outputDatasetNS by spark2
datasetManager = getDataset(outputDatasetNS.getNamespaceId().dataset("finalDataset"));
verifyDatasetResult(datasetManager);
}
Example usage of co.cask.cdap.proto.NamespaceMeta in project cdap by caskdata: class AppFabricClient, method reset.
/**
 * Resets app-fabric state by deleting every namespace, first dropping all datasets in the
 * namespace via the unrecoverable endpoint and then deleting the namespace itself. Each HTTP
 * handler call is verified to have returned 200 OK before proceeding.
 *
 * @throws Exception if listing namespaces fails or a delete does not return OK
 */
public void reset() throws Exception {
MockResponder responder;
HttpRequest request;
// delete all namespaces
for (NamespaceMeta namespaceMeta : namespaceQueryAdmin.list()) {
Id.Namespace namespace = Id.Namespace.from(namespaceMeta.getName());
// first delete all datasets in the namespace
responder = new MockResponder();
request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.DELETE, String.format("%s/unrecoverable/namespaces/%s/datasets", Constants.Gateway.API_VERSION_3, namespace.getId()));
namespaceHttpHandler.deleteDatasets(request, responder, namespaceMeta.getName());
verifyResponse(HttpResponseStatus.OK, responder.getStatus(), String.format("could not delete datasets in namespace '%s'", namespace.getId()));
// then delete the namespace itself; use the shared API-version constant rather than a
// hard-coded "/v3" so both URIs stay consistent with the rest of the file
responder = new MockResponder();
request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.DELETE, String.format("%s/unrecoverable/namespaces/%s", Constants.Gateway.API_VERSION_3, namespace.getId()));
namespaceHttpHandler.delete(request, responder, namespaceMeta.getName());
verifyResponse(HttpResponseStatus.OK, responder.getStatus(), String.format("could not delete namespace '%s'", namespace.getId()));
}
}
Example usage of co.cask.cdap.proto.NamespaceMeta in project cdap by caskdata: class RemoteNamespaceQueryTest, method testCustomNS.
@Test
public void testCustomNS() throws Exception {
// A namespace whose HBase namespace, root directory, Hive database and scheduler queue are all
// custom-mapped instead of being derived from the CDAP namespace name.
final String cdapNamespace = "NS1";
final String hbaseNamespace = "custHBase";
final String rootDirectory = "/directory";
final String hiveDb = "myHive";
final String schedulerQueue = "schQ";
final String description = "Namespace with custom HBase mapping";
NamespaceMeta customMeta = new NamespaceMeta.Builder()
  .setName(cdapNamespace)
  .setDescription(description)
  .setSchedulerQueueName(schedulerQueue)
  .setRootDirectory(rootDirectory)
  .setHBaseNamespace(hbaseNamespace)
  .setHiveDatabase(hiveDb)
  .build();
// create the namespace location up front since the admin expects it to exist
Location nsLocation = namespacedLocationFactory.get(customMeta);
nsLocation.mkdirs();
namespaceAdmin.create(customMeta);
NamespaceId namespaceId = new NamespaceId(cdapNamespace);
Assert.assertTrue(queryClient.exists(namespaceId));
// the config fetched through the remote query client must match exactly what was configured
NamespaceConfig expectedConfig = new NamespaceConfig(schedulerQueue, rootDirectory, hbaseNamespace, hiveDb, null, null, null);
NamespaceMeta resultMeta = queryClient.get(namespaceId);
Assert.assertEquals(expectedConfig, resultMeta.getConfig());
// after deletion the namespace must no longer be visible to the query client
namespaceAdmin.delete(namespaceId);
Assert.assertFalse(queryClient.exists(namespaceId));
}
Aggregations