Use of org.apache.ranger.authorization.hadoop.config.RangerPluginConfig in project ranger by apache.
From class RangerPolicyEngineImpl, method isSuperUser:
private boolean isSuperUser(String userName, Set<String> userGroups) {
    // check the service-level configuration first
    boolean ret = serviceConfig.isSuperUser(userName);

    if (!ret) {
        // fall back to the plugin-level configuration
        RangerPluginConfig pluginConfig = policyEngine.getPluginContext().getConfig();

        ret = pluginConfig.isSuperUser(userName);

        // as a last resort, check whether any of the user's groups is a super-group
        if (!ret && userGroups != null && !userGroups.isEmpty()) {
            ret = serviceConfig.hasSuperGroup(userGroups) || pluginConfig.hasSuperGroup(userGroups);
        }
    }

    return ret;
}
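The lookup is strictly layered: service-level super-users first, then the plugin-level configuration, and group membership only as a last resort. Below is a minimal, self-contained sketch of that layering; SuperUserLookup and its fields are hypothetical stand-ins for illustration, not Ranger classes.

import java.util.Set;

// Hypothetical stand-in mirroring the two-tier lookup above: service list,
// then plugin list, then super-groups only when neither names the user.
public class SuperUserLookup {
    private final Set<String> serviceSuperUsers;
    private final Set<String> pluginSuperUsers;
    private final Set<String> superGroups;

    public SuperUserLookup(Set<String> serviceSuperUsers, Set<String> pluginSuperUsers, Set<String> superGroups) {
        this.serviceSuperUsers = serviceSuperUsers;
        this.pluginSuperUsers  = pluginSuperUsers;
        this.superGroups       = superGroups;
    }

    public boolean isSuperUser(String userName, Set<String> userGroups) {
        boolean ret = serviceSuperUsers.contains(userName) || pluginSuperUsers.contains(userName);

        if (!ret && userGroups != null) {
            // true if any of the user's groups appears in the super-group set
            ret = userGroups.stream().anyMatch(superGroups::contains);
        }

        return ret;
    }
}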
Use of org.apache.ranger.authorization.hadoop.config.RangerPluginConfig in project ranger by apache.
From class RangerPolicyEnginePerformanceTest, method policyEngineTest:
@Test
public void policyEngineTest() throws InterruptedException {
    List<RangerAccessRequest> requests        = requestsCache.getUnchecked(concurrency);
    ServicePolicies           servicePolicies = servicePoliciesCache.getUnchecked(numberOfPolicies);
    RangerPluginContext       pluginContext   = new RangerPluginContext(new RangerPluginConfig("hive", null, "perf-test", "cl1", "on-prem", RangerPolicyFactory.createPolicyEngineOption()));

    final RangerPolicyEngineImpl rangerPolicyEngine = new RangerPolicyEngineImpl(servicePolicies, pluginContext, null);

    for (int iterations = 0; iterations < WARM_UP__ITERATIONS; iterations++) {
        // feed the return value to a cheap operation (System#identityHashCode) so the JIT won't remove the call as dead code
        System.identityHashCode(rangerPolicyEngine.evaluatePolicies(requests.get(iterations % concurrency), RangerPolicy.POLICY_TYPE_ACCESS, null));
        PerfDataRecorder.clearStatistics();
    }

    final CountDownLatch latch = new CountDownLatch(concurrency);

    for (int i = 0; i < concurrency; i++) {
        final RangerAccessRequest rangerAccessRequest = requests.get(i);

        new Thread(new Runnable() {
            @Override
            public void run() {
                System.identityHashCode(rangerPolicyEngine.evaluatePolicies(rangerAccessRequest, RangerPolicy.POLICY_TYPE_ACCESS, null));
                latch.countDown();
            }
        }, String.format("Client #%s", i)).start();
    }

    latch.await();
}
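The CountDownLatch is what keeps the test method alive until every client thread has issued its request: each worker counts down once, and await() returns only when the count reaches zero. A minimal, self-contained illustration of the pattern (nothing here is Ranger-specific):

import java.util.concurrent.CountDownLatch;

public class LatchDemo {
    public static void main(String[] args) throws InterruptedException {
        final int concurrency = 4;
        final CountDownLatch latch = new CountDownLatch(concurrency);

        for (int i = 0; i < concurrency; i++) {
            final int id = i; // loop variable must be captured via an effectively-final local

            new Thread(() -> {
                System.out.println("Client #" + id + " finished");
                latch.countDown(); // decrement the latch exactly once per worker
            }, "Client #" + id).start();
        }

        latch.await(); // blocks until all workers have counted down
        System.out.println("All clients done");
    }
}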
Use of org.apache.ranger.authorization.hadoop.config.RangerPluginConfig in project ranger by apache.
From class RangerYarnAuthorizer, method init:
@Override
public void init(Configuration conf) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("==> RangerYarnAuthorizer.init()");
    }

    // lazily create the plugin with double-checked locking, so concurrent
    // callers initialize it exactly once
    RangerYarnPlugin plugin = yarnPlugin;

    if (plugin == null) {
        synchronized (RangerYarnAuthorizer.class) {
            plugin = yarnPlugin;

            if (plugin == null) {
                plugin = new RangerYarnPlugin();
                plugin.init();
                yarnPlugin = plugin;
            }
        }
    }

    RangerPluginConfig pluginConfig = yarnPlugin.getConfig();

    this.yarnAuthEnabled = pluginConfig.getBoolean(RangerHadoopConstants.RANGER_ADD_YARN_PERMISSION_PROP, RangerHadoopConstants.RANGER_ADD_YARN_PERMISSION_DEFAULT);
    this.yarnModuleName  = pluginConfig.get(RangerHadoopConstants.AUDITLOG_YARN_MODULE_ACL_NAME_PROP, RangerHadoopConstants.DEFAULT_YARN_MODULE_ACL_NAME);

    pluginConfig.setIsFallbackSupported(this.yarnAuthEnabled);

    if (LOG.isDebugEnabled()) {
        LOG.debug("<== RangerYarnAuthorizer.init()");
    }
}
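The null-check / synchronize / re-check sequence above is the classic double-checked locking idiom. For it to be safe under the Java memory model, the shared field must be volatile; yarnPlugin's declaration is outside this excerpt, so that is an assumption here. A generic sketch with hypothetical names:

// Generic double-checked locking sketch; the field must be volatile for
// safe publication, otherwise another thread may observe a partially
// constructed object.
public class LazyHolder {
    private static volatile ExpensivePlugin instance;

    public static ExpensivePlugin get() {
        ExpensivePlugin local = instance; // single volatile read on the fast path

        if (local == null) {
            synchronized (LazyHolder.class) {
                local = instance;

                if (local == null) {
                    local = new ExpensivePlugin();
                    local.init();
                    instance = local; // publish the fully initialized object
                }
            }
        }

        return local;
    }
}

class ExpensivePlugin {
    void init() { /* expensive one-time setup */ }
}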
Use of org.apache.ranger.authorization.hadoop.config.RangerPluginConfig in project ranger by apache.
From class RangerTagEnricher, method createLock:
protected RangerReadWriteLock createLock() {
    String             propertyPrefix        = "ranger.plugin." + serviceDef.getName();
    RangerPluginConfig config                = getPluginConfig();
    boolean            deltasEnabled         = config != null && config.getBoolean(propertyPrefix + RangerCommonConstants.PLUGIN_CONFIG_SUFFIX_TAG_DELTA, RangerCommonConstants.PLUGIN_CONFIG_SUFFIX_TAG_DELTA_DEFAULT);
    boolean            inPlaceUpdatesEnabled = config != null && config.getBoolean(propertyPrefix + RangerCommonConstants.PLUGIN_CONFIG_SUFFIX_IN_PLACE_TAG_UPDATES, RangerCommonConstants.PLUGIN_CONFIG_SUFFIX_IN_PLACE_TAG_UPDATES_DEFAULT);
    boolean            useReadWriteLock      = deltasEnabled && inPlaceUpdatesEnabled;

    LOG.info("Policy-Engine will" + (useReadWriteLock ? " " : " not ") + "use read-write locking to update tags in place when tag-deltas are provided");

    return new RangerReadWriteLock(useReadWriteLock);
}
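RangerReadWriteLock is constructed with a flag that decides whether locking is actually performed; its internal API is not shown in this excerpt. A hypothetical sketch of a switchable read-write lock in the same spirit, built on java.util.concurrent:

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// When locking is disabled, acquire/release become no-ops, so the common
// read-only path (no tag deltas, no in-place updates) pays no locking cost.
public class SwitchableReadWriteLock {
    private final ReadWriteLock delegate;

    public SwitchableReadWriteLock(boolean enabled) {
        this.delegate = enabled ? new ReentrantReadWriteLock() : null;
    }

    public void readLock()    { if (delegate != null) delegate.readLock().lock(); }
    public void readUnlock()  { if (delegate != null) delegate.readLock().unlock(); }
    public void writeLock()   { if (delegate != null) delegate.writeLock().lock(); }
    public void writeUnlock() { if (delegate != null) delegate.writeLock().unlock(); }
}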
Use of org.apache.ranger.authorization.hadoop.config.RangerPluginConfig in project ranger by apache.
From class TestPolicyEngine, method setUpBeforeClass:
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    pluginContext = new RangerPluginContext(new RangerPluginConfig("hive", null, "hive", "cl1", "on-prem", null));

    gsonBuilder = new GsonBuilder().setDateFormat("yyyyMMdd-HH:mm:ss.SSSZ")
                                   .setPrettyPrinting()
                                   .registerTypeAdapter(RangerAccessRequest.class, new RangerAccessRequestDeserializer())
                                   .registerTypeAdapter(RangerAccessResource.class, new RangerResourceDeserializer())
                                   .create();

    // For setting up auditProvider
    Properties auditProperties = new Properties();

    String AUDIT_PROPERTIES_FILE = "xasecure-audit.properties";
    File   propFile              = new File(AUDIT_PROPERTIES_FILE);

    if (propFile.exists()) {
        System.out.println("Loading Audit properties file: " + AUDIT_PROPERTIES_FILE);

        auditProperties.load(new FileInputStream(propFile));
    } else {
        System.out.println("Audit properties file missing: " + AUDIT_PROPERTIES_FILE);

        // Set this to true to enable audit logging
        auditProperties.setProperty("xasecure.audit.is.enabled", "false");
        auditProperties.setProperty("xasecure.audit.log4j.is.enabled", "false");
        auditProperties.setProperty("xasecure.audit.log4j.is.async", "false");
        auditProperties.setProperty("xasecure.audit.log4j.async.max.queue.size", "100000");
        auditProperties.setProperty("xasecure.audit.log4j.async.max.flush.interval.ms", "30000");
    }

    AuditProviderFactory factory = AuditProviderFactory.getInstance();

    // second parameter does not matter for v2
    factory.init(auditProperties, "hdfs");

    AuditHandler provider = factory.getAuditProvider();

    System.out.println("provider=" + provider.toString());

    File file = File.createTempFile("ranger-admin-test-site", ".xml");

    file.deleteOnExit();

    FileOutputStream   outStream = new FileOutputStream(file);
    OutputStreamWriter writer    = new OutputStreamWriter(outStream);

    /*
    // For setting up TestTagProvider
    writer.write("<configuration>\n" +
                 "  <property>\n" +
                 "    <name>ranger.plugin.tag.policy.rest.url</name>\n" +
                 "    <value>http://os-def:6080</value>\n" +
                 "  </property>\n" +
                 "  <property>\n" +
                 "    <name>ranger.externalurl</name>\n" +
                 "    <value>http://os-def:6080</value>\n" +
                 "  </property>\n" +
                 "</configuration>\n");
    */

    // For setting up x-forwarded-for for Hive
    writer.write("<configuration>\n" +
                 "  <property>\n" +
                 "    <name>ranger.plugin.hive.use.x-forwarded-for.ipaddress</name>\n" +
                 "    <value>true</value>\n" +
                 "  </property>\n" +
                 "  <property>\n" +
                 "    <name>ranger.plugin.hive.trusted.proxy.ipaddresses</name>\n" +
                 "    <value>255.255.255.255; 128.101.101.101;128.101.101.99</value>\n" +
                 "  </property>\n" +
                 "  <property>\n" +
                 "    <name>ranger.plugin.tag.attr.additional.date.formats</name>\n" +
                 "    <value>abcd||xyz||yyyy/MM/dd'T'HH:mm:ss.SSS'Z'</value>\n" +
                 "  </property>\n" +
                 "  <property>\n" +
                 "    <name>ranger.policyengine.trie.builder.thread.count</name>\n" +
                 "    <value>3</value>\n" +
                 "  </property>\n" +
                 "</configuration>\n");

    writer.close();

    pluginContext.getConfig().addResource(new org.apache.hadoop.fs.Path(file.toURI()));
}
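Once the temporary XML file is added as a resource, its properties become readable through the standard Hadoop Configuration API (RangerPluginConfig extends Configuration, which is why addResource works above). A short usage sketch that could follow the setup, assuming the same pluginContext; property names are taken from the XML written above:

// reading back the properties via the Hadoop Configuration API
org.apache.hadoop.conf.Configuration conf = pluginContext.getConfig();

boolean useXff      = conf.getBoolean("ranger.plugin.hive.use.x-forwarded-for.ipaddress", false); // true, per the file
String  proxies     = conf.get("ranger.plugin.hive.trusted.proxy.ipaddresses");                   // "255.255.255.255; 128.101.101.101;128.101.101.99"
int     trieThreads = conf.getInt("ranger.policyengine.trie.builder.thread.count", 1);            // 3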