Use of org.apache.accumulo.core.security.Authorizations in project accumulo by apache.
The class KerberosIT, method testUserPrivilegesForTable.
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "path provided by test")
@Test
public void testUserPrivilegesForTable() throws Exception {
  String user1 = testName.getMethodName();
  final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab");
  if (user1Keytab.exists() && !user1Keytab.delete()) {
    log.warn("Unable to delete {}", user1Keytab);
  }
  // Create some new users -- cannot contain realm
  kdc.createPrincipal(user1Keytab, user1);
  final String qualifiedUser1 = kdc.qualifyUser(user1);
  // Log in as user1
  UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(qualifiedUser1, user1Keytab.getAbsolutePath());
  log.info("Logged in as {}", user1);
  ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
    // Indirectly creates this user when we use it
    AccumuloClient client = mac.createAccumuloClient(qualifiedUser1, new KerberosToken());
    log.info("Created client as {}", qualifiedUser1);
    // The new user should have no system permissions
    for (SystemPermission perm : SystemPermission.values()) {
      assertFalse(client.securityOperations().hasSystemPermission(qualifiedUser1, perm));
    }
    return null;
  });
  final String table = testName.getMethodName() + "_user_table";
  final String viz = "viz";
  ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
  ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
    AccumuloClient client = mac.createAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
    client.tableOperations().create(table);
    // Give our unprivileged user permission on the table we made for them
    client.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.READ);
    client.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.WRITE);
    client.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.ALTER_TABLE);
    client.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.DROP_TABLE);
    client.securityOperations().changeUserAuthorizations(qualifiedUser1, new Authorizations(viz));
    return null;
  });
  // Switch back to the original user
  ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(qualifiedUser1, user1Keytab.getAbsolutePath());
  ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
    AccumuloClient client = mac.createAccumuloClient(qualifiedUser1, new KerberosToken());
    // Make sure we can actually use the table we made
    // Write data
    final long ts = 1000L;
    try (BatchWriter bw = client.createBatchWriter(table)) {
      Mutation m = new Mutation("a");
      m.put("b", "c", new ColumnVisibility(viz.getBytes()), ts, "d");
      bw.addMutation(m);
    }
    // Compact
    client.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
    // Alter
    client.tableOperations().setProperty(table, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
    // Read (and proper authorizations)
    try (Scanner s = client.createScanner(table, new Authorizations(viz))) {
      Iterator<Entry<Key, Value>> iter = s.iterator();
      assertTrue("No results from iterator", iter.hasNext());
      Entry<Key, Value> entry = iter.next();
      assertEquals(new Key("a", "b", "c", viz, ts), entry.getKey());
      assertEquals(new Value("d"), entry.getValue());
      assertFalse("Had more results from iterator", iter.hasNext());
      return null;
    }
  });
}
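The read at the end only succeeds because the scan's requested authorization "viz" is among those granted by changeUserAuthorizations. A minimal sketch of that relationship, assuming an existing AccumuloClient named client and the same qualifiedUser1, table, and viz values as above (the helper method itself is hypothetical and not part of the test):

private void scanWithGrantedAuths(AccumuloClient client, String principal, String table, String viz) throws Exception {
  // Ask the server which authorizations the user currently holds
  Authorizations granted = client.securityOperations().getUserAuthorizations(principal);
  // Only request the label if it was granted; asking for an authorization the
  // user does not hold causes the scan to be rejected
  Authorizations scanAuths = granted.contains(viz) ? new Authorizations(viz) : Authorizations.EMPTY;
  try (Scanner s = client.createScanner(table, scanAuths)) {
    for (Entry<Key, Value> entry : s) {
      log.debug("{} -> {}", entry.getKey(), entry.getValue());
    }
  }
}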
Use of org.apache.accumulo.core.security.Authorizations in project accumulo by apache.
The class AccumuloRecordReader, method initialize (org.apache.hadoop.mapred variant).
/**
 * Initialize a scanner over the given input split using this task attempt configuration.
 */
public void initialize(InputSplit inSplit, JobConf job) throws IOException {
  baseSplit = (org.apache.accumulo.hadoopImpl.mapreduce.RangeInputSplit) inSplit;
  log.debug("Initializing input split: " + baseSplit);
  client = createClient(job, CLASS);
  ClientContext context = (ClientContext) client;
  Authorizations authorizations = InputConfigurator.getScanAuthorizations(CLASS, job);
  String classLoaderContext = InputConfigurator.getClassLoaderContext(CLASS, job);
  String table = baseSplit.getTableName();
  // In case the table name changed, we can still use the previous name in terms of
  // configuration, but the scanner will use the table id resolved at job setup time
  InputTableConfig tableConfig = InputConfigurator.getInputTableConfig(CLASS, job, baseSplit.getTableName());
  log.debug("Created client with user: " + context.whoami());
  log.debug("Creating scanner for table: " + table);
  log.debug("Authorizations are: " + authorizations);
  if (baseSplit instanceof BatchInputSplit) {
    BatchScanner scanner;
    BatchInputSplit multiRangeSplit = (BatchInputSplit) baseSplit;
    try {
      // Note: BatchScanner will use at most one thread per tablet, currently BatchInputSplit
      // will not span tablets
      int scanThreads = 1;
      scanner = context.createBatchScanner(baseSplit.getTableName(), authorizations, scanThreads);
      setupIterators(job, scanner, baseSplit);
      if (classLoaderContext != null) {
        scanner.setClassLoaderContext(classLoaderContext);
      }
    } catch (TableNotFoundException e) {
      throw new IOException(e);
    }
    scanner.setRanges(multiRangeSplit.getRanges());
    scannerBase = scanner;
  } else if (baseSplit instanceof RangeInputSplit) {
    split = (RangeInputSplit) baseSplit;
    Boolean isOffline = baseSplit.isOffline();
    if (isOffline == null) {
      isOffline = tableConfig.isOfflineScan();
    }
    Boolean isIsolated = baseSplit.isIsolatedScan();
    if (isIsolated == null) {
      isIsolated = tableConfig.shouldUseIsolatedScanners();
    }
    Boolean usesLocalIterators = baseSplit.usesLocalIterators();
    if (usesLocalIterators == null) {
      usesLocalIterators = tableConfig.shouldUseLocalIterators();
    }
    Scanner scanner;
    try {
      if (isOffline) {
        scanner = new OfflineScanner(context, TableId.of(baseSplit.getTableId()), authorizations);
      } else {
        scanner = new ScannerImpl(context, TableId.of(baseSplit.getTableId()), authorizations);
      }
      if (isIsolated) {
        log.info("Creating isolated scanner");
        scanner = new IsolatedScanner(scanner);
      }
      if (usesLocalIterators) {
        log.info("Using local iterators");
        scanner = new ClientSideIteratorScanner(scanner);
      }
      setupIterators(job, scanner, baseSplit);
    } catch (RuntimeException e) {
      throw new IOException(e);
    }
    scanner.setRange(baseSplit.getRange());
    scannerBase = scanner;
  } else {
    throw new IllegalArgumentException("Can not initialize from " + baseSplit.getClass());
  }
  Collection<IteratorSetting.Column> columns = baseSplit.getFetchedColumns();
  if (columns == null) {
    columns = tableConfig.getFetchedColumns();
  }
  // Set up the scanner within the bounds of this split
  for (Pair<Text, Text> c : columns) {
    if (c.getSecond() != null) {
      log.debug("Fetching column " + c.getFirst() + ":" + c.getSecond());
      scannerBase.fetchColumn(c.getFirst(), c.getSecond());
    } else {
      log.debug("Fetching column family " + c.getFirst());
      scannerBase.fetchColumnFamily(c.getFirst());
    }
  }
  SamplerConfiguration samplerConfig = baseSplit.getSamplerConfiguration();
  if (samplerConfig == null) {
    samplerConfig = tableConfig.getSamplerConfiguration();
  }
  if (samplerConfig != null) {
    scannerBase.setSamplerConfiguration(samplerConfig);
  }
  Map<String, String> executionHints = baseSplit.getExecutionHints();
  if (executionHints == null || executionHints.isEmpty()) {
    executionHints = tableConfig.getExecutionHints();
  }
  if (executionHints != null) {
    scannerBase.setExecutionHints(executionHints);
  }
  scannerIterator = scannerBase.iterator();
  numKeysRead = 0;
}
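The authorizations used above are not decided by the record reader; they are read back from the job configuration that was populated at job setup time. A hedged sketch of that setup side, assuming the public Accumulo 2.x builder in org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat (the instance name, ZooKeeper address, credentials, and table name are placeholders):

// Job setup on the client: store connection info, the table name, and the scan
// authorizations into the job; the record reader later retrieves the auths via
// InputConfigurator.getScanAuthorizations(CLASS, job)
Properties clientProps = Accumulo.newClientProperties().to("myInstance", "zoo1:2181").as("reader", "readerSecret").build();
Job job = Job.getInstance();
AccumuloInputFormat.configure()
    .clientProperties(clientProps)
    .table("mytable")
    .auths(new Authorizations("viz"))
    .store(job);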
Use of org.apache.accumulo.core.security.Authorizations in project accumulo by apache.
The class AccumuloRecordReader, method initialize (org.apache.hadoop.mapreduce variant).
@Override
public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IOException {
  split = (RangeInputSplit) inSplit;
  log.debug("Initializing input split: " + split);
  Configuration conf = attempt.getConfiguration();
  client = createClient(attempt, this.CLASS);
  ClientContext context = (ClientContext) client;
  Authorizations authorizations = InputConfigurator.getScanAuthorizations(CLASS, conf);
  String classLoaderContext = InputConfigurator.getClassLoaderContext(CLASS, conf);
  String table = split.getTableName();
  // In case the table name changed, we can still use the previous name in terms of
  // configuration, but the scanner will use the table id resolved at job setup time
  InputTableConfig tableConfig = InputConfigurator.getInputTableConfig(CLASS, conf, split.getTableName());
  log.debug("Creating client with user: " + client.whoami());
  log.debug("Creating scanner for table: " + table);
  log.debug("Authorizations are: " + authorizations);
  if (split instanceof BatchInputSplit) {
    BatchInputSplit batchSplit = (BatchInputSplit) split;
    BatchScanner scanner;
    try {
      // Note: BatchScanner will use at most one thread per tablet, currently BatchInputSplit
      // will not span tablets
      int scanThreads = 1;
      scanner = context.createBatchScanner(split.getTableName(), authorizations, scanThreads);
      setupIterators(attempt, scanner, split);
      if (classLoaderContext != null) {
        scanner.setClassLoaderContext(classLoaderContext);
      }
    } catch (TableNotFoundException e) {
      e.printStackTrace();
      throw new IOException(e);
    }
    scanner.setRanges(batchSplit.getRanges());
    scannerBase = scanner;
  } else {
    Scanner scanner;
    Boolean isOffline = split.isOffline();
    if (isOffline == null) {
      isOffline = tableConfig.isOfflineScan();
    }
    Boolean isIsolated = split.isIsolatedScan();
    if (isIsolated == null) {
      isIsolated = tableConfig.shouldUseIsolatedScanners();
    }
    Boolean usesLocalIterators = split.usesLocalIterators();
    if (usesLocalIterators == null) {
      usesLocalIterators = tableConfig.shouldUseLocalIterators();
    }
    try {
      if (isOffline) {
        scanner = new OfflineScanner(context, TableId.of(split.getTableId()), authorizations);
      } else {
        // Not using public API to create scanner so that we can use table ID
        // Table ID is used in case of renames during M/R job
        scanner = new ScannerImpl(context, TableId.of(split.getTableId()), authorizations);
      }
      if (isIsolated) {
        log.info("Creating isolated scanner");
        scanner = new IsolatedScanner(scanner);
      }
      if (usesLocalIterators) {
        log.info("Using local iterators");
        scanner = new ClientSideIteratorScanner(scanner);
      }
      setupIterators(attempt, scanner, split);
    } catch (RuntimeException e) {
      throw new IOException(e);
    }
    scanner.setRange(split.getRange());
    scannerBase = scanner;
  }
  Collection<IteratorSetting.Column> columns = split.getFetchedColumns();
  if (columns == null) {
    columns = tableConfig.getFetchedColumns();
  }
  // Set up the scanner within the bounds of this split
  for (Pair<Text, Text> c : columns) {
    if (c.getSecond() != null) {
      log.debug("Fetching column " + c.getFirst() + ":" + c.getSecond());
      scannerBase.fetchColumn(c.getFirst(), c.getSecond());
    } else {
      log.debug("Fetching column family " + c.getFirst());
      scannerBase.fetchColumnFamily(c.getFirst());
    }
  }
  SamplerConfiguration samplerConfig = split.getSamplerConfiguration();
  if (samplerConfig == null) {
    samplerConfig = tableConfig.getSamplerConfiguration();
  }
  if (samplerConfig != null) {
    scannerBase.setSamplerConfiguration(samplerConfig);
  }
  Map<String, String> executionHints = split.getExecutionHints();
  if (executionHints == null || executionHints.isEmpty()) {
    executionHints = tableConfig.getExecutionHints();
  }
  if (executionHints != null) {
    scannerBase.setExecutionHints(executionHints);
  }
  scannerIterator = scannerBase.iterator();
  numKeysRead = 0;
}
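This mapreduce variant handles the Authorizations object exactly like the mapred one above. For reference, a short sketch of the core Authorizations calls behind the object being logged there (plain core API, no MapReduce classes involved; the label names are placeholders):

Authorizations auths = new Authorizations("viz", "private");
assert auths.contains("viz");              // single-label membership check
assert !auths.contains("secret");
assert Authorizations.EMPTY.isEmpty();     // empty auths: a scan sees only unlabeled entries
log.debug("Authorizations are: " + auths); // same message style as the reader above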
Use of org.apache.accumulo.core.security.Authorizations in project accumulo by apache.
The class VisibilityFilterTest, method testBadVisibility.
@Test
public void testBadVisibility() throws IOException {
  TreeMap<Key, Value> tm = new TreeMap<>();
  tm.put(new Key("r1", "cf1", "cq1", "A&"), new Value());
  SortedKeyValueIterator<Key, Value> filter = VisibilityFilter.wrap(new SortedMapIterator(tm), new Authorizations("A"), "".getBytes());
  filter.seek(new Range(), new HashSet<>(), false);
  assertFalse(filter.hasTop());
}
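The wrapped filter evaluates each entry's visibility expression against the scan authorizations; a malformed expression such as "A&" cannot even be parsed, so the entry is suppressed rather than returned, which is why hasTop() is false above. A hedged sketch of that evaluation using the core security classes (this test method is illustrative and not part of VisibilityFilterTest):

@Test
public void visibilityEvaluationSketch() throws VisibilityParseException {
  VisibilityEvaluator ve = new VisibilityEvaluator(new Authorizations("A"));
  assertTrue(ve.evaluate(new ColumnVisibility("A")));    // label "A" is held
  assertFalse(ve.evaluate(new ColumnVisibility("A&B"))); // "B" is not held
  // "A&" does not parse at all; the filter drops such entries instead of failing
  assertThrows(IllegalArgumentException.class, () -> new ColumnVisibility("A&"));
}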
Use of org.apache.accumulo.core.security.Authorizations in project accumulo by apache.
The class TableOperationsIT, method createMergeClonedTable.
@Test
public void createMergeClonedTable() throws Exception {
  String[] names = getUniqueNames(2);
  String originalTable = names[0];
  TableOperations tops = accumuloClient.tableOperations();
  TreeSet<Text> splits = Sets.newTreeSet(Arrays.asList(new Text("a"), new Text("b"), new Text("c"), new Text("d")));
  tops.create(originalTable);
  tops.addSplits(originalTable, splits);
  try (BatchWriter bw = accumuloClient.createBatchWriter(originalTable)) {
    for (Text row : splits) {
      Mutation m = new Mutation(row);
      for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 10; j++) {
          m.put(Integer.toString(i), Integer.toString(j), Integer.toString(i + j));
        }
      }
      bw.addMutation(m);
    }
  }
  String clonedTable = names[1];
  tops.clone(originalTable, clonedTable, true, null, null);
  tops.merge(clonedTable, null, new Text("b"));
  Map<String, Integer> rowCounts = new HashMap<>();
  try (Scanner s = accumuloClient.createScanner(clonedTable, new Authorizations())) {
    for (Entry<Key, Value> entry : s) {
      final Key key = entry.getKey();
      String row = key.getRow().toString();
      String cf = key.getColumnFamily().toString(), cq = key.getColumnQualifier().toString();
      String value = entry.getValue().toString();
      if (rowCounts.containsKey(row)) {
        rowCounts.put(row, rowCounts.get(row) + 1);
      } else {
        rowCounts.put(row, 1);
      }
      assertEquals(Integer.parseInt(cf) + Integer.parseInt(cq), Integer.parseInt(value));
    }
  }
  Collection<Text> clonedSplits = tops.listSplits(clonedTable);
  Set<Text> expectedSplits = Sets.newHashSet(new Text("b"), new Text("c"), new Text("d"));
  for (Text clonedSplit : clonedSplits) {
    assertTrue("Encountered unexpected split on the cloned table: " + clonedSplit, expectedSplits.remove(clonedSplit));
  }
  assertTrue("Did not find all expected splits on the cloned table: " + expectedSplits, expectedSplits.isEmpty());
}
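The scan here passes new Authorizations() (equivalent to Authorizations.EMPTY) because the mutations were written without a ColumnVisibility; with an empty authorization set only unlabeled entries are visible, which in this test is all of them. A minimal sketch of the equivalent spelling, assuming the same accumuloClient and clonedTable as above:

int count = 0;
try (Scanner s = accumuloClient.createScanner(clonedTable, Authorizations.EMPTY)) {
  for (Entry<Key, Value> entry : s) {
    count++; // unlabeled entries are returned even though no authorizations were requested
  }
}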