
Example 41 with Authorizations

Use of org.apache.accumulo.core.security.Authorizations in the Apache Accumulo project.

From the class KerberosIT, method testUserPrivilegesForTable.

@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "path provided by test")
@Test
public void testUserPrivilegesForTable() throws Exception {
    String user1 = testName.getMethodName();
    final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab");
    if (user1Keytab.exists() && !user1Keytab.delete()) {
        log.warn("Unable to delete {}", user1Keytab);
    }
    // Create a new user -- the principal name cannot contain the realm
    kdc.createPrincipal(user1Keytab, user1);
    final String qualifiedUser1 = kdc.qualifyUser(user1);
    // Log in as user1
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(qualifiedUser1, user1Keytab.getAbsolutePath());
    log.info("Logged in as {}", user1);
    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
        // Indirectly creates this user when we use it
        AccumuloClient client = mac.createAccumuloClient(qualifiedUser1, new KerberosToken());
        log.info("Created client as {}", qualifiedUser1);
        // The new user should have no system permissions
        for (SystemPermission perm : SystemPermission.values()) {
            assertFalse(client.securityOperations().hasSystemPermission(qualifiedUser1, perm));
        }
        return null;
    });
    final String table = testName.getMethodName() + "_user_table";
    final String viz = "viz";
    ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
        AccumuloClient client = mac.createAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
        client.tableOperations().create(table);
        // Give our unprivileged user permission on the table we made for them
        client.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.READ);
        client.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.WRITE);
        client.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.ALTER_TABLE);
        client.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.DROP_TABLE);
        client.securityOperations().changeUserAuthorizations(qualifiedUser1, new Authorizations(viz));
        return null;
    });
    // Switch back to the original user
    ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(qualifiedUser1, user1Keytab.getAbsolutePath());
    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
        AccumuloClient client = mac.createAccumuloClient(qualifiedUser1, new KerberosToken());
        // Make sure we can actually use the table we made
        // Write data
        final long ts = 1000L;
        try (BatchWriter bw = client.createBatchWriter(table)) {
            Mutation m = new Mutation("a");
            m.put("b", "c", new ColumnVisibility(viz.getBytes()), ts, "d");
            bw.addMutation(m);
        }
        // Compact
        client.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
        // Alter
        client.tableOperations().setProperty(table, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
        // Read (and proper authorizations)
        try (Scanner s = client.createScanner(table, new Authorizations(viz))) {
            Iterator<Entry<Key, Value>> iter = s.iterator();
            assertTrue("No results from iterator", iter.hasNext());
            Entry<Key, Value> entry = iter.next();
            assertEquals(new Key("a", "b", "c", viz, ts), entry.getKey());
            assertEquals(new Value("d"), entry.getValue());
            assertFalse("Had more results from iterator", iter.hasNext());
            return null;
        }
    });
}
Also used: AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), BatchScanner (org.apache.accumulo.core.client.BatchScanner), Scanner (org.apache.accumulo.core.client.Scanner), Authorizations (org.apache.accumulo.core.security.Authorizations), KerberosToken (org.apache.accumulo.core.client.security.tokens.KerberosToken), SystemPermission (org.apache.accumulo.core.security.SystemPermission), Entry (java.util.Map.Entry), CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig), Iterator (java.util.Iterator), Value (org.apache.accumulo.core.data.Value), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility), File (java.io.File), Key (org.apache.accumulo.core.data.Key), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test), SuppressFBWarnings (edu.umd.cs.findbugs.annotations.SuppressFBWarnings)
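
What makes this test pass is the pairing of changeUserAuthorizations with a matching Authorizations object at scan time. Below is a minimal sketch of that pattern, assuming an already-connected AccumuloClient, an existing user, and an existing table (all names here are illustrative, not from the test above):

import java.util.Map;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;

public class AuthorizationsSketch {
    // "client", "user", and "table" are assumed to exist already.
    static void writeAndReadLabeled(AccumuloClient client, String user, String table) throws Exception {
        // Grant the "viz" authorization; a scan requesting an authorization
        // the user does not hold is rejected by the security layer.
        client.securityOperations().changeUserAuthorizations(user, new Authorizations("viz"));
        // Write a cell protected by the visibility expression "viz".
        try (BatchWriter bw = client.createBatchWriter(table)) {
            Mutation m = new Mutation("row");
            m.put("cf", "cq", new ColumnVisibility("viz"), "value");
            bw.addMutation(m);
        }
        // Only a scan that passes (a subset of) the granted authorizations sees the cell.
        try (Scanner s = client.createScanner(table, new Authorizations("viz"))) {
            for (Map.Entry<Key, Value> e : s) {
                System.out.println(e.getKey() + " -> " + e.getValue());
            }
        }
    }
}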

Example 42 with Authorizations

Use of org.apache.accumulo.core.security.Authorizations in the Apache Accumulo project.

From the class AccumuloRecordReader, method initialize (the mapred variant, taking a JobConf).

/**
 * Initialize a scanner over the given input split using this task attempt configuration.
 */
public void initialize(InputSplit inSplit, JobConf job) throws IOException {
    baseSplit = (org.apache.accumulo.hadoopImpl.mapreduce.RangeInputSplit) inSplit;
    log.debug("Initializing input split: " + baseSplit);
    client = createClient(job, CLASS);
    ClientContext context = (ClientContext) client;
    Authorizations authorizations = InputConfigurator.getScanAuthorizations(CLASS, job);
    String classLoaderContext = InputConfigurator.getClassLoaderContext(CLASS, job);
    String table = baseSplit.getTableName();
    // if the table was renamed, we can still use the previous name to look up the
    // configuration, but the scanner will use the table id resolved at job setup time
    InputTableConfig tableConfig = InputConfigurator.getInputTableConfig(CLASS, job, baseSplit.getTableName());
    log.debug("Created client with user: " + context.whoami());
    log.debug("Creating scanner for table: " + table);
    log.debug("Authorizations are: " + authorizations);
    if (baseSplit instanceof BatchInputSplit) {
        BatchScanner scanner;
        BatchInputSplit multiRangeSplit = (BatchInputSplit) baseSplit;
        try {
            // Note: BatchScanner will use at most one thread per tablet, currently BatchInputSplit
            // will not span tablets
            int scanThreads = 1;
            scanner = context.createBatchScanner(baseSplit.getTableName(), authorizations, scanThreads);
            setupIterators(job, scanner, baseSplit);
            if (classLoaderContext != null) {
                scanner.setClassLoaderContext(classLoaderContext);
            }
        } catch (TableNotFoundException e) {
            throw new IOException(e);
        }
        scanner.setRanges(multiRangeSplit.getRanges());
        scannerBase = scanner;
    } else if (baseSplit instanceof RangeInputSplit) {
        split = (RangeInputSplit) baseSplit;
        Boolean isOffline = baseSplit.isOffline();
        if (isOffline == null) {
            isOffline = tableConfig.isOfflineScan();
        }
        Boolean isIsolated = baseSplit.isIsolatedScan();
        if (isIsolated == null) {
            isIsolated = tableConfig.shouldUseIsolatedScanners();
        }
        Boolean usesLocalIterators = baseSplit.usesLocalIterators();
        if (usesLocalIterators == null) {
            usesLocalIterators = tableConfig.shouldUseLocalIterators();
        }
        Scanner scanner;
        try {
            if (isOffline) {
                scanner = new OfflineScanner(context, TableId.of(baseSplit.getTableId()), authorizations);
            } else {
                scanner = new ScannerImpl(context, TableId.of(baseSplit.getTableId()), authorizations);
            }
            if (isIsolated) {
                log.info("Creating isolated scanner");
                scanner = new IsolatedScanner(scanner);
            }
            if (usesLocalIterators) {
                log.info("Using local iterators");
                scanner = new ClientSideIteratorScanner(scanner);
            }
            setupIterators(job, scanner, baseSplit);
        } catch (RuntimeException e) {
            throw new IOException(e);
        }
        scanner.setRange(baseSplit.getRange());
        scannerBase = scanner;
    } else {
        throw new IllegalArgumentException("Cannot initialize from " + baseSplit.getClass());
    }
    Collection<IteratorSetting.Column> columns = baseSplit.getFetchedColumns();
    if (columns == null) {
        columns = tableConfig.getFetchedColumns();
    }
    // set up the scanner within the bounds of this split
    for (Pair<Text, Text> c : columns) {
        if (c.getSecond() != null) {
            log.debug("Fetching column " + c.getFirst() + ":" + c.getSecond());
            scannerBase.fetchColumn(c.getFirst(), c.getSecond());
        } else {
            log.debug("Fetching column family " + c.getFirst());
            scannerBase.fetchColumnFamily(c.getFirst());
        }
    }
    SamplerConfiguration samplerConfig = baseSplit.getSamplerConfiguration();
    if (samplerConfig == null) {
        samplerConfig = tableConfig.getSamplerConfiguration();
    }
    if (samplerConfig != null) {
        scannerBase.setSamplerConfiguration(samplerConfig);
    }
    Map<String, String> executionHints = baseSplit.getExecutionHints();
    if (executionHints == null || executionHints.isEmpty()) {
        executionHints = tableConfig.getExecutionHints();
    }
    if (executionHints != null) {
        scannerBase.setExecutionHints(executionHints);
    }
    scannerIterator = scannerBase.iterator();
    numKeysRead = 0;
}
Also used: BatchScanner (org.apache.accumulo.core.client.BatchScanner), OfflineScanner (org.apache.accumulo.core.clientImpl.OfflineScanner), ClientSideIteratorScanner (org.apache.accumulo.core.client.ClientSideIteratorScanner), IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner), Scanner (org.apache.accumulo.core.client.Scanner), SamplerConfiguration (org.apache.accumulo.core.client.sample.SamplerConfiguration), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), Authorizations (org.apache.accumulo.core.security.Authorizations), ClientContext (org.apache.accumulo.core.clientImpl.ClientContext), Text (org.apache.hadoop.io.Text), IOException (java.io.IOException), ScannerImpl (org.apache.accumulo.core.clientImpl.ScannerImpl), InputTableConfig (org.apache.accumulo.hadoopImpl.mapreduce.InputTableConfig)
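
On the read side, InputConfigurator.getScanAuthorizations simply returns whatever the job submitter stored in the configuration. With the public API in the accumulo-hadoop-mapreduce module, that setup step looks roughly like the sketch below; the client.properties path and table name are placeholders, and the builder chain is the Accumulo 2.x InputFormatBuilder as I understand it:

import java.util.Properties;

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
import org.apache.hadoop.mapreduce.Job;

public class JobSetupSketch {
    static void configureJob(Job job) throws Exception {
        // Placeholder path; point this at a real client.properties file.
        Properties clientProps = Accumulo.newClientProperties()
                .from("/path/to/client.properties").build();
        // The Authorizations stored here are what the record reader later
        // retrieves through InputConfigurator.getScanAuthorizations.
        AccumuloInputFormat.configure()
                .clientProperties(clientProps)
                .table("mytable")
                .auths(new Authorizations("viz"))
                .store(job);
    }
}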

Example 43 with Authorizations

Use of org.apache.accumulo.core.security.Authorizations in the Apache Accumulo project.

From the class AccumuloRecordReader, method initialize (the mapreduce variant, taking a TaskAttemptContext).

@Override
public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IOException {
    split = (RangeInputSplit) inSplit;
    log.debug("Initializing input split: " + split);
    Configuration conf = attempt.getConfiguration();
    client = createClient(attempt, this.CLASS);
    ClientContext context = (ClientContext) client;
    Authorizations authorizations = InputConfigurator.getScanAuthorizations(CLASS, conf);
    String classLoaderContext = InputConfigurator.getClassLoaderContext(CLASS, conf);
    String table = split.getTableName();
    // if the table was renamed, we can still use the previous name to look up the
    // configuration, but the scanner will use the table id resolved at job setup time
    InputTableConfig tableConfig = InputConfigurator.getInputTableConfig(CLASS, conf, split.getTableName());
    log.debug("Creating client with user: " + client.whoami());
    log.debug("Creating scanner for table: " + table);
    log.debug("Authorizations are: " + authorizations);
    if (split instanceof BatchInputSplit) {
        BatchInputSplit batchSplit = (BatchInputSplit) split;
        BatchScanner scanner;
        try {
            // Note: BatchScanner will use at most one thread per tablet, currently BatchInputSplit
            // will not span tablets
            int scanThreads = 1;
            scanner = context.createBatchScanner(split.getTableName(), authorizations, scanThreads);
            setupIterators(attempt, scanner, split);
            if (classLoaderContext != null) {
                scanner.setClassLoaderContext(classLoaderContext);
            }
        } catch (TableNotFoundException e) {
            throw new IOException(e);
        }
        scanner.setRanges(batchSplit.getRanges());
        scannerBase = scanner;
    } else {
        Scanner scanner;
        Boolean isOffline = split.isOffline();
        if (isOffline == null) {
            isOffline = tableConfig.isOfflineScan();
        }
        Boolean isIsolated = split.isIsolatedScan();
        if (isIsolated == null) {
            isIsolated = tableConfig.shouldUseIsolatedScanners();
        }
        Boolean usesLocalIterators = split.usesLocalIterators();
        if (usesLocalIterators == null) {
            usesLocalIterators = tableConfig.shouldUseLocalIterators();
        }
        try {
            if (isOffline) {
                scanner = new OfflineScanner(context, TableId.of(split.getTableId()), authorizations);
            } else {
                // Not using public API to create scanner so that we can use table ID
                // Table ID is used in case of renames during M/R job
                scanner = new ScannerImpl(context, TableId.of(split.getTableId()), authorizations);
            }
            if (isIsolated) {
                log.info("Creating isolated scanner");
                scanner = new IsolatedScanner(scanner);
            }
            if (usesLocalIterators) {
                log.info("Using local iterators");
                scanner = new ClientSideIteratorScanner(scanner);
            }
            setupIterators(attempt, scanner, split);
        } catch (RuntimeException e) {
            throw new IOException(e);
        }
        scanner.setRange(split.getRange());
        scannerBase = scanner;
    }
    Collection<IteratorSetting.Column> columns = split.getFetchedColumns();
    if (columns == null) {
        columns = tableConfig.getFetchedColumns();
    }
    // set up the scanner within the bounds of this split
    for (Pair<Text, Text> c : columns) {
        if (c.getSecond() != null) {
            log.debug("Fetching column " + c.getFirst() + ":" + c.getSecond());
            scannerBase.fetchColumn(c.getFirst(), c.getSecond());
        } else {
            log.debug("Fetching column family " + c.getFirst());
            scannerBase.fetchColumnFamily(c.getFirst());
        }
    }
    SamplerConfiguration samplerConfig = split.getSamplerConfiguration();
    if (samplerConfig == null) {
        samplerConfig = tableConfig.getSamplerConfiguration();
    }
    if (samplerConfig != null) {
        scannerBase.setSamplerConfiguration(samplerConfig);
    }
    Map<String, String> executionHints = split.getExecutionHints();
    if (executionHints == null || executionHints.isEmpty()) {
        executionHints = tableConfig.getExecutionHints();
    }
    if (executionHints != null) {
        scannerBase.setExecutionHints(executionHints);
    }
    scannerIterator = scannerBase.iterator();
    numKeysRead = 0;
}
Also used: ClientSideIteratorScanner (org.apache.accumulo.core.client.ClientSideIteratorScanner), BatchScanner (org.apache.accumulo.core.client.BatchScanner), OfflineScanner (org.apache.accumulo.core.clientImpl.OfflineScanner), IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner), Scanner (org.apache.accumulo.core.client.Scanner), Authorizations (org.apache.accumulo.core.security.Authorizations), Configuration (org.apache.hadoop.conf.Configuration), SamplerConfiguration (org.apache.accumulo.core.client.sample.SamplerConfiguration), ClientContext (org.apache.accumulo.core.clientImpl.ClientContext), Text (org.apache.hadoop.io.Text), IOException (java.io.IOException), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), ScannerImpl (org.apache.accumulo.core.clientImpl.ScannerImpl)
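
In both record readers the Authorizations have survived a round trip through the Hadoop configuration, which depends on the class's serialized form. A small self-contained sketch of that round trip, using only the Authorizations API itself:

import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.accumulo.core.security.Authorizations;

public class AuthorizationsRoundTripSketch {
    public static void main(String[] args) {
        Authorizations auths = new Authorizations("viz", "secret");
        // serialize() yields a String form suitable for storing in a job
        // configuration; the byte[] constructor reverses it.
        String serialized = auths.serialize();
        Authorizations roundTripped = new Authorizations(serialized.getBytes(UTF_8));
        System.out.println(auths.equals(roundTripped)); // expected: true
    }
}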

Example 44 with Authorizations

Use of org.apache.accumulo.core.security.Authorizations in the Apache Accumulo project.

From the class VisibilityFilterTest, method testBadVisibility.

@Test
public void testBadVisibility() throws IOException {
    TreeMap<Key, Value> tm = new TreeMap<>();
    tm.put(new Key("r1", "cf1", "cq1", "A&"), new Value());
    SortedKeyValueIterator<Key, Value> filter = VisibilityFilter.wrap(new SortedMapIterator(tm), new Authorizations("A"), "".getBytes());
    filter.seek(new Range(), new HashSet<>(), false);
    assertFalse(filter.hasTop());
}
Also used: Authorizations (org.apache.accumulo.core.security.Authorizations), Value (org.apache.accumulo.core.data.Value), TreeMap (java.util.TreeMap), SortedMapIterator (org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator), Range (org.apache.accumulo.core.data.Range), Key (org.apache.accumulo.core.data.Key), Test (org.junit.jupiter.api.Test)
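
Note that the filter drops the entry because "A&" is not a parseable visibility expression, not because the scan authorizations are insufficient. The difference shows up when expressions are evaluated directly; a minimal sketch, assuming the VisibilityEvaluator API from accumulo-core:

import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.core.security.VisibilityEvaluator;

public class VisibilitySketch {
    public static void main(String[] args) throws Exception {
        VisibilityEvaluator ve = new VisibilityEvaluator(new Authorizations("A"));
        System.out.println(ve.evaluate(new ColumnVisibility("A")));   // true: "A" is held
        System.out.println(ve.evaluate(new ColumnVisibility("A&B"))); // false: "B" is not held
        // A malformed expression such as "A&" fails at parse time instead.
        try {
            new ColumnVisibility("A&");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}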

Example 45 with Authorizations

Use of org.apache.accumulo.core.security.Authorizations in the Apache Accumulo project.

From the class TableOperationsIT, method createMergeClonedTable.

@Test
public void createMergeClonedTable() throws Exception {
    String[] names = getUniqueNames(2);
    String originalTable = names[0];
    TableOperations tops = accumuloClient.tableOperations();
    TreeSet<Text> splits = Sets.newTreeSet(Arrays.asList(new Text("a"), new Text("b"), new Text("c"), new Text("d")));
    tops.create(originalTable);
    tops.addSplits(originalTable, splits);
    try (BatchWriter bw = accumuloClient.createBatchWriter(originalTable)) {
        for (Text row : splits) {
            Mutation m = new Mutation(row);
            for (int i = 0; i < 10; i++) {
                for (int j = 0; j < 10; j++) {
                    m.put(Integer.toString(i), Integer.toString(j), Integer.toString(i + j));
                }
            }
            bw.addMutation(m);
        }
    }
    String clonedTable = names[1];
    tops.clone(originalTable, clonedTable, true, null, null);
    tops.merge(clonedTable, null, new Text("b"));
    Map<String, Integer> rowCounts = new HashMap<>();
    try (Scanner s = accumuloClient.createScanner(clonedTable, new Authorizations())) {
        for (Entry<Key, Value> entry : s) {
            final Key key = entry.getKey();
            String row = key.getRow().toString();
            String cf = key.getColumnFamily().toString();
            String cq = key.getColumnQualifier().toString();
            String value = entry.getValue().toString();
            rowCounts.merge(row, 1, Integer::sum);
            assertEquals(Integer.parseInt(cf) + Integer.parseInt(cq), Integer.parseInt(value));
        }
    }
    Collection<Text> clonedSplits = tops.listSplits(clonedTable);
    Set<Text> expectedSplits = Sets.newHashSet(new Text("b"), new Text("c"), new Text("d"));
    for (Text clonedSplit : clonedSplits) {
        assertTrue("Encountered unexpected split on the cloned table: " + clonedSplit, expectedSplits.remove(clonedSplit));
    }
    assertTrue("Did not find all expected splits on the cloned table: " + expectedSplits, expectedSplits.isEmpty());
}
Also used: Scanner (org.apache.accumulo.core.client.Scanner), Authorizations (org.apache.accumulo.core.security.Authorizations), HashMap (java.util.HashMap), Text (org.apache.hadoop.io.Text), DefaultKeySizeConstraint (org.apache.accumulo.core.data.constraints.DefaultKeySizeConstraint), TableOperations (org.apache.accumulo.core.client.admin.TableOperations), Value (org.apache.accumulo.core.data.Value), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), PartialKey (org.apache.accumulo.core.data.PartialKey), Test (org.junit.Test)
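
The scan above passes new Authorizations(), the empty set, which suffices because every cell was written without a visibility label; Authorizations.EMPTY is the equivalent shared constant. A brief sketch of that behavior, assuming an existing client and table (names are illustrative):

import java.util.Map;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

public class EmptyAuthsSketch {
    // "client" and "table" are assumed to exist already.
    static long countVisibleEntries(AccumuloClient client, String table) throws Exception {
        // Empty authorizations see unlabeled cells only; any cell carrying a
        // visibility expression is filtered out server-side.
        try (Scanner s = client.createScanner(table, Authorizations.EMPTY)) {
            long count = 0;
            for (Map.Entry<Key, Value> e : s) {
                count++;
            }
            return count;
        }
    }
}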

Aggregations

Authorizations (org.apache.accumulo.core.security.Authorizations): 242
Test (org.junit.Test): 118
Scanner (org.apache.accumulo.core.client.Scanner): 117
Key (org.apache.accumulo.core.data.Key): 113
Value (org.apache.accumulo.core.data.Value): 112
Text (org.apache.hadoop.io.Text): 97
Mutation (org.apache.accumulo.core.data.Mutation): 74
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 70
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 68
Range (org.apache.accumulo.core.data.Range): 59
Map (java.util.Map): 53
Entry (java.util.Map.Entry): 47
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken): 39
Connector (org.apache.accumulo.core.client.Connector): 34
ArrayList (java.util.ArrayList): 31
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 30
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 29
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 28
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 26
Configuration (org.apache.hadoop.conf.Configuration): 24