Use of org.apache.accumulo.core.security.Authorizations in project Gaffer by gchq.
From the class CoreKeyGroupByAggregatorIteratorTest, method testAggregatingSinglePropertySet.
public void testAggregatingSinglePropertySet(final AccumuloStore store, final AccumuloElementConverter elementConverter) throws StoreException, AccumuloElementConversionException {
    String visibilityString = "public";
    try {
        // Create edge
        final Edge edge = new Edge(TestGroups.EDGE);
        edge.setSource("1");
        edge.setDestination("2");
        edge.setDirected(true);
        edge.putProperty(AccumuloPropertyNames.COLUMN_QUALIFIER, 8);
        edge.putProperty(AccumuloPropertyNames.COUNT, 1);
        final Properties properties1 = new Properties();
        properties1.put(AccumuloPropertyNames.COUNT, 1);
        // Accumulo key
        final Key key = elementConverter.getKeysFromEdge(edge).getFirst();
        // Accumulo values
        final Value value1 = elementConverter.getValueFromProperties(TestGroups.EDGE, properties1);
        // Create mutation
        final Mutation m1 = new Mutation(key.getRow());
        m1.put(key.getColumnFamily(), key.getColumnQualifier(), new ColumnVisibility(key.getColumnVisibility()), key.getTimestamp(), value1);
        // Write mutation
        final BatchWriterConfig writerConfig = new BatchWriterConfig();
        writerConfig.setMaxMemory(1000000L);
        writerConfig.setMaxLatency(1000L, TimeUnit.MILLISECONDS);
        writerConfig.setMaxWriteThreads(1);
        final BatchWriter writer = store.getConnection().createBatchWriter(store.getProperties().getTable(), writerConfig);
        writer.addMutation(m1);
        writer.close();
        final Edge expectedEdge = new Edge(TestGroups.EDGE);
        expectedEdge.setSource("1");
        expectedEdge.setDestination("2");
        expectedEdge.setDirected(true);
        expectedEdge.putProperty(AccumuloPropertyNames.COLUMN_QUALIFIER, 8);
        expectedEdge.putProperty(AccumuloPropertyNames.COUNT, 1);
        // Read data back and check we get one merged element
        final Authorizations authorizations = new Authorizations(visibilityString);
        final Scanner scanner = store.getConnection().createScanner(store.getProperties().getTable(), authorizations);
        final IteratorSetting iteratorSetting = new IteratorSettingBuilder(AccumuloStoreConstants.COLUMN_QUALIFIER_AGGREGATOR_ITERATOR_PRIORITY,
                "KeyCombiner", CoreKeyGroupByAggregatorIterator.class)
                .all()
                .view(new View.Builder()
                        .edge(TestGroups.EDGE, new ViewElementDefinition.Builder()
                                .groupBy()
                                .build())
                        .build())
                .schema(store.getSchema())
                .keyConverter(store.getKeyPackage().getKeyConverter())
                .build();
        scanner.addScanIterator(iteratorSetting);
        final Iterator<Entry<Key, Value>> it = scanner.iterator();
        final Entry<Key, Value> entry = it.next();
        final Element readEdge = elementConverter.getFullElement(entry.getKey(), entry.getValue());
        assertEquals(expectedEdge, readEdge);
        assertEquals(8, readEdge.getProperty(AccumuloPropertyNames.COLUMN_QUALIFIER));
        assertEquals(1, readEdge.getProperty(AccumuloPropertyNames.COUNT));
        // Check no more entries
        if (it.hasNext()) {
            fail("Additional row found.");
        }
    } catch (AccumuloException | TableNotFoundException e) {
        fail(this.getClass().getSimpleName() + " failed with exception: " + e);
    }
}
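The scan above only returns the edge because its column visibility ("public") is satisfied by the Authorizations passed to the scanner. A minimal, standalone sketch of that relationship using Accumulo's VisibilityEvaluator; this snippet is illustrative and not part of the Gaffer test:

import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.core.security.VisibilityEvaluator;
import org.apache.accumulo.core.security.VisibilityParseException;

public final class VisibilityCheckExample {
    public static void main(final String[] args) throws VisibilityParseException {
        // The scanner in the test is created with these authorizations...
        final Authorizations authorizations = new Authorizations("public");
        // ...so it can only see entries whose column visibility they satisfy.
        final VisibilityEvaluator evaluator = new VisibilityEvaluator(authorizations);
        System.out.println(evaluator.evaluate(new ColumnVisibility("public")));  // true
        System.out.println(evaluator.evaluate(new ColumnVisibility("private"))); // false
        // An empty visibility is readable by any scanner, whatever its authorizations.
        System.out.println(evaluator.evaluate(new ColumnVisibility()));          // true
    }
}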
Use of org.apache.accumulo.core.security.Authorizations in project Gaffer by gchq.
From the class AccumuloStore, method updateConfiguration.
/**
* Updates a Hadoop {@link Configuration} with information needed to connect to the Accumulo store. It adds
* iterators to apply the provided {@link View}. This method will be used by operations that run MapReduce
* or Spark jobs against the Accumulo store.
*
* @param conf A {@link Configuration} to be updated.
* @param view The {@link View} to be applied.
* @param user The {@link User} to be used.
* @throws StoreException if there is a failure to connect to Accumulo or a problem setting the iterators.
*/
public void updateConfiguration(final Configuration conf, final View view, final User user) throws StoreException {
    try {
        // Table name
        InputConfigurator.setInputTableName(AccumuloInputFormat.class, conf, getProperties().getTable());
        // User
        addUserToConfiguration(conf);
        // Authorizations
        Authorizations authorisations;
        if (null != user && null != user.getDataAuths()) {
            authorisations = new Authorizations(user.getDataAuths().toArray(new String[user.getDataAuths().size()]));
        } else {
            authorisations = new Authorizations();
        }
        InputConfigurator.setScanAuthorizations(AccumuloInputFormat.class, conf, authorisations);
        // Zookeeper
        addZookeeperToConfiguration(conf);
        // Add key package, schema and view to conf
        conf.set(ElementInputFormat.KEY_PACKAGE, getProperties().getKeyPackageClass());
        conf.set(ElementInputFormat.SCHEMA, new String(getSchema().toCompactJson(), CommonConstants.UTF_8));
        conf.set(ElementInputFormat.VIEW, new String(view.toCompactJson(), CommonConstants.UTF_8));
        // Add iterators that depend on the view
        if (view.hasGroups()) {
            IteratorSetting elementPreFilter = getKeyPackage().getIteratorFactory().getElementPreAggregationFilterIteratorSetting(view, this);
            IteratorSetting elementPostFilter = getKeyPackage().getIteratorFactory().getElementPostAggregationFilterIteratorSetting(view, this);
            InputConfigurator.addIterator(AccumuloInputFormat.class, conf, elementPostFilter);
            InputConfigurator.addIterator(AccumuloInputFormat.class, conf, elementPreFilter);
        }
    } catch (final AccumuloSecurityException | IteratorSettingException | UnsupportedEncodingException e) {
        throw new StoreException(e);
    }
}
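A rough usage sketch of how a caller might prepare a Configuration with this method before launching a job. The store is assumed to be initialised elsewhere, and the specific edge group and data auth are invented for illustration; the View and User builders are Gaffer API, but their exact usage here is an assumption rather than code taken from the project:

// Minimal sketch, assuming an AccumuloStore "store" initialised elsewhere.
final Configuration conf = new Configuration();
final View view = new View.Builder()
        .edge(TestGroups.EDGE)          // hypothetical: read one edge group
        .build();
final User user = new User.Builder()
        .dataAuth("public")             // hypothetical data authorisation
        .build();
store.updateConfiguration(conf, view, user);
// conf can now be handed to a MapReduce Job (or a Spark newAPIHadoopRDD call)
// that reads elements through Gaffer's ElementInputFormat.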
Use of org.apache.accumulo.core.security.Authorizations in project presto by prestodb.
From the class TestIndexer, method testMutationIndex.
@Test
public void testMutationIndex() throws Exception {
    Instance inst = new MockInstance();
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create(table.getFullTableName());
    conn.tableOperations().create(table.getIndexTableName());
    conn.tableOperations().create(table.getMetricsTableName());
    for (IteratorSetting s : Indexer.getMetricIterators(table)) {
        conn.tableOperations().attachIterator(table.getMetricsTableName(), s);
    }
    Indexer indexer = new Indexer(conn, new Authorizations(), table, new BatchWriterConfig());
    indexer.index(m1);
    indexer.flush();
    // Scan the index table and check the entries written for the first mutation
    Scanner scan = conn.createScanner(table.getIndexTableName(), new Authorizations());
    scan.setRange(new Range());
    Iterator<Entry<Key, Value>> iter = scan.iterator();
    assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row1", "");
    assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row1", "");
    assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "row1", "");
    assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "row1", "");
    assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row1", "");
    assertFalse(iter.hasNext());
    scan.close();
    // Check the metrics table cardinalities after the first mutation
    scan = conn.createScanner(table.getMetricsTableName(), new Authorizations());
    scan.setRange(new Range());
    iter = scan.iterator();
    assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "___card___", "1");
    assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___card___", "1");
    assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___first_row___", "row1");
    assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___last_row___", "row1");
    assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "___card___", "1");
    assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "___card___", "1");
    assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "___card___", "1");
    assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "___card___", "1");
    assertFalse(iter.hasNext());
    scan.close();
    // Index the second mutation and re-check both tables
    indexer.index(m2);
    indexer.close();
    scan = conn.createScanner(table.getIndexTableName(), new Authorizations());
    scan.setRange(new Range());
    iter = scan.iterator();
    assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row1", "");
    assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row2", "");
    assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row1", "");
    assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row2", "");
    assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "row1", "");
    assertKeyValuePair(iter.next(), M2_FNAME_VALUE, "cf_firstname", "row2", "");
    assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "row1", "");
    assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row1", "");
    assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row2", "");
    assertKeyValuePair(iter.next(), bytes("mno"), "cf_arr", "row2", "");
    assertFalse(iter.hasNext());
    scan.close();
    scan = conn.createScanner(table.getMetricsTableName(), new Authorizations());
    scan.setRange(new Range());
    iter = scan.iterator();
    assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "___card___", "2");
    assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___card___", "2");
    assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___first_row___", "row1");
    assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___last_row___", "row2");
    assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "___card___", "2");
    assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "___card___", "1");
    assertKeyValuePair(iter.next(), M2_FNAME_VALUE, "cf_firstname", "___card___", "1");
    assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "___card___", "1");
    assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "___card___", "2");
    assertKeyValuePair(iter.next(), bytes("mno"), "cf_arr", "___card___", "1");
    assertFalse(iter.hasNext());
    scan.close();
}
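The assertKeyValuePair helper is defined elsewhere in TestIndexer and is not shown above. A plausible sketch of it, assuming it compares the row, column family, column qualifier and value of the next index entry; the real implementation may differ in detail:

// Hypothetical reconstruction of the helper used above (not the actual Presto code).
// Key/Value are Accumulo types and Text is org.apache.hadoop.io.Text.
private static void assertKeyValuePair(Entry<Key, Value> entry, byte[] row, String cf, String cq, String value) {
    assertEquals(new Text(row), entry.getKey().getRow());
    assertEquals(cf, entry.getKey().getColumnFamily().toString());
    assertEquals(cq, entry.getKey().getColumnQualifier().toString());
    assertEquals(value, entry.getValue().toString());
}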
Use of org.apache.accumulo.core.security.Authorizations in project presto by prestodb.
From the class AccumuloClient, method getTabletSplits.
/**
* Fetches the TabletSplitMetadata for a query against an Accumulo table.
* <p>
 * Splits on row ID ranges, applies the secondary index where enabled, and prunes columns, among other
 * optimizations; this is the central method for planning scans against an Accumulo table.
*
* @param session Current session
* @param schema Schema name
 * @param table Table name
* @param rowIdDomain Domain for the row ID
* @param constraints Column constraints for the query
* @param serializer Instance of a row serializer
* @return List of TabletSplitMetadata objects for Presto
*/
public List<TabletSplitMetadata> getTabletSplits(
        ConnectorSession session,
        String schema,
        String table,
        Optional<Domain> rowIdDomain,
        List<AccumuloColumnConstraint> constraints,
        AccumuloRowSerializer serializer) {
    try {
        String tableName = AccumuloTable.getFullTableName(schema, table);
        LOG.debug("Getting tablet splits for table %s", tableName);
        // Get the initial Range based on the row ID domain
        Collection<Range> rowIdRanges = getRangesFromDomain(rowIdDomain, serializer);
        List<TabletSplitMetadata> tabletSplits = new ArrayList<>();
        // Use the secondary index, if enabled
        if (AccumuloSessionProperties.isOptimizeIndexEnabled(session)) {
            // Get the scan authorizations to query the index
            Authorizations auths = getScanAuthorizations(session, schema, table);
            // If this returns true, return the tablet splits to Presto
            if (indexLookup.applyIndex(schema, table, session, constraints, rowIdRanges, tabletSplits, serializer, auths)) {
                return tabletSplits;
            }
        }
        // If we can't (or shouldn't) use the secondary index, we will just use the Range from the row ID domain
        // Split the ranges on tablet boundaries, if enabled
        Collection<Range> splitRanges;
        if (AccumuloSessionProperties.isOptimizeSplitRangesEnabled(session)) {
            splitRanges = splitByTabletBoundaries(tableName, rowIdRanges);
        } else {
            // if not enabled, just use the same collection
            splitRanges = rowIdRanges;
        }
        // Create TabletSplitMetadata objects for each range
        boolean fetchTabletLocations = AccumuloSessionProperties.isOptimizeLocalityEnabled(session);
        LOG.debug("Fetching tablet locations: %s", fetchTabletLocations);
        for (Range range : splitRanges) {
            // If locality is enabled, then fetch the tablet location
            if (fetchTabletLocations) {
                tabletSplits.add(new TabletSplitMetadata(getTabletLocation(tableName, range.getStartKey()), ImmutableList.of(range)));
            } else {
                // else, just use the default location
                tabletSplits.add(new TabletSplitMetadata(Optional.empty(), ImmutableList.of(range)));
            }
        }
        // Log the split count and return the tablet splits
        LOG.debug("Number of splits for table %s is %d with %d ranges", tableName, tabletSplits.size(), splitRanges.size());
        return tabletSplits;
    } catch (Exception e) {
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to get splits from Accumulo", e);
    }
}
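The helpers getRangesFromDomain, splitByTabletBoundaries and getTabletLocation are private to AccumuloClient and are not shown here. To illustrate the tablet-boundary optimisation the method relies on, the following is a simplified, hypothetical sketch of the splitting step; it is not the Presto implementation (which works from the class's own Connector field and handles more edge cases), just the core idea of clipping each query range against the table's split points so every emitted range falls inside a single tablet:

// Hypothetical sketch only; the method name and signature are invented.
private static Collection<Range> splitByTabletBoundariesSketch(Connector connector, String tableName, Collection<Range> ranges)
        throws TableNotFoundException, AccumuloSecurityException, AccumuloException {
    // listSplits returns the end rows of all but the last tablet
    Collection<Text> endRows = connector.tableOperations().listSplits(tableName);
    List<Range> clipped = new ArrayList<>();
    for (Range range : ranges) {
        Text previousEndRow = null;
        for (Text endRow : endRows) {
            Range tablet = new Range(previousEndRow, false, endRow, true);
            Range overlap = tablet.clip(range, true); // null when the range misses this tablet
            if (overlap != null) {
                clipped.add(overlap);
            }
            previousEndRow = endRow;
        }
        // The last tablet runs from the final split point to +infinity
        Range overlap = new Range(previousEndRow, false, null, true).clip(range, true);
        if (overlap != null) {
            clipped.add(overlap);
        }
    }
    return clipped;
}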
Use of org.apache.accumulo.core.security.Authorizations in project presto by prestodb.
From the class AccumuloRecordSet, method getScanAuthorizations.
/**
* Gets the scanner authorizations to use for scanning tables.
* <p>
* In order of priority: session username authorizations, then table property, then the default connector auths.
*
* @param session Current session
* @param split Accumulo split
* @param connector Accumulo connector
* @param username Accumulo username
* @return Scan authorizations
* @throws AccumuloException If a generic Accumulo error occurs
* @throws AccumuloSecurityException If a security exception occurs
*/
private static Authorizations getScanAuthorizations(ConnectorSession session, AccumuloSplit split, Connector connector, String username) throws AccumuloException, AccumuloSecurityException {
    String sessionScanUser = AccumuloSessionProperties.getScanUsername(session);
    if (sessionScanUser != null) {
        Authorizations scanAuths = connector.securityOperations().getUserAuthorizations(sessionScanUser);
        LOG.debug("Using session scanner auths for user %s: %s", sessionScanUser, scanAuths);
        return scanAuths;
    }
    Optional<String> scanAuths = split.getScanAuthorizations();
    if (scanAuths.isPresent()) {
        Authorizations auths = new Authorizations(Iterables.toArray(COMMA_SPLITTER.split(scanAuths.get()), String.class));
        LOG.debug("scan_auths table property set: %s", auths);
        return auths;
    } else {
        Authorizations auths = connector.securityOperations().getUserAuthorizations(username);
        LOG.debug("scan_auths table property not set, using user auths: %s", auths);
        return auths;
    }
}
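COMMA_SPLITTER is a constant of the enclosing class and is not shown above. It is presumably a Guava Splitter along these lines; this is an assumption, not code copied from the Presto source:

// Assumed definition of the splitter used to parse the comma-separated
// scan_auths table property; the exact trimming behaviour is a guess.
private static final Splitter COMMA_SPLITTER = Splitter.on(',').omitEmptyStrings().trimResults();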