use of org.apache.accumulo.core.security.Authorizations in project incubator-rya by apache.
the class RyaGiraphUtils method initializeAccumuloInputFormat.
public static void initializeAccumuloInputFormat(Configuration conf) {
    // get accumulo connection information
    boolean mock = MRUtils.getACMock(conf, false);
    String zk = MRUtils.getACZK(conf);
    String instance = MRUtils.getACInstance(conf);
    String userName = MRUtils.getACUserName(conf);
    String pwd = MRUtils.getACPwd(conf);
    String tablePrefix = MRUtils.getTablePrefix(conf);
    TABLE_LAYOUT rdfTableLayout = MRUtils.getTableLayout(conf, TABLE_LAYOUT.SPO);
    String authString = conf.get(MRUtils.AC_AUTH_PROP);
    Authorizations authorizations;
    if (authString != null && !authString.isEmpty()) {
        authorizations = new Authorizations(authString.split(","));
        // for consistency
        conf.set(ConfigUtils.CLOUDBASE_AUTHS, authString);
    } else {
        authorizations = AccumuloRdfConstants.ALL_AUTHORIZATIONS;
    }
    // set up the accumulo input format so the job knows which table, instance, and authorizations to use
    try {
        Job job = new Job(conf);
        AccumuloInputFormat.setConnectorInfo(job, userName, new PasswordToken(pwd));
        String tableName = RdfCloudTripleStoreUtils.layoutPrefixToTable(rdfTableLayout, tablePrefix);
        AccumuloInputFormat.setInputTableName(job, tableName);
        AccumuloInputFormat.setScanAuthorizations(job, authorizations);
        if (mock) {
            AccumuloInputFormat.setMockInstance(job, instance);
        } else {
            ClientConfiguration clientConfig = ClientConfiguration.loadDefault().withInstance(instance).withZkHosts(zk);
            AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
        }
    } catch (IOException | AccumuloSecurityException e) {
        // TODO better exception handling here
        e.printStackTrace();
    }
}
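A caller is expected to have put the MRUtils connection properties on the Configuration before invoking this utility. A minimal driver sketch follows; the values are illustrative, the property keys for instance, username, password, auths, and table prefix are the ones referenced above, and MRUtils.AC_ZK_PROP is assumed to be the key the getACZK accessor reads.

// Hypothetical driver: populate connection properties, then configure the input format.
Configuration conf = new Configuration();
conf.set(MRUtils.AC_INSTANCE_PROP, "rya-instance");   // Accumulo instance name
conf.set(MRUtils.AC_USERNAME_PROP, "root");           // Accumulo user
conf.set(MRUtils.AC_PWD_PROP, "secret");              // Accumulo password
conf.set(MRUtils.AC_ZK_PROP, "zoo1:2181");            // assumed constant name for the ZooKeeper hosts key
conf.set(MRUtils.AC_AUTH_PROP, "U,FOUO");             // comma-separated scan authorizations
conf.set(MRUtils.TABLE_PREFIX_PROPERTY, "rya_");      // Rya table prefix
RyaGiraphUtils.initializeAccumuloInputFormat(conf);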
use of org.apache.accumulo.core.security.Authorizations in project incubator-rya by apache.
the class PcjTables method getPcjMetadata.
/**
 * Fetch the {@link PcjMetadata} from an Accumulo table.
 * <p>
 * This method assumes the PCJ table has already been created.
 *
 * @param accumuloConn - A connection to the Accumulo instance that hosts the PCJ table. (not null)
 * @param pcjTableName - The name of the table that will be searched. (not null)
 * @return The PCJ metadata that has been stored in the PCJ table.
 * @throws PCJStorageException The PCJ table does not exist.
 */
public PcjMetadata getPcjMetadata(final Connector accumuloConn, final String pcjTableName) throws PCJStorageException {
    checkNotNull(accumuloConn);
    checkNotNull(pcjTableName);
    Scanner scanner = null;
    try {
        // Create an Accumulo scanner that iterates through the metadata entries.
        scanner = accumuloConn.createScanner(pcjTableName, new Authorizations());
        final Iterator<Entry<Key, Value>> entries = scanner.iterator();
        // No metadata has been stored in the table yet.
        if (!entries.hasNext()) {
            throw new PCJStorageException("Could not find any PCJ metadata in the table named: " + pcjTableName);
        }
        // Fetch the metadata from the entries. Assuming they all have the same cardinality and sparql query.
        String sparql = null;
        Long cardinality = null;
        final Set<VariableOrder> varOrders = new HashSet<>();
        while (entries.hasNext()) {
            final Entry<Key, Value> entry = entries.next();
            final Text columnQualifier = entry.getKey().getColumnQualifier();
            final byte[] value = entry.getValue().get();
            if (columnQualifier.equals(PCJ_METADATA_SPARQL_QUERY)) {
                sparql = stringLexicoder.decode(value);
            } else if (columnQualifier.equals(PCJ_METADATA_CARDINALITY)) {
                cardinality = longLexicoder.decode(value);
            } else if (columnQualifier.equals(PCJ_METADATA_VARIABLE_ORDERS)) {
                for (final String varOrderStr : listLexicoder.decode(value)) {
                    varOrders.add(new VariableOrder(varOrderStr));
                }
            }
        }
        return new PcjMetadata(sparql, cardinality, varOrders);
    } catch (final TableNotFoundException e) {
        throw new PCJStorageException("Could not add results to a PCJ because the PCJ table does not exist.", e);
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
}
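A minimal sketch of looking up the metadata for an existing PCJ table, assuming the usual PcjMetadata accessors (getSparql, getCardinality) and with exception handling omitted; the instance, host, credential, and table name values are illustrative.

// Illustrative lookup of PCJ metadata over a ZooKeeper-backed Accumulo connection.
Connector accumuloConn = new ZooKeeperInstance("rya-instance", "zoo1:2181")
        .getConnector("root", new PasswordToken("secret"));
PcjTables pcjTables = new PcjTables();
PcjMetadata metadata = pcjTables.getPcjMetadata(accumuloConn, "rya_INDEX_QUERY_1");
System.out.println(metadata.getSparql() + " (cardinality: " + metadata.getCardinality() + ")");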
use of org.apache.accumulo.core.security.Authorizations in project incubator-rya by apache.
the class AccumuloStorage method setLocationFromUriParts.
protected void setLocationFromUriParts(final String[] urlParts) {
    String columns = "";
    if (urlParts.length > 1) {
        for (final String param : urlParts[1].split("&")) {
            final String[] pair = param.split("=");
            if (pair[0].equals("instance")) {
                inst = pair[1];
            } else if (pair[0].equals("user")) {
                user = pair[1];
            } else if (pair[0].equals("password")) {
                userP = pair[1];
            } else if (pair[0].equals("zookeepers")) {
                zookeepers = pair[1];
            } else if (pair[0].equals("auths")) {
                auths = pair[1];
            } else if (pair[0].equals("columns")) {
                columns = pair[1];
            } else if (pair[0].equals("range")) {
                final String[] r = pair[1].split("\\|");
                if (r.length == 2) {
                    addRange(new Range(r[0], r[1]));
                } else {
                    addRange(new Range(r[0]));
                }
            } else if (pair[0].equals("mock")) {
                this.mock = Boolean.parseBoolean(pair[1]);
            }
            addLocationFromUriPart(pair);
        }
    }
    final String[] parts = urlParts[0].split("/+");
    table = parts[1];
    tableName = new Text(table);
    if (auths == null || auths.equals("")) {
        authorizations = new Authorizations();
    } else {
        authorizations = new Authorizations(auths.split(","));
    }
    if (!columns.equals("")) {
        for (final String cfCq : columns.split(",")) {
            if (cfCq.contains("|")) {
                final String[] c = cfCq.split("\\|");
                final String cf = c[0];
                final String cq = c[1];
                addColumnPair(cf, cq);
            } else {
                addColumnPair(cfCq, null);
            }
        }
    }
}
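For reference, the key/value pairs recognized by the loop above correspond to a Pig location string of roughly the following shape; the accumulo:// scheme and the split of the string into urlParts are assumed to be handled by the calling setLocation code, and all values here are illustrative.

// Illustrative location string: urlParts[0] carries the table path, urlParts[1] the query string.
String location = "accumulo://rya_spo"
        + "?instance=rya-instance&user=root&password=secret"
        + "&zookeepers=zoo1:2181&auths=U,FOUO"
        + "&columns=cf1|cq1,cf2&range=startRow|endRow&mock=false";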
use of org.apache.accumulo.core.security.Authorizations in project incubator-rya by apache.
the class GraphXGraphGenerator method getVertexRDD.
public RDD<Tuple2<Object, RyaTypeWritable>> getVertexRDD(SparkContext sc, Configuration conf) throws IOException, AccumuloSecurityException {
    // Load configuration parameters
    zk = MRUtils.getACZK(conf);
    instance = MRUtils.getACInstance(conf);
    userName = MRUtils.getACUserName(conf);
    pwd = MRUtils.getACPwd(conf);
    mock = MRUtils.getACMock(conf, false);
    tablePrefix = MRUtils.getTablePrefix(conf);
    // Set authorizations if specified
    String authString = conf.get(MRUtils.AC_AUTH_PROP);
    if (authString != null && !authString.isEmpty()) {
        authorizations = new Authorizations(authString.split(","));
        // for consistency
        conf.set(ConfigUtils.CLOUDBASE_AUTHS, authString);
    } else {
        authorizations = AccumuloRdfConstants.ALL_AUTHORIZATIONS;
    }
    // Set table prefix to the default if not set
    if (tablePrefix == null) {
        tablePrefix = RdfCloudTripleStoreConstants.TBL_PRFX_DEF;
        MRUtils.setTablePrefix(conf, tablePrefix);
    }
    // Check for required configuration parameters
    Preconditions.checkNotNull(instance, "Accumulo instance name [" + MRUtils.AC_INSTANCE_PROP + "] not set.");
    Preconditions.checkNotNull(userName, "Accumulo username [" + MRUtils.AC_USERNAME_PROP + "] not set.");
    Preconditions.checkNotNull(pwd, "Accumulo password [" + MRUtils.AC_PWD_PROP + "] not set.");
    Preconditions.checkNotNull(tablePrefix, "Table prefix [" + MRUtils.TABLE_PREFIX_PROPERTY + "] not set.");
    RdfCloudTripleStoreConstants.prefixTables(tablePrefix);
    // for consistency
    if (!mock)
        conf.set(ConfigUtils.CLOUDBASE_ZOOKEEPERS, zk);
    // Ensure consistency between alternative configuration properties
    conf.set(ConfigUtils.CLOUDBASE_INSTANCE, instance);
    conf.set(ConfigUtils.CLOUDBASE_USER, userName);
    conf.set(ConfigUtils.CLOUDBASE_PASSWORD, pwd);
    conf.setBoolean(ConfigUtils.USE_MOCK_INSTANCE, mock);
    conf.set(RdfCloudTripleStoreConfiguration.CONF_TBL_PREFIX, tablePrefix);
    Job job = Job.getInstance(conf, sc.appName());
    ClientConfiguration clientConfig = new ClientConfiguration().with(ClientProperty.INSTANCE_NAME, instance).with(ClientProperty.INSTANCE_ZK_HOST, zk);
    GraphXInputFormat.setInputTableName(job, EntityCentricIndex.getTableName(conf));
    GraphXInputFormat.setConnectorInfo(job, userName, new PasswordToken(pwd));
    GraphXInputFormat.setZooKeeperInstance(job, clientConfig);
    GraphXInputFormat.setScanAuthorizations(job, authorizations);
    return sc.newAPIHadoopRDD(job.getConfiguration(), GraphXInputFormat.class, Object.class, RyaTypeWritable.class);
}
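A minimal sketch of obtaining the vertex RDD from a Spark driver, assuming the MRUtils connection properties have already been set on the Configuration as in the earlier snippets and that GraphXGraphGenerator has a no-argument constructor; names and values are illustrative.

// Illustrative driver code: build the vertex RDD from the entity-centric index table.
SparkContext sc = new SparkContext(new SparkConf().setAppName("rya-graphx"));
Configuration conf = new Configuration();
// ... set MRUtils.AC_* connection properties as shown for initializeAccumuloInputFormat ...
GraphXGraphGenerator generator = new GraphXGraphGenerator();
RDD<Tuple2<Object, RyaTypeWritable>> vertexRDD = generator.getVertexRDD(sc, conf);
System.out.println("vertex count: " + vertexRDD.count());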
use of org.apache.accumulo.core.security.Authorizations in project incubator-rya by apache.
the class GraphXGraphGenerator method getEdgeRDD.
public RDD<Tuple2<Object, Edge>> getEdgeRDD(SparkContext sc, Configuration conf) throws IOException, AccumuloSecurityException {
    // Load configuration parameters
    zk = MRUtils.getACZK(conf);
    instance = MRUtils.getACInstance(conf);
    userName = MRUtils.getACUserName(conf);
    pwd = MRUtils.getACPwd(conf);
    mock = MRUtils.getACMock(conf, false);
    tablePrefix = MRUtils.getTablePrefix(conf);
    // Set authorizations if specified
    String authString = conf.get(MRUtils.AC_AUTH_PROP);
    if (authString != null && !authString.isEmpty()) {
        authorizations = new Authorizations(authString.split(","));
        // for consistency
        conf.set(ConfigUtils.CLOUDBASE_AUTHS, authString);
    } else {
        authorizations = AccumuloRdfConstants.ALL_AUTHORIZATIONS;
    }
    // Set table prefix to the default if not set
    if (tablePrefix == null) {
        tablePrefix = RdfCloudTripleStoreConstants.TBL_PRFX_DEF;
        MRUtils.setTablePrefix(conf, tablePrefix);
    }
    // Check for required configuration parameters
    Preconditions.checkNotNull(instance, "Accumulo instance name [" + MRUtils.AC_INSTANCE_PROP + "] not set.");
    Preconditions.checkNotNull(userName, "Accumulo username [" + MRUtils.AC_USERNAME_PROP + "] not set.");
    Preconditions.checkNotNull(pwd, "Accumulo password [" + MRUtils.AC_PWD_PROP + "] not set.");
    Preconditions.checkNotNull(tablePrefix, "Table prefix [" + MRUtils.TABLE_PREFIX_PROPERTY + "] not set.");
    RdfCloudTripleStoreConstants.prefixTables(tablePrefix);
    // for consistency
    if (!mock)
        conf.set(ConfigUtils.CLOUDBASE_ZOOKEEPERS, zk);
    // Ensure consistency between alternative configuration properties
    conf.set(ConfigUtils.CLOUDBASE_INSTANCE, instance);
    conf.set(ConfigUtils.CLOUDBASE_USER, userName);
    conf.set(ConfigUtils.CLOUDBASE_PASSWORD, pwd);
    conf.setBoolean(ConfigUtils.USE_MOCK_INSTANCE, mock);
    conf.set(RdfCloudTripleStoreConfiguration.CONF_TBL_PREFIX, tablePrefix);
    Job job = Job.getInstance(conf, sc.appName());
    ClientConfiguration clientConfig = new ClientConfiguration().with(ClientProperty.INSTANCE_NAME, instance).with(ClientProperty.INSTANCE_ZK_HOST, zk);
    RyaInputFormat.setTableLayout(job, TABLE_LAYOUT.SPO);
    RyaInputFormat.setConnectorInfo(job, userName, new PasswordToken(pwd));
    RyaInputFormat.setZooKeeperInstance(job, clientConfig);
    RyaInputFormat.setScanAuthorizations(job, authorizations);
    String tableName = RdfCloudTripleStoreUtils.layoutPrefixToTable(TABLE_LAYOUT.SPO, tablePrefix);
    InputFormatBase.setInputTableName(job, tableName);
    return sc.newAPIHadoopRDD(job.getConfiguration(), GraphXEdgeInputFormat.class, Object.class, Edge.class);
}
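The edge RDD is produced the same way and is typically paired with the vertex RDD from getVertexRDD; a minimal sketch reusing sc, conf, and generator from the previous example, with exception handling omitted.

// Both RDDs are driven by the same Configuration, so the scan authorizations resolved
// above apply to the SPO table scan and the entity-centric index scan alike.
RDD<Tuple2<Object, RyaTypeWritable>> vertices = generator.getVertexRDD(sc, conf);
RDD<Tuple2<Object, Edge>> edges = generator.getEdgeRDD(sc, conf);
System.out.println("vertices=" + vertices.count() + ", edges=" + edges.count());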