Use of org.apache.accumulo.core.client.AccumuloSecurityException in project hive by apache:
the class AccumuloStorageHandler, method preCreateTable.
/**
 * Validates the Hive table definition and, for managed (non-EXTERNAL) tables, creates the
 * backing Accumulo table before Hive registers the metadata.
 *
 * @param table the Hive table being created
 * @throws MetaException if a location is specified, the column mapping is missing,
 *         the Accumulo/Hive EXTERNAL expectations disagree, or any Accumulo call fails
 */
@Override
public void preCreateTable(Table table) throws MetaException {
    boolean isExternal = isExternalTable(table);
    // Accumulo-backed tables have no filesystem location; reject any user-specified one.
    if (table.getSd().getLocation() != null) {
        throw new MetaException("Location can't be specified for Accumulo");
    }
    Map<String, String> serdeParams = table.getSd().getSerdeInfo().getParameters();
    String columnMapping = serdeParams.get(AccumuloSerDeParameters.COLUMN_MAPPINGS);
    if (columnMapping == null) {
        throw new MetaException(AccumuloSerDeParameters.COLUMN_MAPPINGS + " missing from SERDEPROPERTIES");
    }
    try {
        String tblName = getTableName(table);
        Connector connector = connectionParams.getConnector();
        TableOperations tableOpts = connector.tableOperations();
        // Attempt to create the table, taking EXTERNAL into consideration
        if (!tableOpts.exists(tblName)) {
            if (!isExternal) {
                tableOpts.create(tblName);
            } else {
                // EXTERNAL tables must reference an Accumulo table that already exists.
                throw new MetaException("Accumulo table " + tblName + " doesn't exist even though declared external");
            }
        } else {
            if (!isExternal) {
                // A managed table may not silently take over a pre-existing Accumulo table.
                throw new MetaException("Table " + tblName + " already exists in Accumulo. Use CREATE EXTERNAL TABLE to register with Hive.");
            }
        }
    } catch (AccumuloSecurityException | TableExistsException | AccumuloException e) {
        // The three catch bodies were identical; fold them into one multi-catch.
        // MetaException carries no cause, so capture the stack trace in the message.
        throw new MetaException(StringUtils.stringifyException(e));
    }
}
Use of org.apache.accumulo.core.client.AccumuloSecurityException in project Gaffer by gchq:
the class AccumuloKeyRangePartitioner, method getSplits.
/**
 * Fetches the split points of the store's Accumulo table and returns them as strings.
 *
 * @param store the Accumulo store whose table splits are listed
 * @return the table's split points rendered via {@code Text.toString()}
 * @throws OperationException if the connection cannot be created or the splits cannot be listed
 */
public static synchronized String[] getSplits(final AccumuloStore store) throws OperationException {
    final Connector connector;
    try {
        connector = store.getConnection();
    } catch (final StoreException e) {
        throw new OperationException("Failed to create accumulo connection", e);
    }
    final String table = store.getProperties().getTable();
    try {
        final Collection<Text> splits = connector.tableOperations().listSplits(table);
        // A sequential stream straight to an array replaces the previous
        // parallelStream -> List -> pre-sized array round trip; the mapping is
        // trivial, so parallelism bought nothing.
        return splits.stream().map(Text::toString).toArray(String[]::new);
    } catch (final TableNotFoundException | AccumuloSecurityException | AccumuloException e) {
        throw new OperationException("Failed to get accumulo split points from table " + table, e);
    }
}
Use of org.apache.accumulo.core.client.AccumuloSecurityException in project Gaffer by gchq:
the class AccumuloStore, method updateConfiguration.
/**
* Updates a Hadoop {@link Configuration} with information needed to connect to the Accumulo store. It adds
* iterators to apply the provided {@link View}. This method will be used by operations that run MapReduce
* or Spark jobs against the Accumulo store.
*
* @param conf A {@link Configuration} to be updated.
* @param view The {@link View} to be applied.
* @param user The {@link User} to be used.
* @throws StoreException if there is a failure to connect to Accumulo or a problem setting the iterators.
*/
public void updateConfiguration(final Configuration conf, final View view, final User user) throws StoreException {
    try {
        // Table name
        InputConfigurator.setInputTableName(AccumuloInputFormat.class, conf, getProperties().getTable());
        // User credentials for connecting to Accumulo
        addUserToConfiguration(conf);
        // Authorizations: derive scan auths from the user's data auths when present,
        // otherwise fall back to an empty (no-auth) set.
        Authorizations authorisations;
        if (null != user && null != user.getDataAuths()) {
            authorisations = new Authorizations(user.getDataAuths().toArray(new String[user.getDataAuths().size()]));
        } else {
            authorisations = new Authorizations();
        }
        InputConfigurator.setScanAuthorizations(AccumuloInputFormat.class, conf, authorisations);
        // Zookeeper
        addZookeeperToConfiguration(conf);
        // Add keypackage, schema and view to conf (schema/view serialized as compact JSON, UTF-8)
        conf.set(ElementInputFormat.KEY_PACKAGE, getProperties().getKeyPackageClass());
        conf.set(ElementInputFormat.SCHEMA, new String(getSchema().toCompactJson(), CommonConstants.UTF_8));
        conf.set(ElementInputFormat.VIEW, new String(view.toCompactJson(), CommonConstants.UTF_8));
        // Add iterators that depend on the view
        if (view.hasGroups()) {
            IteratorSetting elementPreFilter = getKeyPackage().getIteratorFactory().getElementPreAggregationFilterIteratorSetting(view, this);
            IteratorSetting elementPostFilter = getKeyPackage().getIteratorFactory().getElementPostAggregationFilterIteratorSetting(view, this);
            // NOTE(review): the post-aggregation filter is registered before the
            // pre-aggregation one. Presumably each IteratorSetting carries its own
            // priority so registration order is irrelevant — confirm before reordering.
            InputConfigurator.addIterator(AccumuloInputFormat.class, conf, elementPostFilter);
            InputConfigurator.addIterator(AccumuloInputFormat.class, conf, elementPreFilter);
        }
    } catch (final AccumuloSecurityException | IteratorSettingException | UnsupportedEncodingException e) {
        // Wrap every configuration failure in the store-level exception declared by the javadoc.
        throw new StoreException(e);
    }
}
Use of org.apache.accumulo.core.client.AccumuloSecurityException in project Gaffer by gchq:
the class SplitTableTool, method run.
/**
 * Reads split points (one per line) from the operation's input file and adds them
 * as splits to the store's Accumulo table.
 *
 * @param arg0 unused tool arguments
 * @return {@code SUCCESS_RESPONSE} on success
 * @throws OperationException if the filesystem, input file, or Accumulo call fails
 */
@Override
public int run(final String[] arg0) throws OperationException {
    LOGGER.info("Running SplitTableTool");
    final Configuration conf = getConf();
    final FileSystem fileSystem;
    try {
        fileSystem = FileSystem.get(conf);
    } catch (final IOException e) {
        throw new OperationException("Failed to get Filesystem from configuration: " + e.getMessage(), e);
    }
    // TreeSet keeps the split points sorted, as addSplits expects a SortedSet.
    final SortedSet<Text> splitPoints = new TreeSet<>();
    try (final BufferedReader reader = new BufferedReader(new InputStreamReader(fileSystem.open(new Path(operation.getInputPath())), CommonConstants.UTF_8))) {
        // Each line of the input file contributes one split point.
        for (String current = reader.readLine(); current != null; current = reader.readLine()) {
            splitPoints.add(new Text(current));
        }
    } catch (final IOException e) {
        throw new OperationException(e.getMessage(), e);
    }
    try {
        store.getConnection().tableOperations().addSplits(store.getProperties().getTable(), splitPoints);
        LOGGER.info("Added {} splits to table {}", splitPoints.size(), store.getProperties().getTable());
    } catch (final TableNotFoundException | AccumuloException | AccumuloSecurityException | StoreException e) {
        LOGGER.error("Failed to add {} split points to table {}", splitPoints.size(), store.getProperties().getTable());
        throw new OperationException("Failed to add split points to the table specified: " + e.getMessage(), e);
    }
    return SUCCESS_RESPONSE;
}
Use of org.apache.accumulo.core.client.AccumuloSecurityException in project Gaffer by gchq:
the class TableUtils, method createTable.
/**
* Creates a table for Gaffer data and enables the correct Bloom filter;
* removes the versioning iterator and adds an aggregator iterator and the
* {@link org.apache.accumulo.core.iterators.user.AgeOffFilter} for the
* specified time period.
*
* @param store the accumulo store
* @throws StoreException failure to create accumulo connection or add iterator settings
* @throws TableExistsException failure to create table
*/
public static synchronized void createTable(final AccumuloStore store) throws StoreException, TableExistsException {
    // Create table
    final String tableName = store.getProperties().getTable();
    if (null == tableName) {
        throw new AccumuloRuntimeException("Table name is required.");
    }
    final Connector connector = store.getConnection();
    // If the table already exists we return without touching it: none of the
    // property/iterator configuration below is (re-)applied to existing tables.
    if (connector.tableOperations().exists(tableName)) {
        LOGGER.info("Table {} exists, not creating", tableName);
        return;
    }
    try {
        LOGGER.info("Creating table {} as user {}", tableName, connector.whoami());
        connector.tableOperations().create(tableName);
        // Optional: file replication factor taken from the store properties.
        final String repFactor = store.getProperties().getTableFileReplicationFactor();
        if (null != repFactor) {
            LOGGER.info("Table file replication set to {} on table {}", repFactor, tableName);
            connector.tableOperations().setProperty(tableName, Property.TABLE_FILE_REPLICATION.getKey(), repFactor);
        }
        // Enable Bloom filters using ElementFunctor
        LOGGER.info("Enabling Bloom filter on table {}", tableName);
        connector.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
        connector.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), store.getKeyPackage().getKeyFunctor().getClass().getName());
        // Remove versioning iterator from table for all scopes; "vers" is
        // Accumulo's default versioning-iterator name.
        LOGGER.info("Removing versioning iterator from table {}", tableName);
        final EnumSet<IteratorScope> iteratorScopes = EnumSet.allOf(IteratorScope.class);
        connector.tableOperations().removeIterator(tableName, "vers", iteratorScopes);
        if (store.getSchema().hasAggregators()) {
            // Add Combiner iterator to table for all scopes
            LOGGER.info("Adding Aggregator iterator to table {} for all scopes", tableName);
            connector.tableOperations().attachIterator(tableName, store.getKeyPackage().getIteratorFactory().getAggregatorIteratorSetting(store));
        } else {
            LOGGER.info("Aggregator iterator has not been added to table {}", tableName);
        }
        if (store.getProperties().getEnableValidatorIterator()) {
            // Add validator iterator to table for all scopes
            LOGGER.info("Adding Validator iterator to table {} for all scopes", tableName);
            connector.tableOperations().attachIterator(tableName, store.getKeyPackage().getIteratorFactory().getValidatorIteratorSetting(store));
        } else {
            LOGGER.info("Validator iterator has not been added to table {}", tableName);
        }
    } catch (AccumuloSecurityException | TableNotFoundException | AccumuloException | IteratorSettingException e) {
        throw new StoreException(e.getMessage(), e);
    }
    // Locality groups are applied outside the try so their failures surface as-is.
    setLocalityGroups(store);
}
Aggregations