Use of org.apache.accumulo.core.client.TableNotFoundException in project Gaffer by gchq.
From the class IngestUtils, the method createSplitsFile.
/**
 * Get the existing splits from a table in Accumulo and write a splits file.
 * The number of splits is returned.
 *
 * @param conn       - An existing connection to an Accumulo instance
 * @param table      - The table name
 * @param fs         - The FileSystem in which to create the splits file
 * @param splitsFile - A path for the output splits file
 * @param maxSplits  - The maximum number of splits
 * @return The number of splits in the table
 * @throws IOException for any IO issues writing the splits file to the file system. Accumulo exceptions are caught and wrapped in an IOException.
 */
public static int createSplitsFile(final Connector conn, final String table, final FileSystem fs, final Path splitsFile, final int maxSplits) throws IOException {
    LOGGER.info("Creating splits file in location {} from table {} with maximum splits {}", splitsFile, table, maxSplits);
    // Get the splits from the table
    Collection<Text> splits;
    try {
        splits = conn.tableOperations().listSplits(table, maxSplits);
    } catch (final TableNotFoundException | AccumuloSecurityException | AccumuloException e) {
        throw new IOException(e.getMessage(), e);
    }
    // This should have returned at most maxSplits splits, but this is not implemented properly in MockInstance.
    if (splits.size() > maxSplits) {
        if (conn instanceof MockConnector) {
            LOGGER.info("Manually reducing the number of splits to {} due to MockInstance not implementing"
                    + " listSplits(table, maxSplits) properly", maxSplits);
        } else {
            LOGGER.info("Manually reducing the number of splits to {} (number of splits was {})", maxSplits, splits.size());
        }
        final Collection<Text> filteredSplits = new TreeSet<>();
        final int outputEveryNth = splits.size() / maxSplits;
        LOGGER.info("Outputting every {}-th split from {} total", outputEveryNth, splits.size());
        int i = 0;
        for (final Text text : splits) {
            if (i % outputEveryNth == 0) {
                filteredSplits.add(text);
            }
            i++;
            if (filteredSplits.size() >= maxSplits) {
                break;
            }
        }
        splits = filteredSplits;
    }
    LOGGER.info("Found {} splits from table {}", splits.size(), table);
    try (final PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(splitsFile, true)), false, CommonConstants.UTF_8)) {
        // Write the splits to file
        if (splits.isEmpty()) {
            out.close();
            return 0;
        }
        for (final Text split : splits) {
            out.println(new String(Base64.encodeBase64(split.getBytes()), CommonConstants.UTF_8));
        }
    }
    return splits.size();
}
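A minimal caller sketch for the method above, assuming an existing Accumulo Connector and an available Hadoop Configuration; the table name and output path are illustrative, not taken from the Gaffer code:

// Sketch: write the current splits of "myTable" to a splits file, capped at 10 entries.
// `connector` is assumed to be an existing Accumulo Connector.
final FileSystem fs = FileSystem.get(new Configuration());
final Path splitsFile = new Path("/tmp/myTable-splits.txt");
final int numSplits = IngestUtils.createSplitsFile(connector, "myTable", fs, splitsFile, 10);
LOGGER.info("Wrote {} Base64-encoded splits to {}", numSplits, splitsFile);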
Use of org.apache.accumulo.core.client.TableNotFoundException in project Gaffer by gchq.
From the class SplitStoreFromIterableHandler, the method doOperation.
private void doOperation(final SplitStoreFromIterable<String> operation, final AccumuloStore store) throws OperationException {
    if (null == operation.getInput()) {
        throw new OperationException("Operation input is required.");
    }
    final SortedSet<Text> splits = new TreeSet<>();
    for (final String split : operation.getInput()) {
        splits.add(new Text(Base64.decodeBase64(split)));
    }
    try {
        store.getConnection().tableOperations().addSplits(store.getTableName(), splits);
        LOGGER.info("Added {} splits to table {}", splits.size(), store.getTableName());
    } catch (final TableNotFoundException | AccumuloException | AccumuloSecurityException | StoreException e) {
        LOGGER.error("Failed to add {} split points to table {}", splits.size(), store.getTableName());
        throw new RuntimeException("Failed to add split points: " + e.getMessage(), e);
    }
}
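The handler expects each input string to be a Base64-encoded split point, mirroring the encoding used by createSplitsFile above. A short sketch of producing such input from a table's existing splits, assuming a Connector and an example table name:

// Sketch: Base64-encode raw split points so they can be used as the operation's input.
// Uses the same commons-codec Base64 and UTF-8 constant as the Gaffer code above.
final Collection<Text> rawSplits = connector.tableOperations().listSplits("myTable");
final List<String> encodedSplits = new ArrayList<>();
for (final Text split : rawSplits) {
    encodedSplits.add(new String(Base64.encodeBase64(split.getBytes()), CommonConstants.UTF_8));
}
// encodedSplits can now be supplied as the input of a SplitStoreFromIterable<String> operation.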
Use of org.apache.accumulo.core.client.TableNotFoundException in project Gaffer by gchq.
From the class AddElementsFromHdfsHandler, the method needsSplitting.
/**
 * If there are fewer than 2 split points and more than 1 tablet server
 * we should calculate new splits.
 *
 * @param store the accumulo store
 * @return true if the table needs splitting
 * @throws OperationException if calculating the number of splits or the number of tablet servers fails.
 */
private boolean needsSplitting(final AccumuloStore store) throws OperationException {
    boolean needsSplitting = false;
    final boolean lessThan2Splits;
    try {
        lessThan2Splits = store.getConnection().tableOperations().listSplits(store.getTableName(), 2).size() < 2;
    } catch (final TableNotFoundException | AccumuloSecurityException | StoreException | AccumuloException e) {
        throw new OperationException("Unable to get accumulo's split points", e);
    }
    if (lessThan2Splits) {
        final int numberTabletServers;
        try {
            numberTabletServers = store.getTabletServers().size();
        } catch (final StoreException e) {
            throw new OperationException("Unable to get accumulo's tablet servers", e);
        }
        if (numberTabletServers > 1) {
            needsSplitting = true;
        }
    }
    return needsSplitting;
}
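The same check expressed directly against the Accumulo client API, outside the AccumuloStore wrapper; a rough sketch assuming an existing Connector and an example table name, with the threshold of 2 matching the Javadoc above:

// Sketch: fewer than 2 existing splits on an instance with more than one
// tablet server suggests the table would benefit from new split points.
final int splitCount = connector.tableOperations().listSplits("myTable", 2).size();
final int tabletServers = connector.instanceOperations().getTabletServers().size();
final boolean needsSplitting = splitCount < 2 && tabletServers > 1;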
Use of org.apache.accumulo.core.client.TableNotFoundException in project Gaffer by gchq.
From the class TableUtils, the method setLocalityGroups.
public static void setLocalityGroups(final AccumuloStore store) throws StoreException {
    final String tableName = store.getTableName();
    Map<String, Set<Text>> localityGroups = new HashMap<>();
    for (final String group : store.getSchema().getGroups()) {
        HashSet<Text> localityGroup = new HashSet<>();
        localityGroup.add(new Text(group));
        localityGroups.put(group, localityGroup);
    }
    LOGGER.info("Setting locality groups on table {}", tableName);
    try {
        store.getConnection().tableOperations().setLocalityGroups(tableName, localityGroups);
    } catch (final AccumuloException | AccumuloSecurityException | TableNotFoundException e) {
        throw new StoreException(e.getMessage(), e);
    }
}
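Each schema group is mapped to a locality group containing a single column family of the same name, so data for different groups can be stored and scanned separately. A brief sketch of verifying the result through the standard Accumulo table operations, assuming a Connector and an example table name:

// Sketch: read back the locality groups that were just configured.
final Map<String, Set<Text>> groups = connector.tableOperations().getLocalityGroups("myTable");
for (final Map.Entry<String, Set<Text>> entry : groups.entrySet()) {
    LOGGER.info("Locality group {} -> column families {}", entry.getKey(), entry.getValue());
}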
Use of org.apache.accumulo.core.client.TableNotFoundException in project apex-malhar by apache.
From the class AccumuloWindowStore, the method getCommittedWindowId.
@Override
public long getCommittedWindowId(String appId, int operatorId) {
    byte[] value = null;
    Authorizations auths = new Authorizations();
    Scanner scan = null;
    String columnKey = appId + "_" + operatorId + "_" + lastWindowColumnName;
    lastWindowColumnBytes = columnKey.getBytes();
    try {
        scan = connector.createScanner(tableName, auths);
    } catch (TableNotFoundException e) {
        logger.error("error getting committed window id", e);
        DTThrowable.rethrow(e);
    }
    // Restrict the scan to the single row and column holding the last committed window id
    scan.setRange(new Range(new Text(rowBytes)));
    scan.fetchColumn(new Text(columnFamilyBytes), new Text(lastWindowColumnBytes));
    for (Entry<Key, Value> entry : scan) {
        value = entry.getValue().get();
    }
    // Return the stored window id, or -1 if nothing has been committed yet
    if (value != null) {
        long longval = toLong(value);
        return longval;
    }
    return -1;
}
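The value read here is the raw byte encoding of the window id, converted back with toLong. A sketch of what the matching write side might look like is below; this is an assumption based on the read path, not the actual AccumuloWindowStore implementation, and toBytes is a hypothetical inverse of toLong:

// Sketch (assumed, not the real store code): persist a committed window id
// into the same row/column that getCommittedWindowId reads.
BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
Mutation mutation = new Mutation(rowBytes);
mutation.put(columnFamilyBytes, lastWindowColumnBytes, toBytes(windowId)); // toBytes: hypothetical inverse of toLong
writer.addMutation(mutation);
writer.close();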