Use of org.apache.accumulo.core.client.MutationsRejectedException in the YCSB project (brianfrankcooper).
Class AccumuloClient, method cleanup().
@Override
public void cleanup() throws DBException {
try {
Iterator<BatchWriter> iterator = writers.values().iterator();
while (iterator.hasNext()) {
BatchWriter writer = iterator.next();
writer.close();
iterator.remove();
}
} catch (MutationsRejectedException e) {
throw new DBException(e);
}
}
Use of org.apache.accumulo.core.client.MutationsRejectedException in the Gaffer project (gchq).
Class AccumuloStore, method insertGraphElements().
protected void insertGraphElements(final Iterable<? extends Element> elements) throws StoreException {
// Create BatchWriter
final BatchWriter writer = TableUtils.createBatchWriter(this);
// too high a latency, etc.
if (null != elements) {
for (final Element element : elements) {
final Pair<Key, Key> keys;
try {
keys = keyPackage.getKeyConverter().getKeysFromElement(element);
} catch (final AccumuloElementConversionException e) {
LOGGER.error(FAILED_TO_CREATE_AN_ACCUMULO_FROM_ELEMENT_OF_TYPE_WHEN_TRYING_TO_INSERT_ELEMENTS, "key", element.getGroup());
continue;
}
final Value value;
try {
value = keyPackage.getKeyConverter().getValueFromElement(element);
} catch (final AccumuloElementConversionException e) {
LOGGER.error(FAILED_TO_CREATE_AN_ACCUMULO_FROM_ELEMENT_OF_TYPE_WHEN_TRYING_TO_INSERT_ELEMENTS, "value", element.getGroup());
continue;
}
final Mutation m = new Mutation(keys.getFirst().getRow());
m.put(keys.getFirst().getColumnFamily(), keys.getFirst().getColumnQualifier(), new ColumnVisibility(keys.getFirst().getColumnVisibility()), keys.getFirst().getTimestamp(), value);
try {
writer.addMutation(m);
} catch (final MutationsRejectedException e) {
LOGGER.error("Failed to create an accumulo key mutation");
continue;
}
// If the GraphElement is an Edge then there will be 2 keys.
if (null != keys.getSecond()) {
final Mutation m2 = new Mutation(keys.getSecond().getRow());
m2.put(keys.getSecond().getColumnFamily(), keys.getSecond().getColumnQualifier(), new ColumnVisibility(keys.getSecond().getColumnVisibility()), keys.getSecond().getTimestamp(), value);
try {
writer.addMutation(m2);
} catch (final MutationsRejectedException e) {
LOGGER.error("Failed to create an accumulo key mutation");
}
}
}
} else {
throw new GafferRuntimeException("Could not find any elements to add to graph.", Status.BAD_REQUEST);
}
try {
writer.close();
} catch (final MutationsRejectedException e) {
LOGGER.warn("Accumulo batch writer failed to close", e);
}
}
Use of org.apache.accumulo.core.client.MutationsRejectedException in the incubator-rya project (apache).
Class AccumuloParentMetadataRepository, method writeMetadata().
private void writeMetadata(final MergeParentMetadata metadata) throws MergerException {
BatchWriter writer = null;
try {
// Write each result.
final List<Mutation> mutations = makeWriteMetadataMutations(metadata);
writer = connector.createBatchWriter(mergeParentMetadataTableName, new BatchWriterConfig());
writer.addMutations(mutations);
} catch (final AccumuloException | TableNotFoundException e) {
throw new MergerException("Unable to set MergeParentMetadata in Accumulo", e);
} finally {
if (writer != null) {
try {
writer.close();
} catch (final MutationsRejectedException e) {
throw new MergerException("Could not add results to a MergeParentMetadata table because some of the mutations were rejected.", e);
}
}
}
}
Use of org.apache.accumulo.core.client.MutationsRejectedException in the incubator-rya project (apache).
Class AccumuloFreeTextIndexer, method storeStatement().
private void storeStatement(final Statement statement) throws IOException {
Objects.requireNonNull(mtbw, "Freetext indexer attempting to store, but setMultiTableBatchWriter() was not set.");
// if the predicate list is empty, accept all predicates.
// Otherwise, make sure the predicate is on the "valid" list
final boolean isValidPredicate = validPredicates.isEmpty() || validPredicates.contains(statement.getPredicate());
if (isValidPredicate && (statement.getObject() instanceof Literal)) {
// Get the tokens
final String text = statement.getObject().stringValue().toLowerCase();
final SortedSet<String> tokens = tokenizer.tokenize(text);
if (!tokens.isEmpty()) {
// Get Document Data
final String docContent = StatementSerializer.writeStatement(statement);
final String docId = Md5Hash.md5Base64(docContent);
// Setup partition
final Text partition = genPartition(docContent.hashCode(), docTableNumPartitions);
final Mutation docTableMut = new Mutation(partition);
final List<Mutation> termTableMutations = new ArrayList<Mutation>();
final Text docIdText = new Text(docId);
// Store the Document Data
docTableMut.put(ColumnPrefixes.DOCS_CF_PREFIX, docIdText, new Value(docContent.getBytes(Charsets.UTF_8)));
// index the statement parts
docTableMut.put(ColumnPrefixes.getSubjColFam(statement), docIdText, EMPTY_VALUE);
docTableMut.put(ColumnPrefixes.getPredColFam(statement), docIdText, EMPTY_VALUE);
docTableMut.put(ColumnPrefixes.getObjColFam(statement), docIdText, EMPTY_VALUE);
docTableMut.put(ColumnPrefixes.getContextColFam(statement), docIdText, EMPTY_VALUE);
// index the statement terms
for (final String token : tokens) {
// tie the token to the document
docTableMut.put(ColumnPrefixes.getTermColFam(token), docIdText, EMPTY_VALUE);
// store the term in the term table (useful for wildcard searches)
termTableMutations.add(createEmptyPutMutation(ColumnPrefixes.getTermListColFam(token)));
termTableMutations.add(createEmptyPutMutation(ColumnPrefixes.getRevTermListColFam(token)));
}
// write the mutations
try {
docTableBw.addMutation(docTableMut);
termTableBw.addMutations(termTableMutations);
} catch (final MutationsRejectedException e) {
logger.error("error adding mutation", e);
throw new IOException(e);
}
}
}
}
Use of org.apache.accumulo.core.client.MutationsRejectedException in the incubator-rya project (apache).
Class AccumuloTemporalIndexer, method storeStatement().
/**
* Store a statement in the index if it meets the criterion: Object should be
* a literal and one of the validPredicates from the configuration.
* If it does not meet the criteria, it is silently ignored.
* logs a warning if the object is not parse-able.
* Attempts to parse with calendarValue = literalValue.calendarValue()
* if that fails, tries: org.joda.time.DateTime.parse() .
* T O D O parse an interval using multiple predicates for same subject -- ontology dependent.
*/
private void storeStatement(final Statement statement) throws IOException, IllegalArgumentException {
Objects.requireNonNull(temporalIndexBatchWriter, "This is not initialized for writing. Must call setMultiTableBatchWriter() and init().");
// if the predicate list is empty, accept all predicates.
// Otherwise, make sure the predicate is on the "valid" list
final boolean isValidPredicate = validPredicates == null || validPredicates.isEmpty() || validPredicates.contains(statement.getPredicate());
if (!isValidPredicate || !(statement.getObject() instanceof Literal)) {
return;
}
// 0 begin, 1 end of interval
final DateTime[] indexDateTimes = new DateTime[2];
extractDateTime(statement, indexDateTimes);
if (indexDateTimes[0] == null) {
return;
}
if (!this.isInit)
throw new RuntimeException("Method .init() was not called (or failed) before attempting to store statements.");
// Add this as an instant, or interval.
try {
if (indexDateTimes[1] != null) {
final TemporalInterval interval = new TemporalInterval(new TemporalInstantRfc3339(indexDateTimes[0]), new TemporalInstantRfc3339(indexDateTimes[1]));
addInterval(temporalIndexBatchWriter, interval, statement);
} else {
final TemporalInstant instant = new TemporalInstantRfc3339(indexDateTimes[0]);
addInstant(temporalIndexBatchWriter, instant, statement);
}
} catch (final MutationsRejectedException e) {
throw new IOException("While adding interval/instant for statement =" + statement, e);
}
}
Aggregations