Example 31 with MutationsRejectedException

Use of org.apache.accumulo.core.client.MutationsRejectedException in project incubator-rya by apache.

Class AccumuloParentMetadataRepository, method writeMetadata:

private void writeMetadata(final MergeParentMetadata metadata) throws MergerException {
    BatchWriter writer = null;
    try {
        // Write each result.
        final List<Mutation> mutations = makeWriteMetadataMutations(metadata);
        writer = connector.createBatchWriter(mergeParentMetadataTableName, new BatchWriterConfig());
        writer.addMutations(mutations);
    } catch (final AccumuloException | TableNotFoundException e) {
        throw new MergerException("Unable to set MergeParentMetadata in Accumulo", e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (final MutationsRejectedException e) {
                throw new MergerException("Could not add results to a MergeParentMetadata table because some of the mutations were rejected.", e);
            }
        }
    }
}
Also used : AccumuloException(org.apache.accumulo.core.client.AccumuloException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) MergerException(org.apache.rya.export.api.MergerException) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
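
These examples all build their writers with a default BatchWriterConfig. For reference, here is a minimal sketch of the same write-and-close pattern with an explicitly tuned config; the connector, table name, and tuning values are placeholders and not taken from the Rya code.

import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;

public class TunedWriterSketch {
    /** Writes a single mutation using an explicitly tuned BatchWriterConfig. */
    public static void write(final Connector connector, final String tableName, final Mutation mutation)
            throws TableNotFoundException, MutationsRejectedException {
        final BatchWriterConfig config = new BatchWriterConfig()
                // buffer up to 10 MB of mutations on the client before sending
                .setMaxMemory(10 * 1024 * 1024)
                // send buffered mutations at least every 2 seconds
                .setMaxLatency(2, TimeUnit.SECONDS)
                // number of threads used to send mutations to tablet servers
                .setMaxWriteThreads(4);
        final BatchWriter writer = connector.createBatchWriter(tableName, config);
        try {
            writer.addMutation(mutation);
        } finally {
            // close() flushes; rejected mutations surface here as MutationsRejectedException
            writer.close();
        }
    }
}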

Example 32 with MutationsRejectedException

Use of org.apache.accumulo.core.client.MutationsRejectedException in project incubator-rya by apache.

Class AccumuloFreeTextIndexer, method storeStatement:

private void storeStatement(final Statement statement) throws IOException {
    Objects.requireNonNull(mtbw, "Freetext indexer attempting to store statements, but setMultiTableBatchWriter() was never called.");
    // if the predicate list is empty, accept all predicates.
    // Otherwise, make sure the predicate is on the "valid" list
    final boolean isValidPredicate = validPredicates.isEmpty() || validPredicates.contains(statement.getPredicate());
    if (isValidPredicate && (statement.getObject() instanceof Literal)) {
        // Get the tokens
        final String text = statement.getObject().stringValue().toLowerCase();
        final SortedSet<String> tokens = tokenizer.tokenize(text);
        if (!tokens.isEmpty()) {
            // Get Document Data
            final String docContent = StatementSerializer.writeStatement(statement);
            final String docId = Md5Hash.md5Base64(docContent);
            // Setup partition
            final Text partition = genPartition(docContent.hashCode(), docTableNumPartitions);
            final Mutation docTableMut = new Mutation(partition);
            final List<Mutation> termTableMutations = new ArrayList<Mutation>();
            final Text docIdText = new Text(docId);
            // Store the Document Data
            docTableMut.put(ColumnPrefixes.DOCS_CF_PREFIX, docIdText, new Value(docContent.getBytes(Charsets.UTF_8)));
            // index the statement parts
            docTableMut.put(ColumnPrefixes.getSubjColFam(statement), docIdText, EMPTY_VALUE);
            docTableMut.put(ColumnPrefixes.getPredColFam(statement), docIdText, EMPTY_VALUE);
            docTableMut.put(ColumnPrefixes.getObjColFam(statement), docIdText, EMPTY_VALUE);
            docTableMut.put(ColumnPrefixes.getContextColFam(statement), docIdText, EMPTY_VALUE);
            // index the statement terms
            for (final String token : tokens) {
                // tie the token to the document
                docTableMut.put(ColumnPrefixes.getTermColFam(token), docIdText, EMPTY_VALUE);
                // store the term in the term table (useful for wildcard searches)
                termTableMutations.add(createEmptyPutMutation(ColumnPrefixes.getTermListColFam(token)));
                termTableMutations.add(createEmptyPutMutation(ColumnPrefixes.getRevTermListColFam(token)));
            }
            // write the mutations
            try {
                docTableBw.addMutation(docTableMut);
                termTableBw.addMutations(termTableMutations);
            } catch (final MutationsRejectedException e) {
                logger.error("error adding mutation", e);
                throw new IOException(e);
            }
        }
    }
}
Also used : Literal(org.openrdf.model.Literal) ArrayList(java.util.ArrayList) Value(org.apache.accumulo.core.data.Value) Text(org.apache.hadoop.io.Text) Mutation(org.apache.accumulo.core.data.Mutation) IOException(java.io.IOException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
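
The term-table writes above go through a private createEmptyPutMutation helper that is not shown. A plausible minimal version, inferred from how it is used here rather than copied from the Rya source, would write a single empty column under the given row:

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;

// Assumed helper and constants; the real AccumuloFreeTextIndexer defines its own.
final class FreeTextMutationsSketch {
    private static final Text EMPTY_TEXT = new Text();
    private static final Value EMPTY_VALUE = new Value(new byte[0]);

    /** Creates a mutation whose only content is an empty column family/qualifier/value. */
    static Mutation createEmptyPutMutation(final Text row) {
        final Mutation m = new Mutation(row);
        m.put(EMPTY_TEXT, EMPTY_TEXT, EMPTY_VALUE);
        return m;
    }
}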

Example 33 with MutationsRejectedException

Use of org.apache.accumulo.core.client.MutationsRejectedException in project incubator-rya by apache.

Class AccumuloTemporalIndexer, method storeStatement:

/**
 * Store a statement in the index if it meets the criteria: the object must be
 * a literal and the predicate must be one of the validPredicates from the
 * configuration (an empty list accepts every predicate). Statements that do
 * not meet the criteria are silently ignored. Logs a warning if the object is
 * not parseable. Attempts to parse with calendarValue = literalValue.calendarValue();
 * if that fails, tries org.joda.time.DateTime.parse().
 * TODO: parse an interval using multiple predicates for the same subject -- ontology dependent.
 */
private void storeStatement(final Statement statement) throws IOException, IllegalArgumentException {
    Objects.requireNonNull(temporalIndexBatchWriter, "This is not initialized for writing.  Must call setMultiTableBatchWriter() and init().");
    // if the predicate list is empty, accept all predicates.
    // Otherwise, make sure the predicate is on the "valid" list
    final boolean isValidPredicate = validPredicates == null || validPredicates.isEmpty() || validPredicates.contains(statement.getPredicate());
    if (!isValidPredicate || !(statement.getObject() instanceof Literal)) {
        return;
    }
    // 0 begin, 1 end of interval
    final DateTime[] indexDateTimes = new DateTime[2];
    extractDateTime(statement, indexDateTimes);
    if (indexDateTimes[0] == null) {
        return;
    }
    if (!this.isInit)
        throw new RuntimeException("Method .init() was not called (or failed) before attempting to store statements.");
    // Add this as an instant, or interval.
    try {
        if (indexDateTimes[1] != null) {
            final TemporalInterval interval = new TemporalInterval(new TemporalInstantRfc3339(indexDateTimes[0]), new TemporalInstantRfc3339(indexDateTimes[1]));
            addInterval(temporalIndexBatchWriter, interval, statement);
        } else {
            final TemporalInstant instant = new TemporalInstantRfc3339(indexDateTimes[0]);
            addInstant(temporalIndexBatchWriter, instant, statement);
        }
    } catch (final MutationsRejectedException e) {
        throw new IOException("While adding interval/instant for statement =" + statement, e);
    }
}
Also used : Literal(org.openrdf.model.Literal) TemporalInstantRfc3339(org.apache.rya.indexing.TemporalInstantRfc3339) IOException(java.io.IOException) TemporalInstant(org.apache.rya.indexing.TemporalInstant) TemporalInterval(org.apache.rya.indexing.TemporalInterval) DateTime(org.joda.time.DateTime) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
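
The Javadoc describes a two-step parse: try the literal's XML calendar value first, then fall back to Joda-Time. A minimal sketch of that fallback is below; the method name and error handling are illustrative, and the real logic lives in extractDateTime.

import javax.xml.datatype.XMLGregorianCalendar;

import org.joda.time.DateTime;
import org.openrdf.model.Literal;

final class TemporalParseSketch {
    /**
     * Tries literal.calendarValue() first; if the literal is not an XML date/time,
     * falls back to org.joda.time.DateTime.parse(). Returns null if both fail.
     */
    static DateTime parseDateTime(final Literal literal) {
        try {
            final XMLGregorianCalendar calendar = literal.calendarValue();
            return new DateTime(calendar.toGregorianCalendar().getTimeInMillis());
        } catch (final IllegalArgumentException e) {
            // not an xsd date/time literal; try a plain ISO-8601 string parse
            try {
                return DateTime.parse(literal.stringValue());
            } catch (final IllegalArgumentException e2) {
                return null;
            }
        }
    }
}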

Example 34 with MutationsRejectedException

Use of org.apache.accumulo.core.client.MutationsRejectedException in project incubator-rya by apache.

Class PcjIntegrationTestingUtil, method writeResults:

/**
 * Add a collection of results to a specific PCJ table.
 *
 * @param accumuloConn
 *            - A connection to the Accumulo that hosts the PCJ table. (not
 *            null)
 * @param pcjTableName
 *            - The name of the PCJ table that will receive the results.
 *            (not null)
 * @param results
 *            - Binding sets that will be written to the PCJ table. (not
 *            null)
 * @throws PcjException
 *             The provided PCJ table doesn't exist, is missing the PCJ
 *             metadata, or the result could not be written to it.
 */
private static void writeResults(final Connector accumuloConn, final String pcjTableName, final Collection<BindingSet> results) throws PcjException {
    checkNotNull(accumuloConn);
    checkNotNull(pcjTableName);
    checkNotNull(results);
    // Fetch the variable orders from the PCJ table.
    final PcjMetadata metadata = new PcjTables().getPcjMetadata(accumuloConn, pcjTableName);
    // Write each result formatted using each of the variable orders.
    BatchWriter writer = null;
    try {
        writer = accumuloConn.createBatchWriter(pcjTableName, new BatchWriterConfig());
        for (final BindingSet result : results) {
            final Set<Mutation> addResultMutations = makeWriteResultMutations(metadata.getVarOrders(), result);
            writer.addMutations(addResultMutations);
        }
    } catch (TableNotFoundException | MutationsRejectedException e) {
        throw new PcjException("Could not add results to the PCJ table named: " + pcjTableName, e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (final MutationsRejectedException e) {
                throw new PcjException("Could not add results to a PCJ table because some of the mutations were rejected.", e);
            }
        }
    }
}
Also used : VisibilityBindingSet(org.apache.rya.api.model.VisibilityBindingSet) BindingSet(org.openrdf.query.BindingSet) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) PcjException(org.apache.rya.indexing.pcj.storage.PcjException) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) PcjMetadata(org.apache.rya.indexing.pcj.storage.PcjMetadata) PcjTables(org.apache.rya.indexing.pcj.storage.accumulo.PcjTables) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
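
Each of these catch blocks wraps the MutationsRejectedException in a domain-specific exception. When more detail about the rejection is wanted, the exception itself exposes the constraint violations that caused it, as in this small sketch; the helper method is illustrative and not part of the Rya code.

import java.util.List;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.data.ConstraintViolationSummary;

final class RejectionDiagnostics {
    /** Closes the writer and reports any constraint violations that caused mutations to be rejected. */
    static void closeAndReport(final BatchWriter writer) {
        try {
            writer.close();
        } catch (final MutationsRejectedException e) {
            final List<ConstraintViolationSummary> violations = e.getConstraintViolationSummaries();
            for (final ConstraintViolationSummary violation : violations) {
                System.err.println("Constraint violated: " + violation.toString());
            }
        }
    }
}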

Example 35 with MutationsRejectedException

Use of org.apache.accumulo.core.client.MutationsRejectedException in project incubator-rya by apache.

Class PcjTables, method updateMockCardinality:

/**
 * Update the cardinality of a PCJ by a {@code delta}.
 *
 * This method updates the PCJ table cardinality using a BatchWriter in the event that
 * the Accumulo Connector is for a MockInstance.  In the event that the cardinality is
 * being updated asynchronously, there are no guarantees that the resulting cardinality
 * will be correct.
 *
 * @param accumuloConn - A connection to a Mock Accumulo Instance that hosts the PCJ table. (not null)
 * @param pcjTableName - The name of the PCJ table that will have its cardinality updated. (not null)
 * @param delta - How much the cardinality will change.
 * @throws PCJStorageException The cardinality could not be updated.
 */
private void updateMockCardinality(final Connector accumuloConn, final String pcjTableName, final long delta) throws PCJStorageException {
    checkNotNull(accumuloConn);
    checkNotNull(pcjTableName);
    BatchWriter batchWriter = null;
    try {
        batchWriter = accumuloConn.createBatchWriter(pcjTableName, new BatchWriterConfig());
        final long cardinality = getPcjMetadata(accumuloConn, pcjTableName).getCardinality();
        final Mutation mutation = new Mutation(PCJ_METADATA_ROW_ID);
        final Value newCardinality = new Value(longLexicoder.encode(cardinality + delta));
        mutation.put(PCJ_METADATA_FAMILY, PCJ_METADATA_CARDINALITY, newCardinality);
        batchWriter.addMutation(mutation);
    } catch (TableNotFoundException | MutationsRejectedException e) {
        throw new PCJStorageException("Could not update the cardinality value of the PCJ Table named: " + pcjTableName, e);
    } finally {
        if (batchWriter != null) {
            try {
                batchWriter.close();
            } catch (final MutationsRejectedException e) {
                throw new PCJStorageException("Could not update the cardinality value of the PCJ Table named: " + pcjTableName, e);
            }
        }
    }
}
Also used : TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) ConditionalMutation(org.apache.accumulo.core.data.ConditionalMutation) PCJStorageException(org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
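
The cardinality value above is written with a long lexicoder, which encodes longs so that byte-wise ordering matches numeric ordering. A small round-trip sketch follows, assuming the longLexicoder field is an org.apache.accumulo.core.client.lexicoder.LongLexicoder (its declaration is not shown in the excerpt).

import org.apache.accumulo.core.client.lexicoder.LongLexicoder;
import org.apache.accumulo.core.data.Value;

final class CardinalityEncodingSketch {
    public static void main(final String[] args) {
        final LongLexicoder longLexicoder = new LongLexicoder();

        // Encode a cardinality the same way the mutation above does.
        final long cardinality = 41L;
        final long delta = 1L;
        final Value newCardinality = new Value(longLexicoder.encode(cardinality + delta));

        // Decoding the stored bytes yields the updated count.
        final long decoded = longLexicoder.decode(newCardinality.get());
        System.out.println(decoded); // 42
    }
}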

Aggregations

MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 68 uses
Mutation (org.apache.accumulo.core.data.Mutation): 48 uses
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 40 uses
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 23 uses
Value (org.apache.accumulo.core.data.Value): 23 uses
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 21 uses
Text (org.apache.hadoop.io.Text): 20 uses
Key (org.apache.accumulo.core.data.Key): 13 uses
IOException (java.io.IOException): 12 uses
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 12 uses
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 11 uses
HashMap (java.util.HashMap): 10 uses
ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility): 9 uses
ArrayList (java.util.ArrayList): 8 uses
Test (org.junit.Test): 8 uses
Entry (java.util.Map.Entry): 6 uses
TableExistsException (org.apache.accumulo.core.client.TableExistsException): 6 uses
ConditionalMutation (org.apache.accumulo.core.data.ConditionalMutation): 6 uses
ConstraintViolationSummary (org.apache.accumulo.core.data.ConstraintViolationSummary): 6 uses
PrestoException (com.facebook.presto.spi.PrestoException): 5 uses