Example 21 with MutationsRejectedException

use of org.apache.accumulo.core.client.MutationsRejectedException in project accumulo by apache.

the class StatusMaker method deleteStatusRecord.

/**
 * Because there is only one active Master, and thus one active StatusMaker, the only safe time that we can issue the delete for a Status which is closed is
 * immediately after writing it to the replication table.
 * <p>
 * If we try to defer and delete these entries in another thread/process, we will have no assurance that the Status message was propagated to the replication
 * table. It is easiest, in terms of concurrency, to do this all in one step.
 *
 * @param k
 *          The Key to delete
 */
protected void deleteStatusRecord(Key k) {
    log.debug("Deleting {} from metadata table as it's no longer needed", k.toStringNoTruncate());
    if (null == metadataWriter) {
        try {
            metadataWriter = conn.createBatchWriter(sourceTableName, new BatchWriterConfig());
        } catch (TableNotFoundException e) {
            throw new RuntimeException("Metadata table doesn't exist");
        }
    }
    try {
        Mutation m = new Mutation(k.getRow());
        m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
        metadataWriter.addMutation(m);
        metadataWriter.flush();
    } catch (MutationsRejectedException e) {
        log.warn("Failed to delete status mutations for metadata table, will retry", e);
    }
}
Also used : TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Mutation(org.apache.accumulo.core.data.Mutation) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
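
A minimal sketch of the same delete-and-flush pattern, but with a short-lived writer that is created and closed per call instead of being cached on a field; the helper name deleteStatus and its signature are assumptions for illustration, not part of the Accumulo source.

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;

public class StatusDeleteSketch {
    // Hypothetical helper: delete the column named by the Key and report
    // success, letting the caller retry on its next pass if the mutation
    // was rejected.
    static boolean deleteStatus(Connector conn, String metadataTable, Key k) throws TableNotFoundException {
        BatchWriter writer = conn.createBatchWriter(metadataTable, new BatchWriterConfig());
        boolean ok = false;
        try {
            Mutation m = new Mutation(k.getRow());
            m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
            writer.addMutation(m);
            writer.flush();
            ok = true;
        } catch (MutationsRejectedException e) {
            // leave ok == false; the caller retries later
        } finally {
            try {
                writer.close();
            } catch (MutationsRejectedException e) {
                ok = false;
            }
        }
        return ok;
    }
}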

Example 22 with MutationsRejectedException

use of org.apache.accumulo.core.client.MutationsRejectedException in project accumulo by apache.

the class StatusMaker method addOrderRecord.

/**
 * Create a record to track when the file was closed, so that replication preference is given to the files that have been closed the longest and the
 * work assigner can try to replicate data in the order it was ingested (avoiding replay in a different order).
 *
 * @param file
 *          File being replicated
 * @param tableId
 *          Table ID the file was used by
 * @param stat
 *          Status message
 * @param value
 *          Serialized version of the Status message
 * @return true if the order record was written successfully, false if the write failed and should be retried
 */
protected boolean addOrderRecord(Text file, Table.ID tableId, Status stat, Value value) {
    try {
        if (!stat.hasCreatedTime()) {
            try {
                // If the createdTime is not set, work around the issue by retrieving the WAL creation time
                // from HDFS (or the current time if the WAL does not exist). See ACCUMULO-4751
                long createdTime = setAndGetCreatedTime(new Path(file.toString()), tableId.toString());
                stat = Status.newBuilder(stat).setCreatedTime(createdTime).build();
                value = ProtobufUtil.toValue(stat);
                log.debug("Status was lacking createdTime, set to {} for {}", createdTime, file);
            } catch (IOException e) {
                log.warn("Failed to get file status, will retry", e);
                return false;
            } catch (MutationsRejectedException e) {
                log.warn("Failed to write status mutation for replication, will retry", e);
                return false;
            }
        }
        log.info("Creating order record for {} for {} with {}", file, tableId, ProtobufUtil.toString(stat));
        Mutation m = OrderSection.createMutation(file.toString(), stat.getCreatedTime());
        OrderSection.add(m, tableId, value);
        try {
            replicationWriter.addMutation(m);
        } catch (MutationsRejectedException e) {
            log.warn("Failed to write order mutation for replication, will retry", e);
            return false;
        }
    } finally {
        try {
            replicationWriter.flush();
        } catch (MutationsRejectedException e) {
            log.warn("Failed to write order mutation for replication, will retry", e);
            return false;
        }
    }
    return true;
}
Also used : Path(org.apache.hadoop.fs.Path) IOException(java.io.IOException) Mutation(org.apache.accumulo.core.data.Mutation) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
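
The method above funnels every MutationsRejectedException, whether raised by addMutation or by the flush in the finally block, into a boolean "please retry" result. A minimal sketch of that pattern in isolation; the helper name writeAndFlush is an assumption for illustration:

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.data.Mutation;

public class WriteAndFlushSketch {
    // Queue a mutation and always flush, reporting failure of either step
    // through the return value instead of propagating the exception.
    static boolean writeAndFlush(BatchWriter writer, Mutation m) {
        boolean ok = true;
        try {
            writer.addMutation(m);
        } catch (MutationsRejectedException e) {
            ok = false;
        } finally {
            try {
                writer.flush();
            } catch (MutationsRejectedException e) {
                ok = false;
            }
        }
        return ok;
    }
}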

Example 23 with MutationsRejectedException

use of org.apache.accumulo.core.client.MutationsRejectedException in project accumulo by apache.

the class WorkMaker method addWorkRecord.

protected void addWorkRecord(Text file, Value v, Map<String, String> targets, Table.ID sourceTableId) {
    log.info("Adding work records for {} to targets {}", file, targets);
    try {
        Mutation m = new Mutation(file);
        ReplicationTarget target = new ReplicationTarget();
        DataOutputBuffer buffer = new DataOutputBuffer();
        Text t = new Text();
        for (Entry<String, String> entry : targets.entrySet()) {
            buffer.reset();
            // Set up the writable
            target.setPeerName(entry.getKey());
            target.setRemoteIdentifier(entry.getValue());
            target.setSourceTableId(sourceTableId);
            target.write(buffer);
            // Wrap it in a Text for the mutation
            t.set(buffer.getData(), 0, buffer.getLength());
            // Add it to the work section
            WorkSection.add(m, t, v);
        }
        try {
            writer.addMutation(m);
        } catch (MutationsRejectedException e) {
            log.warn("Failed to write work mutations for replication, will retry", e);
        }
    } catch (IOException e) {
        log.warn("Failed to serialize data to Text, will retry", e);
    } finally {
        try {
            writer.flush();
        } catch (MutationsRejectedException e) {
            log.warn("Failed to write work mutations for replication, will retry", e);
        }
    }
}
Also used : ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) Text(org.apache.hadoop.io.Text) Mutation(org.apache.accumulo.core.data.Mutation) IOException(java.io.IOException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
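
The serialization idiom in addWorkRecord, writing a Writable into a reusable DataOutputBuffer and wrapping the resulting bytes in a Text, is handy whenever several values are packed into one Mutation. A minimal sketch of just that step, assuming a generic Writable; the helper name toText is illustrative:

import java.io.IOException;

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class WritableToTextSketch {
    // Serialize the Writable into the reusable buffer, then copy the bytes
    // into the supplied Text. Reusing both objects avoids a fresh allocation
    // for every entry added to the mutation.
    static void toText(Writable w, DataOutputBuffer buffer, Text out) throws IOException {
        buffer.reset();
        w.write(buffer);
        out.set(buffer.getData(), 0, buffer.getLength());
    }
}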

Example 24 with MutationsRejectedException

use of org.apache.accumulo.core.client.MutationsRejectedException in project accumulo by apache.

the class NamespacesIT method verifyConstraintInheritance.

@Test
public void verifyConstraintInheritance() throws Exception {
    String t1 = namespace + ".1";
    c.namespaceOperations().create(namespace);
    c.tableOperations().create(t1, new NewTableConfiguration().withoutDefaultIterators());
    String constraintClassName = NumericValueConstraint.class.getName();
    assertFalse(c.namespaceOperations().listConstraints(namespace).containsKey(constraintClassName));
    assertFalse(c.tableOperations().listConstraints(t1).containsKey(constraintClassName));
    c.namespaceOperations().addConstraint(namespace, constraintClassName);
    boolean passed = false;
    for (int i = 0; i < 5; i++) {
        if (!c.namespaceOperations().listConstraints(namespace).containsKey(constraintClassName)) {
            Thread.sleep(500);
            continue;
        }
        if (!c.tableOperations().listConstraints(t1).containsKey(constraintClassName)) {
            Thread.sleep(500);
            continue;
        }
        passed = true;
        break;
    }
    assertTrue("Failed to observe newly-added constraint", passed);
    passed = false;
    Integer namespaceNum = null;
    for (int i = 0; i < 5; i++) {
        namespaceNum = c.namespaceOperations().listConstraints(namespace).get(constraintClassName);
        if (null == namespaceNum) {
            Thread.sleep(500);
            continue;
        }
        Integer tableNum = c.tableOperations().listConstraints(t1).get(constraintClassName);
        if (null == tableNum) {
            Thread.sleep(500);
            continue;
        }
        assertEquals(namespaceNum, tableNum);
        passed = true;
    }
    assertTrue("Failed to observe constraint in both table and namespace", passed);
    Mutation m1 = new Mutation("r1");
    Mutation m2 = new Mutation("r2");
    Mutation m3 = new Mutation("r3");
    m1.put("a", "b", new Value("abcde".getBytes(UTF_8)));
    m2.put("e", "f", new Value("123".getBytes(UTF_8)));
    m3.put("c", "d", new Value("zyxwv".getBytes(UTF_8)));
    passed = false;
    for (int i = 0; i < 5; i++) {
        BatchWriter bw = c.createBatchWriter(t1, new BatchWriterConfig());
        bw.addMutations(Arrays.asList(m1, m2, m3));
        try {
            bw.close();
            Thread.sleep(500);
        } catch (MutationsRejectedException e) {
            passed = true;
            assertEquals(1, e.getConstraintViolationSummaries().size());
            assertEquals(2, e.getConstraintViolationSummaries().get(0).getNumberOfViolatingMutations());
            break;
        }
    }
    assertTrue("Failed to see mutations rejected after constraint was added", passed);
    assertNotNull("Namespace constraint ID should not be null", namespaceNum);
    c.namespaceOperations().removeConstraint(namespace, namespaceNum);
    passed = false;
    for (int i = 0; i < 5; i++) {
        if (c.namespaceOperations().listConstraints(namespace).containsKey(constraintClassName)) {
            Thread.sleep(500);
            continue;
        }
        if (c.tableOperations().listConstraints(t1).containsKey(constraintClassName)) {
            Thread.sleep(500);
            continue;
        }
        passed = true;
    }
    assertTrue("Failed to verify that constraint was removed from namespace and table", passed);
    passed = false;
    for (int i = 0; i < 5; i++) {
        BatchWriter bw = c.createBatchWriter(t1, new BatchWriterConfig());
        try {
            bw.addMutations(Arrays.asList(m1, m2, m3));
            bw.close();
        } catch (MutationsRejectedException e) {
            Thread.sleep(500);
            continue;
        }
        passed = true;
    }
    assertTrue("Failed to add mutations that should be allowed", passed);
}
Also used : NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Mutation(org.apache.accumulo.core.data.Mutation) BatchWriter(org.apache.accumulo.core.client.BatchWriter) NumericValueConstraint(org.apache.accumulo.test.constraints.NumericValueConstraint) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) Test(org.junit.Test)
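
The test only counts the constraint violations carried by the exception; application code usually wants to log what was violated. A minimal sketch of unpacking the summaries, assuming the description and count shown are enough for the report:

import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.data.ConstraintViolationSummary;

public class ViolationReportSketch {
    // Render each constraint violation summary carried by the exception,
    // e.g. for a log message or an error response.
    static String describe(MutationsRejectedException e) {
        StringBuilder sb = new StringBuilder();
        for (ConstraintViolationSummary cvs : e.getConstraintViolationSummaries()) {
            sb.append(cvs.getViolationDescription())
                .append(" (")
                .append(cvs.getNumberOfViolatingMutations())
                .append(" violating mutations)\n");
        }
        return sb.toString();
    }
}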

Example 25 with MutationsRejectedException

use of org.apache.accumulo.core.client.MutationsRejectedException in project presto by prestodb.

the class Indexer method flush.

/**
 * Flushes all Mutations in the index writer, as well as all metric mutations to the metrics table.
 * Note that the metrics table is not updated until this method is explicitly called (or implicitly via close).
 */
public void flush() {
    try {
        // Flush index writer
        indexWriter.flush();
        // Write out metrics mutations
        BatchWriter metricsWriter = connector.createBatchWriter(table.getMetricsTableName(), writerConfig);
        metricsWriter.addMutations(getMetricsMutations());
        metricsWriter.close();
        // Re-initialize the metrics
        metrics.clear();
        metrics.put(METRICS_TABLE_ROW_COUNT, new AtomicLong(0));
    } catch (MutationsRejectedException e) {
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Index mutation was rejected by server on flush", e);
    } catch (TableNotFoundException e) {
        throw new PrestoException(ACCUMULO_TABLE_DNE, "Accumulo table does not exist", e);
    }
}
Also used : TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AtomicLong(java.util.concurrent.atomic.AtomicLong) PrestoException(com.facebook.presto.spi.PrestoException) BatchWriter(org.apache.accumulo.core.client.BatchWriter) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
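
In the flush above, the metrics writer is only closed on the success path; if addMutations throws, the writer is abandoned. A minimal sketch of the same write with the writer closed in a finally block; the helper name writeMetrics is an assumption for illustration:

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;

public class MetricsFlushSketch {
    // Write a batch of metric mutations with a short-lived writer and make
    // sure it is closed even when the mutations are rejected. Note that
    // close() also flushes and may itself throw MutationsRejectedException.
    static void writeMetrics(Connector connector, String metricsTable, Iterable<Mutation> mutations)
            throws TableNotFoundException, MutationsRejectedException {
        BatchWriter writer = connector.createBatchWriter(metricsTable, new BatchWriterConfig());
        try {
            writer.addMutations(mutations);
        } finally {
            writer.close();
        }
    }
}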

Aggregations

MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 68
Mutation (org.apache.accumulo.core.data.Mutation): 48
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 40
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 23
Value (org.apache.accumulo.core.data.Value): 23
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 21
Text (org.apache.hadoop.io.Text): 20
Key (org.apache.accumulo.core.data.Key): 13
IOException (java.io.IOException): 12
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 12
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 11
HashMap (java.util.HashMap): 10
ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility): 9
ArrayList (java.util.ArrayList): 8
Test (org.junit.Test): 8
Entry (java.util.Map.Entry): 6
TableExistsException (org.apache.accumulo.core.client.TableExistsException): 6
ConditionalMutation (org.apache.accumulo.core.data.ConditionalMutation): 6
ConstraintViolationSummary (org.apache.accumulo.core.data.ConstraintViolationSummary): 6
PrestoException (com.facebook.presto.spi.PrestoException): 5