Search in sources :

Example 96 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

The following example comes from the class MetadataTableUtil, method deleteTable.

/**
 * Deletes every metadata-table entry belonging to {@code tableId}.
 * <p>
 * When {@code insertDeletes} is true, GC delete markers are first written (and
 * flushed) for each of the table's data files and tablet directories before the
 * metadata rows themselves are removed; as the in-line comment below notes,
 * this ordering makes the operation fault tolerant.
 *
 * @param tableId       id of the table whose metadata is being removed
 * @param insertDeletes whether to queue GC delete markers for the table's
 *                      files/directories before deleting the metadata rows
 * @param context       server context used to build the scanner, batch writer
 *                      and Ample instance
 * @param lock          if non-null, its lock id is stamped on every row
 *                      mutation via putLockID
 * @throws AccumuloException if writing the mutations fails
 */
public static void deleteTable(TableId tableId, boolean insertDeletes, ServerContext context, ServiceLock lock) throws AccumuloException {
    try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
        BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000).setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
        // scan metadata for our table and delete everything we find
        Mutation m = null;
        Ample ample = context.getAmple();
        // Restrict the scan to the metadata rows of this table only.
        ms.setRange(new KeyExtent(tableId, null, null).toMetaRange());
        // insert deletes before deleting data from metadata... this makes the code fault tolerant
        if (insertDeletes) {
            // First pass only needs the file and directory columns.
            ms.fetchColumnFamily(DataFileColumnFamily.NAME);
            ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
            for (Entry<Key, Value> cell : ms) {
                Key key = cell.getKey();
                if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                    // Queue a GC delete marker for this tablet data file.
                    String ref = TabletFileUtil.validate(key.getColumnQualifierData().toString());
                    bw.addMutation(ample.createDeleteMutation(ref));
                }
                if (ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                    // Queue a delete of the tablet directory on all volumes.
                    String uri = GcVolumeUtil.getDeleteTabletOnAllVolumesUri(tableId, cell.getValue().toString());
                    bw.addMutation(ample.createDeleteMutation(uri));
                }
            }
            // Make the delete markers durable before the rows are removed.
            bw.flush();
            // Reset column filters so the second pass sees every column.
            ms.clearColumns();
        }
        // Second pass: delete every column of every metadata row for this
        // table, batching one mutation per row.
        for (Entry<Key, Value> cell : ms) {
            Key key = cell.getKey();
            if (m == null) {
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(context, lock, m);
            }
            // Row boundary crossed: submit the finished mutation, start a new one.
            if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
                bw.addMutation(m);
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(context, lock, m);
            }
            m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
        }
        // Submit the trailing mutation, if any rows were seen at all.
        if (m != null)
            bw.addMutation(m);
    }
}
Also used : IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) ScannerImpl(org.apache.accumulo.core.clientImpl.ScannerImpl) BatchWriterImpl(org.apache.accumulo.core.clientImpl.BatchWriterImpl) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Ample(org.apache.accumulo.core.metadata.schema.Ample) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) Key(org.apache.accumulo.core.data.Key)

Example 97 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project incubator-rya by apache.

The following example comes from the class AccumuloParentMetadataRepository, method writeMetadata.

/**
 * Persists the given {@link MergeParentMetadata} to the parent-metadata table.
 * <p>
 * Bug fix: in the original code, a {@link MutationsRejectedException} thrown
 * while closing the writer in the {@code finally} block replaced (masked) any
 * {@link MergerException} already thrown from the {@code catch} block. Now the
 * first failure is propagated and a close-time failure is attached to it as a
 * suppressed exception instead of overwriting it.
 *
 * @param metadata the merge parent metadata to persist
 * @throws MergerException if the mutations cannot be written or the writer
 *                         cannot be closed cleanly
 */
private void writeMetadata(final MergeParentMetadata metadata) throws MergerException {
    BatchWriter writer = null;
    // Tracks a failure raised inside the try/catch so the finally block can
    // avoid masking it with a close-time exception.
    MergerException pending = null;
    try {
        // Write each result.
        final List<Mutation> mutations = makeWriteMetadataMutations(metadata);
        writer = connector.createBatchWriter(mergeParentMetadataTableName, new BatchWriterConfig());
        writer.addMutations(mutations);
    } catch (final AccumuloException | TableNotFoundException e) {
        pending = new MergerException("Unable to set MergeParentMetadata in Accumulo", e);
        throw pending;
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (final MutationsRejectedException e) {
                final MergerException closeFailure = new MergerException("Could not add results to a MergeParentMetadata table because some of the mutations were rejected.", e);
                if (pending == null) {
                    throw closeFailure;
                }
                // Don't let the close failure hide the original error.
                pending.addSuppressed(closeFailure);
            }
        }
    }
}
Also used : AccumuloException(org.apache.accumulo.core.client.AccumuloException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) MergerException(org.apache.rya.export.api.MergerException) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)

Example 98 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project incubator-rya by apache.

The following example comes from the class Upgrade322ToolTest, method setUp.

/**
 * Creates the mock Rya tables (SPO, PO, OSP, namespace, eval), grants the test
 * user the permissions the upgrade tool needs, then seeds the three triple
 * index tables with the same statements encoded in each table's row layout.
 * Each row string packs its components separated by {@code \u0000} with
 * {@code \u0001} delimiting the type marker.
 */
@Override
public void setUp() throws Exception {
    super.setUp();
    final String spoTable = tablePrefix + RdfCloudTripleStoreConstants.TBL_SPO_SUFFIX;
    final String poTable = tablePrefix + RdfCloudTripleStoreConstants.TBL_PO_SUFFIX;
    final String ospTable = tablePrefix + RdfCloudTripleStoreConstants.TBL_OSP_SUFFIX;
    // NOTE(review): pwd.getBytes() uses the platform default charset — fine for
    // an ASCII test password, but worth confirming.
    connector = new MockInstance(instance).getConnector(user, pwd.getBytes());
    connector.tableOperations().create(spoTable);
    connector.tableOperations().create(poTable);
    connector.tableOperations().create(ospTable);
    connector.tableOperations().create(tablePrefix + RdfCloudTripleStoreConstants.TBL_NS_SUFFIX);
    connector.tableOperations().create(tablePrefix + RdfCloudTripleStoreConstants.TBL_EVAL_SUFFIX);
    // Grant READ on the index tables; the eval table also gets WRITE since the
    // upgrade tool writes to it.
    SecurityOperations secOps = connector.securityOperations();
    secOps.createUser(user, pwd.getBytes(), auths);
    secOps.grantTablePermission(user, spoTable, TablePermission.READ);
    secOps.grantTablePermission(user, poTable, TablePermission.READ);
    secOps.grantTablePermission(user, ospTable, TablePermission.READ);
    secOps.grantTablePermission(user, tablePrefix + RdfCloudTripleStoreConstants.TBL_NS_SUFFIX, TablePermission.READ);
    secOps.grantTablePermission(user, tablePrefix + RdfCloudTripleStoreConstants.TBL_EVAL_SUFFIX, TablePermission.READ);
    secOps.grantTablePermission(user, tablePrefix + RdfCloudTripleStoreConstants.TBL_EVAL_SUFFIX, TablePermission.WRITE);
    // load data
    // OSP table: rows are object \u0000 subject \u0000 predicate.
    final BatchWriter ospWriter = connector.createBatchWriter(ospTable, new BatchWriterConfig());
    ospWriter.addMutation(getMutation("00000000000000000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0000http://here/2010/tracked-data-provenance/ns#longLit\u0001\u0004"));
    ospWriter.addMutation(getMutation("00000000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#intLit\u0001\u0005"));
    ospWriter.addMutation(getMutation("00000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#byteLit\u0001\t"));
    ospWriter.addMutation(getMutation("00001 1.0\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#doubleLit\u0001\u0006"));
    ospWriter.addMutation(getMutation("10\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0000http" + "://here/2010/tracked-data-provenance/ns#shortLit\u0001http://www.w3" + ".org/2001/XMLSchema#short\u0001\b"));
    ospWriter.addMutation(getMutation("10.0\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#floatLit\u0001http" + "://www.w3.org/2001/XMLSchema#float\u0001\b"));
    ospWriter.addMutation(getMutation("3.0.0\u0000urn:org.apache.rya/2012/05#rts\u0000urn:org.apache" + ".rya/2012/05#version\u0001\u0003"));
    ospWriter.addMutation(getMutation("9223370726404375807\u0000http://here/2010/tracked-data-provenance/ns" + "#uuid10\u0000http://here/2010/tracked-data-provenance/ns#dateLit" + "\u0001\u0007"));
    ospWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#Created\u0000http://here" + "/2010/tracked-data-provenance/ns#uuid10\u0000http://www.w3" + ".org/1999/02/22-rdf-syntax-ns#type\u0001\u0002"));
    ospWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#objectuuid1\u0000http" + "://here/2010/tracked-data-provenance/ns#uuid10\u0000http://here/2010" + "/tracked-data-provenance/ns#uriLit\u0001\u0002"));
    ospWriter.addMutation(getMutation("stringLit\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#stringLit\u0001" + "\u0003"));
    ospWriter.addMutation(getMutation("true\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#booleanLit\u0001\n"));
    ospWriter.flush();
    ospWriter.close();
    // SPO table: rows are subject \u0000 predicate \u0000 object.
    final BatchWriter spoWriter = connector.createBatchWriter(spoTable, new BatchWriterConfig());
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10\u0000http://here/2010/tracked-data-provenance/ns#longLit\u000000000000000000000010\u0001\u0004"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#intLit\u000000000000010\u0001\u0005"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#byteLit\u000000000010\u0001\t"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#doubleLit\u000000001 1.0\u0001\u0006"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10\u0000http" + "://here/2010/tracked-data-provenance/ns#shortLit\u000010\u0001http://www.w3" + ".org/2001/XMLSchema#short\u0001\b"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#floatLit\u0001http" + "://www.w3.org/2001/XMLSchema#float\u000010.0\u0001\b"));
    spoWriter.addMutation(getMutation("urn:org.apache.rya/2012/05#rts\u0000urn:org.apache" + ".rya/2012/05#version\u00003.0.0\u0001\u0003"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns" + "#uuid10\u0000http://here/2010/tracked-data-provenance/ns#dateLit" + "\u00009223370726404375807\u0001\u0007"));
    spoWriter.addMutation(getMutation("http://here" + "/2010/tracked-data-provenance/ns#uuid10\u0000http://www.w3" + ".org/1999/02/22-rdf-syntax-ns#type\u0000http://here/2010/tracked-data-provenance/ns#Created\u0001\u0002"));
    spoWriter.addMutation(getMutation("http" + "://here/2010/tracked-data-provenance/ns#uuid10\u0000http://here/2010" + "/tracked-data-provenance/ns#uriLit\u0000http://here/2010/tracked-data-provenance/ns#objectuuid1\u0001\u0002"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#stringLit\u0000stringLit\u0001" + "\u0003"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#booleanLit\u0000true\u0001\n"));
    spoWriter.flush();
    spoWriter.close();
    // PO table: rows are predicate \u0000 object \u0000 subject.
    final BatchWriter poWriter = connector.createBatchWriter(poTable, new BatchWriterConfig());
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#longLit\u000000000000000000000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0004"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#intLit\u000000000000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0005"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#byteLit\u000000000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\t"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#doubleLit\u000000001 1.0\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0006"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#shortLit\u000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001http://www.w3" + ".org/2001/XMLSchema#short\u0001\b"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#floatLit\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001http" + "://www.w3.org/2001/XMLSchema#float\u000010.0\u0001\b"));
    poWriter.addMutation(getMutation("urn:org.apache" + ".rya/2012/05#version\u00003.0.0\u0000urn:org.apache.rya/2012/05#rts\u0001\u0003"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#dateLit" + "\u00009223370726404375807\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0007"));
    poWriter.addMutation(getMutation("http://www.w3" + ".org/1999/02/22-rdf-syntax-ns#type\u0000http://here/2010/tracked-data-provenance/ns#Created\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0002"));
    poWriter.addMutation(getMutation("http://here/2010" + "/tracked-data-provenance/ns#uriLit\u0000http://here/2010/tracked-data-provenance/ns#objectuuid1\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0002"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#stringLit\u0000stringLit\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001" + "\u0003"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#booleanLit\u0000true\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\n"));
    poWriter.flush();
    poWriter.close();
}
Also used : MockInstance(org.apache.accumulo.core.client.mock.MockInstance) SecurityOperations(org.apache.accumulo.core.client.admin.SecurityOperations) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter)

Example 99 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project incubator-rya by apache.

The following example comes from the class RyaOutputFormatTest, method testTemporalIndexing.

/**
 * Verifies that statements written through the output format with only the
 * temporal index enabled can be queried back by instant ordering.
 * <p>
 * Fix: the original used leading-zero integer literals ({@code 00}, {@code 01},
 * {@code 02}, {@code 03}) which Java parses as octal. The values happened to
 * coincide with the intended decimals here, but the pattern is error-prone
 * (e.g. {@code 08} would not compile), so they are written as plain decimals.
 */
@Test
public void testTemporalIndexing() throws Exception {
    // Four instants one second apart; the last two are identical, so the
    // "after instants[1]" query must return both.
    final TemporalInstant[] instants = { new TemporalInstantRfc3339(2015, 12, 30, 12, 0, 1), new TemporalInstantRfc3339(2015, 12, 30, 12, 0, 2), new TemporalInstantRfc3339(2015, 12, 30, 12, 0, 3), new TemporalInstantRfc3339(2015, 12, 30, 12, 0, 3) };
    final Statement[] statements = new Statement[instants.length];
    // Enable only the temporal index for this job.
    RyaOutputFormat.setCoreTablesEnabled(job, false);
    RyaOutputFormat.setFreeTextEnabled(job, false);
    RyaOutputFormat.setTemporalEnabled(job, true);
    RyaOutputFormat.setEntityEnabled(job, false);
    final ValueFactory vf = new ValueFactoryImpl();
    for (int i = 0; i < instants.length; i++) {
        final RyaType time = RdfToRyaConversions.convertLiteral(vf.createLiteral(instants[i].toString()));
        final RyaStatement input = RyaStatement.builder().setSubject(new RyaURI(GRAPH + ":s")).setPredicate(new RyaURI(GRAPH + ":p")).setObject(time).build();
        write(input);
        statements[i] = RyaToRdfConversions.convertStatement(input);
    }
    // Stand up a temporal indexer pointed at the same Accumulo instance to
    // query back what was written.
    final AccumuloTemporalIndexer temporal = new AccumuloTemporalIndexer();
    temporal.setConf(conf);
    Connector connector = ConfigUtils.getConnector(conf);
    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(new BatchWriterConfig());
    temporal.setConnector(connector);
    temporal.setMultiTableBatchWriter(mtbw);
    temporal.init();
    final Set<Statement> empty = new HashSet<>();
    final Set<Statement> head = new HashSet<>();
    final Set<Statement> tail = new HashSet<>();
    head.add(statements[0]);
    tail.add(statements[2]);
    tail.add(statements[3]);
    // Nothing strictly before the first instant or strictly after the last.
    Assert.assertEquals(empty, getSet(temporal.queryInstantBeforeInstant(instants[0], new StatementConstraints())));
    Assert.assertEquals(empty, getSet(temporal.queryInstantAfterInstant(instants[3], new StatementConstraints())));
    Assert.assertEquals(head, getSet(temporal.queryInstantBeforeInstant(instants[1], new StatementConstraints())));
    Assert.assertEquals(tail, getSet(temporal.queryInstantAfterInstant(instants[1], new StatementConstraints())));
    // NOTE(review): mtbw is not closed separately — presumably temporal.close()
    // tears it down; confirm against AccumuloTemporalIndexer.
    temporal.close();
}
Also used : Connector(org.apache.accumulo.core.client.Connector) MultiTableBatchWriter(org.apache.accumulo.core.client.MultiTableBatchWriter) Statement(org.openrdf.model.Statement) RyaStatement(org.apache.rya.api.domain.RyaStatement) TemporalInstantRfc3339(org.apache.rya.indexing.TemporalInstantRfc3339) ValueFactoryImpl(org.openrdf.model.impl.ValueFactoryImpl) RyaStatement(org.apache.rya.api.domain.RyaStatement) ValueFactory(org.openrdf.model.ValueFactory) TemporalInstant(org.apache.rya.indexing.TemporalInstant) RyaType(org.apache.rya.api.domain.RyaType) RyaURI(org.apache.rya.api.domain.RyaURI) StatementConstraints(org.apache.rya.indexing.StatementConstraints) AccumuloTemporalIndexer(org.apache.rya.indexing.accumulo.temporal.AccumuloTemporalIndexer) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 100 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project incubator-rya by apache.

The following example comes from the class RyaOutputFormat, method getTemporalIndexer.

/**
 * Builds the Accumulo-backed temporal indexer for this job, or returns
 * {@code null} when temporal indexing is disabled in the configuration
 * (the {@code ENABLE_TEMPORAL} flag defaults to enabled).
 *
 * @param conf the job configuration
 * @return an initialized temporal indexer, or {@code null} if disabled
 * @throws IOException if an Accumulo connection cannot be established
 */
private static TemporalIndexer getTemporalIndexer(final Configuration conf) throws IOException {
    final boolean temporalEnabled = conf.getBoolean(ENABLE_TEMPORAL, true);
    if (!temporalEnabled) {
        return null;
    }
    final AccumuloTemporalIndexer indexer = new AccumuloTemporalIndexer();
    indexer.setConf(conf);
    final Connector accumuloConnector;
    try {
        accumuloConnector = ConfigUtils.getConnector(conf);
    } catch (final AccumuloException | AccumuloSecurityException e) {
        // Surface connection problems as IOException per this method's contract.
        throw new IOException("Error when attempting to create a connection for writing the temporal index.", e);
    }
    final MultiTableBatchWriter batchWriter = accumuloConnector.createMultiTableBatchWriter(new BatchWriterConfig());
    indexer.setConnector(accumuloConnector);
    indexer.setMultiTableBatchWriter(batchWriter);
    indexer.init();
    return indexer;
}
Also used : Connector(org.apache.accumulo.core.client.Connector) AccumuloException(org.apache.accumulo.core.client.AccumuloException) MultiTableBatchWriter(org.apache.accumulo.core.client.MultiTableBatchWriter) AccumuloTemporalIndexer(org.apache.rya.indexing.accumulo.temporal.AccumuloTemporalIndexer) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) IOException(java.io.IOException)

Aggregations

BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig)182 BatchWriter (org.apache.accumulo.core.client.BatchWriter)135 Mutation (org.apache.accumulo.core.data.Mutation)131 Value (org.apache.accumulo.core.data.Value)88 Text (org.apache.hadoop.io.Text)60 Key (org.apache.accumulo.core.data.Key)59 Test (org.junit.Test)58 Scanner (org.apache.accumulo.core.client.Scanner)57 Connector (org.apache.accumulo.core.client.Connector)38 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)33 MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException)28 AccumuloException (org.apache.accumulo.core.client.AccumuloException)26 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)24 Authorizations (org.apache.accumulo.core.security.Authorizations)22 Range (org.apache.accumulo.core.data.Range)20 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)19 PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken)19 ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility)19 Entry (java.util.Map.Entry)18 IOException (java.io.IOException)14