Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
The class MetadataTableUtil, method deleteTable.
public static void deleteTable(TableId tableId, boolean insertDeletes, ServerContext context, ServiceLock lock) throws AccumuloException {
    try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
            BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID,
                    new BatchWriterConfig().setMaxMemory(1000000)
                            .setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
        // scan metadata for our table and delete everything we find
        Mutation m = null;
        Ample ample = context.getAmple();
        ms.setRange(new KeyExtent(tableId, null, null).toMetaRange());
        // insert deletes before deleting data from metadata... this makes the code fault tolerant
        if (insertDeletes) {
            ms.fetchColumnFamily(DataFileColumnFamily.NAME);
            ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
            for (Entry<Key, Value> cell : ms) {
                Key key = cell.getKey();
                if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                    String ref = TabletFileUtil.validate(key.getColumnQualifierData().toString());
                    bw.addMutation(ample.createDeleteMutation(ref));
                }
                if (ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                    String uri = GcVolumeUtil.getDeleteTabletOnAllVolumesUri(tableId, cell.getValue().toString());
                    bw.addMutation(ample.createDeleteMutation(uri));
                }
            }
            bw.flush();
            ms.clearColumns();
        }
        for (Entry<Key, Value> cell : ms) {
            Key key = cell.getKey();
            if (m == null) {
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(context, lock, m);
            }
            if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
                bw.addMutation(m);
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(context, lock, m);
            }
            m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
        }
        if (m != null)
            bw.addMutation(m);
    }
}
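The BatchWriterConfig in this method buffers roughly 1 MB of mutations, flushes at least every 120 seconds, and uses two write threads. The same tuning can be expressed against the public client API instead of the internal BatchWriterImpl. The following is a minimal sketch only; the connector, table name, row, and column values are hypothetical placeholders, not part of the Accumulo code above.

import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;

class BatchWriterConfigSketch {
    static void deleteOneColumn(Connector connector) throws TableNotFoundException, MutationsRejectedException {
        BatchWriterConfig config = new BatchWriterConfig()
                .setMaxMemory(1_000_000L)                        // buffer about 1 MB of mutations
                .setMaxLatency(120_000L, TimeUnit.MILLISECONDS)  // flush at least every two minutes
                .setMaxWriteThreads(2);                          // two background send threads
        try (BatchWriter bw = connector.createBatchWriter("example_table", config)) {
            Mutation m = new Mutation("row1");
            m.putDelete("exampleFamily", "exampleQualifier");    // queue a delete for one column
            bw.addMutation(m);
        }                                                        // close() flushes remaining mutations
    }
}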
Use of org.apache.accumulo.core.client.BatchWriterConfig in project incubator-rya by apache.
The class AccumuloParentMetadataRepository, method writeMetadata.
private void writeMetadata(final MergeParentMetadata metadata) throws MergerException {
    BatchWriter writer = null;
    try {
        // Write each result.
        final List<Mutation> mutations = makeWriteMetadataMutations(metadata);
        writer = connector.createBatchWriter(mergeParentMetadataTableName, new BatchWriterConfig());
        writer.addMutations(mutations);
    } catch (final AccumuloException | TableNotFoundException e) {
        throw new MergerException("Unable to set MergeParentMetadata in Accumulo", e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (final MutationsRejectedException e) {
                throw new MergerException("Could not add results to a MergeParentMetadata table because some of the mutations were rejected.", e);
            }
        }
    }
}
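Because BatchWriter is AutoCloseable, the same write could also be structured with try-with-resources, folding the explicit close() handling in the finally block into the try statement. This is only a sketch of that alternative, not the project's code; it reuses the connector field, table name field, helper method, and exception types assumed from the surrounding class, with imports as in the snippets above.

private void writeMetadata(final MergeParentMetadata metadata) throws MergerException {
    // Sketch: try-with-resources closes (and flushes) the writer automatically.
    try (BatchWriter writer = connector.createBatchWriter(mergeParentMetadataTableName, new BatchWriterConfig())) {
        writer.addMutations(makeWriteMetadataMutations(metadata));
    } catch (final AccumuloException | TableNotFoundException e) {
        // A MutationsRejectedException from close() is an AccumuloException, so it is caught here as well.
        throw new MergerException("Unable to set MergeParentMetadata in Accumulo", e);
    }
}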
Use of org.apache.accumulo.core.client.BatchWriterConfig in project incubator-rya by apache.
The class Upgrade322ToolTest, method setUp.
@Override
public void setUp() throws Exception {
    super.setUp();
    final String spoTable = tablePrefix + RdfCloudTripleStoreConstants.TBL_SPO_SUFFIX;
    final String poTable = tablePrefix + RdfCloudTripleStoreConstants.TBL_PO_SUFFIX;
    final String ospTable = tablePrefix + RdfCloudTripleStoreConstants.TBL_OSP_SUFFIX;
    connector = new MockInstance(instance).getConnector(user, pwd.getBytes());
    connector.tableOperations().create(spoTable);
    connector.tableOperations().create(poTable);
    connector.tableOperations().create(ospTable);
    connector.tableOperations().create(tablePrefix + RdfCloudTripleStoreConstants.TBL_NS_SUFFIX);
    connector.tableOperations().create(tablePrefix + RdfCloudTripleStoreConstants.TBL_EVAL_SUFFIX);
    SecurityOperations secOps = connector.securityOperations();
    secOps.createUser(user, pwd.getBytes(), auths);
    secOps.grantTablePermission(user, spoTable, TablePermission.READ);
    secOps.grantTablePermission(user, poTable, TablePermission.READ);
    secOps.grantTablePermission(user, ospTable, TablePermission.READ);
    secOps.grantTablePermission(user, tablePrefix + RdfCloudTripleStoreConstants.TBL_NS_SUFFIX, TablePermission.READ);
    secOps.grantTablePermission(user, tablePrefix + RdfCloudTripleStoreConstants.TBL_EVAL_SUFFIX, TablePermission.READ);
    secOps.grantTablePermission(user, tablePrefix + RdfCloudTripleStoreConstants.TBL_EVAL_SUFFIX, TablePermission.WRITE);
    // load data
    final BatchWriter ospWriter = connector.createBatchWriter(ospTable, new BatchWriterConfig());
    ospWriter.addMutation(getMutation("00000000000000000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0000http://here/2010/tracked-data-provenance/ns#longLit\u0001\u0004"));
    ospWriter.addMutation(getMutation("00000000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#intLit\u0001\u0005"));
    ospWriter.addMutation(getMutation("00000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#byteLit\u0001\t"));
    ospWriter.addMutation(getMutation("00001 1.0\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#doubleLit\u0001\u0006"));
    ospWriter.addMutation(getMutation("10\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0000http" + "://here/2010/tracked-data-provenance/ns#shortLit\u0001http://www.w3" + ".org/2001/XMLSchema#short\u0001\b"));
    ospWriter.addMutation(getMutation("10.0\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#floatLit\u0001http" + "://www.w3.org/2001/XMLSchema#float\u0001\b"));
    ospWriter.addMutation(getMutation("3.0.0\u0000urn:org.apache.rya/2012/05#rts\u0000urn:org.apache" + ".rya/2012/05#version\u0001\u0003"));
    ospWriter.addMutation(getMutation("9223370726404375807\u0000http://here/2010/tracked-data-provenance/ns" + "#uuid10\u0000http://here/2010/tracked-data-provenance/ns#dateLit" + "\u0001\u0007"));
    ospWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#Created\u0000http://here" + "/2010/tracked-data-provenance/ns#uuid10\u0000http://www.w3" + ".org/1999/02/22-rdf-syntax-ns#type\u0001\u0002"));
    ospWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#objectuuid1\u0000http" + "://here/2010/tracked-data-provenance/ns#uuid10\u0000http://here/2010" + "/tracked-data-provenance/ns#uriLit\u0001\u0002"));
    ospWriter.addMutation(getMutation("stringLit\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#stringLit\u0001" + "\u0003"));
    ospWriter.addMutation(getMutation("true\u0000http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#booleanLit\u0001\n"));
    ospWriter.flush();
    ospWriter.close();
    final BatchWriter spoWriter = connector.createBatchWriter(spoTable, new BatchWriterConfig());
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10\u0000http://here/2010/tracked-data-provenance/ns#longLit\u000000000000000000000010\u0001\u0004"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#intLit\u000000000000010\u0001\u0005"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#byteLit\u000000000010\u0001\t"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#doubleLit\u000000001 1.0\u0001\u0006"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10\u0000http" + "://here/2010/tracked-data-provenance/ns#shortLit\u000010\u0001http://www.w3" + ".org/2001/XMLSchema#short\u0001\b"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#floatLit\u0001http" + "://www.w3.org/2001/XMLSchema#float\u000010.0\u0001\b"));
    spoWriter.addMutation(getMutation("urn:org.apache.rya/2012/05#rts\u0000urn:org.apache" + ".rya/2012/05#version\u00003.0.0\u0001\u0003"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns" + "#uuid10\u0000http://here/2010/tracked-data-provenance/ns#dateLit" + "\u00009223370726404375807\u0001\u0007"));
    spoWriter.addMutation(getMutation("http://here" + "/2010/tracked-data-provenance/ns#uuid10\u0000http://www.w3" + ".org/1999/02/22-rdf-syntax-ns#type\u0000http://here/2010/tracked-data-provenance/ns#Created\u0001\u0002"));
    spoWriter.addMutation(getMutation("http" + "://here/2010/tracked-data-provenance/ns#uuid10\u0000http://here/2010" + "/tracked-data-provenance/ns#uriLit\u0000http://here/2010/tracked-data-provenance/ns#objectuuid1\u0001\u0002"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#stringLit\u0000stringLit\u0001" + "\u0003"));
    spoWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#uuid10" + "\u0000http://here/2010/tracked-data-provenance/ns#booleanLit\u0000true\u0001\n"));
    spoWriter.flush();
    spoWriter.close();
    final BatchWriter poWriter = connector.createBatchWriter(poTable, new BatchWriterConfig());
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#longLit\u000000000000000000000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0004"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#intLit\u000000000000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0005"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#byteLit\u000000000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\t"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#doubleLit\u000000001 1.0\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0006"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#shortLit\u000010\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001http://www.w3" + ".org/2001/XMLSchema#short\u0001\b"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#floatLit\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001http" + "://www.w3.org/2001/XMLSchema#float\u000010.0\u0001\b"));
    poWriter.addMutation(getMutation("urn:org.apache" + ".rya/2012/05#version\u00003.0.0\u0000urn:org.apache.rya/2012/05#rts\u0001\u0003"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#dateLit" + "\u00009223370726404375807\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0007"));
    poWriter.addMutation(getMutation("http://www.w3" + ".org/1999/02/22-rdf-syntax-ns#type\u0000http://here/2010/tracked-data-provenance/ns#Created\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0002"));
    poWriter.addMutation(getMutation("http://here/2010" + "/tracked-data-provenance/ns#uriLit\u0000http://here/2010/tracked-data-provenance/ns#objectuuid1\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\u0002"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#stringLit\u0000stringLit\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001" + "\u0003"));
    poWriter.addMutation(getMutation("http://here/2010/tracked-data-provenance/ns#booleanLit\u0000true\u0000http://here/2010/tracked-data-provenance/ns#uuid10\u0001\n"));
    poWriter.flush();
    poWriter.close();
}
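The test above relies on a MockInstance connector and the default BatchWriterConfig for each table writer. A stripped-down sketch of that pattern, with a placeholder instance name, credentials, table, and cell rather than the test's values (imports as in the snippets above plus MockInstance and PasswordToken), could look like this:

// Sketch only: MockInstance plus a default BatchWriterConfig, outside the test harness.
static void writeOneRowToMockTable() throws Exception {
    Connector conn = new MockInstance("testInstance").getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("example_osp");                        // placeholder table name
    try (BatchWriter bw = conn.createBatchWriter("example_osp", new BatchWriterConfig())) {
        Mutation m = new Mutation("rowA");
        m.put("cf", "cq", "value");                                      // one illustrative cell
        bw.addMutation(m);
    }                                                                    // close() flushes the writer
}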
Use of org.apache.accumulo.core.client.BatchWriterConfig in project incubator-rya by apache.
The class RyaOutputFormatTest, method testTemporalIndexing.
@Test
public void testTemporalIndexing() throws Exception {
    final TemporalInstant[] instants = { new TemporalInstantRfc3339(2015, 12, 30, 12, 00, 01),
            new TemporalInstantRfc3339(2015, 12, 30, 12, 00, 02),
            new TemporalInstantRfc3339(2015, 12, 30, 12, 00, 03),
            new TemporalInstantRfc3339(2015, 12, 30, 12, 00, 03) };
    final Statement[] statements = new Statement[instants.length];
    RyaOutputFormat.setCoreTablesEnabled(job, false);
    RyaOutputFormat.setFreeTextEnabled(job, false);
    RyaOutputFormat.setTemporalEnabled(job, true);
    RyaOutputFormat.setEntityEnabled(job, false);
    final ValueFactory vf = new ValueFactoryImpl();
    for (int i = 0; i < instants.length; i++) {
        final RyaType time = RdfToRyaConversions.convertLiteral(vf.createLiteral(instants[i].toString()));
        final RyaStatement input = RyaStatement.builder().setSubject(new RyaURI(GRAPH + ":s")).setPredicate(new RyaURI(GRAPH + ":p")).setObject(time).build();
        write(input);
        statements[i] = RyaToRdfConversions.convertStatement(input);
    }
    final AccumuloTemporalIndexer temporal = new AccumuloTemporalIndexer();
    temporal.setConf(conf);
    Connector connector = ConfigUtils.getConnector(conf);
    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(new BatchWriterConfig());
    temporal.setConnector(connector);
    temporal.setMultiTableBatchWriter(mtbw);
    temporal.init();
    final Set<Statement> empty = new HashSet<>();
    final Set<Statement> head = new HashSet<>();
    final Set<Statement> tail = new HashSet<>();
    head.add(statements[0]);
    tail.add(statements[2]);
    tail.add(statements[3]);
    Assert.assertEquals(empty, getSet(temporal.queryInstantBeforeInstant(instants[0], new StatementConstraints())));
    Assert.assertEquals(empty, getSet(temporal.queryInstantAfterInstant(instants[3], new StatementConstraints())));
    Assert.assertEquals(head, getSet(temporal.queryInstantBeforeInstant(instants[1], new StatementConstraints())));
    Assert.assertEquals(tail, getSet(temporal.queryInstantAfterInstant(instants[1], new StatementConstraints())));
    temporal.close();
}
Use of org.apache.accumulo.core.client.BatchWriterConfig in project incubator-rya by apache.
The class RyaOutputFormat, method getTemporalIndexer.
private static TemporalIndexer getTemporalIndexer(final Configuration conf) throws IOException {
    if (!conf.getBoolean(ENABLE_TEMPORAL, true)) {
        return null;
    }
    final AccumuloTemporalIndexer temporal = new AccumuloTemporalIndexer();
    temporal.setConf(conf);
    Connector connector;
    try {
        connector = ConfigUtils.getConnector(conf);
    } catch (AccumuloException | AccumuloSecurityException e) {
        throw new IOException("Error when attempting to create a connection for writing the temporal index.", e);
    }
    final MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(new BatchWriterConfig());
    temporal.setConnector(connector);
    temporal.setMultiTableBatchWriter(mtbw);
    temporal.init();
    return temporal;
}
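Both Rya snippets above funnel index writes through a MultiTableBatchWriter, so a single BatchWriterConfig governs the per-table writers for every index table. A minimal sketch of that pattern on its own follows; the table name, row, cell values, and thread count are placeholders rather than Rya's schema or settings (imports as in the snippets above plus MultiTableBatchWriter).

// Sketch only: one MultiTableBatchWriter sharing a single BatchWriterConfig across tables.
static void writeThroughMultiTableWriter(Connector connector)
        throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    BatchWriterConfig config = new BatchWriterConfig().setMaxWriteThreads(4);   // illustrative tuning
    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(config);
    try {
        BatchWriter writer = mtbw.getBatchWriter("example_temporal_index");     // placeholder table name
        Mutation m = new Mutation("eventRow");
        m.put("instant", "2015-12-30T12:00:01Z", "");                           // illustrative cell only
        writer.addMutation(m);
        mtbw.flush();
    } finally {
        mtbw.close();                                                           // closes every per-table writer
    }
}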