Use of org.apache.accumulo.tserver.logger.LogFileValue in project accumulo by apache.
The class BatchWriterReplicationReplayer, method replicateLog.
@Override
public long replicateLog(ClientContext context, String tableName, WalEdits data)
    throws RemoteReplicationException, AccumuloException, AccumuloSecurityException {
  final LogFileKey key = new LogFileKey();
  final LogFileValue value = new LogFileValue();
  final long memoryInBytes = context.getConfiguration().getAsBytes(Property.TSERV_REPLICATION_BW_REPLAYER_MEMORY);
  BatchWriter bw = null;
  long mutationsApplied = 0L;
  try {
    for (ByteBuffer edit : data.getEdits()) {
      DataInputStream dis = new DataInputStream(ByteBufferUtil.toByteArrayInputStream(edit));
      try {
        key.readFields(dis);
        // TODO this is brittle because AccumuloReplicaSystem isn't actually calling LogFileValue.write,
        // but we're expecting what we receive to be readable by LogFileValue.
        value.readFields(dis);
      } catch (IOException e) {
        log.error("Could not deserialize edit from stream", e);
        throw new RemoteReplicationException(RemoteReplicationErrorCode.COULD_NOT_DESERIALIZE, "Could not deserialize edit from stream");
      }
      // Create the BatchWriter if we don't already have one.
      if (null == bw) {
        BatchWriterConfig bwConfig = new BatchWriterConfig();
        bwConfig.setMaxMemory(memoryInBytes);
        try {
          bw = context.getConnector().createBatchWriter(tableName, bwConfig);
        } catch (TableNotFoundException e) {
          throw new RemoteReplicationException(RemoteReplicationErrorCode.TABLE_DOES_NOT_EXIST, "Table " + tableName + " does not exist");
        }
      }
      log.info("Applying {} mutations to table {} as part of batch", value.mutations.size(), tableName);
      // If we got a ServerMutation, we have to preserve the systemTimestamp; otherwise
      // the local system will assign a new timestamp.
      List<Mutation> mutationsCopy = new ArrayList<>(value.mutations.size());
      long mutationsCopied = 0L;
      for (Mutation orig : value.mutations) {
        if (orig instanceof ServerMutation) {
          mutationsCopied++;
          ServerMutation origServer = (ServerMutation) orig;
          Mutation copy = new Mutation(orig.getRow());
          for (ColumnUpdate update : orig.getUpdates()) {
            long timestamp;
            // If the update doesn't have a timestamp, pull it from the ServerMutation
            if (!update.hasTimestamp()) {
              timestamp = origServer.getSystemTimestamp();
            } else {
              timestamp = update.getTimestamp();
            }
            // TODO ACCUMULO-2937 cache the CVs
            if (update.isDeleted()) {
              copy.putDelete(update.getColumnFamily(), update.getColumnQualifier(), new ColumnVisibility(update.getColumnVisibility()), timestamp);
            } else {
              copy.put(update.getColumnFamily(), update.getColumnQualifier(), new ColumnVisibility(update.getColumnVisibility()), timestamp, update.getValue());
            }
          }
          // We also need to preserve the replicationSource information to prevent cycles
          Set<String> replicationSources = orig.getReplicationSources();
          if (null != replicationSources && !replicationSources.isEmpty()) {
            for (String replicationSource : replicationSources) {
              copy.addReplicationSource(replicationSource);
            }
          }
          mutationsCopy.add(copy);
        } else {
          mutationsCopy.add(orig);
        }
      }
      log.debug("Copied {} mutations to ensure server-assigned timestamps are propagated", mutationsCopied);
      try {
        bw.addMutations(mutationsCopy);
      } catch (MutationsRejectedException e) {
        log.error("Could not apply mutations to {}", tableName, e);
        throw new RemoteReplicationException(RemoteReplicationErrorCode.COULD_NOT_APPLY, "Could not apply mutations to " + tableName);
      }
      log.debug("{} mutations added to the BatchWriter", mutationsCopy.size());
      mutationsApplied += mutationsCopy.size();
    }
  } finally {
    if (null != bw) {
      try {
        bw.close();
      } catch (MutationsRejectedException e) {
        log.error("Could not apply mutations to {}", tableName, e);
        throw new RemoteReplicationException(RemoteReplicationErrorCode.COULD_NOT_APPLY, "Could not apply mutations to " + tableName);
      }
    }
  }
  log.info("Applied {} mutations in total to {}", mutationsApplied, tableName);
  return mutationsApplied;
}
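For orientation, each element of data.getEdits() is expected to be a serialized LogFileKey immediately followed by a serialized LogFileValue. A minimal sketch of assembling such a payload by hand (WalEditsSketch is a hypothetical helper for illustration; as the TODO above notes, AccumuloReplicaSystem does not actually produce its edits via LogFileValue.write):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collections;
import org.apache.accumulo.core.replication.thrift.WalEdits;
import org.apache.accumulo.server.data.ServerMutation;
import org.apache.accumulo.tserver.logger.LogEvents;
import org.apache.accumulo.tserver.logger.LogFileKey;
import org.apache.accumulo.tserver.logger.LogFileValue;
import org.apache.hadoop.io.Text;

public class WalEditsSketch {

  // Builds a WalEdits holding a single edit: one LogFileKey/LogFileValue pair,
  // serialized back-to-back, which is the layout replicateLog reads back with readFields.
  public static WalEdits singleMutationEdit() throws IOException {
    LogFileKey key = new LogFileKey();
    key.event = LogEvents.MUTATION;
    key.seq = 1L;
    key.tid = 1;
    key.filename = "/accumulo/wals/tserver+port/wal"; // illustrative placeholder path

    LogFileValue value = new LogFileValue();
    value.mutations = Collections.singletonList(new ServerMutation(new Text("row")));

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try (DataOutputStream dos = new DataOutputStream(baos)) {
      key.write(dos);
      value.write(dos);
    }
    WalEdits edits = new WalEdits();
    edits.addToEdits(ByteBuffer.wrap(baos.toByteArray()));
    return edits;
  }
}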
Use of org.apache.accumulo.tserver.logger.LogFileValue in project accumulo by apache.
The class SortedLogRecovery, method playbackMutations.
private void playbackMutations(MultiReader reader, int tid, LastStartToFinish lastStartToFinish, MutationReceiver mr) throws IOException {
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();
  // Playback mutations after the last stop to finish
  log.info("Scanning for mutations starting at sequence number {} for tid {}", lastStartToFinish.seq, tid);
  key.event = MUTATION;
  key.tid = tid;
  // The seq number for the minor compaction start is now the same as the
  // last update made to memory. Scan up to that mutation, but not past it.
  key.seq = lastStartToFinish.seq;
  reader.seek(key);
  while (true) {
    if (!reader.next(key, value))
      break;
    if (key.tid != tid)
      break;
    if (key.event == MUTATION) {
      mr.receive(value.mutations.get(0));
    } else if (key.event == MANY_MUTATIONS) {
      for (Mutation m : value.mutations) {
        mr.receive(m);
      }
    } else {
      throw new RuntimeException("unexpected log key type: " + key.event);
    }
  }
}
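SortedLogRecovery hands each recovered mutation to the supplied MutationReceiver. A minimal sketch of a receiver, assuming the interface exposes only the receive(Mutation) method used above (the package and signature are inferred, not confirmed by this snippet):

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.tserver.log.MutationReceiver;

// Counts recovered mutations; a real receiver would re-apply each mutation to the
// tablet's in-memory map rather than merely counting them.
public class CountingReceiver implements MutationReceiver {

  private long received = 0;

  @Override
  public void receive(Mutation m) {
    received++; // one call per mutation replayed from the sorted WAL
  }

  public long getReceived() {
    return received;
  }
}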
Use of org.apache.accumulo.tserver.logger.LogFileValue in project accumulo by apache.
The class DfsLogger, method logManyTablets.
public LoggerOperation logManyTablets(List<TabletMutations> mutations) throws IOException {
  Durability durability = Durability.NONE;
  List<Pair<LogFileKey, LogFileValue>> data = new ArrayList<>();
  for (TabletMutations tabletMutations : mutations) {
    LogFileKey key = new LogFileKey();
    key.event = MANY_MUTATIONS;
    key.seq = tabletMutations.getSeq();
    key.tid = tabletMutations.getTid();
    LogFileValue value = new LogFileValue();
    value.mutations = tabletMutations.getMutations();
    data.add(new Pair<>(key, value));
    // Track the strictest durability requested by any tablet in the group
    if (tabletMutations.getDurability().ordinal() > durability.ordinal()) {
      durability = tabletMutations.getDurability();
    }
  }
  return logFileData(data, chooseDurabilityForGroupCommit(mutations));
}
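Note that the strictest-durability value accumulated in the loop is not what gets passed on; the group durability actually comes from chooseDurabilityForGroupCommit(mutations). Assuming that helper applies the same max-by-ordinal rule over the whole batch (an assumption, since its body is not shown in this snippet), its effect can be sketched as:

import java.util.List;
import org.apache.accumulo.core.client.Durability;
import org.apache.accumulo.tserver.TabletMutations;

final class GroupDurabilitySketch {

  // A hedged sketch of the rule the loop above implies: pick the strongest
  // durability requested by any tablet in the group. Higher ordinals correspond
  // to stronger guarantees, with SYNC the strongest.
  static Durability strictestOf(List<TabletMutations> mutations) {
    Durability result = Durability.NONE;
    for (TabletMutations tm : mutations) {
      if (tm.getDurability().ordinal() > result.ordinal()) {
        result = tm.getDurability();
      }
    }
    return result;
  }
}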
Use of org.apache.accumulo.tserver.logger.LogFileValue in project accumulo by apache.
The class AccumuloReplicaSystemTest, method onlyChooseMutationsForDesiredTableWithClosedStatus.
@Test
public void onlyChooseMutationsForDesiredTableWithClosedStatus() throws Exception {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataOutputStream dos = new DataOutputStream(baos);
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();
  // What is seq used for?
  key.seq = 1L;
  /*
   * Disclaimer: the following series of LogFileKey and LogFileValue pairs has *no* bearing whatsoever in reality regarding what these entries would
   * actually look like in a WAL. They are solely for testing that each LogEvents type is handled; order is not important.
   */
  key.event = LogEvents.DEFINE_TABLET;
  key.tablet = new KeyExtent(Table.ID.of("1"), null, null);
  key.tid = 1;
  key.write(dos);
  value.write(dos);
  key.tablet = null;
  key.event = LogEvents.MUTATION;
  key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
  key.write(dos);
  value.write(dos);
  key.event = LogEvents.DEFINE_TABLET;
  key.tablet = new KeyExtent(Table.ID.of("2"), null, null);
  key.tid = 2;
  value.mutations = Collections.emptyList();
  key.write(dos);
  value.write(dos);
  key.event = LogEvents.OPEN;
  key.tid = LogFileKey.VERSION;
  key.tserverSession = "foobar";
  key.write(dos);
  value.write(dos);
  key.tablet = null;
  key.event = LogEvents.MUTATION;
  key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  value.mutations = Arrays.asList(new ServerMutation(new Text("badrow")));
  key.write(dos);
  value.write(dos);
  key.event = LogEvents.COMPACTION_START;
  key.tid = 2;
  key.filename = "/accumulo/tables/1/t-000001/A000001.rf";
  value.mutations = Collections.emptyList();
  key.write(dos);
  value.write(dos);
  key.event = LogEvents.DEFINE_TABLET;
  key.tablet = new KeyExtent(Table.ID.of("1"), null, null);
  key.tid = 3;
  value.mutations = Collections.emptyList();
  key.write(dos);
  value.write(dos);
  key.event = LogEvents.COMPACTION_FINISH;
  key.tid = 6;
  value.mutations = Collections.emptyList();
  key.write(dos);
  value.write(dos);
  key.tablet = null;
  key.event = LogEvents.MUTATION;
  key.tid = 3;
  key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
  key.write(dos);
  value.write(dos);
  dos.close();
  Map<String, String> confMap = new HashMap<>();
  confMap.put(Property.REPLICATION_NAME.getKey(), "source");
  AccumuloConfiguration conf = new ConfigurationCopy(confMap);
  AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
  ars.setConf(conf);
  // Marking the file closed with an infinite end means the begin offset gets bumped up to Long.MAX_VALUE;
  // if the file were still open, more data could be appended that we would still need to process.
  Status status = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(true).build();
  DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
  WalReplication repl = ars.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, Long.MAX_VALUE, new HashSet<>());
  // The file is closed with an infinite end, so the entire file is reported as consumed
  Assert.assertEquals(Long.MAX_VALUE, repl.entriesConsumed);
  Assert.assertEquals(2, repl.walEdits.getEditsSize());
  Assert.assertEquals(2, repl.sizeInRecords);
  Assert.assertNotEquals(0, repl.sizeInBytes);
}
Use of org.apache.accumulo.tserver.logger.LogFileValue in project accumulo by apache.
The class AccumuloReplicaSystemTest, method onlyChooseMutationsForDesiredTableWithOpenStatus.
@Test
public void onlyChooseMutationsForDesiredTableWithOpenStatus() throws Exception {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataOutputStream dos = new DataOutputStream(baos);
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();
  // What is seq used for?
  key.seq = 1L;
  /*
   * Disclaimer: the following series of LogFileKey and LogFileValue pairs has *no* bearing whatsoever in reality regarding what these entries would
   * actually look like in a WAL. They are solely for testing that each LogEvents type is handled; order is not important.
   */
  key.event = LogEvents.DEFINE_TABLET;
  key.tablet = new KeyExtent(Table.ID.of("1"), null, null);
  key.tid = 1;
  key.write(dos);
  value.write(dos);
  key.tablet = null;
  key.event = LogEvents.MUTATION;
  key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
  key.write(dos);
  value.write(dos);
  key.event = LogEvents.DEFINE_TABLET;
  key.tablet = new KeyExtent(Table.ID.of("2"), null, null);
  key.tid = 2;
  value.mutations = Collections.emptyList();
  key.write(dos);
  value.write(dos);
  key.event = LogEvents.OPEN;
  key.tid = LogFileKey.VERSION;
  key.tserverSession = "foobar";
  key.write(dos);
  value.write(dos);
  key.tablet = null;
  key.event = LogEvents.MUTATION;
  key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  value.mutations = Arrays.asList(new ServerMutation(new Text("badrow")));
  key.write(dos);
  value.write(dos);
  key.event = LogEvents.COMPACTION_START;
  key.tid = 2;
  key.filename = "/accumulo/tables/1/t-000001/A000001.rf";
  value.mutations = Collections.emptyList();
  key.write(dos);
  value.write(dos);
  key.event = LogEvents.DEFINE_TABLET;
  key.tablet = new KeyExtent(Table.ID.of("1"), null, null);
  key.tid = 3;
  value.mutations = Collections.emptyList();
  key.write(dos);
  value.write(dos);
  key.event = LogEvents.COMPACTION_FINISH;
  key.tid = 6;
  value.mutations = Collections.emptyList();
  key.write(dos);
  value.write(dos);
  key.tablet = null;
  key.event = LogEvents.MUTATION;
  key.tid = 3;
  key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
  key.write(dos);
  value.write(dos);
  dos.close();
  Map<String, String> confMap = new HashMap<>();
  confMap.put(Property.REPLICATION_NAME.getKey(), "source");
  AccumuloConfiguration conf = new ConfigurationCopy(confMap);
  AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
  ars.setConf(conf);
  Status status = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
  DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
  WalReplication repl = ars.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, Long.MAX_VALUE, new HashSet<>());
  // We stopped because we got to the end of the file
  Assert.assertEquals(9, repl.entriesConsumed);
  Assert.assertEquals(2, repl.walEdits.getEditsSize());
  Assert.assertEquals(2, repl.sizeInRecords);
  Assert.assertNotEquals(0, repl.sizeInBytes);
}
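Taken together, the two tests walk the same synthetic stream of nine key/value pairs. With a closed status and an infinite end, the replica system reports Long.MAX_VALUE entries consumed, since a closed file can never grow; with an open status it reports exactly the 9 records it walked before hitting end of file. In both cases only the two mutations written under table 1's tablet are chosen for replication (2 WAL edits, 2 records), and the "badrow" mutation is left out.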