Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.
The class SortedLogRecovery, method findLastStartToFinish.
int findLastStartToFinish(MultiReader reader, int fileno, KeyExtent extent, Set<String> tabletFiles, LastStartToFinish lastStartToFinish) throws IOException, EmptyMapFileException, UnusedException {
  HashSet<String> suffixes = new HashSet<>();
  for (String path : tabletFiles)
    suffixes.add(getPathSuffix(path));
  // Scan for tableId for this extent (should always be in the log)
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();
  int tid = -1;
  if (!reader.next(key, value))
    throw new EmptyMapFileException();
  if (key.event != OPEN)
    throw new RuntimeException("First log entry value is not OPEN");
  if (key.tserverSession.compareTo(lastStartToFinish.tserverSession) != 0) {
    if (lastStartToFinish.compactionStatus == Status.LOOKING_FOR_FINISH)
      throw new RuntimeException("COMPACTION_FINISH (without preceding COMPACTION_START) is not followed by a successful minor compaction.");
    lastStartToFinish.update(key.tserverSession);
  }
  KeyExtent alternative = extent;
  if (extent.isRootTablet()) {
    alternative = RootTable.OLD_EXTENT;
  }
  LogFileKey defineKey = null;
  // for the maximum tablet id, find the minimum sequence #... may be ok to find the max seq, but just want to make the code behave like it used to
  while (reader.next(key, value)) {
    if (key.event != DEFINE_TABLET)
      break;
    if (key.tablet.equals(extent) || key.tablet.equals(alternative)) {
      if (tid != key.tid) {
        tid = key.tid;
        defineKey = key;
        key = new LogFileKey();
      }
    }
  }
  if (tid < 0) {
    throw new UnusedException();
  }
  log.debug("Found tid, seq {} {}", tid, defineKey.seq);
  // Scan start/stop events for this tablet
  key = defineKey;
  key.event = COMPACTION_START;
  reader.seek(key);
  while (reader.next(key, value)) {
    // LogFileEntry.printEntry(entry);
    if (key.tid != tid)
      break;
    if (key.event == COMPACTION_START) {
      if (lastStartToFinish.compactionStatus == Status.INITIAL)
        lastStartToFinish.compactionStatus = Status.COMPLETE;
      if (key.seq <= lastStartToFinish.lastStart)
        throw new RuntimeException("Sequence numbers are not increasing for start/stop events: " + key.seq + " vs " + lastStartToFinish.lastStart);
      lastStartToFinish.update(fileno, key.seq);
      // Tablet server finished the minor compaction, but didn't remove the entry from the METADATA table.
      log.debug("minor compaction into {} finished, but was still in the METADATA", key.filename);
      if (suffixes.contains(getPathSuffix(key.filename)))
        lastStartToFinish.update(-1);
    } else if (key.event == COMPACTION_FINISH) {
      if (key.seq <= lastStartToFinish.lastStart)
        throw new RuntimeException("Sequence numbers are not increasing for start/stop events: " + key.seq + " vs " + lastStartToFinish.lastStart);
      if (lastStartToFinish.compactionStatus == Status.INITIAL)
        lastStartToFinish.compactionStatus = Status.LOOKING_FOR_FINISH;
      else if (lastStartToFinish.lastFinish > lastStartToFinish.lastStart)
        throw new RuntimeException("COMPACTION_FINISH does not have preceding COMPACTION_START event.");
      else
        lastStartToFinish.compactionStatus = Status.COMPLETE;
      lastStartToFinish.update(key.seq);
    } else
      break;
  }
  return tid;
}
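The seek in the method above works because the recovered log has been sorted by LogFileKey, so a key populated with the event of interest and the tablet id positions the reader at the first matching entry. Below is a minimal sketch of that positioning pattern, not code from the Accumulo sources; it assumes a MultiReader already opened over a sorted WAL, a tid obtained from a DEFINE_TABLET entry, and that compaction events follow the tablet definitions in the key ordering used here.

// Sketch only: position a sorted-WAL reader at the compaction events for one tablet id.
LogFileKey seekKey = new LogFileKey();
seekKey.event = COMPACTION_START; // assumption: compaction events sort after DEFINE_TABLET entries
seekKey.tid = tid;                // tablet id found via a DEFINE_TABLET entry
seekKey.seq = 0;                  // start from the lowest sequence number for this tablet
reader.seek(seekKey);

LogFileKey k = new LogFileKey();
LogFileValue v = new LogFileValue();
while (reader.next(k, v)) {
  if (k.tid != tid)
    break; // entries for other tablets follow in the sorted order
  // inspect k.event and k.seq here, as findLastStartToFinish does
}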
Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.
The class AccumuloReplicaSystemTest, method restartInFileKnowsAboutPreviousTableDefines.
@Test
public void restartInFileKnowsAboutPreviousTableDefines() throws Exception {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataOutputStream dos = new DataOutputStream(baos);
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();
  // What is seq used for?
  key.seq = 1L;
  /*
   * Disclaimer: the following series of LogFileKey and LogFileValue pairs have *no* bearing whatsoever in reality regarding what these entries would actually
   * look like in a WAL. They are solely for testing that each LogEvents is handled; order is not important.
   */
  key.event = LogEvents.DEFINE_TABLET;
  key.tablet = new KeyExtent(Table.ID.of("1"), null, null);
  key.tid = 1;
  key.write(dos);
  value.write(dos);
  key.tablet = null;
  key.event = LogEvents.MUTATION;
  key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
  key.write(dos);
  value.write(dos);
  key.tablet = null;
  key.event = LogEvents.MUTATION;
  key.tid = 1;
  key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
  key.write(dos);
  value.write(dos);
  dos.close();
  Map<String, String> confMap = new HashMap<>();
  confMap.put(Property.REPLICATION_NAME.getKey(), "source");
  AccumuloConfiguration conf = new ConfigurationCopy(confMap);
  AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
  ars.setConf(conf);
  Status status = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
  DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
  HashSet<Integer> tids = new HashSet<>();
  // Only consume the first mutation, not the second
  WalReplication repl = ars.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, 1L, tids);
  // We stopped because we got to the end of the file
  Assert.assertEquals(2, repl.entriesConsumed);
  Assert.assertEquals(1, repl.walEdits.getEditsSize());
  Assert.assertEquals(1, repl.sizeInRecords);
  Assert.assertNotEquals(0, repl.sizeInBytes);
  status = Status.newBuilder(status).setBegin(2).build();
  // Consume the rest of the mutations
  repl = ars.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, 1L, tids);
  // We stopped because we got to the end of the file
  Assert.assertEquals(1, repl.entriesConsumed);
  Assert.assertEquals(1, repl.walEdits.getEditsSize());
  Assert.assertEquals(1, repl.sizeInRecords);
  Assert.assertNotEquals(0, repl.sizeInBytes);
}
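The Status protobuf in this test is what tells getWalEdits where to resume: the first pass consumes two WAL entries, so begin is advanced to 2 and the second pass skips them. A small sketch of that bookkeeping, using only the builder calls that appear in the test above:

// Sketch only: replication progress is carried in a protobuf Status.
// 'begin' counts WAL entries already consumed; 'infiniteEnd' marks a WAL that is
// still growing, so there is no fixed end offset yet.
Status fresh = Status.newBuilder()
    .setBegin(0)
    .setEnd(0)
    .setInfiniteEnd(true)
    .setClosed(false)
    .build();

// After consuming the first two entries, resume from entry 2 on the next pass.
Status resumed = Status.newBuilder(fresh).setBegin(2).build();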
Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.
The class BatchWriterReplicationReplayer, method replicateLog.
@Override
public long replicateLog(ClientContext context, String tableName, WalEdits data) throws RemoteReplicationException, AccumuloException, AccumuloSecurityException {
  final LogFileKey key = new LogFileKey();
  final LogFileValue value = new LogFileValue();
  final long memoryInBytes = context.getConfiguration().getAsBytes(Property.TSERV_REPLICATION_BW_REPLAYER_MEMORY);
  BatchWriter bw = null;
  long mutationsApplied = 0L;
  try {
    for (ByteBuffer edit : data.getEdits()) {
      DataInputStream dis = new DataInputStream(ByteBufferUtil.toByteArrayInputStream(edit));
      try {
        key.readFields(dis);
        // TODO this is brittle because AccumuloReplicaSystem isn't actually calling LogFileValue.write, but we're expecting
        // what we receive to be readable by the LogFileValue.
        value.readFields(dis);
      } catch (IOException e) {
        log.error("Could not deserialize edit from stream", e);
        throw new RemoteReplicationException(RemoteReplicationErrorCode.COULD_NOT_DESERIALIZE, "Could not deserialize edit from stream");
      }
      // Create the BatchWriter if we don't already have one.
      if (null == bw) {
        BatchWriterConfig bwConfig = new BatchWriterConfig();
        bwConfig.setMaxMemory(memoryInBytes);
        try {
          bw = context.getConnector().createBatchWriter(tableName, bwConfig);
        } catch (TableNotFoundException e) {
          throw new RemoteReplicationException(RemoteReplicationErrorCode.TABLE_DOES_NOT_EXIST, "Table " + tableName + " does not exist");
        }
      }
      log.info("Applying {} mutations to table {} as part of batch", value.mutations.size(), tableName);
      // If we got a ServerMutation, we have to make sure that we preserve the systemTimestamp, otherwise
      // the local system will assign a new timestamp.
      List<Mutation> mutationsCopy = new ArrayList<>(value.mutations.size());
      long mutationsCopied = 0L;
      for (Mutation orig : value.mutations) {
        if (orig instanceof ServerMutation) {
          mutationsCopied++;
          ServerMutation origServer = (ServerMutation) orig;
          Mutation copy = new Mutation(orig.getRow());
          for (ColumnUpdate update : orig.getUpdates()) {
            long timestamp;
            // If the update doesn't have a timestamp, pull it from the ServerMutation
            if (!update.hasTimestamp()) {
              timestamp = origServer.getSystemTimestamp();
            } else {
              timestamp = update.getTimestamp();
            }
            // TODO ACCUMULO-2937 cache the CVs
            if (update.isDeleted()) {
              copy.putDelete(update.getColumnFamily(), update.getColumnQualifier(), new ColumnVisibility(update.getColumnVisibility()), timestamp);
            } else {
              copy.put(update.getColumnFamily(), update.getColumnQualifier(), new ColumnVisibility(update.getColumnVisibility()), timestamp, update.getValue());
            }
          }
          // We also need to preserve the replicationSource information to prevent cycles
          Set<String> replicationSources = orig.getReplicationSources();
          if (null != replicationSources && !replicationSources.isEmpty()) {
            for (String replicationSource : replicationSources) {
              copy.addReplicationSource(replicationSource);
            }
          }
          mutationsCopy.add(copy);
        } else {
          mutationsCopy.add(orig);
        }
      }
      log.debug("Copied {} mutations to ensure server-assigned timestamps are propagated", mutationsCopied);
      try {
        bw.addMutations(mutationsCopy);
      } catch (MutationsRejectedException e) {
        log.error("Could not apply mutations to {}", tableName);
        throw new RemoteReplicationException(RemoteReplicationErrorCode.COULD_NOT_APPLY, "Could not apply mutations to " + tableName);
      }
      log.debug("{} mutations added to the BatchWriter", mutationsCopy.size());
      mutationsApplied += mutationsCopy.size();
    }
  } finally {
    if (null != bw) {
      try {
        bw.close();
      } catch (MutationsRejectedException e) {
        log.error("Could not apply mutations to {}", tableName);
        throw new RemoteReplicationException(RemoteReplicationErrorCode.COULD_NOT_APPLY, "Could not apply mutations to " + tableName);
      }
    }
  }
  log.info("Applied {} mutations in total to {}", mutationsApplied, tableName);
  return mutationsApplied;
}
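The only tunable the replayer reads directly is the memory cap for its BatchWriter. A hedged sketch of setting and reading back that property, reusing the ConfigurationCopy pattern from the test above; the "50M" value is an illustrative assumption, not a recommended setting.

// Sketch only: the replayer sizes its BatchWriter from the tserver configuration.
Map<String, String> replayerConf = new HashMap<>();
replayerConf.put(Property.TSERV_REPLICATION_BW_REPLAYER_MEMORY.getKey(), "50M"); // illustrative value
AccumuloConfiguration replayConfig = new ConfigurationCopy(replayerConf);

// Same lookup replicateLog performs before creating its BatchWriter.
long memoryInBytes = replayConfig.getAsBytes(Property.TSERV_REPLICATION_BW_REPLAYER_MEMORY);
BatchWriterConfig bwConfig = new BatchWriterConfig();
bwConfig.setMaxMemory(memoryInBytes);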
Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.
The class SortedLogRecovery, method playbackMutations.
private void playbackMutations(MultiReader reader, int tid, LastStartToFinish lastStartToFinish, MutationReceiver mr) throws IOException {
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();
  // Playback mutations after the last start-to-finish
  log.info("Scanning for mutations starting at sequence number {} for tid {}", lastStartToFinish.seq, tid);
  key.event = MUTATION;
  key.tid = tid;
  // the seq number for the minor compaction start is now the same as the
  // last update made to memory. Scan up to that mutation, but not past it.
  key.seq = lastStartToFinish.seq;
  reader.seek(key);
  while (true) {
    if (!reader.next(key, value))
      break;
    if (key.tid != tid)
      break;
    if (key.event == MUTATION) {
      mr.receive(value.mutations.get(0));
    } else if (key.event == MANY_MUTATIONS) {
      for (Mutation m : value.mutations) {
        mr.receive(m);
      }
    } else {
      throw new RuntimeException("unexpected log key type: " + key.event);
    }
  }
}
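The recovered mutations are handed off one at a time through the MutationReceiver callback. Below is a minimal sketch of a receiver, assuming the interface is just the single receive(Mutation) method used in the loop above; a real receiver would re-apply each mutation to the tablet's in-memory map rather than collect it.

// Sketch only: collect replayed mutations instead of re-applying them.
List<Mutation> replayed = new ArrayList<>();
MutationReceiver collector = new MutationReceiver() {
  @Override
  public void receive(Mutation m) {
    replayed.add(m); // a real receiver would apply m to the tablet's in-memory map
  }
};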
Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.
The class DfsLogger, method minorCompactionFinished.
public LoggerOperation minorCompactionFinished(long seq, int tid, String fqfn, Durability durability) throws IOException {
  LogFileKey key = new LogFileKey();
  key.event = COMPACTION_FINISH;
  key.seq = seq;
  key.tid = tid;
  return logFileData(Collections.singletonList(new Pair<>(key, EMPTY)), durability);
}
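The FINISH entry above only makes sense relative to a matching COMPACTION_START entry written when the minor compaction began. A hedged sketch of what that start-side key carries, built by hand here rather than taken from a DfsLogger method, and assuming the same seq, tid, and fqfn parameters as the method above; the filename is what findLastStartToFinish later compares against the tablet's files via getPathSuffix.

// Sketch only: the start-side key for the same minor compaction.
LogFileKey start = new LogFileKey();
start.event = COMPACTION_START;
start.seq = seq;        // sequence number at which the compaction started
start.tid = tid;        // same tablet id as the FINISH entry
start.filename = fqfn;  // the file the minor compaction writes into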