Use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.
The class TestReplicationSource, method testLogMoving.
/**
 * Sanity check that we can move logs around while we are reading
 * from them. Should this test fail, ReplicationSource would have a hard
 * time reading logs that are being archived.
 * @throws Exception
 */
@Test
public void testLogMoving() throws Exception {
  Path logPath = new Path(logDir, "log");
  if (!FS.exists(logDir)) {
    FS.mkdirs(logDir);
  }
  if (!FS.exists(oldLogDir)) {
    FS.mkdirs(oldLogDir);
  }
  // Write three edits, syncing after each so all of them are readable.
  WALProvider.Writer writer =
      WALFactory.createWALWriter(FS, logPath, TEST_UTIL.getConfiguration());
  for (int i = 0; i < 3; i++) {
    byte[] b = Bytes.toBytes(Integer.toString(i));
    KeyValue kv = new KeyValue(b, b, b);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    WALKey key = new WALKey(b, TableName.valueOf(b), 0, 0, HConstants.DEFAULT_CLUSTER_ID);
    writer.append(new WAL.Entry(key, edit));
    writer.sync();
  }
  writer.close();
  // Open a reader, then rename the log out from under it; reading should continue.
  WAL.Reader reader = WALFactory.createReader(FS, logPath, TEST_UTIL.getConfiguration());
  WAL.Entry entry = reader.next();
  assertNotNull(entry);
  Path oldLogPath = new Path(oldLogDir, "log");
  FS.rename(logPath, oldLogPath);
  entry = reader.next();
  assertNotNull(entry);
  entry = reader.next();
  entry = reader.next();
  // All three entries consumed; the reader signals end-of-log with null.
  assertNull(entry);
  reader.close();
}
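The test advances the reader one entry at a time; a reader built the same way can drain whatever remains in a log in one pass. A minimal sketch under the same setup as the test; drainEntries is a hypothetical helper, not part of HBase:

// Hypothetical helper: read every remaining entry from an already-open reader.
// WAL.Reader.next() returns null at end-of-log, exactly as the assertions above rely on.
private static List<WAL.Entry> drainEntries(WAL.Reader reader) throws IOException {
  List<WAL.Entry> entries = new ArrayList<>();
  for (WAL.Entry e = reader.next(); e != null; e = reader.next()) {
    entries.add(e);
  }
  return entries;
}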
Use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.
The class TestFSHLogProvider, method addEdits.
protected void addEdits(WAL log, HRegionInfo hri, HTableDescriptor htd, int times,
    NavigableMap<byte[], Integer> scopes) throws IOException {
  final byte[] row = Bytes.toBytes("row");
  for (int i = 0; i < times; i++) {
    long timestamp = System.currentTimeMillis();
    WALEdit cols = new WALEdit();
    cols.add(new KeyValue(row, row, row, timestamp, row));
    log.append(hri, getWalKey(hri.getEncodedNameAsBytes(), htd.getTableName(), timestamp, scopes),
        cols, true);
  }
  log.sync();
}
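The caller has to supply the replication scopes keyed by column family. A minimal usage sketch, assuming a live wal, hri, and htd from the surrounding test; the scope constant comes from HConstants:

// Hypothetical usage: mark every family for global replication, then add three edits.
NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (byte[] family : htd.getFamiliesKeys()) {
  scopes.put(family, HConstants.REPLICATION_SCOPE_GLOBAL);
}
addEdits(wal, hri, htd, 3, scopes);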
Use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.
The class TestWALSplit, method appendCompactionEvent.
private static void appendCompactionEvent(Writer w, HRegionInfo hri, String[] inputs, String output)
    throws IOException {
  WALProtos.CompactionDescriptor.Builder desc = WALProtos.CompactionDescriptor.newBuilder();
  desc.setTableName(ByteString.copyFrom(hri.getTable().toBytes()))
      .setEncodedRegionName(ByteString.copyFrom(hri.getEncodedNameAsBytes()))
      .setRegionName(ByteString.copyFrom(hri.getRegionName()))
      .setFamilyName(ByteString.copyFrom(FAMILY))
      .setStoreHomeDir(hri.getEncodedName() + "/" + Bytes.toString(FAMILY))
      .addAllCompactionInput(Arrays.asList(inputs))
      .addCompactionOutput(output);
  WALEdit edit = WALEdit.createCompaction(hri, desc.build());
  WALKey key = new WALKey(hri.getEncodedNameAsBytes(), TABLE_NAME, 1,
      EnvironmentEdgeManager.currentTime(), HConstants.DEFAULT_CLUSTER_ID);
  w.append(new Entry(key, edit));
  w.sync();
}
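The marker written here travels through the WAL as an ordinary edit, so on replay it has to be recognized and decoded again. A minimal sketch, assuming an edit read back from the log; matching on WALEdit's METAFAMILY/COMPACTION column is how its meta cells are tagged:

// Sketch: pick the compaction marker back out of a replayed WALEdit.
for (Cell cell : edit.getCells()) {
  if (CellUtil.matchingColumn(cell, WALEdit.METAFAMILY, WALEdit.COMPACTION)) {
    WALProtos.CompactionDescriptor replayed = WALEdit.getCompaction(cell);
    // replayed.getCompactionInputList() / getCompactionOutputList() name the store files.
  }
}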
Use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.
The class TestWALMethods, method createTestLogEntry.
private WAL.Entry createTestLogEntry(int i) {
  long seq = i;
  long now = i * 1000;
  WALEdit edit = new WALEdit();
  edit.add(KeyValueTestUtil.create("row", "fam", "qual", 1234, "val"));
  WALKey key = new WALKey(TEST_REGION, TEST_TABLE, seq, now, HConstants.DEFAULT_CLUSTER_ID);
  return new WAL.Entry(key, edit);
}
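Because the sequence number and timestamp are both derived from i, each call yields a distinct, ordered entry. A minimal usage sketch; the batch list is hypothetical:

// Hypothetical usage: build a small ordered batch of test entries.
List<WAL.Entry> batch = new ArrayList<>();
for (int i = 0; i < 5; i++) {
  batch.add(createTestLogEntry(i));
}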
Use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.
The class HRegion, method doDelta.
/**
* Add "deltas" to Cells. Deltas are increments or appends. Switch on <code>op</code>.
*
* <p>If increment, add deltas to current values or if an append, then
* append the deltas to the current Cell values.
*
* <p>Append and Increment code paths are mostly the same. They differ in just a few places.
* This method does the code path for increment and append and then in key spots, switches
* on the passed in <code>op</code> to do increment or append specific paths.
*/
private Result doDelta(Operation op, Mutation mutation, long nonceGroup, long nonce,
    boolean returnResults) throws IOException {
  checkReadOnly();
  checkResources();
  checkRow(mutation.getRow(), op.toString());
  checkFamilies(mutation.getFamilyCellMap().keySet());
  this.writeRequestsCount.increment();
  WriteEntry writeEntry = null;
  startRegionOperation(op);
  List<Cell> results = returnResults ? new ArrayList<>(mutation.size()) : null;
  RowLock rowLock = null;
  MemstoreSize memstoreSize = new MemstoreSize();
  try {
    rowLock = getRowLockInternal(mutation.getRow(), false);
    lock(this.updatesLock.readLock());
    try {
      Result cpResult = doCoprocessorPreCall(op, mutation);
      if (cpResult != null) {
        return returnResults ? cpResult : null;
      }
      Durability effectiveDurability = getEffectiveDurability(mutation.getDurability());
      Map<Store, List<Cell>> forMemStore = new HashMap<>(mutation.getFamilyCellMap().size());
      // Reckon Cells to apply to WAL -- in returned walEdit -- and what to add to memstore and
      // what to return back to the client (in 'forMemStore' and 'results' respectively).
      WALEdit walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results);
      // Actually write to WAL now if a walEdit to apply.
      if (walEdit != null && !walEdit.isEmpty()) {
        writeEntry = doWALAppend(walEdit, effectiveDurability, nonceGroup, nonce);
      } else {
        // If walEdit is empty, it means we skipped the WAL; update LongAdders and start an mvcc
        // transaction.
        recordMutationWithoutWal(mutation.getFamilyCellMap());
        writeEntry = mvcc.begin();
        updateSequenceId(forMemStore.values(), writeEntry.getWriteNumber());
      }
      // Now write to MemStore. Do it a column family at a time.
      for (Map.Entry<Store, List<Cell>> e : forMemStore.entrySet()) {
        applyToMemstore(e.getKey(), e.getValue(), true, memstoreSize);
      }
      mvcc.completeAndWait(writeEntry);
      if (rsServices != null && rsServices.getNonceManager() != null) {
        rsServices.getNonceManager().addMvccToOperationContext(nonceGroup, nonce,
            writeEntry.getWriteNumber());
      }
      writeEntry = null;
    } finally {
      this.updatesLock.readLock().unlock();
    }
    // If results is null, then client asked that we not return the calculated results.
    return results != null && returnResults ? Result.create(results) : Result.EMPTY_RESULT;
  } finally {
    // writeEntry is nulled out on success above, so a non-null value here means a failure
    // left the mvcc transaction hanging; complete it so readers can advance.
    if (writeEntry != null) {
      mvcc.complete(writeEntry);
    }
    if (rowLock != null) {
      rowLock.release();
    }
    // Request a cache flush if over the limit. Do it outside update lock.
    if (isFlushSize(addAndGetMemstoreSize(memstoreSize))) {
      requestFlush();
    }
    closeRegionOperation(op);
    if (this.metricsRegion != null) {
      switch (op) {
        case INCREMENT:
          this.metricsRegion.updateIncrement();
          break;
        case APPEND:
          this.metricsRegion.updateAppend();
          break;
        default:
          break;
      }
    }
  }
}
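Both code paths into doDelta start at the client as ordinary mutations. A minimal client-side sketch, assuming a connected Table instance named table; the row, family, and qualifier names are made up:

// Increment: add a delta to a numeric counter cell.
Increment inc = new Increment(Bytes.toBytes("row1"));
inc.addColumn(Bytes.toBytes("fam"), Bytes.toBytes("counter"), 1L);
Result incremented = table.increment(inc);

// Append: concatenate bytes onto the current cell value.
Append app = new Append(Bytes.toBytes("row1"));
app.add(Bytes.toBytes("fam"), Bytes.toBytes("log"), Bytes.toBytes("|next"));
Result appended = table.append(app);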