Use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.
From the class TestWALFactory, method testVisitors.
/**
 * Test that we can visit entries before they are appended.
 */
@Test
public void testVisitors() throws Exception {
  final int COL_COUNT = 10;
  final TableName tableName = TableName.valueOf(currentTest.getMethodName());
  final byte[] row = Bytes.toBytes("row");
  final DumbWALActionsListener visitor = new DumbWALActionsListener();
  final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
  long timestamp = EnvironmentEdgeManager.currentTime();
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  scopes.put(Bytes.toBytes("column"), 0);
  RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();
  final WAL log = wals.getWAL(hri);
  log.registerWALActionsListener(visitor);
  for (int i = 0; i < COL_COUNT; i++) {
    WALEdit cols = new WALEdit();
    cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)),
      timestamp, new byte[] { (byte) (i + '0') }));
    log.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName,
      EnvironmentEdgeManager.currentTime(), mvcc, scopes), cols);
  }
  log.sync();
  assertEquals(COL_COUNT, visitor.increments);
  log.unregisterWALActionsListener(visitor);
  WALEdit cols = new WALEdit();
  cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(11)),
    timestamp, new byte[] { (byte) (11 + '0') }));
  log.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName,
    EnvironmentEdgeManager.currentTime(), mvcc, scopes), cols);
  log.sync();
  // The unregistered listener must not see the extra append.
  assertEquals(COL_COUNT, visitor.increments);
}
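The DumbWALActionsListener used above is a test-local helper from TestWALFactory that this page does not show. A minimal sketch of such a listener, assuming the WALActionsListener#visitLogEntryBeforeWrite callback (the exact signature differs between HBase releases, so treat this as illustrative):

static class DumbWALActionsListener implements WALActionsListener {
  // Incremented once per WAL entry visited before it is appended.
  int increments = 0;

  @Override
  public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) {
    increments++;
  }
}

The two assertEquals calls then pin down the listener lifecycle: it sees every append while registered, and nothing after unregisterWALActionsListener.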
Use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.
From the class TestWALFactory, method testEditAdd.
/**
 * Tests that we can write out an edit, close, and then read it back in again.
 */
@Test
public void testEditAdd() throws IOException {
  int colCount = 10;
  TableDescriptor htd =
    TableDescriptorBuilder.newBuilder(TableName.valueOf(currentTest.getMethodName()))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("column")).build();
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getColumnFamilyNames()) {
    scopes.put(fam, 0);
  }
  byte[] row = Bytes.toBytes("row");
  WAL.Reader reader = null;
  try {
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
    // Write columns named 0, 1, 2, etc., each with a matching single-byte
    // value '0', '1', '2'...
    long timestamp = EnvironmentEdgeManager.currentTime();
    WALEdit cols = new WALEdit();
    for (int i = 0; i < colCount; i++) {
      cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)),
        timestamp, new byte[] { (byte) (i + '0') }));
    }
    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(row)
      .setEndKey(Bytes.toBytes(Bytes.toString(row) + "1")).build();
    final WAL log = wals.getWAL(info);
    final long txid = log.appendData(info, new WALKeyImpl(info.getEncodedNameAsBytes(),
      htd.getTableName(), EnvironmentEdgeManager.currentTime(), mvcc, scopes), cols);
    log.sync(txid);
    log.startCacheFlush(info.getEncodedNameAsBytes(), htd.getColumnFamilyNames());
    log.completeCacheFlush(info.getEncodedNameAsBytes(), HConstants.NO_SEQNUM);
    log.shutdown();
    Path filename = AbstractFSWALProvider.getCurrentFileName(log);
    // Now open a reader on the log and assert append worked.
    reader = wals.createReader(fs, filename);
    // All columns above were added on a single row, so only one WAL entry
    // is read back below... that's why the loop bound is '1'.
    for (int i = 0; i < 1; i++) {
      WAL.Entry entry = reader.next(null);
      if (entry == null) {
        break;
      }
      WALKey key = entry.getKey();
      WALEdit val = entry.getEdit();
      assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
      assertTrue(htd.getTableName().equals(key.getTableName()));
      Cell cell = val.getCells().get(0);
      assertTrue(Bytes.equals(row, 0, row.length, cell.getRowArray(), cell.getRowOffset(),
        cell.getRowLength()));
      assertEquals((byte) (i + '0'), CellUtil.cloneValue(cell)[0]);
      System.out.println(key + " " + val);
    }
  } finally {
    if (reader != null) {
      reader.close();
    }
  }
}
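WAL.Reader implements Closeable, so the read-back can also be written with try-with-resources. A short sketch that drains and counts every entry in the file, reusing the wals and fs fields from the test above (for this test the count would be 1, since all cells travel in a single WALEdit):

int entries = 0;
try (WAL.Reader r = wals.createReader(fs, filename)) {
  // next(null) returns a freshly allocated entry, or null at end of file.
  for (WAL.Entry e; (e = r.next(null)) != null;) {
    entries++;
  }
}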
Use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.
From the class TestWALFactory, method testAppendClose.
/*
 * We pass different values to recoverFileLease() so that different code paths are covered.
 *
 * For this test to pass, requires:
 * 1. HDFS-200 (append support)
 * 2. HDFS-988 (SafeMode should freeze file operations
 *    [FSNamesystem.nextGenerationStampForBlock])
 * 3. HDFS-142 (on restart, maintain pendingCreates)
 */
@Test
public void testAppendClose() throws Exception {
  TableName tableName = TableName.valueOf(currentTest.getMethodName());
  RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
  WAL wal = wals.getWAL(regionInfo);
  int total = 20;
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  scopes.put(tableName.getName(), 0);
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  for (int i = 0; i < total; i++) {
    WALEdit kvs = new WALEdit();
    kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
    wal.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
      EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs);
  }
  // Now call sync to send the data to HDFS datanodes.
  wal.sync();
  int namenodePort = cluster.getNameNodePort();
  final Path walPath = AbstractFSWALProvider.getCurrentFileName(wal);
  // Stop the cluster. (Ensure restart since we're sharing the MiniDFSCluster.)
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    TEST_UTIL.shutdownMiniDFSCluster();
    try {
      // wal.writer.close() will throw an exception,
      // but still call this since it closes the LogSyncer thread first.
      wal.shutdown();
    } catch (IOException e) {
      LOG.info(e.toString(), e);
    }
    // Close the FS last so DFSOutputStream can't call close.
    fs.close();
    LOG.info("STOPPED first instance of the cluster");
  } finally {
    // Restart the cluster.
    while (cluster.isClusterUp()) {
      LOG.error("Waiting for cluster to go down");
      Thread.sleep(1000);
    }
    assertFalse(cluster.isClusterUp());
    cluster = null;
    for (int i = 0; i < 100; i++) {
      try {
        cluster = TEST_UTIL.startMiniDFSClusterForTestWAL(namenodePort);
        break;
      } catch (BindException e) {
        LOG.info("Sleeping. BindException bringing up new cluster");
        Threads.sleep(1000);
      }
    }
    cluster.waitActive();
    fs = cluster.getFileSystem();
    LOG.info("STARTED second instance.");
  }
  // Set the lease period to one second so that the namenode triggers
  // lease recovery upon append request.
  Method setLeasePeriod =
    cluster.getClass().getDeclaredMethod("setLeasePeriod", new Class[] { Long.TYPE, Long.TYPE });
  setLeasePeriod.setAccessible(true);
  setLeasePeriod.invoke(cluster, 1000L, 1000L);
  try {
    Thread.sleep(1000);
  } catch (InterruptedException e) {
    LOG.info(e.toString(), e);
  }
  // Now try recovering the log, like the HMaster would do.
  final FileSystem recoveredFs = fs;
  final Configuration rlConf = conf;
  class RecoverLogThread extends Thread {
    public Exception exception = null;

    @Override
    public void run() {
      try {
        RecoverLeaseFSUtils.recoverFileLease(recoveredFs, walPath, rlConf, null);
      } catch (IOException e) {
        exception = e;
      }
    }
  }
  RecoverLogThread t = new RecoverLogThread();
  t.start();
  // Timeout after 60 sec. Without the correct patches this would be an infinite loop.
  t.join(60 * 1000);
  if (t.isAlive()) {
    t.interrupt();
    throw new Exception("Timed out waiting for WAL.recoverLog()");
  }
  if (t.exception != null) {
    throw t.exception;
  }
  // Make sure we can read back all the content.
  WAL.Reader reader = wals.createReader(fs, walPath);
  int count = 0;
  WAL.Entry entry = new WAL.Entry();
  while (reader.next(entry) != null) {
    count++;
    assertTrue("Should be one KeyValue per WALEdit", entry.getEdit().getCells().size() == 1);
  }
  assertEquals(total, count);
  reader.close();
  // Reset the lease period.
  setLeasePeriod.invoke(cluster, 60000L, 3600000L);
}
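The recovery above passes null for the CancelableProgressable reporter. A caller that wants feedback between lease-recovery attempts can supply one; a hedged sketch, with illustrative logging:

RecoverLeaseFSUtils.recoverFileLease(recoveredFs, walPath, rlConf,
  new CancelableProgressable() {
    @Override
    public boolean progress() {
      // Invoked between recovery attempts; returning true keeps trying,
      // returning false cancels the recovery loop.
      LOG.info("Still waiting to recover the lease on " + walPath);
      return true;
    }
  });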
Use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.
From the class TestFSHLogProvider, method setUp.
@Before
public void setUp() throws Exception {
  mvcc = new MultiVersionConcurrencyControl();
  FileStatus[] entries = fs.listStatus(new Path("/"));
  for (FileStatus dir : entries) {
    fs.delete(dir.getPath(), true);
  }
}
Use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.
From the class TestSyncReplicationWALProvider, method testReadWrite.
private void testReadWrite(DualAsyncFSWAL wal) throws Exception {
  int recordCount = 100;
  int columnCount = 10;
  byte[] row = Bytes.toBytes("testRow");
  long timestamp = EnvironmentEdgeManager.currentTime();
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  ProtobufLogTestHelper.doWrite(wal, REGION, TABLE, columnCount, recordCount, row, timestamp,
    mvcc);
  Path localFile = wal.getCurrentFileName();
  Path remoteFile = new Path(REMOTE_WAL_DIR + "/" + PEER_ID, localFile.getName());
  try (ProtobufLogReader reader =
    (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) {
    ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row,
      timestamp);
  }
  try (ProtobufLogReader reader =
    (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) {
    ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row,
      timestamp);
  }
  wal.rollWriter();
  DistributedFileSystem dfs = (DistributedFileSystem) UTIL.getDFSCluster().getFileSystem();
  UTIL.waitFor(5000, new ExplainingPredicate<Exception>() {

    @Override
    public boolean evaluate() throws Exception {
      return dfs.isFileClosed(localFile) && dfs.isFileClosed(remoteFile);
    }

    @Override
    public String explainFailure() throws Exception {
      StringBuilder sb = new StringBuilder();
      if (!dfs.isFileClosed(localFile)) {
        sb.append(localFile + " has not been closed yet.");
      }
      if (!dfs.isFileClosed(remoteFile)) {
        sb.append(remoteFile + " has not been closed yet.");
      }
      return sb.toString();
    }
  });
  try (ProtobufLogReader reader =
    (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) {
    ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row,
      timestamp);
  }
  try (ProtobufLogReader reader =
    (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) {
    ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row,
      timestamp);
  }
}
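The dfs.isFileClosed wait is the heart of this test: it proves that rolling the writer closed both the local and the remote WAL file. The same check works as a standalone polling helper against the plain HDFS client API (isFileClosed is specific to DistributedFileSystem; this helper is a sketch, not part of the test):

private static void waitUntilClosed(DistributedFileSystem dfs, Path file, long timeoutMs)
    throws IOException, InterruptedException {
  // isFileClosed returns true once the namenode has finalized the file,
  // i.e. the lease is released and the last block is complete.
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (!dfs.isFileClosed(file)) {
    if (System.currentTimeMillis() > deadline) {
      throw new IOException(file + " was not closed within " + timeoutMs + "ms");
    }
    Thread.sleep(100);
  }
}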