Use of org.apache.hadoop.hbase.wal.WALKey in project hbase by apache — from the class AbstractTestProtobufLog, method doRead:
/**
 * Appends entries to a WAL file and then reads them back, verifying count,
 * content, and trailer presence.
 * @param withTrailer If true, the writer is closed before reading so that a
 * trailer is appended to the WAL. Otherwise, reading starts right after the
 * sync call, so the reader is not aware of the trailer. In that scenario, if
 * the reader tries to read the trailer in its next() call, it returns false
 * from ProtoBufLogReader.
 * @throws IOException if writing or reading the WAL fails
 */
private void doRead(boolean withTrailer) throws IOException {
  final int columnCount = 5;
  final int recordCount = 5;
  final TableName tableName = TableName.valueOf("tablename");
  final byte[] row = Bytes.toBytes("row");
  long timestamp = System.currentTimeMillis();
  Path path = new Path(dir, "tempwal");
  // delete the log if already exists, for test only
  fs.delete(path, true);
  W writer = null;
  ProtobufLogReader reader = null;
  try {
    HRegionInfo hri = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    HTableDescriptor htd = new HTableDescriptor(tableName);
    fs.mkdirs(dir);
    // Write log in pb format.
    writer = createWriter(path);
    for (int i = 0; i < recordCount; ++i) {
      WALKey key = new WALKey(hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
      WALEdit edit = new WALEdit();
      for (int j = 0; j < columnCount; ++j) {
        // Register the column families once, on the first record.
        if (i == 0) {
          htd.addFamily(new HColumnDescriptor("column" + j));
        }
        // Value encodes record and column index so reads can be verified.
        String value = i + "" + j;
        edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
      }
      append(writer, new WAL.Entry(key, edit));
    }
    sync(writer);
    if (withTrailer) {
      // Close now so the trailer gets written; null out the reference so the
      // finally block does not close the writer a second time.
      writer.close();
      writer = null;
    }
    // Now read the log using standard means.
    reader = (ProtobufLogReader) wals.createReader(fs, path);
    if (withTrailer) {
      assertNotNull(reader.trailer);
    } else {
      assertNull(reader.trailer);
    }
    for (int i = 0; i < recordCount; ++i) {
      WAL.Entry entry = reader.next();
      assertNotNull(entry);
      assertEquals(columnCount, entry.getEdit().size());
      assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
      assertEquals(tableName, entry.getKey().getTablename());
      int idx = 0;
      for (Cell val : entry.getEdit().getCells()) {
        assertTrue(Bytes.equals(row, 0, row.length, val.getRowArray(), val.getRowOffset(), val.getRowLength()));
        String value = i + "" + idx;
        assertArrayEquals(Bytes.toBytes(value), CellUtil.cloneValue(val));
        idx++;
      }
    }
    // After all records, the reader must report end-of-log.
    WAL.Entry entry = reader.next();
    assertNull(entry);
  } finally {
    if (writer != null) {
      writer.close();
    }
    if (reader != null) {
      reader.close();
    }
  }
}
Use of org.apache.hadoop.hbase.wal.WALKey in project hbase by apache — from the class TestWALActionsListener, method testActionListener:
/**
 * Adds a batch of dummy edits, rolling the log on every second insert. We
 * should end up with 10 rolled files (plus the roll performed in the
 * constructor). Also verifies that a listener registered mid-run only
 * observes the rolls that happen after registration.
 */
@Test
public void testActionListener() throws Exception {
  DummyWALActionsListener initialListener = new DummyWALActionsListener();
  List<WALActionsListener> listeners = new ArrayList<>(1);
  listeners.add(initialListener);
  final WALFactory wals = new WALFactory(conf, listeners, "testActionListener");
  DummyWALActionsListener lateListener = new DummyWALActionsListener();
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(SOME_BYTES), SOME_BYTES, SOME_BYTES, false);
  final WAL wal = wals.getWAL(regionInfo.getEncodedNameAsBytes(), regionInfo.getTable().getNamespace());
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  for (int iteration = 0; iteration < 20; iteration++) {
    byte[] bytes = Bytes.toBytes(iteration + "");
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(bytes, bytes, bytes));
    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(SOME_BYTES));
    descriptor.addFamily(new HColumnDescriptor(bytes));
    // Every family replicates with scope 0 (local only).
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] family : descriptor.getFamiliesKeys()) {
      scopes.put(family, 0);
    }
    final long txid = wal.append(regionInfo, new WALKey(regionInfo.getEncodedNameAsBytes(), TableName.valueOf(bytes), 0, mvcc, scopes), edit, true);
    wal.sync(txid);
    // Register a second listener halfway through; it should only see
    // the rolls from this point on.
    if (iteration == 10) {
      wal.registerWALActionsListener(lateListener);
    }
    if (iteration % 2 == 0) {
      wal.rollWriter();
    }
  }
  wal.close();
  assertEquals(11, initialListener.preLogRollCounter);
  assertEquals(11, initialListener.postLogRollCounter);
  assertEquals(5, lateListener.preLogRollCounter);
  assertEquals(5, lateListener.postLogRollCounter);
  assertEquals(1, initialListener.closedCount);
}
Use of org.apache.hadoop.hbase.wal.WALKey in project phoenix by apache — from the class SystemCatalogWALEntryFilterIT, method testOtherTablesAutoPass:
@Test
public void testOtherTablesAutoPass() throws Exception {
  // The cell content is nonsense, but the filter should pass the entry
  // through untouched because the table is not SYSTEM.CATALOG.
  WALKey nonCatalogKey = new WALKey(REGION, TableName.valueOf(TestUtil.ENTITY_HISTORY_TABLE_NAME));
  WAL.Entry entry = new WAL.Entry(nonCatalogKey, new WALEdit());
  entry.getEdit().add(CellUtil.createCell(Bytes.toBytes("foo")));
  SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
  Assert.assertEquals(1, filter.filter(entry).getEdit().size());
}
Use of org.apache.hadoop.hbase.wal.WALKey in project phoenix by apache — from the class SystemCatalogWALEntryFilterIT, method setup:
/**
 * One-time test setup: creates the base table plus two versions of a view —
 * one with a tenantId and one without — and captures the SYSTEM.CATALOG
 * table metadata and a WALKey targeting it for the filter tests.
 * @throws Exception if driver setup or SQL execution fails
 */
@BeforeClass
public static void setup() throws Exception {
  setUpTestDriver(ReadOnlyProps.EMPTY_PROPS);
  Properties tenantProperties = new Properties();
  tenantProperties.setProperty("TenantId", TENANT_ID);
  // Create two versions of a view -- one with a tenantId and one without.
  try (java.sql.Connection connection = ConnectionUtil.getInputConnection(getUtility().getConfiguration(), tenantProperties)) {
    ensureTableCreated(getUrl(), TestUtil.ENTITY_HISTORY_TABLE_NAME);
    connection.createStatement().execute(CREATE_TENANT_VIEW_SQL);
    catalogTable = PhoenixRuntime.getTable(connection, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
    walKey = new WALKey(REGION, TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME), 0, 0, uuid);
  }
  Assert.assertNotNull(catalogTable);
  try (java.sql.Connection connection = ConnectionUtil.getInputConnection(getUtility().getConfiguration(), new Properties())) {
    connection.createStatement().execute(CREATE_NONTENANT_VIEW_SQL);
  }
}
Use of org.apache.hadoop.hbase.wal.WALKey in project hbase by apache — from the class TestReplicationSmallTests, method testCompactionWALEdits:
/**
 * Regression test for HBASE-9038: Replication.scopeWALEdits would throw an
 * NPE if it wasn't filtering out the compaction WALEdit.
 * @throws Exception if scoping the compaction edit fails
 */
@Test(timeout = 300000)
public void testCompactionWALEdits() throws Exception {
  final WALProtos.CompactionDescriptor descriptor = WALProtos.CompactionDescriptor.getDefaultInstance();
  final HRegionInfo regionInfo = new HRegionInfo(htable1.getName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
  final WALEdit compactionEdit = WALEdit.createCompaction(regionInfo, descriptor);
  // Must complete without an NPE even though the edit is a compaction marker.
  Replication.scopeWALEdits(new WALKey(), compactionEdit, htable1.getConfiguration(), null);
}
End of aggregated usages.