Use of org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser in project hbase by apache.
From the class TestImportTsvParser, method testTsvParserBadTsvLineExcessiveColumns.
/**
 * Test cases that throw BadTsvLineException
 */
@Test(expected = BadTsvLineException.class)
public void testTsvParserBadTsvLineExcessiveColumns() throws BadTsvLineException {
  TsvParser parser = new TsvParser("HBASE_ROW_KEY,col_a", "\t");
  // Two columns declared, three values supplied: parse() must reject the line.
  byte[] line = Bytes.toBytes("val_a\tval_b\tval_c");
  parser.parse(line, line.length);
}
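These snippets omit the enclosing test class. As a hedged sketch, the imports below are inferred from the identifiers the snippets use; they are not copied from the source file, and on recent HBase releases the Guava Splitter is the shaded org.apache.hbase.thirdparty variant instead:

// Sketch of the imports this listing assumes; inferred, not copied from the source.
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser;
import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.BadTsvLineException;
import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.ParsedLine;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.Test;

import com.google.common.base.Splitter;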
From the class TestImportTsvParser, method testTsvParserInvalidTimestamp.
@Test(expected = BadTsvLineException.class)
public void testTsvParserInvalidTimestamp() throws BadTsvLineException {
  TsvParser parser = new TsvParser("HBASE_ROW_KEY,HBASE_TS_KEY,col_a,", "\t");
  assertEquals(1, parser.getTimestampKeyColumnIndex());
  byte[] line = Bytes.toBytes("rowkey\ttimestamp\tval_a");
  ParsedLine parsed = parser.parse(line, line.length);
  // "timestamp" is not numeric, so getTimestamp() throws BadTsvLineException here;
  // the remaining assertions never run.
  assertEquals(-1, parsed.getTimestamp(-1));
  checkParsing(parsed, Splitter.on("\t").split(Bytes.toString(line)));
}
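checkParsing is a private helper of TestImportTsvParser that this listing does not show. A minimal sketch of what such a helper might look like, assuming the ParsedLine accessors getColumnCount(), getColumnOffset(int), getColumnLength(int), and getLineBytes(); this is a reconstruction, not the project's actual helper:

// Hypothetical reconstruction: compare each parsed column against the
// expected strings produced by Splitter, in order.
private void checkParsing(ParsedLine parsed, Iterable<String> expected) {
  java.util.Iterator<String> it = expected.iterator();
  for (int i = 0; i < parsed.getColumnCount(); i++) {
    assertEquals(it.next(), Bytes.toString(parsed.getLineBytes(),
      parsed.getColumnOffset(i), parsed.getColumnLength(i)));
  }
  if (it.hasNext()) {
    fail("parser produced fewer columns than expected");
  }
}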
From the class TestImportTsvParser, method testTsvParserBadTsvLineNoRowKey.
@Test(expected = BadTsvLineException.class)
public void testTsvParserBadTsvLineNoRowKey() throws BadTsvLineException {
  TsvParser parser = new TsvParser("col_a,HBASE_ROW_KEY", "\t");
  byte[] line = Bytes.toBytes("only_cola_data_and_no_row_key");
  parser.parse(line, line.length);
}
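Outside the tests, BadTsvLineException is what a caller catches to decide whether to skip a malformed record rather than fail outright. A hedged sketch of that pattern; parseAll and badLines are hypothetical names, and the real ImportTsv mapper increments a Hadoop MapReduce Counter rather than a local variable:

// Illustrative only: tolerate malformed rows by counting and skipping them.
static int parseAll(TsvParser parser, java.util.List<byte[]> lines) {
  int badLines = 0;
  for (byte[] line : lines) {
    try {
      parser.parse(line, line.length); // throws BadTsvLineException on malformed input
    } catch (BadTsvLineException e) {
      badLines++;
    }
  }
  return badLines;
}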
From the class TestImportTsvParser, method testTsvParserParseRowKey.
@Test
public void testTsvParserParseRowKey() throws BadTsvLineException {
  TsvParser parser = new TsvParser("HBASE_ROW_KEY,col_a,HBASE_TS_KEY", "\t");
  assertEquals(0, parser.getRowKeyColumnIndex());
  byte[] line = Bytes.toBytes("rowkey\tval_a\t1234");
  Pair<Integer, Integer> rowKeyOffsets = parser.parseRowKey(line, line.length);
  assertEquals(0, rowKeyOffsets.getFirst().intValue());
  assertEquals(6, rowKeyOffsets.getSecond().intValue());
  try {
    line = Bytes.toBytes("\t\tval_a\t1234");
    parser.parseRowKey(line, line.length);
    fail("Should get BadTsvLineException on empty rowkey.");
  } catch (BadTsvLineException b) {
    // expected
  }
  parser = new TsvParser("col_a,HBASE_ROW_KEY,HBASE_TS_KEY", "\t");
  assertEquals(1, parser.getRowKeyColumnIndex());
  line = Bytes.toBytes("val_a\trowkey\t1234");
  rowKeyOffsets = parser.parseRowKey(line, line.length);
  assertEquals(6, rowKeyOffsets.getFirst().intValue());
  assertEquals(6, rowKeyOffsets.getSecond().intValue());
  try {
    line = Bytes.toBytes("val_a");
    rowKeyOffsets = parser.parseRowKey(line, line.length);
    fail("Should get BadTsvLineException when number of columns less than rowkey position.");
  } catch (BadTsvLineException b) {
    // expected
  }
  parser = new TsvParser("col_a,HBASE_TS_KEY,HBASE_ROW_KEY", "\t");
  assertEquals(2, parser.getRowKeyColumnIndex());
  line = Bytes.toBytes("val_a\t1234\trowkey");
  rowKeyOffsets = parser.parseRowKey(line, line.length);
  assertEquals(11, rowKeyOffsets.getFirst().intValue());
  assertEquals(6, rowKeyOffsets.getSecond().intValue());
}
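As the test shows, the Pair returned by parseRowKey is an (offset, length) slice into the original line buffer rather than a copy. A minimal sketch of materializing the row key from that slice, assuming only java.util.Arrays and a surrounding method that declares throws BadTsvLineException:

// Hypothetical follow-on: copy the (offset, length) slice out of the line buffer.
byte[] line = Bytes.toBytes("val_a\trowkey\t1234");
TsvParser parser = new TsvParser("col_a,HBASE_ROW_KEY,HBASE_TS_KEY", "\t");
Pair<Integer, Integer> rk = parser.parseRowKey(line, line.length);
byte[] rowKey = java.util.Arrays.copyOfRange(line, rk.getFirst(), rk.getFirst() + rk.getSecond());
assertEquals("rowkey", Bytes.toString(rowKey));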
From the class TestImportTsvParser, method testTsvParser.
@Test
public void testTsvParser() throws BadTsvLineException {
  TsvParser parser = new TsvParser("col_a,col_b:qual,HBASE_ROW_KEY,col_d", "\t");
  assertBytesEquals(Bytes.toBytes("col_a"), parser.getFamily(0));
  assertBytesEquals(HConstants.EMPTY_BYTE_ARRAY, parser.getQualifier(0));
  assertBytesEquals(Bytes.toBytes("col_b"), parser.getFamily(1));
  assertBytesEquals(Bytes.toBytes("qual"), parser.getQualifier(1));
  assertNull(parser.getFamily(2));
  assertNull(parser.getQualifier(2));
  assertEquals(2, parser.getRowKeyColumnIndex());
  assertEquals(TsvParser.DEFAULT_TIMESTAMP_COLUMN_INDEX, parser.getTimestampKeyColumnIndex());
  byte[] line = Bytes.toBytes("val_a\tval_b\tval_c\tval_d");
  ParsedLine parsed = parser.parse(line, line.length);
  checkParsing(parsed, Splitter.on("\t").split(Bytes.toString(line)));
}
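assertBytesEquals is another private helper of the test class that this listing omits. A plausible sketch, again a reconstruction rather than the project's code, comparing byte arrays through Bytes.toStringBinary so a failure message shows both values in readable form:

// Hypothetical reconstruction: compare byte arrays via their printable form.
private void assertBytesEquals(byte[] expected, byte[] actual) {
  assertEquals(Bytes.toStringBinary(expected), Bytes.toStringBinary(actual));
}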