Example use of org.apache.hadoop.fs.LocalFileSystem in the Apache Accumulo project: class RFileTest, method testOutOfOrder.
/**
 * Verifies that appending keys out of sorted order throws the
 * {@link IllegalArgumentException} declared by the RFileWriter API.
 */
@Test(expected = IllegalArgumentException.class)
public void testOutOfOrder() throws Exception {
  // test that exception declared in API is thrown
  Key k1 = new Key("r1", "f1", "q1");
  // Pin the charset: a bare getBytes() depends on the platform default encoding.
  Value v1 = new Value("1".getBytes(java.nio.charset.StandardCharsets.UTF_8));
  Key k2 = new Key("r2", "f1", "q1");
  Value v2 = new Value("2".getBytes(java.nio.charset.StandardCharsets.UTF_8));
  LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  String testFile = createTmpTestFile();
  try (RFileWriter writer = RFile.newWriter().to(testFile).withFileSystem(localFs).build()) {
    writer.append(k2, v2);
    // k1 sorts before k2, so this second append violates ordering and must throw
    writer.append(k1, v1);
  }
}
Example use of org.apache.hadoop.fs.LocalFileSystem in the Apache Accumulo project: class RFileTest, method testDoubleStart.
/**
 * Verifies that starting the default locality group a second time on the same
 * writer throws the {@link IllegalStateException} declared by the API.
 */
@Test(expected = IllegalStateException.class)
public void testDoubleStart() throws Exception {
  LocalFileSystem fs = FileSystem.getLocal(new Configuration());
  String tmpFile = createTmpTestFile();
  try (RFileWriter rfWriter = RFile.newWriter().to(tmpFile).withFileSystem(fs).build()) {
    rfWriter.startDefaultLocalityGroup();
    // a second start on the same writer is illegal and must throw
    rfWriter.startDefaultLocalityGroup();
  }
}
Example use of org.apache.hadoop.fs.LocalFileSystem in the Apache Accumulo project: class RFileTest, method testMultipleSources.
/**
 * Verifies that a scanner built over multiple RFile sources merges the data
 * from all files and honors a subsequently applied range restriction.
 */
@Test
public void testMultipleSources() throws Exception {
  SortedMap<Key, Value> testData1 = createTestData(10, 10, 10);
  SortedMap<Key, Value> testData2 = createTestData(0, 10, 0, 10, 10);
  String testFile1 = createRFile(testData1);
  String testFile2 = createRFile(testData2);
  LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  // try-with-resources so the scanner is closed even when an assertion fails
  try (Scanner scanner = RFile.newScanner().from(testFile1, testFile2).withFileSystem(localFs).build()) {
    TreeMap<Key, Value> expected = new TreeMap<>(testData1);
    expected.putAll(testData2);
    Assert.assertEquals(expected, toMap(scanner));
    Range range = new Range(rowStr(3), true, rowStr(14), true);
    scanner.setRange(range);
    // NOTE(review): subMap's end is exclusive while the Range end is inclusive;
    // this relies on Range.getEndKey() pointing just past the last row's entries.
    Assert.assertEquals(expected.subMap(range.getStartKey(), range.getEndKey()), toMap(scanner));
  }
}
Example use of org.apache.hadoop.fs.LocalFileSystem in the Apache Accumulo project: class RFileTest, method testIndependance.
/**
 * Verifies that two iterators obtained from the same RFile scanner are
 * independent: each reflects the range that was set when it was created, and
 * advancing one does not affect the other. The two scanner iterators are
 * interleaved and compared entry-by-entry against iterators over the
 * corresponding sub-maps of the source data.
 */
@Test
public void testIndependance() throws Exception {
  // test to ensure two iterators allocated from same RFile scanner are independent.
  LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  SortedMap<Key, Value> testData = createTestData(10, 10, 10);
  String testFile = createRFile(testData);
  // try-with-resources so the scanner is closed even when an assertion fails
  try (Scanner scanner = RFile.newScanner().from(testFile).withFileSystem(localFs).build()) {
    Range range1 = Range.exact(rowStr(5));
    scanner.setRange(range1);
    Iterator<Entry<Key, Value>> scnIter1 = scanner.iterator();
    Iterator<Entry<Key, Value>> mapIter1 = testData.subMap(range1.getStartKey(), range1.getEndKey()).entrySet().iterator();
    // changing the range after scnIter1 exists must not disturb scnIter1
    Range range2 = new Range(rowStr(3), true, rowStr(4), true);
    scanner.setRange(range2);
    Iterator<Entry<Key, Value>> scnIter2 = scanner.iterator();
    Iterator<Entry<Key, Value>> mapIter2 = testData.subMap(range2.getStartKey(), range2.getEndKey()).entrySet().iterator();
    // interleave the two scanner iterators to exercise their independence
    while (scnIter1.hasNext() || scnIter2.hasNext()) {
      if (scnIter1.hasNext()) {
        Assert.assertTrue(mapIter1.hasNext());
        Assert.assertEquals(scnIter1.next(), mapIter1.next());
      } else {
        Assert.assertFalse(mapIter1.hasNext());
      }
      if (scnIter2.hasNext()) {
        Assert.assertTrue(mapIter2.hasNext());
        Assert.assertEquals(scnIter2.next(), mapIter2.next());
      } else {
        Assert.assertFalse(mapIter2.hasNext());
      }
    }
    // both expected iterators must be fully consumed, not just the scanner ones
    Assert.assertFalse(mapIter1.hasNext());
    Assert.assertFalse(mapIter2.hasNext());
  }
}
Example use of org.apache.hadoop.fs.LocalFileSystem in the Apache Accumulo project: class RFileTest, method testWriterTableProperties.
/**
 * Verifies that table properties passed to the writer take effect: small
 * compressed-block sizes should force the written RFile to contain many index
 * entries, and the data must still read back intact.
 */
@Test
public void testWriterTableProperties() throws Exception {
  LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  String testFile = createTmpTestFile();
  Map<String, String> props = new HashMap<>();
  props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
  props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(), "1K");
  SortedMap<Key, Value> testData1 = createTestData(10, 10, 10);
  // try-with-resources (matching the other tests in this class) guarantees the
  // writer is closed — and the file finished — even if append throws
  try (RFileWriter writer = RFile.newWriter().to(testFile).withFileSystem(localFs).withTableProperties(props).build()) {
    writer.append(testData1.entrySet());
  }
  Reader reader = getReader(localFs, testFile);
  try {
    FileSKVIterator iiter = reader.getIndex();
    int count = 0;
    while (iiter.hasTop()) {
      count++;
      iiter.next();
    }
    // if settings are used then should create multiple index entries
    Assert.assertTrue(count > 10);
  } finally {
    reader.close();
  }
  // the data must round-trip regardless of the block-size settings
  try (Scanner scanner = RFile.newScanner().from(testFile).withFileSystem(localFs).build()) {
    Assert.assertEquals(testData1, toMap(scanner));
  }
}
Aggregations