Example use of com.datatorrent.netlet.util.Slice in the apex-malhar project (Apache): class HDFSStorageTest, method testPartialFlushWithCleanAndRollOverAndFailure.
/**
 * This tests the clean when the files are roll-over and the storage fails
 *
 * @throws Exception
 */
@Test
public void testPartialFlushWithCleanAndRollOverAndFailure() throws Exception {
// Fresh storage: retrieving from the zero address must return nothing.
Assert.assertNull(storage.retrieve(new byte[8]));
// b is mutated below (first byte incremented before each store); b_org keeps a
// pristine copy of the same payload so expected values can be rebuilt for matching.
byte[] b = new byte[] { 48, 48, 48, 48, 98, 48, 52, 54, 49, 57, 55, 51, 52, 97, 53, 101, 56, 56, 97, 55, 98, 53, 52, 51, 98, 50, 102, 51, 49, 97, 97, 54, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 32, 48, 48, 58, 51, 49, 58, 52, 56, 1, 49, 48, 53, 53, 57, 52, 50, 1, 50, 1, 49, 53, 49, 49, 54, 49, 56, 52, 1, 49, 53, 49, 49, 57, 50, 49, 49, 1, 49, 53, 49, 50, 57, 54, 54, 53, 1, 49, 53, 49, 50, 49, 53, 52, 56, 1, 49, 48, 48, 56, 48, 51, 52, 50, 1, 55, 56, 56, 50, 54, 53, 52, 56, 1, 49, 1, 48, 1, 48, 46, 48, 1, 48, 46, 48, 1, 48, 46, 48 };
byte[] b_org = new byte[] { 48, 48, 48, 48, 98, 48, 52, 54, 49, 57, 55, 51, 52, 97, 53, 101, 56, 56, 97, 55, 98, 53, 52, 51, 98, 50, 102, 51, 49, 97, 97, 54, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 32, 48, 48, 58, 51, 49, 58, 52, 56, 1, 49, 48, 53, 53, 57, 52, 50, 1, 50, 1, 49, 53, 49, 49, 54, 49, 56, 52, 1, 49, 53, 49, 49, 57, 50, 49, 49, 1, 49, 53, 49, 50, 57, 54, 54, 53, 1, 49, 53, 49, 50, 49, 53, 52, 56, 1, 49, 48, 48, 56, 48, 51, 52, 50, 1, 55, 56, 56, 50, 54, 53, 52, 56, 1, 49, 1, 48, 1, 48, 46, 48, 1, 48, 46, 48, 1, 48, 46, 48 };
// Store and flush one entry, then clean up to (and including) its address.
byte[] address = storage.store(new Slice(b, 0, b.length));
Assert.assertNotNull(address);
storage.flush();
storage.clean(address);
byte[] addr = null;
// Store enough distinct entries to force a file roll-over, but do NOT flush them.
for (int i = 0; i < 5; i++) {
b[0] = (byte) (b[0] + 1);
addr = storage.store(new Slice(b, 0, b.length));
}
// Simulate a storage crash/restart (id "1", restore = true); unflushed data is lost.
storage = getStorage("1", true);
Assert.assertNull(storage.retrieve(addr));
// After the failed retrieve, stores return null until the storage is flushed again.
for (int i = 0; i < 5; i++) {
b[0] = (byte) (b[0] + 1);
Assert.assertNull(storage.store(new Slice(b, 0, b.length)));
}
storage.flush();
// Verify the surviving entries in order; each expected value is b_org with its
// first byte incremented once more, mirroring the increments applied to b above.
b_org[0] = (byte) (b_org[0] + 1);
match(storage.retrieve(address), new String(b_org));
b_org[0] = (byte) (b_org[0] + 1);
match(storage.retrieveNext(), new String(b_org));
b_org[0] = (byte) (b_org[0] + 1);
match(storage.retrieveNext(), new String(b_org));
}
Example use of com.datatorrent.netlet.util.Slice in the apex-malhar project (Apache): class InterceptorTestHelper, method processFile.
/**
 * Feeds every line of the given resource file through the interceptor and verifies
 * that the first 32 bytes of each produced event body match the line's GUID.
 *
 * @param file        resource file under /test_data/gentxns/ (only its name is used)
 * @param interceptor the Flume interceptor under test
 * @return the number of lines processed
 * @throws IOException if reading the resource stream fails
 */
private int processFile(File file, Interceptor interceptor) throws IOException {
  InputStream stream = getClass().getResourceAsStream("/test_data/gentxns/" + file.getName());
  int lineCount = 0;
  // try-with-resources: the original only closed the reader on the success path,
  // leaking the stream whenever intercept/parse or an assertion threw.
  try (BufferedReader br = new BufferedReader(new InputStreamReader(stream))) {
    String line;
    while ((line = br.readLine()) != null) {
      byte[] body = interceptor.intercept(new MyEvent(line.getBytes())).getBody();
      RawEvent event = RawEvent.from(body, FIELD_SEPARATOR);
      // The first 32 bytes of each input line are its GUID and must survive interception.
      Assert.assertEquals("GUID", new Slice(line.getBytes(), 0, 32), event.guid);
      logger.debug("guid = {}, time = {}", event.guid, event.time);
      lineCount++;
    }
  }
  return lineCount;
}
Example use of com.datatorrent.netlet.util.Slice in the apex-malhar project (Apache): class HDFSStorageTest, method testCleanup.
/**
 * Stores two copies of the first line of the test file, flushes, then cleans up to the
 * second address and verifies that the underlying data file "0" has been deleted.
 *
 * @throws IOException if file or filesystem access fails
 */
@Test
public void testCleanup() throws IOException {
  // try-with-resources: the original called r.close() only on the success path,
  // leaking the file handle if any store/flush/assert threw. RandomAccessFile
  // is AutoCloseable, so this closes it on every exit path.
  try (RandomAccessFile r = new RandomAccessFile(testMeta.testFile, "r")) {
    r.seek(0);
    byte[] b = r.readLine().getBytes();
    storage.store(new Slice(b, 0, b.length));
    byte[] val = storage.store(new Slice(b, 0, b.length));
    storage.flush();
    // Cleaning up to the last stored address should remove the whole first data file.
    storage.clean(val);
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    boolean exists = fs.exists(new Path(STORAGE_DIRECTORY + "/" + "0"));
    Assert.assertEquals("file should not exist", false, exists);
  }
}
Example use of com.datatorrent.netlet.util.Slice in the apex-malhar project (Apache): class HDFSStorageTest, method testCleanForUnflushedData.
/**
 * Verifies that clean() only advances the clean offset over flushed data: cleaning an
 * address that was never flushed must leave the recorded clean offset at the last
 * flushed address.
 *
 * @throws IOException if storage or filesystem access fails
 */
@Test
public void testCleanForUnflushedData() throws IOException {
  final byte[] payload = new byte[200];
  byte[] lastFlushedAddress = null;
  storage.retrieve(new byte[8]);
  // Phase 1: five rounds of paired writes, each round flushed; remember the
  // address returned for the last flushed entry.
  int round = 0;
  while (round < 5) {
    storage.store(new Slice(payload, 0, payload.length));
    lastFlushedAddress = storage.store(new Slice(payload, 0, payload.length));
    storage.flush();
    round++;
  }
  // Phase 2: five more rounds of paired writes, deliberately left unflushed.
  byte[] unflushedAddress = null;
  for (int r = 0; r < 5; r++) {
    storage.store(new Slice(payload, 0, payload.length));
    unflushedAddress = storage.store(new Slice(payload, 0, payload.length));
  }
  // Cleaning at an unflushed address must not move the offset past flushed data.
  storage.clean(unflushedAddress);
  byte[] cleanedOffset = storage.readData(new Path(STORAGE_DIRECTORY + "/1/cleanoffsetFile"));
  Assert.assertArrayEquals(lastFlushedAddress, cleanedOffset);
}
Example use of com.datatorrent.netlet.util.Slice in the apex-malhar project (Apache): class HDFSStorageTest, method testPartialFlushRollOverWithFailure.
/**
 * This test covers following use case 1. Some data is stored to make sure that there is no roll over 2. File is
 * flushed but the file is not closed 3. Some more data is stored. The data stored is enough to make the file roll
 * over 4. The storage crashes and new storage is instantiated. 5. Retrieve is called for the last returned address and
 * it returns null as the data is not flushed 6. Some more data is stored again but the address is returned null
 * because of previous retrieve call 7. The data is flushed to make sure that the data is committed. 8. Now the data
 * is retrieved from the starting and data returned matches the data stored
 *
 * @throws Exception
 */
@Test
public void testPartialFlushRollOverWithFailure() throws Exception {
// Fresh storage: retrieving from the zero address must return nothing.
Assert.assertNull(storage.retrieve(new byte[8]));
// b is mutated below (first byte incremented before each store); b_org keeps a
// pristine copy of the same payload so expected values can be rebuilt for matching.
byte[] b = new byte[] { 48, 48, 48, 48, 98, 48, 52, 54, 49, 57, 55, 51, 52, 97, 53, 101, 56, 56, 97, 55, 98, 53, 52, 51, 98, 50, 102, 51, 49, 97, 97, 54, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 32, 48, 48, 58, 51, 49, 58, 52, 56, 1, 49, 48, 53, 53, 57, 52, 50, 1, 50, 1, 49, 53, 49, 49, 54, 49, 56, 52, 1, 49, 53, 49, 49, 57, 50, 49, 49, 1, 49, 53, 49, 50, 57, 54, 54, 53, 1, 49, 53, 49, 50, 49, 53, 52, 56, 1, 49, 48, 48, 56, 48, 51, 52, 50, 1, 55, 56, 56, 50, 54, 53, 52, 56, 1, 49, 1, 48, 1, 48, 46, 48, 1, 48, 46, 48, 1, 48, 46, 48 };
byte[] b_org = new byte[] { 48, 48, 48, 48, 98, 48, 52, 54, 49, 57, 55, 51, 52, 97, 53, 101, 56, 56, 97, 55, 98, 53, 52, 51, 98, 50, 102, 51, 49, 97, 97, 54, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 32, 48, 48, 58, 51, 49, 58, 52, 56, 1, 49, 48, 53, 53, 57, 52, 50, 1, 50, 1, 49, 53, 49, 49, 54, 49, 56, 52, 1, 49, 53, 49, 49, 57, 50, 49, 49, 1, 49, 53, 49, 50, 57, 54, 54, 53, 1, 49, 53, 49, 50, 49, 53, 52, 56, 1, 49, 48, 48, 56, 48, 51, 52, 50, 1, 55, 56, 56, 50, 54, 53, 52, 56, 1, 49, 1, 48, 1, 48, 46, 48, 1, 48, 46, 48, 1, 48, 46, 48 };
// Step 1-2: store one entry and flush it (file stays open, no roll-over yet).
byte[] address = storage.store(new Slice(b, 0, b.length));
Assert.assertNotNull(address);
storage.flush();
byte[] addr = null;
// Step 3: store enough distinct entries to force a roll-over, without flushing.
for (int i = 0; i < 5; i++) {
b[0] = (byte) (b[0] + 1);
addr = storage.store(new Slice(b, 0, b.length));
}
// Step 4-5: simulate a crash/restart (id "1", restore = true); unflushed data is lost.
storage = getStorage("1", true);
Assert.assertNull(storage.retrieve(addr));
// Step 6: after the failed retrieve, stores return null until the next flush.
for (int i = 0; i < 5; i++) {
b[0] = (byte) (b[0] + 1);
Assert.assertNull(storage.store(new Slice(b, 0, b.length)));
}
// Step 7: commit the data.
storage.flush();
// Step 8: read back from the start; expected values are b_org with its first byte
// incremented in step with the increments applied to b above.
match(storage.retrieve(new byte[8]), new String(b_org));
b_org[0] = (byte) (b_org[0] + 1);
match(storage.retrieve(address), new String(b_org));
b_org[0] = (byte) (b_org[0] + 1);
match(storage.retrieveNext(), new String(b_org));
b_org[0] = (byte) (b_org[0] + 1);
match(storage.retrieveNext(), new String(b_org));
}
End of aggregated usage examples.