Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.
Source: class HDFSStorageMatching, method main.
/**
 * Stress driver that writes five batches of {@code count} sequential integer
 * payloads to an {@link HDFSStorage} instance (flushing after each batch),
 * then reads everything back and verifies each payload matches its index.
 *
 * @param args args[0] = base directory for the storage, args[1] = storage id
 */
public static void main(String[] args) {
  HDFSStorage storage = new HDFSStorage();
  storage.setBaseDir(args[0]);
  storage.setId(args[1]);
  storage.setRestore(true);
  storage.setup(null);
  int count = 100000000;
  logger.debug(" start time {}", System.currentTimeMillis());
  int index = 10000;
  // Write five flushed batches; storeBatch returns the next unused index.
  for (int batch = 0; batch < 5; batch++) {
    index = storeBatch(storage, index, count);
    storage.flush();
  }
  logger.debug(" end time {}", System.currentTimeMillis());
  logger.debug(" start time for retrieve {}", System.currentTimeMillis());
  byte[] b = storage.retrieve(new byte[8]);
  int org_index = index;
  index = 10000;
  match(b, index);
  // Read sequentially until the storage is exhausted, verifying every record.
  while (true) {
    index++;
    b = storage.retrieveNext();
    if (b == null) {
      logger.debug(" end time for retrieve {}/{}/{}", System.currentTimeMillis(), index, org_index);
      return;
    } else {
      if (!match(b, index)) {
        throw new RuntimeException("failed : " + index);
      }
    }
  }
}

/**
 * Stores {@code count} consecutive 4-byte integer payloads starting at
 * {@code index}.
 *
 * @param storage the storage to write to
 * @param index   first integer value to store
 * @param count   number of records to store
 * @return the next unused index ({@code index + count})
 */
private static int storeBatch(HDFSStorage storage, int index, int count) {
  for (int i = 0; i < count; i++) {
    byte[] b = Ints.toByteArray(index);
    storage.store(new Slice(b, 0, b.length));
    index++;
  }
  return index;
}
Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.
Source: class HDFSStorageTest, method testStorageWithRestore.
/**
 * Verifies that flushed data survives a restart: a payload is stored and
 * flushed, the storage is torn down and re-created with restore enabled,
 * more data is written, and the backing HDFS file must still exist.
 */
@Test
public void testStorageWithRestore() throws IOException {
  Assert.assertNull(storage.retrieve(new byte[8]));
  byte[] b = new byte[200];
  Assert.assertNotNull(storage.store(new Slice(b, 0, b.length)));
  storage.flush();
  storage.teardown();
  // Re-create the storage with restore=true so it picks up the flushed state.
  storage = getStorage("1", true);
  storage.store(new Slice(b, 0, b.length));
  storage.flush();
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  boolean exists = fs.exists(new Path(STORAGE_DIRECTORY + "/1/" + "1"));
  // assertTrue is the idiomatic form of assertEquals(msg, true, exists).
  Assert.assertTrue("file should exist", exists);
}
Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.
Source: class HDFSStorageTest, method testPartialFlushWithFailure.
/**
 * Simulates a storage failure after a partial flush. A first payload is
 * flushed, then a second one is written without a flush before the storage is
 * replaced by a restored instance. The restored storage must return null for
 * the unflushed address, return a null address when the client stores that
 * data again, and — once flushed — serve the re-stored data when the last
 * flushed address is retrieved.
 *
 * @throws Exception
 */
@Test
public void testPartialFlushWithFailure() throws Exception {
  Assert.assertNull(storage.retrieve(new byte[8]));
  byte[] payload = "ab".getBytes();
  byte[] flushedAddr = storage.store(new Slice(payload, 0, payload.length));
  Assert.assertNotNull(flushedAddr);
  storage.flush();
  // Second record is stored but deliberately NOT flushed before "failure".
  payload = "cb".getBytes();
  byte[] unflushedAddr = storage.store(new Slice(payload, 0, payload.length));
  storage = getStorage("1", true);
  Assert.assertNull(storage.retrieve(unflushedAddr));
  Assert.assertNull(storage.store(new Slice(payload, 0, payload.length)));
  storage.flush();
  match(storage.retrieve(flushedAddr), "cb");
}
Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.
Source: class HDFSStorageTest, method testPartialFlushRollOver.
/**
 * This test covers the following use case:
 * 1. Some data is stored to make sure that there is no roll over.
 * 2. The file is flushed but not closed.
 * 3. More data is stored — enough to make the file roll over.
 * 4. Retrieve is called for the last returned address and returns null as the
 *    data is not flushed.
 * 5. More data is stored, but the address returned is null because of the
 *    previous retrieve call.
 * 6. The data is flushed to make sure that it is committed.
 * 7. The data is retrieved from the beginning and matches what was stored.
 *
 * @throws Exception
 */
@Test
public void testPartialFlushRollOver() throws Exception {
  Assert.assertNull(storage.retrieve(new byte[8]));
  byte[] b = new byte[] { 48, 48, 48, 48, 98, 48, 52, 54, 49, 57, 55, 51, 52, 97, 53, 101, 56, 56, 97, 55, 98, 53, 52, 51, 98, 50, 102, 51, 49, 97, 97, 54, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 32, 48, 48, 58, 51, 49, 58, 52, 56, 1, 49, 48, 53, 53, 57, 52, 50, 1, 50, 1, 49, 53, 49, 49, 54, 49, 56, 52, 1, 49, 53, 49, 49, 57, 50, 49, 49, 1, 49, 53, 49, 50, 57, 54, 54, 53, 1, 49, 53, 49, 50, 49, 53, 52, 56, 1, 49, 48, 48, 56, 48, 51, 52, 50, 1, 55, 56, 56, 50, 54, 53, 52, 56, 1, 49, 1, 48, 1, 48, 46, 48, 1, 48, 46, 48, 1, 48, 46, 48 };
  // Snapshot of the original payload taken before b[0] is mutated below;
  // b_org tracks the values expected during retrieval. (Replaces a full
  // duplicate of the literal above — same contents, no divergence risk.)
  byte[] b_org = b.clone();
  byte[] address = storage.store(new Slice(b, 0, b.length));
  Assert.assertNotNull(address);
  storage.flush();
  byte[] addr = null;
  // Store enough unflushed records to force the file to roll over.
  for (int i = 0; i < 5; i++) {
    b[0] = (byte) (b[0] + 1);
    addr = storage.store(new Slice(b, 0, b.length));
  }
  // The last returned address is not flushed, so retrieval must fail.
  Assert.assertNull(storage.retrieve(addr));
  // After the failed retrieve, subsequent stores return null addresses.
  for (int i = 0; i < 5; i++) {
    b[0] = (byte) (b[0] + 1);
    Assert.assertNull(storage.store(new Slice(b, 0, b.length)));
  }
  storage.flush();
  // Read back from the beginning; each committed record is the original
  // payload with b[0] incremented once per stored record.
  match(storage.retrieve(new byte[8]), new String(b_org));
  b_org[0] = (byte) (b_org[0] + 1);
  match(storage.retrieve(address), new String(b_org));
  b_org[0] = (byte) (b_org[0] + 1);
  match(storage.retrieveNext(), new String(b_org));
  b_org[0] = (byte) (b_org[0] + 1);
  match(storage.retrieveNext(), new String(b_org));
}
Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.
Source: class HDFSStorageTest, method testNext.
/**
 * Stores alternating payloads across two flushes and verifies that
 * retrieve/retrieveNext return them in insertion order. Each retrieved record
 * carries an 8-byte address header that is stripped before comparison.
 */
@Test
public void testNext() throws IOException {
  // try-with-resources closes the file even if an assertion fails
  // (the original explicit close() leaked the handle on failure).
  try (RandomAccessFile r = new RandomAccessFile(testMeta.testFile, "r")) {
    r.seek(0);
    Assert.assertNull(storage.retrieve(new byte[8]));
    byte[] b = r.readLine().getBytes();
    storage.store(new Slice(b, 0, b.length));
    byte[] b1 = r.readLine().getBytes();
    storage.store(new Slice(b1, 0, b1.length));
    storage.store(new Slice(b, 0, b.length));
    storage.flush();
    storage.store(new Slice(b1, 0, b1.length));
    storage.store(new Slice(b, 0, b.length));
    storage.flush();
    Assert.assertEquals("matched the stored value with retrieved value", new String(b), stripAddress(storage.retrieve(new byte[8])));
    Assert.assertEquals("matched the stored value with retrieved value", new String(b1), stripAddress(storage.retrieveNext()));
    Assert.assertEquals("matched the stored value with retrieved value", new String(b), stripAddress(storage.retrieveNext()));
  }
}

/**
 * Strips the leading 8-byte address header from a retrieved record and
 * returns the remaining payload as a String.
 *
 * @param data a record as returned by retrieve/retrieveNext (never null here)
 * @return the payload portion of the record
 */
private static String stripAddress(byte[] data) {
  return new String(data, 8, data.length - 8);
}
Aggregations