Example 41 with Slice

Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.

The class FSWindowDataManager, method findLargestCompletedWindow.

private long findLargestCompletedWindow(FSWindowReplayWAL wal, Long ceilingWindow) throws IOException {
    if (!wal.fileDescriptors.isEmpty()) {
        FileSystemWAL.FileSystemWALReader reader = wal.getReader();
        // to find the largest window, we only need to look at the last file.
        NavigableSet<Integer> descendingParts = new TreeSet<>(wal.fileDescriptors.keySet()).descendingSet();
        for (int part : descendingParts) {
            FSWindowReplayWAL.FileDescriptor last = wal.fileDescriptors.get(part).last();
            reader.seek(new FileSystemWAL.FileSystemWALPointer(last.part, 0));
            long endOffset = -1;
            long lastWindow = Stateless.WINDOW_ID;
            Slice slice = readNext(reader);
            while (slice != null) {
                // skip the artifact because we need just the largest window id.
                boolean skipComplete = skipNext(reader);
                if (!skipComplete) {
                    // artifact not saved so this window was not finished.
                    break;
                }
                long offset = reader.getCurrentPointer().getOffset();
                long window = Longs.fromByteArray(slice.toByteArray());
                if (ceilingWindow != null && window > ceilingWindow) {
                    break;
                }
                endOffset = offset;
                lastWindow = window;
                // either null or next window
                slice = readNext(reader);
            }
            if (endOffset != -1) {
                wal.walEndPointerAfterRecovery = new FileSystemWAL.FileSystemWALPointer(last.part, endOffset);
                wal.windowWalParts.put(lastWindow, wal.walEndPointerAfterRecovery.getPartNum());
                return lastWindow;
            }
        }
    }
    return Stateless.WINDOW_ID;
}
Also used: Slice (com.datatorrent.netlet.util.Slice)
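Every example on this page passes a byte range to the Slice(byte[], int, int) constructor and reads it back with toByteArray(); in the method above, the 8-byte payload is then decoded with Guava's Longs. Below is a minimal, illustrative sketch of that round trip; it is not taken from the project, and the window id value is hypothetical.

import com.datatorrent.netlet.util.Slice;
import com.google.common.primitives.Longs;

public class SliceWindowIdSketch {
    public static void main(String[] args) {
        // Hypothetical window id, encoded to an 8-byte big-endian array.
        long windowId = 6149102068211286016L;
        byte[] encoded = Longs.toByteArray(windowId);
        // Wrap the byte range in a Slice (offset 0, full length).
        Slice slice = new Slice(encoded, 0, encoded.length);
        // Decode it back the same way findLargestCompletedWindow does.
        long decoded = Longs.fromByteArray(slice.toByteArray());
        assert decoded == windowId;
    }
}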

Example 42 with Slice

Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.

The class HDFSStorageTest, method testPartialFlushWithCleanAndFailure.

/**
 * This tests clean when the file does not roll over and the storage is
 * re-created (simulating a failure) before the pending data is flushed.
 *
 * @throws Exception
 */
@Test
public void testPartialFlushWithCleanAndFailure() throws Exception {
    Assert.assertNull(storage.retrieve(new byte[8]));
    byte[] b = "ab".getBytes();
    byte[] address = storage.store(new Slice(b, 0, b.length));
    Assert.assertNotNull(address);
    storage.flush();
    storage.clean(address);
    b = "cb".getBytes();
    byte[] addr = storage.store(new Slice(b, 0, b.length));
    storage = getStorage("1", true);
    Assert.assertNull(storage.retrieve(addr));
    Assert.assertNull(storage.store(new Slice(b, 0, b.length)));
    storage.flush();
    match(storage.retrieve(new byte[8]), "cb");
    match(storage.retrieve(address), "cb");
    Assert.assertNotNull(storage.store(new Slice(b, 0, b.length)));
}
Also used: Slice (com.datatorrent.netlet.util.Slice), Test (org.junit.Test)
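The HDFSStorage tests above all follow the same store/flush/retrieve cycle. As a rough orientation, here is a minimal sketch of that cycle; it assumes an HDFSStorage instance already configured like the test fixture's storage field (for example via the getStorage helper used above), and the comments describe the behaviour the tests assert rather than a documented contract.

import com.datatorrent.netlet.util.Slice;

// Import for HDFSStorage omitted: use the same class that HDFSStorageTest exercises.
public class StorageCycleSketch {

    static void storeAndReadBack(HDFSStorage storage) throws Exception {
        byte[] data = "ab".getBytes();
        // store() returns an address identifying the stored block.
        byte[] address = storage.store(new Slice(data, 0, data.length));
        // Data only becomes retrievable after flush().
        storage.flush();
        // An all-zero 8-byte address reads from the beginning of the retained data.
        byte[] first = storage.retrieve(new byte[8]);
        // retrieveNext() returns the following items in sequence.
        byte[] next = storage.retrieveNext();
        // clean() releases data up to the given address.
        storage.clean(address);
    }
}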

Example 43 with Slice

Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.

The class HDFSStorageTest, method testRandomSequence.

@Test
public void testRandomSequence() throws IOException {
    storage.retrieve(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 });
    byte[] bytes = new byte[] { 48, 48, 48, 51, 101, 100, 55, 56, 55, 49, 53, 99, 52, 101, 55, 50, 97, 52, 48, 49, 51, 99, 97, 54, 102, 57, 55, 53, 57, 100, 49, 99, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 32, 48, 48, 58, 48, 48, 58, 52, 54, 1, 52, 50, 49, 50, 51, 1, 50, 1, 49, 53, 49, 49, 52, 50, 54, 53, 1, 49, 53, 49, 49, 57, 51, 53, 49, 1, 49, 53, 49, 50, 57, 56, 50, 52, 1, 49, 53, 49, 50, 49, 55, 48, 55, 1, 49, 48, 48, 55, 55, 51, 57, 51, 1, 49, 57, 49, 52, 55, 50, 53, 52, 54, 49, 1, 49, 1, 48, 1, 48, 46, 48, 1, 48, 46, 48, 1, 48, 46, 48 };
    storage.store(new Slice(bytes, 0, bytes.length));
    storage.flush();
    storage.clean(new byte[] { -109, 0, 0, 0, 0, 0, 0, 0 });
    storage.retrieve(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 });
    for (int i = 0; i < 2555; i++) {
        byte[] bytes1 = new byte[] { 48, 48, 48, 55, 56, 51, 98, 101, 50, 54, 50, 98, 52, 102, 50, 54, 56, 97, 55, 56, 102, 48, 54, 54, 50, 49, 49, 54, 99, 98, 101, 99, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 32, 48, 48, 58, 48, 48, 58, 53, 49, 1, 49, 49, 49, 49, 54, 51, 57, 1, 50, 1, 49, 53, 49, 48, 57, 57, 56, 51, 1, 49, 53, 49, 49, 49, 55, 48, 52, 1, 49, 53, 49, 50, 49, 51, 55, 49, 1, 49, 53, 49, 49, 52, 56, 51, 49, 1, 49, 48, 48, 55, 49, 57, 56, 49, 1, 49, 50, 48, 50, 55, 54, 49, 54, 56, 53, 1, 49, 1, 48, 1, 48, 46, 48, 1, 48, 46, 48, 1, 48, 46, 48 };
        storage.store(new Slice(bytes1, 0, bytes1.length));
        storage.flush();
    }
    storage.retrieve(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 });
    for (int i = 0; i < 1297; i++) {
        storage.retrieveNext();
    }
    storage.retrieve(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 });
    for (int i = 0; i < 1302; i++) {
        storage.retrieveNext();
    }
    storage.retrieve(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 });
    for (int i = 0; i < 1317; i++) {
        storage.retrieveNext();
    }
    storage.retrieve(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 });
    for (int i = 0; i < 2007; i++) {
        storage.retrieveNext();
    }
    storage.retrieve(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 });
    for (int i = 0; i < 2556; i++) {
        storage.retrieveNext();
    }
    byte[] bytes1 = new byte[] { 48, 48, 48, 48, 98, 48, 52, 54, 49, 57, 55, 51, 52, 97, 53, 101, 56, 56, 97, 55, 98, 53, 52, 51, 98, 50, 102, 51, 49, 97, 97, 54, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 32, 48, 48, 58, 51, 49, 58, 52, 56, 1, 49, 48, 53, 53, 57, 52, 50, 1, 50, 1, 49, 53, 49, 49, 54, 49, 56, 52, 1, 49, 53, 49, 49, 57, 50, 49, 49, 1, 49, 53, 49, 50, 57, 54, 54, 53, 1, 49, 53, 49, 50, 49, 53, 52, 56, 1, 49, 48, 48, 56, 48, 51, 52, 50, 1, 55, 56, 56, 50, 54, 53, 52, 56, 1, 49, 1, 48, 1, 48, 46, 48, 1, 48, 46, 48, 1, 48, 46, 48 };
    storage.store(new Slice(bytes1, 0, bytes1.length));
    storage.flush();
    storage.retrieve(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 });
    for (int i = 0; i < 2062; i++) {
        storage.retrieveNext();
    }
}
Also used: Slice (com.datatorrent.netlet.util.Slice), Test (org.junit.Test)
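testRandomSequence leans on a reset-and-iterate pattern: passing an all-zero 8-byte address to retrieve resets the read cursor, and retrieveNext then walks the flushed items one at a time. A minimal sketch of that pattern, under the same fixture assumption as the previous sketch:

// Import for HDFSStorage omitted, as above; the helper name skipItems is made up for illustration.
public class StorageScanSketch {

    static void skipItems(HDFSStorage storage, int count) throws Exception {
        // Reset the read cursor to the first retained item.
        storage.retrieve(new byte[8]);
        // Advance past the next `count` flushed items, as the loops in the test do.
        for (int i = 0; i < count; i++) {
            storage.retrieveNext();
        }
    }
}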

Example 44 with Slice

Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.

The class HDFSStorageTest, method testPartialFlushWithClean.

/**
 * This tests clean when the file does not roll over.
 *
 * @throws Exception
 */
@Test
public void testPartialFlushWithClean() throws Exception {
    Assert.assertNull(storage.retrieve(new byte[8]));
    byte[] b = "ab".getBytes();
    byte[] address = storage.store(new Slice(b, 0, b.length));
    Assert.assertNotNull(address);
    storage.flush();
    storage.clean(address);
    b = "cb".getBytes();
    byte[] addr = storage.store(new Slice(b, 0, b.length));
    Assert.assertNull(storage.retrieve(addr));
    Assert.assertNull(storage.store(new Slice(b, 0, b.length)));
    storage.flush();
    match(storage.retrieve(new byte[8]), "cb");
    match(storage.retrieve(address), "cb");
    Assert.assertNotNull(storage.store(new Slice(b, 0, b.length)));
}
Also used: Slice (com.datatorrent.netlet.util.Slice), Test (org.junit.Test)

Example 45 with Slice

Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.

The class HDFSStorageTest, method testPartialFlushWithCleanAndRollOver.

/**
 * This test covers the following use case:
 * 1. Some data is stored; not enough to cause a roll over.
 * 2. The file is flushed but not closed.
 * 3. The data is cleaned up to the last returned address.
 * 4. More data is stored, enough to make the file roll over.
 * 5. Retrieve is called for the last returned address and returns null because the data is not yet flushed.
 * 6. More data is stored, but the returned address is null because of the previous retrieve call.
 * 7. The data is flushed to make sure it is committed.
 * 8. The data is retrieved from the beginning and matches the data that was stored.
 *
 * @throws Exception
 */
@Test
public void testPartialFlushWithCleanAndRollOver() throws Exception {
    Assert.assertNull(storage.retrieve(new byte[8]));
    byte[] b = new byte[] { 48, 48, 48, 48, 98, 48, 52, 54, 49, 57, 55, 51, 52, 97, 53, 101, 56, 56, 97, 55, 98, 53, 52, 51, 98, 50, 102, 51, 49, 97, 97, 54, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 32, 48, 48, 58, 51, 49, 58, 52, 56, 1, 49, 48, 53, 53, 57, 52, 50, 1, 50, 1, 49, 53, 49, 49, 54, 49, 56, 52, 1, 49, 53, 49, 49, 57, 50, 49, 49, 1, 49, 53, 49, 50, 57, 54, 54, 53, 1, 49, 53, 49, 50, 49, 53, 52, 56, 1, 49, 48, 48, 56, 48, 51, 52, 50, 1, 55, 56, 56, 50, 54, 53, 52, 56, 1, 49, 1, 48, 1, 48, 46, 48, 1, 48, 46, 48, 1, 48, 46, 48 };
    byte[] b_org = new byte[] { 48, 48, 48, 48, 98, 48, 52, 54, 49, 57, 55, 51, 52, 97, 53, 101, 56, 56, 97, 55, 98, 53, 52, 51, 98, 50, 102, 51, 49, 97, 97, 54, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 1, 50, 48, 49, 51, 45, 49, 49, 45, 48, 55, 32, 48, 48, 58, 51, 49, 58, 52, 56, 1, 49, 48, 53, 53, 57, 52, 50, 1, 50, 1, 49, 53, 49, 49, 54, 49, 56, 52, 1, 49, 53, 49, 49, 57, 50, 49, 49, 1, 49, 53, 49, 50, 57, 54, 54, 53, 1, 49, 53, 49, 50, 49, 53, 52, 56, 1, 49, 48, 48, 56, 48, 51, 52, 50, 1, 55, 56, 56, 50, 54, 53, 52, 56, 1, 49, 1, 48, 1, 48, 46, 48, 1, 48, 46, 48, 1, 48, 46, 48 };
    byte[] address = storage.store(new Slice(b, 0, b.length));
    Assert.assertNotNull(address);
    storage.flush();
    storage.clean(address);
    byte[] addr = null;
    for (int i = 0; i < 5; i++) {
        b[0] = (byte) (b[0] + 1);
        addr = storage.store(new Slice(b, 0, b.length));
    }
    Assert.assertNull(storage.retrieve(addr));
    for (int i = 0; i < 5; i++) {
        b[0] = (byte) (b[0] + 1);
        Assert.assertNull(storage.store(new Slice(b, 0, b.length)));
    }
    storage.flush();
    b_org[0] = (byte) (b_org[0] + 1);
    match(storage.retrieve(new byte[8]), new String(b_org));
    match(storage.retrieve(address), new String(b_org));
    b_org[0] = (byte) (b_org[0] + 1);
    match(storage.retrieveNext(), new String(b_org));
    b_org[0] = (byte) (b_org[0] + 1);
    match(storage.retrieveNext(), new String(b_org));
}
Also used: Slice (com.datatorrent.netlet.util.Slice), Test (org.junit.Test)

Aggregations

Slice (com.datatorrent.netlet.util.Slice) 114
Test (org.junit.Test) 65
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 10
Input (com.esotericsoftware.kryo.io.Input) 9
IOException (java.io.IOException) 6
Map (java.util.Map) 5
ArrayList (java.util.ArrayList) 4
HashMap (java.util.HashMap) 4
CountDownLatch (java.util.concurrent.CountDownLatch) 4
BufferSlice (org.apache.apex.malhar.lib.utils.serde.BufferSlice) 4
Path (org.apache.hadoop.fs.Path) 4
ObjectMapperString (com.datatorrent.common.util.ObjectMapperString) 3
SerializationBuffer (org.apache.apex.malhar.lib.utils.serde.SerializationBuffer) 3
StringSerde (org.apache.apex.malhar.lib.utils.serde.StringSerde) 3
Attribute (com.datatorrent.api.Attribute) 2
OperatorContext (com.datatorrent.api.Context.OperatorContext) 2
Output (com.esotericsoftware.kryo.io.Output) 2
RandomAccessFile (java.io.RandomAccessFile) 2
Serializable (java.io.Serializable) 2
HashSet (java.util.HashSet) 2