Search in sources:

Example 1 with Packet

use of net.ripe.hadoop.pcap.packet.Packet in project hadoop-pcap by RIPE-NCC.

From the class PcapReaderRunner, method run.

/**
 * Reads every packet from the pcap file at {@code path} and dumps each one to
 * stdout as "key: value" lines, followed by a total packet count.
 *
 * @param pcapReaderClass fully-qualified name of the PcapReader implementation
 *                        to instantiate (passed through to initPcapReader)
 * @param path            path to a pcap file; ".gz"/".gzip" suffixes are
 *                        transparently decompressed
 * @throws IOException if the file cannot be opened or read
 */
public void run(String pcapReaderClass, String path) throws IOException {
    Joiner.MapJoiner mapJoiner = Joiner.on('\n').withKeyValueSeparator(": ").useForNull("null");
    long packets = 0;
    System.out.println("=== START ===");
    // try-with-resources guarantees the stream is closed on every exit path.
    try (InputStream is = openPossiblyCompressedStream(path)) {
        PcapReader reader = initPcapReader(pcapReaderClass, new DataInputStream(is));
        for (Packet packet : reader) {
            System.out.println("--- Packet ---");
            System.out.println(mapJoiner.join(packet));
            System.out.println();
            packets++;
        }
        System.out.println("=== STOP ===");
        System.out.println("Packets: " + packets);
    }
}

/** Opens {@code path}, wrapping in a GZIPInputStream when the name indicates gzip. */
private static InputStream openPossiblyCompressedStream(String path) throws IOException {
    InputStream is = new FileInputStream(path);
    if (path.endsWith(".gz") || path.endsWith(".gzip")) {
        try {
            return new GZIPInputStream(is);
        } catch (IOException e) {
            // The gzip header read failed: close the raw stream before rethrowing
            // so the file descriptor is not leaked.
            is.close();
            throw e;
        }
    }
    return is;
}
Also used : GZIPInputStream(java.util.zip.GZIPInputStream) Packet(net.ripe.hadoop.pcap.packet.Packet) Joiner(com.google.common.base.Joiner) PcapReader(net.ripe.hadoop.pcap.PcapReader) DataInputStream(java.io.DataInputStream) GZIPInputStream(java.util.zip.GZIPInputStream) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) DataInputStream(java.io.DataInputStream) FileInputStream(java.io.FileInputStream)

Example 2 with Packet

use of net.ripe.hadoop.pcap.packet.Packet in project hadoop-pcap by RIPE-NCC.

From the class PcapReader, method nextPacket.

/**
 * Reads and decodes the next packet record from the pcap stream.
 *
 * Parses the per-packet pcap record header (timestamps, captured length),
 * then — for IPv4/IPv6 packets — decodes the IP header, optionally
 * reassembles IP fragments and TCP segments, and hands the payload to
 * {@link #processPacketPayload}.
 *
 * @return the decoded Packet, possibly only partially populated when the
 *         capture is truncated or the link type is unrecognized; {@code null}
 *         at end of stream
 */
private Packet nextPacket() {
    // Fixed-size pcap record header preceding every captured frame.
    pcapPacketHeader = new byte[PACKET_HEADER_SIZE];
    if (!readBytes(pcapPacketHeader))
        // End of stream: no further packets.
        return null;
    Packet packet = createPacket();
    long packetTimestamp = PcapReaderUtil.convertInt(pcapPacketHeader, TIMESTAMP_OFFSET, reverseHeaderByteOrder);
    packet.put(Packet.TIMESTAMP, packetTimestamp);
    long packetTimestampMicros = PcapReaderUtil.convertInt(pcapPacketHeader, TIMESTAMP_MICROS_OFFSET, reverseHeaderByteOrder);
    packet.put(Packet.TIMESTAMP_MICROS, packetTimestampMicros);
    // Seconds plus fractional microseconds, rounded via the shared MathContext.
    BigDecimal packetTimestampUsec = new BigDecimal(packetTimestamp + packetTimestampMicros / 1000000.0, tsUsecMc);
    packet.put(Packet.TIMESTAMP_USEC, packetTimestampUsec.doubleValue());
    long packetSize = PcapReaderUtil.convertInt(pcapPacketHeader, CAP_LEN_OFFSET, reverseHeaderByteOrder);
    packetData = new byte[(int) packetSize];
    if (!readBytes(packetData))
        // Truncated capture: return the packet with timestamps only.
        return packet;
    int ipStart = findIPStart(packetData);
    if (ipStart == -1)
        // Not an IP frame (or unsupported link type): nothing more to decode.
        return packet;
    int ipProtocolHeaderVersion = getInternetProtocolHeaderVersion(packetData, ipStart);
    packet.put(Packet.IP_VERSION, ipProtocolHeaderVersion);
    if (ipProtocolHeaderVersion == 4 || ipProtocolHeaderVersion == 6) {
        int ipHeaderLen = getInternetProtocolHeaderLength(packetData, ipProtocolHeaderVersion, ipStart);
        int totalLength = 0;
        if (ipProtocolHeaderVersion == 4) {
            buildInternetProtocolV4Packet(packet, packetData, ipStart);
            // IPv4 "total length" field covers header + payload.
            totalLength = PcapReaderUtil.convertShort(packetData, ipStart + IP_TOTAL_LEN_OFFSET);
        } else if (ipProtocolHeaderVersion == 6) {
            buildInternetProtocolV6Packet(packet, packetData, ipStart);
            // Fragment extension header (if present) lengthens the effective IP header.
            ipHeaderLen += buildInternetProtocolV6ExtensionHeaderFragment(packet, packetData, ipStart);
            // IPv6 "payload length" excludes the fixed 40-byte base header.
            int payloadLength = PcapReaderUtil.convertShort(packetData, ipStart + IPV6_PAYLOAD_LEN_OFFSET);
            totalLength = payloadLength + IPV6_HEADER_SIZE;
        }
        packet.put(Packet.IP_HEADER_LENGTH, ipHeaderLen);
        if ((Boolean) packet.get(Packet.FRAGMENT)) {
            if (isReassembleDatagram()) {
                // Buffer this fragment; on the last fragment, stitch the datagram together.
                Datagram datagram = packet.getDatagram();
                Long fragmentOffset = (Long) packet.get(Packet.FRAGMENT_OFFSET);
                byte[] fragmentPacketData = Arrays.copyOfRange(packetData, ipStart + ipHeaderLen, ipStart + totalLength);
                DatagramPayload payload = new DatagramPayload(fragmentOffset, fragmentPacketData);
                datagrams.put(datagram, payload);
                if ((Boolean) packet.get(Packet.LAST_FRAGMENT)) {
                    Collection<DatagramPayload> datagramPayloads = datagrams.removeAll(datagram);
                    if (datagramPayloads != null && datagramPayloads.size() > 0) {
                        // Start the reassembled packet with the link + IP header of the current packet.
                        byte[] reassembledPacketData = Arrays.copyOfRange(packetData, 0, ipStart + ipHeaderLen);
                        int reassembledTotalLength = ipHeaderLen;
                        int reassembledFragments = 0;
                        DatagramPayload prev = null;
                        for (DatagramPayload datagramPayload : datagramPayloads) {
                            if (prev == null && datagramPayload.offset != 0) {
                                LOG.warn("Datagram chain not starting at 0. Probably received packets out-of-order. Can't reassemble this packet.");
                                break;
                            }
                            if (prev != null && !datagramPayload.linked(prev)) {
                                LOG.warn("Broken datagram chain between " + datagramPayload + " and " + prev + ". Can't reassemble this packet.");
                                break;
                            }
                            reassembledPacketData = Bytes.concat(reassembledPacketData, datagramPayload.payload);
                            reassembledTotalLength += datagramPayload.payload.length;
                            reassembledFragments++;
                            prev = datagramPayload;
                        }
                        // Only adopt the reassembly when every fragment chained cleanly.
                        if (reassembledFragments == datagramPayloads.size()) {
                            packetData = reassembledPacketData;
                            totalLength = reassembledTotalLength;
                            packet.put(Packet.REASSEMBLED_DATAGRAM_FRAGMENTS, reassembledFragments);
                        }
                    }
                } else {
                    // Intermediate fragment: mark it so it is not parsed as TCP/UDP.
                    packet.put(Packet.PROTOCOL, PROTOCOL_FRAGMENT);
                }
            } else {
                // Reassembly disabled: every fragment is reported as-is.
                packet.put(Packet.PROTOCOL, PROTOCOL_FRAGMENT);
            }
        }
        String protocol = (String) packet.get(Packet.PROTOCOL);
        int payloadDataStart = ipStart + ipHeaderLen;
        int payloadLength = totalLength - ipHeaderLen;
        byte[] packetPayload = readPayload(packetData, payloadDataStart, payloadLength);
        // NOTE(review): reference comparison (==) on the protocol constants relies on
        // PROTOCOL_UDP/PROTOCOL_TCP being the same interned instances stored in the
        // packet — presumably guaranteed by PcapReaderUtil; confirm before changing.
        if (PROTOCOL_UDP == protocol || PROTOCOL_TCP == protocol) {
            packetPayload = buildTcpAndUdpPacket(packet, packetData, ipProtocolHeaderVersion, ipStart, ipHeaderLen, totalLength);
            if (isReassembleTcp() && PROTOCOL_TCP == protocol) {
                Flow flow = packet.getFlow();
                if (packetPayload.length > 0) {
                    // Buffer non-empty segments keyed by flow until FIN (or PSH when enabled).
                    Long seq = (Long) packet.get(Packet.TCP_SEQ);
                    SequencePayload sequencePayload = new SequencePayload(seq, packetPayload);
                    flows.put(flow, sequencePayload);
                }
                if ((Boolean) packet.get(Packet.TCP_FLAG_FIN) || (isPush() && (Boolean) packet.get(Packet.TCP_FLAG_PSH))) {
                    Collection<SequencePayload> fragments = flows.removeAll(flow);
                    if (fragments != null && fragments.size() > 0) {
                        packet.put(Packet.REASSEMBLED_TCP_FRAGMENTS, fragments.size());
                        packetPayload = new byte[0];
                        SequencePayload prev = null;
                        for (SequencePayload seqPayload : fragments) {
                            if (prev != null && !seqPayload.linked(prev)) {
                                LOG.warn("Broken sequence chain between " + seqPayload + " and " + prev + ". Returning empty payload.");
                                packetPayload = new byte[0];
                                break;
                            }
                            packetPayload = Bytes.concat(packetPayload, seqPayload.payload);
                            prev = seqPayload;
                        }
                    }
                }
            }
        }
        packet.put(Packet.LEN, packetPayload.length);
        processPacketPayload(packet, packetPayload);
    }
    return packet;
}
Also used : Packet(net.ripe.hadoop.pcap.packet.Packet) Datagram(net.ripe.hadoop.pcap.packet.Datagram) BigDecimal(java.math.BigDecimal) Flow(net.ripe.hadoop.pcap.packet.Flow)

Example 3 with Packet

use of net.ripe.hadoop.pcap.packet.Packet in project hadoop-pcap by RIPE-NCC.

From the class PcapReaderTest, method assembled.

/**
 * Verifies full-stream TCP reassembly (reassemble on FIN, not on PSH):
 * the two data segments of each test stream must be delivered as a single
 * reassembled payload of exactly 2 fragments.
 */
@Test
public void assembled() throws IOException {
    for (String file : new String[] { "src/test/resources/tcp-stream-v4.pcap", "src/test/resources/tcp-stream-v6.pcap" }) {
        PcapReader reader = new PcapReader(new DataInputStream(new FileInputStream(file))) {

            @Override
            protected void processPacketPayload(Packet packet, byte[] payload) {
                Integer fragments = (Integer) packet.get(Packet.REASSEMBLED_TCP_FRAGMENTS);
                // Only the reassembled packet carries the fragment count; all
                // other packets in the stream are passed through untouched.
                if (fragments != null) {
                    // assertEquals gives an expected/actual message on failure,
                    // unlike the original assertTrue(2 == fragments).
                    assertEquals(2, fragments.intValue());
                    // Decode with an explicit charset so the assertion does not
                    // depend on the platform default encoding.
                    assertEquals("part1\npart2\n", new String(payload, java.nio.charset.StandardCharsets.UTF_8));
                }
            }

            @Override
            protected boolean isReassembleTcp() {
                return true;
            }

            @Override
            protected boolean isPush() {
                return false;
            }
        };
        // The capture contains 10 packets in total (handshake, data, teardown).
        assertEquals(10, Iterables.size(reader));
    }
}
Also used : Packet(net.ripe.hadoop.pcap.packet.Packet) PcapReader(net.ripe.hadoop.pcap.PcapReader) DataInputStream(java.io.DataInputStream) FileInputStream(java.io.FileInputStream) Test(org.junit.Test)

Example 4 with Packet

use of net.ripe.hadoop.pcap.packet.Packet in project hadoop-pcap by RIPE-NCC.

From the class PcapDeserializer, method deserialize.

/**
 * Converts a Packet wrapped in an ObjectWritable into a Hive row by copying
 * the packet's fields into the reusable {@code row} in declared column order.
 *
 * @param w an {@link ObjectWritable} holding a {@code Packet}
 * @return the populated (shared, reused) row
 * @throws SerDeException never thrown here; declared by the SerDe contract
 */
@Override
public Object deserialize(Writable w) throws SerDeException {
    ObjectWritable obj = (ObjectWritable) w;
    Packet packet = (Packet) obj.get();
    for (int i = 0; i < numColumns; i++) {
        // Fixed local-variable typo: "columName" -> "columnName".
        String columnName = columnNames.get(i);
        // Columns absent from the packet presumably yield null here — confirm
        // Packet.get's contract; Hive renders null as NULL.
        Object value = packet.get(columnName);
        row.set(i, value);
    }
    return row;
}
Also used : Packet(net.ripe.hadoop.pcap.packet.Packet) ObjectWritable(org.apache.hadoop.io.ObjectWritable)

Example 5 with Packet

use of net.ripe.hadoop.pcap.packet.Packet in project hadoop-pcap by RIPE-NCC.

From the class PcapReaderTest, method assembledWithPush.

/**
 * Verifies TCP reassembly with PSH-triggered flushing enabled: each pushed
 * segment is delivered individually as a 1-fragment "reassembled" payload,
 * in stream order ("part1\n" then "part2\n").
 */
@Test
public void assembledWithPush() throws IOException {
    for (String file : new String[] { "src/test/resources/tcp-stream-v4.pcap", "src/test/resources/tcp-stream-v6.pcap" }) {
        PcapReader reader = new PcapReader(new DataInputStream(new FileInputStream(file))) {

            // Tracks which pushed segment we expect next; private is sufficient
            // since nothing outside this anonymous class can reference it.
            private int counter = 1;

            @Override
            protected void processPacketPayload(Packet packet, byte[] payload) {
                Integer fragments = (Integer) packet.get(Packet.REASSEMBLED_TCP_FRAGMENTS);
                if (fragments != null) {
                    // With PSH flushing, every delivery contains exactly one segment.
                    assertEquals(1, fragments.intValue());
                    // Explicit charset avoids platform-default-encoding flakiness.
                    switch(counter) {
                        case 1:
                            assertEquals("part1\n", new String(payload, java.nio.charset.StandardCharsets.UTF_8));
                            break;
                        case 2:
                            assertEquals("part2\n", new String(payload, java.nio.charset.StandardCharsets.UTF_8));
                            break;
                    }
                    counter++;
                }
            }

            @Override
            protected boolean isReassembleTcp() {
                return true;
            }

            @Override
            protected boolean isPush() {
                return true;
            }
        };
        // The capture contains 10 packets in total (handshake, data, teardown).
        assertEquals(10, Iterables.size(reader));
    }
}
Also used : Packet(net.ripe.hadoop.pcap.packet.Packet) PcapReader(net.ripe.hadoop.pcap.PcapReader) DataInputStream(java.io.DataInputStream) FileInputStream(java.io.FileInputStream) Test(org.junit.Test)

Aggregations

Packet (net.ripe.hadoop.pcap.packet.Packet)5 DataInputStream (java.io.DataInputStream)3 FileInputStream (java.io.FileInputStream)3 PcapReader (net.ripe.hadoop.pcap.PcapReader)3 Test (org.junit.Test)2 Joiner (com.google.common.base.Joiner)1 InputStream (java.io.InputStream)1 BigDecimal (java.math.BigDecimal)1 GZIPInputStream (java.util.zip.GZIPInputStream)1 Datagram (net.ripe.hadoop.pcap.packet.Datagram)1 Flow (net.ripe.hadoop.pcap.packet.Flow)1 ObjectWritable (org.apache.hadoop.io.ObjectWritable)1