
Example 1 with Flow

Use of net.ripe.hadoop.pcap.packet.Flow in the hadoop-pcap project by RIPE-NCC.

The class PcapReader, method nextPacket:

private Packet nextPacket() {
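    // Read the fixed-size pcap record header for the next packet; a short read means end of input.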
    pcapPacketHeader = new byte[PACKET_HEADER_SIZE];
    if (!readBytes(pcapPacketHeader))
        return null;
    Packet packet = createPacket();
    long packetTimestamp = PcapReaderUtil.convertInt(pcapPacketHeader, TIMESTAMP_OFFSET, reverseHeaderByteOrder);
    packet.put(Packet.TIMESTAMP, packetTimestamp);
    long packetTimestampMicros = PcapReaderUtil.convertInt(pcapPacketHeader, TIMESTAMP_MICROS_OFFSET, reverseHeaderByteOrder);
    packet.put(Packet.TIMESTAMP_MICROS, packetTimestampMicros);
    BigDecimal packetTimestampUsec = new BigDecimal(packetTimestamp + packetTimestampMicros / 1000000.0, tsUsecMc);
    packet.put(Packet.TIMESTAMP_USEC, packetTimestampUsec.doubleValue());
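    // Captured length from the record header; read that many bytes of packet data.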
    long packetSize = PcapReaderUtil.convertInt(pcapPacketHeader, CAP_LEN_OFFSET, reverseHeaderByteOrder);
    packetData = new byte[(int) packetSize];
    if (!readBytes(packetData))
        return packet;
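    // Locate the start of the IP header past the link-layer framing; -1 means no IP header was found.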
    int ipStart = findIPStart(packetData);
    if (ipStart == -1)
        return packet;
    int ipProtocolHeaderVersion = getInternetProtocolHeaderVersion(packetData, ipStart);
    packet.put(Packet.IP_VERSION, ipProtocolHeaderVersion);
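    // Only IPv4 and IPv6 headers are decoded further.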
    if (ipProtocolHeaderVersion == 4 || ipProtocolHeaderVersion == 6) {
        int ipHeaderLen = getInternetProtocolHeaderLength(packetData, ipProtocolHeaderVersion, ipStart);
        int totalLength = 0;
        if (ipProtocolHeaderVersion == 4) {
            buildInternetProtocolV4Packet(packet, packetData, ipStart);
            totalLength = PcapReaderUtil.convertShort(packetData, ipStart + IP_TOTAL_LEN_OFFSET);
        } else if (ipProtocolHeaderVersion == 6) {
            buildInternetProtocolV6Packet(packet, packetData, ipStart);
            ipHeaderLen += buildInternetProtocolV6ExtensionHeaderFragment(packet, packetData, ipStart);
            int payloadLength = PcapReaderUtil.convertShort(packetData, ipStart + IPV6_PAYLOAD_LEN_OFFSET);
            totalLength = payloadLength + IPV6_HEADER_SIZE;
        }
        packet.put(Packet.IP_HEADER_LENGTH, ipHeaderLen);
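        // IP fragment handling: fragments are buffered per Datagram key and reassembled once the last fragment arrives; otherwise the packet is marked as a fragment.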
        if ((Boolean) packet.get(Packet.FRAGMENT)) {
            if (isReassembleDatagram()) {
                Datagram datagram = packet.getDatagram();
                Long fragmentOffset = (Long) packet.get(Packet.FRAGMENT_OFFSET);
                byte[] fragmentPacketData = Arrays.copyOfRange(packetData, ipStart + ipHeaderLen, ipStart + totalLength);
                DatagramPayload payload = new DatagramPayload(fragmentOffset, fragmentPacketData);
                datagrams.put(datagram, payload);
                if ((Boolean) packet.get(Packet.LAST_FRAGMENT)) {
                    Collection<DatagramPayload> datagramPayloads = datagrams.removeAll(datagram);
                    if (datagramPayloads != null && datagramPayloads.size() > 0) {
                        // Start the reassembled packet with the IP header from the current (last) fragment
                        byte[] reassmbledPacketData = Arrays.copyOfRange(packetData, 0, ipStart + ipHeaderLen);
                        int reassmbledTotalLength = ipHeaderLen;
                        int reassembledFragments = 0;
                        DatagramPayload prev = null;
                        for (DatagramPayload datagramPayload : datagramPayloads) {
                            if (prev == null && datagramPayload.offset != 0) {
                                LOG.warn("Datagram chain not starting at 0. Probably received packets out-of-order. Can't reassemble this packet.");
                                break;
                            }
                            if (prev != null && !datagramPayload.linked(prev)) {
                                LOG.warn("Broken datagram chain between " + datagramPayload + " and " + prev + ". Can't reassemble this packet.");
                                break;
                            }
                            reassmbledPacketData = Bytes.concat(reassmbledPacketData, datagramPayload.payload);
                            reassmbledTotalLength += datagramPayload.payload.length;
                            reassembledFragments++;
                            prev = datagramPayload;
                        }
                        if (reassembledFragments == datagramPayloads.size()) {
                            packetData = reassmbledPacketData;
                            totalLength = reassmbledTotalLength;
                            packet.put(Packet.REASSEMBLED_DATAGRAM_FRAGMENTS, reassembledFragments);
                        }
                    }
                } else {
                    packet.put(Packet.PROTOCOL, PROTOCOL_FRAGMENT);
                }
            } else {
                packet.put(Packet.PROTOCOL, PROTOCOL_FRAGMENT);
            }
        }
        String protocol = (String) packet.get(Packet.PROTOCOL);
        int payloadDataStart = ipStart + ipHeaderLen;
        int payloadLength = totalLength - ipHeaderLen;
        byte[] packetPayload = readPayload(packetData, payloadDataStart, payloadLength);
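        // For UDP and TCP packets, parse the transport-layer header; buildTcpAndUdpPacket returns the transport payload.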
        if (PROTOCOL_UDP == protocol || PROTOCOL_TCP == protocol) {
            packetPayload = buildTcpAndUdpPacket(packet, packetData, ipProtocolHeaderVersion, ipStart, ipHeaderLen, totalLength);
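            // TCP stream reassembly: payloads are buffered per Flow and concatenated when a FIN (or, with push handling enabled, a PSH) is seen.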
            if (isReassembleTcp() && PROTOCOL_TCP == protocol) {
                Flow flow = packet.getFlow();
                if (packetPayload.length > 0) {
                    Long seq = (Long) packet.get(Packet.TCP_SEQ);
                    SequencePayload sequencePayload = new SequencePayload(seq, packetPayload);
                    flows.put(flow, sequencePayload);
                }
                if ((Boolean) packet.get(Packet.TCP_FLAG_FIN) || (isPush() && (Boolean) packet.get(Packet.TCP_FLAG_PSH))) {
                    Collection<SequencePayload> fragments = flows.removeAll(flow);
                    if (fragments != null && fragments.size() > 0) {
                        packet.put(Packet.REASSEMBLED_TCP_FRAGMENTS, fragments.size());
                        packetPayload = new byte[0];
                        SequencePayload prev = null;
                        for (SequencePayload seqPayload : fragments) {
                            if (prev != null && !seqPayload.linked(prev)) {
                                LOG.warn("Broken sequence chain between " + seqPayload + " and " + prev + ". Returning empty payload.");
                                packetPayload = new byte[0];
                                break;
                            }
                            packetPayload = Bytes.concat(packetPayload, seqPayload.payload);
                            prev = seqPayload;
                        }
                    }
                }
            }
        }
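        // Record the (possibly reassembled) payload length and hand the payload to protocol-specific processing.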
        packet.put(Packet.LEN, packetPayload.length);
        processPacketPayload(packet, packetPayload);
    }
    return packet;
}
Also used : Packet(net.ripe.hadoop.pcap.packet.Packet) Datagram(net.ripe.hadoop.pcap.packet.Datagram) BigDecimal(java.math.BigDecimal) Flow(net.ripe.hadoop.pcap.packet.Flow)
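
To show the Flow-keyed reassembly pattern from nextPacket() in isolation, here is a minimal, self-contained sketch. FlowKey, addSegment and reassemble are hypothetical, simplified stand-ins for Flow, SequencePayload and the flows multimap used above; the contiguity check performed by SequencePayload.linked() and the warning on broken chains are omitted for brevity.

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class FlowReassemblySketch {

    // Hypothetical simplified flow key; the real Flow carries the addressing of a TCP stream.
    record FlowKey(String src, int srcPort, String dst, int dstPort) { }

    // Out-of-order payload fragments buffered per flow, sorted by TCP sequence number.
    private final Map<FlowKey, TreeMap<Long, byte[]>> flows = new HashMap<>();

    // Buffer one segment's payload under its flow (mirrors flows.put(flow, sequencePayload)).
    void addSegment(FlowKey flow, long seq, byte[] payload) {
        flows.computeIfAbsent(flow, k -> new TreeMap<>()).put(seq, payload);
    }

    // On FIN/PSH, drain the flow and concatenate payloads in sequence order
    // (mirrors flows.removeAll(flow) followed by the Bytes.concat loop in nextPacket()).
    byte[] reassemble(FlowKey flow) {
        TreeMap<Long, byte[]> fragments = flows.remove(flow);
        if (fragments == null) {
            return new byte[0];
        }
        int total = fragments.values().stream().mapToInt(b -> b.length).sum();
        byte[] out = new byte[total];
        int offset = 0;
        for (byte[] fragment : fragments.values()) {
            System.arraycopy(fragment, 0, out, offset, fragment.length);
            offset += fragment.length;
        }
        return out;
    }
}

In the reader above, the Flow key comes from packet.getFlow(), so segments belonging to the same TCP connection accumulate in the same buffer until the FIN/PSH condition triggers reassembly.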

Aggregations

BigDecimal (java.math.BigDecimal)1 Datagram (net.ripe.hadoop.pcap.packet.Datagram)1 Flow (net.ripe.hadoop.pcap.packet.Flow)1 Packet (net.ripe.hadoop.pcap.packet.Packet)1