From a37ac5347c41c0a86d1ff2176ec082591b8e6117 Mon Sep 17 00:00:00 2001 From: Louis Date: Fri, 23 Mar 2018 18:44:34 +0100 Subject: [PATCH] Init --- decoders/decoder.go | 230 ++++++++ decoders/netflow/ipfix.go | 989 ++++++++++++++++++++++++++++++++ decoders/netflow/netflow.go | 599 +++++++++++++++++++ decoders/netflow/nfv9.go | 315 ++++++++++ decoders/netflow/packet.go | 153 +++++ decoders/sflow/datastructure.go | 103 ++++ decoders/sflow/packet.go | 69 +++ decoders/sflow/sflow.go | 326 +++++++++++ decoders/utils/utils.go | 16 + goflow.go | 296 ++++++++++ pb/flow.pb.go | 390 +++++++++++++ pb/flow.proto | 78 +++ producer/kafka.go | 66 +++ producer/producer.go | 49 ++ producer/producer_nf.go | 785 +++++++++++++++++++++++++ producer/producer_sf.go | 329 +++++++++++ 16 files changed, 4793 insertions(+) create mode 100644 decoders/decoder.go create mode 100644 decoders/netflow/ipfix.go create mode 100644 decoders/netflow/netflow.go create mode 100644 decoders/netflow/nfv9.go create mode 100644 decoders/netflow/packet.go create mode 100644 decoders/sflow/datastructure.go create mode 100644 decoders/sflow/packet.go create mode 100644 decoders/sflow/sflow.go create mode 100644 decoders/utils/utils.go create mode 100644 goflow.go create mode 100644 pb/flow.pb.go create mode 100644 pb/flow.proto create mode 100644 producer/kafka.go create mode 100644 producer/producer.go create mode 100644 producer/producer_nf.go create mode 100644 producer/producer_sf.go diff --git a/decoders/decoder.go b/decoders/decoder.go new file mode 100644 index 0000000..cc84e82 --- /dev/null +++ b/decoders/decoder.go @@ -0,0 +1,230 @@ +package decoder + +import ( + log "github.com/Sirupsen/logrus" + "github.com/prometheus/client_golang/prometheus" + "strconv" + "sync" + "time" +) + +type Message interface{} +type MessageDecoded interface{} +type DecoderConfig interface{} +type CallbackArgs interface{} + +type DecoderFunc func(Message, DecoderConfig) (MessageDecoded, error) +type DoneCallback func(interface{}, interface{}, interface{}) (bool, error) +type ErrorCallback func(interface{}, error, interface{}, interface{}) (bool, error) + +//type DoneCallback func(MessageDecoded, DoneCallbackConfig) (bool, error) + +var ( + MetricsRegistered bool + MetricsRegistrationLock = &sync.Mutex{} + + DecoderStats = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_decoder_count", + Help: "Decoder processed count.", + }, + []string{"worker", "name"}, + ) + DecoderErrors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_decoder_error_count", + Help: "Decoder processed error count.", + }, + []string{"worker", "name"}, + ) + DecoderTime = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "flow_summary_decoding_time_us", + Help: "Decoding time summary.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"name"}, + ) + DecoderProcessTime = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "flow_summary_processing_time_us", + Help: "Processing time summary.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"name"}, + ) +) + +// Worker structure +type Worker struct { + Id int + DecoderParams DecoderParams + WorkerPool chan chan Message + Name string + InMsg chan Message + Quit chan bool +} + +// Create a worker and add it to the pool. 
+func CreateWorker(workerPool chan chan Message, decoderParams DecoderParams, id int, name string) Worker { + return Worker{ + Id: id, + DecoderParams: decoderParams, + WorkerPool: workerPool, + Name: name, + InMsg: make(chan Message), + Quit: make(chan bool), + } +} + +// Start the worker. Launches a goroutine to process NFv9 messages. +// The worker will add its input channel of NFv9 messages to decode to the pool. +func (w Worker) Start() { + go func() { + log.Debugf("Worker %v started", w.Id) + for { + w.WorkerPool <- w.InMsg + select { + case <-w.Quit: + break + case msg := <-w.InMsg: + //log.Printf("Worker %v: Received msg\n", w.Id) + timeTrackStart := time.Now() + msgdec, err := w.DecoderParams.DecoderFunc(msg, w.DecoderParams.DecoderConfig) + timeTrackStop := time.Now() + DecoderTime.With( + prometheus.Labels{ + "name": w.Name, + }). + Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) + + if err != nil { + //fmt.Printf("Worker %v: error: %v\n", w.Id, err) + if w.DecoderParams.ErrorCallback != nil { + w.DecoderParams.ErrorCallback(msgdec, err, w.DecoderParams.CallbackArgs, w.DecoderParams.DecoderConfig) + DecoderErrors.With( + prometheus.Labels{ + "worker": strconv.Itoa(w.Id), + "name": w.Name, + }). + Inc() + } + } else { + if w.DecoderParams.DoneCallback != nil { + timeTrackStart = time.Now() + success, errcb := w.DecoderParams.DoneCallback(msgdec, w.DecoderParams.CallbackArgs, w.DecoderParams.DecoderConfig) + timeTrackStop = time.Now() + DecoderProcessTime.With( + prometheus.Labels{ + "name": w.Name, + }). + Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) + + if success != true { + log.Errorf("Worker %v: callback problem\n", w.Id) + DecoderErrors.With( + prometheus.Labels{ + "worker": strconv.Itoa(w.Id), + "name": w.Name, + }). + Inc() + } + + if errcb != nil { + log.Errorf("Worker %v: callback error %v\n", w.Id, errcb) + DecoderErrors.With( + prometheus.Labels{ + "worker": strconv.Itoa(w.Id), + "name": w.Name, + }). + Inc() + } + } + } + DecoderStats.With( + prometheus.Labels{ + "worker": strconv.Itoa(w.Id), + "name": w.Name, + }). + Inc() + //w.OutDec<-msgdec + } + } + log.Debugf("Worker %v done", w.Id) + }() +} + +// Stop the worker. +func (w Worker) Stop() { + log.Debugf("Stopping worker %v", w.Id) + w.Quit <- true +} + +// Processor structure +type Processor struct { + workerpool chan chan Message + workerlist []Worker + DecoderParams DecoderParams + Name string +} + +// Decoder structure. Define the function to call and the config specific to the type of packets. +type DecoderParams struct { + DecoderFunc DecoderFunc + DecoderConfig DecoderConfig + DoneCallback DoneCallback + ErrorCallback ErrorCallback + CallbackArgs CallbackArgs +} + +func RegisterMetrics() { + MetricsRegistrationLock.Lock() + if MetricsRegistered { + return + } + prometheus.MustRegister(DecoderStats) + prometheus.MustRegister(DecoderErrors) + prometheus.MustRegister(DecoderTime) + prometheus.MustRegister(DecoderProcessTime) + MetricsRegistered = true + MetricsRegistrationLock.Unlock() +} + +// Create a message processor which is going to create all the workers and set-up the pool. 
+func CreateProcessor(numWorkers int, decoderParams DecoderParams, name string) Processor { + RegisterMetrics() + + log.Infof("Creating %v message processor with %v workers", name, numWorkers) + processor := Processor{ + workerpool: make(chan chan Message), + workerlist: make([]Worker, numWorkers), + DecoderParams: decoderParams, + Name: name, + } + for i := 0; i < numWorkers; i++ { + worker := CreateWorker(processor.workerpool, decoderParams, i, name) + processor.workerlist[i] = worker + } + return processor +} + +// Start message processor +func (p Processor) Start() { + log.WithFields(log.Fields{ + "Name": p.Name}).Debug("Starting workers") + for _, worker := range p.workerlist { + worker.Start() + } +} + +func (p Processor) Stop() { + for _, worker := range p.workerlist { + worker.Stop() + } +} + +// Send a message to be decoded to the pool. +func (p Processor) ProcessMessage(msg Message) { + sendChannel := <-p.workerpool + sendChannel <- msg +} diff --git a/decoders/netflow/ipfix.go b/decoders/netflow/ipfix.go new file mode 100644 index 0000000..115932c --- /dev/null +++ b/decoders/netflow/ipfix.go @@ -0,0 +1,989 @@ +package netflow + +import ( + "fmt" + "time" +) + +const ( + IPFIX_FIELD_Reserved = 0 + IPFIX_FIELD_octetDeltaCount = 1 + IPFIX_FIELD_packetDeltaCount = 2 + IPFIX_FIELD_deltaFlowCount = 3 + IPFIX_FIELD_protocolIdentifier = 4 + IPFIX_FIELD_ipClassOfService = 5 + IPFIX_FIELD_tcpControlBits = 6 + IPFIX_FIELD_sourceTransportPort = 7 + IPFIX_FIELD_sourceIPv4Address = 8 + IPFIX_FIELD_sourceIPv4PrefixLength = 9 + IPFIX_FIELD_ingressInterface = 10 + IPFIX_FIELD_destinationTransportPort = 11 + IPFIX_FIELD_destinationIPv4Address = 12 + IPFIX_FIELD_destinationIPv4PrefixLength = 13 + IPFIX_FIELD_egressInterface = 14 + IPFIX_FIELD_ipNextHopIPv4Address = 15 + IPFIX_FIELD_bgpSourceAsNumber = 16 + IPFIX_FIELD_bgpDestinationAsNumber = 17 + IPFIX_FIELD_bgpNextHopIPv4Address = 18 + IPFIX_FIELD_postMCastPacketDeltaCount = 19 + IPFIX_FIELD_postMCastOctetDeltaCount = 20 + IPFIX_FIELD_flowEndSysUpTime = 21 + IPFIX_FIELD_flowStartSysUpTime = 22 + IPFIX_FIELD_postOctetDeltaCount = 23 + IPFIX_FIELD_postPacketDeltaCount = 24 + IPFIX_FIELD_minimumIpTotalLength = 25 + IPFIX_FIELD_maximumIpTotalLength = 26 + IPFIX_FIELD_sourceIPv6Address = 27 + IPFIX_FIELD_destinationIPv6Address = 28 + IPFIX_FIELD_sourceIPv6PrefixLength = 29 + IPFIX_FIELD_destinationIPv6PrefixLength = 30 + IPFIX_FIELD_flowLabelIPv6 = 31 + IPFIX_FIELD_icmpTypeCodeIPv4 = 32 + IPFIX_FIELD_igmpType = 33 + IPFIX_FIELD_samplingInterval = 34 + IPFIX_FIELD_samplingAlgorithm = 35 + IPFIX_FIELD_flowActiveTimeout = 36 + IPFIX_FIELD_flowIdleTimeout = 37 + IPFIX_FIELD_engineType = 38 + IPFIX_FIELD_engineId = 39 + IPFIX_FIELD_exportedOctetTotalCount = 40 + IPFIX_FIELD_exportedMessageTotalCount = 41 + IPFIX_FIELD_exportedFlowRecordTotalCount = 42 + IPFIX_FIELD_ipv4RouterSc = 43 + IPFIX_FIELD_sourceIPv4Prefix = 44 + IPFIX_FIELD_destinationIPv4Prefix = 45 + IPFIX_FIELD_mplsTopLabelType = 46 + IPFIX_FIELD_mplsTopLabelIPv4Address = 47 + IPFIX_FIELD_samplerId = 48 + IPFIX_FIELD_samplerMode = 49 + IPFIX_FIELD_samplerRandomInterval = 50 + IPFIX_FIELD_classId = 51 + IPFIX_FIELD_minimumTTL = 52 + IPFIX_FIELD_maximumTTL = 53 + IPFIX_FIELD_fragmentIdentification = 54 + IPFIX_FIELD_postIpClassOfService = 55 + IPFIX_FIELD_sourceMacAddress = 56 + IPFIX_FIELD_postDestinationMacAddress = 57 + IPFIX_FIELD_vlanId = 58 + IPFIX_FIELD_postVlanId = 59 + IPFIX_FIELD_ipVersion = 60 + IPFIX_FIELD_flowDirection = 61 + IPFIX_FIELD_ipNextHopIPv6Address = 62 + 
IPFIX_FIELD_bgpNextHopIPv6Address = 63 + IPFIX_FIELD_ipv6ExtensionHeaders = 64 + IPFIX_FIELD_mplsTopLabelStackSection = 70 + IPFIX_FIELD_mplsLabelStackSection2 = 71 + IPFIX_FIELD_mplsLabelStackSection3 = 72 + IPFIX_FIELD_mplsLabelStackSection4 = 73 + IPFIX_FIELD_mplsLabelStackSection5 = 74 + IPFIX_FIELD_mplsLabelStackSection6 = 75 + IPFIX_FIELD_mplsLabelStackSection7 = 76 + IPFIX_FIELD_mplsLabelStackSection8 = 77 + IPFIX_FIELD_mplsLabelStackSection9 = 78 + IPFIX_FIELD_mplsLabelStackSection10 = 79 + IPFIX_FIELD_destinationMacAddress = 80 + IPFIX_FIELD_postSourceMacAddress = 81 + IPFIX_FIELD_interfaceName = 82 + IPFIX_FIELD_interfaceDescription = 83 + IPFIX_FIELD_samplerName = 84 + IPFIX_FIELD_octetTotalCount = 85 + IPFIX_FIELD_packetTotalCount = 86 + IPFIX_FIELD_flagsAndSamplerId = 87 + IPFIX_FIELD_fragmentOffset = 88 + IPFIX_FIELD_forwardingStatus = 89 + IPFIX_FIELD_mplsVpnRouteDistinguisher = 90 + IPFIX_FIELD_mplsTopLabelPrefixLength = 91 + IPFIX_FIELD_srcTrafficIndex = 92 + IPFIX_FIELD_dstTrafficIndex = 93 + IPFIX_FIELD_applicationDescription = 94 + IPFIX_FIELD_applicationId = 95 + IPFIX_FIELD_applicationName = 96 + IPFIX_FIELD_postIpDiffServCodePoint = 98 + IPFIX_FIELD_multicastReplicationFactor = 99 + IPFIX_FIELD_className = 100 + IPFIX_FIELD_classificationEngineId = 101 + IPFIX_FIELD_layer2packetSectionOffset = 102 + IPFIX_FIELD_layer2packetSectionSize = 103 + IPFIX_FIELD_layer2packetSectionData = 104 + IPFIX_FIELD_bgpNextAdjacentAsNumber = 128 + IPFIX_FIELD_bgpPrevAdjacentAsNumber = 129 + IPFIX_FIELD_exporterIPv4Address = 130 + IPFIX_FIELD_exporterIPv6Address = 131 + IPFIX_FIELD_droppedOctetDeltaCount = 132 + IPFIX_FIELD_droppedPacketDeltaCount = 133 + IPFIX_FIELD_droppedOctetTotalCount = 134 + IPFIX_FIELD_droppedPacketTotalCount = 135 + IPFIX_FIELD_flowEndReason = 136 + IPFIX_FIELD_commonPropertiesId = 137 + IPFIX_FIELD_observationPointId = 138 + IPFIX_FIELD_icmpTypeCodeIPv6 = 139 + IPFIX_FIELD_mplsTopLabelIPv6Address = 140 + IPFIX_FIELD_lineCardId = 141 + IPFIX_FIELD_portId = 142 + IPFIX_FIELD_meteringProcessId = 143 + IPFIX_FIELD_exportingProcessId = 144 + IPFIX_FIELD_templateId = 145 + IPFIX_FIELD_wlanChannelId = 146 + IPFIX_FIELD_wlanSSID = 147 + IPFIX_FIELD_flowId = 148 + IPFIX_FIELD_observationDomainId = 149 + IPFIX_FIELD_flowStartSeconds = 150 + IPFIX_FIELD_flowEndSeconds = 151 + IPFIX_FIELD_flowStartMilliseconds = 152 + IPFIX_FIELD_flowEndMilliseconds = 153 + IPFIX_FIELD_flowStartMicroseconds = 154 + IPFIX_FIELD_flowEndMicroseconds = 155 + IPFIX_FIELD_flowStartNanoseconds = 156 + IPFIX_FIELD_flowEndNanoseconds = 157 + IPFIX_FIELD_flowStartDeltaMicroseconds = 158 + IPFIX_FIELD_flowEndDeltaMicroseconds = 159 + IPFIX_FIELD_systemInitTimeMilliseconds = 160 + IPFIX_FIELD_flowDurationMilliseconds = 161 + IPFIX_FIELD_flowDurationMicroseconds = 162 + IPFIX_FIELD_observedFlowTotalCount = 163 + IPFIX_FIELD_ignoredPacketTotalCount = 164 + IPFIX_FIELD_ignoredOctetTotalCount = 165 + IPFIX_FIELD_notSentFlowTotalCount = 166 + IPFIX_FIELD_notSentPacketTotalCount = 167 + IPFIX_FIELD_notSentOctetTotalCount = 168 + IPFIX_FIELD_destinationIPv6Prefix = 169 + IPFIX_FIELD_sourceIPv6Prefix = 170 + IPFIX_FIELD_postOctetTotalCount = 171 + IPFIX_FIELD_postPacketTotalCount = 172 + IPFIX_FIELD_flowKeyIndicator = 173 + IPFIX_FIELD_postMCastPacketTotalCount = 174 + IPFIX_FIELD_postMCastOctetTotalCount = 175 + IPFIX_FIELD_icmpTypeIPv4 = 176 + IPFIX_FIELD_icmpCodeIPv4 = 177 + IPFIX_FIELD_icmpTypeIPv6 = 178 + IPFIX_FIELD_icmpCodeIPv6 = 179 + IPFIX_FIELD_udpSourcePort = 180 + IPFIX_FIELD_udpDestinationPort = 
181 + IPFIX_FIELD_tcpSourcePort = 182 + IPFIX_FIELD_tcpDestinationPort = 183 + IPFIX_FIELD_tcpSequenceNumber = 184 + IPFIX_FIELD_tcpAcknowledgementNumber = 185 + IPFIX_FIELD_tcpWindowSize = 186 + IPFIX_FIELD_tcpUrgentPointer = 187 + IPFIX_FIELD_tcpHeaderLength = 188 + IPFIX_FIELD_ipHeaderLength = 189 + IPFIX_FIELD_totalLengthIPv4 = 190 + IPFIX_FIELD_payloadLengthIPv6 = 191 + IPFIX_FIELD_ipTTL = 192 + IPFIX_FIELD_nextHeaderIPv6 = 193 + IPFIX_FIELD_mplsPayloadLength = 194 + IPFIX_FIELD_ipDiffServCodePoint = 195 + IPFIX_FIELD_ipPrecedence = 196 + IPFIX_FIELD_fragmentFlags = 197 + IPFIX_FIELD_octetDeltaSumOfSquares = 198 + IPFIX_FIELD_octetTotalSumOfSquares = 199 + IPFIX_FIELD_mplsTopLabelTTL = 200 + IPFIX_FIELD_mplsLabelStackLength = 201 + IPFIX_FIELD_mplsLabelStackDepth = 202 + IPFIX_FIELD_mplsTopLabelExp = 203 + IPFIX_FIELD_ipPayloadLength = 204 + IPFIX_FIELD_udpMessageLength = 205 + IPFIX_FIELD_isMulticast = 206 + IPFIX_FIELD_ipv4IHL = 207 + IPFIX_FIELD_ipv4Options = 208 + IPFIX_FIELD_tcpOptions = 209 + IPFIX_FIELD_paddingOctets = 210 + IPFIX_FIELD_collectorIPv4Address = 211 + IPFIX_FIELD_collectorIPv6Address = 212 + IPFIX_FIELD_exportInterface = 213 + IPFIX_FIELD_exportProtocolVersion = 214 + IPFIX_FIELD_exportTransportProtocol = 215 + IPFIX_FIELD_collectorTransportPort = 216 + IPFIX_FIELD_exporterTransportPort = 217 + IPFIX_FIELD_tcpSynTotalCount = 218 + IPFIX_FIELD_tcpFinTotalCount = 219 + IPFIX_FIELD_tcpRstTotalCount = 220 + IPFIX_FIELD_tcpPshTotalCount = 221 + IPFIX_FIELD_tcpAckTotalCount = 222 + IPFIX_FIELD_tcpUrgTotalCount = 223 + IPFIX_FIELD_ipTotalLength = 224 + IPFIX_FIELD_postNATSourceIPv4Address = 225 + IPFIX_FIELD_postNATDestinationIPv4Address = 226 + IPFIX_FIELD_postNAPTSourceTransportPort = 227 + IPFIX_FIELD_postNAPTDestinationTransportPort = 228 + IPFIX_FIELD_natOriginatingAddressRealm = 229 + IPFIX_FIELD_natEvent = 230 + IPFIX_FIELD_initiatorOctets = 231 + IPFIX_FIELD_responderOctets = 232 + IPFIX_FIELD_firewallEvent = 233 + IPFIX_FIELD_ingressVRFID = 234 + IPFIX_FIELD_egressVRFID = 235 + IPFIX_FIELD_VRFname = 236 + IPFIX_FIELD_postMplsTopLabelExp = 237 + IPFIX_FIELD_tcpWindowScale = 238 + IPFIX_FIELD_biflowDirection = 239 + IPFIX_FIELD_ethernetHeaderLength = 240 + IPFIX_FIELD_ethernetPayloadLength = 241 + IPFIX_FIELD_ethernetTotalLength = 242 + IPFIX_FIELD_dot1qVlanId = 243 + IPFIX_FIELD_dot1qPriority = 244 + IPFIX_FIELD_dot1qCustomerVlanId = 245 + IPFIX_FIELD_dot1qCustomerPriority = 246 + IPFIX_FIELD_metroEvcId = 247 + IPFIX_FIELD_metroEvcType = 248 + IPFIX_FIELD_pseudoWireId = 249 + IPFIX_FIELD_pseudoWireType = 250 + IPFIX_FIELD_pseudoWireControlWord = 251 + IPFIX_FIELD_ingressPhysicalInterface = 252 + IPFIX_FIELD_egressPhysicalInterface = 253 + IPFIX_FIELD_postDot1qVlanId = 254 + IPFIX_FIELD_postDot1qCustomerVlanId = 255 + IPFIX_FIELD_ethernetType = 256 + IPFIX_FIELD_postIpPrecedence = 257 + IPFIX_FIELD_collectionTimeMilliseconds = 258 + IPFIX_FIELD_exportSctpStreamId = 259 + IPFIX_FIELD_maxExportSeconds = 260 + IPFIX_FIELD_maxFlowEndSeconds = 261 + IPFIX_FIELD_messageMD5Checksum = 262 + IPFIX_FIELD_messageScope = 263 + IPFIX_FIELD_minExportSeconds = 264 + IPFIX_FIELD_minFlowStartSeconds = 265 + IPFIX_FIELD_opaqueOctets = 266 + IPFIX_FIELD_sessionScope = 267 + IPFIX_FIELD_maxFlowEndMicroseconds = 268 + IPFIX_FIELD_maxFlowEndMilliseconds = 269 + IPFIX_FIELD_maxFlowEndNanoseconds = 270 + IPFIX_FIELD_minFlowStartMicroseconds = 271 + IPFIX_FIELD_minFlowStartMilliseconds = 272 + IPFIX_FIELD_minFlowStartNanoseconds = 273 + IPFIX_FIELD_collectorCertificate = 274 + 
IPFIX_FIELD_exporterCertificate = 275 + IPFIX_FIELD_dataRecordsReliability = 276 + IPFIX_FIELD_observationPointType = 277 + IPFIX_FIELD_newConnectionDeltaCount = 278 + IPFIX_FIELD_connectionSumDurationSeconds = 279 + IPFIX_FIELD_connectionTransactionId = 280 + IPFIX_FIELD_postNATSourceIPv6Address = 281 + IPFIX_FIELD_postNATDestinationIPv6Address = 282 + IPFIX_FIELD_natPoolId = 283 + IPFIX_FIELD_natPoolName = 284 + IPFIX_FIELD_anonymizationFlags = 285 + IPFIX_FIELD_anonymizationTechnique = 286 + IPFIX_FIELD_informationElementIndex = 287 + IPFIX_FIELD_p2pTechnology = 288 + IPFIX_FIELD_tunnelTechnology = 289 + IPFIX_FIELD_encryptedTechnology = 290 + IPFIX_FIELD_basicList = 291 + IPFIX_FIELD_subTemplateList = 292 + IPFIX_FIELD_subTemplateMultiList = 293 + IPFIX_FIELD_bgpValidityState = 294 + IPFIX_FIELD_IPSecSPI = 295 + IPFIX_FIELD_greKey = 296 + IPFIX_FIELD_natType = 297 + IPFIX_FIELD_initiatorPackets = 298 + IPFIX_FIELD_responderPackets = 299 + IPFIX_FIELD_observationDomainName = 300 + IPFIX_FIELD_selectionSequenceId = 301 + IPFIX_FIELD_selectorId = 302 + IPFIX_FIELD_informationElementId = 303 + IPFIX_FIELD_selectorAlgorithm = 304 + IPFIX_FIELD_samplingPacketInterval = 305 + IPFIX_FIELD_samplingPacketSpace = 306 + IPFIX_FIELD_samplingTimeInterval = 307 + IPFIX_FIELD_samplingTimeSpace = 308 + IPFIX_FIELD_samplingSize = 309 + IPFIX_FIELD_samplingPopulation = 310 + IPFIX_FIELD_samplingProbability = 311 + IPFIX_FIELD_dataLinkFrameSize = 312 + IPFIX_FIELD_ipHeaderPacketSection = 313 + IPFIX_FIELD_ipPayloadPacketSection = 314 + IPFIX_FIELD_dataLinkFrameSection = 315 + IPFIX_FIELD_mplsLabelStackSection = 316 + IPFIX_FIELD_mplsPayloadPacketSection = 317 + IPFIX_FIELD_selectorIdTotalPktsObserved = 318 + IPFIX_FIELD_selectorIdTotalPktsSelected = 319 + IPFIX_FIELD_absoluteError = 320 + IPFIX_FIELD_relativeError = 321 + IPFIX_FIELD_observationTimeSeconds = 322 + IPFIX_FIELD_observationTimeMilliseconds = 323 + IPFIX_FIELD_observationTimeMicroseconds = 324 + IPFIX_FIELD_observationTimeNanoseconds = 325 + IPFIX_FIELD_digestHashValue = 326 + IPFIX_FIELD_hashIPPayloadOffset = 327 + IPFIX_FIELD_hashIPPayloadSize = 328 + IPFIX_FIELD_hashOutputRangeMin = 329 + IPFIX_FIELD_hashOutputRangeMax = 330 + IPFIX_FIELD_hashSelectedRangeMin = 331 + IPFIX_FIELD_hashSelectedRangeMax = 332 + IPFIX_FIELD_hashDigestOutput = 333 + IPFIX_FIELD_hashInitialiserValue = 334 + IPFIX_FIELD_selectorName = 335 + IPFIX_FIELD_upperCILimit = 336 + IPFIX_FIELD_lowerCILimit = 337 + IPFIX_FIELD_confidenceLevel = 338 + IPFIX_FIELD_informationElementDataType = 339 + IPFIX_FIELD_informationElementDescription = 340 + IPFIX_FIELD_informationElementName = 341 + IPFIX_FIELD_informationElementRangeBegin = 342 + IPFIX_FIELD_informationElementRangeEnd = 343 + IPFIX_FIELD_informationElementSemantics = 344 + IPFIX_FIELD_informationElementUnits = 345 + IPFIX_FIELD_privateEnterpriseNumber = 346 + IPFIX_FIELD_virtualStationInterfaceId = 347 + IPFIX_FIELD_virtualStationInterfaceName = 348 + IPFIX_FIELD_virtualStationUUID = 349 + IPFIX_FIELD_virtualStationName = 350 + IPFIX_FIELD_layer2SegmentId = 351 + IPFIX_FIELD_layer2OctetDeltaCount = 352 + IPFIX_FIELD_layer2OctetTotalCount = 353 + IPFIX_FIELD_ingressUnicastPacketTotalCount = 354 + IPFIX_FIELD_ingressMulticastPacketTotalCount = 355 + IPFIX_FIELD_ingressBroadcastPacketTotalCount = 356 + IPFIX_FIELD_egressUnicastPacketTotalCount = 357 + IPFIX_FIELD_egressBroadcastPacketTotalCount = 358 + IPFIX_FIELD_monitoringIntervalStartMilliSeconds = 359 + IPFIX_FIELD_monitoringIntervalEndMilliSeconds = 360 + 
IPFIX_FIELD_portRangeStart = 361 + IPFIX_FIELD_portRangeEnd = 362 + IPFIX_FIELD_portRangeStepSize = 363 + IPFIX_FIELD_portRangeNumPorts = 364 + IPFIX_FIELD_staMacAddress = 365 + IPFIX_FIELD_staIPv4Address = 366 + IPFIX_FIELD_wtpMacAddress = 367 + IPFIX_FIELD_ingressInterfaceType = 368 + IPFIX_FIELD_egressInterfaceType = 369 + IPFIX_FIELD_rtpSequenceNumber = 370 + IPFIX_FIELD_userName = 371 + IPFIX_FIELD_applicationCategoryName = 372 + IPFIX_FIELD_applicationSubCategoryName = 373 + IPFIX_FIELD_applicationGroupName = 374 + IPFIX_FIELD_originalFlowsPresent = 375 + IPFIX_FIELD_originalFlowsInitiated = 376 + IPFIX_FIELD_originalFlowsCompleted = 377 + IPFIX_FIELD_distinctCountOfSourceIPAddress = 378 + IPFIX_FIELD_distinctCountOfDestinationIPAddress = 379 + IPFIX_FIELD_distinctCountOfSourceIPv4Address = 380 + IPFIX_FIELD_distinctCountOfDestinationIPv4Address = 381 + IPFIX_FIELD_distinctCountOfSourceIPv6Address = 382 + IPFIX_FIELD_distinctCountOfDestinationIPv6Address = 383 + IPFIX_FIELD_valueDistributionMethod = 384 + IPFIX_FIELD_rfc3550JitterMilliseconds = 385 + IPFIX_FIELD_rfc3550JitterMicroseconds = 386 + IPFIX_FIELD_rfc3550JitterNanoseconds = 387 + IPFIX_FIELD_dot1qDEI = 388 + IPFIX_FIELD_dot1qCustomerDEI = 389 + IPFIX_FIELD_flowSelectorAlgorithm = 390 + IPFIX_FIELD_flowSelectedOctetDeltaCount = 391 + IPFIX_FIELD_flowSelectedPacketDeltaCount = 392 + IPFIX_FIELD_flowSelectedFlowDeltaCount = 393 + IPFIX_FIELD_selectorIDTotalFlowsObserved = 394 + IPFIX_FIELD_selectorIDTotalFlowsSelected = 395 + IPFIX_FIELD_samplingFlowInterval = 396 + IPFIX_FIELD_samplingFlowSpacing = 397 + IPFIX_FIELD_flowSamplingTimeInterval = 398 + IPFIX_FIELD_flowSamplingTimeSpacing = 399 + IPFIX_FIELD_hashFlowDomain = 400 + IPFIX_FIELD_transportOctetDeltaCount = 401 + IPFIX_FIELD_transportPacketDeltaCount = 402 + IPFIX_FIELD_originalExporterIPv4Address = 403 + IPFIX_FIELD_originalExporterIPv6Address = 404 + IPFIX_FIELD_originalObservationDomainId = 405 + IPFIX_FIELD_intermediateProcessId = 406 + IPFIX_FIELD_ignoredDataRecordTotalCount = 407 + IPFIX_FIELD_dataLinkFrameType = 408 + IPFIX_FIELD_sectionOffset = 409 + IPFIX_FIELD_sectionExportedOctets = 410 + IPFIX_FIELD_dot1qServiceInstanceTag = 411 + IPFIX_FIELD_dot1qServiceInstanceId = 412 + IPFIX_FIELD_dot1qServiceInstancePriority = 413 + IPFIX_FIELD_dot1qCustomerSourceMacAddress = 414 + IPFIX_FIELD_dot1qCustomerDestinationMacAddress = 415 + IPFIX_FIELD_postLayer2OctetDeltaCount = 417 + IPFIX_FIELD_postMCastLayer2OctetDeltaCount = 418 + IPFIX_FIELD_postLayer2OctetTotalCount = 420 + IPFIX_FIELD_postMCastLayer2OctetTotalCount = 421 + IPFIX_FIELD_minimumLayer2TotalLength = 422 + IPFIX_FIELD_maximumLayer2TotalLength = 423 + IPFIX_FIELD_droppedLayer2OctetDeltaCount = 424 + IPFIX_FIELD_droppedLayer2OctetTotalCount = 425 + IPFIX_FIELD_ignoredLayer2OctetTotalCount = 426 + IPFIX_FIELD_notSentLayer2OctetTotalCount = 427 + IPFIX_FIELD_layer2OctetDeltaSumOfSquares = 428 + IPFIX_FIELD_layer2OctetTotalSumOfSquares = 429 + IPFIX_FIELD_layer2FrameDeltaCount = 430 + IPFIX_FIELD_layer2FrameTotalCount = 431 + IPFIX_FIELD_pseudoWireDestinationIPv4Address = 432 + IPFIX_FIELD_ignoredLayer2FrameTotalCount = 433 + IPFIX_FIELD_mibObjectValueInteger = 434 + IPFIX_FIELD_mibObjectValueOctetString = 435 + IPFIX_FIELD_mibObjectValueOID = 436 + IPFIX_FIELD_mibObjectValueBits = 437 + IPFIX_FIELD_mibObjectValueIPAddress = 438 + IPFIX_FIELD_mibObjectValueCounter = 439 + IPFIX_FIELD_mibObjectValueGauge = 440 + IPFIX_FIELD_mibObjectValueTimeTicks = 441 + IPFIX_FIELD_mibObjectValueUnsigned = 442 + 
IPFIX_FIELD_mibObjectValueTable = 443 + IPFIX_FIELD_mibObjectValueRow = 444 + IPFIX_FIELD_mibObjectIdentifier = 445 + IPFIX_FIELD_mibSubIdentifier = 446 + IPFIX_FIELD_mibIndexIndicator = 447 + IPFIX_FIELD_mibCaptureTimeSemantics = 448 + IPFIX_FIELD_mibContextEngineID = 449 + IPFIX_FIELD_mibContextName = 450 + IPFIX_FIELD_mibObjectName = 451 + IPFIX_FIELD_mibObjectDescription = 452 + IPFIX_FIELD_mibObjectSyntax = 453 + IPFIX_FIELD_mibModuleName = 454 + IPFIX_FIELD_mobileIMSI = 455 + IPFIX_FIELD_mobileMSISDN = 456 + IPFIX_FIELD_httpStatusCode = 457 + IPFIX_FIELD_sourceTransportPortsLimit = 458 + IPFIX_FIELD_httpRequestMethod = 459 + IPFIX_FIELD_httpRequestHost = 460 + IPFIX_FIELD_httpRequestTarget = 461 + IPFIX_FIELD_httpMessageVersion = 462 + IPFIX_FIELD_natInstanceID = 463 + IPFIX_FIELD_internalAddressRealm = 464 + IPFIX_FIELD_externalAddressRealm = 465 + IPFIX_FIELD_natQuotaExceededEvent = 466 + IPFIX_FIELD_natThresholdEvent = 467 +) + +type IPFIXPacket struct { + Version uint16 + Length uint16 + ExportTime uint32 + SequenceNumber uint32 + ObservationDomainId uint32 + FlowSets []interface{} +} + +type IPFIXOptionsTemplateFlowSet struct { + FlowSetHeader + Records []IPFIXOptionsTemplateRecord +} + +type IPFIXOptionsTemplateRecord struct { + TemplateId uint16 + FieldCount uint16 + ScopeFieldCount uint16 + Options []Field + Scopes []Field +} + +func IPFIXTypeToString(typeId uint16) string { + + nameList := map[uint16]string{ + 0: "Reserved", + 1: "octetDeltaCount", + 2: "packetDeltaCount", + 3: "deltaFlowCount", + 4: "protocolIdentifier", + 5: "ipClassOfService", + 6: "tcpControlBits", + 7: "sourceTransportPort", + 8: "sourceIPv4Address", + 9: "sourceIPv4PrefixLength", + 10: "ingressInterface", + 11: "destinationTransportPort", + 12: "destinationIPv4Address", + 13: "destinationIPv4PrefixLength", + 14: "egressInterface", + 15: "ipNextHopIPv4Address", + 16: "bgpSourceAsNumber", + 17: "bgpDestinationAsNumber", + 18: "bgpNextHopIPv4Address", + 19: "postMCastPacketDeltaCount", + 20: "postMCastOctetDeltaCount", + 21: "flowEndSysUpTime", + 22: "flowStartSysUpTime", + 23: "postOctetDeltaCount", + 24: "postPacketDeltaCount", + 25: "minimumIpTotalLength", + 26: "maximumIpTotalLength", + 27: "sourceIPv6Address", + 28: "destinationIPv6Address", + 29: "sourceIPv6PrefixLength", + 30: "destinationIPv6PrefixLength", + 31: "flowLabelIPv6", + 32: "icmpTypeCodeIPv4", + 33: "igmpType", + 34: "samplingInterval", + 35: "samplingAlgorithm", + 36: "flowActiveTimeout", + 37: "flowIdleTimeout", + 38: "engineType", + 39: "engineId", + 40: "exportedOctetTotalCount", + 41: "exportedMessageTotalCount", + 42: "exportedFlowRecordTotalCount", + 43: "ipv4RouterSc", + 44: "sourceIPv4Prefix", + 45: "destinationIPv4Prefix", + 46: "mplsTopLabelType", + 47: "mplsTopLabelIPv4Address", + 48: "samplerId", + 49: "samplerMode", + 50: "samplerRandomInterval", + 51: "classId", + 52: "minimumTTL", + 53: "maximumTTL", + 54: "fragmentIdentification", + 55: "postIpClassOfService", + 56: "sourceMacAddress", + 57: "postDestinationMacAddress", + 58: "vlanId", + 59: "postVlanId", + 60: "ipVersion", + 61: "flowDirection", + 62: "ipNextHopIPv6Address", + 63: "bgpNextHopIPv6Address", + 64: "ipv6ExtensionHeaders", + 65: "Assigned for NetFlow v9 compatibility", + 66: "Assigned for NetFlow v9 compatibility", + 67: "Assigned for NetFlow v9 compatibility", + 68: "Assigned for NetFlow v9 compatibility", + 69: "Assigned for NetFlow v9 compatibility", + 70: "mplsTopLabelStackSection", + 71: "mplsLabelStackSection2", + 72: "mplsLabelStackSection3", + 73: 
"mplsLabelStackSection4", + 74: "mplsLabelStackSection5", + 75: "mplsLabelStackSection6", + 76: "mplsLabelStackSection7", + 77: "mplsLabelStackSection8", + 78: "mplsLabelStackSection9", + 79: "mplsLabelStackSection10", + 80: "destinationMacAddress", + 81: "postSourceMacAddress", + 82: "interfaceName", + 83: "interfaceDescription", + 84: "samplerName", + 85: "octetTotalCount", + 86: "packetTotalCount", + 87: "flagsAndSamplerId", + 88: "fragmentOffset", + 89: "forwardingStatus", + 90: "mplsVpnRouteDistinguisher", + 91: "mplsTopLabelPrefixLength", + 92: "srcTrafficIndex", + 93: "dstTrafficIndex", + 94: "applicationDescription", + 95: "applicationId", + 96: "applicationName", + 97: "Assigned for NetFlow v9 compatibility", + 98: "postIpDiffServCodePoint", + 99: "multicastReplicationFactor", + 100: "className", + 101: "classificationEngineId", + 102: "layer2packetSectionOffset", + 103: "layer2packetSectionSize", + 104: "layer2packetSectionData", + 128: "bgpNextAdjacentAsNumber", + 129: "bgpPrevAdjacentAsNumber", + 130: "exporterIPv4Address", + 131: "exporterIPv6Address", + 132: "droppedOctetDeltaCount", + 133: "droppedPacketDeltaCount", + 134: "droppedOctetTotalCount", + 135: "droppedPacketTotalCount", + 136: "flowEndReason", + 137: "commonPropertiesId", + 138: "observationPointId", + 139: "icmpTypeCodeIPv6", + 140: "mplsTopLabelIPv6Address", + 141: "lineCardId", + 142: "portId", + 143: "meteringProcessId", + 144: "exportingProcessId", + 145: "templateId", + 146: "wlanChannelId", + 147: "wlanSSID", + 148: "flowId", + 149: "observationDomainId", + 150: "flowStartSeconds", + 151: "flowEndSeconds", + 152: "flowStartMilliseconds", + 153: "flowEndMilliseconds", + 154: "flowStartMicroseconds", + 155: "flowEndMicroseconds", + 156: "flowStartNanoseconds", + 157: "flowEndNanoseconds", + 158: "flowStartDeltaMicroseconds", + 159: "flowEndDeltaMicroseconds", + 160: "systemInitTimeMilliseconds", + 161: "flowDurationMilliseconds", + 162: "flowDurationMicroseconds", + 163: "observedFlowTotalCount", + 164: "ignoredPacketTotalCount", + 165: "ignoredOctetTotalCount", + 166: "notSentFlowTotalCount", + 167: "notSentPacketTotalCount", + 168: "notSentOctetTotalCount", + 169: "destinationIPv6Prefix", + 170: "sourceIPv6Prefix", + 171: "postOctetTotalCount", + 172: "postPacketTotalCount", + 173: "flowKeyIndicator", + 174: "postMCastPacketTotalCount", + 175: "postMCastOctetTotalCount", + 176: "icmpTypeIPv4", + 177: "icmpCodeIPv4", + 178: "icmpTypeIPv6", + 179: "icmpCodeIPv6", + 180: "udpSourcePort", + 181: "udpDestinationPort", + 182: "tcpSourcePort", + 183: "tcpDestinationPort", + 184: "tcpSequenceNumber", + 185: "tcpAcknowledgementNumber", + 186: "tcpWindowSize", + 187: "tcpUrgentPointer", + 188: "tcpHeaderLength", + 189: "ipHeaderLength", + 190: "totalLengthIPv4", + 191: "payloadLengthIPv6", + 192: "ipTTL", + 193: "nextHeaderIPv6", + 194: "mplsPayloadLength", + 195: "ipDiffServCodePoint", + 196: "ipPrecedence", + 197: "fragmentFlags", + 198: "octetDeltaSumOfSquares", + 199: "octetTotalSumOfSquares", + 200: "mplsTopLabelTTL", + 201: "mplsLabelStackLength", + 202: "mplsLabelStackDepth", + 203: "mplsTopLabelExp", + 204: "ipPayloadLength", + 205: "udpMessageLength", + 206: "isMulticast", + 207: "ipv4IHL", + 208: "ipv4Options", + 209: "tcpOptions", + 210: "paddingOctets", + 211: "collectorIPv4Address", + 212: "collectorIPv6Address", + 213: "exportInterface", + 214: "exportProtocolVersion", + 215: "exportTransportProtocol", + 216: "collectorTransportPort", + 217: "exporterTransportPort", + 218: "tcpSynTotalCount", + 219: 
"tcpFinTotalCount", + 220: "tcpRstTotalCount", + 221: "tcpPshTotalCount", + 222: "tcpAckTotalCount", + 223: "tcpUrgTotalCount", + 224: "ipTotalLength", + 225: "postNATSourceIPv4Address", + 226: "postNATDestinationIPv4Address", + 227: "postNAPTSourceTransportPort", + 228: "postNAPTDestinationTransportPort", + 229: "natOriginatingAddressRealm", + 230: "natEvent", + 231: "initiatorOctets", + 232: "responderOctets", + 233: "firewallEvent", + 234: "ingressVRFID", + 235: "egressVRFID", + 236: "VRFname", + 237: "postMplsTopLabelExp", + 238: "tcpWindowScale", + 239: "biflowDirection", + 240: "ethernetHeaderLength", + 241: "ethernetPayloadLength", + 242: "ethernetTotalLength", + 243: "dot1qVlanId", + 244: "dot1qPriority", + 245: "dot1qCustomerVlanId", + 246: "dot1qCustomerPriority", + 247: "metroEvcId", + 248: "metroEvcType", + 249: "pseudoWireId", + 250: "pseudoWireType", + 251: "pseudoWireControlWord", + 252: "ingressPhysicalInterface", + 253: "egressPhysicalInterface", + 254: "postDot1qVlanId", + 255: "postDot1qCustomerVlanId", + 256: "ethernetType", + 257: "postIpPrecedence", + 258: "collectionTimeMilliseconds", + 259: "exportSctpStreamId", + 260: "maxExportSeconds", + 261: "maxFlowEndSeconds", + 262: "messageMD5Checksum", + 263: "messageScope", + 264: "minExportSeconds", + 265: "minFlowStartSeconds", + 266: "opaqueOctets", + 267: "sessionScope", + 268: "maxFlowEndMicroseconds", + 269: "maxFlowEndMilliseconds", + 270: "maxFlowEndNanoseconds", + 271: "minFlowStartMicroseconds", + 272: "minFlowStartMilliseconds", + 273: "minFlowStartNanoseconds", + 274: "collectorCertificate", + 275: "exporterCertificate", + 276: "dataRecordsReliability", + 277: "observationPointType", + 278: "newConnectionDeltaCount", + 279: "connectionSumDurationSeconds", + 280: "connectionTransactionId", + 281: "postNATSourceIPv6Address", + 282: "postNATDestinationIPv6Address", + 283: "natPoolId", + 284: "natPoolName", + 285: "anonymizationFlags", + 286: "anonymizationTechnique", + 287: "informationElementIndex", + 288: "p2pTechnology", + 289: "tunnelTechnology", + 290: "encryptedTechnology", + 291: "basicList", + 292: "subTemplateList", + 293: "subTemplateMultiList", + 294: "bgpValidityState", + 295: "IPSecSPI", + 296: "greKey", + 297: "natType", + 298: "initiatorPackets", + 299: "responderPackets", + 300: "observationDomainName", + 301: "selectionSequenceId", + 302: "selectorId", + 303: "informationElementId", + 304: "selectorAlgorithm", + 305: "samplingPacketInterval", + 306: "samplingPacketSpace", + 307: "samplingTimeInterval", + 308: "samplingTimeSpace", + 309: "samplingSize", + 310: "samplingPopulation", + 311: "samplingProbability", + 312: "dataLinkFrameSize", + 313: "ipHeaderPacketSection", + 314: "ipPayloadPacketSection", + 315: "dataLinkFrameSection", + 316: "mplsLabelStackSection", + 317: "mplsPayloadPacketSection", + 318: "selectorIdTotalPktsObserved", + 319: "selectorIdTotalPktsSelected", + 320: "absoluteError", + 321: "relativeError", + 322: "observationTimeSeconds", + 323: "observationTimeMilliseconds", + 324: "observationTimeMicroseconds", + 325: "observationTimeNanoseconds", + 326: "digestHashValue", + 327: "hashIPPayloadOffset", + 328: "hashIPPayloadSize", + 329: "hashOutputRangeMin", + 330: "hashOutputRangeMax", + 331: "hashSelectedRangeMin", + 332: "hashSelectedRangeMax", + 333: "hashDigestOutput", + 334: "hashInitialiserValue", + 335: "selectorName", + 336: "upperCILimit", + 337: "lowerCILimit", + 338: "confidenceLevel", + 339: "informationElementDataType", + 340: "informationElementDescription", + 341: 
"informationElementName", + 342: "informationElementRangeBegin", + 343: "informationElementRangeEnd", + 344: "informationElementSemantics", + 345: "informationElementUnits", + 346: "privateEnterpriseNumber", + 347: "virtualStationInterfaceId", + 348: "virtualStationInterfaceName", + 349: "virtualStationUUID", + 350: "virtualStationName", + 351: "layer2SegmentId", + 352: "layer2OctetDeltaCount", + 353: "layer2OctetTotalCount", + 354: "ingressUnicastPacketTotalCount", + 355: "ingressMulticastPacketTotalCount", + 356: "ingressBroadcastPacketTotalCount", + 357: "egressUnicastPacketTotalCount", + 358: "egressBroadcastPacketTotalCount", + 359: "monitoringIntervalStartMilliSeconds", + 360: "monitoringIntervalEndMilliSeconds", + 361: "portRangeStart", + 362: "portRangeEnd", + 363: "portRangeStepSize", + 364: "portRangeNumPorts", + 365: "staMacAddress", + 366: "staIPv4Address", + 367: "wtpMacAddress", + 368: "ingressInterfaceType", + 369: "egressInterfaceType", + 370: "rtpSequenceNumber", + 371: "userName", + 372: "applicationCategoryName", + 373: "applicationSubCategoryName", + 374: "applicationGroupName", + 375: "originalFlowsPresent", + 376: "originalFlowsInitiated", + 377: "originalFlowsCompleted", + 378: "distinctCountOfSourceIPAddress", + 379: "distinctCountOfDestinationIPAddress", + 380: "distinctCountOfSourceIPv4Address", + 381: "distinctCountOfDestinationIPv4Address", + 382: "distinctCountOfSourceIPv6Address", + 383: "distinctCountOfDestinationIPv6Address", + 384: "valueDistributionMethod", + 385: "rfc3550JitterMilliseconds", + 386: "rfc3550JitterMicroseconds", + 387: "rfc3550JitterNanoseconds", + 388: "dot1qDEI", + 389: "dot1qCustomerDEI", + 390: "flowSelectorAlgorithm", + 391: "flowSelectedOctetDeltaCount", + 392: "flowSelectedPacketDeltaCount", + 393: "flowSelectedFlowDeltaCount", + 394: "selectorIDTotalFlowsObserved", + 395: "selectorIDTotalFlowsSelected", + 396: "samplingFlowInterval", + 397: "samplingFlowSpacing", + 398: "flowSamplingTimeInterval", + 399: "flowSamplingTimeSpacing", + 400: "hashFlowDomain", + 401: "transportOctetDeltaCount", + 402: "transportPacketDeltaCount", + 403: "originalExporterIPv4Address", + 404: "originalExporterIPv6Address", + 405: "originalObservationDomainId", + 406: "intermediateProcessId", + 407: "ignoredDataRecordTotalCount", + 408: "dataLinkFrameType", + 409: "sectionOffset", + 410: "sectionExportedOctets", + 411: "dot1qServiceInstanceTag", + 412: "dot1qServiceInstanceId", + 413: "dot1qServiceInstancePriority", + 414: "dot1qCustomerSourceMacAddress", + 415: "dot1qCustomerDestinationMacAddress", + 416: "", + 417: "postLayer2OctetDeltaCount", + 418: "postMCastLayer2OctetDeltaCount", + 419: "", + 420: "postLayer2OctetTotalCount", + 421: "postMCastLayer2OctetTotalCount", + 422: "minimumLayer2TotalLength", + 423: "maximumLayer2TotalLength", + 424: "droppedLayer2OctetDeltaCount", + 425: "droppedLayer2OctetTotalCount", + 426: "ignoredLayer2OctetTotalCount", + 427: "notSentLayer2OctetTotalCount", + 428: "layer2OctetDeltaSumOfSquares", + 429: "layer2OctetTotalSumOfSquares", + 430: "layer2FrameDeltaCount", + 431: "layer2FrameTotalCount", + 432: "pseudoWireDestinationIPv4Address", + 433: "ignoredLayer2FrameTotalCount", + 434: "mibObjectValueInteger", + 435: "mibObjectValueOctetString", + 436: "mibObjectValueOID", + 437: "mibObjectValueBits", + 438: "mibObjectValueIPAddress", + 439: "mibObjectValueCounter", + 440: "mibObjectValueGauge", + 441: "mibObjectValueTimeTicks", + 442: "mibObjectValueUnsigned", + 443: "mibObjectValueTable", + 444: "mibObjectValueRow", + 
445: "mibObjectIdentifier", + 446: "mibSubIdentifier", + 447: "mibIndexIndicator", + 448: "mibCaptureTimeSemantics", + 449: "mibContextEngineID", + 450: "mibContextName", + 451: "mibObjectName", + 452: "mibObjectDescription", + 453: "mibObjectSyntax", + 454: "mibModuleName", + 455: "mobileIMSI", + 456: "mobileMSISDN", + 457: "httpStatusCode", + 458: "sourceTransportPortsLimit", + 459: "httpRequestMethod", + 460: "httpRequestHost", + 461: "httpRequestTarget", + 462: "httpMessageVersion", + 463: "natInstanceID", + 464: "internalAddressRealm", + 465: "externalAddressRealm", + 466: "natQuotaExceededEvent", + 467: "natThresholdEvent", + } + + if typeId >= 105 && typeId <= 127 { + return "Assigned for NetFlow v9 compatibility" + } else if typeId >= 468 && typeId <= 32767 { + return "Unassigned" + } else { + return nameList[typeId] + } +} + +func (flowSet IPFIXOptionsTemplateFlowSet) String(TypeToString func(uint16) string) string { + str := fmt.Sprintf(" Id %v\n", flowSet.Id) + str += fmt.Sprintf(" Length: %v\n", flowSet.Length) + str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) + + for j, record := range flowSet.Records { + str += fmt.Sprintf(" - Record %v:\n", j) + str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId) + str += fmt.Sprintf(" FieldCount: %v\n", record.FieldCount) + str += fmt.Sprintf(" ScopeFieldCount: %v\n", record.ScopeFieldCount) + + str += fmt.Sprintf(" Scopes (%v):\n", len(record.Scopes)) + + for k, field := range record.Scopes { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length) + } + + str += fmt.Sprintf(" Options (%v):\n", len(record.Options)) + + for k, field := range record.Options { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length) + } + + } + + return str +} + +func (p IPFIXPacket) String() string { + str := "Flow Packet\n" + str += "------------\n" + str += fmt.Sprintf(" Version: %v\n", p.Version) + str += fmt.Sprintf(" Length: %v\n", p.Length) + + exportTime := time.Unix(int64(p.ExportTime), 0) + str += fmt.Sprintf(" ExportTime: %v\n", exportTime.String()) + str += fmt.Sprintf(" SequenceNumber: %v\n", p.SequenceNumber) + str += fmt.Sprintf(" ObservationDomainId: %v\n", p.ObservationDomainId) + str += fmt.Sprintf(" FlowSets (%v):\n", len(p.FlowSets)) + + for i, flowSet := range p.FlowSets { + switch flowSet := flowSet.(type) { + case TemplateFlowSet: + str += fmt.Sprintf(" - TemplateFlowSet %v:\n", i) + str += flowSet.String(IPFIXTypeToString) + case IPFIXOptionsTemplateFlowSet: + str += fmt.Sprintf(" - OptionsTemplateFlowSet %v:\n", i) + str += flowSet.String(IPFIXTypeToString) + case DataFlowSet: + str += fmt.Sprintf(" - DataFlowSet %v:\n", i) + str += flowSet.String(IPFIXTypeToString) + case OptionsDataFlowSet: + str += fmt.Sprintf(" - OptionsDataFlowSet %v:\n", i) + str += flowSet.String(IPFIXTypeToString, IPFIXTypeToString) + default: + str += fmt.Sprintf(" - (unknown type) %v:\n", i, flowSet) + } + } + + return str +} diff --git a/decoders/netflow/netflow.go b/decoders/netflow/netflow.go new file mode 100644 index 0000000..2cc2a58 --- /dev/null +++ b/decoders/netflow/netflow.go @@ -0,0 +1,599 @@ +package netflow + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "github.com/cloudflare/goflow/decoders" + "github.com/cloudflare/goflow/decoders/utils" + "net" + "strconv" + "sync" +) + +type BaseMessage struct { + Src net.IP + Port int + Payload []byte +} + +type BaseMessageDecoded struct { + Version uint16 + Src net.IP + Port int 
+ Packet decoder.MessageDecoded +} + +type FlowBaseTemplateSet map[string]map[uint32]map[uint16][]Field +type FlowBaseOptionRecords struct { + Scopes []Field + Options []Field +} +type FlowBaseOptionsTemplateSet map[string]map[uint32]map[uint16]FlowBaseOptionRecords +type FlowBaseTemplateInfo map[string]map[uint32]map[uint16]bool + +type DecoderConfig struct { + NetFlowV9TemplateSet FlowBaseTemplateSet + NetFlowV9OptionsTemplateSet FlowBaseOptionsTemplateSet + NetFlowV9TemplateInfo FlowBaseTemplateInfo + + IPFIXTemplateSet FlowBaseTemplateSet + IPFIXOptionsTemplateSet FlowBaseOptionsTemplateSet + IPFIXTemplateInfo FlowBaseTemplateInfo + + NetFlowV9TemplateSetLock *sync.RWMutex + IPFIXTemplateSetLock *sync.RWMutex + + AddTemplates bool + UniqueTemplates bool +} + +func CreateConfig() DecoderConfig { + config := DecoderConfig{ + AddTemplates: true, + UniqueTemplates: true, + NetFlowV9TemplateSetLock: &sync.RWMutex{}, + IPFIXTemplateSetLock: &sync.RWMutex{}, + NetFlowV9TemplateSet: make(map[string]map[uint32]map[uint16][]Field), + NetFlowV9OptionsTemplateSet: make(map[string]map[uint32]map[uint16]FlowBaseOptionRecords), + NetFlowV9TemplateInfo: make(map[string]map[uint32]map[uint16]bool), + IPFIXTemplateSet: make(map[string]map[uint32]map[uint16][]Field), + IPFIXOptionsTemplateSet: make(map[string]map[uint32]map[uint16]FlowBaseOptionRecords), + IPFIXTemplateInfo: make(map[string]map[uint32]map[uint16]bool), + } + return config +} + +func DecodePacket(msg decoder.Message, config decoder.DecoderConfig) (decoder.MessageDecoded, error) { + baseMsg := msg.(BaseMessage) + payload := bytes.NewBuffer(baseMsg.Payload) + configdec := config.(DecoderConfig) + + key := baseMsg.Src.String() + ":" + strconv.Itoa(baseMsg.Port) + if configdec.UniqueTemplates { + key = "unique" + } + + version, msgDecoded, err := DecodeMessage(key, configdec.UniqueTemplates, payload, config) + + baseMsgDecoded := BaseMessageDecoded{ + Version: version, + Src: baseMsg.Src, + Port: baseMsg.Port, + Packet: msgDecoded, + } + + return baseMsgDecoded, err +} + +func DecodeNFv9OptionsTemplateSet(payload *bytes.Buffer) ([]NFv9OptionsTemplateRecord, error) { + records := make([]NFv9OptionsTemplateRecord, 0) + var err error + for payload.Len() >= 4 { + optsTemplateRecord := NFv9OptionsTemplateRecord{} + err = utils.BinaryDecoder(payload, &optsTemplateRecord.TemplateId, &optsTemplateRecord.ScopeLength, &optsTemplateRecord.OptionLength) + if err != nil { + break + } + + sizeScope := int(optsTemplateRecord.ScopeLength) / 4 + sizeOptions := int(optsTemplateRecord.OptionLength) / 4 + if sizeScope < 0 || sizeOptions < 0 { + return records, errors.New("Error decoding OptionsTemplateSet: negative length.") + } + + fields := make([]Field, sizeScope) + for i := 0; i < sizeScope; i++ { + field := Field{} + err = utils.BinaryDecoder(payload, &field) + fields[i] = field + } + optsTemplateRecord.Scopes = fields + + fields = make([]Field, sizeOptions) + for i := 0; i < sizeOptions; i++ { + field := Field{} + err = utils.BinaryDecoder(payload, &field) + fields[i] = field + } + optsTemplateRecord.Options = fields + + records = append(records, optsTemplateRecord) + } + + return records, nil +} + +func DecodeIPFIXOptionsTemplateSet(payload *bytes.Buffer) ([]IPFIXOptionsTemplateRecord, error) { + records := make([]IPFIXOptionsTemplateRecord, 0) + var err error + for payload.Len() >= 4 { + optsTemplateRecord := IPFIXOptionsTemplateRecord{} + err = utils.BinaryDecoder(payload, &optsTemplateRecord.TemplateId, &optsTemplateRecord.FieldCount, 
&optsTemplateRecord.ScopeFieldCount) + if err != nil { + break + } + + fields := make([]Field, int(optsTemplateRecord.ScopeFieldCount)) + for i := 0; i < int(optsTemplateRecord.ScopeFieldCount); i++ { + field := Field{} + err = utils.BinaryDecoder(payload, &field) + fields[i] = field + } + optsTemplateRecord.Scopes = fields + + optionsSize := int(optsTemplateRecord.FieldCount) - int(optsTemplateRecord.ScopeFieldCount) + if optionsSize < 0 { + return records, errors.New("Error decoding OptionsTemplateSet: negative length.") + } + fields = make([]Field, optionsSize) + for i := 0; i < optionsSize; i++ { + field := Field{} + err = utils.BinaryDecoder(payload, &field) + fields[i] = field + } + optsTemplateRecord.Options = fields + + records = append(records, optsTemplateRecord) + } + + return records, nil +} + +func DecodeTemplateSet(payload *bytes.Buffer) ([]TemplateRecord, error) { + records := make([]TemplateRecord, 0) + var err error + for payload.Len() >= 4 { + templateRecord := TemplateRecord{} + err = utils.BinaryDecoder(payload, &templateRecord.TemplateId, &templateRecord.FieldCount) + if err != nil { + break + } + + if int(templateRecord.FieldCount) < 0 { + return records, errors.New("Error decoding TemplateSet: zero count.") + } + + fields := make([]Field, int(templateRecord.FieldCount)) + for i := 0; i < int(templateRecord.FieldCount); i++ { + field := Field{} + err = utils.BinaryDecoder(payload, &field) + fields[i] = field + } + templateRecord.Fields = fields + //fmt.Printf(" %v\n", templateRecord) + records = append(records, templateRecord) + } + + return records, nil +} + +func AddTemplate(key string, obsDomainId uint32, templateRecords []TemplateRecord, listTemplates FlowBaseTemplateSet, listTemplatesInfo FlowBaseTemplateInfo) { + for _, templateRecord := range templateRecords { + _, exists := listTemplatesInfo[key] + if exists != true { + listTemplatesInfo[key] = make(map[uint32]map[uint16]bool) + } + _, exists = listTemplates[key] + if exists != true { + listTemplates[key] = make(map[uint32]map[uint16][]Field) + } + _, exists = listTemplatesInfo[key][obsDomainId] + if exists != true { + listTemplatesInfo[key][obsDomainId] = make(map[uint16]bool) + } + _, exists = listTemplates[key][obsDomainId] + if exists != true { + listTemplates[key][obsDomainId] = make(map[uint16][]Field) + } + listTemplates[key][obsDomainId][templateRecord.TemplateId] = templateRecord.Fields + listTemplatesInfo[key][obsDomainId][templateRecord.TemplateId] = true + } +} + +func AddNFv9OptionsTemplate(key string, obsDomainId uint32, templateRecords []NFv9OptionsTemplateRecord, listOptionsTemplates FlowBaseOptionsTemplateSet, listTemplatesInfo FlowBaseTemplateInfo) { + for _, templateRecord := range templateRecords { + _, exists := listTemplatesInfo[key] + if exists != true { + listTemplatesInfo[key] = make(map[uint32]map[uint16]bool) + } + _, exists = listOptionsTemplates[key] + if exists != true { + listOptionsTemplates[key] = make(map[uint32]map[uint16]FlowBaseOptionRecords) + } + _, exists = listTemplatesInfo[key][obsDomainId] + if exists != true { + listTemplatesInfo[key][obsDomainId] = make(map[uint16]bool) + } + _, exists = listOptionsTemplates[key][obsDomainId] + if exists != true { + listOptionsTemplates[key][obsDomainId] = make(map[uint16]FlowBaseOptionRecords) + } + optionRecord := FlowBaseOptionRecords{ + Scopes: templateRecord.Scopes, + Options: templateRecord.Options, + } + listOptionsTemplates[key][obsDomainId][templateRecord.TemplateId] = optionRecord + 
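// Templates are stored per exporter key ("src:port", or "unique" when the
// UniqueTemplates option is set), then per observation domain id, then per
// template id. The companion *TemplateInfo map records whether a template id
// refers to a regular data template (true, set in AddTemplate) or to an
// options template (false, as set just below).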
listTemplatesInfo[key][obsDomainId][templateRecord.TemplateId] = false + } +} + +func AddIPFIXOptionsTemplate(key string, obsDomainId uint32, templateRecords []IPFIXOptionsTemplateRecord, listOptionsTemplates FlowBaseOptionsTemplateSet, listTemplatesInfo FlowBaseTemplateInfo) { + for _, templateRecord := range templateRecords { + _, exists := listTemplatesInfo[key] + if exists != true { + listTemplatesInfo[key] = make(map[uint32]map[uint16]bool) + } + _, exists = listOptionsTemplates[key] + if exists != true { + listOptionsTemplates[key] = make(map[uint32]map[uint16]FlowBaseOptionRecords) + } + _, exists = listTemplatesInfo[key][obsDomainId] + if exists != true { + listTemplatesInfo[key][obsDomainId] = make(map[uint16]bool) + } + _, exists = listOptionsTemplates[key][obsDomainId] + if exists != true { + listOptionsTemplates[key][obsDomainId] = make(map[uint16]FlowBaseOptionRecords) + } + optionRecord := FlowBaseOptionRecords{ + Scopes: templateRecord.Scopes, + Options: templateRecord.Options, + } + listOptionsTemplates[key][obsDomainId][templateRecord.TemplateId] = optionRecord + listTemplatesInfo[key][obsDomainId][templateRecord.TemplateId] = false + } +} + +func GetTemplateSize(template []Field) int { + sum := 0 + for _, templateField := range template { + sum += int(templateField.Length) + } + return sum +} + +func DecodeDataSetUsingFields(payload *bytes.Buffer, listFields []Field) []DataField { + for payload.Len() >= GetTemplateSize(listFields) { + + dataFields := make([]DataField, len(listFields)) + + for i, templateField := range listFields { + value := payload.Next(int(templateField.Length)) + nfvalue := DataField{ + Type: templateField.Type, + Value: value, + } + dataFields[i] = nfvalue + } + return dataFields + } + return []DataField{} +} + +type ErrorTemplateNotFound struct { + src string + obsDomainId uint32 + templateId uint16 + typeTemplate string +} + +func NewErrorTemplateNotFound(src string, obsDomainId uint32, templateId uint16, typeTemplate string) *ErrorTemplateNotFound { + return &ErrorTemplateNotFound{ + src: src, + obsDomainId: obsDomainId, + templateId: templateId, + typeTemplate: typeTemplate, + } +} + +func (e *ErrorTemplateNotFound) Error() string { + return fmt.Sprintf("No %v template %v found for source %v and domain id %v", e.typeTemplate, e.templateId, e.src, e.obsDomainId) +} + +func DecodeOptionsDataSet(src string, obsDomainId uint32, templateId uint16, payload *bytes.Buffer, listOptionsTemplates FlowBaseOptionsTemplateSet) ([]OptionsDataRecord, error) { + records := make([]OptionsDataRecord, 0) + + listOptionsTemplatesSrc, oksrc := listOptionsTemplates[src] + if oksrc { + listOptionsTemplatesSrcObs, okobs := listOptionsTemplatesSrc[obsDomainId] + if okobs { + listOptionsFields, oktmp := listOptionsTemplatesSrcObs[templateId] + if oktmp { + listFieldsScopes := listOptionsFields.Scopes + listFieldsOption := listOptionsFields.Options + + listFieldsScopesSize := GetTemplateSize(listFieldsScopes) + listFieldsOptionSize := GetTemplateSize(listFieldsOption) + + for payload.Len() >= listFieldsScopesSize+listFieldsOptionSize { + payloadLim := bytes.NewBuffer(payload.Next(listFieldsScopesSize)) + scopeValues := DecodeDataSetUsingFields(payloadLim, listFieldsScopes) + payloadLim = bytes.NewBuffer(payload.Next(listFieldsOptionSize)) + optionValues := DecodeDataSetUsingFields(payloadLim, listFieldsOption) + + record := OptionsDataRecord{ + ScopesValues: scopeValues, + OptionsValues: optionValues, + } + + records = append(records, record) + } + return records, nil + } 
else { + return []OptionsDataRecord{}, NewErrorTemplateNotFound(src, obsDomainId, templateId, "options") + } + } else { + return []OptionsDataRecord{}, NewErrorTemplateNotFound(src, obsDomainId, templateId, "options") + } + } else { + return []OptionsDataRecord{}, NewErrorTemplateNotFound(src, obsDomainId, templateId, "options") + } +} + +func DecodeDataSet(src string, obsDomainId uint32, templateId uint16, payload *bytes.Buffer, listTemplates FlowBaseTemplateSet) ([]DataRecord, error) { + records := make([]DataRecord, 0) + + listTemplatesSrc, oksrc := listTemplates[src] + if oksrc { + listTemplatesSrcObs, okobs := listTemplatesSrc[obsDomainId] + if okobs { + listFields, oktmp := listTemplatesSrcObs[templateId] + if oktmp { + listFieldsSize := GetTemplateSize(listFields) + for payload.Len() >= listFieldsSize { + payloadLim := bytes.NewBuffer(payload.Next(listFieldsSize)) + values := DecodeDataSetUsingFields(payloadLim, listFields) + + record := DataRecord{ + Values: values, + } + + records = append(records, record) + } + return records, nil + } else { + return []DataRecord{}, NewErrorTemplateNotFound(src, obsDomainId, templateId, "data") + } + } else { + return []DataRecord{}, NewErrorTemplateNotFound(src, obsDomainId, templateId, "data") + } + } else { + return []DataRecord{}, NewErrorTemplateNotFound(src, obsDomainId, templateId, "data") + } +} + +func IsDataSet(src string, obsDomainId uint32, templateId uint16, listTemplatesInfo FlowBaseTemplateInfo) (bool, error) { + listTemplatesInfoSrc, oksrc := listTemplatesInfo[src] + if oksrc { + listTemplatesInfoSrcObs, okobs := listTemplatesInfoSrc[obsDomainId] + if okobs { + listBool, oktmp := listTemplatesInfoSrcObs[templateId] + if oktmp { + return listBool, nil + } + return false, NewErrorTemplateNotFound(src, obsDomainId, templateId, "info") + } + return false, NewErrorTemplateNotFound(src, obsDomainId, templateId, "info") + } + return false, NewErrorTemplateNotFound(src, obsDomainId, templateId, "info") +} + +func DecodeMessage(key string, uniqueTemplates bool, payload *bytes.Buffer, config decoder.DecoderConfig) (uint16, decoder.MessageDecoded, error) { + configdec := config.(DecoderConfig) + + var size uint16 + var templateLock *sync.RWMutex + var confTemplateInfo *FlowBaseTemplateInfo + var confOptionsTemplate *FlowBaseOptionsTemplateSet + var confTemplate *FlowBaseTemplateSet + packetNFv9 := NFv9Packet{} + packetIPFIX := IPFIXPacket{} + var returnItem interface{} + + var version uint16 + var obsDomainId uint32 + binary.Read(payload, binary.BigEndian, &version) + + if version == 9 { + utils.BinaryDecoder(payload, &packetNFv9.Count, &packetNFv9.SystemUptime, &packetNFv9.UnixSeconds, &packetNFv9.SequenceNumber, &packetNFv9.SourceId) + size = packetNFv9.Count + packetNFv9.Version = version + templateLock = configdec.NetFlowV9TemplateSetLock + confTemplateInfo = &configdec.NetFlowV9TemplateInfo + confOptionsTemplate = &configdec.NetFlowV9OptionsTemplateSet + confTemplate = &configdec.NetFlowV9TemplateSet + returnItem = *(&packetNFv9) + obsDomainId = packetNFv9.SourceId + } else if version == 10 { + utils.BinaryDecoder(payload, &packetIPFIX.Length, &packetIPFIX.ExportTime, &packetIPFIX.SequenceNumber, &packetIPFIX.ObservationDomainId) + size = packetIPFIX.Length + packetIPFIX.Version = version + templateLock = configdec.IPFIXTemplateSetLock + confTemplateInfo = &configdec.IPFIXTemplateInfo + confOptionsTemplate = &configdec.IPFIXOptionsTemplateSet + confTemplate = &configdec.IPFIXTemplateSet + returnItem = *(&packetIPFIX) + obsDomainId = 
packetIPFIX.ObservationDomainId + } else { + return version, nil, errors.New(fmt.Sprintf("Unknown version %v.", version)) + } + + if uniqueTemplates { + obsDomainId = 0 + } + + for i := 0; ((i < int(size) && version == 9) || version == 10) && payload.Len() > 0; i++ { + fsheader := FlowSetHeader{} + utils.BinaryDecoder(payload, &fsheader) + + nextrelpos := int(fsheader.Length) - binary.Size(fsheader) + if nextrelpos < 0 { + return version, returnItem, errors.New("Error decoding packet: non-terminated stream.") + } + + var flowSet interface{} + + if fsheader.Id == 0 && version == 9 { + templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) + records, err := DecodeTemplateSet(templateReader) + if err != nil { + return version, returnItem, err + } + templatefs := TemplateFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + + flowSet = templatefs + + if configdec.AddTemplates { + templateLock.Lock() + AddTemplate(key, obsDomainId, records, *confTemplate, *confTemplateInfo) + templateLock.Unlock() + } + + } else if fsheader.Id == 1 && version == 9 { + templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) + records, err := DecodeNFv9OptionsTemplateSet(templateReader) + if err != nil { + return version, returnItem, err + } + optsTemplatefs := NFv9OptionsTemplateFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + flowSet = optsTemplatefs + + if configdec.AddTemplates { + templateLock.Lock() + AddNFv9OptionsTemplate(key, obsDomainId, records, *confOptionsTemplate, *confTemplateInfo) + templateLock.Unlock() + } + + } else if fsheader.Id == 2 && version == 10 { + templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) + records, err := DecodeTemplateSet(templateReader) + if err != nil { + return version, returnItem, err + } + templatefs := TemplateFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + flowSet = templatefs + + if configdec.AddTemplates { + templateLock.Lock() + AddTemplate(key, obsDomainId, records, *confTemplate, *confTemplateInfo) + templateLock.Unlock() + } + + } else if fsheader.Id == 3 && version == 10 { + templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) + records, err := DecodeIPFIXOptionsTemplateSet(templateReader) + if err != nil { + return version, returnItem, err + } + optsTemplatefs := IPFIXOptionsTemplateFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + flowSet = optsTemplatefs + + if configdec.AddTemplates { + templateLock.Lock() + AddIPFIXOptionsTemplate(key, obsDomainId, records, *confOptionsTemplate, *confTemplateInfo) + templateLock.Unlock() + } + + } else if fsheader.Id >= 256 { + dataReader := bytes.NewBuffer(payload.Next(nextrelpos)) + + templateLock.RLock() + isDs, err := IsDataSet(key, obsDomainId, fsheader.Id, *confTemplateInfo) + templateLock.RUnlock() + if err == nil { + if isDs { + templateLock.RLock() + records, err := DecodeDataSet(key, obsDomainId, fsheader.Id, dataReader, *confTemplate) + if err != nil { + return version, returnItem, err + } + templateLock.RUnlock() + datafs := DataFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + flowSet = datafs + } else { + templateLock.RLock() + records, err := DecodeOptionsDataSet(key, obsDomainId, fsheader.Id, dataReader, *confOptionsTemplate) + if err != nil { + return version, returnItem, err + } + templateLock.RUnlock() + + datafs := OptionsDataFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + flowSet = datafs + } + } else { + return version, returnItem, err + } + } else { + return version, returnItem, 
errors.New(fmt.Sprintf("%v not a valid Id\n", fsheader.Id)) + } + + if version == 9 && flowSet != nil { + packetNFv9.FlowSets = append(packetNFv9.FlowSets, flowSet) + } else if version == 10 && flowSet != nil { + packetIPFIX.FlowSets = append(packetIPFIX.FlowSets, flowSet) + } + } + + if version == 9 { + return version, packetNFv9, nil + } else if version == 10 { + return version, packetIPFIX, nil + } else { + return 0, returnItem, errors.New(fmt.Sprintf("Unknown version %v.", version)) + } +} + +func CreateProcessor(numWorkers int, decoderConfig DecoderConfig, doneCallback decoder.DoneCallback, callbackArgs decoder.CallbackArgs, errorCallback decoder.ErrorCallback) decoder.Processor { + + decoderParams := decoder.DecoderParams{ + DecoderFunc: DecodePacket, + DecoderConfig: decoderConfig, + DoneCallback: doneCallback, + CallbackArgs: callbackArgs, + ErrorCallback: errorCallback, + } + processor := decoder.CreateProcessor(numWorkers, decoderParams, "NetFlow") + + return processor +} diff --git a/decoders/netflow/nfv9.go b/decoders/netflow/nfv9.go new file mode 100644 index 0000000..91ca4d7 --- /dev/null +++ b/decoders/netflow/nfv9.go @@ -0,0 +1,315 @@ +package netflow + +import ( + "fmt" + "time" +) + +const ( + NFV9_FIELD_IN_BYTES = 1 + NFV9_FIELD_IN_PKTS = 2 + NFV9_FIELD_FLOWS = 3 + NFV9_FIELD_PROTOCOL = 4 + NFV9_FIELD_SRC_TOS = 5 + NFV9_FIELD_TCP_FLAGS = 6 + NFV9_FIELD_L4_SRC_PORT = 7 + NFV9_FIELD_IPV4_SRC_ADDR = 8 + NFV9_FIELD_SRC_MASK = 9 + NFV9_FIELD_INPUT_SNMP = 10 + NFV9_FIELD_L4_DST_PORT = 11 + NFV9_FIELD_IPV4_DST_ADDR = 12 + NFV9_FIELD_DST_MASK = 13 + NFV9_FIELD_OUTPUT_SNMP = 14 + NFV9_FIELD_IPV4_NEXT_HOP = 15 + NFV9_FIELD_SRC_AS = 16 + NFV9_FIELD_DST_AS = 17 + NFV9_FIELD_BGP_IPV4_NEXT_HOP = 18 + NFV9_FIELD_MUL_DST_PKTS = 19 + NFV9_FIELD_MUL_DST_BYTES = 20 + NFV9_FIELD_LAST_SWITCHED = 21 + NFV9_FIELD_FIRST_SWITCHED = 22 + NFV9_FIELD_OUT_BYTES = 23 + NFV9_FIELD_OUT_PKTS = 24 + NFV9_FIELD_MIN_PKT_LNGTH = 25 + NFV9_FIELD_MAX_PKT_LNGTH = 26 + NFV9_FIELD_IPV6_SRC_ADDR = 27 + NFV9_FIELD_IPV6_DST_ADDR = 28 + NFV9_FIELD_IPV6_SRC_MASK = 29 + NFV9_FIELD_IPV6_DST_MASK = 30 + NFV9_FIELD_IPV6_FLOW_LABEL = 31 + NFV9_FIELD_ICMP_TYPE = 32 + NFV9_FIELD_MUL_IGMP_TYPE = 33 + NFV9_FIELD_SAMPLING_INTERVAL = 34 + NFV9_FIELD_SAMPLING_ALGORITHM = 35 + NFV9_FIELD_FLOW_ACTIVE_TIMEOUT = 36 + NFV9_FIELD_FLOW_INACTIVE_TIMEOUT = 37 + NFV9_FIELD_ENGINE_TYPE = 38 + NFV9_FIELD_ENGINE_ID = 39 + NFV9_FIELD_TOTAL_BYTES_EXP = 40 + NFV9_FIELD_TOTAL_PKTS_EXP = 41 + NFV9_FIELD_TOTAL_FLOWS_EXP = 42 + NFV9_FIELD_IPV4_SRC_PREFIX = 44 + NFV9_FIELD_IPV4_DST_PREFIX = 45 + NFV9_FIELD_MPLS_TOP_LABEL_TYPE = 46 + NFV9_FIELD_MPLS_TOP_LABEL_IP_ADDR = 47 + NFV9_FIELD_FLOW_SAMPLER_ID = 48 + NFV9_FIELD_FLOW_SAMPLER_MODE = 49 + NFV9_FIELD_FLOW_SAMPLER_RANDOM_INTERVAL = 50 + NFV9_FIELD_MIN_TTL = 52 + NFV9_FIELD_MAX_TTL = 53 + NFV9_FIELD_IPV4_IDENT = 54 + NFV9_FIELD_DST_TOS = 55 + NFV9_FIELD_IN_SRC_MAC = 56 + NFV9_FIELD_OUT_DST_MAC = 57 + NFV9_FIELD_SRC_VLAN = 58 + NFV9_FIELD_DST_VLAN = 59 + NFV9_FIELD_IP_PROTOCOL_VERSION = 60 + NFV9_FIELD_DIRECTION = 61 + NFV9_FIELD_IPV6_NEXT_HOP = 62 + NFV9_FIELD_BGP_IPV6_NEXT_HOP = 63 + NFV9_FIELD_IPV6_OPTION_HEADERS = 64 + NFV9_FIELD_MPLS_LABEL_1 = 70 + NFV9_FIELD_MPLS_LABEL_2 = 71 + NFV9_FIELD_MPLS_LABEL_3 = 72 + NFV9_FIELD_MPLS_LABEL_4 = 73 + NFV9_FIELD_MPLS_LABEL_5 = 74 + NFV9_FIELD_MPLS_LABEL_6 = 75 + NFV9_FIELD_MPLS_LABEL_7 = 76 + NFV9_FIELD_MPLS_LABEL_8 = 77 + NFV9_FIELD_MPLS_LABEL_9 = 78 + NFV9_FIELD_MPLS_LABEL_10 = 79 + NFV9_FIELD_IN_DST_MAC = 80 + NFV9_FIELD_OUT_SRC_MAC = 81 + 
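+	// The field type identifiers in this block follow Cisco's NetFlow
+	// Version 9 export format documentation (cf. RFC 3954); gaps such as
+	// 43, 51 and 87 correspond to vendor-proprietary or deprecated IDs.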
NFV9_FIELD_IF_NAME = 82 + NFV9_FIELD_IF_DESC = 83 + NFV9_FIELD_SAMPLER_NAME = 84 + NFV9_FIELD_IN_PERMANENT_BYTES = 85 + NFV9_FIELD_IN_PERMANENT_PKTS = 86 + NFV9_FIELD_FRAGMENT_OFFSET = 88 + NFV9_FIELD_FORWARDING_STATUS = 89 + NFV9_FIELD_MPLS_PAL_RD = 90 + NFV9_FIELD_MPLS_PREFIX_LEN = 91 + NFV9_FIELD_SRC_TRAFFIC_INDEX = 92 + NFV9_FIELD_DST_TRAFFIC_INDEX = 93 + NFV9_FIELD_APPLICATION_DESCRIPTION = 94 + NFV9_FIELD_APPLICATION_TAG = 95 + NFV9_FIELD_APPLICATION_NAME = 96 + NFV9_FIELD_postipDiffServCodePoint = 98 + NFV9_FIELD_replication_factor = 99 + NFV9_FIELD_layer2packetSectionOffset = 102 + NFV9_FIELD_layer2packetSectionSize = 103 + NFV9_FIELD_layer2packetSectionData = 104 +) + +type NFv9Packet struct { + Version uint16 + Count uint16 + SystemUptime uint32 + UnixSeconds uint32 + SequenceNumber uint32 + SourceId uint32 + FlowSets []interface{} +} + +type NFv9OptionsTemplateFlowSet struct { + FlowSetHeader + Records []NFv9OptionsTemplateRecord +} + +type NFv9OptionsTemplateRecord struct { + TemplateId uint16 + ScopeLength uint16 + OptionLength uint16 + Scopes []Field + Options []Field +} + +func NFv9TypeToString(typeId uint16) string { + + nameList := map[uint16]string{ + 1: "IN_BYTES", + 2: "IN_PKTS", + 3: "FLOWS", + 4: "PROTOCOL", + 5: "SRC_TOS", + 6: "TCP_FLAGS", + 7: "L4_SRC_PORT", + 8: "IPV4_SRC_ADDR", + 9: "SRC_MASK", + 10: "INPUT_SNMP", + 11: "L4_DST_PORT", + 12: "IPV4_DST_ADDR", + 13: "DST_MASK", + 14: "OUTPUT_SNMP", + 15: "IPV4_NEXT_HOP", + 16: "SRC_AS", + 17: "DST_AS", + 18: "BGP_IPV4_NEXT_HOP", + 19: "MUL_DST_PKTS", + 20: "MUL_DST_BYTES", + 21: "LAST_SWITCHED", + 22: "FIRST_SWITCHED", + 23: "OUT_BYTES", + 24: "OUT_PKTS", + 25: "MIN_PKT_LNGTH", + 26: "MAX_PKT_LNGTH", + 27: "IPV6_SRC_ADDR", + 28: "IPV6_DST_ADDR", + 29: "IPV6_SRC_MASK", + 30: "IPV6_DST_MASK", + 31: "IPV6_FLOW_LABEL", + 32: "ICMP_TYPE", + 33: "MUL_IGMP_TYPE", + 34: "SAMPLING_INTERVAL", + 35: "SAMPLING_ALGORITHM", + 36: "FLOW_ACTIVE_TIMEOUT", + 37: "FLOW_INACTIVE_TIMEOUT", + 38: "ENGINE_TYPE", + 39: "ENGINE_ID", + 40: "TOTAL_BYTES_EXP", + 41: "TOTAL_PKTS_EXP", + 42: "TOTAL_FLOWS_EXP", + 43: "*Vendor Proprietary*", + 44: "IPV4_SRC_PREFIX", + 45: "IPV4_DST_PREFIX", + 46: "MPLS_TOP_LABEL_TYPE", + 47: "MPLS_TOP_LABEL_IP_ADDR", + 48: "FLOW_SAMPLER_ID", + 49: "FLOW_SAMPLER_MODE", + 50: "FLOW_SAMPLER_RANDOM_INTERVAL", + 51: "*Vendor Proprietary*", + 52: "MIN_TTL", + 53: "MAX_TTL", + 54: "IPV4_IDENT", + 55: "DST_TOS", + 56: "IN_SRC_MAC", + 57: "OUT_DST_MAC", + 58: "SRC_VLAN", + 59: "DST_VLAN", + 60: "IP_PROTOCOL_VERSION", + 61: "DIRECTION", + 62: "IPV6_NEXT_HOP", + 63: "BPG_IPV6_NEXT_HOP", + 64: "IPV6_OPTION_HEADERS", + 65: "*Vendor Proprietary*", + 66: "*Vendor Proprietary*", + 67: "*Vendor Proprietary*", + 68: "*Vendor Proprietary*", + 69: "*Vendor Proprietary*", + 70: "MPLS_LABEL_1", + 71: "MPLS_LABEL_2", + 72: "MPLS_LABEL_3", + 73: "MPLS_LABEL_4", + 74: "MPLS_LABEL_5", + 75: "MPLS_LABEL_6", + 76: "MPLS_LABEL_7", + 77: "MPLS_LABEL_8", + 78: "MPLS_LABEL_9", + 79: "MPLS_LABEL_10", + 80: "IN_DST_MAC", + 81: "OUT_SRC_MAC", + 82: "IF_NAME", + 83: "IF_DESC", + 84: "SAMPLER_NAME", + 85: "IN_ PERMANENT _BYTES", + 86: "IN_ PERMANENT _PKTS", + 87: "* Vendor Proprietary*", + 88: "FRAGMENT_OFFSET", + 89: "FORWARDING STATUS", + 90: "MPLS PAL RD", + 91: "MPLS PREFIX LEN", + 92: "SRC TRAFFIC INDEX", + 93: "DST TRAFFIC INDEX", + 94: "APPLICATION DESCRIPTION", + 95: "APPLICATION TAG", + 96: "APPLICATION NAME", + 98: "postipDiffServCodePoint", + 99: "replication factor", + 100: "DEPRECATED", + 102: "layer2packetSectionOffset", + 103: 
"layer2packetSectionSize", + 104: "layer2packetSectionData", + } + + if typeId > 104 || typeId == 0 { + return "Unassigned" + } else { + return nameList[typeId] + } +} + +func NFv9ScopeToString(scopeId uint16) string { + nameList := map[uint16]string{ + 1: "System", + 2: "Interface", + 3: "Line Card", + 4: "NetFlow Cache", + 5: "Template", + } + + if scopeId >= 1 && scopeId <= 5 { + return nameList[scopeId] + } else { + return "Unassigned" + } +} + +func (flowSet NFv9OptionsTemplateFlowSet) String(TypeToString func(uint16) string) string { + str := fmt.Sprintf(" Id %v\n", flowSet.Id) + str += fmt.Sprintf(" Length: %v\n", flowSet.Length) + str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) + + for j, record := range flowSet.Records { + str += fmt.Sprintf(" - Record %v:\n", j) + str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId) + str += fmt.Sprintf(" ScopeLength: %v\n", record.ScopeLength) + str += fmt.Sprintf(" OptionLength: %v\n", record.OptionLength) + str += fmt.Sprintf(" Scopes (%v):\n", len(record.Scopes)) + + for k, field := range record.Scopes { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, NFv9ScopeToString(field.Type), field.Type, field.Length) + } + + str += fmt.Sprintf(" Options (%v):\n", len(record.Options)) + + for k, field := range record.Options { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length) + } + } + + return str +} + +func (p NFv9Packet) String() string { + str := "Flow Packet\n" + str += "------------\n" + str += fmt.Sprintf(" Version: %v\n", p.Version) + str += fmt.Sprintf(" Count: %v\n", p.Count) + + unixSeconds := time.Unix(int64(p.UnixSeconds), 0) + str += fmt.Sprintf(" SystemUptime: %v\n", p.SystemUptime) + str += fmt.Sprintf(" UnixSeconds: %v\n", unixSeconds.String()) + str += fmt.Sprintf(" SequenceNumber: %v\n", p.SequenceNumber) + str += fmt.Sprintf(" SourceId: %v\n", p.SourceId) + str += fmt.Sprintf(" FlowSets (%v):\n", len(p.FlowSets)) + + for i, flowSet := range p.FlowSets { + switch flowSet := flowSet.(type) { + case TemplateFlowSet: + str += fmt.Sprintf(" - TemplateFlowSet %v:\n", i) + str += flowSet.String(NFv9TypeToString) + case NFv9OptionsTemplateFlowSet: + str += fmt.Sprintf(" - OptionsTemplateFlowSet %v:\n", i) + str += flowSet.String(NFv9TypeToString) + case DataFlowSet: + str += fmt.Sprintf(" - DataFlowSet %v:\n", i) + str += flowSet.String(NFv9TypeToString) + case OptionsDataFlowSet: + str += fmt.Sprintf(" - OptionsDataFlowSet %v:\n", i) + str += flowSet.String(NFv9TypeToString, NFv9ScopeToString) + default: + str += fmt.Sprintf(" - (unknown type) %v:\n", i, flowSet) + } + } + return str +} diff --git a/decoders/netflow/packet.go b/decoders/netflow/packet.go new file mode 100644 index 0000000..5384eb5 --- /dev/null +++ b/decoders/netflow/packet.go @@ -0,0 +1,153 @@ +package netflow + +import ( + "fmt" +) + +// FlowSetHeader contains fields shared by all Flow Sets (DataFlowSet, +// TemplateFlowSet, OptionsTemplateFlowSet). +type FlowSetHeader struct { + // FlowSet ID: + // 0 for TemplateFlowSet + // 1 for OptionsTemplateFlowSet + // 256-65535 for DataFlowSet (used as TemplateId) + Id uint16 + + // The total length of this FlowSet in bytes (including padding). + Length uint16 +} + +// TemplateFlowSet is a collection of templates that describe structure of Data +// Records (actual NetFlow data). 
+type TemplateFlowSet struct { + FlowSetHeader + + // List of Template Records + Records []TemplateRecord +} + +// DataFlowSet is a collection of Data Records (actual NetFlow data) and Options +// Data Records (meta data). +type DataFlowSet struct { + FlowSetHeader + + Records []DataRecord +} + +type OptionsDataFlowSet struct { + FlowSetHeader + + Records []OptionsDataRecord +} + +// TemplateRecord is a single template that describes structure of a Flow Record +// (actual Netflow data). +type TemplateRecord struct { + // Each of the newly generated Template Records is given a unique + // Template ID. This uniqueness is local to the Observation Domain that + // generated the Template ID. Template IDs of Data FlowSets are numbered + // from 256 to 65535. + TemplateId uint16 + + // Number of fields in this Template Record. Because a Template FlowSet + // usually contains multiple Template Records, this field allows the + // Collector to determine the end of the current Template Record and + // the start of the next. + FieldCount uint16 + + // List of fields in this Template Record. + Fields []Field +} + +type DataRecord struct { + Values []DataField +} + +// OptionsDataRecord is meta data sent alongide actual NetFlow data. Combined +// with OptionsTemplateRecord it can be decoded to a single data row. +type OptionsDataRecord struct { + // List of Scope values stored in raw format as []byte + ScopesValues []DataField + + // List of Optons values stored in raw format as []byte + OptionsValues []DataField +} + +// Field describes type and length of a single value in a Flow Data Record. +// Field does not contain the record value itself it is just a description of +// what record value will look like. +type Field struct { + // A numeric value that represents the type of field. + Type uint16 + + // The length (in bytes) of the field. + Length uint16 +} + +type DataField struct { + // A numeric value that represents the type of field. + Type uint16 + + // The value (in bytes) of the field. + Value interface{} + //Value []byte +} + +func (flowSet OptionsDataFlowSet) String(TypeToString func(uint16) string, ScopeToString func(uint16) string) string { + str := fmt.Sprintf(" Id %v\n", flowSet.Id) + str += fmt.Sprintf(" Length: %v\n", flowSet.Length) + str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) + + for j, record := range flowSet.Records { + str += fmt.Sprintf(" - Record %v:\n", j) + str += fmt.Sprintf(" Scopes (%v):\n", len(record.ScopesValues)) + + for k, value := range record.ScopesValues { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, ScopeToString(value.Type), value.Type, value.Value) + } + + str += fmt.Sprintf(" Options (%v):\n", len(record.OptionsValues)) + + for k, value := range record.OptionsValues { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(value.Type), value.Type, value.Value) + } + } + + return str +} + +func (flowSet DataFlowSet) String(TypeToString func(uint16) string) string { + str := fmt.Sprintf(" Id %v\n", flowSet.Id) + str += fmt.Sprintf(" Length: %v\n", flowSet.Length) + str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) + + for j, record := range flowSet.Records { + str += fmt.Sprintf(" - Record %v:\n", j) + str += fmt.Sprintf(" Values (%v):\n", len(record.Values)) + + for k, value := range record.Values { + str += fmt.Sprintf(" - %v. 
%v (%v): %v\n", k, TypeToString(value.Type), value.Type, value.Value) + } + } + + return str +} + +func (flowSet TemplateFlowSet) String(TypeToString func(uint16) string) string { + str := fmt.Sprintf(" Id %v\n", flowSet.Id) + str += fmt.Sprintf(" Length: %v\n", flowSet.Length) + str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) + + for j, record := range flowSet.Records { + str += fmt.Sprintf(" - %v. Record:\n", j) + str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId) + str += fmt.Sprintf(" FieldCount: %v\n", record.FieldCount) + str += fmt.Sprintf(" Fields (%v):\n", len(record.Fields)) + + for k, field := range record.Fields { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length) + } + } + + return str +} diff --git a/decoders/sflow/datastructure.go b/decoders/sflow/datastructure.go new file mode 100644 index 0000000..670652a --- /dev/null +++ b/decoders/sflow/datastructure.go @@ -0,0 +1,103 @@ +package sflow + +type SampledHeader struct { + Protocol uint32 + FrameLength uint32 + Stripped uint32 + OriginalLength uint32 + HeaderData []byte +} + +type SampledEthernet struct { + Length uint32 + SrcMac []byte + DstMac []byte + EthType uint32 +} + +type SampledIP_Base struct { + Length uint32 + Protocol uint32 + SrcIP []byte + DstIP []byte + SrcPort uint32 + DstPort uint32 + TcpFlags uint32 +} + +type SampledIPv4 struct { + Base SampledIP_Base + Tos uint32 +} + +type SampledIPv6 struct { + Base SampledIP_Base + Priority uint32 +} + +type ExtendedSwitch struct { + SrcVlan uint32 + SrcPriority uint32 + DstVlan uint32 + DstPriority uint32 +} + +type ExtendedRouter struct { + NextHopIPVersion uint32 + NextHop []byte + SrcMaskLen uint32 + DstMaskLen uint32 +} + +type ExtendedGateway struct { + NextHopIPVersion uint32 + NextHop []byte + AS uint32 + SrcAS uint32 + SrcPeerAS uint32 + ASDestinations uint32 + ASPathType uint32 + ASPathLength uint32 + ASPath []uint32 + CommunitiesLength uint32 + Communities []uint32 + LocalPref uint32 +} + +type IfCounters struct { + IfIndex uint32 + IfType uint32 + IfSpeed uint64 + IfDirection uint32 + IfStatus uint32 + IfInOctets uint64 + IfInUcastPkts uint32 + IfInMulticastPkts uint32 + IfInBroadcastPkts uint32 + IfInDiscards uint32 + IfInErrors uint32 + IfInUnknownProtos uint32 + IfOutOctets uint64 + IfOutUcastPkts uint32 + IfOutMulticastPkts uint32 + IfOutBroadcastPkts uint32 + IfOutDiscards uint32 + IfOutErrors uint32 + IfPromiscuousMode uint32 +} + +type EthernetCounters struct { + Dot3StatsAlignmentErrors uint32 + Dot3StatsFCSErrors uint32 + Dot3StatsSingleCollisionFrames uint32 + Dot3StatsMultipleCollisionFrames uint32 + Dot3StatsSQETestErrors uint32 + Dot3StatsDeferredTransmissions uint32 + Dot3StatsLateCollisions uint32 + Dot3StatsExcessiveCollisions uint32 + Dot3StatsInternalMacTransmitErrors uint32 + Dot3StatsCarrierSenseErrors uint32 + Dot3StatsFrameTooLongs uint32 + Dot3StatsInternalMacReceiveErrors uint32 + Dot3StatsSymbolErrors uint32 +} diff --git a/decoders/sflow/packet.go b/decoders/sflow/packet.go new file mode 100644 index 0000000..3f8aef0 --- /dev/null +++ b/decoders/sflow/packet.go @@ -0,0 +1,69 @@ +package sflow + +type Packet struct { + Version uint32 + IPVersion uint32 + AgentIP []byte + SubAgentId uint32 + SequenceNumber uint32 + Uptime uint32 + SamplesCount uint32 + Samples []interface{} +} + +type SampleHeader struct { + Format uint32 + Length uint32 + + SampleSequenceNumber uint32 + SourceIdType uint32 + SourceIdValue uint32 +} + +type FlowSample struct { + Header 
SampleHeader + + SamplingRate uint32 + SamplePool uint32 + Drops uint32 + Input uint32 + Output uint32 + FlowRecordsCount uint32 + Records []FlowRecord +} + +type CounterSample struct { + Header SampleHeader + + CounterRecordsCount uint32 + Records []CounterRecord +} + +type ExpandedFlowSample struct { + Header SampleHeader + + SamplingRate uint32 + SamplePool uint32 + Drops uint32 + InputIfFormat uint32 + InputIfValue uint32 + OutputIfFormat uint32 + OutputIfValue uint32 + FlowRecordsCount uint32 + Records []FlowRecord +} + +type RecordHeader struct { + DataFormat uint32 + Length uint32 +} + +type FlowRecord struct { + Header RecordHeader + Data interface{} +} + +type CounterRecord struct { + Header RecordHeader + Data interface{} +} diff --git a/decoders/sflow/sflow.go b/decoders/sflow/sflow.go new file mode 100644 index 0000000..c4b4846 --- /dev/null +++ b/decoders/sflow/sflow.go @@ -0,0 +1,326 @@ +package sflow + +import ( + "bytes" + "errors" + "fmt" + "github.com/cloudflare/goflow/decoders" + "github.com/cloudflare/goflow/decoders/utils" + "net" +) + +const ( + FORMAT_EXT_SWITCH = 1001 + FORMAT_EXT_ROUTER = 1002 + FORMAT_EXT_GATEWAY = 1003 + FORMAT_RAW_PKT = 1 + FORMAT_ETH = 2 + FORMAT_IPV4 = 3 + FORMAT_IPV6 = 4 +) + +type BaseMessage struct { + Src net.IP + Port int + Payload []byte +} + +type BaseMessageDecoded struct { + Version uint32 + Src net.IP + Port int + Packet decoder.MessageDecoded +} + +type DecoderConfig struct { +} + +func CreateConfig() DecoderConfig { + config := DecoderConfig{} + return config +} + +func DecodePacket(msg decoder.Message, config decoder.DecoderConfig) (decoder.MessageDecoded, error) { + baseMsg := msg.(BaseMessage) + payload := bytes.NewBuffer(baseMsg.Payload) + + version, msgDecoded, err := DecodeMessage(payload, config) + + baseMsgDecoded := BaseMessageDecoded{ + Version: version, + Src: baseMsg.Src, + Port: baseMsg.Port, + Packet: msgDecoded, + } + + return baseMsgDecoded, err +} + +func DecodeCounterRecord(header *RecordHeader, payload *bytes.Buffer) (CounterRecord, error) { + counterRecord := CounterRecord{ + Header: *header, + } + switch (*header).DataFormat { + case 1: + ifCounters := IfCounters{} + utils.BinaryDecoder(payload, &ifCounters) + counterRecord.Data = ifCounters + case 2: + ethernetCounters := EthernetCounters{} + utils.BinaryDecoder(payload, ðernetCounters) + counterRecord.Data = ethernetCounters + default: + return counterRecord, errors.New(fmt.Sprintf("Unknown data format %v.", (*header).DataFormat)) + } + //fmt.Printf("%v\n", counterRecord) + + return counterRecord, nil +} + +func DecodeIP(payload *bytes.Buffer) (uint32, []byte, error) { + var ipVersion uint32 + utils.BinaryDecoder(payload, &ipVersion) + var ip []byte + if ipVersion == 1 { + ip = make([]byte, 4) + } else if ipVersion == 2 { + ip = make([]byte, 16) + } else { + return ipVersion, ip, errors.New(fmt.Sprintf("Unknown Next Hop IP version %v.", ipVersion)) + } + if payload.Len() >= len(ip) { + utils.BinaryDecoder(payload, &ip) + } else { + return ipVersion, ip, errors.New(fmt.Sprintf("Not enough data: %v, needs %v.", payload.Len(), len(ip))) + } + return ipVersion, ip, nil +} + +func DecodeFlowRecord(header *RecordHeader, payload *bytes.Buffer) (FlowRecord, error) { + flowRecord := FlowRecord{ + Header: *header, + } + switch (*header).DataFormat { + case FORMAT_EXT_SWITCH: + extendedSwitch := ExtendedSwitch{} + utils.BinaryDecoder(payload, &extendedSwitch) + flowRecord.Data = extendedSwitch + case FORMAT_RAW_PKT: + sampledHeader := SampledHeader{} + 
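+		// Only the four fixed 32-bit header fields are read here (big endian);
+		// everything left in the record buffer is kept verbatim as HeaderData,
+		// i.e. the raw bytes of the sampled packet header.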
utils.BinaryDecoder(payload, &(sampledHeader.Protocol), &(sampledHeader.FrameLength), &(sampledHeader.Stripped), &(sampledHeader.OriginalLength)) + sampledHeader.HeaderData = payload.Bytes() + flowRecord.Data = sampledHeader + case FORMAT_IPV4: + sampledIPBase := SampledIP_Base{ + SrcIP: make([]byte, 4), + DstIP: make([]byte, 4), + } + utils.BinaryDecoder(payload, &sampledIPBase) + sampledIPv4 := SampledIPv4{ + Base: sampledIPBase, + } + utils.BinaryDecoder(payload, &(sampledIPv4.Tos)) + flowRecord.Data = sampledIPv4 + case FORMAT_IPV6: + sampledIPBase := SampledIP_Base{ + SrcIP: make([]byte, 16), + DstIP: make([]byte, 16), + } + utils.BinaryDecoder(payload, &sampledIPBase) + sampledIPv6 := SampledIPv6{ + Base: sampledIPBase, + } + utils.BinaryDecoder(payload, &(sampledIPv6.Priority)) + flowRecord.Data = sampledIPv6 + case FORMAT_EXT_ROUTER: + extendedRouter := ExtendedRouter{} + + ipVersion, ip, err := DecodeIP(payload) + if err != nil { + return flowRecord, err + } + extendedRouter.NextHopIPVersion = ipVersion + extendedRouter.NextHop = ip + utils.BinaryDecoder(payload, &(extendedRouter.SrcMaskLen), &(extendedRouter.DstMaskLen)) + flowRecord.Data = extendedRouter + case FORMAT_EXT_GATEWAY: + extendedGateway := ExtendedGateway{} + ipVersion, ip, err := DecodeIP(payload) + if err != nil { + return flowRecord, err + } + extendedGateway.NextHopIPVersion = ipVersion + extendedGateway.NextHop = ip + utils.BinaryDecoder(payload, &(extendedGateway.AS), &(extendedGateway.SrcAS), &(extendedGateway.SrcPeerAS), + &(extendedGateway.ASDestinations)) + asPath := make([]uint32, 0) + if extendedGateway.ASDestinations != 0 { + utils.BinaryDecoder(payload, &(extendedGateway.ASPathType), &(extendedGateway.ASPathLength)) + if int(extendedGateway.ASPathLength) > payload.Len()-4 { + return flowRecord, errors.New(fmt.Sprintf("Invalid AS path length.", extendedGateway.ASPathLength)) + } + asPath = make([]uint32, extendedGateway.ASPathLength) + if len(asPath) > 0 { + utils.BinaryDecoder(payload, asPath) + } + } + extendedGateway.ASPath = asPath + + utils.BinaryDecoder(payload, &(extendedGateway.CommunitiesLength)) + if int(extendedGateway.CommunitiesLength) > payload.Len()-4 { + return flowRecord, errors.New(fmt.Sprintf("Invalid Communities length.", extendedGateway.ASPathLength)) + } + communities := make([]uint32, extendedGateway.CommunitiesLength) + if len(communities) > 0 { + utils.BinaryDecoder(payload, communities) + } + utils.BinaryDecoder(payload, &(extendedGateway.LocalPref)) + extendedGateway.Communities = communities + + flowRecord.Data = extendedGateway + default: + return flowRecord, errors.New(fmt.Sprintf("Unknown data format %v.", (*header).DataFormat)) + } + /*if((*header).DataFormat == 1002) { + fmt.Printf("%v\n", flowRecord) + }*/ + return flowRecord, nil +} + +func DecodeSample(header *SampleHeader, payload *bytes.Buffer, config decoder.DecoderConfig) (interface{}, error) { + format := (*header).Format + var sample interface{} + + utils.BinaryDecoder(payload, &((*header).SampleSequenceNumber)) + if format == FORMAT_RAW_PKT || format == FORMAT_ETH { + var sourceId uint32 + utils.BinaryDecoder(payload, &sourceId) + + (*header).SourceIdType = sourceId >> 24 + (*header).SourceIdValue = sourceId & 0x00ffffff + } else if format == FORMAT_IPV4 || format == FORMAT_IPV6 { + utils.BinaryDecoder(payload, &((*header).SourceIdType), &((*header).SourceIdValue)) + } else { + return nil, errors.New(fmt.Sprintf("Unknown format %v.", format)) + } + + var recordsCount uint32 + var flowSample FlowSample + var 
counterSample CounterSample + var expandedFlowSample ExpandedFlowSample + if format == FORMAT_RAW_PKT { + flowSample = FlowSample{ + Header: *header, + } + utils.BinaryDecoder(payload, &(flowSample.SamplingRate), &(flowSample.SamplePool), + &(flowSample.Drops), &(flowSample.Input), &(flowSample.Output), &(flowSample.FlowRecordsCount)) + recordsCount = flowSample.FlowRecordsCount + flowSample.Records = make([]FlowRecord, recordsCount) + sample = flowSample + } else if format == FORMAT_ETH || format == FORMAT_IPV6 { + utils.BinaryDecoder(payload, &recordsCount) + counterSample = CounterSample{ + Header: *header, + CounterRecordsCount: recordsCount, + } + counterSample.Records = make([]CounterRecord, recordsCount) + sample = counterSample + } else if format == FORMAT_IPV4 { + expandedFlowSample = ExpandedFlowSample{ + Header: *header, + } + utils.BinaryDecoder(payload, &(expandedFlowSample.SamplingRate), &(expandedFlowSample.SamplePool), + &(expandedFlowSample.Drops), &(expandedFlowSample.InputIfFormat), &(expandedFlowSample.InputIfValue), + &(expandedFlowSample.OutputIfFormat), &(expandedFlowSample.OutputIfValue), &(expandedFlowSample.FlowRecordsCount)) + recordsCount = expandedFlowSample.FlowRecordsCount + expandedFlowSample.Records = make([]FlowRecord, recordsCount) + sample = expandedFlowSample + } + for i := 0; i < int(recordsCount) && payload.Len() >= 8; i++ { + recordHeader := RecordHeader{} + utils.BinaryDecoder(payload, &(recordHeader.DataFormat), &(recordHeader.Length)) + if int(recordHeader.Length) > payload.Len() { + break + } + recordReader := bytes.NewBuffer(payload.Next(int(recordHeader.Length))) + if format == FORMAT_RAW_PKT || format == FORMAT_IPV4 { + record, err := DecodeFlowRecord(&recordHeader, recordReader) + if err != nil { + continue + } + if format == FORMAT_RAW_PKT { + flowSample.Records[i] = record + } else if format == FORMAT_IPV4 { + expandedFlowSample.Records[i] = record + } + } else if format == FORMAT_ETH || format == FORMAT_IPV6 { + record, err := DecodeCounterRecord(&recordHeader, recordReader) + if err != nil { + continue + } + counterSample.Records[i] = record + } + } + //fmt.Printf("%v\n", sample) + return sample, nil +} + +func DecodeMessage(payload *bytes.Buffer, config decoder.DecoderConfig) (uint32, decoder.MessageDecoded, error) { + var version uint32 + utils.BinaryDecoder(payload, &version) + packetV5 := Packet{} + if version == 5 { + packetV5.Version = version + utils.BinaryDecoder(payload, &(packetV5.IPVersion)) + var ip []byte + if packetV5.IPVersion == 1 { + ip = make([]byte, 4) + utils.BinaryDecoder(payload, ip) + } else if packetV5.IPVersion == 2 { + ip = make([]byte, 16) + utils.BinaryDecoder(payload, ip) + } else { + return version, nil, errors.New(fmt.Sprintf("Unknown IP version %v.", packetV5.IPVersion)) + } + + packetV5.AgentIP = ip + utils.BinaryDecoder(payload, &(packetV5.SubAgentId), &(packetV5.SequenceNumber), &(packetV5.Uptime), &(packetV5.SamplesCount)) + packetV5.Samples = make([]interface{}, int(packetV5.SamplesCount)) + for i := 0; i < int(packetV5.SamplesCount) && payload.Len() >= 8; i++ { + header := SampleHeader{} + utils.BinaryDecoder(payload, &(header.Format), &(header.Length)) + if int(header.Length) > payload.Len() { + break + } + sampleReader := bytes.NewBuffer(payload.Next(int(header.Length))) + + sample, err := DecodeSample(&header, sampleReader, config) + if err != nil { + // log + continue + } else { + packetV5.Samples[i] = sample + } + } + + //fmt.Printf("%v\n", packetV5) + return version, packetV5, nil + } else { + 
return version, nil, errors.New(fmt.Sprintf("Unknown version %v.", version)) + } + return version, nil, nil +} + +func CreateProcessor(numWorkers int, decoderConfig DecoderConfig, doneCallback decoder.DoneCallback, callbackArgs decoder.CallbackArgs, errorCallback decoder.ErrorCallback) decoder.Processor { + + decoderParams := decoder.DecoderParams{ + DecoderFunc: DecodePacket, + DecoderConfig: decoderConfig, + DoneCallback: doneCallback, + CallbackArgs: callbackArgs, + ErrorCallback: errorCallback, + } + processor := decoder.CreateProcessor(numWorkers, decoderParams, "sFlow") + + return processor +} diff --git a/decoders/utils/utils.go b/decoders/utils/utils.go new file mode 100644 index 0000000..a36e3b2 --- /dev/null +++ b/decoders/utils/utils.go @@ -0,0 +1,16 @@ +package utils + +import ( + "encoding/binary" + "io" +) + +func BinaryDecoder(payload io.Reader, dests ...interface{}) error { + for _, dest := range dests { + err := binary.Read(payload, binary.BigEndian, dest) + if err != nil { + return err + } + } + return nil +} diff --git a/goflow.go b/goflow.go new file mode 100644 index 0000000..9c14b70 --- /dev/null +++ b/goflow.go @@ -0,0 +1,296 @@ +package main + +import ( + "flag" + "fmt" + log "github.com/Sirupsen/logrus" + "github.com/cloudflare/goflow/decoders/netflow" + "github.com/cloudflare/goflow/decoders/sflow" + "github.com/cloudflare/goflow/producer" + "net" + "os" + "runtime" + "strconv" + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "net/http" + + "encoding/json" +) + +const AppVersion = "GoFlow v1.1.0" + +var ( + FEnable = flag.Bool("netflow", true, "Enable NetFlow") + SEnable = flag.Bool("sflow", true, "Enable sFlow") + + FAddr = flag.String("faddr", ":", "NetFlow/IPFIX listening address") + FPort = flag.Int("fport", 2055, "NetFlow/IPFIX listening port") + + SAddr = flag.String("saddr", ":", "sFlow listening address") + SPort = flag.Int("sport", 6343, "sFlow listening port") + + SamplingRate = flag.Int("sampling", 16834, "Fixed NetFlow sampling rate (-1 to disable)") + FWorkers = flag.Int("fworkers", 1, "Number of NetFlow workers") + SWorkers = flag.Int("sworkers", 1, "Number of sFlow workers") + LogLevel = flag.String("loglevel", "info", "Log level") + LogFmt = flag.String("logfmt", "normal", "Log formatter") + + EnableKafka = flag.Bool("kafka", true, "Enable Kafka") + UniqueTemplates = flag.Bool("uniquetemplates", false, "Unique templates (vs per-router/obs domain id) ; must have same sampling rate everywhere)") + MetricsAddr = flag.String("metrics.addr", ":8080", "Metrics address") + MetricsPath = flag.String("metrics.path", "/metrics", "Metrics path") + TemplatePath = flag.String("templates.path", "/templates", "NetFlow/IPFIX templates list") + + Version = flag.Bool("v", false, "Print version") + + MetricTrafficBytes = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_traffic_bytes", + Help: "Bytes received by the application.", + }, + []string{"remote_ip", "remote_port", "local_ip", "local_port", "type"}, + ) + MetricTrafficPackets = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_traffic_packets", + Help: "Packets received by the application.", + }, + []string{"remote_ip", "remote_port", "local_ip", "local_port", "type"}, + ) + MetricPacketSizeSum = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "flow_traffic_summary_size_bytes", + Help: "Summary of packet size.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + 
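+		// one time series per exporter address/port, listener address/port and flow protocol type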
[]string{"remote_ip", "remote_port", "local_ip", "local_port", "type"}, + ) +) + +func init() { + prometheus.MustRegister(MetricTrafficBytes) + prometheus.MustRegister(MetricTrafficPackets) + prometheus.MustRegister(MetricPacketSizeSum) +} + +func metricsHTTP() { + http.Handle(*MetricsPath, promhttp.Handler()) + log.Fatal(http.ListenAndServe(*MetricsAddr, nil)) +} + +func templatesHTTP(th TemplateHandler) { + http.Handle(*TemplatePath, th) +} + +type TemplateHandler struct { + Config *netflow.DecoderConfig +} + +func (h TemplateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + + if h.Config != nil { + h.Config.NetFlowV9TemplateSetLock.RLock() + h.Config.IPFIXTemplateSetLock.RLock() + + templates := make([]map[string]map[uint32]map[uint16][]netflow.Field, 2) + templates[0] = h.Config.NetFlowV9TemplateSet + templates[1] = h.Config.IPFIXTemplateSet + + enc := json.NewEncoder(w) + enc.Encode(templates) + + h.Config.IPFIXTemplateSetLock.RUnlock() + h.Config.NetFlowV9TemplateSetLock.RUnlock() + } else { + log.Debugf("No config found") + } +} + +func netflowRoutine(processArgs *producer.ProcessArguments, wg *sync.WaitGroup) { + defer (*wg).Done() + + nfConfig := netflow.CreateConfig() + nfConfig.UniqueTemplates = *UniqueTemplates + + th := TemplateHandler{} + th.Config = &nfConfig + go templatesHTTP(th) + + processor := netflow.CreateProcessor(*FWorkers, nfConfig, producer.ProcessMessageNetFlow, *processArgs, producer.ProcessNetFlowError) + processor.Start() + + addr := net.UDPAddr{ + IP: net.ParseIP(*FAddr), + Port: *FPort, + } + udpconn, err := net.ListenUDP("udp", &addr) + if err != nil { + log.Fatalf("Fatal error: could not listen to UDP (%v)", err) + udpconn.Close() + } + + payload := make([]byte, 9000) + + localIP := addr.IP.String() + if addr.IP == nil { + localIP = "" + } + log.WithFields(log.Fields{ + "Type": "NetFlow"}). + Infof("Listening on UDP %v:%v", localIP, strconv.Itoa(addr.Port)) + for { + size, pktAddr, _ := udpconn.ReadFromUDP(payload) + payloadCut := make([]byte, size) + copy(payloadCut, payload[0:size]) + + baseMessage := netflow.BaseMessage{ + Src: pktAddr.IP, + Port: pktAddr.Port, + Payload: payloadCut, + } + processor.ProcessMessage(baseMessage) + + MetricTrafficBytes.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "remote_port": strconv.Itoa(pktAddr.Port), + "local_ip": localIP, + "local_port": strconv.Itoa(addr.Port), + "type": "NetFlow", + }). + Add(float64(size)) + MetricTrafficPackets.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "remote_port": strconv.Itoa(pktAddr.Port), + "local_ip": localIP, + "local_port": strconv.Itoa(addr.Port), + "type": "NetFlow", + }). + Inc() + MetricPacketSizeSum.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "remote_port": strconv.Itoa(pktAddr.Port), + "local_ip": localIP, + "local_port": strconv.Itoa(addr.Port), + "type": "NetFlow", + }). 
+ Observe(float64(size)) + } + + udpconn.Close() +} + +func sflowRoutine(processArgs *producer.ProcessArguments, wg *sync.WaitGroup) { + defer (*wg).Done() + + sfConfig := sflow.CreateConfig() + + processor := sflow.CreateProcessor(*SWorkers, sfConfig, producer.ProcessMessageSFlow, *processArgs, producer.ProcessSFlowError) + processor.Start() + + addr := net.UDPAddr{ + IP: net.ParseIP(*SAddr), + Port: *SPort, + } + udpconn, err := net.ListenUDP("udp", &addr) + if err != nil { + log.Fatalf("Fatal error: could not listen to UDP (%v)", err) + udpconn.Close() + } + + payload := make([]byte, 9000) + + localIP := addr.IP.String() + if addr.IP == nil { + localIP = "" + } + log.WithFields(log.Fields{ + "Type": "sFlow"}). + Infof("Listening on UDP %v:%v", localIP, strconv.Itoa(addr.Port)) + for { + size, pktAddr, _ := udpconn.ReadFromUDP(payload) + payloadCut := make([]byte, size) + copy(payloadCut, payload[0:size]) + + baseMessage := sflow.BaseMessage{ + Src: pktAddr.IP, + Port: pktAddr.Port, + Payload: payloadCut, + } + processor.ProcessMessage(baseMessage) + + MetricTrafficBytes.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "remote_port": strconv.Itoa(pktAddr.Port), + "local_ip": localIP, + "local_port": strconv.Itoa(addr.Port), + "type": "sFlow", + }). + Add(float64(size)) + MetricTrafficPackets.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "remote_port": strconv.Itoa(pktAddr.Port), + "local_ip": localIP, + "local_port": strconv.Itoa(addr.Port), + "type": "sFlow", + }). + Inc() + MetricPacketSizeSum.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "remote_port": strconv.Itoa(pktAddr.Port), + "local_ip": localIP, + "local_port": strconv.Itoa(addr.Port), + "type": "sFlow", + }). + Observe(float64(size)) + } + + udpconn.Close() +} + +func main() { + flag.Parse() + + if *Version { + fmt.Println(AppVersion) + os.Exit(0) + } + + go metricsHTTP() + + lvl, _ := log.ParseLevel(*LogLevel) + log.SetLevel(lvl) + switch *LogFmt { + case "json": + log.SetFormatter(&log.JSONFormatter{}) + } + + runtime.GOMAXPROCS(runtime.NumCPU()) + + wg := &sync.WaitGroup{} + log.WithFields(log.Fields{ + "NetFlow": *FEnable, + "sFlow": *SEnable}). + Info("Starting GoFlow") + + processArgs := producer.CreateProcessArguments(*EnableKafka, *SamplingRate, *UniqueTemplates) + + if *FEnable { + (*wg).Add(1) + go netflowRoutine(&processArgs, wg) + } + if *SEnable { + (*wg).Add(1) + go sflowRoutine(&processArgs, wg) + } + + (*wg).Wait() +} diff --git a/pb/flow.pb.go b/pb/flow.pb.go new file mode 100644 index 0000000..34e8224 --- /dev/null +++ b/pb/flow.pb.go @@ -0,0 +1,390 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: flow.proto + +/* +Package flowprotob is a generated protocol buffer package. + +It is generated from these files: + flow.proto + +It has these top-level messages: + FlowMessage +*/ +package flowprotob + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FlowMessage_FlowType int32 + +const ( + FlowMessage_FLOWUNKNOWN FlowMessage_FlowType = 0 + FlowMessage_NFV9 FlowMessage_FlowType = 9 + FlowMessage_IPFIX FlowMessage_FlowType = 10 + FlowMessage_SFLOW FlowMessage_FlowType = 5 +) + +var FlowMessage_FlowType_name = map[int32]string{ + 0: "FLOWUNKNOWN", + 9: "NFV9", + 10: "IPFIX", + 5: "SFLOW", +} +var FlowMessage_FlowType_value = map[string]int32{ + "FLOWUNKNOWN": 0, + "NFV9": 9, + "IPFIX": 10, + "SFLOW": 5, +} + +func (x FlowMessage_FlowType) String() string { + return proto.EnumName(FlowMessage_FlowType_name, int32(x)) +} +func (FlowMessage_FlowType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// To be deprecated +type FlowMessage_IPType int32 + +const ( + FlowMessage_IPUNKNOWN FlowMessage_IPType = 0 + FlowMessage_IPv4 FlowMessage_IPType = 4 + FlowMessage_IPv6 FlowMessage_IPType = 6 +) + +var FlowMessage_IPType_name = map[int32]string{ + 0: "IPUNKNOWN", + 4: "IPv4", + 6: "IPv6", +} +var FlowMessage_IPType_value = map[string]int32{ + "IPUNKNOWN": 0, + "IPv4": 4, + "IPv6": 6, +} + +func (x FlowMessage_IPType) String() string { + return proto.EnumName(FlowMessage_IPType_name, int32(x)) +} +func (FlowMessage_IPType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 1} } + +type FlowMessage struct { + Type FlowMessage_FlowType `protobuf:"varint,1,opt,name=Type,json=type,enum=flowprotob.FlowMessage_FlowType" json:"Type,omitempty"` + TimeRecvd uint64 `protobuf:"varint,2,opt,name=TimeRecvd,json=timeRecvd" json:"TimeRecvd,omitempty"` + SamplingRate uint64 `protobuf:"varint,3,opt,name=SamplingRate,json=samplingRate" json:"SamplingRate,omitempty"` + SequenceNum uint32 `protobuf:"varint,4,opt,name=SequenceNum,json=sequenceNum" json:"SequenceNum,omitempty"` + // Found inside packet + TimeFlow uint64 `protobuf:"varint,5,opt,name=TimeFlow,json=timeFlow" json:"TimeFlow,omitempty"` + // Source/destination addresses + SrcIP []byte `protobuf:"bytes,6,opt,name=SrcIP,json=srcIP,proto3" json:"SrcIP,omitempty"` + DstIP []byte `protobuf:"bytes,7,opt,name=DstIP,json=dstIP,proto3" json:"DstIP,omitempty"` + IPversion FlowMessage_IPType `protobuf:"varint,8,opt,name=IPversion,json=iPversion,enum=flowprotob.FlowMessage_IPType" json:"IPversion,omitempty"` + // Size of the sampled packet + Bytes uint64 `protobuf:"varint,9,opt,name=Bytes,json=bytes" json:"Bytes,omitempty"` + Packets uint64 `protobuf:"varint,10,opt,name=Packets,json=packets" json:"Packets,omitempty"` + // Routing information + RouterAddr []byte `protobuf:"bytes,11,opt,name=RouterAddr,json=routerAddr,proto3" json:"RouterAddr,omitempty"` + NextHop []byte `protobuf:"bytes,12,opt,name=NextHop,json=nextHop,proto3" json:"NextHop,omitempty"` + NextHopAS uint32 `protobuf:"varint,13,opt,name=NextHopAS,json=nextHopAS" json:"NextHopAS,omitempty"` + // Autonomous system information + SrcAS uint32 `protobuf:"varint,14,opt,name=SrcAS,json=srcAS" json:"SrcAS,omitempty"` + DstAS uint32 `protobuf:"varint,15,opt,name=DstAS,json=dstAS" json:"DstAS,omitempty"` + // Prefix size + SrcNet uint32 `protobuf:"varint,16,opt,name=SrcNet,json=srcNet" json:"SrcNet,omitempty"` + DstNet uint32 `protobuf:"varint,17,opt,name=DstNet,json=dstNet" json:"DstNet,omitempty"` + // Interfaces + SrcIf uint32 `protobuf:"varint,18,opt,name=SrcIf,json=srcIf" json:"SrcIf,omitempty"` + DstIf uint32 `protobuf:"varint,19,opt,name=DstIf,json=dstIf" json:"DstIf,omitempty"` + // Layer 4 protocol + Proto uint32 
`protobuf:"varint,20,opt,name=Proto,json=proto" json:"Proto,omitempty"` + // Port for UDP and TCP + SrcPort uint32 `protobuf:"varint,21,opt,name=SrcPort,json=srcPort" json:"SrcPort,omitempty"` + DstPort uint32 `protobuf:"varint,22,opt,name=DstPort,json=dstPort" json:"DstPort,omitempty"` + // IP and TCP special flags + IPTos uint32 `protobuf:"varint,23,opt,name=IPTos,json=iPTos" json:"IPTos,omitempty"` + ForwardingStatus uint32 `protobuf:"varint,24,opt,name=ForwardingStatus,json=forwardingStatus" json:"ForwardingStatus,omitempty"` + IPTTL uint32 `protobuf:"varint,25,opt,name=IPTTL,json=iPTTL" json:"IPTTL,omitempty"` + TCPFlags uint32 `protobuf:"varint,26,opt,name=TCPFlags,json=tCPFlags" json:"TCPFlags,omitempty"` + // Ethernet information + SrcMac uint64 `protobuf:"varint,27,opt,name=SrcMac,json=srcMac" json:"SrcMac,omitempty"` + DstMac uint64 `protobuf:"varint,28,opt,name=DstMac,json=dstMac" json:"DstMac,omitempty"` + VlanId uint32 `protobuf:"varint,29,opt,name=VlanId,json=vlanId" json:"VlanId,omitempty"` + // Layer 3 protocol (IPv4/IPv6/ARP/...) + Etype uint32 `protobuf:"varint,30,opt,name=Etype,json=etype" json:"Etype,omitempty"` +} + +func (m *FlowMessage) Reset() { *m = FlowMessage{} } +func (m *FlowMessage) String() string { return proto.CompactTextString(m) } +func (*FlowMessage) ProtoMessage() {} +func (*FlowMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *FlowMessage) GetType() FlowMessage_FlowType { + if m != nil { + return m.Type + } + return FlowMessage_FLOWUNKNOWN +} + +func (m *FlowMessage) GetTimeRecvd() uint64 { + if m != nil { + return m.TimeRecvd + } + return 0 +} + +func (m *FlowMessage) GetSamplingRate() uint64 { + if m != nil { + return m.SamplingRate + } + return 0 +} + +func (m *FlowMessage) GetSequenceNum() uint32 { + if m != nil { + return m.SequenceNum + } + return 0 +} + +func (m *FlowMessage) GetTimeFlow() uint64 { + if m != nil { + return m.TimeFlow + } + return 0 +} + +func (m *FlowMessage) GetSrcIP() []byte { + if m != nil { + return m.SrcIP + } + return nil +} + +func (m *FlowMessage) GetDstIP() []byte { + if m != nil { + return m.DstIP + } + return nil +} + +func (m *FlowMessage) GetIPversion() FlowMessage_IPType { + if m != nil { + return m.IPversion + } + return FlowMessage_IPUNKNOWN +} + +func (m *FlowMessage) GetBytes() uint64 { + if m != nil { + return m.Bytes + } + return 0 +} + +func (m *FlowMessage) GetPackets() uint64 { + if m != nil { + return m.Packets + } + return 0 +} + +func (m *FlowMessage) GetRouterAddr() []byte { + if m != nil { + return m.RouterAddr + } + return nil +} + +func (m *FlowMessage) GetNextHop() []byte { + if m != nil { + return m.NextHop + } + return nil +} + +func (m *FlowMessage) GetNextHopAS() uint32 { + if m != nil { + return m.NextHopAS + } + return 0 +} + +func (m *FlowMessage) GetSrcAS() uint32 { + if m != nil { + return m.SrcAS + } + return 0 +} + +func (m *FlowMessage) GetDstAS() uint32 { + if m != nil { + return m.DstAS + } + return 0 +} + +func (m *FlowMessage) GetSrcNet() uint32 { + if m != nil { + return m.SrcNet + } + return 0 +} + +func (m *FlowMessage) GetDstNet() uint32 { + if m != nil { + return m.DstNet + } + return 0 +} + +func (m *FlowMessage) GetSrcIf() uint32 { + if m != nil { + return m.SrcIf + } + return 0 +} + +func (m *FlowMessage) GetDstIf() uint32 { + if m != nil { + return m.DstIf + } + return 0 +} + +func (m *FlowMessage) GetProto() uint32 { + if m != nil { + return m.Proto + } + return 0 +} + +func (m *FlowMessage) GetSrcPort() uint32 { + if m != nil { + 
return m.SrcPort + } + return 0 +} + +func (m *FlowMessage) GetDstPort() uint32 { + if m != nil { + return m.DstPort + } + return 0 +} + +func (m *FlowMessage) GetIPTos() uint32 { + if m != nil { + return m.IPTos + } + return 0 +} + +func (m *FlowMessage) GetForwardingStatus() uint32 { + if m != nil { + return m.ForwardingStatus + } + return 0 +} + +func (m *FlowMessage) GetIPTTL() uint32 { + if m != nil { + return m.IPTTL + } + return 0 +} + +func (m *FlowMessage) GetTCPFlags() uint32 { + if m != nil { + return m.TCPFlags + } + return 0 +} + +func (m *FlowMessage) GetSrcMac() uint64 { + if m != nil { + return m.SrcMac + } + return 0 +} + +func (m *FlowMessage) GetDstMac() uint64 { + if m != nil { + return m.DstMac + } + return 0 +} + +func (m *FlowMessage) GetVlanId() uint32 { + if m != nil { + return m.VlanId + } + return 0 +} + +func (m *FlowMessage) GetEtype() uint32 { + if m != nil { + return m.Etype + } + return 0 +} + +func init() { + proto.RegisterType((*FlowMessage)(nil), "flowprotob.FlowMessage") + proto.RegisterEnum("flowprotob.FlowMessage_FlowType", FlowMessage_FlowType_name, FlowMessage_FlowType_value) + proto.RegisterEnum("flowprotob.FlowMessage_IPType", FlowMessage_IPType_name, FlowMessage_IPType_value) +} + +func init() { proto.RegisterFile("flow.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 598 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x93, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0x86, 0xbf, 0x7c, 0x24, 0x71, 0x3c, 0x49, 0x5a, 0xb3, 0x94, 0x32, 0x94, 0x52, 0x45, 0x39, + 0x45, 0x54, 0xca, 0x01, 0x2a, 0x24, 0x04, 0x97, 0x94, 0x10, 0x61, 0xd1, 0xba, 0x96, 0x1d, 0x5a, + 0xae, 0x8e, 0xbd, 0x89, 0x22, 0x1c, 0x3b, 0x78, 0x37, 0x29, 0xfd, 0x7d, 0xfc, 0x31, 0x34, 0xb3, + 0x76, 0x5a, 0x90, 0xb8, 0xcd, 0xfb, 0xcc, 0xec, 0xec, 0xce, 0xeb, 0x31, 0xc0, 0x3c, 0xcd, 0x6f, + 0x87, 0xeb, 0x22, 0xd7, 0xb9, 0xe0, 0x98, 0xc3, 0x59, 0xff, 0x97, 0x05, 0xed, 0x49, 0x9a, 0xdf, + 0x5e, 0x4a, 0xa5, 0xa2, 0x85, 0x14, 0x67, 0x50, 0x9f, 0xde, 0xad, 0x25, 0xd6, 0x7a, 0xb5, 0xc1, + 0xde, 0xeb, 0xde, 0xf0, 0xbe, 0x74, 0xf8, 0xa0, 0x8c, 0x63, 0xaa, 0x0b, 0xea, 0xfa, 0x6e, 0x2d, + 0xc5, 0x31, 0xd8, 0xd3, 0xe5, 0x4a, 0x06, 0x32, 0xde, 0x26, 0xf8, 0x7f, 0xaf, 0x36, 0xa8, 0x07, + 0xb6, 0xae, 0x80, 0xe8, 0x43, 0x27, 0x8c, 0x56, 0xeb, 0x74, 0x99, 0x2d, 0x82, 0x48, 0x4b, 0x7c, + 0xc4, 0x05, 0x1d, 0xf5, 0x80, 0x89, 0x1e, 0xb4, 0x43, 0xf9, 0x63, 0x23, 0xb3, 0x58, 0x7a, 0x9b, + 0x15, 0xd6, 0x7b, 0xb5, 0x41, 0x37, 0x68, 0xab, 0x7b, 0x24, 0x8e, 0xa0, 0x45, 0x77, 0xd0, 0xcd, + 0xd8, 0xe0, 0x0e, 0x2d, 0x5d, 0x6a, 0x71, 0x00, 0x8d, 0xb0, 0x88, 0x5d, 0x1f, 0x9b, 0xbd, 0xda, + 0xa0, 0x13, 0x34, 0x14, 0x09, 0xa2, 0x63, 0xa5, 0x5d, 0x1f, 0x2d, 0x43, 0x13, 0x12, 0xe2, 0x03, + 0xd8, 0xae, 0xbf, 0x95, 0x85, 0x5a, 0xe6, 0x19, 0xb6, 0x78, 0xcc, 0x93, 0x7f, 0x8d, 0xe9, 0xfa, + 0x3c, 0xa4, 0xbd, 0xac, 0x0e, 0x50, 0xcf, 0xf3, 0x3b, 0x2d, 0x15, 0xda, 0xfc, 0x84, 0xc6, 0x8c, + 0x84, 0x40, 0xb0, 0xfc, 0x28, 0xfe, 0x2e, 0xb5, 0x42, 0x60, 0x6e, 0xad, 0x8d, 0x14, 0x27, 0x00, + 0x41, 0xbe, 0xd1, 0xb2, 0x18, 0x25, 0x49, 0x81, 0x6d, 0x7e, 0x08, 0x14, 0x3b, 0x42, 0x27, 0x3d, + 0xf9, 0x53, 0x7f, 0xce, 0xd7, 0xd8, 0xe1, 0xa4, 0x95, 0x19, 0x49, 0x9e, 0x96, 0x99, 0x51, 0x88, + 0x5d, 0xf6, 0xc3, 0xce, 0x2a, 0x50, 0x4e, 0x3c, 0x0a, 0x71, 0x8f, 0x33, 0x34, 0xb1, 0xa1, 0x63, + 0xa5, 0x47, 0x21, 0xee, 0x1b, 0x9a, 0x90, 0x10, 0x87, 0xd0, 0x0c, 0x8b, 0xd8, 0x93, 0x1a, 0x1d, + 0xc6, 0x4d, 0xc5, 0x8a, 0xf8, 0x58, 0x69, 0xe2, 0x8f, 0x0d, 0x4f, 0x58, 0x55, 0x6e, 0xce, 0x51, + 0xec, 
0x7a, 0xbb, 0xf3, 0xca, 0xcd, 0x39, 0x3e, 0xd9, 0xf5, 0x36, 0xd4, 0x27, 0xdf, 0xf0, 0xc0, + 0x50, 0xb3, 0x61, 0x08, 0x56, 0x58, 0xc4, 0x7e, 0x5e, 0x68, 0x7c, 0xca, 0xdc, 0x52, 0x46, 0x52, + 0x66, 0xac, 0x34, 0x67, 0x0e, 0x4d, 0x26, 0x31, 0x92, 0x3a, 0xb9, 0xfe, 0x34, 0x57, 0xf8, 0xcc, + 0x74, 0x5a, 0x92, 0x10, 0xaf, 0xc0, 0x99, 0xe4, 0xc5, 0x6d, 0x54, 0x24, 0xcb, 0x6c, 0x11, 0xea, + 0x48, 0x6f, 0x14, 0x22, 0x17, 0x38, 0xf3, 0xbf, 0x78, 0xd9, 0x61, 0x7a, 0x81, 0xcf, 0x77, 0x1d, + 0xa6, 0x17, 0xbc, 0x37, 0x1f, 0xfd, 0x49, 0x1a, 0x2d, 0x14, 0x1e, 0x71, 0xa2, 0xa5, 0x4b, 0x5d, + 0x3a, 0x73, 0x19, 0xc5, 0xf8, 0x82, 0x3f, 0x1b, 0x39, 0x73, 0x19, 0xc5, 0xa5, 0x33, 0xc4, 0x8f, + 0x0d, 0x4f, 0x58, 0x11, 0xbf, 0x4e, 0xa3, 0xcc, 0x4d, 0xf0, 0xa5, 0x71, 0x6c, 0xcb, 0x8a, 0x6e, + 0xfe, 0x44, 0x3f, 0x02, 0x9e, 0x98, 0x9b, 0x25, 0x89, 0xfe, 0x7b, 0x68, 0x55, 0xff, 0x89, 0xd8, + 0x87, 0xf6, 0xe4, 0xe2, 0xea, 0xe6, 0xab, 0xf7, 0xc5, 0xbb, 0xba, 0xf1, 0x9c, 0xff, 0x44, 0x0b, + 0xea, 0xde, 0xe4, 0xfa, 0x9d, 0x63, 0x0b, 0x9b, 0x9e, 0x3d, 0x71, 0xbf, 0x39, 0x40, 0x61, 0x48, + 0x65, 0x4e, 0xa3, 0x7f, 0x0a, 0x4d, 0xb3, 0x7d, 0xa2, 0x4b, 0x0b, 0xfb, 0xc7, 0x41, 0xd7, 0xdf, + 0x9e, 0x39, 0xf5, 0x32, 0x7a, 0xeb, 0x34, 0xcf, 0x4f, 0xe1, 0x28, 0xce, 0x57, 0xc3, 0x38, 0xcd, + 0x37, 0xc9, 0x3c, 0x8d, 0x0a, 0x39, 0xcc, 0xa4, 0xe6, 0xa5, 0x8e, 0x16, 0x8b, 0xf3, 0xee, 0x83, + 0x95, 0xf6, 0x67, 0xb3, 0x26, 0x7f, 0xa3, 0x37, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x3f, 0xb3, + 0xab, 0x2d, 0x13, 0x04, 0x00, 0x00, +} diff --git a/pb/flow.proto b/pb/flow.proto new file mode 100644 index 0000000..4bb1913 --- /dev/null +++ b/pb/flow.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; +package flowprotob; + +option java_package = "com.cloudflare.net.flowagg"; +option java_outer_classname = "FlowMessagePb"; + +message FlowMessage { + + enum FlowType { + FLOWUNKNOWN = 0; + NFV9 = 9; + IPFIX = 10; + SFLOW = 5; + } + FlowType Type = 1; + + uint64 TimeRecvd = 2; + uint64 SamplingRate = 3; + uint32 SequenceNum = 4; + + // Found inside packet + uint64 TimeFlow = 5; + + // Source/destination addresses + bytes SrcIP = 6; + bytes DstIP = 7; + + // To be deprecated + enum IPType { + IPUNKNOWN = 0; + IPv4 = 4; + IPv6 = 6; + } + IPType IPversion = 8; + + // Size of the sampled packet + uint64 Bytes = 9; + uint64 Packets = 10; + + // Routing information + bytes RouterAddr = 11; + bytes NextHop = 12; + uint32 NextHopAS = 13; + + // Autonomous system information + uint32 SrcAS = 14; + uint32 DstAS = 15; + + // Prefix size + uint32 SrcNet = 16; + uint32 DstNet = 17; + + // Interfaces + uint32 SrcIf = 18; + uint32 DstIf = 19; + + // Layer 4 protocol + uint32 Proto = 20; + + // Port for UDP and TCP + uint32 SrcPort = 21; + uint32 DstPort = 22; + + // IP and TCP special flags + uint32 IPTos = 23; + uint32 ForwardingStatus = 24; + uint32 IPTTL = 25; + uint32 TCPFlags = 26; + + // Ethernet information + uint64 SrcMac = 27; + uint64 DstMac = 28; + + uint32 VlanId = 29; + + // Layer 3 protocol (IPv4/IPv6/ARP/...) 
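+  // carried as the EtherType value, e.g. 0x0800 (IPv4), 0x86DD (IPv6), 0x0806 (ARP)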
+ uint32 Etype = 30; +} \ No newline at end of file diff --git a/producer/kafka.go b/producer/kafka.go new file mode 100644 index 0000000..e4ef31b --- /dev/null +++ b/producer/kafka.go @@ -0,0 +1,66 @@ +package producer + +import ( + "errors" + "flag" + "fmt" + log "github.com/Sirupsen/logrus" + flowmessage "github.com/cloudflare/goflow/pb" + proto "github.com/golang/protobuf/proto" + sarama "gopkg.in/Shopify/sarama.v1" + "net" + "strconv" + "strings" +) + +var ( + KafkaTopic = flag.String("kafka.out.topic", "flow-messages", "Kafka topic to produce to") + KafkaSrv = flag.String("kafka.out.srv", "", "SRV record containing a list of Kafka brokers (or use kafka.out.brokers)") + KafkaBrk = flag.String("kafka.out.brokers", "127.0.0.1:9092,[::1]:9092", "Kafka brokers list separated by commas") +) + +type KafkaState struct { + producer sarama.AsyncProducer +} + +func StartKafkaProducer() *KafkaState { + kafkaConfig := sarama.NewConfig() + kafkaConfig.Producer.Return.Successes = false + kafkaConfig.Producer.Return.Errors = false + + addrs := make([]string, 0) + if *KafkaSrv != "" { + addrs, _ = GetServiceAddresses(*KafkaSrv) + } else { + addrs = strings.Split(*KafkaBrk, ",") + } + + kafkaProducer, err := sarama.NewAsyncProducer(addrs, kafkaConfig) + if err != nil { + log.Fatalf("%v", err) + } + state := KafkaState{ + producer: kafkaProducer, + } + + return &state +} + +func GetServiceAddresses(srv string) (addrs []string, err error) { + _, srvs, err := net.LookupSRV("", "", srv) + if err != nil { + return nil, errors.New(fmt.Sprintf("Service discovery: %v\n", err)) + } + for _, srv := range srvs { + addrs = append(addrs, net.JoinHostPort(srv.Target, strconv.Itoa(int(srv.Port)))) + } + return addrs, nil +} + +func (s KafkaState) SendKafkaFlowMessage(flowMessage flowmessage.FlowMessage) { + b, _ := proto.Marshal(&flowMessage) + s.producer.Input() <- &sarama.ProducerMessage{ + Topic: *KafkaTopic, + Value: sarama.ByteEncoder(b), + } +} diff --git a/producer/producer.go b/producer/producer.go new file mode 100644 index 0000000..1946360 --- /dev/null +++ b/producer/producer.go @@ -0,0 +1,49 @@ +package producer + +import ( + "github.com/prometheus/client_golang/prometheus" + "sync" +) + +type ProcessArguments struct { + KafkaState *KafkaState + + SamplingRateMap SamplingRateMap + SamplingRateLock *sync.RWMutex + SamplingRateFixed int + + TemplateMap TemplateMap + TemplateMapLock *sync.RWMutex + + UniqueTemplates bool +} + +func CreateProcessArguments(kafka bool, samplingRate int, uniqueTemplates bool) ProcessArguments { + prometheus.MustRegister(NetFlowStats) + prometheus.MustRegister(NetFlowErrors) + prometheus.MustRegister(NetFlowSetRecordsStatsSum) + prometheus.MustRegister(NetFlowSetStatsSum) + prometheus.MustRegister(NetFlowTimeStatsSum) + prometheus.MustRegister(NetFlowTemplatesStats) + + prometheus.MustRegister(SFlowStats) + prometheus.MustRegister(SFlowErrors) + prometheus.MustRegister(SFlowSampleStatsSum) + prometheus.MustRegister(SFlowSampleRecordsStatsSum) + + var kafkaState *KafkaState + if kafka { + kafkaState = StartKafkaProducer() + } + + processArgs := ProcessArguments{ + KafkaState: kafkaState, + SamplingRateMap: make(map[string]map[uint32]uint64), + SamplingRateLock: &sync.RWMutex{}, + TemplateMap: make(map[string]map[uint32]map[uint16]bool), + TemplateMapLock: &sync.RWMutex{}, + SamplingRateFixed: samplingRate, + UniqueTemplates: uniqueTemplates, + } + return processArgs +} diff --git a/producer/producer_nf.go b/producer/producer_nf.go new file mode 100644 index 0000000..1a6af89 --- 
/dev/null +++ b/producer/producer_nf.go @@ -0,0 +1,785 @@ +package producer + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + log "github.com/Sirupsen/logrus" + "github.com/cloudflare/goflow/decoders/netflow" + flowmessage "github.com/cloudflare/goflow/pb" + "github.com/prometheus/client_golang/prometheus" + "net" + "strconv" + "sync" + "time" +) + +var ( + NetFlowStats = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_nf_count", + Help: "NetFlows processed.", + }, + []string{"router", "version"}, + ) + NetFlowErrors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_nf_errors_count", + Help: "NetFlows processed errors.", + }, + []string{"router", "version", "error"}, + ) + NetFlowSetRecordsStatsSum = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_nf_flowset_records_sum", + Help: "NetFlows FlowSets sum of records.", + }, + []string{"router", "version", "type"}, // data-template, data, opts... + ) + NetFlowSetStatsSum = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_nf_flowset_sum", + Help: "NetFlows FlowSets sum.", + }, + []string{"router", "version", "type"}, // data-template, data, opts... + ) + NetFlowTimeStatsSum = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "flow_process_nf_delay_summary_seconds", + Help: "NetFlows time difference between time of flow and processing.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"router", "version"}, + ) + NetFlowTemplatesStats = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_nf_templates_count", + Help: "NetFlows Template count.", + }, + []string{"router", "version", "obs_domain_id", "type"}, // options/template + ) +) + +type SamplingRateMap map[string]map[uint32]uint64 +type TemplateMap map[string]map[uint32]map[uint16]bool + +func NetFlowLookFor(dataFields []netflow.DataField, typeId uint16) (bool, interface{}) { + for _, dataField := range dataFields { + if dataField.Type == typeId { + return true, dataField.Value + } + } + return false, nil +} + +func NetFlowPopulate(dataFields []netflow.DataField, typeId uint16, addr interface{}) bool { + exists, value := NetFlowLookFor(dataFields, typeId) + //log.Printf("Populate: %v %v %v\n", typeId, exists, value) + if exists && value != nil { + valueBytes, ok := value.([]byte) + valueReader := bytes.NewReader(valueBytes) + if ok { + switch addrt := addr.(type) { + case *(net.IP): + *addrt = valueBytes + case *(time.Time): + t := uint64(0) + binary.Read(valueReader, binary.BigEndian, &t) + t64 := int64(t / 1000) + *addrt = time.Unix(t64, 0) + default: + binary.Read(valueReader, binary.BigEndian, addr) + } + } + } + return exists +} + +func DecodeUNumber(b []byte, out interface{}) error { + var o uint64 + l := len(b) + switch l { + case 1: + o = uint64(b[0]) + case 2: + o = uint64(binary.BigEndian.Uint16(b)) + case 4: + o = uint64(binary.BigEndian.Uint32(b)) + case 8: + o = binary.BigEndian.Uint64(b) + default: + if l < 8 { + var iter uint + for i := range b { + o |= uint64(b[i]) << uint(8*(uint(l)-iter-1)) + iter++ + } + } else { + return errors.New(fmt.Sprintf("Non-regular number of bytes for a number: %v", l)) + } + } + switch t := out.(type) { + case *byte: + *t = byte(o) + case *uint16: + *t = uint16(o) + case *uint32: + *t = uint32(o) + case *uint64: + *t = o + default: + return errors.New("The parameter is not a pointer to a byte/uint16/uint32/uint64 structure") + } + return nil +} + +func 
ConvertNetFlowDataSet(router net.IP, version uint16, seqnum uint32, sampling uint64, baseTime uint32, uptime uint32, record []netflow.DataField) *flowmessage.FlowMessage { + routerStr := router.String() + flowMessage := &flowmessage.FlowMessage{ + SamplingRate: sampling, + TimeRecvd: uint64(time.Now().Unix()), + } + flowMessage.RouterAddr = router + var time uint64 + + if version == 9 { + flowMessage.Type = flowmessage.FlowMessage_NFV9 + } else if version == 10 { + flowMessage.Type = flowmessage.FlowMessage_IPFIX + } + flowMessage.SequenceNum = seqnum + + for i := range record { + df := record[i] + + v, ok := df.Value.([]byte) + if !ok { + continue + } + + switch df.Type { + + // Statistics + case netflow.NFV9_FIELD_IN_BYTES: + DecodeUNumber(v, &(flowMessage.Bytes)) + case netflow.NFV9_FIELD_IN_PKTS: + DecodeUNumber(v, &(flowMessage.Packets)) + case netflow.NFV9_FIELD_OUT_BYTES: + DecodeUNumber(v, &(flowMessage.Bytes)) + case netflow.NFV9_FIELD_OUT_PKTS: + DecodeUNumber(v, &(flowMessage.Packets)) + + // L4 + case netflow.NFV9_FIELD_L4_SRC_PORT: + DecodeUNumber(v, &(flowMessage.SrcPort)) + case netflow.NFV9_FIELD_L4_DST_PORT: + DecodeUNumber(v, &(flowMessage.DstPort)) + case netflow.NFV9_FIELD_PROTOCOL: + DecodeUNumber(v, &(flowMessage.Proto)) + + // Network + case netflow.NFV9_FIELD_SRC_AS: + DecodeUNumber(v, &(flowMessage.SrcAS)) + case netflow.NFV9_FIELD_DST_AS: + DecodeUNumber(v, &(flowMessage.DstAS)) + + // Interfaces + case netflow.NFV9_FIELD_INPUT_SNMP: + DecodeUNumber(v, &(flowMessage.SrcIf)) + case netflow.NFV9_FIELD_OUTPUT_SNMP: + DecodeUNumber(v, &(flowMessage.DstIf)) + + case netflow.NFV9_FIELD_FORWARDING_STATUS: + DecodeUNumber(v, &(flowMessage.ForwardingStatus)) + case netflow.NFV9_FIELD_SRC_TOS: + DecodeUNumber(v, &(flowMessage.IPTos)) + case netflow.NFV9_FIELD_TCP_FLAGS: + DecodeUNumber(v, &(flowMessage.TCPFlags)) + case netflow.NFV9_FIELD_MIN_TTL: + DecodeUNumber(v, &(flowMessage.IPTTL)) + + // IP + case netflow.NFV9_FIELD_IPV4_SRC_ADDR: + flowMessage.IPversion = flowmessage.FlowMessage_IPv4 + flowMessage.SrcIP = v + case netflow.NFV9_FIELD_IPV4_DST_ADDR: + flowMessage.IPversion = flowmessage.FlowMessage_IPv4 + flowMessage.DstIP = v + + case netflow.NFV9_FIELD_SRC_MASK: + DecodeUNumber(v, &(flowMessage.SrcNet)) + case netflow.NFV9_FIELD_DST_MASK: + DecodeUNumber(v, &(flowMessage.DstNet)) + + case netflow.NFV9_FIELD_IPV6_SRC_ADDR: + flowMessage.IPversion = flowmessage.FlowMessage_IPv6 + flowMessage.SrcIP = v + case netflow.NFV9_FIELD_IPV6_DST_ADDR: + flowMessage.IPversion = flowmessage.FlowMessage_IPv6 + flowMessage.DstIP = v + + case netflow.NFV9_FIELD_IPV6_SRC_MASK: + DecodeUNumber(v, &(flowMessage.SrcNet)) + case netflow.NFV9_FIELD_IPV6_DST_MASK: + DecodeUNumber(v, &(flowMessage.DstNet)) + + case netflow.NFV9_FIELD_IPV4_NEXT_HOP: + flowMessage.IPversion = flowmessage.FlowMessage_IPv4 + flowMessage.NextHop = v + case netflow.NFV9_FIELD_BGP_IPV4_NEXT_HOP: + flowMessage.IPversion = flowmessage.FlowMessage_IPv4 + flowMessage.NextHop = v + + case netflow.NFV9_FIELD_IPV6_NEXT_HOP: + flowMessage.IPversion = flowmessage.FlowMessage_IPv6 + flowMessage.NextHop = v + case netflow.NFV9_FIELD_BGP_IPV6_NEXT_HOP: + flowMessage.IPversion = flowmessage.FlowMessage_IPv6 + flowMessage.NextHop = v + + // Mac + case netflow.NFV9_FIELD_IN_SRC_MAC: + DecodeUNumber(v, &(flowMessage.SrcMac)) + case netflow.NFV9_FIELD_OUT_DST_MAC: + DecodeUNumber(v, &(flowMessage.DstMac)) + + case netflow.NFV9_FIELD_SRC_VLAN: + DecodeUNumber(v, &(flowMessage.VlanId)) + + default: + if version == 9 { + // NetFlow 
v9 time works with a differential based on router's uptime + switch df.Type { + case netflow.NFV9_FIELD_LAST_SWITCHED: + var timeLastSwitched uint32 + DecodeUNumber(v, &timeLastSwitched) + timeDiff := (uptime - timeLastSwitched) / 1000 + flowMessage.TimeFlow = uint64(baseTime - timeDiff) + } + } else if version == 10 { + switch df.Type { + case netflow.IPFIX_FIELD_flowEndSeconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlow = time + case netflow.IPFIX_FIELD_flowEndMilliseconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlow = time / 1000 + case netflow.IPFIX_FIELD_flowEndMicroseconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlow = time / 1000000 + case netflow.IPFIX_FIELD_flowEndNanoseconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlow = time / 1000000000 + } + } + } + + } + + if flowMessage.TimeFlow < flowMessage.TimeRecvd { + return flowMessage + } else { + NetFlowErrors.With( + prometheus.Labels{ + "router": routerStr, + "version": strconv.Itoa(int(version)), + "error": "garbage", + }). + Inc() + return nil + // Silently discard bad packet + } +} + +/*func ConvertNetFlowDataSet(router net.IP, version uint16, seqnum uint32, sampling uint64, baseTime uint32, uptime uint32, record []netflow.DataField) *flowmessage.FlowMessage { + routerStr := router.String() + flowMessage := &flowmessage.FlowMessage{} + + flowMessage.SamplingRate = sampling + + var proto uint8 + NetFlowPopulate(record, 4, &proto) + flowMessage.Proto = uint32(proto) + + var srcPort uint16 + var dstPort uint16 + + NetFlowPopulate(record, 7, &srcPort) + NetFlowPopulate(record, 11, &dstPort) + flowMessage.SrcPort = uint32(srcPort) + flowMessage.DstPort = uint32(dstPort) + + NetFlowPopulate(record, 16, &(flowMessage.SrcAS)) + NetFlowPopulate(record, 17, &(flowMessage.DstAS)) + NetFlowPopulate(record, 10, &(flowMessage.SrcIf)) + NetFlowPopulate(record, 14, &(flowMessage.DstIf)) + NetFlowPopulate(record, 89, &(flowMessage.ForwardingStatus)) + + var ttl uint8 + NetFlowPopulate(record, 52, &ttl) + flowMessage.IPTTL = uint32(ttl) + var tos uint8 + NetFlowPopulate(record, 5, &tos) + flowMessage.IPTos = uint32(tos) + var tcpFlags uint8 + NetFlowPopulate(record, 6, &tcpFlags) + flowMessage.TCPFlags = uint32(tcpFlags) + + ipSrc := net.IP{} + ipDst := net.IP{} + ipNh := net.IP{} + isv4 := NetFlowPopulate(record, 8, &(ipSrc)) + if(isv4) { + NetFlowPopulate(record, 12, &(ipDst)) + NetFlowPopulate(record, 15, &(ipNh)) + NetFlowPopulate(record, 18, &(ipNh)) + + flowMessage.IPversion = flowmessage.FlowMessage_IPv4 + } else { + NetFlowPopulate(record, 27, &(ipSrc)) + NetFlowPopulate(record, 28, &(ipDst)) + NetFlowPopulate(record, 62, &(ipNh)) + NetFlowPopulate(record, 63, &(ipNh)) + + flowMessage.IPversion = flowmessage.FlowMessage_IPv6 + } + flowMessage.SrcIP = ipSrc + flowMessage.DstIP = ipDst + flowMessage.NextHop = ipNh + flowMessage.RouterAddr = router + + var srcmask byte + var dstmask byte + NetFlowPopulate(record, 9, &srcmask) + NetFlowPopulate(record, 13, &dstmask) + flowMessage.SrcNet = uint32(srcmask) + flowMessage.DstNet = uint32(dstmask) + + recvd := uint64(time.Now().Unix()) + flowMessage.TimeRecvd = recvd + var flowTime uint64 + var bytes uint64 + var packets uint64 + + if(version == 9) { + var bytes32 uint32 + var packets32 uint32 + NetFlowPopulate(record, 1, &bytes32) + NetFlowPopulate(record, 2, &packets32) + bytes = uint64(bytes32) + packets = uint64(packets32) + + flowMessage.Type = flowmessage.FlowMessage_NFV9 + flowMessage.SequenceNum = seqnum + + var timeLastSwitched uint32; + NetFlowPopulate(record, 21, 
&timeLastSwitched) + NetFlowPopulate(record, 21, &timeLastSwitched) + timeDiff := (uptime - timeLastSwitched)/1000 + flowTime = uint64(baseTime - timeDiff) + + } else if(version == 10) { + NetFlowPopulate(record, 1, &bytes) + NetFlowPopulate(record, 2, &packets) + + flowMessage.Type = flowmessage.FlowMessage_IPFIX + flowMessage.SequenceNum = seqnum + + var time uint64; + NetFlowPopulate(record, 153, &time) + flowTime = uint64(time/1000) + } + flowMessage.Bytes = bytes + flowMessage.Packets = packets + + if(flowTime < recvd) { + flowMessage.TimeFlow = flowTime + return flowMessage + } else { + NetFlowErrors.With( + prometheus.Labels{ + "router": routerStr, + "version": strconv.Itoa(int(version)), + "error": "garbage", + }). + Inc() + return nil + //flowMessage.TimeFlow = recvd + // Silently discard bad packet + } +}*/ + +func SearchNetFlowDataSets(router net.IP, version uint16, seqnum uint32, sampling uint64, baseTime uint32, uptime uint32, dataFlowSet []netflow.DataFlowSet) []flowmessage.FlowMessage { + flowMessageSet := make([]flowmessage.FlowMessage, 0) + for _, dataFlowSetItem := range dataFlowSet { + for _, record := range dataFlowSetItem.Records { + fmsg := ConvertNetFlowDataSet(router, version, seqnum, sampling, baseTime, uptime, record.Values) + if fmsg != nil { + flowMessageSet = append(flowMessageSet, *fmsg) + } + } + } + return flowMessageSet +} + +func GetSamplingRate(key string, obsDomainId uint32, optionsDataFlowSet []netflow.OptionsDataFlowSet, samplingRateLock *sync.RWMutex, samplingRateMap SamplingRateMap) uint64 { + + for _, optionsDataFlowSetItem := range optionsDataFlowSet { + for _, record := range optionsDataFlowSetItem.Records { + var samplingRate uint32 + NetFlowPopulate(record.OptionsValues, 34, &samplingRate) + samplingRateLock.Lock() + samplingRateMap[key][obsDomainId] = uint64(samplingRate) + samplingRateLock.Unlock() + } + } + + samplingRateLock.RLock() + samplingRate := samplingRateMap[key][obsDomainId] + samplingRateLock.RUnlock() + + return samplingRate +} + +func GetNetFlowTemplatesSets(version uint16, packet interface{}) []netflow.TemplateFlowSet { + templatesFlowSet := make([]netflow.TemplateFlowSet, 0) + if version == 9 { + packetNFv9 := packet.(netflow.NFv9Packet) + for _, flowSet := range packetNFv9.FlowSets { + switch flowSet.(type) { + case netflow.TemplateFlowSet: + templatesFlowSet = append(templatesFlowSet, flowSet.(netflow.TemplateFlowSet)) + } + } + } else if version == 10 { + packetIPFIX := packet.(netflow.IPFIXPacket) + for _, flowSet := range packetIPFIX.FlowSets { + switch flowSet.(type) { + case netflow.TemplateFlowSet: + templatesFlowSet = append(templatesFlowSet, flowSet.(netflow.TemplateFlowSet)) + } + } + } + return templatesFlowSet +} + +func ProcessTemplates(router string, obsDomainId uint32, version uint16, templatesFlowSet []netflow.TemplateFlowSet, templateMap TemplateMap, templateMapLock *sync.RWMutex) { + for _, flowSet := range templatesFlowSet { + for _, record := range flowSet.Records { + CountTemplate(router, obsDomainId, true, version, record.TemplateId, templateMap, templateMapLock) + } + } +} + +func GetNetFlowOptionsTemplatesSets(version uint16, packet interface{}) []interface{} { + optionsTemplatesFlowSet := make([]interface{}, 0) + if version == 9 { + packetNFv9 := packet.(netflow.NFv9Packet) + for _, flowSet := range packetNFv9.FlowSets { + switch flowSet.(type) { + case netflow.NFv9OptionsTemplateFlowSet: + optionsTemplatesFlowSet = append(optionsTemplatesFlowSet, flowSet.(netflow.NFv9OptionsTemplateFlowSet)) + } + } + 
} else if version == 10 { + packetIPFIX := packet.(netflow.IPFIXPacket) + for _, flowSet := range packetIPFIX.FlowSets { + switch flowSet.(type) { + case netflow.IPFIXOptionsTemplateFlowSet: + optionsTemplatesFlowSet = append(optionsTemplatesFlowSet, flowSet.(netflow.IPFIXOptionsTemplateFlowSet)) + } + } + } + return optionsTemplatesFlowSet +} + +func CountTemplate(router string, obsDomainId uint32, notOptions bool, version uint16, id uint16, templateMap TemplateMap, templateMapLock *sync.RWMutex) { + templateMapLock.RLock() + _, ok := templateMap[router][obsDomainId][id] + templateMapLock.RUnlock() + if ok == false { + templateMapLock.Lock() + _, oksrc := templateMap[router] + if oksrc == false { + templateMap[router] = make(map[uint32]map[uint16]bool) + } + _, okobs := templateMap[router][obsDomainId] + if okobs == false { + templateMap[router][obsDomainId] = make(map[uint16]bool) + } + + typeStr := "template" + if notOptions == false { + typeStr = "options_template" + } + + templateMap[router][obsDomainId][id] = false + templateMapLock.Unlock() + NetFlowTemplatesStats.With( + prometheus.Labels{ + "router": router, + "version": strconv.Itoa(int(version)), + "obs_domain_id": strconv.Itoa(int(obsDomainId)), + "type": typeStr, + }). + Inc() + } +} + +func ProcessOptionsTemplates(router string, obsDomainId uint32, version uint16, templatesFlowSet []interface{}, templateMap TemplateMap, templateMapLock *sync.RWMutex) { + for _, flowSet := range templatesFlowSet { + if version == 9 { + for _, record := range flowSet.(netflow.NFv9OptionsTemplateFlowSet).Records { + CountTemplate(router, obsDomainId, false, version, record.TemplateId, templateMap, templateMapLock) + } + } else if version == 10 { + for _, record := range flowSet.(netflow.IPFIXOptionsTemplateFlowSet).Records { + CountTemplate(router, obsDomainId, false, version, record.TemplateId, templateMap, templateMapLock) + } + } + } +} + +func GetNetFlowOptionsDataSets(version uint16, packet interface{}) []netflow.OptionsDataFlowSet { + optionsDataFlowSet := make([]netflow.OptionsDataFlowSet, 0) + if version == 9 { + packetNFv9 := packet.(netflow.NFv9Packet) + for _, flowSet := range packetNFv9.FlowSets { + switch flowSet.(type) { + case netflow.OptionsDataFlowSet: + optionsDataFlowSet = append(optionsDataFlowSet, flowSet.(netflow.OptionsDataFlowSet)) + } + } + } else if version == 10 { + packetIPFIX := packet.(netflow.IPFIXPacket) + for _, flowSet := range packetIPFIX.FlowSets { + switch flowSet.(type) { + case netflow.OptionsDataFlowSet: + optionsDataFlowSet = append(optionsDataFlowSet, flowSet.(netflow.OptionsDataFlowSet)) + } + } + } + return optionsDataFlowSet +} + +func GetNetFlowDataFlowSets(version uint16, packet interface{}) []netflow.DataFlowSet { + dataFlowSet := make([]netflow.DataFlowSet, 0) + if version == 9 { + packetNFv9 := packet.(netflow.NFv9Packet) + for _, flowSet := range packetNFv9.FlowSets { + switch flowSet.(type) { + case netflow.DataFlowSet: + dataFlowSet = append(dataFlowSet, flowSet.(netflow.DataFlowSet)) + } + } + } else if version == 10 { + packetIPFIX := packet.(netflow.IPFIXPacket) + for _, flowSet := range packetIPFIX.FlowSets { + switch flowSet.(type) { + case netflow.DataFlowSet: + dataFlowSet = append(dataFlowSet, flowSet.(netflow.DataFlowSet)) + } + } + } + return dataFlowSet +} + +func ProcessNetFlowError(msgDec interface{}, err error, args interface{}, conf interface{}) (bool, error) { + msgDecoded := msgDec.(netflow.BaseMessageDecoded) + packet := msgDecoded.Packet + version := msgDecoded.Version + 
seqnum := uint32(0) + if version == 9 { + seqnum = packet.(netflow.NFv9Packet).SequenceNumber + } else if version == 10 { + seqnum = packet.(netflow.IPFIXPacket).SequenceNumber + } + + switch err := err.(type) { + case *netflow.ErrorTemplateNotFound: + log.WithFields(log.Fields{ + "version": version, + "source": msgDecoded.Src.String(), + "seqnum": seqnum, + "error": err, + }).Debug("Template not found") + + NetFlowErrors.With( + prometheus.Labels{ + "router": msgDecoded.Src.String(), + "version": strconv.Itoa(int(version)), + "error": "template_not_found", + }). + Add(1) + default: + log.WithFields(log.Fields{ + "version": version, + "source": msgDecoded.Src.String(), + "seqnum": seqnum, + "error": err, + }).Error("Error processing") + + NetFlowErrors.With( + prometheus.Labels{ + "router": msgDecoded.Src.String(), + "version": strconv.Itoa(int(version)), + "error": "undefined", + }). + Add(1) + } + + return true, nil +} + +func MetricTypeNetFlow(router string, version uint16, packet interface{}) { + countMap := make(map[string]int) + countRecordsMap := make(map[string]int) + if version == 9 { + packetNFv9 := packet.(netflow.NFv9Packet) + for _, flowSet := range packetNFv9.FlowSets { + switch flowSet := flowSet.(type) { + case netflow.TemplateFlowSet: + countMap["TemplateFlowSet"]++ + countRecordsMap["TemplateFlowSet"] += len(flowSet.Records) + case netflow.DataFlowSet: + countMap["DataFlowSet"]++ + countRecordsMap["DataFlowSet"] += len(flowSet.Records) + case netflow.NFv9OptionsTemplateFlowSet: + countMap["OptionsTemplateFlowSet"]++ + countRecordsMap["OptionsTemplateFlowSet"] += len(flowSet.Records) + case netflow.OptionsDataFlowSet: + countMap["OptionsDataFlowSet"]++ + countRecordsMap["OptionsDataFlowSet"] += len(flowSet.Records) + } + } + } else if version == 10 { + packetIPFIX := packet.(netflow.IPFIXPacket) + for _, flowSet := range packetIPFIX.FlowSets { + switch flowSet := flowSet.(type) { + case netflow.TemplateFlowSet: + countMap["TemplateFlowSet"]++ + countRecordsMap["TemplateFlowSet"] += len(flowSet.Records) + case netflow.DataFlowSet: + countMap["DataFlowSet"]++ + countRecordsMap["DataFlowSet"] += len(flowSet.Records) + case netflow.IPFIXOptionsTemplateFlowSet: + countMap["OptionsTemplateFlowSet"]++ + countRecordsMap["OptionsTemplateFlowSet"] += len(flowSet.Records) + case netflow.OptionsDataFlowSet: + countMap["OptionsDataFlowSet"]++ + countRecordsMap["OptionsDataFlowSet"] += len(flowSet.Records) + } + } + } + + for keyType := range countMap { + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": router, + "version": strconv.Itoa(int(version)), + "type": keyType, + }). + Add(float64(countMap[keyType])) + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": router, + "version": strconv.Itoa(int(version)), + "type": keyType, + }). 
+ Add(float64(countRecordsMap[keyType])) + } + +} + +func ProcessMessageNetFlow(msgDec interface{}, args interface{}, conf interface{}) (bool, error) { + msgDecoded := msgDec.(netflow.BaseMessageDecoded) + packet := msgDecoded.Packet + + version := msgDecoded.Version + seqnum := uint32(0) + router := msgDecoded.Src.String() + ":" + strconv.Itoa(msgDecoded.Port) + var baseTime uint32 + var uptime uint32 + var obsDomainId uint32 + if version == 9 { + seqnum = packet.(netflow.NFv9Packet).SequenceNumber + baseTime = packet.(netflow.NFv9Packet).UnixSeconds + uptime = packet.(netflow.NFv9Packet).SystemUptime + obsDomainId = packet.(netflow.NFv9Packet).SourceId + } else if version == 10 { + seqnum = packet.(netflow.IPFIXPacket).SequenceNumber + baseTime = packet.(netflow.IPFIXPacket).ExportTime + obsDomainId = packet.(netflow.IPFIXPacket).ObservationDomainId + } + + if version == 9 || version == 10 { + dataFlowSet := GetNetFlowDataFlowSets(msgDecoded.Version, packet) + + args, ok := args.(ProcessArguments) + if ok { + var samplingRate uint64 + routerKey := router + if args.SamplingRateFixed == -1 { + optionsDataFlowSet := GetNetFlowOptionsDataSets(msgDecoded.Version, packet) + if args.UniqueTemplates { + routerKey = "unique" + } + samplingRate = GetSamplingRate(routerKey, obsDomainId, optionsDataFlowSet, args.SamplingRateLock, args.SamplingRateMap) + } else { + samplingRate = uint64(args.SamplingRateFixed) + } + templatesFlowSet := GetNetFlowTemplatesSets(version, packet) + ProcessTemplates(routerKey, obsDomainId, version, templatesFlowSet, args.TemplateMap, args.TemplateMapLock) + optionsTemplatesFlowSet := GetNetFlowOptionsTemplatesSets(version, packet) + ProcessOptionsTemplates(routerKey, obsDomainId, version, optionsTemplatesFlowSet, args.TemplateMap, args.TemplateMapLock) + + flowMessageSet := SearchNetFlowDataSets(msgDecoded.Src, msgDecoded.Version, seqnum, samplingRate, baseTime, uptime, dataFlowSet) + + MetricTypeNetFlow(router, msgDecoded.Version, packet) + NetFlowStats.With( + prometheus.Labels{ + "router": router, + "version": strconv.Itoa(int(version)), + }). + Inc() + + log.WithFields(log.Fields{ + "type": "NetFlow/IPFIX", + "version": version, + "source": router, + "seqnum": seqnum, + "samplingRate": strconv.Itoa(int(samplingRate)), + "count_flowmessages": len(flowMessageSet), + }).Debug("Message processed") + + for _, flowMessage := range flowMessageSet { + if args.KafkaState != nil { + args.KafkaState.SendKafkaFlowMessage(flowMessage) + } + + timeDiff := flowMessage.TimeRecvd - flowMessage.TimeFlow + + NetFlowTimeStatsSum.With( + prometheus.Labels{ + "router": router, + "version": strconv.Itoa(int(version)), + }). + Observe(float64(timeDiff)) + } + } + } else { + NetFlowErrors.With( + prometheus.Labels{ + "router": router, + "version": "unknown", + "error": "netflow_version", + }). 
+ Inc() + + return false, errors.New(fmt.Sprintf("Bad NetFlow version: %v\n", version)) + } + + return true, nil +} diff --git a/producer/producer_sf.go b/producer/producer_sf.go new file mode 100644 index 0000000..09df9a2 --- /dev/null +++ b/producer/producer_sf.go @@ -0,0 +1,329 @@ +package producer + +import ( + "encoding/binary" + "errors" + "fmt" + log "github.com/Sirupsen/logrus" + "github.com/cloudflare/goflow/decoders/sflow" + flowmessage "github.com/cloudflare/goflow/pb" + "github.com/prometheus/client_golang/prometheus" + "net" + "strconv" + "time" +) + +var ( + SFlowStats = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_sf_count", + Help: "sFlows processed.", + }, + []string{"router", "agent", "version"}, + ) + SFlowErrors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_sf_errors_count", + Help: "sFlows processed errors.", + }, + []string{"router", "version", "error"}, + ) + SFlowSampleStatsSum = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_sf_samples_sum", + Help: "SFlows samples sum.", + }, + []string{"router", "agent", "version", "type"}, // counter, flow, expanded... + ) + SFlowSampleRecordsStatsSum = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_sf_samples_records_sum", + Help: "SFlows samples sum of records.", + }, + []string{"router", "agent", "version", "type"}, // data-template, data, opts... + ) +) + +func GetSFlowFlowSamples(packet *sflow.Packet) []interface{} { + flowSamples := make([]interface{}, 0) + for _, sample := range packet.Samples { + switch sample.(type) { + case sflow.FlowSample: + flowSamples = append(flowSamples, sample) + case sflow.ExpandedFlowSample: + flowSamples = append(flowSamples, sample) + } + } + return flowSamples +} + +func ParseSampledHeader(flowMessage *flowmessage.FlowMessage, sampledHeader *sflow.SampledHeader) error { + data := (*sampledHeader).HeaderData + switch (*sampledHeader).Protocol { + case 1: // Ethernet + etherType := data[12:14] + var dataTransport []byte + var nextHeader byte + var tos byte + var ttl byte + var tcpflags byte + srcIP := net.IP{} + dstIP := net.IP{} + offset := 14 + + var srcMac uint64 + var dstMac uint64 + + srcMac = binary.BigEndian.Uint64(append([]byte{0, 0}, data[0:6]...)) + dstMac = binary.BigEndian.Uint64(append([]byte{0, 0}, data[6:12]...)) + (*flowMessage).SrcMac = srcMac + (*flowMessage).DstMac = dstMac + + if etherType[0] == 0x81 && etherType[1] == 0x0 { // VLAN 802.1Q + (*flowMessage).VlanId = uint32(binary.BigEndian.Uint16(data[14:16])) + offset += 4 + etherType = data[16:18] + } + + (*flowMessage).Etype = uint32(binary.BigEndian.Uint16(etherType[0:2])) + + if etherType[0] == 0x8 && etherType[1] == 0x0 { // IPv4 + (*flowMessage).IPversion = flowmessage.FlowMessage_IPv4 + + if len(data) >= offset+36 { + nextHeader = data[offset+9] + srcIP = data[offset+12 : offset+16] + dstIP = data[offset+16 : offset+20] + dataTransport = data[offset+20 : len(data)] + tos = data[offset+1] + ttl = data[offset+8] + } + } else if etherType[0] == 0x86 && etherType[1] == 0xdd { // IPv6 + (*flowMessage).IPversion = flowmessage.FlowMessage_IPv6 + if len(data) >= offset+40 { + nextHeader = data[offset+6] + srcIP = data[offset+8 : offset+24] + dstIP = data[offset+24 : offset+40] + dataTransport = data[offset+40 : len(data)] + + tostmp := uint32(binary.BigEndian.Uint16(data[offset : offset+2])) + tos = uint8(tostmp & 0x0ff0 >> 4) + ttl = data[offset+7] + } + } else if etherType[0] == 0x8 && etherType[1] == 0x6 { // 
ARP + } else { + return errors.New(fmt.Sprintf("Unknown EtherType: %v\n", etherType)) + } + + if len(dataTransport) >= 4 && (nextHeader == 17 || nextHeader == 6) { + (*flowMessage).SrcPort = uint32(binary.BigEndian.Uint16(dataTransport[0:2])) + (*flowMessage).DstPort = uint32(binary.BigEndian.Uint16(dataTransport[2:4])) + } + + if len(dataTransport) >= 13 && nextHeader == 6 { + tcpflags = dataTransport[13] + } + + (*flowMessage).SrcIP = srcIP + (*flowMessage).DstIP = dstIP + (*flowMessage).Proto = uint32(nextHeader) + (*flowMessage).IPTos = uint32(tos) + (*flowMessage).IPTTL = uint32(ttl) + (*flowMessage).TCPFlags = uint32(tcpflags) + + //fmt.Printf("TEst %v:%v %v:%v \n", srcIP.String(), (*flowMessage).SrcPort, dstIP.String(), (*flowMessage).DstPort) + } + return nil +} + +func SearchSFlowSamples(router net.IP, agent net.IP, version uint32, seqnum uint32, samples []interface{}) []flowmessage.FlowMessage { + flowMessageSet := make([]flowmessage.FlowMessage, 0) + //routerStr := router.String() + //agentStr := agent.String() + + for _, flowSample := range samples { + var records []sflow.FlowRecord + + flowMessage := flowmessage.FlowMessage{} + flowMessage.Type = flowmessage.FlowMessage_SFLOW + + switch flowSample := flowSample.(type) { + case sflow.FlowSample: + records = flowSample.Records + flowMessage.SamplingRate = uint64(flowSample.SamplingRate) + flowMessage.SrcIf = flowSample.Input + flowMessage.DstIf = flowSample.Output + case sflow.ExpandedFlowSample: + records = flowSample.Records + flowMessage.SamplingRate = uint64(flowSample.SamplingRate) + flowMessage.SrcIf = flowSample.InputIfValue + flowMessage.DstIf = flowSample.OutputIfValue + } + + flowMessage.SequenceNum = seqnum + recvd := uint64(time.Now().Unix()) + flowMessage.TimeRecvd = recvd + flowMessage.TimeFlow = recvd + + ipNh := net.IP{} + ipSrc := net.IP{} + ipDst := net.IP{} + flowMessage.Packets = 1 + flowMessage.RouterAddr = agent + for _, record := range records { + switch recordData := record.Data.(type) { + case sflow.SampledHeader: + flowMessage.Bytes = uint64(recordData.FrameLength) + ParseSampledHeader(&flowMessage, &recordData) + case sflow.SampledIPv4: + ipSrc = recordData.Base.SrcIP + ipDst = recordData.Base.DstIP + flowMessage.SrcIP = ipSrc + flowMessage.DstIP = ipDst + flowMessage.IPversion = flowmessage.FlowMessage_IPv4 + flowMessage.Bytes = uint64(recordData.Base.Length) + flowMessage.Proto = recordData.Base.Protocol + flowMessage.SrcPort = recordData.Base.SrcPort + flowMessage.DstPort = recordData.Base.DstPort + flowMessage.IPTos = recordData.Tos + flowMessage.Etype = 0x800 + case sflow.SampledIPv6: + ipSrc = recordData.Base.SrcIP + ipDst = recordData.Base.DstIP + flowMessage.IPversion = flowmessage.FlowMessage_IPv6 + flowMessage.SrcIP = ipSrc + flowMessage.DstIP = ipDst + flowMessage.Bytes = uint64(recordData.Base.Length) + flowMessage.Proto = recordData.Base.Protocol + flowMessage.SrcPort = recordData.Base.SrcPort + flowMessage.DstPort = recordData.Base.DstPort + flowMessage.IPTos = recordData.Priority + flowMessage.Etype = 0x86dd + case sflow.ExtendedRouter: + ipNh = recordData.NextHop + flowMessage.NextHop = ipNh + flowMessage.SrcNet = recordData.SrcMaskLen + flowMessage.DstNet = recordData.DstMaskLen + case sflow.ExtendedGateway: + ipNh = recordData.NextHop + flowMessage.NextHop = ipNh + flowMessage.SrcAS = recordData.SrcAS + if len(recordData.ASPath) > 0 { + flowMessage.DstAS = recordData.ASPath[len(recordData.ASPath)-1] + flowMessage.NextHopAS = recordData.ASPath[0] + flowMessage.SrcAS = recordData.AS + } 
else { + flowMessage.DstAS = recordData.AS + } + } + } + flowMessageSet = append(flowMessageSet, flowMessage) + //fmt.Printf("%v\n", flowMessage.String()) + } + return flowMessageSet +} + +func MetricTypeSFlow(router string, agent string, version uint32, packet sflow.Packet) { + countMap := make(map[string]int) + countRecordsMap := make(map[string]int) + if version == 5 { + for _, flowSample := range packet.Samples { + switch flowSample := flowSample.(type) { + case sflow.FlowSample: + countMap["FlowSample"]++ + countRecordsMap["FlowSample"] += len(flowSample.Records) + case sflow.CounterSample: + name := "CounterSample" + if flowSample.Header.Format == 4 { + name = "Expanded" + name + } + countMap[name]++ + countRecordsMap[name] += len(flowSample.Records) + case sflow.ExpandedFlowSample: + countMap["ExpandedFlowSample"]++ + countRecordsMap["ExpandedFlowSample"] += len(flowSample.Records) + } + } + } + + for keyType := range countMap { + SFlowSampleStatsSum.With( + prometheus.Labels{ + "router": router, + "agent": agent, + "version": strconv.Itoa(int(version)), + "type": keyType, + }). + Add(float64(countMap[keyType])) + + SFlowSampleRecordsStatsSum.With( + prometheus.Labels{ + "router": router, + "agent": agent, + "version": strconv.Itoa(int(version)), + "type": keyType, + }). + Add(float64(countRecordsMap[keyType])) + } +} + +func ProcessSFlowError(msgDec interface{}, err error, args interface{}, conf interface{}) (bool, error) { + return true, nil +} + +func ProcessMessageSFlow(msgDec interface{}, args interface{}, conf interface{}) (bool, error) { + msgDecoded := msgDec.(sflow.BaseMessageDecoded) + packet := msgDecoded.Packet + + version := msgDecoded.Version + router := msgDecoded.Src.String() + ":" + strconv.Itoa(msgDecoded.Port) + if version == 5 { + //fmt.Printf("%v %v %v\n", packet, router) + packetV5, ok := packet.(sflow.Packet) + args, ok2 := args.(ProcessArguments) + if ok && ok2 { + seqnum := packetV5.SequenceNumber + var agent net.IP + agent = packetV5.AgentIP + + flowSamples := GetSFlowFlowSamples(&packetV5) + flowMessageSet := SearchSFlowSamples(msgDecoded.Src, agent, version, seqnum, flowSamples) + + MetricTypeSFlow(router, agent.String(), version, packetV5) + SFlowStats.With( + prometheus.Labels{ + "router": router, + "agent": agent.String(), + "version": strconv.Itoa(int(version)), + }). + Inc() + + log.WithFields(log.Fields{ + "type": "sflow", + "version": version, + "source": router, + "seqnum": seqnum, + "count_flowmessages": len(flowMessageSet), + }).Debug("Message processed") + + for _, flowMessage := range flowMessageSet { + if args.KafkaState != nil { + args.KafkaState.SendKafkaFlowMessage(flowMessage) + } + } + } + + } else { + SFlowErrors.With( + prometheus.Labels{ + "router": router, + "version": "unknown", + "error": "sflow_version", + }). + Inc() + + return false, errors.New(fmt.Sprintf("Bad SFlow version: %v\n", version)) + } + + return true, nil +}
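
A note on the number decoding above: the irregular-length branch of DecodeUNumber (fields of 3, 5, 6 or 7 bytes, which NetFlow/IPFIX templates can declare via reduced-size encoding) shifts each byte in from the most significant end. Below is a small self-contained check of that arithmetic; it is a sketch only, and decodeBigEndianUint is a hypothetical helper name, not part of this patch.

package main

import "fmt"

// decodeBigEndianUint mirrors DecodeUNumber's fallback loop for field lengths
// other than 1, 2, 4 or 8 bytes: byte i is shifted left by 8*(len-i-1) bits.
// (Hypothetical helper for illustration; the patch keeps this logic inline.)
func decodeBigEndianUint(b []byte) uint64 {
	var o uint64
	for i, v := range b {
		o |= uint64(v) << uint(8*(len(b)-i-1))
	}
	return o
}

func main() {
	// A 3-byte field 0x01 0x02 0x03 decodes to 0x010203 = 66051.
	fmt.Println(decodeBigEndianUint([]byte{0x01, 0x02, 0x03}))
}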
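
For reference, a minimal end-to-end usage sketch of the producer package introduced above. This is a sketch only: it assumes nothing beyond the exported API in this patch (CreateProcessArguments, the KafkaState it returns, SendKafkaFlowMessage, and FlowMessage fields used by producer_nf.go/producer_sf.go); the addresses, ports and counters below are placeholder values, and a reachable Kafka broker (the -kafka.out.brokers default or an SRV record) is required, since StartKafkaProducer exits on connection errors.

package main

import (
	"flag"
	"net"
	"time"

	flowmessage "github.com/cloudflare/goflow/pb"
	"github.com/cloudflare/goflow/producer"
)

func main() {
	// Honour the -kafka.out.topic / -kafka.out.srv / -kafka.out.brokers flags
	// registered by producer/kafka.go.
	flag.Parse()

	// Registers the NetFlow/sFlow Prometheus collectors, starts the async Kafka
	// producer and builds the shared sampling-rate and template caches that the
	// ProcessMessage* callbacks expect. A fixed sampling rate of -1 means the rate
	// is read from the options data sets instead.
	args := producer.CreateProcessArguments(true, -1, false)

	// ProcessMessageNetFlow and ProcessMessageSFlow are intended to be wired in as
	// decoder "done" callbacks; they receive a decoded message plus this args value.
	_ = producer.ProcessMessageNetFlow
	_ = producer.ProcessMessageSFlow

	// A FlowMessage can also be pushed to Kafka directly (illustrative values only).
	args.KafkaState.SendKafkaFlowMessage(flowmessage.FlowMessage{
		Type:         flowmessage.FlowMessage_NFV9,
		TimeRecvd:    uint64(time.Now().Unix()),
		IPversion:    flowmessage.FlowMessage_IPv4,
		SrcIP:        net.ParseIP("192.0.2.1").To4(),
		DstIP:        net.ParseIP("198.51.100.2").To4(),
		Proto:        6,
		SrcPort:      443,
		DstPort:      52000,
		Bytes:        1500,
		Packets:      1,
		SamplingRate: 1024,
	})
}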