Reorder, categorise and clean up. Also there is now a somewhat useful README.
92
Python/test/FindGRE.py
Normal file
@@ -0,0 +1,92 @@
import netflow, socket, json, time, os, influxdb_client, ipaddress
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS, ASYNCHRONOUS, WriteOptions
from datetime import timedelta
from proto import manWhatTheProto
from IP2Loc import ermWhatTheCountry
from whatDomain import ermWhatATheIpFromDomainYaCrazy, ermWhatAAAATheIpFromDomainYaCrazy
from concurrent.futures import ThreadPoolExecutor


# Netentry preconf
WHAT_THE_NETFLOW_PORT = 2055
WHAT_THE_NETFLOW_IP = "0.0.0.0"


# Threaded flow processor
def process_flow(i, entry):
    # old string-parsing approach, kept for reference
    #tmpEntry = str(entry)
    #tmpEntry = tmpEntry[22:-1]
    #tmpEntry2 = tmpEntry.replace("'", '"')
    #print(tmpEntry2)
    #print(entry)
    #exit()
    #dictEntry = json.loads(tmpEntry2)
    #bigDict[i] = dictEntry

    # take data out of the netentry
    inEntry = entry.data

    # Convert IPs from integers to dotted-quad strings
    inEntry["IPV4_SRC_ADDR"] = str(ipaddress.IPv4Address(inEntry["IPV4_SRC_ADDR"]))
    inEntry["IPV4_DST_ADDR"] = str(ipaddress.IPv4Address(inEntry["IPV4_DST_ADDR"]))
    inEntry["NEXT_HOP"] = str(ipaddress.IPv4Address(inEntry["NEXT_HOP"]))

    # Convert time from ms to HH:MM:SS
    first = int(inEntry["FIRST_SWITCHED"])
    last = int(inEntry["LAST_SWITCHED"])

    inEntry["FIRST_SWITCHED_HR"] = str(timedelta(milliseconds=first))
    inEntry["LAST_SWITCHED_HR"] = str(timedelta(milliseconds=last))

    protoHereWhat = manWhatTheProto(int(inEntry["PROTO"]))

    if protoHereWhat == "GRE" or inEntry["PROTO"] == 47:
        # GRE flow found; note that exit() raises SystemExit in this worker
        # thread, which only surfaces once the main thread calls future.result()
        print("skibidi")
        exit()

    print("----------------")
    print(inEntry["PROTO"])
    return (i, inEntry)


# Bind
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((WHAT_THE_NETFLOW_IP, WHAT_THE_NETFLOW_PORT))

print("Ready")

with ThreadPoolExecutor(max_workers=8) as executor:
    while True:
        # Get netentry data ig?
        sock.settimeout(5)

        payload, client = sock.recvfrom(4096)  # experimental, tested with 1464 bytes
        p = netflow.parse_packet(payload)  # Test result: <ExportPacket v5 with 30 records>
        #print(p.entries)  # Test result: 5

        # Submit all entries to the thread pool
        futures = [executor.submit(process_flow, i, entry) for i, entry in enumerate(p.flows, 1)]

        bigDict = {}
        # inflxdb_Datazz_To_Send = []

        for future in futures:
            i, inEntry = future.result()
            # inflxdb_Datazz_To_Send.append(point)
            bigDict[i] = inEntry

        # Send data to InfluxDB
        # write_api.write(bucket=bucket, org=org, record=inflxdb_Datazz_To_Send)
        # time.sleep(INFLX_SEPARATE_POINTS)  # separate points

        print(f"{len(bigDict)} <--- This many entries")

        # Clean up before another loop
        bigDict.clear()
        # inflxdb_Datazz_To_Send.clear()
        #print(bigDict)
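A side note on the GRE check above: exit() inside a pooled worker raises SystemExit in that worker thread only, and the process dies later, when the main thread calls future.result() and the exception re-raises there. A minimal, self-contained sketch of the alternative, returning a match flag from the worker and stopping in the main thread (the fake flow dicts and the GRE constant are illustrative, not this repo's code):

from concurrent.futures import ThreadPoolExecutor

GRE = 47  # IANA protocol number for GRE

def check_flow(i, flow):
    # return a flag instead of calling exit() inside the worker
    return i, flow, flow.get("PROTO") == GRE

# fake flow records standing in for entry.data
fake_flows = [{"PROTO": 6}, {"PROTO": 17}, {"PROTO": 47}]

with ThreadPoolExecutor(max_workers=8) as executor:
    futures = [executor.submit(check_flow, i, f) for i, f in enumerate(fake_flows, 1)]
    for future in futures:
        i, flow, is_gre = future.result()
        if is_gre:
            print(f"flow {i} is GRE")
            break  # decide in the main thread, not the worker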
116
Python/test/mariaDB.py
Normal file
@@ -0,0 +1,116 @@
import netflow, socket, json, time, os, influxdb_client, ipaddress
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS, ASYNCHRONOUS, WriteOptions
from datetime import timedelta
from proto import manWhatTheProto
from IP2Loc import ermWhatTheCountry
from whatDomain import ermWhatATheIpFromDomainYaCrazy, ermWhatAAAATheIpFromDomainYaCrazy

# Netentry preconf
WHAT_THE_NETFLOW_PORT = 2055
WHAT_THE_NETFLOW_IP = "0.0.0.0"


# Other preconf
bigDict = {}
inflxdb_Datazz_To_Send = []

# Bind
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((WHAT_THE_NETFLOW_IP, WHAT_THE_NETFLOW_PORT))

print("Ready")

while True:
    # Get netentry data ig?
    payload, client = sock.recvfrom(4096)  # experimental, tested with 1464 bytes
    p = netflow.parse_packet(payload)  # Test result: <ExportPacket v5 with 30 records>
    #print(p.entries)  # Test result: 5

    for i, entry in enumerate(p.flows, 1):
        # old string-parsing approach, kept for reference
        #tmpEntry = str(entry)
        #tmpEntry = tmpEntry[22:-1]
        #tmpEntry2 = tmpEntry.replace("'", '"')
        #print(tmpEntry2)
        #print(entry)
        #exit()
        #dictEntry = json.loads(tmpEntry2)
        #bigDict[i] = dictEntry

        # take data out of the netentry
        inEntry = entry.data

        # Convert IPs from integers to dotted-quad strings
        inEntry["IPV4_SRC_ADDR"] = str(ipaddress.IPv4Address(inEntry["IPV4_SRC_ADDR"]))
        inEntry["IPV4_DST_ADDR"] = str(ipaddress.IPv4Address(inEntry["IPV4_DST_ADDR"]))
        inEntry["NEXT_HOP"] = str(ipaddress.IPv4Address(inEntry["NEXT_HOP"]))

        # Convert time from ms to HH:MM:SS
        first = int(inEntry["FIRST_SWITCHED"])
        last = int(inEntry["LAST_SWITCHED"])

        inEntry["FIRST_SWITCHED_HR"] = str(timedelta(milliseconds=first))
        inEntry["LAST_SWITCHED_HR"] = str(timedelta(milliseconds=last))

        # Prep InfluxDB data
        # NOTE: measurement, MACHINE_TAG and ROUTER_TAG are never defined in
        # this test script; see the setup sketch after this file
        inflxdb_Data_To_Send = (
            influxdb_client.Point(f"{measurement}-script")
            .tag("MACHINE", MACHINE_TAG)
            .tag("ROUTER", ROUTER_TAG)
            .field("dstAddr", inEntry["IPV4_DST_ADDR"])
            .field("srcAddr", inEntry["IPV4_SRC_ADDR"])
            .field("nextHop", inEntry["NEXT_HOP"])
            .field("inptInt", inEntry["INPUT"])
            .field("outptInt", inEntry["OUTPUT"])
            .field("inPackt", inEntry["IN_PACKETS"])
            .field("outPakt", inEntry["IN_OCTETS"])  # note: IN_OCTETS (byte count), despite the field name
            .field("frstSwtchd", inEntry["FIRST_SWITCHED"])
            .field("lstSwtchd", inEntry["LAST_SWITCHED"])
            .field("srcPort", inEntry["SRC_PORT"])
            .field("dstPort", inEntry["DST_PORT"])
            .field("tcpFlags", inEntry["TCP_FLAGS"])
            .tag("proto", manWhatTheProto(int(inEntry["PROTO"])))
            .field("tos", inEntry["TOS"])
            .field("srcAS", inEntry["SRC_AS"])
            .field("dstAS", inEntry["DST_AS"])
            .field("srcMask", inEntry["SRC_MASK"])
            .field("dstMask", inEntry["DST_MASK"])
            .field("dstCntr", ermWhatTheCountry(str(inEntry["IPV4_DST_ADDR"])))
            .field("srcCntr", ermWhatTheCountry(str(inEntry["IPV4_SRC_ADDR"])))
        )

        inflxdb_Datazz_To_Send.append(inflxdb_Data_To_Send)

        #i += 1
        #type(tmpEntry)
        #print(dictEntry)
        #print(tmpEntry.lstrip(20))

        print("----------------")
        bigDict[i] = inEntry
    # end of per-flow loop

    print()
    print(bigDict)
    exit()  # debug: stop after the first packet; the write below is unreachable for now

    # Send data to InfluxDB
    write_api.write(bucket=bucket, org="staging", record=inflxdb_Datazz_To_Send)
    time.sleep(INFLX_SEPARATE_POINTS)  # separate points

    print(f"{len(bigDict)} <--- This many entries")

    # Clean up before another loop
    bigDict.clear()
    inflxdb_Datazz_To_Send.clear()

    #print(bigDict)
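mariaDB.py references write_api, bucket, measurement, MACHINE_TAG, ROUTER_TAG and INFLX_SEPARATE_POINTS without defining them anywhere in the file. A minimal setup sketch that would satisfy those names, where every URL, token, tag and bucket value is a placeholder assumption rather than this repo's real config:

import influxdb_client
from influxdb_client.client.write_api import SYNCHRONOUS

measurement = "netflow"        # assumed measurement name
MACHINE_TAG = "collector-01"   # assumed
ROUTER_TAG = "edge-router"     # assumed
bucket = "netflow-staging"     # assumed
INFLX_SEPARATE_POINTS = 0.001  # assumed pacing delay, in seconds

client = influxdb_client.InfluxDBClient(
    url="http://localhost:8086",  # placeholder
    token="YOUR_TOKEN",           # placeholder
    org="staging",                # matches the org= used in the write call
)
write_api = client.write_api(write_options=SYNCHRONOUS)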
65
Python/test/test.py
Normal file
File diff suppressed because one or more lines are too long
5
Python/test/test5.py
Normal file
File diff suppressed because one or more lines are too long