#!/usr/bin/env python

import json
import sys
import time

from netaddr import IPNetwork, IPSet

import dn42
from dn42 import Prefix, UsedPrefix

# Our own AS number, recorded as the origin of the generated data.
MY_AS = 76184


if __name__ == '__main__':
    # Arguments: directory of AS records, output file for the prefix
    # tree, output file for the AS data.
    asdata_dir = sys.argv[1]
    prefixes_out = sys.argv[2]
    asdata_out = sys.argv[3]

    # Map "AS<number>" keys to plain integer AS numbers.
    asdata = dn42.parse_records(asdata_dir)
    asdata = {int(asn[2:]): d for asn, d in asdata.items()}

    # stdin: one "<prefix> <asn>" pair per line.
    lines = sys.stdin.readlines()
    prefixes = [l.split()[0] for l in lines]

    # IPSets are great, but they "compact" redundant subnets, which we
    # don't want for used prefixes.  Indeed, merging two subnets is not
    # correct when they belong to different people.  However, for
    # unused subnets, we don't care: that's why we use IPSet.
    used_set = IPSet(prefixes)
    used_set = used_set & dn42.ADDRSPACE
    unused_set = dn42.ADDRSPACE ^ used_set

    # Work with lists of our custom classes.
    used = [UsedPrefix(l.split()[0], {"asn": l.split()[1]}) for l in lines]
    used = [p for p in used if p.prefix in dn42.ADDRSPACE]
    unused = [Prefix(str(p)) for p in unused_set.iter_cidrs()]
    all_prefixes = used + unused
    all_prefixes.sort(key=lambda p: p.prefix)

    # Sanity check: used and unused prefixes must cover the whole dn42
    # address space exactly.
    assert IPSet([p.prefix for p in all_prefixes]) == dn42.ADDRSPACE

    # TODO: handle prefix inclusion, this generates incorrect data
    # right now.
    result = dict()
    result["name"] = "addrspace"
    result["display"] = "none"
    result["date"] = time.time()
    result["origin"] = MY_AS
    result["prefixes"] = len(used)
    data = {"name": "prefixes",
            "prefix": dn42.ROOT_PREFIX,
            "children": [p.to_dict() for p in all_prefixes]}
    result["children"] = [{"name": "empty", "size": 1}, data]

    with open(prefixes_out, "w") as f:
        json.dump(result, f)
    with open(asdata_out, "w") as f:
        json.dump(asdata, f)
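
# ---------------------------------------------------------------------------
# Illustrative note (not executed by this script): used prefixes are kept as
# a plain list because netaddr's IPSet merges adjacent subnets. A minimal
# standalone sketch of that behaviour, with made-up example prefixes:
#
#     from netaddr import IPSet
#
#     # Two adjacent /25s, possibly registered to different people...
#     s = IPSet(["172.22.0.0/25", "172.22.0.128/25"])
#     # ...come back out of the set as a single merged /24.
#     print(s.iter_cidrs())   # [IPNetwork('172.22.0.0/24')]
# ---------------------------------------------------------------------------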