mirror of https://github.com/peeringdb/peeringdb.git synced 2024-05-11 05:55:09 +00:00
peeringdb-peeringdb/peeringdb_server/search.py
Matt Griswold ea55c4dc38 July updates (#762)
* Change label from primary ASN to ASN

* Raise validation error when trying to update ASN

* first steps for dotf importer protocol (#697)

* migrations (#697)

* Add translation to error message

* Make ASN readonly in table

* Add test now that ASN should not be able to update

* Set fac.rencode to '' for all entries and make it readonly in serializer

* Add unique constraints to network ixlan ip addresses

* Add migration to null out duplicate ipaddresses for deleted netixlans

* Add unique constraints to network ixlan ip addresses

* Add migration to null out duplicate ipaddresses for deleted netixlans

* remove old migrations (#697)

* fix netixlan ipaddr dedupe migration (#268)
add netixlan ipaddr unique constraint migration (#268)

* ixf_member_data migrations (#697)

* fix table name (#697)

* importer protocol (#697)

* fix netixlan ipaddr dedupe migration (#268)
add netixlan ipaddr unique constraint migration (#268)

* ixf proposed changes notifications (#697)

* Delete repeated query

* Add a test to show rencode is readonly

* Blank out rencode when mocking data

* Remove validator now that constraint exists

* Add back unique field validator w Check Deleted true

* conflict resolving (#697)

* UniqueFieldValidator raise error with code "unique" (#268)

* conflict resolution (#697)

* Add fixme comment to tests

* conflict resolution (#697)

* Remove now invalid undelete tests

* UniqueFieldValidator raise error with code "unique" (#268)

* delete admin tools for duplicate ip addresses

* Make migration to delete duplicateipnetworkixlan

* Add ixlan-ixpfx status matching validation, add corresponding test

* delete redundant checking in test

* resolve conflict ui (#697)

* fix migrations hierarchy

* squash migrations for ixf member data

* clean up preview and post-mortem tools

* remove non-sensical permission check when undeleting soft-deleted objects through unique integrity error handling

* only include the ix-f data url in notifications to admincom (#697)

* resolve on --skip-import (#697)

* ac conflict resolution (#697)

* Define more accurately the incompatible statuses for ixlan and ixpfx

* Add another status test

* Preventing disruptive changes (#697)

* fix tests (#697)

* Stop allow_ixp_update from being write only and add a global stat for automated networks

* Add tests for global stats that appear in footer

* Change how timezone is called with datetime, to get test_stats.py/test_generate_for_current_date to pass

* test for protected entities (#697)

* admincom conflict resolution refine readonly fields (#697)
network notifications only if the problem is actually actionable by the network (#697)

* ixp / ac notification when ix-f source cannot be parsed (#697)
fix issue with ixlan prefix protection (#697)

* migrations (#697)

* code documentation (#697)

* ux tweaks (#697)

* UX tweaks (#697)

* Fix typo

* fix netixlan returned in IXFMemberData.apply when adding a new one (#697)

* fix import log inconsistencies (#697)

* Add IXFMemberData to test

* Update test data

* Add protocol tests

* Add tests for views

* always persist changes to remote data on set_conflict (#697)

* More tests

* always persist changes to remote data on set_conflict (#697)

* suggest-add test

* net_present_at_ix should check status (#697)

* Add more protocol tests

* Edit language of some tests

* django-peeringdb to 2.1.1
relock pipfile, pin django-ratelimit to <3 as it breaks stuff

* Add net_count_ixf field to ix object (#683)

* Add the IX-F Member Export URL to the ixlan API endpoint (#249)

* Lock some objects from being deleted by the owner (#696)

* regenerate api docs (#249)

* always persist changes to remote data on set_add and set_update (#697)

* IXFMemberData: always persist remote data changes during set_add and set_update, also allow for saving without touching the updated field

* always persist changes to remote data on set_add and set_update (#697)

* Fix suggest-add tests

* IXFMemberData: always persist remote data changes during set_add and set_update, also allow for saving without touching the updated field

* IXFMemberData: always persist remote data changes during set_add and set_update, also allow for saving without touching the updated field

* fix issue with deletion when ixfmemberdata for entry existed previously (#697)

* fix test_suggest_delete_local_ixf_no_flag (#697 tests)

* fix issue with deletion when ixfmemberdata for entry existed previously (#697)

* invalid ips get logged and notified to the ix via notify_error (#697)

* Fix more tests

* issue with previous_data when running without save (#697)
properly track speed errors (#697)

* reset errors on ixfmemberdata that go into pending_save (#697)

* add remote_data to admin view (#697)

* fix error reset inconsistency (#697)

* Refine invalid data tests

* remove debug output

* for notifications to ac include contact points for net and ix in the message (#697)

* settings to toggle ix-f tickets / emails (#697)

* allow turning off ix-f notifications for net and ix separately (#697)

* add jsonschema test

* Add idempotent tests to updater

* remove old ixf member tests

* Invalid data tests when ixp_updates are enabled

* fix speed error validation (#697)

* fix issue with rollback (#697)

* fix migration hierarchy

* fix ixfmemberdata _email

* django-peeringdb to 2.2 and relock

* add ixf rollback tests

* ixf email notifications off by default

* black formatted

* pyupgrade

Co-authored-by: egfrank <egfrank@20c.com>
Co-authored-by: Stefan Pratter <stefan@20c.com>
2020-07-15 07:07:01 +00:00


import datetime
import re
import time

import unidecode
from django.db.models import Q
from django.db.models.signals import post_save, pre_delete

import peeringdb_server.rest
from peeringdb_server.models import (
    UTC,
    InternetExchange,
    Network,
    Facility,
    Organization,
)


def unaccent(v):
    return unidecode.unidecode(v).lower()


# SEARCH INDEX IS STORED HERE
SEARCH_CACHE = {"search_index": {}, "time": 0}


# We want to hook searchable objects into save and delete signals
# so we can update the search index as the data changes without having
# to reload the entire thing all the time
def hook_save(sender, **kwargs):
obj = kwargs.get("instance")
tag = obj._handleref.tag
idx = SEARCH_CACHE.get("search_index")
if obj.status == "ok":
if tag not in idx:
idx[tag] = {}
idx.get(tag)[obj.id] = obj
# print "%d %s refreshed in search index" % (obj.id, tag)
else:
try:
del idx[tag][obj.id]
except KeyError:
pass
# print "%d %s delete from search index" % (obj.id, tag)
def hook_delete(sender, **kwargs):
obj = kwargs.get("instance")
tag = obj._handleref.tag
try:
        del SEARCH_CACHE["search_index"][tag][obj.id]
except KeyError:
pass
# print "%d %s deleted from search index " % (obj.id, tag)
searchable_models = [InternetExchange, Network, Facility, Organization]
for model in searchable_models:
post_save.connect(hook_save, sender=model)
pre_delete.connect(hook_delete, sender=model)
def search(term):
"""
Search searchable objects (ixp, network, facility ...) by term
Returns result dict
"""
search_tags = ("fac", "ix", "net", "org")
ref_dict = peeringdb_server.rest.ref_dict()
t = time.time()
if not SEARCH_CACHE.get("search_index"):
# whole db takes 5ish seconds, too slow to cache inline here
search_index = {
tag: {obj.id: obj for obj in model.objects.filter(status__in=["ok"])}
for tag, model in list(ref_dict.items())
if tag in search_tags
}
for typ, stor in list(search_index.items()):
print("CACHED: %d items in %s" % (len(stor), typ))
tag_id_re = re.compile(r"(" + r"|".join(search_tags) + r"|asn|as)(\d+)")
        # FIXME: for now let's force a flush every 120 seconds; might want to look
        # at an event-based update solution instead
SEARCH_CACHE.update(
search_index=search_index, time=t, update_t=t, tag_id_re=tag_id_re
)
else:
search_index = SEARCH_CACHE.get("search_index")
tag_id_re = SEARCH_CACHE.get("tag_id_re")
# while we are using signals to make sure that the search index gets updated whenever
# a model is saved, right now we still have updates from external sources
# to which those signals cannot be easily connected (importer, fac_merge command etc.)
#
# in order to reflect search index changes made by external sources
    # we need to find new / updated objects regularly and update the
# search index from that
#
# FIXME: this can be taken out when we turn the importer off - or just leave it
# in as a fail-safe as it is fairly unobtrusive
ut = SEARCH_CACHE.get("update_t", 0)
if t - ut > 600:
dut = datetime.datetime.fromtimestamp(ut).replace(tzinfo=UTC())
print("Updating search index with newly created/updates objects")
search_index_update = {
tag: {
obj.id: obj
for obj in model.objects.filter(
Q(created__gte=dut) | Q(updated__gte=dut)
).filter(status="ok")
}
for tag, model in list(ref_dict.items())
if tag in search_tags
}
for tag, objects in list(search_index_update.items()):
if tag not in SEARCH_CACHE["search_index"]:
SEARCH_CACHE["search_index"][tag] = {
obj.id: obj for obj in ref_dict[tag].objects.filter(status="ok")
}
SEARCH_CACHE["search_index"][tag].update(objects)
SEARCH_CACHE["update_t"] = t
# FIXME: for some reason this gets unset sometimes - need to figure out
    # why - for now just recreate when it's missing
if not tag_id_re:
tag_id_re = re.compile(r"(" + r"|".join(search_tags) + r"|asn|as)(\d+)")
SEARCH_CACHE["tag_id_re"] = tag_id_re
print("Search index retrieval took %.5f seconds" % (time.time() - t))
result = {tag: [] for tag, model in list(ref_dict.items())}
term = unaccent(term)
# try to convert to int for numeric search matching
typed_q = {}
try:
typed_q["int"] = int(term)
except ValueError:
pass
# check for ref tags
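    # e.g. "ix42" yields typed_q["ix"] = "42", "as63311" yields typed_q["as"] = "63311"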
try:
match = tag_id_re.match(term)
if match:
typed_q[match.group(1)] = match.group(2)
except ValueError:
pass
# FIXME model should have a search_fields attr on it
# this whole thing should be replaced with something more modular to get
# rid of all the ifs
for tag, index in list(search_index.items()):
for id, data in list(index.items()):
if tag == "org":
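                # orgs have no parent org, so reuse their own id as org_id for uniform result rows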
data.org_id = data.id
if unaccent(data.name).find(term) > -1:
result[tag].append(
{"id": id, "name": data.search_result_name, "org_id": data.org_id}
)
continue
if hasattr(data, "name_long") and unaccent(data.name_long).find(term) > -1:
result[tag].append(
{"id": id, "name": data.search_result_name, "org_id": data.org_id}
)
continue
if hasattr(data, "aka") and unaccent(data.aka).find(term) > -1:
result[tag].append(
{"id": id, "name": data.search_result_name, "org_id": data.org_id}
)
continue
if typed_q:
if tag in typed_q:
if str(data.id).startswith(typed_q[tag]):
result[tag].append(
{
"id": id,
"name": data.search_result_name,
"org_id": data.org_id,
}
)
continue
                # search asn on everything? probably just if asn is in search
                # fields
if hasattr(data, "asn"):
asn = typed_q.get(
"as", typed_q.get("asn", str(typed_q.get("int", "")))
)
if asn and str(data.asn).startswith(asn):
result[tag].append(
{
"id": id,
"name": data.search_result_name,
"org_id": data.org_id,
}
)
for k, items in list(result.items()):
result[k] = sorted(items, key=lambda row: row.get("name"))
return result
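
A minimal usage sketch (not part of search.py), assuming a configured Django environment with the peeringdb_server app installed and some objects in "ok" status; the result keys mirror the search_tags tuple above, and the rows returned depend entirely on the data:

# hypothetical usage, for illustration only
from peeringdb_server.search import search

results = search("as63311")  # ref-tag style query, picked up by tag_id_re
for tag in ("fac", "ix", "net", "org"):
    for row in results.get(tag, []):
        # each row carries the object id, display name and owning org id
        print(tag, row["id"], row["name"], row["org_id"])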