mirror of https://github.com/peeringdb/peeringdb.git synced 2024-05-11 05:55:09 +00:00
peeringdb-peeringdb/peeringdb_server/api_cache.py
Matt Griswold abe3e78d50 Dotf fixes (#781)
* fix issue where ix-f import would raise suggestions for ip addresses that do not belong to that ixlan (#764)

* IX-F Suggestions: Leaving the editor and returning to it via back button issues (#765)

* IX-F importer: Clicking "Preview" (IXP Update Tools) on /net/ page resulted in 170 ticket resolutions (#769)
More robust testing

* black formatting (was lost after pyupgrade)

* Add regex searching to deskpro ticket subjects

* Change operational error

* IX-F suggestions: consolidate delete+add (#770)

* Add reset functions to commandline tool

* Fix commandline tool bugs

* Fix reset commandline tool bugs

* add commandline tool

* Ixlan needs to be set for import commandline tool

* Add email model

* Add admin view to emails

* Allow network and ix to be null

* save emails as part of ixf import

* Add email model

* Add email delete

* add iregex search and better comments

* fix ixlan selection for import

* redefine migration dependencies for this branch

* only enable search w start and end char

* Add caption to regex search

* Remove delete all ixfmemberdata option

* [beta] IX-F importer: don't bother about missing IPv{4,6} address when network is not doing IPv{4,6} (#771)

* Add cmdline tests

* Resolve email conflicts

* Add cmd tool reset tests

* add autocomplete to commandline tool

* Fix email bugs

* Fix email migrations

* Fix typos

* [beta] IX-F importer: prevent Admin Committee overload by initially limiting importer to IXes enabled by AC (#772)

* Finalize regex search for emails and deskprotickets

* Fix keyword bug

* fix typo

* protocol-conflict will now be handled in the notification consolidation

The changes for #771 mean that if a network indicates neither ipv4 nor ipv6 support, it is handled as supporting both (e.g. the network did not configure these at all).

Realised that the importer command re-instantiates the `Importer` class for each ixlan it processes, so the sending of consolidated notifications (#772) was moved out of the `update` function and into the command itself, after it has finished processing all the ixlans. This means that in tests you will need to call `importer.notify_proposals` after `importer.update` to test the consolidated notifications.
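A minimal sketch of the adjusted test flow described above (the `peeringdb_server.ixf` module path and the exact `update()` arguments are assumptions for illustration, not taken from this commit):

    from peeringdb_server import ixf

    importer = ixf.Importer()
    importer.update(ixlan)       # per-ixlan import, no longer sends notifications itself
    importer.notify_proposals()  # consolidated notifications are sent once, afterwards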

Fixed several MultipleObjectsReturned errors that occurred when a network switched protocol support between imports.

* should be checking for "ix" in the form data (#773)

* Fix cmd ixf tests

* fix issue in log_peer

* Add commit check for reset tool

* fix importer bugs

* remove dupe IXFImportEmail definition

* ixfimportemail support ix__name and net__name searching

* ticket resolution responses

* Add commit to command ixf import changes

* fix modify entry header

* remove whitespace in notification about remote data changes

* Begin updating tests

* ixf-import command line tool to queue

* refactor conflict inserts

* Update import protocol tests, including tests for 770

* More test edits

* Change cmd tests

* better ixfmemberdata error handling and fix some test data

* don't reset the same ixfmemberdata requirement

* fix many bugs, add many tests

* remove debug message

* fix bug during import when consolidating delete+add

* fix performance issue in IXFMemberData listing

* don't show reset flags on prod env

* Add regex search tests

* Add 772 tests

* remove debug output

* fix `test_resolve_deskpro_ticket` test

* black formatting

* remove dupe import

* fix issue with unique constraint error handling

* add test for ixp / network ip protocol notification

* add missing test data

Co-authored-by: Stefan Pratter <stefan@20c.com>
Co-authored-by: Elliot Frank <elliot@20c.com>
2020-07-27 04:36:27 +00:00

314 lines
9.7 KiB
Python

import os
import json
import collections
from django.conf import settings
from peeringdb_server.models import InternetExchange, IXLan, Network
import django_namespace_perms.util as nsp
class CacheRedirect(Exception):
"""
Raise this error to redirect to cache response during viewset.get_queryset
or viewset.list()
Argument should be an APICacheLoader instance
"""
def __init__(self, loader):
super(Exception, self).__init__(self, "Result to be loaded from cache")
self.loader = loader
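# Illustrative usage only (not part of this module, names are assumed): a
# viewset that supports the cache would build an APICacheLoader in its
# get_queryset() and raise CacheRedirect when the request qualifies, e.g.
#
#   loader = APICacheLoader(self, qset, filters)
#   if loader.qualifies():
#       raise CacheRedirect(loader)
#
# and its list() handler would catch the exception and return
# exc.loader.load() instead of querying the database.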
###############################################################################
# API CACHE LOADER
class APICacheLoader:
"""
Checks if an API GET request qualifies for a cache load
and if it does allows you to provide the cached result
"""
def __init__(self, viewset, qset, filters):
request = viewset.request
self.request = request
self.qset = qset
self.filters = filters
self.model = viewset.model
self.viewset = viewset
self.depth = min(int(request.query_params.get("depth", 0)), 3)
self.limit = int(request.query_params.get("limit", 0))
self.skip = int(request.query_params.get("skip", 0))
self.since = int(request.query_params.get("since", 0))
self.fields = request.query_params.get("fields")
if self.fields:
self.fields = self.fields.split(",")
self.path = os.path.join(
settings.API_CACHE_ROOT, f"{viewset.model.handleref.tag}-{self.depth}.json",
)
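# Illustrative example (actual location depends on settings.API_CACHE_ROOT):
# a depth-0 "net" listing would be served from <API_CACHE_ROOT>/net-0.json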
def qualifies(self):
"""
Check if request qualifies for a cache load
"""
# api cache use is disabled, no
if not getattr(settings, "API_CACHE_ENABLED", False):
return False
# no depth and a limit lower than 251 seems like a tipping point
# where non-cache retrieval is still faster
if (
not self.depth
and self.limit
and self.limit <= 250
and getattr(settings, "API_CACHE_ALL_LIMITS", False) is False
):
return False
# filters have been specified, no
if self.filters or self.since:
return False
# cache file non-existent, no
if not os.path.exists(self.path):
return False
# request method is anything but GET, no
if self.request.method != "GET":
return False
# primary key set in request, no
if self.viewset.kwargs:
return False
return True
def load(self):
"""
Load the cached response according to tag and depth
"""
# read cache file
with open(self.path) as f:
data = json.load(f)
data = data.get("data")
# apply permissions to data
fnc = getattr(self, "apply_permissions_%s" % self.model.handleref.tag, None)
if fnc:
data = fnc(data)
# apply pagination
if self.skip and self.limit:
data = data[self.skip : self.skip + self.limit]
elif self.skip:
data = data[self.skip :]
elif self.limit:
data = data[: self.limit]
return {"results": data, "__meta": {"generated": os.path.getmtime(self.path)}}
def apply_permissions(self, ns, data, ruleset={}):
"""
Wrapper function to apply permissions to a data row and
return the sanitized result
"""
if type(ns) != list:
ns = ns.split(".")
# prepare ruleset
if ruleset:
_ruleset = {}
namespace_str = ".".join(ns)
for section, rules in list(ruleset.items()):
_ruleset[section] = {}
for rule, perms in list(rules.items()):
_ruleset[section][f"{namespace_str}.{rule}"] = perms
ruleset = _ruleset
return nsp.dict_get_path(
nsp.permissions_apply(
nsp.dict_from_namespace(ns, data), self.request.user, ruleset=ruleset
),
ns,
)
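# Illustrative example (object id and rule name are made up): with
# ns ["net", "20"] and a ruleset rule keyed "poc_set", the rule key is
# rewritten to "net.20.poc_set" before being passed to
# nsp.permissions_apply, so rules defined relative to the object's
# namespace become absolute.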
def apply_permissions_generic(self, data, explicit=False, join_ids=[], **kwargs):
"""
Apply permissions to all rows according to rules
specified in parameters
explicit <function>
if explicit is passed as a function it will be called and the result will
determine whether or not explicit read perms are required for the row
join_ids [(target_id<str>, proxy_id<str>, model<handleref>), ..]
Since we are checking permissioning namespaces, and those namespaces may
consist of object ids that are not necessarily in the dataset you can
join those ids in via the join_ids parameter
"""
rv = []
joined_ids = collections.OrderedDict()
e = {}
inst = self.model()
# perform id joining
if join_ids:
for t, p, model in join_ids:
joined_ids[t] = {
"p": p,
"ids": self.join_ids(
data,
t,
p,
model,
list(joined_ids.get(p, e).get("ids", e).values()),
),
}
for row in data:
# create dict containing ids needed to build the permissioning
# namespace
init = {k: row.get(v) for k, v in list(kwargs.items())}
# joined ids
for t, j in list(joined_ids.items()):
if j["p"] in row:
init[t] = j["ids"].get(row.get(j["p"]))
elif t in joined_ids:
init[t] = joined_ids.get(t).get("ids").get(init[j["p"]])
# build permissioning namespace
ns = self.model.nsp_namespace_from_id(**init).lower()
# apply fields filter
if self.fields:
for k in list(row.keys()):
if k not in self.fields:
del row[k]
# determine whether or not read perms for this object need
# to be explicitly set
if explicit and callable(explicit):
expl = explicit(row)
else:
expl = False
# initial read perms check
if nsp.has_perms(self.request.user, ns, 0x01, explicit=expl):
ruleset = getattr(inst, "nsp_ruleset", {})
# apply permissions to tree
row = self.apply_permissions(ns, row, ruleset=ruleset)
applicator = getattr(
self.model, "api_cache_permissions_applicator", None
)
if applicator:
applicator(row, ns, self.request.user)
# if row still has data afterwards, append to results
if row:
rv.append(row)
return rv
def join_ids(self, data, target_id, proxy_id, model, stash=[]):
"""
Returns a dict mapping of (proxy_id, target_id)
target ids are obtained by fetching instances of specified
model that match the supplied proxy ids
proxy ids will be gotten from data or stash
data [<dict>, ..] list of data rows from cache load, the field
name provided in "proxy_id" will be used to obtain the id from
each row
stash [<int>,..] list of ids
if stash is set, data and proxy_field will be ignored
"""
if stash:
ids = stash
else:
ids = [r[proxy_id] for r in data]
return {
r["id"]: r[target_id]
for r in model.objects.filter(id__in=ids).values("id", target_id)
}
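# Illustrative example: join_ids(data, "org_id", "ix_id", InternetExchange)
# returns a {<ix id>: <org id>} mapping for every ix id referenced by the
# cached rows, which apply_permissions_generic then uses to fill in the org
# part of each row's permissioning namespace.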
# permissioning functions for each handleref type
def apply_permissions_org(self, data):
return self.apply_permissions_generic(data, id="id")
def apply_permissions_fac(self, data):
return self.apply_permissions_generic(data, fac_id="id", org_id="org_id")
def apply_permissions_ix(self, data):
return self.apply_permissions_generic(data, ix_id="id", org_id="org_id")
def apply_permissions_net(self, data):
return self.apply_permissions_generic(data, net_id="id", org_id="org_id")
def apply_permissions_ixpfx(self, data):
return self.apply_permissions_generic(
data,
join_ids=[
("ix_id", "ixlan_id", IXLan),
("org_id", "ix_id", InternetExchange),
],
ixlan_id="ixlan_id",
id="id",
)
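# Note on the ixpfx joins above: the two join_ids entries chain, i.e. the
# first maps ixlan_id -> ix_id and the second reuses those ix ids (via the
# stash argument of join_ids) to map ix_id -> org_id, so the org id needed
# for the permissioning namespace can be recovered from cached rows alone.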
def apply_permissions_ixlan(self, data):
return self.apply_permissions_generic(
data,
join_ids=[("org_id", "ix_id", InternetExchange)],
ix_id="ix_id",
id="id",
)
def apply_permissions_ixfac(self, data):
return self.apply_permissions_generic(
data,
join_ids=[("org_id", "ix_id", InternetExchange)],
ix_id="ix_id",
id="id",
)
def apply_permissions_netfac(self, data):
return self.apply_permissions_generic(
data,
join_ids=[("org_id", "net_id", Network)],
net_id="net_id",
fac_id="fac_id",
)
def apply_permissions_netixlan(self, data):
return self.apply_permissions_generic(
data,
join_ids=[("org_id", "net_id", Network)],
net_id="net_id",
ixlan_id="ixlan_id",
)
def apply_permissions_poc(self, data):
return self.apply_permissions_generic(
data,
explicit=lambda x: (x.get("visible") != "Public"),
join_ids=[("org_id", "net_id", Network)],
vis="visible",
net_id="net_id",
)
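# Note: the explicit callable above requires explicit read permissions for
# any poc row whose "visible" field is not "Public", so non-public contacts
# are only served from the cache to users holding explicit read perms.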