mirror of https://github.com/CumulusNetworks/ifupdown2.git synced 2024-05-06 15:54:50 +00:00

More fixes and changes

Ticket: CM-1438
Reviewed By:
Testing Done: unit tested with all kinds of interfaces

Some high-level changes:
- moved ipv4/ipv6 address handling into a single module; dhcp handling into a separate module
- new link 'up' module
- igmp fixes
- many other fixes
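
For context, a minimal sketch of the addon-module shape these changes imply. The class name, the attributes, and the method bodies below are illustrative assumptions; only the constructor keywords, run(), and get_dependent_ifacenames() hooks follow how ifupdownMain and ifaceScheduler invoke modules in the diffs below.

# hypothetical addon module sketch; real modules are loaded from
# /usr/share/ifupdownaddons and the class is named after its file
class address():
    """ handles ipv4/ipv6 address configuration for an interface """

    def __init__(self, force=False, dryrun=False, nowait=False,
                 perfmode=False, cache=False):
        self.FORCE = force
        self.DRYRUN = dryrun

    def get_dependent_ifacenames(self, ifaceobj, ifacenames=None):
        # address configuration introduces no lower devices
        return None

    def run(self, ifaceobj, operation, query_check=False, query_ifaceobj=None):
        # the scheduler calls this once per sub-operation
        # ('pre-up', 'up', 'post-up', 'pre-down', 'down', 'post-down')
        if 'up' in operation:
            pass    # apply address/gateway attributes from ifaceobj here
        elif 'down' in operation:
            pass    # remove previously applied addresses here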
Author: roopa
Date:   2014-01-30 22:36:41 -08:00
parent 7ac4828157
commit 37c0543d34
7 changed files with 1035 additions and 243 deletions


@@ -28,6 +28,7 @@ verbose=
[ "$VERBOSE" = yes ] && verbose=-v
process_exclusions() {
set -- $EXCLUDE_INTERFACES
exclusions=""
@@ -94,12 +95,20 @@ ifup_hotplug () {
fi
}
ifupdown_init() {
[ ! -e /run/network ] && mkdir -p /run/network &>/dev/null
[ ! -e /etc/network/run ] && \
ln -sf /run/network /etc/network/run &>/dev/null
}
case "$1" in
start)
if init_is_upstart; then
exit 1
fi
ifupdown_init
if [ "$CONFIGURE_INTERFACES" = no ]
then
log_action_msg "Not configuring network interfaces, see /etc/default/networking"
@@ -108,7 +117,7 @@ start)
set -f
exclusions=$(process_exclusions)
log_action_begin_msg "Configuring network interfaces"
if ifup -a $verbose --perfmode
if ifup -a $verbose --perfmode --force
then
log_action_end_msg $?
else
@@ -120,6 +129,8 @@ stop)
if init_is_upstart; then
exit 0
fi
ifupdown_init
check_network_file_systems
check_network_swap
@@ -133,22 +144,37 @@ stop)
reload)
ifupdown_init
log_action_begin_msg "Reloading network interfaces configuration"
state=$(cat /run/network/ifstate)
ifdown -a --exclude=lo $verbose --perfmode --force || true
if ifup --exclude=lo $state $verbose ; then
if ifreload -a
then
log_action_end_msg $?
else
log_action_end_msg $?
fi
;;
force-reload|restart)
force-reload)
ifupdown_init
log_action_begin_msg "Reloading network interfaces configuration"
if ifreload -a --force
then
log_action_end_msg $?
else
log_action_end_msg $?
fi
;;
restart)
if init_is_upstart; then
exit 1
fi
#log_warning_msg "Running $0 $1 is deprecated because it may not re-enable some interfaces"
ifupdown_init
log_action_begin_msg "Reconfiguring network interfaces"
ifdown -a --exclude=lo $verbose --perfmode --force || true
set -f

pkg/gvgen.py (new file, 615 lines)

@@ -0,0 +1,615 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# $Id: gvgen.py 13085 2008-02-25 16:11:50Z toady $
"""
GvGen - Generate dot file to be processed by graphviz
Copyright (c) 2007-2008 INL
Written by Sebastien Tricaud <sebastien.tricaud@inl.fr>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
from sys import stdout
gvgen_version = "0.9"
debug = 0
debug_tree_unroll = 0
class GvGen:
"""
Graphviz dot language Generation Class
For an example of usage, please see the __main__ function
"""
def __init__(self, legend_name=None, options="compound=true;"): # allow links between clusters
self.max_line_width = 10
self.max_arrow_width = 2
self.line_factor = 1
self.arrow_factor = 0.5
self.initial_line_width = 1.2
self.initial_arrow_width = 0.8
self.options = options
self.__id = 0
self.__nodes = []
self.__links = []
self.__browse_level = 0 # Stupid depth level for self.browse
self.__opened_braces = [] # We count opened clusters
self.fd=stdout # File descriptor to output dot
self.padding_str=" " # Left padding to make children and parent look nice
self.__styles = {}
self.__default_style = []
self.smart_mode = 0 # Disabled by default
# The graph has a legend
if legend_name:
self.options = self.options + "rankdir=LR;"
self.legend = self.newItem(legend_name)
def __node_new(self, name, parent=None, distinct=None):
"""
Create a new node in the data structure
@name: Name of the node, that will be the graphviz label
@parent: The node parent
@distinct: if true, will not create a node that has the same name
Returns: The node created
"""
# We first check for distincts
if distinct:
if self.__nodes:
for e in self.__nodes:
props = e['properties']
if props['label'] == name:
# We found the label name matching, we return -1
return -1
# We now insert into gvgen datastructure
self.__id += 1
node = {'id': self.__id, # Internal ID
'lock': 0, # When the node is written, it is locked to avoid further references
'parent': parent, # Node parent for easy graphviz clusters
'style':None, # Style that GvGen allows you to create
'properties': { # Custom graphviz properties you can add, which will override previously defined styles
'label': name
}
}
# Parents should be sorted first
if parent:
self.__nodes.insert(1, node)
else:
self.__nodes.append(node)
return node
def __link_smart(self, link):
"""
Creates a smart link if smart_mode activated:
if a -> b exists, and we now add a <- b,
instead of doing: a -> b
<-
we do: a <-> b
"""
linkfrom = self.__link_exists(link['from_node'], link['to_node'])
linkto = self.__link_exists(link['to_node'], link['from_node'])
if self.smart_mode:
if linkto:
self.__links.remove(linkto)
self.propertyAppend(link, "dir", "both")
pw = self.propertyGet(linkfrom, "penwidth")
if pw:
pw = float(pw)
pw += self.line_factor
if pw < self.max_line_width:
self.propertyAppend(linkfrom, "penwidth", str(pw))
else:
self.propertyAppend(link, "penwidth", str(self.initial_line_width))
aw = self.propertyGet(linkfrom, "arrowsize")
if aw:
aw = float(aw)
if aw < self.max_arrow_width:
aw += self.arrow_factor
self.propertyAppend(linkfrom, "arrowsize", str(aw))
else:
self.propertyAppend(link, "arrowsize", str(self.initial_arrow_width))
if not linkfrom:
self.__links.append(link)
def __link_new(self, from_node, to_node, label = None, cl_from_node=None, cl_to_node=None):
"""
Creates a link between two nodes
@from_node: The node the link comes from
@to_node: The node the link goes to
Returns: The link created
"""
link = {'from_node': from_node,
'to_node': to_node,
'style':None, # Style that GvGen allows you to create
'properties': {}, # Custom graphviz properties you can add, which will override previously defined styles
'cl_from_node':None, # When linking from a cluster, the link appears from this node
'cl_to_node':None, # When linking to a cluster, the link appears to go to this node
}
if label:
link['properties']['label'] = label
if cl_from_node:
link['cl_from_node'] = cl_from_node
if cl_to_node:
link['cl_to_node'] = cl_to_node
# We let smart link work for us
self.__link_smart(link)
return link
def __link_exists(self, from_node, to_node):
"""
Find if a link exists
@from_node: The node the link comes from
@to_node: The node the link goes to
Returns: true if the given link already exists
"""
for link in self.__links:
if link['from_node'] == from_node and link['to_node'] == to_node:
return link
return None
def __has_children(self, parent):
"""
Find children to a given parent
Returns the children list
"""
children_list = []
for e in self.__nodes:
if e['parent'] == parent:
children_list.append(e)
return children_list
def newItem(self, name, parent=None, distinct=None):
node = self.__node_new(name, parent, distinct)
return node
def newLink(self, src, dst, label=None, cl_src=None, cl_dst=None):
"""
Link two existing nodes with each other
"""
return self.__link_new(src, dst, label, cl_src, cl_dst)
def debug(self):
for e in self.__nodes:
print "element = " + str(e['id'])
def collectLeaves(self, parent):
"""
Collect every leaf sharing the same parent
"""
cl = []
for e in self.__nodes:
if e['parent'] == parent:
cl.append(e)
return cl
def collectUnlockedLeaves(self, parent):
"""
Collect every leaf sharing the same parent
unless it is locked
"""
cl = []
for e in self.__nodes:
if e['parent'] == parent:
if not e['lock']:
cl.append(e)
return cl
def lockNode(self, node):
node['lock'] = 1
#
# Start: styles management
#
def styleAppend(self, stylename, key, val):
if stylename not in self.__styles:
self.__styles[stylename] = []
self.__styles[stylename].append([key, val])
def styleApply(self, stylename, node_or_link):
node_or_link['style'] = stylename
def styleDefaultAppend(self, key, val):
self.__default_style.append([key, val])
#
# End: styles management
#
#
# Start: properties management
#
def propertiesAsStringGet(self, node, props):
"""
Get the properties string according to parent/children
props is the properties dictionary
"""
allProps = {}
#
# Default styles come first, they can then be overridden
#
if self.__default_style:
allProps.update(self.__default_style)
#
# First, we build the styles
#
if node['style']:
stylename = node['style']
allProps.update(self.__styles[stylename])
#
# Now we build the properties:
# remember they override styles
#
allProps.update(props)
if self.__has_children(node):
propStringList = ["%s=\"%s\";\n" % (k, v) for k, v in allProps.iteritems()]
properties = ''.join(propStringList)
else:
if props:
propStringList = ["%s=\"%s\"" % (k, v) for k, v in allProps.iteritems()]
properties = '[' + ','.join(propStringList) + ']'
else:
properties = ''
return properties
def propertiesLinkAsStringGet(self, link):
has_props = 0
props = {}
if link['style']:
stylename = link['style']
# Build the properties string for node
props.update(self.__styles[stylename])
props.update(link['properties'])
properties = ''
if props:
properties += ','.join(["%s=\"%s\"" % (str(k),str(val)) for k, val in props.iteritems()])
return properties
def propertyForeachLinksAppend(self, node, key, val):
for l in self.__links:
if l['from_node'] == node:
props = l['properties']
props[key] = val
def propertyAppend(self, node_or_link, key, val):
"""
Append a property to the wanted node or link
mynode = newItem(\"blah\")
Ex. propertyAppend(mynode, \"color\", \"red\")
"""
props = node_or_link['properties']
props[key] = val
def propertyGet(self, node_or_link, key):
"""
Get the value of a given property
Ex. prop = propertyGet(node, \"color\")
"""
try:
props = node_or_link['properties']
return props[key]
except:
return None
def propertyRemove(self, node_or_link, key):
"""
Remove a property from the wanted node or link
mynode = newItem(\"blah\")
Ex. propertyRemove(mynode, \"color\")
"""
props = node_or_link['properties']
del props[key]
#
# End: Properties management
#
#
# For a good legend, the graph must have
# rankdir=LR property set.
#
def legendAppend(self, legendstyle, legenddescr, labelin=None):
if labelin:
item = self.newItem(legenddescr, self.legend)
self.styleApply(legendstyle, item)
else:
style = self.newItem("", self.legend)
descr = self.newItem(legenddescr, self.legend)
self.styleApply(legendstyle, style)
link = self.newLink(style,descr)
self.propertyAppend(link, "dir", "none")
self.propertyAppend(link, "style", "invis")
self.propertyAppend(descr,"shape","plaintext")
def tree_debug(self, level, node, children):
if children:
print "(level:%d) Eid:%d has children (%s)" % (level,node['id'],str(children))
else:
print "Eid:"+str(node['id'])+" has no children"
#
# Core function that outputs the data structure tree into dot language
#
def tree(self, level, node, children):
"""
Core function to output dot which sorts out parents and children
and does it in the right order
"""
if debug:
print "/* Grabed node = %s*/" % str(node['id'])
if node['lock'] == 1: # The node is locked, nothing should be printed
if debug:
print "/* The node (%s) is locked */" % str(node['id'])
if self.__opened_braces:
self.fd.write(level * self.padding_str)
self.fd.write("}\n")
self.__opened_braces.pop()
return
props = node['properties']
if children:
node['lock'] = 1
self.fd.write(level * self.padding_str)
self.fd.write(self.padding_str + "subgraph cluster%d {\n" % node['id'])
properties = self.propertiesAsStringGet(node, props)
self.fd.write(level * self.padding_str)
self.fd.write(self.padding_str + "%s" % properties)
self.__opened_braces.append([node,level])
else:
# We grab appropriate properties
properties = self.propertiesAsStringGet(node, props)
# We get the latest opened elements
if self.__opened_braces:
last_cluster,last_level = self.__opened_braces[-1]
else:
last_cluster = None
last_level = 0
if debug:
if node['parent']:
parent_str = str(node['parent']['id'])
else:
parent_str = 'None'
if last_cluster:
last_cluster_str = str(last_cluster['id'])
else:
last_cluster_str = 'None'
print "/* e[parent] = %s, last_cluster = %s, last_level = %d, opened_braces: %s */" % (parent_str, last_cluster_str,last_level,str(self.__opened_braces))
# Write children/parent with properties
if node['parent']:
if node['parent'] != last_cluster:
while node['parent'] < last_cluster:
last_cluster,last_level = self.__opened_braces[-1]
if node['parent'] == last_cluster:
last_level += 1
# We browse any property to build a string
self.fd.write(last_level * self.padding_str)
self.fd.write(self.padding_str + "node%d %s;\n" % (node['id'], properties))
node['lock'] = 1
else:
self.fd.write(last_level * self.padding_str)
self.fd.write(self.padding_str + "}\n")
self.__opened_braces.pop()
else:
self.fd.write(level * self.padding_str)
self.fd.write(self.padding_str + "node%d %s;\n" % (node['id'], properties) )
node['lock'] = 1
cl = self.collectUnlockedLeaves(node['parent'])
for l in cl:
props = l['properties']
properties = self.propertiesAsStringGet(l, props)
self.fd.write(last_level * self.padding_str)
self.fd.write(self.padding_str + self.padding_str + "node%d %s;\n" % (l['id'], properties))
node['lock'] = 1
self.lockNode(l)
self.fd.write(level * self.padding_str + "}\n")
self.__opened_braces.pop()
else:
self.fd.write(self.padding_str + "node%d %s;\n" % (node['id'], properties))
node['lock'] = 1
def browse(self, node, cb):
"""
Browses nodes in a tree and calls cb, providing node parameters
"""
children = self.__has_children(node)
if children:
cb(self.__browse_level, node, str(children))
for c in children:
self.__browse_level += 1
self.browse(c, cb)
else:
cb(self.__browse_level, node, None)
self.__browse_level = 0
# if debug:
# print "This node is not a child: " + str(node)
def dotLinks(self, node):
"""
Write links between nodes
"""
for l in self.__links:
if l['from_node'] == node:
# Check if we link from a cluster
children = self.__has_children(node)
if children:
if l['cl_from_node']:
src = l['cl_from_node']['id']
else:
src = children[0]['id']
cluster_src = node['id']
else:
src = node['id']
cluster_src = ''
# Check if we link to a cluster
children = self.__has_children(l['to_node'])
if children:
if l['cl_to_node']:
dst = l['cl_to_node']['id']
else:
dst = children[0]['id']
cluster_dst = l['to_node']['id']
else:
dst = l['to_node']['id']
cluster_dst = ''
self.fd.write("node%d->node%d" % (src, dst))
props = self.propertiesLinkAsStringGet(l)
# Build new properties if we link from or to a cluster
if cluster_src:
if props:
props += ','
props += "ltail=cluster%d" % cluster_src
if cluster_dst:
if props:
props += ','
props += "lhead=cluster%d" % cluster_dst
if props:
self.fd.write(" [%s]" % props)
self.fd.write(";\n")
def dot(self, name=None, fd=stdout):
"""
Translates the datastructure into dot
"""
try:
self.fd = fd
self.fd.write("/* Generated by GvGen v.%s (http://software.inl.fr/trac/wiki/GvGen) */\n\n" % (gvgen_version))
if name is None:
self.fd.write("digraph G {\n")
else:
self.fd.write("digraph %s {\n" %name)
if self.options:
self.fd.write(self.options+"\n")
# We write parents and children in order
for e in self.__nodes:
if debug_tree_unroll:
self.browse(e, self.tree_debug)
else:
self.browse(e, self.tree)
# We write the connection between nodes
for e in self.__nodes:
self.dotLinks(e)
# We put all the nodes belonging to the parent
self.fd.write("}\n")
finally:
# Remove our reference to file descriptor
self.fd = None
if __name__ == "__main__":
graph = GvGen()
graph.smart_mode = 1
graph.styleDefaultAppend("color","blue")
parents = graph.newItem("Parents")
father = graph.newItem("Bob", parents)
mother = graph.newItem("Alice", parents)
children = graph.newItem("Children")
child1 = graph.newItem("Carol", children)
child2 = graph.newItem("Eve", children)
child3 = graph.newItem("Isaac", children)
postman = graph.newItem("Postman")
graph.newLink(father,child1)
graph.newLink(child1, father)
graph.newLink(father, child1)
graph.newLink(father,child2)
graph.newLink(mother,child2)
myl = graph.newLink(mother,child1)
graph.newLink(mother,child3)
graph.newLink(postman,child3,"Email is safer")
graph.newLink(parents, postman) # Cluster link
graph.propertyForeachLinksAppend(parents, "color", "blue")
graph.propertyForeachLinksAppend(father, "label", "My big link")
graph.propertyForeachLinksAppend(father, "color", "red")
graph.propertyAppend(postman, "color", "red")
graph.propertyAppend(postman, "fontcolor", "white")
graph.styleAppend("link", "label", "mylink")
graph.styleAppend("link", "color", "green")
graph.styleApply("link", myl)
graph.propertyAppend(myl, "arrowhead", "empty")
graph.styleAppend("Post", "color", "blue")
graph.styleAppend("Post", "style", "filled")
graph.styleAppend("Post", "shape", "rectangle")
graph.styleApply("Post", postman)
graph.dot()


@@ -96,6 +96,8 @@ class iface():
AUTO = 0x1
HOT_PLUG = 0x2
version = '0.1'
def __init__(self):
self.name = None
@@ -106,6 +108,7 @@ class iface():
self.state = ifaceState.NEW
self.status = ifaceStatus.UNKNOWN
self.flags = 0x0
self.priv_flags = 0x0
self.refcnt = 0
# dependents that are listed as in the
# config file
@@ -157,6 +160,12 @@ class iface():
return self.config
def is_config_present(self):
addr_method = self.get_addr_method()
if addr_method is not None:
if (addr_method.find('dhcp') != -1 or
addr_method.find('dhcp6') != -1):
return True
if self.config is None:
return False
@@ -320,6 +329,22 @@ class iface():
attr_status_str = ' (error)'
self.config[attr_name] = attr_value + attr_status_str """
def is_different(self, dstiface):
if self.name != dstiface.name: return True
if self.addr_family != dstiface.addr_family: return True
if self.addr_method != dstiface.addr_method: return True
if self.auto != dstiface.auto: return True
if self.classes != dstiface.classes: return True
if any(True for k in self.config if k not in dstiface.config):
return True
if any(True for k,v in self.config.items()
if v != dstiface.config.get(k)): return True
return False
def dump_raw(self, logger):
indent = ' '
print (self.raw_lines[0])
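
The is_different() method added above treats two iface objects as different when the name, address family or method, auto flag, classes, or any config key or value differs; reload uses it (with --down-changediface) to decide which interfaces to bring down before bringing the new config up. A rough illustration, assuming the 'address' config key and the import path (iface(), set_name() and set_config() do appear in this commit's diffs):

from iface import iface    # import path is an assumption

old = iface()
old.set_name('swp1')
old.set_config({'address': '10.0.0.1/24'})

new = iface()
new.set_name('swp1')
new.set_config({'address': '10.0.0.2/24'})

# same interface name, but the 'address' value changed, so an
# ifreload --down-changediface run would down 'swp1' first
print old.is_different(new)    # True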


@@ -29,8 +29,13 @@ class ifupdownMain():
ALL = False
STATE_CHECK = False
modules_dir='/etc/network'
builtin_modules_dir='/usr/share/ifupdownaddons'
# priv flags to mark iface objects
BUILTIN = 0x1
NOCONFIG = 0x2
scripts_dir='/etc/network'
addon_modules_dir='/usr/share/ifupdownaddons'
addon_modules_configfile='/etc/network/.addons.conf'
# iface dictionary in the below format:
# { '<ifacename>' : [<ifaceobject1>, <ifaceobject2> ..] }
@@ -50,19 +55,30 @@ class ifupdownMain():
# Dictionary representing operation, sub operation and modules
# for every sub operation
operations = { 'up' :
OrderedDict([('pre-up', OrderedDict({})),
('up' , OrderedDict({})),
('post-up' , OrderedDict({}))]),
OrderedDict([('pre-up', []),
('up' , []),
('post-up' , [])]),
'query-checkcurr' :
OrderedDict([('query-checkcurr', OrderedDict({}))]),
OrderedDict([('query-checkcurr', [])]),
'query-running' :
OrderedDict([('query-running', OrderedDict({}))]),
OrderedDict([('query-running', [])]),
'down' :
OrderedDict([('pre-down', OrderedDict({})),
('down' , OrderedDict({})),
('post-down' , OrderedDict({}))])}
OrderedDict([('pre-down', []),
('down' , []),
('post-down' , [])])}
# For old style /etc/network/ bash scripts
operations_compat = { 'up' :
OrderedDict([('pre-up', []),
('up' , []),
('post-up' , [])]),
'down' :
OrderedDict([('pre-down', []),
('down' , []),
('post-down' , [])])}
def __init__(self, force=False, dryrun=False, nowait=False,
@@ -76,13 +92,16 @@ class ifupdownMain():
self.PERFMODE = perfmode
self.WITH_DEPENDS = withdepends
self.CACHE = cache
self._DELETE_DEPENDENT_IFACES_WITH_NOCONFIG = True
self._DELETE_DEPENDENT_IFACES_WITH_NOCONFIG = False
self.ifaces = OrderedDict()
self.njobs = njobs
self.pp = pprint.PrettyPrinter(indent=4)
self.load_modules_builtin(self.builtin_modules_dir)
self.load_modules(self.modules_dir)
self.modules = OrderedDict({})
self.load_addon_modules_config()
self.load_addon_modules(self.addon_modules_dir)
self.load_scripts(self.scripts_dir)
self.dependency_graph = {}
try:
self.statemanager = stateManager()
@@ -116,8 +135,6 @@ class ifupdownMain():
return self.FORCE
def set_dryrun(self, dryrun):
if dryrun == True:
self.logger.debug('setting dryrun to true')
self.DRYRUN = dryrun
def get_dryrun(self):
@@ -133,6 +150,12 @@ class ifupdownMain():
del self.ifaceobjdict
self.ifaceobjdict = ifaceobjdict
def set_dependency_graph(self, dependency_graph):
self.dependency_graph = dependency_graph
def get_dependency_graph(self):
return self.dependency_graph
def set_perfmode(self, perfmode):
if perfmode == True:
self.logger.debug('setting perfmode to true')
@@ -196,7 +219,6 @@ class ifupdownMain():
def get_ifaceobjcurr(self, ifacename):
return self.ifaceobjcurrdict.get(ifacename)
def get_ifaceobjrunning(self, ifacename):
return self.ifaceobjrunningdict.get(ifacename)
@@ -216,18 +238,21 @@ class ifupdownMain():
max = i.get_refcnt()
return max
def create_n_save_ifaceobj(self, ifacename, increfcnt=False):
def create_n_save_ifaceobj(self, ifacename, priv_flags=None,
increfcnt=False):
""" creates and returns a fake vlan iface object.
This was added to support creation of simple vlan
devices without any user specified configuration.
"""
ifaceobj = iface()
ifaceobj.set_name(ifacename)
ifaceobj.priv_flags = priv_flags
ifaceobj.set_auto()
if increfcnt == True:
ifaceobj.inc_refcnt()
self.ifaceobjdict[ifacename] = [ifaceobj]
def is_builtin_iface(self, ifacename):
def is_iface_builtin(self, ifacename):
""" Returns true if iface name is a builtin interface.
A builtin interface is an interface which ifupdown understands.
@@ -238,6 +263,31 @@ class ifupdownMain():
return True
return False
def is_ifaceobj_builtin(self, ifaceobj):
""" Returns true if iface name is a builtin interface.
A builtin interface is an interface which ifupdown understands.
The following are currently considered builtin ifaces:
- vlan interfaces in the format <ifacename>.<vlanid>
"""
if (ifaceobj.priv_flags & self.BUILTIN) != 0:
return True
return False
def is_ifaceobj_noconfig(self, ifaceobj):
""" Returns true if iface name did not have a user defined config.
These interfaces appear only when they are dependents of interfaces
which have user defined config
"""
if (ifaceobj.priv_flags & self.NOCONFIG) != 0:
return True
return False
def preprocess_dependency_list(self, dlist, op):
""" We go through the dependency list and
delete or add interfaces from the interfaces dict by
@@ -254,15 +304,16 @@ class ifupdownMain():
present in the ifacesdict
"""
del_list = []
create_list = []
self.logger.debug('pre-processing dependency list: %s' %list(dlist))
for d in dlist:
dilist = self.get_iface_objs(d)
if dilist == None:
if (self.is_builtin_iface(d) == True or
self._DELETE_DEPENDENT_IFACES_WITH_NOCONFIG == False):
create_list.append(d)
if self.is_iface_builtin(d) == True:
self.create_n_save_ifaceobj(d, self.BUILTIN, True),
elif self._DELETE_DEPENDENT_IFACES_WITH_NOCONFIG == False:
# create fake devices for all dependents that don't
# have config
self.create_n_save_ifaceobj(d, self.NOCONFIG, True),
else:
del_list.append(d)
else:
@@ -272,48 +323,40 @@ class ifupdownMain():
for d in del_list:
dlist.remove(d)
# create fake devices to all dependents that dont have config
map(lambda i: self.create_n_save_ifaceobj(i, increfcnt=True),
create_list)
self.logger.debug('After Processing dependency list: %s'
%list(dlist))
def get_dependents(self, ifaceobj, op):
""" Gets iface dependents by calling into respective modules """
dlist = None
self.logger.debug('%s: ' %ifaceobj.get_name() + 'getting dependency')
# Get dependents for interface by querying respective modules
subopdict = self.operations.get(op)
for subop, mdict in subopdict.items():
for mname, mdata in mdict.items():
if mdata.get('ftype') == 'pmodule':
module = mdata.get('module')
if op == 'query-running':
if (hasattr(module,
'get_dependent_ifacenames_running') == False):
continue
dlist = module.get_dependent_ifacenames_running(
ifaceobj)
else:
if (hasattr(module,
'get_dependent_ifacenames') == False):
continue
dlist = module.get_dependent_ifacenames(ifaceobj,
for subop, mlist in subopdict.items():
for mname in mlist:
module = self.modules.get(mname)
if op == 'query-running':
if (hasattr(module,
'get_dependent_ifacenames_running') == False):
continue
dlist = module.get_dependent_ifacenames_running(ifaceobj)
else:
if (hasattr(module, 'get_dependent_ifacenames') == False):
continue
dlist = module.get_dependent_ifacenames(ifaceobj,
self.ifaceobjdict.keys())
if dlist is not None:
ifaceobj.set_realdev_dependents(dlist[:])
self.logger.debug('%s: ' %ifaceobj.get_name() +
if dlist is not None and len(dlist) > 0:
ifaceobj.set_realdev_dependents(dlist[:])
self.logger.debug('%s: ' %ifaceobj.get_name() +
'got dependency list: %s' %str(dlist))
break
break
return dlist
def generate_dependency_info(self, ifacenames, dependency_graph, op):
def populate_dependency_info(self, ifacenames, op):
""" recursive function to generate iface dependency info """
self.logger.debug('generating dependency info for %s' %str(ifacenames))
if ifacenames is None:
ifacenames = self.ifaceobjdict.keys()
self.logger.debug('populating dependency info for %s' %str(ifacenames))
iqueue = deque(ifacenames)
while iqueue:
@@ -333,11 +376,13 @@ class ifupdownMain():
if dlist is not None:
self.preprocess_dependency_list(dlist, op)
self.logger.debug('%s: dependency list after processing: %s'
%(i, str(dlist)))
ifaceobj.set_dependents(dlist)
[iqueue.append(d) for d in dlist]
if dependency_graph.get(i) is None:
dependency_graph[i] = dlist
if self.dependency_graph.get(i) is None:
self.dependency_graph[i] = dlist
def is_valid_state_transition(self, ifname, to_be_state):
return self.statemanager.is_valid_state_transition(ifname,
@@ -392,54 +437,56 @@ class ifupdownMain():
' %s' %mname + ' of type %s' %mftype)
def load_modules_builtin(self, modules_dir):
def load_addon_modules_config(self):
with open(self.addon_modules_configfile, 'r') as f:
lines = f.readlines()
for l in lines:
litems = l.rstrip(' \n').split(',')
operation = litems[0]
mname = litems[1]
if operation.find('up') != -1:
self.operations['up'][operation].append(mname)
elif operation.find('down') != -1:
self.operations['down'][operation].append(mname)
def load_addon_modules(self, modules_dir):
""" load python modules from modules_dir
Default modules_dir is /usr/share/ifupdownmodules
"""
self.logger.info('loading builtin modules from %s' %modules_dir)
if not modules_dir in sys.path:
sys.path.append(modules_dir)
sys.path.append(modules_dir)
try:
module_list = os.listdir(modules_dir)
for module in module_list:
if re.search('.*\.pyc', module, 0) != None:
continue
mname, mext = os.path.splitext(module)
if mext is not None and mext == '.py':
self.logger.info('loading ' + modules_dir + '/' + module)
try:
m = __import__(mname)
mclass = getattr(m, mname)
except:
raise
minstance = mclass(force=self.get_force(),
dryrun=self.get_dryrun(),
nowait=self.get_nowait(),
perfmode=self.get_perfmode(),
cache=self.get_cache())
ops = minstance.get_ops()
for op in ops:
if re.search('query', op) is not None:
self.save_module(op, op, mname, 'pmodule',
minstance)
elif re.search('up', op) is not None:
self.save_module('up', op, mname, 'pmodule',
minstance)
else:
self.save_module('down', op, mname,
'pmodule', minstance)
for op, opdict in self.operations.items():
for subop, mlist in opdict.items():
for mname in mlist:
if self.modules.get(mname) is not None:
continue
mpath = modules_dir + '/' + mname + '.py'
if os.path.exists(mpath) == True:
try:
m = __import__(mname)
mclass = getattr(m, mname)
except:
raise
minstance = mclass(force=self.get_force(),
dryrun=self.get_dryrun(),
nowait=self.get_nowait(),
perfmode=self.get_perfmode(),
cache=self.get_cache())
self.modules[mname] = minstance
except:
raise
# Assign all modules to query operations
self.operations['query-checkcurr']['query-checkcurr'] = self.modules.keys()
self.operations['query-running']['query-running'] = self.modules.keys()
def load_modules(self, modules_dir):
def load_scripts(self, modules_dir):
""" loading user modules from /etc/network/.
Note that previously loaded python modules override modules found
@@ -447,45 +494,21 @@ class ifupdownMain():
"""
self.logger.info('loading user modules from %s' %modules_dir)
for op, subops in self.operations.items():
if re.search('query', op) is not None:
continue
self.logger.info('looking for user scripts under %s' %modules_dir)
for op, subops in self.operations_compat.items():
for subop in subops.keys():
msubdir = modules_dir + '/if-%s.d' %subop
self.logger.info('loading modules under %s ...' %msubdir)
self.logger.info('loading scripts under %s ...' %msubdir)
try:
module_list = os.listdir(msubdir)
for module in module_list:
if re.search('.*\.pyc', module, 0) != None:
continue
mname, mext = os.path.splitext(module)
if mext is not None and mext == '.py':
self.logger.debug('loading ' + msubdir + '/' + module)
try:
m = imp.load_source(module,
msubdir + '/' + module)
mclass = getattr(m, mname)
except:
raise
minstance = mclass()
self.save_module(op, subop, mname, 'pmodule',
minstance)
else:
self.save_module(op, subop, mname, 'script',
if self.modules.get(module) is not None:
continue
self.operations_compat[op][subop].append(
msubdir + '/' + module)
except:
raise
#self.logger.debug('modules ...')
#self.pp.pprint(self.operations)
# For query, we add a special entry, basically use all 'up' modules
self.operations['query'] = self.operations.get('up')
def conv_iface_namelist_to_objlist(self, intf_list):
for intf in intf_list:
iface_obj = self.get_iface(intf)
@@ -498,83 +521,88 @@ class ifupdownMain():
def run_without_dependents(self, op, ifacenames):
runifacenames = []
ifaceSched = ifaceScheduler(force=self.FORCE)
""" Run interfaces without executing their dependents.
self.logger.debug('run_without_dependents for op %s' %op +
' for %s' %str(ifacenames))
Even though we are running without dependents here, we will have
to cover the builtin dependents. Because the only way builtin
devices are operated on is when they are seen as dependents.
So we include them. And also we need to execute the user provided
interface names in order of their dependencies.
So, we created a special dependency_graph with interfaces matching
the above constraints here
if self.ALL is True you are better off using the default
dependency graph self.dependency_graph that carries all dependents
"""
if ifacenames == None:
raise ifupdownInvalidValue('no interfaces found')
# Even though we are running without dependents here, we will have
# to cover the builtin dependents. Because the only way builtin
# devices are created is when they are seen as dependents
self.logger.debug('run_without_dependents for op %s' %op +
' for %s' %str(ifacenames))
dependency_graph = {}
indegrees = {}
ifaceSched = ifaceScheduler(force=self.FORCE)
for i in ifacenames:
dlist = self.get_dependents(self.get_iface_obj_first(i), op)
if dlist is None:
runifacenames.append(i)
if dependency_graph.get(i) is not None:
continue
# If these dependents are builtin devices which dont require
# any config section, we must include them in
builtin_dependents = [d for d in dlist
if self.get_iface_objs(d) is None and
self.is_builtin_iface(d)]
if len(builtin_dependents) != 0:
self.logger.info('Adding builtin interfaces %s '
'to the list of interfaces to bringup '
%builtin_dependents)
map(lambda x: self.create_n_save_ifaceobj(x),
builtin_dependents)
runifacenames += builtin_dependents
runifacenames.append(i)
dependency_graph[i] = []
indegrees[i] = 0
ifaceobj = self.get_iface_obj_first(i)
dlist = ifaceobj.get_dependents()
if dlist is None:
continue
return ifaceSched.run_iface_list(self, runifacenames, op)
for d in dlist:
ifaceobj = self.get_iface_obj_first(d)
if (self.is_ifaceobj_builtin(ifaceobj) == True or
self.is_ifaceobj_noconfig(ifaceobj) == True or
d in ifacenames):
dependency_graph[i].append(d)
dependency_graph[d] = None
indegrees[d] = 1
self.logger.debug('dependency graph: %s' %str(dependency_graph))
ifaceSched.run_iface_dependency_graph(self, dependency_graph, op,
indegrees,
graphsortall=True)
def run_with_dependents(self, op, ifacenames):
dependency_graph = {}
ret = 0
self.logger.debug('run_with_dependents for op %s'
%op + ' for %s' %str(ifacenames))
self.logger.debug('running \'%s\' with dependents for %s'
%(op, str(ifacenames)))
ifaceSched = ifaceScheduler()
if ifacenames is None:
ifacenames = self.ifaceobjdict.keys()
# generate dependency graph of interfaces
self.generate_dependency_info(ifacenames, dependency_graph, op)
if self.logger.isEnabledFor(logging.DEBUG) == True:
self.logger.debug('dependency graph:')
self.pp.pprint(dependency_graph)
self.logger.debug(self.pp.pformat(self.dependency_graph))
if self.njobs > 1:
ret = ifaceSched.run_iface_dependency_graph_parallel(self,
dependency_graph, op)
self.dependency_graph, op)
else:
ret = ifaceSched.run_iface_dependency_graph(self,
dependency_graph, op)
self.dependency_graph, op)
return ret
def print_dependency(self, op, ifacenames, format):
dependency_graph = {}
if ifacenames is None:
ifacenames = self.ifaceobjdict.keys()
# generate dependency graph of interfaces
self._DELETE_DEPENDENT_IFACES_WITH_NOCONFIG = False
self.generate_dependency_info(ifacenames, dependency_graph, op)
if format == 'list':
self.pp.pprint(dependency_graph)
self.pp.pprint(self.dependency_graph)
elif format == 'dot':
indegrees = {}
map(lambda i: indegrees.update({i :
self.get_iface_refcnt(i)}),
dependency_graph.keys())
graph.generate_dots(dependency_graph, indegrees)
self.dependency_graph.keys())
graph.generate_dots(self.dependency_graph, indegrees)
def validate_ifaces(self, ifacenames):
""" validates interface list for config existance.
@@ -590,7 +618,7 @@ class ifupdownMain():
err_iface += ' ' + i
if len(err_iface) != 0:
self.logger.error('did not find interfaces: %s' %err_iface)
self.logger.error('could not find interfaces: %s' %err_iface)
return -1
return 0
@@ -657,7 +685,6 @@ class ifupdownMain():
""" main ifupdown run method """
if auto == True:
self.logger.debug('setting flag ALL')
self.ALL = True
self.WITH_DEPENDS = True
@@ -706,6 +733,8 @@ class ifupdownMain():
raise Exception('no ifaces found matching ' +
'given allow lists')
self.populate_dependency_info(filtered_ifacenames, op)
if printdependency is not None:
self.print_dependency(op, filtered_ifacenames, printdependency)
return
@@ -744,11 +773,12 @@ class ifupdownMain():
if auto == True:
self.logger.debug('setting flag ALL')
self.ALL = True
self.WITH_DEPENDS = True
if op == 'query-running':
self._DELETE_DEPENDENT_IFACES_WITH_NOCONFIG = False
# create fake devices to all dependents that dont have config
map(lambda i: self.create_n_save_ifaceobj(i), ifacenames)
map(lambda i: self.create_n_save_ifaceobj(i, self.NOCONFIG),
ifacenames)
else:
try:
self.read_iface_config()
@@ -786,9 +816,12 @@ class ifupdownMain():
return self.print_ifaceobjs_saved_state_detailed_pretty(
filtered_ifacenames)
if printdependency is not None:
self.print_dependency(op, filtered_ifacenames, printdependency)
return
self.populate_dependency_info(filtered_ifacenames, op)
#if printdependency is not None:
# self.print_dependency(op, filtered_ifacenames, printdependency)
# return
if self.WITH_DEPENDS == True:
self.run_with_dependents(op, filtered_ifacenames)
@@ -804,28 +837,36 @@ class ifupdownMain():
self.print_ifaceobjsrunning_pretty(filtered_ifacenames)
return
def reload(self, auto=False, allow=None,
ifacenames=None, excludepats=None):
ifacenames=None, excludepats=None, downchangediface=False):
""" main ifupdown run method """
allow_classes = []
self.logger.debug('reloading interface config ..')
if auto == True:
self.logger.debug('setting flag ALL')
self.ALL = True
self.WITH_DEPENDS = True
try:
# Read the current interface config
self.read_iface_config()
except Exception, e:
raise
# Save a copy of new iface objects
# generate dependency graph of interfaces
self.populate_dependency_info(ifacenames, 'up')
# Save a copy of new iface objects and dependency_graph
new_ifaceobjdict = self.get_ifaceobjdict()
new_dependency_graph = self.get_dependency_graph()
if len(self.statemanager.get_ifaceobjdict()) > 0:
# if old state is present, read old state and mark op for 'down'
# followed by 'up' aka: reload
# old interface config is read into self.ifaceobjdict
#
self.read_old_iface_config()
op = 'reload'
else:
@@ -833,20 +874,67 @@ class ifupdownMain():
op = 'up'
if ifacenames is None: ifacenames = self.ifaceobjdict.keys()
if (op == 'reload' and ifacenames is not None and
len(ifacenames) != 0):
filtered_ifacenames = [i for i in ifacenames
if self.iface_whitelisted(auto, allow_classes,
excludepats, i) == True]
ifacedownlist = Set(filtered_ifacenames).difference(
Set(new_ifaceobjdict.keys()))
# Generate the interface down list
# Interfaces that go into the down list:
# - interfaces that were present in last config and are not
# present in the new config
# - interfaces that were changed between the last and current
# config
#
ifacedownlist = []
for ifname, lastifobjlist in self.ifaceobjdict.items():
objidx = 0
# If interface is not present in the new file
# append it to the down list
newifobjlist = new_ifaceobjdict.get(ifname)
if newifobjlist == None:
ifacedownlist.append(ifname)
continue
if downchangediface == False:
continue
# If interface has changed between the current file
# and the last installed append it to the down list
if len(newifobjlist) != len(lastifobjlist):
ifacedownlist.append(ifname)
continue
# compare object list
for objidx in range(0, len(lastifobjlist)):
oldobj = lastifobjlist[objidx]
newobj = newifobjlist[objidx]
if newobj.is_different(oldobj) == True:
ifacedownlist.append(ifname)
continue
#ifacedownlist = Set(filtered_ifacenames).difference(
# Set(new_ifaceobjdict.keys()))
if ifacedownlist is not None and len(ifacedownlist) > 0:
self.logger.debug('bringing down interfaces: %s'
self.logger.info('Executing down on interfaces: %s'
%str(ifacedownlist))
if self.WITH_DEPENDS == True:
self.run_without_dependents('down', ifacedownlist)
else:
# Generate dependency info for old config
self.populate_dependency_info(ifacedownlist, 'down')
if len(ifacedownlist) == len(self.ifaceobjdict):
# if you are downing all interfaces, it's better to run
# with dependents
self.run_with_dependents('down', ifacedownlist)
else:
# if not, down only the interfaces that we have in the
# down list
self.run_without_dependents('down', ifacedownlist)
# Update persistent iface states
try:
@@ -859,20 +947,24 @@ class ifupdownMain():
t = sys.exc_info()[2]
traceback.print_tb(t)
self.logger.warning('error saving state (%s)' %str(e))
else:
self.logger.debug('no interfaces to down ..')
# Now, run up with new dict
# Now, run up with new config dict
self.set_ifaceobjdict(new_ifaceobjdict)
self.set_dependency_graph(new_dependency_graph)
ifacenames = self.ifaceobjdict.keys()
filtered_ifacenames = [i for i in ifacenames
if self.iface_whitelisted(auto, allow_classes,
excludepats, i) == True]
self.logger.debug('bringing up interfaces: %s'
%str(filtered_ifacenames))
self.logger.info('Executing up on interfaces: %s'
%str(filtered_ifacenames))
if self.WITH_DEPENDS == True:
self.run_without_dependents('up', filtered_ifacenames)
else:
self.run_with_dependents('up', filtered_ifacenames)
else:
self.run_without_dependents('up', filtered_ifacenames)
# Update persistent iface states
try:
@@ -933,7 +1025,8 @@ class ifupdownMain():
elif ifaceobj.get_status() == ifaceStatus.ERROR:
ret = 1
if ifaceobj.is_config_present() == False:
if (self.is_iface_builtin(i) or
ifaceobj.is_config_present() == False):
continue
if format is None or format == 'nwifaces':
@@ -955,6 +1048,8 @@ class ifupdownMain():
print 'iface %s' %ifaceobj.get_name() + ' (not found)\n'
continue
#if (self.is_iface_builtin(i) and
# ifaceobj.is_config_present() == False):
if ifaceobj.is_config_present() == False:
continue
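
The ifupdownMain changes above also introduce /etc/network/.addons.conf, read by load_addon_modules_config(): each line is '<sub-operation>,<module-name>', and the module name is appended to the matching 'up' or 'down' sub-operation list in self.operations. A hypothetical example of the file (the module names are guesses based on the commit message, not the shipped defaults):

pre-up,link
up,address
up,dhcp
down,dhcp
down,address
post-down,link

Each listed module is then loaded from /usr/share/ifupdownaddons/<module-name>.py by load_addon_modules(), and every loaded module is additionally assigned to the query-checkcurr and query-running operations.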


@@ -65,12 +65,10 @@ class networkInterfaces():
def process_source(self, lines, cur_idx, lineno):
# Support regex
self.logger.debug('process_source ..%s' %lines[cur_idx])
self.logger.debug('processing sourced line ..\'%s\'' %lines[cur_idx])
sourced_file = lines[cur_idx].split(' ', 2)[1]
if sourced_file is not None:
self.logger.debug('process_source ..%s' %sourced_file)
for f in glob.glob(sourced_file):
self.logger.info('Reading sourced file %s' %f)
self.read_file(f)
else:
self.logger.warn('unable to read source line at %d', lineno)
@@ -129,7 +127,11 @@ class networkInterfaces():
lines_consumed = line_idx - cur_idx
# Create iface object
ifaceobj.set_name(ifacename)
if ifacename.find(':') != -1:
ifaceobj.set_name(ifacename.split(':')[0])
else:
ifaceobj.set_name(ifacename)
ifaceobj.set_config(iface_config)
ifaceobj.generate_env()
if len(iface_attrs) > 2:
@@ -146,7 +148,7 @@ class networkInterfaces():
ifaceobj.set_class(c)
# Call iface found callback
self.logger.debug('saving interface %s' %ifaceobj.get_name())
#self.logger.debug('saving interface %s' %ifaceobj.get_name())
self.callbacks.get('iface_found')(ifaceobj)
return lines_consumed # Return next index
@@ -214,8 +216,8 @@ class networkInterfaces():
try:
from mako.template import Template
except:
self.logger.warning('template engine mako not found ' +
'skipping template parsing');
self.logger.warning('template engine mako not found. ' +
'skip template parsing ..');
return textdata
t = Template(text=textdata, output_encoding='utf-8')
@@ -226,7 +228,7 @@ class networkInterfaces():
if ifaces_file == None:
ifaces_file=self.ifaces_file
self.logger.debug('reading ifaces_file %s' %ifaces_file)
self.logger.debug('reading interfaces file %s' %ifaces_file)
f = open(ifaces_file)
filedata = f.read()
f.close()
@@ -241,5 +243,4 @@ class networkInterfaces():
def load(self, filename=None):
self.logger.debug('loading ifaces file ..')
return self.read_file(filename)


@@ -35,32 +35,30 @@ class ifaceScheduler(ifupdownBase):
self.__class__.__name__)
self.FORCE = force
def run_iface_subop(self, ifupdownobj, ifaceobj, op, subop, mdict, cenv):
def run_iface_subop(self, ifupdownobj, ifaceobj, op, subop, mlist, cenv):
""" Runs sub operation on an interface """
self.logger.debug('%s: ' %ifaceobj.get_name() + 'op %s' %op +
' subop = %s' %subop)
for mname, mdata in mdict.items():
m = mdata.get('module')
for mname in mlist:
m = ifupdownobj.modules.get(mname)
err = 0
try:
if (mdata.get('ftype') == 'pmodule' and
hasattr(m, 'run') == True):
self.logger.debug('%s: ' %ifaceobj.get_name() +
'running module %s' %mname +
' op %s' %op + ' subop %s' %subop)
if hasattr(m, 'run') == True:
self.logger.debug('%s: %s : running module %s'
%(ifaceobj.get_name(), subop, mname))
if op == 'query-checkcurr':
m.run(ifaceobj, subop, query_check=True,
# Don't check state if the interface object was
# auto-generated
if ((ifaceobj.priv_flags & ifupdownobj.BUILTIN) != 0 or
(ifaceobj.priv_flags & ifupdownobj.NOCONFIG) != 0):
continue
m.run(ifaceobj, subop,
query_ifaceobj=ifupdownobj.create_ifaceobjcurr(
ifaceobj))
else:
m.run(ifaceobj, subop)
else:
self.logger.debug('%s: ' %ifaceobj.get_name() +
'running script %s' %mname +
' op %s' %op + ' subop %s' %subop)
self.exec_command(m, cmdenv=cenv)
except Exception, e:
err = 1
self.log_error(str(e))
@@ -75,6 +73,18 @@ class ifaceScheduler(ifupdownBase):
ifaceState.from_str(subop),
ifaceStatus.SUCCESS)
# execute /etc/network/ scripts
subop_dict = ifupdownobj.operations_compat.get(op)
if subop_dict is None: return
for mname in subop_dict.get(subop):
self.logger.debug('%s: %s : running script %s'
%(ifaceobj.get_name(), subop, mname))
try:
self.exec_command(mname, cmdenv=cenv)
except Exception, e:
err = 1
self.log_error(str(e))
def run_iface_subops(self, ifupdownobj, ifaceobj, op):
""" Runs all sub operations on an interface """
@@ -84,8 +94,8 @@ class ifaceScheduler(ifupdownBase):
# Each sub operation has a module list
subopdict = ifupdownobj.operations.get(op)
for subop, mdict in subopdict.items():
self.run_iface_subop(ifupdownobj, ifaceobj, op, subop, mdict, cenv)
for subop, mlist in subopdict.items():
self.run_iface_subop(ifupdownobj, ifaceobj, op, subop, mlist, cenv)
def run_iface(self, ifupdownobj, ifacename, op):
@@ -134,7 +144,8 @@ class ifaceScheduler(ifupdownBase):
sorted_by_dependency=False):
""" Runs interface list through sub operation handler. """
self.logger.debug('running sub operation %s on all given interfaces' %op)
self.logger.debug('running sub operation %s on all given interfaces'
%subop)
iface_run_queue = deque(ifacenames)
for i in range(0, len(iface_run_queue)):
if op == 'up':
@@ -176,9 +187,6 @@ class ifaceScheduler(ifupdownBase):
'up'
"""
self.logger.debug('run_iface_list_stages: running interface list for %s'
%op)
# Each sub operation has a module list
subopdict = ifupdownobj.operations.get(op)
for subop, mdict in subopdict.items():
@@ -186,24 +194,29 @@ class ifaceScheduler(ifupdownBase):
sorted_by_dependency)
def run_iface_dependency_graph(self, ifupdownobj, dependency_graph,
operation):
def run_iface_dependency_graph(self, ifupdownobj, dependency_graphs,
operation, indegrees=None,
graphsortall=False):
""" runs interface dependency graph """
indegrees = OrderedDict()
self.logger.debug('creating indegree array ...')
for ifacename in dependency_graph.keys():
indegrees[ifacename] = ifupdownobj.get_iface_refcnt(ifacename)
if indegrees is None:
indegrees = OrderedDict()
for ifacename in dependency_graphs.keys():
indegrees[ifacename] = ifupdownobj.get_iface_refcnt(ifacename)
if self.logger.isEnabledFor(logging.DEBUG) == True:
self.logger.debug('indegree array :')
ifupdownobj.pp.pprint(indegrees)
self.logger.debug(ifupdownobj.pp.pformat(indegrees))
try:
self.logger.debug('calling topological sort on the graph ...')
sorted_ifacenames = graph.topological_sort_graphs(
dependency_graph, indegrees)
if graphsortall == True:
sorted_ifacenames = graph.topological_sort_graphs_all(
dependency_graphs, indegrees)
else:
sorted_ifacenames = graph.topological_sort_graphs(
dependency_graphs, indegrees)
except Exception:
raise


@@ -19,7 +19,11 @@ def run(args, op):
if len(args.iflist) == 0:
iflist = None
cachearg=False if iflist is not None or args.nocache == True else True
cachearg=(False if (iflist is not None or
args.nocache == True or
args.perfmode == True)
else True)
logger.debug('creating ifupdown object ..')
if op == 'up' or op == 'down' or op == 'reload':
ifupdown_handle = ifupdownMain(force=args.force,
@@ -35,7 +39,7 @@ def run(args, op):
format=args.format,
cache=cachearg)
logger.debug('calling %s' %op + ' for all interfaces ..')
logger.debug('calling \'%s\'' %op + ' for all interfaces ..')
if op == 'up':
ifupdown_handle.up(args.all, args.CLASS, iflist,
excludepats=args.excludepats,
@@ -54,7 +58,6 @@ def run(args, op):
if iflist is None:
iflist = [i for i in os.listdir('/sys/class/net/')
if os.path.isdir('/sys/class/net/%s' %i) == True]
print iflist
qtype='query-running'
elif args.pretty == True:
qtype='query-pretty'
@@ -62,13 +65,14 @@ def run(args, op):
qtype='query'
ifupdown_handle.query(qtype, args.all, args.CLASS, iflist,
excludepats=args.excludepats)
excludepats=args.excludepats,
printdependency=args.printdependency)
elif op == 'reload':
if iflist is not None:
raise Exception('iflist is currently not supported with reload')
ifupdown_handle.reload(args.all, args.CLASS, iflist,
excludepats=args.excludepats)
excludepats=args.excludepats,
downchangediface=args.downchangediface)
except:
raise
@@ -96,10 +100,12 @@ def deinit():
def update_argparser(argparser):
""" base parser, common to all commands """
argparser.add_argument('-a', '--all', action='store_true',
argparser.add_argument('-a', '--all', action='store_true', required=False,
help='process all interfaces marked \"auto\"')
argparser.add_argument('iflist', metavar='IFACE',
nargs='*', help='interfaces list')
nargs='*', help='interface list separated by spaces')
argparser.add_argument('-v', '--verbose', dest='verbose',
action='store_true', help='verbose')
argparser.add_argument('-d', '--debug', dest='debug',
@@ -111,7 +117,7 @@ def update_argparser(argparser):
argparser.add_argument('--allow', dest='CLASS',
help='ignore non-\"allow-CLASS\" interfaces')
argparser.add_argument('--with-depends', dest='withdepends',
action='store_true', help='run with all dependencies')
action='store_true', help='run with all dependent interfaces')
argparser.add_argument('--perfmode', dest='perfmode',
action='store_true', help=argparse.SUPPRESS)
argparser.add_argument('-j', '--jobs', dest='jobs', type=int,
@@ -119,10 +125,9 @@ def update_argparser(argparser):
argparser.add_argument('--nocache', dest='nocache', action='store_true',
help=argparse.SUPPRESS)
argparser.add_argument('-X', '--exclude', dest='excludepats',
action='append', help='exclude interfaces from the list of '
+ 'interfaces to operate on by a PATTERN '
+ '(note that this option doesn\'t disable mappings)')
action='append',
help='Exclude interfaces from the list of interfaces' +
' to operate on')
def update_ifupdown_argparser(argparser):
""" common arg parser for ifup and ifdown """
@@ -145,7 +150,7 @@ def update_ifdown_argparser(argparser):
def update_ifquery_argparser(argparser):
""" arg parser for ifquery options """
# -l is same as '-a', only hear for backward compatibility
# -l is same as '-a', only here for backward compatibility
argparser.add_argument('-l', '--list', action='store_true', dest='all',
help=argparse.SUPPRESS)
group = argparser.add_mutually_exclusive_group(required=False)
@@ -173,12 +178,20 @@ def update_ifquery_argparser(argparser):
group.add_argument('--pretty', action='store_true', dest='pretty',
help='pretty print config file entries')
argparser.add_argument('--print-dependency',
dest='printdependency', choices=['list', 'dot'],
help=argparse.SUPPRESS)
def update_ifreload_argparser(argparser):
update_ifupdown_argparser(argparser)
argparser.add_argument('--down-changediface', dest='downchangediface',
action='store_true',
help='down interfaces that have changed before bringing them up')
def parse_args(argsv, op):
if op == 'query':
descr = 'interface query'
descr = 'query interfaces (all or interface list)'
else:
descr = 'interface management'
@@ -214,8 +227,12 @@ def main(argv):
# Command line arg parser
args = parse_args(argv[1:], op)
if len(args.iflist) == 0 and args.all == False:
print '\'-a\' option or interface list are required'
exit(1)
if len(args.iflist) > 0 and args.all is True:
print 'interface list cannot be specified with all option'
print '\'-a\' option and interface list are mutually exclusive'
exit(1)
init(args)