1 实验题目
假如你有一个笔友遍天下、爱写信的朋友叫李华,她生活在1972年的UCLA,希望通过ARPANET(世界第一个数据包交换网络,互联网的鼻祖,接入了25个研究机构,共计55条链路。具体拓扑见下图)发送一封Email给位于MIT的李明同学,现在需要你借助Ryu控制器编写Ryu APP帮助她
- 为减少网络中节点的中转,希望找到一条从UCLA到MIT跳数最少的连接,输出经过的路线
- 为了尽快发送Email,希望能找到一条从UCLA到MIT时延最短的连接,输出经过的路线及总的时延,利用Ping包的RTT验证你的结果(此问题选做)
- 实验资料下载
https://www.aliyundrive.com/s/fzR7s2Twe3u
2 实验内容
2.1 跳数最少
2.1.1 实现思路
- 拓扑中存在环路,需要解决ARP包洪泛的问题。利用(dpid, src mac, destination ip)作为键值记录对应的port,每个交换机第一次收到广播的Arp Request时记录下来,下一次收到键值相同但是port不同的Arp Request直接将包丢弃,从而避免了洪泛。
- 对于图的存储及最短路径算法,使用现有的库
networkx
2.1.2 具体实现
- 全局变量
mac_to_port
{dpid: {dst_mac: port}}
,用于控制器流表的下发。arp_table
{ip:mac}
,用于记录ip
与mac
地址的映射关系。sw
{(dpid,src_mac,dst_ip):port}
,用于解决ARP包洪泛问题。net
用于记录实验拓扑的有向图。
- ARP包洪泛问题的解决
- 利用
(dpid, src mac, destination ip)
作为键值记录对应的port,每个交换机第一次收到广播的Arp Request时记录下来,下一次收到键值相同但是port
不同的Arp Request直接将包丢弃,从而避免了洪泛。 - 具体代码如下:
- 利用
if (datapath.id, eth_src, arp_dst_ip) in self.sw: # Break the loop
if self.sw[(datapath.id, eth_src, arp_dst_ip)] != in_port:
out = datapath.ofproto_parser.OFPPacketOut(
datapath=datapath,
buffer_id=datapath.ofproto.OFP_NO_BUFFER,
in_port=in_port,
actions=[], data=None)
datapath.send_msg(out)
return True
else:
self.sw[(datapath.id, eth_src, arp_dst_ip)] = in_port
- 获取实验拓扑
- 新建线程,每隔两秒更新一次实验拓扑,使用现有库
networkx
保存拓扑。 - 具体代码如下:
- 新建线程,每隔两秒更新一次实验拓扑,使用现有库
def _get_topology(self):
while True:
#self.logger.info('\n')
hosts = get_all_host(self)
switch_list = get_all_switch(self)
links_list = get_all_link(self)
switches =[switch.dp.id for switch in switch_list]
self.net.add_nodes_from(switches) # add node
links=[(link.src.dpid,link.dst.dpid,{'port':link.src.port_no}) for link in links_list] # [src_dpid,dst_dpid,port:src_port]
self.net.add_edges_from(links) # add edge
hub.sleep(2) # get topo in 2 seconds
_packet_in_handler()
- 过滤掉lldp包和ipv6包
lldp包和ipv6包存在的情况会导致拓扑结构获取出现问题 - 加入主机到
net
中
将接收到packet_in
报文的mac
地址作为节点加入到net
中,并增加主机到交换机两者间的有向边。 - 使用
networkx.shortest_path()
求取最短路
- 过滤掉lldp包和ipv6包
2.1.3 测试截图
- 启动控制器
sudo ryu-manager short_path.py --observe-links
- 启动加载自定义拓扑文件
sudo mn --custom SDNexp3Topo.py --topo generated --controller remote
- 测试最短路
UCLA ping MIT
控制器端打印链路:
2.2 时延最短
2.2.1 实现思路
对ryu源码进行修改,增加计算获取链路时延功能,重新编译安装ryu,在安装目录下运行sudo python setup.py install
2.2.2 具体实现
在有向图上增加边的权重为时延,即可完成实验任务
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
src_dpid, src_port_no = switches.LLDPPacket.lldp_parse(msg.data)
if self.switches is None:
self.switches = app_manager.lookup_service_brick('switches')
for port in self.switches.ports.keys():
if src_dpid == port.dpid and src_port_no == port.port_no:
delay=self.switches.ports[port].delay*1000
print("src_switch:%s dst_switch:%s delay:%s"%(src_dpid,dpid,delay))
self.lldp_delay[(src_dpid,dpid)] = delay
self.net.add_edge(src_dpid,dpid,weight = delay)
self.net.add_edge(dpid,src_dpid,weight = delay)
2.2.3 测试截图
- 启动控制器
sudo ryu-manager min_delay.py --observe-links
- 启动加载自定义拓扑文件
sudo python SDNexp3Topo.py --controller remote
- 测试最小时延
UCLA ping MIT
控制器端打印链路时延:
3 代码
3.1 拓扑
# SDNexp3Topo.py
"""
Custom topology for Mininet, generated by GraphML-Topo-to-Mininet-Network-Generator.
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.node import Node
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.util import dumpNodeConnections
class GeneratedTopo( Topo ):
    """1972 ARPANET specimen from the Internet Topology Zoo.

    25 switches, one host (research site) hanging off each switch, and 28
    inter-switch links whose delays model the original map.
    """

    # Host (site) names; _HOST_NAMES[i] attaches to switch s(i+1).
    _HOST_NAMES = (
        'ILLINOIS', 'MITRE', 'CARNEGIE', 'CASE', 'ETAC',
        'AFGWC', 'BBN', 'NBS', 'Tinker', 'AMES',
        'RADC', 'McClellan', 'RAND', 'AMES13', 'SDC',
        'BBN15', 'HARVARD', 'SRI', 'UCSB', 'UCLA',
        'Stanford', 'USC', 'UTAH', 'Lincoln', 'MIT',
    )

    # (switch a, switch b, link delay) for every inter-switch link.
    # NOTE: the order matters -- Mininet numbers ports in link-creation order.
    _SWITCH_LINKS = (
        ( 1, 25, '50ms'), ( 1, 23, '34ms'), ( 2,  3, '13ms'), ( 2,  5, '14ms'),
        ( 3,  4, '15ms'), ( 4, 11, '12ms'), ( 4,  6, '17ms'), ( 5,  8, '10ms'),
        ( 7, 25, '18ms'), ( 7, 16, '17ms'), ( 8, 17, '13ms'), ( 9, 22, '14ms'),
        ( 9, 16, '19ms'), (10, 18, '14ms'), (10, 14, '15ms'), (11, 24, '17ms'),
        (12, 18, '40ms'), (12, 23, '44ms'), (13, 20, '15ms'), (13, 21, '18ms'),
        (13, 15, '15ms'), (14, 21, '19ms'), (15, 22, '15ms'), (16, 17, '12ms'),
        (18, 19, '44ms'), (19, 20, '48ms'), (22, 23, '16ms'), (24, 25, '13ms'),
    )

    def __init__( self, **opts ):
        """Create the topology."""
        Topo.__init__( self, **opts )
        # Switches s1..s25 first, then the hosts.
        switch_nodes = [ self.addSwitch( 's%d' % n ) for n in range( 1, 26 ) ]
        host_nodes = [ self.addHost( name ) for name in self._HOST_NAMES ]
        # Each host hangs off its own switch (so port 1 of every switch
        # faces the host -- host links are created before switch links).
        for sw, host in zip( switch_nodes, host_nodes ):
            self.addLink( sw, host )
        # Inter-switch backbone: 10 Mbit/s everywhere, per-link delays above.
        for a, b, lag in self._SWITCH_LINKS:
            self.addLink( switch_nodes[ a - 1 ], switch_nodes[ b - 1 ],
                          bw=10, delay=lag )
# Registry consumed by `mn --custom SDNexp3Topo.py --topo generated`.
topos = { 'generated': ( lambda: GeneratedTopo() ) }
# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
# the following code produces an executable script working with a remote controller
# and providing ssh access to the mininet hosts from within the ubuntu vm
# Remote controller IP; empty string means "use localhost" (see setupNetwork).
controller_ip = ''
def setupNetwork(controller_ip):
    """Build and return a Mininet network wired to a remote controller.

    controller_ip: address of the Ryu controller; '' falls back to localhost.
    """
    # No controller address configured -> assume it runs on this machine.
    if controller_ip == '':
        controller_ip = '127.0.0.1'

    def make_controller(name):
        # Remote (out-of-band) controller on the classic OpenFlow port.
        return RemoteController( name, ip=controller_ip, port=6633 )

    return Mininet(topo=GeneratedTopo(), controller=make_controller,
                   host=CPULimitedHost, link=TCLink)
def connectToRootNS( network, switch, ip, prefixLen, routes ):
    """Connect hosts to root namespace via switch, then start the network.

    (Fix: the original had four separate bare string statements here; only
    the first was a docstring, the rest were silent no-op expressions.)

    network: Mininet() network object
    switch: switch to connect to root namespace
    ip: IP address for root namespace node
    prefixLen: IP address prefix length (e.g. 8, 16, 24)
    routes: host networks to route to
    """
    # Create a node in root namespace and link to switch 0
    root = Node( 'root', inNamespace=False )
    intf = TCLink( root, switch ).intf1
    root.setIP( ip, prefixLen, intf )
    # Start network that now includes link to root namespace
    network.start()
    # Add routes from root ns to hosts so sshd on them is reachable
    for route in routes:
        root.cmd( 'route add -net ' + route + ' dev ' + str( intf ) )
def sshd( network, cmd='/usr/sbin/sshd', opts='-D' ):
    """Start the network, attach it to the root namespace and run sshd on all hosts."""
    # Reach the hosts from the root namespace via the first switch.
    entry_switch = network.switches[ 0 ]
    root_ip = '10.123.123.1'      # our IP address on host network
    host_nets = [ '10.0.0.0/8' ]  # host networks to route to
    connectToRootNS( network, entry_switch, root_ip, 8, host_nets )
    # Launch sshd in the background on every host.
    for host in network.hosts:
        host.cmd( cmd + ' ' + opts + '&' )
    # DEBUGGING INFO
    print()
    print ("Dumping host connections")
    dumpNodeConnections(network.hosts)
    print()
    print("*** Hosts are running sshd at the following addresses:")
    print()
    for host in network.hosts:
        print(host.name, host.IP())
    print()
    print("*** Type 'exit' or control-D to shut down network")
    print()
    print("*** For testing network connectivity among the hosts, wait a bit for the controller to create all the routes, then do 'pingall' on the mininet console.")
    print()
    CLI( network )
    # Kill the backgrounded sshd jobs before tearing the network down.
    for host in network.hosts:
        host.cmd( 'kill %' + cmd )
    network.stop()
# by zys
def start_network(network):
    """Start the network, dump debugging info and drop into the Mininet CLI."""
    network.start()
    # DEBUGGING INFO
    print()
    print("Dumping host connections")
    dumpNodeConnections(network.hosts)
    # Fix: was a bare `print` statement -- a silent no-op in Python 3.
    # Emit the blank line that was clearly intended.
    print()
    for host in network.hosts:
        print(host.name, host.IP())
    print()
    print("*** Type 'exit' or control-D to shut down network")
    print()
    print("*** For testing network connectivity among the hosts, wait a bit for the controller to create all the routes, then do 'pingall' on the mininet console.")
    print()
    print("*** edited for xjtu sdn_exp_2020")
    print()
    CLI( network )
    network.stop()
if __name__ == '__main__':
    # Standalone entry point: build the network against `controller_ip`
    # and enter the Mininet CLI (use the sshd() variant for ssh access).
    setLogLevel('info')
    #setLogLevel('debug')
    # sshd( setupNetwork(controller_ip) )
    start_network(setupNetwork(controller_ip))
3.2 跳数最少
# short_path.py
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import arp
from ryu.lib import hub
from ryu.topology.api import get_all_host, get_all_link, get_all_switch
from ryu.topology import event, switches
import networkx as nx
import matplotlib.pyplot as plt
ETHERNET = ethernet.ethernet.__name__
ETHERNET_MULTICAST = "ff:ff:ff:ff:ff:ff" # broadcast mac
ARP = arp.arp.__name__
class ARP_PROXY_13(app_manager.RyuApp):
    """Shortest-hop-count forwarding with ARP broadcast-loop suppression.

    Mirrors the discovered topology into a networkx DiGraph (switches keyed
    by dpid, hosts by MAC) and forwards each flow along the minimum-hop path.
    Broadcast ARP requests are deduplicated per (dpid, src_mac, dst_ip) so
    they cannot storm around the loops in the ARPANET topology.
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(ARP_PROXY_13, self).__init__(*args, **kwargs)
        self.mac_to_port = {}    # {dpid: {dst_mac: port}} -- L2 learning table
        self.arp_table = {}      # {ip: mac} -- learned from seen ARP packets
        self.sw = {}             # {(dpid, eth_src, arp_dst_ip): in_port} -- ARP loop breaker
        self.net = nx.DiGraph()  # topology graph; edge attr 'port' = egress port
        self.topo_thread = hub.spawn(self._get_topology)

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Install the table-miss flow so unmatched packets reach the controller."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)

    def add_flow(self, datapath, priority, match, actions):
        """Push a flow-mod; short timeouts let stale paths age out quickly."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                idle_timeout=5, hard_timeout=15,
                                match=match, instructions=inst)
        datapath.send_msg(mod)

    def _get_topology(self):
        """Poll discovery every 2 s and mirror switches/links into self.net."""
        while True:
            switch_list = get_all_switch(self)
            links_list = get_all_link(self)
            switches = [switch.dp.id for switch in switch_list]
            self.net.add_nodes_from(switches)  # add node
            # Directed edge per link, remembering the source's egress port.
            links = [(link.src.dpid, link.dst.dpid, {'port': link.src.port_no})
                     for link in links_list]
            self.net.add_edges_from(links)  # add edge
            hub.sleep(2)  # refresh the topology every 2 seconds

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learn MACs/hosts, then forward along the minimum-hop path."""
        msg = ev.msg
        dp = msg.datapath
        ofp = dp.ofproto
        dpid = dp.id
        parser = dp.ofproto_parser
        in_port = msg.match['in_port']
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        dst = eth.dst
        src = eth.src
        # LLDP and IPv6 traffic would pollute the learned topology -- drop it.
        # (Fix: IPv6 was matched with the magic number 34525.)
        if eth.ethertype in (ether_types.ETH_TYPE_LLDP,
                             ether_types.ETH_TYPE_IPV6):
            return
        header_list = dict(
            (p.protocol_name, p) for p in pkt.protocols if type(p) != str)
        if ARP in header_list:
            self.arp_table[header_list[ARP].src_ip] = src  # ARP learning
        self.mac_to_port.setdefault(dpid, {})
        # L2 learning: remember which port this source MAC arrived on.
        self.mac_to_port[dpid][src] = in_port
        # First sighting of this host: add it to the graph, tied to its switch.
        if src not in self.net:
            self.net.add_node(src)
            self.net.add_edge(dpid, src, port=in_port, weight=0)
            self.net.add_edge(src, dpid, weight=0)
        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
            if dst in self.net:
                # weight=None -> unweighted BFS, i.e. true minimum hop count.
                # (Fix: the original passed weight=0, which networkx treats as
                # a missing edge-attribute key, so it only worked by accident.)
                path = nx.shortest_path(self.net, src, dst, weight=None)
                print("path : %s" % path)
                if dpid in path:
                    next_hop = path[path.index(dpid) + 1]
                    out_port = self.net[dpid][next_hop]['port']
        else:
            if self.arp_handler(header_list, dp, in_port, msg.buffer_id):
                # True: looped ARP broadcast was dropped; stop processing.
                print("ARP_PROXY_13")
                return None
            else:
                out_port = ofp.OFPP_FLOOD
                print('OFPP_FLOOD')
        actions = [parser.OFPActionOutput(out_port)]
        # Install a flow so the next packet of this flow skips the controller.
        if out_port != ofp.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
            self.add_flow(dp, 1, match, actions)
        data = None
        if msg.buffer_id == ofp.OFP_NO_BUFFER:
            data = msg.data
        out = parser.OFPPacketOut(datapath=dp, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        dp.send_msg(out)

    def arp_handler(self, header_list, datapath, in_port, msg_buffer_id):
        """Suppress ARP broadcast storms on the looped topology.

        Key each broadcast ARP request by (dpid, src_mac, requested_ip).
        On first sight remember the ingress port; if the same request later
        arrives on a *different* port it must have looped around, so drop it.
        Returns True when the packet was dropped, False when the caller
        should flood it.
        """
        if ETHERNET in header_list:
            eth_dst = header_list[ETHERNET].dst
            eth_src = header_list[ETHERNET].src
            print(header_list)
        if eth_dst == ETHERNET_MULTICAST and ARP in header_list:
            arp_dst_ip = header_list[ARP].dst_ip
            if (datapath.id, eth_src, arp_dst_ip) in self.sw:  # Break the loop
                if self.sw[(datapath.id, eth_src, arp_dst_ip)] != in_port:
                    # Same request seen on a different port -> it has looped;
                    # "drop" it with a PacketOut carrying an empty action list.
                    out = datapath.ofproto_parser.OFPPacketOut(
                        datapath=datapath,
                        buffer_id=datapath.ofproto.OFP_NO_BUFFER,
                        in_port=in_port,
                        actions=[], data=None)
                    datapath.send_msg(out)
                    return True
            else:
                self.sw[(datapath.id, eth_src, arp_dst_ip)] = in_port
        return False
3.3 时延最短
# min_delay.py
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import arp
from ryu.lib import hub
from ryu.topology.api import get_all_host, get_all_link, get_all_switch
from ryu.topology import event, switches
import networkx as nx
import matplotlib.pyplot as plt
ETHERNET = ethernet.ethernet.__name__ #
ETHERNET_MULTICAST = "ff:ff:ff:ff:ff:ff" # broadcast mac
ARP = arp.arp.__name__
class ARP_PROXY_13(app_manager.RyuApp):
    """Minimum-delay forwarding with ARP broadcast-loop suppression.

    Extends the hop-count app: LLDP probe delays measured by the (patched)
    'switches' service are written onto the graph edges as 'weight', and
    paths are chosen by Dijkstra over those delays.  Requires a modified
    ryu build whose PortData exposes a `delay` attribute.
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(ARP_PROXY_13, self).__init__(*args, **kwargs)
        self.mac_to_port = {}    # {dpid: {dst_mac: port}} -- L2 learning table
        self.arp_table = {}      # {ip: mac} -- learned from seen ARP packets
        self.sw = {}             # {(dpid, eth_src, arp_dst_ip): in_port} -- ARP loop breaker
        self.net = nx.DiGraph()  # topology graph; edge attr 'weight' = delay (ms)
        self.lldp_delay = {}     # {(src_dpid, dst_dpid): delay in ms}
        self.switches = None     # lazily-resolved 'switches' service brick
        self.topo_thread = hub.spawn(self._get_topology)

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Install the table-miss flow so unmatched packets reach the controller."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)

    def add_flow(self, datapath, priority, match, actions):
        """Push a flow-mod; short timeouts let stale paths age out quickly."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                idle_timeout=5, hard_timeout=15,
                                match=match, instructions=inst)
        datapath.send_msg(mod)

    def _get_topology(self):
        """Poll discovery every 2 s and mirror switches/links into self.net."""
        while True:
            switch_list = get_all_switch(self)
            links_list = get_all_link(self)
            switches = [switch.dp.id for switch in switch_list]
            self.net.add_nodes_from(switches)  # add node
            # Directed edge per link, remembering the source's egress port.
            # (add_edges_from merges attrs, so LLDP-set weights are kept.)
            edges = [(link.src.dpid, link.dst.dpid, {'port': link.src.port_no})
                     for link in links_list]
            self.net.add_edges_from(edges)  # add edge
            hub.sleep(2)  # refresh the topology every 2 seconds

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Harvest LLDP delays, learn MACs, then forward along the min-delay path."""
        msg = ev.msg
        dp = msg.datapath
        ofp = dp.ofproto
        dpid = dp.id
        parser = dp.ofproto_parser
        in_port = msg.match['in_port']
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        dst = eth.dst
        src = eth.src
        sum_delay = 0
        # Record the link delay each time an LLDP probe from the (patched)
        # switches service reaches us on this switch.
        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            src_dpid, src_port_no = switches.LLDPPacket.lldp_parse(msg.data)
            if self.switches is None:
                # NOTE: the attribute self.switches shadows the imported
                # `switches` module name; both are used in this method.
                self.switches = app_manager.lookup_service_brick('switches')
            for port in self.switches.ports:
                if src_dpid == port.dpid and src_port_no == port.port_no:
                    # PortData.delay exists only in the modified ryu build;
                    # convert seconds to milliseconds.
                    delay = self.switches.ports[port].delay * 1000
                    print("src_switch:%s dst_switch:%s delay:%s" % (src_dpid, dpid, delay))
                    self.lldp_delay[(src_dpid, dpid)] = delay
                    # Weight both directions of the edge with the delay.
                    self.net.add_edge(src_dpid, dpid, weight=delay)
                    self.net.add_edge(dpid, src_dpid, weight=delay)
        # LLDP and IPv6 traffic would pollute the learned topology -- drop it.
        # (Fix: IPv6 was matched with the magic number 34525.)
        if eth.ethertype in (ether_types.ETH_TYPE_LLDP,
                             ether_types.ETH_TYPE_IPV6):
            return
        header_list = dict(
            (p.protocol_name, p) for p in pkt.protocols if type(p) != str)
        if ARP in header_list:
            self.arp_table[header_list[ARP].src_ip] = src  # ARP learning
        self.mac_to_port.setdefault(dpid, {})
        # L2 learning: remember which port this source MAC arrived on.
        self.mac_to_port[dpid][src] = in_port
        # First sighting of this host: add it to the graph with zero-delay
        # access edges so host links don't affect the path cost.
        if src not in self.net:
            self.net.add_node(src)
            self.net.add_edge(dpid, src, port=in_port, weight=0)
            self.net.add_edge(src, dpid, weight=0)
        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
            if dst in self.net:
                # Dijkstra over the measured delays.
                path = nx.shortest_path(self.net, src, dst, weight='weight')
                print("path : %s" % path)
                if dpid in path:
                    next_hop = path[path.index(dpid) + 1]
                    out_port = self.net[dpid][next_hop]['port']
                    # Sum the inter-switch delays.  path[0]/path[-1] are the
                    # hosts; the final switch->host edge is skipped by the
                    # `i < len(path) - 2` guard.
                    for i in range(1, len(path) - 1):
                        now_switch = path[i]
                        next_switch = path[i + 1]
                        if i < len(path) - 2:
                            print("src_switch:%s dst_switch:%s delay:%s" % (now_switch, next_switch, self.net[now_switch][next_switch]['weight']))
                            sum_delay = sum_delay + self.net[now_switch][next_switch]['weight']
                    print("sum_delay: %s" % sum_delay)
        else:
            if self.arp_handler(header_list, dp, in_port, msg.buffer_id):
                # True: looped ARP broadcast was dropped; stop processing.
                print("ARP_PROXY_13")
                return None
            else:
                out_port = ofp.OFPP_FLOOD
                print('OFPP_FLOOD')
        actions = [parser.OFPActionOutput(out_port)]
        # Install a flow so the next packet of this flow skips the controller.
        if out_port != ofp.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
            self.add_flow(dp, 1, match, actions)
        data = None
        if msg.buffer_id == ofp.OFP_NO_BUFFER:
            data = msg.data
        out = parser.OFPPacketOut(datapath=dp, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        dp.send_msg(out)

    def arp_handler(self, header_list, datapath, in_port, msg_buffer_id):
        """Suppress ARP broadcast storms on the looped topology.

        Key each broadcast ARP request by (dpid, src_mac, requested_ip).
        On first sight remember the ingress port; if the same request later
        arrives on a *different* port it must have looped around, so drop it.
        Returns True when the packet was dropped, False when the caller
        should flood it.
        """
        if ETHERNET in header_list:
            eth_dst = header_list[ETHERNET].dst
            eth_src = header_list[ETHERNET].src
            print(header_list)
        if eth_dst == ETHERNET_MULTICAST and ARP in header_list:
            arp_dst_ip = header_list[ARP].dst_ip
            if (datapath.id, eth_src, arp_dst_ip) in self.sw:  # Break the loop
                if self.sw[(datapath.id, eth_src, arp_dst_ip)] != in_port:
                    # Same request seen on a different port -> it has looped;
                    # "drop" it with a PacketOut carrying an empty action list.
                    out = datapath.ofproto_parser.OFPPacketOut(
                        datapath=datapath,
                        buffer_id=datapath.ofproto.OFP_NO_BUFFER,
                        in_port=in_port,
                        actions=[], data=None)
                    datapath.send_msg(out)
                    return True
            else:
                self.sw[(datapath.id, eth_src, arp_dst_ip)] = in_port
        return False
4 问题整理
- 获取拓扑
- 问题描述
使用实验指导书中的获取拓扑代码,获取的拓扑是错误的。 - 解决方法
lldp包和ipv6包存在的情况会导致拓扑结构获取出现问题,_packet_in_handler()
中应过滤掉lldp包和ipv6包
- 问题描述
- 获取时延
- 问题描述
使用实验指导书中获取时延代码,但获取的时延是错误的。 - 解决方法
启动加载自定义拓扑文件更改为sudo python SDNexp3Topo.py --controller remote
- 问题描述