====== Aggregating multiple ISP links with ML PPP ======

This lab shows an example of aggregating multiple independent ISP links with [[http://mpd.sourceforge.net|mpd5]] (Multi-link PPP).

===== Network diagram =====

Here is the concept:

{{:

And here is the detailed lab diagram:

{{:

===== Virtual Lab =====

This chapter describes how to start each router and how to configure the 3 central routers.

More information on these BSDRP lab scripts is available on [[documentation:

Start the Virtual lab (example using bhyve):
| + | < | ||
| + | # ./ | ||
| + | Setting-up a virtual lab with 6 VM(s): | ||
| + | - Working directory: /tmp/BSDRP | ||
| + | - Each VM has 1 core(s) and 512M RAM | ||
| + | - Emulated NIC: virtio-net | ||
| + | - Switch mode: bridge + tap | ||
| + | - 0 LAN(s) between all VM | ||
| + | - Full mesh Ethernet links between each VM | ||
| + | VM 1 has the following NIC: | ||
| + | - vtnet0 connected to VM 2 | ||
| + | - vtnet1 connected to VM 3 | ||
| + | - vtnet2 connected to VM 4 | ||
| + | - vtnet3 connected to VM 5 | ||
| + | - vtnet4 connected to VM 6 | ||
| + | VM 2 has the following NIC: | ||
| + | - vtnet0 connected to VM 1 | ||
| + | - vtnet1 connected to VM 3 | ||
| + | - vtnet2 connected to VM 4 | ||
| + | - vtnet3 connected to VM 5 | ||
| + | - vtnet4 connected to VM 6 | ||
| + | VM 3 has the following NIC: | ||
| + | - vtnet0 connected to VM 1 | ||
| + | - vtnet1 connected to VM 2 | ||
| + | - vtnet2 connected to VM 4 | ||
| + | - vtnet3 connected to VM 5 | ||
| + | - vtnet4 connected to VM 6 | ||
| + | VM 4 has the following NIC: | ||
| + | - vtnet0 connected to VM 1 | ||
| + | - vtnet1 connected to VM 2 | ||
| + | - vtnet2 connected to VM 3 | ||
| + | - vtnet3 connected to VM 5 | ||
| + | - vtnet4 connected to VM 6 | ||
| + | VM 5 has the following NIC: | ||
| + | - vtnet0 connected to VM 1 | ||
| + | - vtnet1 connected to VM 2 | ||
| + | - vtnet2 connected to VM 3 | ||
| + | - vtnet3 connected to VM 4 | ||
| + | - vtnet4 connected to VM 6 | ||
| + | VM 6 has the following NIC: | ||
| + | - vtnet0 connected to VM 1 | ||
| + | - vtnet1 connected to VM 2 | ||
| + | - vtnet2 connected to VM 3 | ||
| + | - vtnet3 connected to VM 4 | ||
| + | - vtnet4 connected to VM 5 | ||
| + | For connecting to VM' | ||
| + | - VM 1 : cu -l /dev/nmdm1B | ||
| + | - VM 2 : cu -l /dev/nmdm2B | ||
| + | - VM 4 : cu -l /dev/nmdm4B | ||
| + | - VM 3 : cu -l /dev/nmdm3B | ||
| + | - VM 5 : cu -l /dev/nmdm5B | ||
| + | - VM 6 : cu -l /dev/nmdm6B | ||
| + | </ | ||
| + | |||
| + | ==== Backbone routers configuration ==== | ||
| + | |||
| + | === Router 2 === | ||
| + | |||
| + | Router 2 is configured for rate-limiting traffic at 1 Mb/s on interface to/from R1. | ||
| + | |||
| + | < | ||
| + | sysrc hostname=R2 | ||
| + | sysrc ifconfig_vtnet0=" | ||
| + | sysrc ifconfig_vtnet3=" | ||
| + | sysrc defaultrouter=" | ||
| + | sysrc firewall_enable=YES | ||
| + | sysrc firewall_script="/ | ||
| + | |||
| + | cat > / | ||
| + | #!/bin/sh | ||
| + | fwcmd="/ | ||
| + | kldstat -q -m dummynet || kldload dummynet | ||
| + | # Flush out the list before we begin. | ||
| + | ${fwcmd} -f flush | ||
| + | ${fwcmd} pipe 10 config bw 1Mbit/s | ||
| + | ${fwcmd} pipe 20 config bw 1Mbit/s | ||
| + | #Traffic getting out vtnet0 is limited to 1Mbit/s | ||
| + | ${fwcmd} add 1000 pipe 10 all from any to any out via vtnet0 | ||
| + | #Traffic getting int vtnet0 is limited to 1Mbit/s | ||
| + | ${fwcmd} add 2000 pipe 20 all from any to any in via vtnet0 | ||
| + | #We don't want to block traffic, only shape some | ||
| + | ${fwcmd} add 3000 allow ip from any to any | ||
| + | EOF | ||
| + | |||
| + | service netif restart | ||
| + | service routing restart | ||
| + | service ipfw start | ||
| + | hostname R2 | ||
| + | config save | ||
| + | </ | ||
| + | |||
| + | === Router 3 === | ||
| + | |||
| + | Router 3 is configured for rate-limiting traffic at 20 Mb/s on interface to/from R1. | ||
| + | |||
| + | < | ||
| + | sysrc hostname=R3 | ||
| + | sysrc ifconfig_vtnet0=" | ||
| + | sysrc ifconfig_vtnet3=" | ||
| + | sysrc defaultrouter=" | ||
| + | sysrc firewall_enable=YES | ||
| + | sysrc firewall_script="/ | ||
| + | |||
| + | cat > / | ||
| + | #!/bin/sh | ||
| + | fwcmd="/ | ||
| + | kldstat -q -m dummynet || kldload dummynet | ||
| + | # Flush out the list before we begin. | ||
| + | ${fwcmd} -f flush | ||
| + | ${fwcmd} pipe 10 config bw 2Mbit/s | ||
| + | ${fwcmd} pipe 20 config bw 2Mbit/s | ||
| + | #Traffic getting out vtnet0 is limited to 2Mbit/s | ||
| + | ${fwcmd} add 1000 pipe 10 all from any to any out via vtnet0 | ||
| + | #Traffic getting int vtnet0 is limited to 2Mbit/s | ||
| + | ${fwcmd} add 2000 pipe 20 all from any to any in via vtnet0 | ||
| + | #We don't want to block traffic, only shape some | ||
| + | ${fwcmd} add 3000 allow ip from any to any | ||
| + | EOF | ||
| + | |||
| + | service netif restart | ||
| + | service routing restart | ||
| + | service ipfw start | ||
| + | hostname R3 | ||
| + | config save | ||
| + | |||
| + | </ | ||
| + | |||
| + | === Router 4 === | ||
| + | |||
| + | Router 4 is configured for rate-limiting traffic at 3 Mb/s on interface to/from R1. | ||
| + | |||
| + | < | ||
| + | sysrc hostname=R4 | ||
| + | sysrc ifconfig_vtnet0=" | ||
| + | sysrc ifconfig_vtnet3=" | ||
| + | sysrc defaultrouter=" | ||
| + | sysrc firewall_enable=YES | ||
| + | sysrc firewall_script="/ | ||
| + | |||
| + | cat > / | ||
| + | #!/bin/sh | ||
| + | fwcmd="/ | ||
| + | kldstat -q -m dummynet || kldload dummynet | ||
| + | # Flush out the list before we begin. | ||
| + | ${fwcmd} -f flush | ||
| + | ${fwcmd} pipe 10 config bw 3Mbit/s | ||
| + | ${fwcmd} pipe 20 config bw 3Mbit/s | ||
| + | #Traffic getting out vtnet0 is limited to 3Mbit/s | ||
| + | ${fwcmd} add 1000 pipe 10 all from any to any out via vtnet0 | ||
| + | #Traffic getting int vten0 is limited to 3Mbit/s | ||
| + | ${fwcmd} add 2000 pipe 20 all from any to any in via vtnet0 | ||
| + | #We don't want to block traffic, only shape some | ||
| + | ${fwcmd} add 3000 allow ip from any to any | ||
| + | EOF | ||
| + | |||
| + | service netif restart | ||
| + | service routing restart | ||
| + | service ipfw start | ||
| + | hostname R4 | ||
| + | config save | ||
| + | </ | ||
| + | |||
| + | === Router 5 === | ||
| + | |||
| + | Router 5 is the MLPPP server default gateway. | ||
| + | |||
| + | < | ||
| + | sysrc hostname=R5 | ||
| + | sysrc ifconfig_vtnet1=" | ||
| + | sysrc ifconfig_vtnet2=" | ||
| + | sysrc ifconfig_vtnet3=" | ||
| + | sysrc ifconfig_vtnet4=" | ||
| + | sysrc static_routes=" | ||
| + | sysrc route_ISP1=" | ||
| + | sysrc route_ISP2=" | ||
| + | sysrc route_ISP3=" | ||
| + | service netif restart | ||
| + | service routing restart | ||
| + | hostname R5 | ||
| + | config save | ||
| + | </ | ||

==== Router 6 : L2TP MLPPP server ====

Router 6 is configured as an L2TP server.

<code>
sysrc hostname=R6
sysrc cloned_interfaces="
sysrc ifconfig_lo1="
sysrc ifconfig_vtnet4="
sysrc ifconfig_vtnet4_alias1="
sysrc ifconfig_vtnet4_alias2="
sysrc defaultrouter="
sysrc mpd_enable=YES
sysrc mpd_flags="
cat > /
default:
        load l2tp_server
l2tp_server:
# IP Pool
        set ippool add pool1 10.0.16.10 10.0.16.100
# Create bundle template named B
        create bundle template B
# Enable IPv6
        set bundle enable ipv6cp
# Configure interface
        set iface enable tcpmssfix
# Enable Van Jacobson compression
        set ipcp yes vjcomp
# Handle the IPCP configuration
        set ipcp ranges 10.0.16.1/32 ippool pool1
# Create clonable link template named L
        create link template L l2tp
        set link action bundle B
        set link enable multilink
        set link keep-alive 10 30
        set link mtu 1460
        set l2tp secret blah
# SDSL1
        create link static sdsl1 L
        set l2tp self 10.0.56.62
# set DOWNLOAD bandwidth of ISP1
        set link bandwidth 1000000
        set link enable incoming
# SDSL2
        create link static sdsl2 L
        set l2tp self 10.0.56.63
# set DOWNLOAD bandwidth of ISP2
        set link bandwidth 2000000
        set link enable incoming
# SDSL3
        create link static sdsl3 L
        set l2tp self 10.0.56.64
# set DOWNLOAD bandwidth of ISP3
        set link bandwidth 3000000
        set link enable incoming
EOF

service netif restart
service routing restart
service mpd5 start
hostname R6
config save
</code>
| + | |||
| + | ==== Router 1 : L2TP MLPPP client ==== | ||
| + | |||
| + | Router 1 is configured as a simple L2TP MLPPP client router connected to 3 Internet link. | ||
| + | |||
| + | < | ||
| + | sysrc hostname=R1 | ||
| + | sysrc ifconfig_vtnet0=" | ||
| + | sysrc ifconfig_vtnet1=" | ||
| + | sysrc ifconfig_vtnet2=" | ||
| + | sysrc static_routes=" | ||
| + | sysrc route_ISP1=" | ||
| + | sysrc route_ISP2=" | ||
| + | sysrc route_ISP3=" | ||
| + | sysrc mpd_enable=YES | ||
| + | sysrc mpd_flags=" | ||
| + | cat > / | ||
| + | default: | ||
| + | load l2tp_client | ||
| + | l2tp_client: | ||
| + | # Create the bundle | ||
| + | create bundle template B | ||
| + | # Enable IPv6 | ||
| + | set bundle enable ipv6cp | ||
| + | # Enable TCP MSS fix | ||
| + | set iface enable tcpmssfix | ||
| + | # Use this interface as default route | ||
| + | set iface route default | ||
| + | # Disable IPCP configuration for the iperf test | ||
| + | #set ipcp yes vjcomp | ||
| + | # Create clonable template link ADSL1 | ||
| + | create link template L l2tp | ||
| + | set link action bundle B | ||
| + | set link enable multilink | ||
| + | set link keep-alive 10 30 | ||
| + | set link mtu 1460 | ||
| + | set l2tp secret blah | ||
| + | # Retry indefinitly to redial | ||
| + | set link max-redial 0 | ||
| + | # SDSL1 | ||
| + | create link static sdsl1 L | ||
| + | set l2tp peer 10.0.56.62 | ||
| + | # Configure the UPLOAD bandwidth | ||
| + | set link bandwidth 1000000 | ||
| + | open link | ||
| + | # SDSL2 | ||
| + | create link static sdsl2 L | ||
| + | set l2tp peer 10.0.56.63 | ||
| + | # Configure the UPLOAD bandwidth | ||
| + | set link bandwidth 2000000 | ||
| + | open link | ||
| + | # SDSL3 | ||
| + | create link static sdsl3 L | ||
| + | set l2tp peer 10.0.56.64 | ||
| + | # Configure the UPLOAD bandwidth | ||
| + | set link bandwidth 3000000 | ||
| + | open link | ||
| + | EOF | ||
| + | |||
| + | service netif restart | ||
| + | service routing restart | ||
| + | service mpd5 start | ||
| + | hostname R1 | ||
| + | config save | ||
| + | </ | ||
| + | |||
| + | ===== Final testing ===== | ||
| + | |||
| + | ==== Each ISP link bandwidth ==== | ||
| + | |||
| + | Start iperf in server mode on R6: | ||
| + | < | ||
| + | [root@R6]~# iperf3 -s | ||
| + | ----------------------------------------------------------- | ||
| + | Server listening on 5201 | ||
| + | ----------------------------------------------------------- | ||
| + | </ | ||
| + | |||
| + | |||
| + | Now check the correct limited bandwitdh for each different links: | ||
| + | * Link to R6 across R2: 1Mb/s | ||
| + | * Link to R6 across R3: 2Mb/s | ||
| + | * Link to R6 across R4: 3Mb/s | ||
| + | |||
| + | < | ||
| + | [root@R1]~# iperf3 -i 0 -c 10.0.56.62 | ||
| + | Connecting to host 10.0.56.62, port 5201 | ||
| + | [ 5] local 10.0.12.1 port 30648 connected to 10.0.56.62 port 5201 | ||
| + | [ ID] Interval | ||
| + | [ 5] | ||
| + | - - - - - - - - - - - - - - - - - - - - - - - - - | ||
| + | [ ID] Interval | ||
| + | [ 5] | ||
| + | [ 5] | ||
| + | |||
| + | iperf Done. | ||
| + | [root@R1]~# iperf3 -i 0 -c 10.0.56.63 | ||
| + | Connecting to host 10.0.56.63, port 5201 | ||
| + | [ 5] local 10.0.13.1 port 13090 connected to 10.0.56.63 port 5201 | ||
| + | [ ID] Interval | ||
| + | [ 5] | ||
| + | - - - - - - - - - - - - - - - - - - - - - - - - - | ||
| + | [ ID] Interval | ||
| + | [ 5] | ||
| + | [ 5] | ||
| + | |||
| + | iperf Done. | ||
| + | [root@R1]~# iperf3 -i 0 -c 10.0.56.64 | ||
| + | Connecting to host 10.0.56.64, port 5201 | ||
| + | [ 5] local 10.0.14.1 port 57319 connected to 10.0.56.64 port 5201 | ||
| + | [ ID] Interval | ||
| + | [ 5] | ||
| + | - - - - - - - - - - - - - - - - - - - - - - - - - | ||
| + | [ ID] Interval | ||
| + | [ 5] | ||
| + | [ 5] | ||
| + | |||
| + | iperf Done. | ||
| + | |||
| + | </ | ||

==== Aggregated ISP link bandwidth ====

The aggregated link bandwidth should be negotiated at 6Mb/s (1+2+3):

<code>
[root@R1]~# grep Bundle /
Nov 12 06:04:54 router ppp[87823]: [B-1] Bundle: Interface ng0 created
Nov 12 06:04:54 router ppp[87823]: [B-1] Bundle: Status update: up 1 link, total bandwidth 2000000 bps
Nov 12 06:04:54 router ppp[87823]: [B-1] Bundle: Status update: up 2 links, total bandwidth 3000000 bps
Nov 12 06:04:54 router ppp[87823]: [B-1] Bundle: Status update: up 3 links, total bandwidth 6000000 bps
</code>
| + | |||
| + | and iperf measurement close to 6Mb/s: | ||
| + | < | ||
| + | [root@R1]~# iperf3 -i 0 -c 10.6.6.6 | ||
| + | Connecting to host 10.6.6.6, port 5201 | ||
| + | [ 5] local 10.0.16.10 port 51350 connected to 10.6.6.6 port 5201 | ||
| + | [ ID] Interval | ||
| + | [ 5] | ||
| + | - - - - - - - - - - - - - - - - - - - - - - - - - | ||
| + | [ ID] Interval | ||
| + | [ 5] | ||
| + | [ 5] | ||
| + | |||
| + | iperf Done. | ||
| + | </ | ||
| + | |||
| + | In the same time, if you start a " | ||
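
For example on R2 (L2TP is carried over UDP port 1701; the interface name follows the lab diagram):

<code>
[root@R2]~# tcpdump -ni vtnet0 udp port 1701
</code>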
| + | |||
| + | |||
| + | |||
| + | |||

===== Performance lab =====

This lab tests mpd5 performance by aggregating 2 gigabit links.

Here is the concept:

{{:

This lab uses 3 [[IBM System x3550 M3]] servers with **quad** core CPUs (Intel Xeon L5630 2.13GHz, hyper-threading disabled), a quad-port Intel 82580 NIC connected to the PCI-Express bus, and a dual-port Intel 10-Gigabit X540-AT2 connected to the PCI-Express bus.

==== Router 1 ====

Router 1 is configured as a simple end-point.

Set the base parameters:
<code>
sysrc hostname=R1
sysrc ifconfig_ix0="
sysrc ifconfig_ix0_ipv6="
sysrc defaultrouter="
sysrc ipv6_defaultrouter="
service netif restart
service routing restart
config save
</code>
| + | |||
| + | ==== Router 2 ==== | ||
| + | |||
| + | Router 2 is configured as a L2TP MLPPP client router. | ||
| + | |||
| + | Configure global parameters: | ||
| + | < | ||
| + | sysrc hostname=R2 | ||
| + | sysrc ifconfig_ix0=" | ||
| + | sysrc ifconfig_ix0_ipv6=" | ||
| + | sysrc ifconfig_igb2=" | ||
| + | sysrc ifconfig_igb3=" | ||
| + | sysrc mpd_enable=YES | ||
| + | sysrc mpd_flags=" | ||
| + | </ | ||
| + | |||
| + | Configure mpd: | ||
| + | |||
| + | < | ||
| + | cat > / | ||
| + | default: | ||
| + | load l2tp_client | ||
| + | l2tp_client: | ||
| + | # Create the bundle | ||
| + | create bundle template B | ||
| + | # Enable IPv6 | ||
| + | set bundle enable ipv6cp | ||
| + | # Disable compression (for iperf test) | ||
| + | #set bundle enable compression | ||
| + | #set ccp yes deflate | ||
| + | # Enable TCP MSS fix | ||
| + | set iface enable tcpmssfix | ||
| + | # Use this interface as default route | ||
| + | set iface route default | ||
| + | # Disable IPCP configuration for the iperf test | ||
| + | #set ipcp yes vjcomp | ||
| + | # Create clonable template link | ||
| + | create link template L l2tp | ||
| + | set link action bundle B | ||
| + | set link enable multilink | ||
| + | set link keep-alive 10 30 | ||
| + | set link mtu 1460 | ||
| + | set l2tp secret blah | ||
| + | set link max-redial 0 | ||
| + | # LINK1 | ||
| + | create link static link1 L | ||
| + | set l2tp peer 10.0.23.3 | ||
| + | open link | ||
| + | # LINK2 | ||
| + | create link static link2 L | ||
| + | set l2tp peer 10.1.23.3 | ||
| + | open link | ||
| + | ' | ||
| + | </ | ||
| + | |||
| + | And apply your changes: | ||
| + | |||
| + | < | ||
| + | service netif restart | ||
| + | service routing restart | ||
| + | service mpd5 start | ||
| + | config save | ||
| + | </ | ||
| + | |||
| + | ==== Router 3 ==== | ||
| + | |||
| + | Router 3 is configured as a (L2TP server). | ||
| + | |||
| + | Set the global parameters: | ||
| + | < | ||
| + | sysrc hostname=R3 | ||
| + | sysrc cloned_interfaces=" | ||
| + | sysrc ifconfig_lo1=" | ||
| + | sysrc ifconfig_lo1_ipv6=" | ||
| + | sysrc ifconfig_igb2=" | ||
| + | sysrc ifconfig_igb3=" | ||
| + | sysrc mpd_enable=YES | ||
| + | sysrc mpd_flags=" | ||
| + | </ | ||
| + | |||
| + | Configure mpd5: | ||
| + | |||
| + | < | ||
| + | cat > / | ||
| + | default: | ||
| + | load l2tp_server | ||
| + | l2tp_server: | ||
| + | # IP Pool | ||
| + | set ippool add pool1 10.3.23.10 10.3.23.100 | ||
| + | # Create bundle template named B | ||
| + | create bundle template B | ||
| + | # Enable compression (disabled on the client) | ||
| + | set bundle enable compression | ||
| + | set ccp yes deflate | ||
| + | # Enable IPv6 | ||
| + | set bundle enable ipv6cp | ||
| + | # Configure interface | ||
| + | set iface enable tcpmssfix | ||
| + | # Handle IPCP configuration | ||
| + | set ipcp yes vjcomp | ||
| + | # Handle the IPCP configuration | ||
| + | set ipcp ranges 10.3.23.1/ | ||
| + | # Create clonable link template | ||
| + | create link template L l2tp | ||
| + | set link action bundle B | ||
| + | set link enable multilink | ||
| + | set link keep-alive 10 30 | ||
| + | set link mtu 1460 | ||
| + | set l2tp secret blah | ||
| + | # LINK1 | ||
| + | create link static link1 L | ||
| + | set l2tp self 10.0.23.3 | ||
| + | set link enable incoming | ||
| + | # LINK2 | ||
| + | create link static link2 L | ||
| + | set l2tp self 10.1.23.3 | ||
| + | set link enable incoming | ||
| + | ' | ||
| + | </ | ||
| + | |||
| + | if-up script (for installing routes to R1 subnet): | ||
| + | |||
| + | < | ||
| + | cat > / | ||
| + | #!/bin/sh | ||
| + | #mpd5 call script with options: | ||
| + | #interface proto local-ip remote-ip authname [ dns1 server-ip ] [ dns2 server-ip ] peer-address | ||
| + | #Examples | ||
| + | #command "/ | ||
| + | #command "/ | ||
| + | #mpd5 wait for 0 as successful | ||
| + | set -e | ||
| + | logger "$0 called with parameters: $@" | ||
| + | remote_inet=" | ||
| + | remote_inet6=" | ||
| + | eval " | ||
| + | if route get -net -\$2 \${remote_$2}; | ||
| + | logger \" | ||
| + | return 0 | ||
| + | else | ||
| + | cmd=\" | ||
| + | fi | ||
| + | " | ||
| + | if $cmd; then | ||
| + | logger "$0: $cmd successfull" | ||
| + | return 0 | ||
| + | else | ||
| + | logger "$0: $cmd failed" | ||
| + | return 1 | ||
| + | fi | ||
| + | ' | ||
| + | chmod +x / | ||
| + | </ | ||
| + | |||
| + | Then the if-down script: | ||
| + | |||
| + | < | ||
| + | cat > / | ||
| + | #!/bin/sh | ||
| + | #mpd5 call script with options: | ||
| + | #interface proto local-ip remote-ip authname peer-address | ||
| + | #example: | ||
| + | #command "/ | ||
| + | logger "$0 called with parameters: $@" | ||
| + | remote_inet=" | ||
| + | remote_net6=" | ||
| + | eval " | ||
| + | if ! route get -net -\$2 ${remote_$2}; | ||
| + | logger "Route ${remote_inet} not in table" | ||
| + | return 0 | ||
| + | else | ||
| + | cmd=\" | ||
| + | fi | ||
| + | " | ||
| + | if $cmd; then | ||
| + | logger " | ||
| + | return 0 | ||
| + | else | ||
| + | logger " | ||
| + | return 1 | ||
| + | fi | ||
| + | |||
| + | ' | ||
| + | chmod +x / | ||
| + | </ | ||

And apply your changes:

<code>
service netif restart
service routing restart
service mpd5 start
config save
</code>
| + | |||
| + | ==== Performance tests ==== | ||
| + | |||
| + | === Checking perf tool === | ||
| + | |||
| + | == Direct tests between R1 and R2 === | ||
| + | |||
| + | < | ||
| + | [root@bsdrp1]~# | ||
| + | ------------------------------------------------------------ | ||
| + | Client connecting to 1.1.1.2, TCP port 5001 | ||
| + | TCP window size: 32.5 KByte (default) | ||
| + | ------------------------------------------------------------ | ||
| + | [ 3] local 1.1.1.1 port 57149 connected with 1.1.1.2 port 5001 | ||
| + | [ ID] Interval | ||
| + | [ 3] 0.0-60.0 sec 24.5 GBytes | ||
| + | </ | ||
| + | |||
| + | ==== Direct tests between R2 and R3 ==== | ||
| + | We start by testing each Gigabit links between R2 and R3 for measuring iperf value on standard gigabit link: | ||
| + | |||
| + | < | ||
| + | [root@R2]~# iperf -c 10.0.23.3 -t 60 | ||
| + | ------------------------------------------------------------ | ||
| + | Client connecting to 10.0.23.3, TCP port 5001 | ||
| + | TCP window size: 32.5 KByte (default) | ||
| + | ------------------------------------------------------------ | ||
| + | [ 3] local 10.0.23.2 port 21046 connected with 10.0.23.3 port 5001 | ||
| + | [ ID] Interval | ||
| + | [ 3] 0.0-60.0 sec 6.54 GBytes | ||
| + | [root@R2]~# iperf -c 10.1.23.3 -t 60 | ||
| + | ------------------------------------------------------------ | ||
| + | Client connecting to 10.1.23.3, TCP port 5001 | ||
| + | TCP window size: 32.5 KByte (default) | ||
| + | ------------------------------------------------------------ | ||
| + | [ 3] local 10.1.23.2 port 50717 connected with 10.1.23.3 port 5001 | ||
| + | [ ID] Interval | ||
| + | [ 3] 0.0-60.0 sec 6.55 GBytes | ||
| + | </ | ||
=== mpd5 perfs ===

== Between R2 and R3 ==

Iperf will target the MLPPP tunnel endpoint:
<code>
[root@R2]~# set DEST=`ifconfig ng0 | grep 'inet ' | cut -d ' ' -f 4`
[root@R2]~# iperf -c $DEST -t 60
------------------------------------------------------------
Client connecting to 10.3.23.1, TCP port 5001
TCP window size: 32.5 KByte (default)
------------------------------------------------------------
[ 3] local 10.3.23.10 port 19383 connected with 10.3.23.1 port 5001
[ ID] Interval
[ 3] 0.0-60.1 sec 6.14 GBytes
</code>
| + | |||
| + | |||
| + | |||
| + | The value is almost the same than without MLPPP aggregated link, but correctly load-balanced across each link. | ||
| + | iR2 stats during this test: | ||
| + | |||
| + | < | ||
| + | /0 / | ||
| + | Load Average | ||
| + | |||
| + | Interface | ||
| + | ng0 in | ||
| + | | ||
| + | |||
| + | | ||
| + | | ||
| + | |||
| + | | ||
| + | | ||
| + | </ | ||
| + | |||
| + | And the load show: | ||
| + | < | ||
| + | [root@R2]~# top -nCHSIzs1 | ||
| + | last pid: 14152; | ||
| + | 155 processes: 5 running, 99 sleeping, 51 waiting | ||
| + | |||
| + | Mem: 3564K Active, 26M Inact, 384M Wired, 256K Cache, 17M Buf, 15G Free | ||
| + | Swap: | ||
| + | |||
| + | |||
| + | PID USERNAME | ||
| + | 8524 root | ||
| + | 8524 root | ||
| + | 8524 root | ||
| + | 14149 root 36 0 32136K | ||
| + | 8524 root | ||
| + | 11 root | ||
| + | 11 root | ||
| + | 11 root | ||
| + | 11 root | ||
| + | 11 root | ||
| + | 0 root | ||
| + | </ | ||
| + | |||
| + | |||
| + | For information, | ||
| + | < | ||
| + | netblast $DEST 9090 1470 30 `sysctl -n hw.ncpu` | ||
| + | </ | ||
| + | |||
| + | and measure the bandwidth received on R3: | ||
| + | < | ||
| + | 0 pps 0.000 Mbps - 0 pkts in 0.522753682 ns | ||
| + | 2149 pps | ||
| + | 0 pps 0.000 Mbps - 0 pkts in 0.501999569 ns | ||
| + | 45 pps 0.539 Mbps - 23 pkts in 0.502000238 ns | ||
| + | 0 pps 0.000 Mbps - 0 pkts in 0.502000362 ns | ||
| + | 713 pps 8.387 Mbps - 358 pkts in 0.501999604 ns | ||
| + | 2107 pps | ||
| + | 0 pps 0.000 Mbps - 0 pkts in 0.501998712 ns | ||
| + | 0 pps 0.000 Mbps - 0 pkts in 0.501998967 ns | ||
| + | 21 pps 0.255 Mbps - 11 pkts in 0.508000385 ns | ||
| + | 0 pps 0.000 Mbps - 0 pkts in 0.501998785 ns | ||
| + | </ | ||
| + | |||
| + | The packet generator prevent to manage keepalive: | ||
| + | |||
| + | < | ||
| + | Jan 27 10:50:38 R2 ppp: [link1] LCP: no reply to 1 echo request(s) | ||
| + | Jan 27 10:50:38 R2 ppp: [link2] LCP: no reply to 1 echo request(s) | ||
| + | Jan 27 10:50:48 R2 ppp: [link2] LCP: no reply to 2 echo request(s) | ||
| + | Jan 27 10:50:48 R2 ppp: [link2] LCP: peer not responding to echo requests | ||
| + | Jan 27 10:50:48 R2 ppp: [link2] LCP: state change Opened --> Stopping | ||
| + | Jan 27 10:50:48 R2 ppp: [link2] Link: Leave bundle " | ||
| + | </ | ||