-Subproject commit 6dbb1cc423ddd36489fbe21b37b9583adf0a5c38
+Subproject commit 3253f871623a1d1f767923771a593a42b121df0f
--- /dev/null
+Virtual SDN over Raspberry Pi Cluster
+"""
This folder contains the scripts used by the VSORC project, together with
some text files that those scripts read.
+"""
--- /dev/null
#!/bin/bash
: '
Written by: Oscar J. Rodriguez

This is a filter used by precompiler.py: it splits the VSORC topology file
given as $1 into "$1_clean" (lines containing device:device links such as
"s1:h1") and "$1_dirt" (everything else).
'
# Quote "$1" so filenames with spaces work, and let grep read the file
# directly instead of piping it through cat. The same pattern selects the
# link lines and, inverted with -v, everything else.
pattern='[sh]+[0-9]{1,3}:[sh]+[0-9]{1,3}'
grep -E  "$pattern" "$1" > "$1_clean"
grep -vE "$pattern" "$1" > "$1_dirt"
--- /dev/null
+#!/usr/bin/python
+"""
+Written by: Felix G. Tejada
+
+This is the principal script to start the project.
+You can start different topology, but by default is starting anything from the precompiler
+
+"""
+"clusterGRE.py: Mininet Raspberry Pi Cluster"
+from mininet.examples.cluster import MininetCluster, SwitchBinPlacer, RemoteGRELink
+from mininet.topolib import TreeTopo
+from mininet.log import setLogLevel
+from mininet.examples.clustercli import ClusterCLI as CLI
+from mininet.node import RemoteController
+from topotest import MiTopo
+from precompiler import TopoFromCompiler
+
def inicia():
    """Start the Mininet cluster using the precompiler-generated topology.

    Reads the cluster node IPs from the ``iplist`` file, connects to the
    remote SDN controller, runs the interactive cluster CLI until the user
    exits, then tears the network down.
    """
    IPs = '/home/pi/scripts/iplist'
    c = RemoteController('c1', ip='192.168.25.10', port=6633)
    servers = readIPs(IPs)
    #servers = ['192.168.25.2', '192.168.25.3', '192.168.25.4','192.168.25.5','192.168.25.6']
    # Alternative topologies (swap in one of these if needed):
    #   TreeTopo(depth=2, fanout=2)   -- basic tree
    #   MiTopo()                      -- loop topology from topotest.py
    #   TreeTopo(depth=3, fanout=3)   -- large tree
    # The original built all four eagerly (Topo.__init__ runs build());
    # only the one actually used is constructed now.
    topo = TopoFromCompiler()  # topology built from the "data" file
    net = MininetCluster(topo=topo, servers=servers, link=RemoteGRELink,
                         placement=SwitchBinPlacer, controller=c)
    net.start()
    CLI(net)
    net.stop()
+
+
def readIPs(path):
    """Return the list of server IP addresses stored one per line in *path*.

    Uses a context manager so the file is always closed (the original never
    closed it), and drops the always-true ``file.mode == "r"`` check, which
    could otherwise make the function fall through and return ``None``.
    """
    with open(path, "r") as ip_file:
        # splitlines() drops the trailing newlines, one IP per entry.
        return ip_file.read().splitlines()
+
+
# Script entry point: enable mininet's "info" log level, then build and run
# the cluster network interactively.
if __name__ == '__main__':
    setLogLevel('info')
    inicia()
--- /dev/null
+#!/usr/bin/env bash
+
+# SSH authentication script for cluster edition
+# This script will create a single key pair, which is then
+# propagated throughout the entire cluster.
+# There are two options for setup; temporary setup
+# persistent setup. If no options are specified, and the script
+# is only given ip addresses or host names, it will default to
+# the temporary setup. An ssh directory is then created in
+# /tmp/mn/ssh on each node, and mounted with the keys over the
+# user's ssh directory. This setup can easily be torn down by running
+# clustersetup with the -c option.
+# If the -p option is used, the setup will be persistent. In this
+# case, the key pair will be be distributed directly to each node's
+# ssh directory, but will be called cluster_key. An option to
+# specify this key for use will be added to the config file in each
+# user's ssh directory.
+
+
set -e
# Mode flags set by the option parsing further down.
num_options=0
persistent=false
showHelp=false
clean=false
# Hosts that resolved to an IPv4 address; filled from the positional args.
declare -a hosts=()
user=$(whoami)
# Temporary ssh dir (bind-mounted over $USERDIR in the temporary setup).
SSHDIR=/tmp/mn/ssh
USERDIR=$HOME/.ssh
+usage="./clustersetup.sh [ -p|h|c ] [ host1 ] [ host2 ] ...\n
+ Authenticate yourself and other cluster nodes to each other
+ via ssh for mininet cluster edition. By default, we use a
+ temporary ssh setup. An ssh directory is mounted over
+ $USERDIR on each machine in the cluster.
+
+ -h: display this help
+ -p: create a persistent ssh setup. This will add
+ new ssh keys and known_hosts to each nodes
+ $USERDIR directory
+ -c: method to clean up a temporary ssh setup.
+ Any hosts taken as arguments will be cleaned
+ "
+
# Persistent setup: generate one rsa key pair named "cluster_key", install
# it on every host in $hosts, and point each user's ssh config at it.
# Survives reboots; there is no automated teardown for this mode.
persistentSetup() {
    echo "***creating key pair"
    ssh-keygen -t rsa -C "Cluster_Edition_Key" -f $USERDIR/cluster_key -N '' # &> /dev/null
    cat $USERDIR/cluster_key.pub >> $USERDIR/authorized_keys
    echo "***configuring ssh"
    echo "IdentityFile $USERDIR/cluster_key" >> $USERDIR/config
    echo "IdentityFile $USERDIR/id_rsa" >> $USERDIR/config

    # Distribute the key pair and the ssh config entries to every node.
    for host in $hosts; do
        echo "***copying public key to $host"
        ssh-copy-id -i $USERDIR/cluster_key.pub $user@$host &> /dev/null
        echo "***copying key pair to remote host"
        scp $USERDIR/cluster_key $user@$host:$USERDIR
        scp $USERDIR/cluster_key.pub $user@$host:$USERDIR
        echo "***configuring remote host"
        ssh -o ForwardAgent=yes $user@$host "
        echo 'IdentityFile $USERDIR/cluster_key' >> $USERDIR/config
        echo 'IdentityFile $USERDIR/id_rsa' >> $USERDIR/config"
    done

    # Share this machine's known_hosts with every node so they all trust
    # each other.
    for host in $hosts; do
        echo "***copying known_hosts to $host"
        scp $USERDIR/known_hosts $user@$host:$USERDIR/cluster_known_hosts
        ssh $user@$host "
        cat $USERDIR/cluster_known_hosts >> $USERDIR/known_hosts
        rm $USERDIR/cluster_known_hosts"
    done
}
+
# Temporary setup: create a throwaway ssh directory under $SSHDIR,
# bind-mount it over $USERDIR locally and on every node, and share a
# single key pair. Torn down later via the -c option (see cleanup).
tempSetup() {

    echo "***creating temporary ssh directory"
    mkdir -p $SSHDIR
    echo "***creating key pair"
    ssh-keygen -t rsa -C "Cluster_Edition_Key" -f $SSHDIR/id_rsa -N '' &> /dev/null

    echo "***mounting temporary ssh directory"
    sudo mount --bind $SSHDIR $USERDIR
    cp $SSHDIR/id_rsa.pub $SSHDIR/authorized_keys

    # Mount the same layout on every node and copy the key pair over.
    for host in $hosts; do
        echo "***copying public key to $host"
        ssh-copy-id $user@$host &> /dev/null
        echo "***mounting remote temporary ssh directory for $host"
        ssh -o ForwardAgent=yes $user@$host "
        mkdir -p $SSHDIR
        cp $USERDIR/authorized_keys $SSHDIR/authorized_keys
        sudo mount --bind $SSHDIR $USERDIR"
        echo "***copying key pair to $host"
        scp $SSHDIR/{id_rsa,id_rsa.pub} $user@$host:$SSHDIR
    done

    # known_hosts is only complete after the loop above, so it is copied
    # out in a second pass.
    for host in $hosts; do
        echo "***copying known_hosts to $host"
        scp $SSHDIR/known_hosts $user@$host:$SSHDIR
    done
}
+
# Tear down a temporary setup: unmount the bind mount over $USERDIR and
# delete $SSHDIR on every host given, then do the same locally.
cleanup() {

    for host in $hosts; do
        echo "***cleaning up $host"
        ssh $user@$host "sudo umount $USERDIR
        sudo rm -rf $SSHDIR"
    done

    echo "**unmounting local directories"
    sudo umount $USERDIR
    echo "***removing temporary ssh directory"
    sudo rm -rf $SSHDIR
    echo "done!"

}
+
+
# ---- option parsing ---------------------------------------------------
if [ $# -eq 0 ]; then
    echo "ERROR: No Arguments"
    echo "$usage"
    exit
else
    while getopts 'hpc' OPTION
    do
        ((num_options+=1))
        case $OPTION in
        h) showHelp=true;;
        p) persistent=true;;
        c) clean=true;;
        ?) showHelp=true;;
        esac
    done
    shift $(($OPTIND - 1))
fi

# -h, -p and -c are mutually exclusive.
if [ "$num_options" -gt 1 ]; then
    echo "ERROR: Too Many Options"
    echo "$usage"
    exit
fi

if $showHelp; then
    echo "$usage"
    exit
fi

# Resolve every positional argument; keep only names with an IPv4 address.
# BUG FIX: the warning used single quotes, so the literal string $i was
# printed instead of the hostname — switched to double quotes.
for i in "$@"; do
    output=$(getent ahostsv4 "$i")
    if [ -z "$output" ]; then
        echo "***WARNING: could not find hostname \"$i\""
        echo ""
    else
        hosts+="$i "
    fi
done

if $clean; then
    cleanup
    exit
fi

echo "***authenticating to:"
for host in $hosts; do
    echo "$host"
done

echo

if $persistent; then
    echo '***Setting up persistent SSH configuration between all nodes'
    persistentSetup
    echo $'\n*** Sucessfully set up ssh throughout the cluster!'

else
    echo '*** Setting up temporary SSH configuration between all nodes'
    tempSetup
    echo $'\n***Finished temporary setup. When you are done with your cluster'
    echo $' session, tear down the SSH connections with'
    echo $' ./clustersetup.sh -c '$hosts''
fi

echo
--- /dev/null
#!/bin/bash
# Launch a Mininet cluster run of a tree(depth 2, fanout 2) topology
# against the remote controller. The --cluster argument is the contents of
# the "iplist" file; the awk one-liner joins the newline-separated IPs
# into a single comma-separated list.
#sudo -E mn --controller=remote,ip=192.168.25.10 --custom ~/topotest.py --topo TopoMia --cluster $(awk 'BEGIN { ORS="" } { print p$0; p="," } END { print "\n" }' iplist) -v debug
sudo -E mn --controller=remote,ip=192.168.25.10 --topo tree,2,2 --cluster $(awk 'BEGIN { ORS="" } { print p$0; p="," } END { print "\n" }' iplist) -v debug
--- /dev/null
+s1:h1
+s1:h2
+s2:s1
+s2:h3
+s3:s2
+s3:h4
+
--- /dev/null
+s1:h1
+s1:h2
+s2:s1
+s2:h3
+s3:s2
+s3:h4
--- /dev/null
#!/bin/bash
: '
Written by: Oscar J. Rodriguez

This is used to discover all the raspberry on the network, and save it in a iplist
'
# Ping-scan the subnet given as $1 (e.g. 192.168.25.0/24), keeping the raw
# nmap output in log.txt, then extract every IPv4 address into "iplist".
# "$1" is quoted so an empty or odd argument does not word-split.
nmap -sn "$1" > log.txt
grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}" log.txt > iplist
--- /dev/null
#!/bin/bash
# Start the Ryu SDN controller with topology discovery enabled, loading the
# FlowManager web UI app and the simple_switch_13 learning-switch app.
cd /home/pi/ryu
sudo ./bin/ryu-manager --observe-links /home/pi/flowmanager/flowmanager/flowmanager.py /home/pi/ryu/ryu/app/simple_switch_13.py
# To run it in the background: ./ejecutarcontroller.sh > /dev/null 2>&1 &
--- /dev/null
#!/bin/bash
# Run the cluster launcher, capturing all of its output in "aichivo".
# BUG FIX: the original invoked "./home/pi/clusterGRE.py" — a relative
# path that only resolves when run from / — use the absolute path instead.
sudo /home/pi/clusterGRE.py > aichivo 2>&1
--- /dev/null
#!/bin/bash
: '
Written by: Oscar J. Rodrigez

This is used for have the ID of the raspberry, based to the ip
'
# Print the last octet of the first non-loopback IPv4 address.
# BUG FIX: the loopback filter said 127.0.0.7; the loopback address is
# 127.0.0.1, so the loopback line was never actually filtered out.
echo $(ifconfig | grep inet | grep -v inet6 | grep -v 127.0.0.1) | awk '{print $2}' | awk -F "." '{print $4}'
--- /dev/null
+192.168.25.2
+192.168.25.3
+192.168.25.4
+192.168.25.5
+192.168.25.6
+192.168.25.7
+192.168.25.8
+192.168.25.9
+192.168.25.11
+192.168.25.12
+192.168.25.13
+192.168.25.14
+192.168.25.15
+192.168.25.16
--- /dev/null
+
+Starting Nmap 7.40 ( https://nmap.org ) at 2019-11-14 23:07 GMT
+Nmap scan report for 192.168.25.1
+Host is up (0.0091s latency).
+Nmap scan report for 192.168.25.2
+Host is up (0.0030s latency).
+Nmap scan report for 192.168.25.3
+Host is up (0.0028s latency).
+Nmap scan report for 192.168.25.4
+Host is up (0.0027s latency).
+Nmap scan report for 192.168.25.5
+Host is up (0.0026s latency).
+Nmap scan report for 192.168.25.6
+Host is up (0.0024s latency).
+Nmap scan report for 192.168.25.7
+Host is up (0.0023s latency).
+Nmap scan report for 192.168.25.8
+Host is up (0.0058s latency).
+Nmap scan report for 192.168.25.9
+Host is up (0.0046s latency).
+Nmap scan report for 192.168.25.10
+Host is up (0.00073s latency).
+Nmap scan report for 192.168.25.11
+Host is up (0.0021s latency).
+Nmap scan report for 192.168.25.12
+Host is up (0.0018s latency).
+Nmap scan report for 192.168.25.13
+Host is up (0.0029s latency).
+Nmap scan report for 192.168.25.14
+Host is up (0.0027s latency).
+Nmap scan report for 192.168.25.15
+Host is up (0.0026s latency).
+Nmap scan report for 192.168.25.16
+Host is up (0.0025s latency).
+Nmap done: 128 IP addresses (16 hosts up) scanned in 1.93 seconds
--- /dev/null
#!/bin/bash
# Copy the given files in parallel (pscp) to every Pi listed in "iplist",
# logging in as user "pi" (-l); -A prompts once for the password.
pscp -h iplist -A -l pi "$@"
--- /dev/null
#!/bin/bash
# Run the command given as arguments over ssh on every Pi listed in the
# "iplist" file (all the Pis), printing each host's output inline (-i)
# and logging in as user "pi" (-l).
pssh -h iplist -i -l pi "$@"
--- /dev/null
+#!/usr/bin/python
+"""
+Written by: Oscar J. Rodriguez and Felix G. Tejada
+
+This code is for precompile the VSORC language to Mininet topology API.
+VSORC language is a topology languaje, for example:
+
+s1:h1
+s2:h2
+s1:s2
+
+This create a simple two switch topology with two host each one.
+"""
+
+import sys
+import os
+import time
+from mininet.topo import Topo
+from mininet.log import setLogLevel, info
+
links = []     # "s1:h1"-style link lines read from the cleaned data file
devices = []   # every device name appearing in links (deduplicated below)
hosts = []     # host names registered by TopoFromCompiler.build
switches = []  # switch names registered by TopoFromCompiler.build
#Lists

# Run the shell filter that splits the "data" file into "data_clean"
# (valid link lines) and "data_dirt" (everything else), then give it a
# moment to finish writing.
#cmd = './cleaner.sh '+sys.argv[1]
cmd2 = './cleaner.sh data'
os.system(cmd2)
time.sleep(.300)

# "data" is the file with the topo vsorc script.
# BUG FIX: the original called "document.close" without parentheses, so
# the file was never closed; the context manager closes it reliably.
#document = open(sys.argv[1] + "_clean" ,"r+")
with open("data" + "_clean", "r+") as document:
    links = document.readlines()

# Strip the trailing newline from every collected link line.
links = [linkline.rstrip() for linkline in links]

# Build a list of unique device names; dict.fromkeys preserves the
# first-seen order while removing duplicates.
for value in links:
    value_split = value.split(':')
    devices.append(value_split[0])
    devices.append(value_split[1])
devices = list(dict.fromkeys(devices))
+
+
class TopoFromCompiler(Topo):
    """Mininet topology assembled from the VSORC link list parsed above.

    Every name in ``devices`` starting with "h" becomes a host and every
    name starting with "s" becomes a switch; each "a:b" entry in ``links``
    is then wired up as a link.
    """

    def build(self):
        # Register each device as a host or a switch by its name prefix.
        for name in devices:
            if name.startswith("h"):
                hosts.append(self.addHost(name))
            elif name.startswith("s"):
                switches.append(self.addSwitch(name))

        print ("Devices: " + str(devices) + "\n" + "Links: " + str(links) + "\n" + "Hosts: " + str(hosts) + "\n" + "Switches: " + str(switches) + "\n")

        # Connect every "left:right" pair from the link list.
        for pair in links:
            parts = pair.split(":")
            self.addLink(parts[0], parts[1])
--- /dev/null
#!/bin/bash
# Reboot every Pi listed in the "iplist" file over ssh, then reboot this
# machine as well.
pssh -h iplist -i -l pi "sudo reboot now"
sudo reboot now
--- /dev/null
# Every 5 seconds, show the CPU temperature of this Pi and of every Pi in
# the cluster (queried through multissh.sh).
watch -n 5 "(vcgencmd measure_temp && ./multissh.sh vcgencmd measure_temp) | grep temp"
--- /dev/null
#!/bin/bash
# Start clusterGRE.py in the background with its stdin connected to a
# named pipe ("fifo") so commands can be fed to the Mininet CLI later;
# all output is captured in "aichivo". Holding fd 3 open on the pipe's
# write end keeps the CLI from seeing EOF immediately.
cd /home/pi && mkfifo fifo && touch aichivo
cd /home/pi && cat fifo | sudo ./clusterGRE.py > aichivo 2>&1 &
exec 3>fifo
+
--- /dev/null
+#!/usr/bin/python
+"""
+Crea una topologia en bucle.
+Este archivo es usado para crear una topologia donde se pueda mostrar STP
+"""
+from mininet.topo import Topo
+# from mininet.log import setLogLevel, info
+# from mininet.node import Controller, RemoteController, OVSController
+# from mininet.node import CPULimitedHost, Host, Node
+# from mininet.node import OVSKernelSwitch, UserSwitch
+# from mininet.node import IVSSwitch
+# from mininet.cli import CLI
+# from mininet.link import TCLink, Intf
+# from subprocess import call
+
+
class MiTopo(Topo):
    """Loop topology used to demonstrate STP.

    Three switches wired in a triangle (s1-s2, s2-s3, s3-s1), each with a
    single host attached.
    """

    def build(self):
        # Create the three switches, then the three hosts, in order.
        ring = [self.addSwitch('s%d' % n) for n in range(1, 4)]
        ends = [self.addHost('h%d' % n) for n in range(1, 4)]

        # Close the switch ring: s1-s2, s2-s3, s3-s1.
        for a, b in ((0, 1), (1, 2), (2, 0)):
            self.addLink(ring[a], ring[b])

        # Hang one host off each switch: s1-h1, s2-h2, s3-h3.
        for sw, h in zip(ring, ends):
            self.addLink(sw, h)
+
+# topos = { 'TopoMia': ( lambda: MiTopo() ) }