TF in Action: Installing Tungsten Fabric with Vagrant

Published by the TF Chinese Community on 2020-06-19

This is an original article by Chen Gang, network architect at Suning.


01
Preparing the Test Machines


The setup wouldn't run on my 16 GB laptop, so I pieced together a gaming-workstation-class machine: dual E5-2680 v3 CPUs (24 cores / 48 threads), 128 GB of DDR4 ECC memory, and a 512 GB NVMe drive. On it I run five VMs that stand in for physical servers.


· 192.16.35.110  deployer

· 192.16.35.111  TF controller

· 192.16.35.112  OpenStack server, which also serves as a compute node

· 192.16.35.113  k8s master

· 192.16.35.114  k8s node k01, which also serves as an OpenStack compute node

 

Pulling the box image directly through Vagrant is very slow, so download it first:


https://cloud.centos.org/centos/7/vagrant/x86_64/images/

 

Download the corresponding VirtualBox .box file.

 

Then register it as a Vagrant box named centos/7:

 

vagrant box add centos/7 CentOS-7-x86_64-Vagrant-2004_01.VirtualBox.box
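
To confirm the box is registered under the expected name, list the local boxes (a quick sanity check):

    vagrant box list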


    
    cat << EEOOFF > vagrantfile
    ### start
    # -*- mode: ruby -*-
    # vi: set ft=ruby :

    Vagrant.require_version ">=2.0.3"

    # All Vagrant configuration is done below. The "2" in Vagrant.configure
    # configures the configuration version (we support older styles for
    # backwards compatibility). Please don't change it unless you know what
    # you're doing.
    ENV["LC_ALL"] = "en_US.UTF-8"
    VAGRANTFILE_API_VERSION = "2"

    Vagrant.configure("2") do |config|
      # The most common configuration options are documented below.
      # Every Vagrant development environment requires a box.
      config.vm.box = "geerlingguy/centos7"
      # config.vbguest.auto_update = false
      # config.vbguest.no_remote = true

      config.vm.define "deployer" do |dp|
        dp.vm.provider "virtualbox" do |v|
          v.memory = "8000"
          v.cpus = 2
        end
        dp.vm.network "private_network", ip: "192.16.35.110", auto_config: true
        dp.vm.hostname = "deployer"
      end

      config.vm.define "tf" do |tf|
        tf.vm.provider "virtualbox" do |v|
          v.memory = "64000"
          v.cpus = 16
        end
        tf.vm.network "private_network", ip: "192.16.35.111", auto_config: true
        tf.vm.hostname = "tf"
      end

      config.vm.define "ops" do |os|
        os.vm.provider "virtualbox" do |v|
          v.memory = "16000"
          v.cpus = 4
        end
        os.vm.network "private_network", ip: "192.16.35.112", auto_config: true
        os.vm.hostname = "ops"
      end

      config.vm.define "k8s" do |k8|
        k8.vm.provider "virtualbox" do |v|
          v.memory = "8000"
          v.cpus = 2
        end
        k8.vm.network "private_network", ip: "192.16.35.113", auto_config: true
        k8.vm.hostname = "k8s"
      end

      config.vm.define "k01" do |k1|
        k1.vm.provider "virtualbox" do |v|
          v.memory = "4000"
          v.cpus = 2
        end
        k1.vm.network "private_network", ip: "192.16.35.114", auto_config: true
        k1.vm.hostname = "k01"
      end

      config.vm.provision "shell", privileged: true, path: "./setup.sh"
    end
    EEOOFF
    cat << EEOOFF > setup.sh
    #!/bin/bash
    #
    # Setup vagrant vms.
    #
    set -eu

    # Copy hosts info
    cat <<EOF > /etc/hosts
    127.0.0.1 localhost
    127.0.1.1 vagrant.vm vagrant

    192.16.35.110 deployer
    192.16.35.111 tf
    192.16.35.112 ops
    192.16.35.113 k8s
    192.16.35.114 k01

    # The following lines are desirable for IPv6 capable hosts
    ::1     localhost ip6-localhost ip6-loopback
    ff02::1 ip6-allnodes
    ff02::2 ip6-allrouters
    EOF

    systemctl stop firewalld
    systemctl disable firewalld
    iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
    iptables -P FORWARD ACCEPT

    swapoff -a
    sed -i 's/.*swap.*/#&/' /etc/fstab
    # swapoff -a && sysctl -w vm.swappiness=0

    # setenforce 0
    sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
    sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
    sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux
    sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config

    # modprobe ip_vs_rr
    modprobe br_netfilter

    yum -y update

    # sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
    # sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
    # yum install -y bridge-utils.x86_64
    # modprobe bridge
    # modprobe br_netfilter

    # Setup system vars
    yum install -y epel-release
    yum install -y yum-utils device-mapper-persistent-data lvm2 net-tools vim chrony python python-setuptools python-pip iproute lrzsz tree git
    yum install -y libguestfs-tools libvirt-python virt-install libvirt ansible

    # index URLs after -i were elided in the original; aliyun's PyPI mirror is assumed
    pip install wheel --upgrade -i https://mirrors.aliyun.com/pypi/simple/
    pip install pip --upgrade -i https://mirrors.aliyun.com/pypi/simple/
    pip install ansible netaddr --upgrade -i https://mirrors.aliyun.com/pypi/simple/

    # python-urllib3 should be installed before "pip install requests"
    # if install failed, pip uninstall urllib3, then reinstall python-urllib3
    # pip uninstall -y urllib3 | true
    # yum install -y python-urllib3
    pip install requests -i https://mirrors.aliyun.com/pypi/simple/

    systemctl disable libvirtd.service
    systemctl disable dnsmasq
    systemctl stop libvirtd.service
    systemctl stop dnsmasq

    if [ -d "/root/.ssh" ]; then
        rm -rf /root/.ssh
    fi
    ssh-keygen -q -t rsa -N "" -f ~/.ssh/id_rsa
    cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys
    chmod go-rwx ~/.ssh/authorized_keys

    # timedatectl set-timezone Asia/Shanghai
    if [ -f "/etc/chrony.conf" ]; then
        mv /etc/chrony.conf /etc/chrony.conf.bak
    fi
    cat <<EOF > /etc/chrony.conf
    allow 192.16.35.0/24
    server ntp1.aliyun.com iburst
    local stratum 10
    logdir /var/log/chrony
    rtcsync
    makestep 1.0 3
    driftfile /var/lib/chrony/drift
    EOF
    systemctl restart chronyd.service
    systemctl enable chronyd.service

    echo "* soft nofile 65536" >> /etc/security/limits.conf
    echo "* hard nofile 65536" >> /etc/security/limits.conf
    echo "* soft nproc 65536" >> /etc/security/limits.conf
    echo "* hard nproc 65536" >> /etc/security/limits.conf
    echo "* soft memlock unlimited" >> /etc/security/limits.conf
    echo "* hard memlock unlimited" >> /etc/security/limits.conf

    if [ ! -d "/var/log/journal" ]; then
        mkdir /var/log/journal
    fi
    if [ ! -d "/etc/systemd/journald.conf.d" ]; then
        mkdir /etc/systemd/journald.conf.d
    fi
    cat <<EOF > /etc/systemd/journald.conf.d/99-prophet.conf
    [Journal]
    Storage=persistent
    Compress=yes
    SyncIntervalSec=5m
    RateLimitInterval=30s
    RateLimitBurst=1000
    SystemMaxUse=10G
    SystemMaxFileSize=200M
    ForwardToSyslog=no
    EOF
    systemctl restart systemd-journald
    EEOOFF
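
With vagrantfile and setup.sh in place, bringing up and provisioning all five VMs should be a single command run from the same directory (the boot step is implied but not shown in the original; the standard Vagrant workflow is assumed):

    vagrant up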


02
Install Docker on All Nodes


CentOS


For example, if pip installs packages very slowly, consider using the aliyun-based pip mirror to speed things up.


· Configure the pip mirror on each node:

    mkdir ~/.pip && tee ~/.pip/pip.conf <<-'EOF'
    [global]
    trusted-host = mirrors.aliyun.com
    index-url = https://mirrors.aliyun.com/pypi/simple/
    EOF


Note that the requests package must not be installed after urllib3, or it will fail:

    pip uninstall urllib3
    pip uninstall chardet
    pip install requests


(All of these commands should already have been run by setup.sh.)

    yum install -y yum-utils device-mapper-persistent-data lvm2 net-tools iproute lrzsz tree git
    # repo URL was elided in the original; aliyun's docker-ce mirror is assumed
    yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
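
The install command itself did not survive the formatting; assuming the standard docker-ce flow on CentOS 7, the remaining steps would look roughly like this:

    # assumed steps, not shown in the original post
    yum install -y docker-ce
    systemctl enable docker
    systemctl start docker
    docker --version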


03
Pull and Start the Contrail-Kolla-Ansible-Deployer Container


Nightly builds of the container are available from Docker Hub.



For example:

            
    vim /etc/docker/daemon.json

    {
      "registry-mirrors": [ "..." ]
    }

    systemctl restart docker

    export CAD_IMAGE=opencontrailnightly/contrail-kolla-ansible-deployer:master-latest
    docker run -td --net host --name contrail_kolla_ansible_deployer $CAD_IMAGE


04
Copy the Configuration File into the Container


instances.yaml: the template file that describes the Tungsten Fabric cluster.



For information on how to configure all the parameters available in this file, see the contrail-ansible-deployer documentation.


             

              
              
    cat << EOF > instances.yaml
    provider_config:
      bms:
        ssh_pwd: vagrant
        ssh_user: root
        ntpserver: ntp1.aliyun.com
        domainsuffix: local
    instances:
      tf:
        provider: bms
        ip: 192.16.35.111
        roles:
          config_database:
          config:
          control:
          analytics_database:
          analytics:
          webui:
      ops:
        provider: bms
        ip: 192.16.35.112
        roles:
          openstack:
          openstack_compute:
          vrouter:
            PHYSICAL_INTERFACE: enp0s8
      k8s:
        provider: bms
        ip: 192.16.35.113
        roles:
          k8s_master:
          k8s_node:
          kubemanager:
          vrouter:
            PHYSICAL_INTERFACE: enp0s8
      k01:
        provider: bms
        ip: 192.16.35.114
        roles:
          openstack_compute:
          k8s_node:
          vrouter:
            PHYSICAL_INTERFACE: enp0s8
    contrail_configuration:
      AUTH_MODE: keystone
      KEYSTONE_AUTH_URL_VERSION: /v3
      KEYSTONE_AUTH_ADMIN_PASSWORD: vagrant
      CLOUD_ORCHESTRATOR: openstack
      CONTRAIL_VERSION: latest
      UPGRADE_KERNEL: true
      ENCAP_PRIORITY: "VXLAN,MPLSoUDP,MPLSoGRE"
      PHYSICAL_INTERFACE: enp0s8
    global_configuration:
      CONTAINER_REGISTRY: opencontrailnightly
    kolla_config:
      kolla_globals:
        enable_haproxy: no
        enable_ironic: "no"
        enable_swift: "no"
        network_interface: "enp0s8"
      kolla_passwords:
        keystone_admin_password: vagrant
    EOF

    export INSTANCES_FILE=instances.yaml
    docker cp $INSTANCES_FILE contrail_kolla_ansible_deployer:/root/contrail-ansible-deployer/config/instances.yaml
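
Before running the playbooks, it's worth making sure the file is syntactically valid YAML. A minimal check, assuming PyYAML is present (the ansible install in setup.sh pulls it in):

    python -c "import yaml; yaml.safe_load(open('instances.yaml')); print('instances.yaml OK')"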

05
Prepare the Environment on All Nodes


Apart from the deployer, I ran the following steps on every node.


The proper approach would be to set up your own registry to hold all the images, but with so few nodes in a lab environment, downloading directly from domestic mirrors is fast enough.

               

Note that the docker and docker-py Python packages conflict with each other; only one of them can be installed. It's best to uninstall both first, then install the one you need:

                
    pip uninstall docker-py docker
    pip install docker
                
                
                
    yum -y install python-devel python-subprocess32 python-setuptools python-pip
    pip install --upgrade pip
    find / -name '*subpro*.egg-info'
    find / -name '*subpro*.egg-info' | xargs rm -rf
    pip install -I six
    pip install -I docker-compose
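
A quick way to confirm the right Docker SDK survived all of the above (my own check):

    python -c "import docker; print(docker.__version__)"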


Point the k8s package repository at the aliyun mirror; the default Google source is too slow or unreachable. Edit playbooks/roles/k8s/tasks/RedHat.yml:

    yum_repository:
      name: Kubernetes
      description: k8s repo
      # baseurl was elided in the original; aliyun's kubernetes mirror is assumed
      baseurl: https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/


The playbook pulls the following images from sites that are slow or unreachable from China; download substitutes from a domestic registry instead and retag them, as shown below:

    k8s.gcr.io/kube-apiserver:v1.14.8
    k8s.gcr.io/kube-controller-manager:v1.14.8
    k8s.gcr.io/kube-scheduler:v1.14.8
    k8s.gcr.io/kube-proxy:v1.14.8
    k8s.gcr.io/pause:3.1
    k8s.gcr.io/etcd:3.3.10
    k8s.gcr.io/coredns:1.3.1


The workaround is to pull the equivalents from a domestic registry:

    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.14.8
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.14.8
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.14.8
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.14.8
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10
    docker pull coredns/coredns:1.3.1
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.8.3


Then retag the downloaded images:

    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.14.8 k8s.gcr.io/kube-apiserver:v1.14.8
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.14.8 k8s.gcr.io/kube-controller-manager:v1.14.8
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.14.8 k8s.gcr.io/kube-scheduler:v1.14.8
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.14.8 k8s.gcr.io/kube-proxy:v1.14.8
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10 k8s.gcr.io/etcd:3.3.10
    docker tag docker.io/coredns/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.8.3 k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
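
Since six of the images follow the same pattern, the pull-and-retag sequence can be collapsed into a loop (a convenience sketch of my own; coredns and the dashboard image keep their one-off handling above):

    for img in kube-apiserver:v1.14.8 kube-controller-manager:v1.14.8 \
               kube-scheduler:v1.14.8 kube-proxy:v1.14.8 pause:3.1 etcd:3.3.10; do
        docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/${img}
        docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/${img} k8s.gcr.io/${img}
    done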


06
Start the Deployer Container and Deploy from Inside It


    docker start contrail_kolla_ansible_deployer


Enter the deployer container:

                            
    docker exec -it contrail_kolla_ansible_deployer bash
    cd /root/contrail-ansible-deployer

    ansible-playbook -i inventory/ -e orchestrator=openstack playbooks/provision_instances.yml
    ansible-playbook -i inventory/ -e orchestrator=openstack playbooks/configure_instances.yml
    ansible-playbook -i inventory/ -e orchestrator=openstack playbooks/install_openstack.yml
    ansible-playbook -i inventory/ -e orchestrator=openstack playbooks/install_k8s.yml
    ansible-playbook -i inventory/ -e orchestrator=openstack playbooks/install_contrail.yml
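
Once all five playbooks finish, a few sanity checks help before moving on (my own habit; contrail-status is installed on the TF nodes by the deployment):

    contrail-status       # on the tf node: components should show active
    kubectl get nodes     # on the k8s master: both nodes should be Ready
    docker ps             # on the ops node: the kolla service containers should be Up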
                            
                            
                            

Remove the master taint so that pods can also be scheduled on the k8s master:

    kubectl taint nodes k8s node-role.kubernetes.io/master-
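
To confirm the taint is gone:

    kubectl describe node k8s | grep -i taints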


The last upgrade brought kubelet to the latest version and hit a CSI bug; editing a config file and restarting kubelet fixes it:

After experiencing the same issue, edit /var/lib/kubelet/config.yaml to add:

    featureGates:
      CSIMigration: false

then restart kubelet:

    systemctl restart kubelet

07
After the Installation, Create Two VMs and Containers for Testing


                                
    yum install -y gcc python-devel
    pip install python-openstackclient
    pip install python-ironicclient

    source /etc/kolla/kolla-toolbox/admin-openrc.sh


If the openstack command fails with the following "queue" error, Python 3 is needed:

                                  File "/usr/lib/python2.7/site-packages/openstack/utils.py", line 13, in <module>    import queueImportError: No module named queue


                                    
    rm -f /usr/bin/python
    ln -s /usr/bin/python3 /usr/bin/python
    pip install python-openstackclient
    pip install python-ironicclient
    yum install -y python3-pip
                                    
                                    
                                    
    yum install -y gcc python-devel wget
    pip install --upgrade setuptools
    pip install --ignore-installed python-openstackclient
Since I need Python 3 every time anyway, I simply installed these as well (index URLs again assumed to be the aliyun mirror):

    pip3 install python-openstackclient -i https://mirrors.aliyun.com/pypi/simple/
    pip3 install python-ironicclient -i https://mirrors.aliyun.com/pypi/simple/


To reach the Tungsten Fabric web UI, point a browser at the tf node; the Contrail UI typically listens on https://192.16.35.111:8143.


To reach OpenStack, point a browser at the Horizon dashboard on the ops node (192.16.35.112).

                                     

On the k8s master (192.16.35.113):

    scp root@192.16.35.114:/opt/cni/bin/contrail-k8s-cni /opt/cni/bin/
    mkdir /etc/cni/net.d
    scp root@192.16.35.114:/etc/cni/net.d/10-contrail.conf /etc/cni/net.d/10-contrail.conf


Download a cirros image for testing (the official download locations work as well; I couldn't find any build that ships with tcpdump):

    wget https://github.com/cirros-dev/cirros/releases/download/0.4.0/cirros-0.4.0-x86_64-disk.img

    reboot

    source /etc/kolla/kolla-toolbox/admin-openrc.sh

    openstack image create cirros --disk-format qcow2 --public --container-format bare --file cirros-0.4.0-x86_64-disk.img
    nova flavor-create m1.tiny auto 512 1 1
    openstack network create net1
    openstack subnet create --subnet-range 10.1.1.0/24 --network net1 mysubnet1
    NET_ID=`openstack network list | grep net1 | awk -F '|' '{print $2}' | tr -d ' '`
    nova boot --image cirros --flavor m1.tiny --nic net-id=${NET_ID} VM1
    nova boot --image cirros --flavor m1.tiny --nic net-id=${NET_ID} VM2
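
Both instances should reach ACTIVE status after a short while:

    openstack server list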


Log in to the k8s master (192.16.35.113):

    yum install -y git
    git clone https://github.com/virtualhops/k8s-demo
    kubectl create -f k8s-demo/po-ubuntuapp.yml
    kubectl create -f k8s-demo/rc-frontend.yml
    kubectl expose rc/frontend
    kubectl exec -it ubuntuapp curl frontend # many times
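
To watch the demo pods come up and check which IPs the Contrail CNI handed out:

    kubectl get pods -o wide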


Reference: the contrail-ansible-deployer wiki page [Container-Workflow]-Deploying-Contrail-with-OpenStack.



Recommended Reading

Tungsten Fabric in Action: Pitfalls of Integrating the vMX Virtual Routing Platform

Tungsten Fabric in Action: Pitfalls of a K8s-Based Deployment

TF in Action Q&A: If You Don't Understand It Thoroughly, You Won't Know How to Fix Problems

TF in Action Q&A: Somewhere in This Network, Deep in the Clouds





From the ITPUB blog; link: http://blog.itpub.net/69957171/viewspace-2699332/. If you reprint this article, please credit the source; otherwise legal responsibility may be pursued.
