본문 바로가기

Openstack Ansible

Openstack-Ansible Ceph 기반 설치

위 내용 기반으로 설치

 

deploy node

/etc/openstack_deploy/env.d/ceph.yml

---
# env.d skeleton override for Ceph: run each Ceph service directly on
# the target host ("is_metal: true") instead of inside an LXC container.

container_skel:
  # Ceph monitor daemon
  ceph-mon_container:
    properties:
      is_metal: true
  # Ceph RADOS Gateway (object storage API)
  ceph-rgw_container:
    properties:
      is_metal: true
  # Ceph NFS gateway
  ceph-nfs_container:
    properties:
      is_metal: true
  # Ceph metadata server (CephFS)
  ceph-mds_container:
    properties:
      is_metal: true

 

/etc/openstack_deploy/user_variables.yml

---
# OpenStack-Ansible user variables for a Ceph-backed deployment.
debug: true
ssh_delay: 10

#lxc_cache_prep_timeout: 3000

# Public endpoints are plain HTTP while HAProxy itself serves TLS.
# NOTE(review): haproxy_ssl is true while openstack_external_ssl is
# false -- confirm this mixed TLS setup is intentional.
openstack_service_publicuri_proto: http
openstack_external_ssl: false
haproxy_ssl: true
rabbitmq_use_ssl: false

horizon_images_upload_mode: legacy

# Keepalived VIPs and the interfaces they bind to.
haproxy_keepalived_external_vip_cidr: "192.168.130.11/24"
haproxy_keepalived_internal_vip_cidr: "172.28.236.11/22"
haproxy_keepalived_external_interface: enp1s0 # my interface name
haproxy_keepalived_internal_interface: br-mgmt

neutron_plugin_base:
  - router

openstack_host_specific_kernel_modules:
  - name: "openvswitch"
    pattern: "CONFIG_OPENVSWITCH="
    group: "network_hosts"

# ML2 with Open vSwitch and distributed virtual routing (DVR).
neutron_plugin_type: ml2.ovs.dvr
neutron_l2_population: true
neutron_tunnel_types: vxlan

neutron_provider_networks:
  network_flat_networks: "*"
  # No spaces inside the comma-separated list: oslo.config list
  # options do not reliably strip whitespace around items.
  network_types: "vxlan,flat,vlan"
  network_vxlan_ranges: "10001:20000"
  network_mappings: "public:br-ex" # maps the "public" provider network (created earlier) to br-ex
  network_interface_mappings: "br-ex:enp6s0"

# Duplicated here (also in openstack_user_config.yml) so the Jinja
# references below can resolve from user variables.
cidr_networks:
  container: 172.28.236.0/22
  storage: 172.28.244.0/22
  tunnel: 172.28.240.0/22

# Use a fixed Ceph cluster FSID instead of generating one per run.
generate_fsid: false
fsid: 2f09a698-e57f-4266-867d-24885aa6eab5

monitor_address_block: "{{ cidr_networks.container }}"
public_network: "{{ cidr_networks.container }}"
cluster_network: "{{ cidr_networks.storage }}"

# Have the Ceph playbooks create the OpenStack pools and client keys.
openstack_config: true

cinder_ceph_client: cinder
glance_ceph_client: glance
glance_default_store: rbd
glance_rbd_store_pool: images
nova_libvirt_images_rbd_pool: vms

cinder_default_volume_type: RBD

cinder_backends:
  RBD:
    volume_driver: cinder.volume.drivers.rbd.RBDDriver
    rbd_pool: volumes
    rbd_ceph_conf: /etc/ceph/ceph.conf
    rbd_store_chunk_size: 8
    volume_backend_name: rbddriver
    rbd_user: "{{ cinder_ceph_client }}"
    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
    report_discard_supported: true


# Single-host Ceph lab settings: replica size 1 and small PG counts.
common_single_host_mode: true
osd_pool_default_size: 1
osd_pool_default_pg_num: 8
osd_pool_default_pgp_num: 8

openstack_service_setup_host: "{{ groups['utility_all'][0] }}"
openstack_service_setup_host_python_interpreter: "/openstack/venvs/utility-{{ openstack_release }}/bin/python"

neutron_ml2_drivers_type: "local,flat,vlan,vxlan"
horizon_network_provider_types: ['local', 'flat', 'vxlan', 'geneve', 'vlan']

 

/etc/openstack_deploy/openstack_user_config.yml

---
# OpenStack-Ansible inventory configuration: networks, VIPs, and the
# host-to-service-group mapping for a single-controller lab with Ceph.
cidr_networks:
  container: 172.28.236.0/22 # MGMT
  tunnel: 172.28.240.0/22 # VXLAN
  storage: 172.28.244.0/22

# Addresses the inventory must never hand out to containers
# (reserved for hosts, VIPs, and infrastructure).
used_ips:
  - "172.28.236.1,172.28.236.50"
  - "172.28.240.1,172.28.240.50"
  - "172.28.244.1,172.28.244.50"

global_overrides:
  # The internal and external VIP should be different IPs, however they
  # do not need to be on separate networks.
  external_lb_vip_address: 192.168.130.11
  internal_lb_vip_address: 172.28.236.11
  management_bridge: "br-mgmt"
  provider_networks:
    # Management network: every container and host gets an address here.
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
    # Overlay (VXLAN tunnel) network for tenant traffic.
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_openvswitch_agent
    # External flat provider network (bridged to the physical NIC).
    - network:
        container_bridge: "br-ex"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "eth12"
        type: "flat"
        net_name: "physnet"
        group_binds:
          - neutron_openvswitch_agent
    # Storage network: bound to the services that talk to Ceph.
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
          - ceph-osd
          - ceph-rgw

###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  controller1:
    ip: 172.28.236.11

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  controller1:
    ip: 172.28.236.11

# load balancer
haproxy_hosts:
  controller1:
    ip: 172.28.236.11

###
### OpenStack
###

# keystone
identity_hosts:
  controller1:
    ip: 172.28.236.11

# cinder api services
storage-infra_hosts:
  controller1:
    ip: 172.28.236.11

# glance
image_hosts:
  controller1:
    ip: 172.28.236.11

# placement
placement-infra_hosts:
  controller1:
    ip: 172.28.236.11

# nova api, conductor, etc services
compute-infra_hosts:
  controller1:
    ip: 172.28.236.11

# heat
orchestration_hosts:
  controller1:
    ip: 172.28.236.11

# horizon
dashboard_hosts:
  controller1:
    ip: 172.28.236.11

# neutron server, agents (L3, etc)
network_hosts:
  controller1:
    ip: 172.28.236.11

# nova hypervisors
compute_hosts:
  compute1:
    ip: 172.28.236.21

# cinder storage host (LVM-backed)
storage_hosts:
  controller1:
    ip: 172.28.236.11

ceph-mon_hosts:
  ceph1:
    ip: 172.28.236.31

# TODO(review): verify the entries below, from 'ceph-rgw_hosts' through
# 'ceph-osd_hosts' -- is the ceph-rgw_hosts assignment correct, and is
# the ceph-osd_hosts device list right for this hardware?
ceph-rgw_hosts:
  ceph1:
    ip: 172.28.236.31

ceph-osd_hosts:
  ceph1:
    ip: 172.28.236.31
    container_vars:
      # Raw block devices handed to the OSDs on this host.
      devices:
        - /dev/vdd
        - /dev/vde
        - /dev/vdf

 

controller node

/etc/haproxy/haproxy.cfg

vi /etc/haproxy/haproxy.cfg
###### glance changes
# NOTE(review): the two frontends below are shown as excerpts -- only
# the changed 'mode' lines are listed; their bind/default_backend lines
# are assumed unchanged from the generated config.
frontend glance_api-front-1
    mode tcp
frontend glance_api-front-2
    mode tcp
backend glance_api-back
#    mode http
    mode tcp
#    balance source
    balance leastconn
    
##### horizon changes
frontend horizon-redirect-front-1
    bind 192.168.130.11:443
    option httplog
    option forwardfor except 127.0.0.0/8
    option http-server-close
    # NOTE(review): 'option forwardfor', 'option httplog' and
    # 'http-request add-header' are HTTP-mode features; in 'mode tcp'
    # HAProxy warns about or ignores them -- confirm this is intended.
    mode tcp
    http-request add-header          X-Forwarded-Proto https
    timeout client 600s
    timeout server 600s
    default_backend horizon-back


frontend horizon-redirect-front-2
    bind 172.28.236.11:443
    option httplog
    option forwardfor except 127.0.0.0/8
    option http-server-close
    mode tcp
    http-request add-header          X-Forwarded-Proto https
    timeout client 600s
    timeout server 600s
    default_backend horizon-back

backend horizon-back
    mode tcp
    balance source
    # Pin each client IP to the same backend server.
    stick store-request src
    stick-table type ip size 256k expire 30m
    option forwardfor
    # Health check via SSL hello (backend serves TLS on 443).
    option ssl-hello-chk
    timeout client 600s
    timeout server 600s
    # Find the horizon container's address with 'lxc-ls -f' and fill it in below.
    server controller_horizon_container-73de3042 172.28.239.120:443 check port 443 inter 12000 rise 1 fall 1