HA-OpenStackYoga

1. Environment preparation

1.1 Server inventory

Hostname IP
haproxy01 172.21.48.10
haproxy02 172.21.48.11
controller01 172.21.48.12
controller02 172.21.48.13
compute01 172.21.48.14
vip 172.21.48.100

1.2 Configure hosts resolution

  • All nodes

cat >> /etc/hosts << EOF
172.21.48.10 haproxy01
172.21.48.11 haproxy02
172.21.48.12 controller01
172.21.48.13 controller02
172.21.48.14 compute01
172.21.48.100 openstack.vip.org
EOF

1.3 Configure the offline package source

  • All nodes

# Unpack the offline repository
tar zxvf openstackyoga.tar.gz -C /opt/

# Back up the original file
cp /etc/apt/sources.list{,.bak}

# Configure the offline source
cat > /etc/apt/sources.list << EOF
deb [trusted=yes] file:// /opt/openstackyoga/debs/
EOF

# Clear the package cache
apt clean

# Refresh the package index
apt update

2. HAProxy + Keepalived

  • haproxy01 node

  • Install and configure HAProxy

apt install -y haproxy

vim /etc/haproxy/haproxy.cfg
global
   maxconn 100000
   stats socket /var/lib/haproxy/haproxy.sock mode 600 level admin
   #uid 99
   #gid 99
   user haproxy
   group haproxy
   daemon
   # nbproc 4
   # cpu-map 1 0
   # cpu-map 2 1
   # cpu-map 3 2
   # cpu-map 4 3
   pidfile /var/lib/haproxy/haproxy.pid
   log 127.0.0.1 local2 info

defaults
   option http-keep-alive
   option forwardfor
   maxconn 100000
   mode http
   timeout connect 300000ms
   timeout client 300000ms
   timeout server 300000ms

listen stats
   mode http
   bind 0.0.0.0:9999
   stats enable
   log global
   stats uri    /haproxy
   stats auth   admin:123456
  • Allow binding to a non-local VIP
    • HAProxy normally needs the VIP to be present before it will start; with this setting it can start even without holding the VIP
vim /etc/sysctl.conf
net.ipv4.ip_nonlocal_bind = 1

sysctl -p
  • Install and configure Keepalived (haproxy01 node)

apt install -y keepalived

vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
       [email protected]
   }
   notification_email_from [email protected]
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id haproxy1
   vrrp_skip_check_adv_addr
   vrrp_iptables
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance HA_openstack {
   state BACKUP
   interface eth0
   virtual_router_id 66
   priority 100
   advert_int 2
   nopreempt

   virtual_ipaddress {
      172.21.48.100/24 dev eth0 label eth0:1       
   }
}

# Restart the service to apply the change
systemctl restart keepalived
  • Install and configure Keepalived (haproxy02 node)

apt install -y keepalived

vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
       [email protected]
   }
   notification_email_from [email protected]
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id haproxy2
   vrrp_skip_check_adv_addr
   vrrp_iptables
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance HA_openstack {
   state BACKUP
   interface eth0
   virtual_router_id 66
   priority 80
   advert_int 2
   nopreempt

   virtual_ipaddress {
      172.21.48.100/24 dev eth0 label eth0:1       
   }
}

# Restart the service to apply the change
systemctl restart keepalived
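  • Optional quick check (not part of the original steps): confirm which HAProxy node currently holds the VIP; it should appear as label eth0:1 on exactly one of the two nodes
# Run on haproxy01 and haproxy02; only the current MASTER should print the address
ip addr show eth0 | grep 172.21.48.100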

3. MariaDB cluster

3.1 MariaDB cluster deployment

  • Install on all nodes

apt install -y mariadb-server
  • controller01 node

cd /etc/mysql/mariadb.conf.d/

# Configure the server file
vim 50-server.cnf 
[client]
socket=/var/run/mysqld/mysql.sock
 
[mysqld]
server-id=1
datadir=/var/lib/mysql
socket=/var/run/mysqld/mysql.sock
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid
binlog_expire_logs_seconds=604800
skip-external-locking
skip-name-resolve
max_connections=5000
connect_timeout=5
wait_timeout=600
max_allowed_packet=16M
thread_cache_size=128
sort_buffer_size=4M
bulk_insert_buffer_size=16M
tmp_table_size=32M
max_heap_table_size=32M
 
character-set-client-handshake = FALSE
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci
init_connect='SET NAMES utf8mb4'




# Configure the Galera cluster file
vim 60-galera.cnf
[galera]
wsrep_causal_reads=ON
wsrep_provider_options="gcache.size=128M"
wsrep_certify_nonPK=ON
log-bin=/data/mariadb/binlog/mysql-bin
log_slave_updates=1
query_cache_size=0
wsrep_on=ON
wsrep_provider=/usr/lib/galera/libgalera_smm.so
wsrep_cluster_name=MGC-Cluster
wsrep_cluster_address=gcomm://172.21.48.12,172.21.48.13,172.21.48.14
wsrep_node_name=controller01
wsrep_node_address=172.21.48.12
wsrep_sst_method=rsync
binlog_format=row  
default_storage_engine=InnoDB  
innodb_autoinc_lock_mode=2
wsrep_slave_threads=8
innodb_flush_log_at_trx_commit=0
innodb_buffer_pool_size=500M


# Prepare the MariaDB binlog directory (controller01 node)
mkdir -pv /data/mariadb/binlog/

chown -R mysql.mysql /data/


# Bootstrap the Galera cluster
# Ubuntu starts MariaDB automatically after installation, so stop the service first
systemctl stop mariadb
galera_new_cluster


# Verify
mysql

show status like 'wsrep%';
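  • Optional, more targeted check (an addition, not in the original): instead of scanning the full wsrep output, query just the cluster size and sync state
# Expect wsrep_cluster_size to match the number of joined nodes (3 once all nodes are in)
# and wsrep_local_state_comment to read "Synced"
mysql -e "SHOW STATUS LIKE 'wsrep_cluster_size'; SHOW STATUS LIKE 'wsrep_local_state_comment';"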
  • controller02 node

# Copy the cluster configuration files
scp 172.21.48.12:/etc/mysql/mariadb.conf.d/50-server.cnf /etc/mysql/mariadb.conf.d/

scp 172.21.48.12:/etc/mysql/mariadb.conf.d/60-galera.cnf /etc/mysql/mariadb.conf.d/

# Prepare the cluster directories
mkdir -pv /data/mariadb/binlog/

chown -R mysql.mysql /data/

# Edit the configuration file
sed -i "s/controller01/controller02/g" /etc/mysql/mariadb.conf.d/60-galera.cnf

sed -i "s/wsrep_node_address=172.21.48.12/wsrep_node_address=172.21.48.13/g" /etc/mysql/mariadb.conf.d/60-galera.cnf

# Start the database and verify
systemctl restart mariadb

# Verify
mysql

show status like 'wsrep%';
  • compute01 node

# Copy the cluster configuration files
scp 172.21.48.12:/etc/mysql/mariadb.conf.d/50-server.cnf /etc/mysql/mariadb.conf.d/

scp 172.21.48.12:/etc/mysql/mariadb.conf.d/60-galera.cnf /etc/mysql/mariadb.conf.d/

# Prepare the cluster directories
mkdir -pv /data/mariadb/binlog/

chown -R mysql.mysql /data/

# Edit the configuration file
sed -i "s/controller01/compute01/g" /etc/mysql/mariadb.conf.d/60-galera.cnf

sed -i "s/wsrep_node_address=172.21.48.12/wsrep_node_address=172.21.48.14/g" /etc/mysql/mariadb.conf.d/60-galera.cnf

# Start the database and verify
systemctl restart mariadb

# Verify
mysql

show status like 'wsrep%';

3.2 Recovering the cluster after a full shutdown

  • controller01 node
# Remove the Galera cache files on controller01
cd /var/lib/mysql

rm -rf galera.cache

rm -rf grastate.dat

galera_new_cluster
  • The other nodes only need a normal restart
systemctl restart mariadb
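  • Optional safety check before bootstrapping (assumes Galera 3.19 or later, which records a safe_to_bootstrap flag): the node that shut down last is the safest one to bootstrap from
# If another node shows safe_to_bootstrap: 1, prefer running galera_new_cluster there instead
cat /var/lib/mysql/grastate.dat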

4. Memcached

  • All OpenStack nodes

# Install the service
apt install -y memcached python3-memcache

# Edit the configuration (line 35): listen on all interfaces
vim /etc/memcached.conf
35 -l 0.0.0.0

# Restart the service to apply the change
service memcached restart

5. RabbitMQ

  • All OpenStack nodes

apt install -y rabbitmq-server
  • controller01 node

rabbitmqctl add_user openstack 000000

rabbitmqctl set_permissions openstack ".*" ".*" ".*"
  • controller02 node

    • Ignore the output of these commands
scp 172.21.48.12:/var/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/

systemctl restart rabbitmq-server.service

# 1. Stop the app
rabbitmqctl stop_app
# 2. Reset the node
rabbitmqctl reset
# 3. Join the cluster on controller01
rabbitmqctl join_cluster rabbit@controller01
# 4. Start the app
rabbitmqctl start_app
  • compute01 node

    • Ignore the output of these commands
scp 172.21.48.12:/var/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/

systemctl restart rabbitmq-server.service

# 1. Stop the app
rabbitmqctl stop_app
# 2. Reset the node
rabbitmqctl reset
# 3. Join the cluster on controller01
rabbitmqctl join_cluster rabbit@controller01
# 4. Start the app
rabbitmqctl start_app

5.1 Verification

  • Any OpenStack node
rabbitmqctl  cluster_status
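  • Optional hardening step, not in the original write-up: with classic queues, clustering alone does not replicate queue contents, so a mirror-all policy is a common companion setting
# Mirror every queue across all cluster nodes (classic queue mirroring, default vhost)
rabbitmqctl set_policy ha-all "^" '{"ha-mode":"all"}'

# Confirm the policy was applied
rabbitmqctl list_policies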

6. Time synchronization

  • All OpenStack nodes

apt install -y chrony
  • controller01 node

# Edit the configuration (numbers are line numbers in the stock file)
vim /etc/chrony/chrony.conf 
20 server controller01 iburst maxsources 2
61 allow all
63 local stratum 10

# Restart the service
systemctl restart chronyd
  • controller02 node

vim /etc/chrony/chrony.conf
server controller01 iburst maxsources 2

# Restart the service
systemctl restart chronyd
  • compute01 node

vim /etc/chrony/chrony.conf
server controller01 iburst maxsources 2

# Restart the service
systemctl restart chronyd
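  • Optional check (not from the original steps): on controller02 and compute01, confirm they are actually syncing from controller01
chronyc sources -v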

7. OpenStack client

  • All controller nodes

apt install -y python3-openstackclient

8. Data service proxy configuration

  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen mariadb_cluster
   bind 172.21.48.100:3306
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:3306 check inter 3000 fall 2 rise 5
   server controller02 172.21.48.13:3306 check inter 3000 fall 2 rise 5
   server compute01    172.21.48.14:3306 check inter 3000 fall 2 rise 5
listen rabbitmq_cluster
   bind 172.21.48.100:5672
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:5672 check inter 3000 fall 2 rise 5
   server controller02 172.21.48.13:5672 check inter 3000 fall 2 rise 5
   server compute01    172.21.48.14:5672 check inter 3000 fall 2 rise 5
  • Restart the service to apply
systemctl restart haproxy.service
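  • Optional check, relying on the stats listener configured in section 2 on haproxy01 (admin:123456): pull the stats page in CSV form and confirm both backends report their servers as UP
curl -su admin:123456 "http://172.21.48.10:9999/haproxy;csv" | grep -E "mariadb_cluster|rabbitmq_cluster"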

9. Keystone installation and deployment

  • controller01 node

  • Create the database and user for Keystone

# Create the database
CREATE DATABASE keystone;

# Create the user
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystoneang';
  • Install the service
apt install -y keystone
  • Configure the keystone file
# Back up the configuration file
cp /etc/keystone/keystone.conf{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf

vim /etc/keystone/keystone.conf
[DEFAULT]
log_dir = /var/log/keystone
[application_credential]
[assignment]
[auth]
[cache]
[catalog]
[cors]
[credential]
[database]
connection = mysql+pymysql://keystone:[email protected]/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[extra_headers]
Distribution = Ubuntu
[federation]
[fernet_receipts]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[jwt_tokens]
[ldap]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[policy]
[profiler]
[receipt]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[token]
provider = fernet
[tokenless_auth]
[totp]
[trust]
[unified_limit]
[wsgi]
  • Populate the database
su -s /bin/sh -c "keystone-manage db_sync" keystone
  • Initialize the key repositories for the keystone user and group
# Fernet token keys
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone

# Credential keys
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
  • Before the Queens release, Keystone had to run on two separate ports to accommodate the Identity v2 API, which typically ran a separate admin-only service on port 35357. With the v2 API removed, Keystone can run on the same port, 5000, on all interfaces
keystone-manage bootstrap --bootstrap-password 000000 --bootstrap-admin-url http://openstack.vip.org:5000/v3/ --bootstrap-internal-url http://openstack.vip.org:5000/v3/ --bootstrap-public-url http://openstack.vip.org:5000/v3/ --bootstrap-region-id RegionOne
  • Edit the /etc/apache2/apache2.conf file and set the ServerName option to reference the controller node
echo "ServerName controller01" >> /etc/apache2/apache2.conf 
  • Restart the Apache service to apply the configuration
service apache2 restart
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen keystone
   bind 172.21.48.100:5000
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:5000 check inter 3000 fall 2 rise 5

systemctl reload haproxy
  • controller01 node

  • Configure the OpenStack authentication environment variables

cat > /etc/keystone/admin-openrc.sh << EOF
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=000000
export OS_AUTH_URL=http://openstack.vip.org:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
  • Load the environment variables
source /etc/keystone/admin-openrc.sh
  • Create the service project; the other components will use it later
openstack project create --domain default --description "Service Project" service
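  • Optional verification (not in the original at this point, but the same command used later in section 20.1): request a token through the VIP to confirm HAProxy, Keepalived and Keystone are wired together
openstack token issue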

10. Glance installation and deployment

  • controller01 node

  • Create the database and user for Glance

# Create the database
CREATE DATABASE glance;

# Create the user
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glanceang';
  • Create the glance service user
openstack user create --domain default --password glance glance
  • Add the admin role to the glance user and the service project
openstack role add --project service --user glance admin
  • Create the Image service entity
openstack service create --name glance --description "OpenStack Image" image
  • Create the Image service API endpoints
openstack endpoint create --region RegionOne image public http://openstack.vip.org:9292

openstack endpoint create --region RegionOne image internal http://openstack.vip.org:9292

openstack endpoint create --region RegionOne image admin http://openstack.vip.org:9292
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen glance
   bind 172.21.48.100:9292
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:9292 check inter 3000 fall 2 rise 5

systemctl reload haproxy
  • controller01 node

  • Install the Glance image service

apt install -y glance
  • Configure the glance configuration file
# Back up the configuration file
cp /etc/glance/glance-api.conf{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/glance/glance-api.conf.bak > /etc/glance/glance-api.conf

# Configuration
vim /etc/glance/glance-api.conf
[DEFAULT]
[barbican]
[barbican_service_user]
[cinder]
[cors]
[database]
connection = mysql+pymysql://glance:[email protected]/glance
[file]
[glance.store.http.store]
[glance.store.rbd.store]
[glance.store.s3.store]
[glance.store.swift.store]
[glance.store.vmware_datastore.store]
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[healthcheck]
[image_format]
disk_formats = ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,ploop.root-tar
[key_manager]
[keystone_authtoken]
www_authenticate_uri = http://openstack.vip.org:5000
auth_url = http://openstack.vip.org:5000
memcached_servers = controller01:11211,controller02:11211,compute01:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
[vault]
[wsgi]
  • Populate the database
su -s /bin/sh -c "glance-manage db_sync" glance
  • Restart the Glance service to apply the configuration
service glance-api restart
  • Upload an image to verify
# Download a test image
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img

# Upload the image
glance image-create --name "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility=public

# Check the image status
root@controller:~# openstack image list
+--------------------------------------+--------+--------+
| ID                                   | Name   | Status |
+--------------------------------------+--------+--------+
| 12a404ea-5751-41c6-a319-8f63de543cd8 | cirros | active |
+--------------------------------------+--------+--------+

11. Placement

  • controller01 node

  • Create the database and user for Placement

# Create the database
CREATE DATABASE placement;

# Create the user
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placementang';
  • Create the service user
openstack user create --domain default --password placement placement
  • Add the placement user to the service project with the admin role
openstack role add --project service --user placement admin
  • Create the Placement API entry in the service catalog
openstack service create --name placement --description "Placement API" placement
  • Create the Placement API service endpoints
openstack endpoint create --region RegionOne placement public http://openstack.vip.org:8778
  
openstack endpoint create --region RegionOne placement internal http://openstack.vip.org:8778
  
openstack endpoint create --region RegionOne placement admin http://openstack.vip.org:8778
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen placement
   bind 172.21.48.100:8778
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:8778 check inter 3000 fall 2 rise 5

systemctl reload haproxy
  • controller01 node

  • Install the Placement service

apt install -y placement-api
  • Configure the placement file
# Back up the configuration file
cp /etc/placement/placement.conf{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/placement/placement.conf.bak > /etc/placement/placement.conf

# Configuration
vim /etc/placement/placement.conf
[DEFAULT]
[api]
auth_strategy = keystone
[cors]
[keystone_authtoken]
auth_url = http://openstack.vip.org:5000/v3
memcached_servers = controller01:11211,controller02:11211,compute01:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = placement
[oslo_middleware]
[oslo_policy]
[placement]
[placement_database]
connection = mysql+pymysql://placement:[email protected]/placement
[profiler]
  • Populate the database
su -s /bin/sh -c "placement-manage db sync" placement
  • Restart Apache to load the Placement configuration
service apache2 restart
  • Verify
root@controller:~# placement-status upgrade check
+-------------------------------------------+
| Upgrade Check Results                     |
+-------------------------------------------+
| Check: Missing Root Provider IDs          |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+
| Check: Incomplete Consumers               |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+
| Check: Policy File JSON to YAML Migration |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+

12. Nova on the controller node

  • controller01

  • Create the databases and user for Nova

# Stores Nova API interaction data
CREATE DATABASE nova_api;

# Stores Nova resource data
CREATE DATABASE nova;

# Stores Nova cell0 metadata
CREATE DATABASE nova_cell0;

# Create the user that manages the nova_api database
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'novaang';

# Create the user that manages the nova database
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'novaang';

# Create the user that manages the nova_cell0 database
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'novaang';
  • Create the nova user
openstack user create --domain default --password nova nova
  • Add the admin role to the nova user
openstack role add --project service --user nova admin
  • Create the nova service entity
openstack service create --name nova --description "OpenStack Compute" compute
  • Create the Compute API service endpoints
openstack endpoint create --region RegionOne compute public http://openstack.vip.org:8774/v2.1
  
openstack endpoint create --region RegionOne compute internal http://openstack.vip.org:8774/v2.1
  
openstack endpoint create --region RegionOne compute admin http://openstack.vip.org:8774/v2.1
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen nova_vnc
   bind 172.21.48.100:6080
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:6080 check inter 3000 fall 2 rise 5

listen nova
   bind 172.21.48.100:8774
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:8774 check inter 3000 fall 2 rise 5

listen nova_metadata
   bind 172.21.48.100:8775
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:8775 check inter 3000 fall 2 rise 5

systemctl reload haproxy
  • controller01 node

  • Install the services

apt install -y nova-api nova-conductor nova-novncproxy nova-scheduler
  • Configure the nova file
# Back up the configuration file
cp /etc/nova/nova.conf{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf

# Configuration
vim /etc/nova/nova.conf
[DEFAULT]
log_dir = /var/log/nova
lock_path = /var/lock/nova
state_path = /var/lib/nova
transport_url = rabbit://openstack:[email protected]:5672/
my_ip = 172.21.48.12
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:[email protected]/nova_api
[barbican]
[barbican_service_user]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
connection = mysql+pymysql://nova:[email protected]/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://openstack.vip.org:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://openstack.vip.org:5000/
auth_url = http://openstack.vip.org:5000/
memcached_servers = controller01:11211,controller02:11211,compute01:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
[metrics]
[mks]
[neutron]
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://openstack.vip.org:5000/v3
username = placement
password = placement
[powervm]
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[zvm]
[cells]
enable = False
[os_region_name]
openstack = 
  • Populate the nova_api database
su -s /bin/sh -c "nova-manage api_db sync" nova
  • Register the cell0 database
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
  • Create the cell1 cell
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
  • Populate the nova database
su -s /bin/sh -c "nova-manage db sync" nova
  • Verify that nova cell0 and cell1 are registered correctly
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
  • Restart the related Nova services to load the configuration
# API service
service nova-api restart
# Scheduler service
service nova-scheduler restart
# Conductor (database) service
service nova-conductor restart
# noVNC proxy service
service nova-novncproxy restart

13. Nova on the compute node

  • compute01 node

  • Install the nova-compute service

apt install -y nova-compute
  • Configure the nova file
# Back up the configuration file
cp /etc/nova/nova.conf{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf

# Full configuration
vim /etc/nova/nova.conf
[DEFAULT]
log_dir = /var/log/nova
lock_path = /var/lock/nova
state_path = /var/lib/nova
transport_url = rabbit://openstack:[email protected]
my_ip = 172.21.48.14
[api]
auth_strategy = keystone
[api_database]
[barbican]
[barbican_service_user]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://openstack.vip.org:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://openstack.vip.org:5000/
auth_url = http://openstack.vip.org:5000/
memcached_servers = controller01:11211,controller02:11211,compute01:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[libvirt]
[metrics]
[mks]
[neutron]
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://openstack.vip.org:5000/v3
username = placement
password = placement
[powervm]
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://172.21.48.100:6080/vnc_auto.html
[workarounds]
[wsgi]
[zvm]
[cells]
enable = False
[os_region_name]
openstack = 
  • Check whether hardware acceleration is supported
    • If the result is 0, configure as shown below
# Determine whether the compute node supports hardware acceleration for virtual machines
egrep -c '(vmx|svm)' /proc/cpuinfo

# If the result is "0", apply the following configuration
vim /etc/nova/nova-compute.conf
[libvirt]
virt_type = qemu
  • Restart the service to apply the Nova configuration
service nova-compute restart

14. Configure host discovery

  • controller01 node

  • List the available compute nodes

openstack compute service list --service nova-compute
  • Discover the compute hosts
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
  • Configure host discovery to run every 5 minutes
vim /etc/nova/nova.conf
'''
[scheduler]
discover_hosts_in_cells_interval = 300
'''
  • Restart to apply the configuration
service nova-api restart
  • Verify the Nova services
root@controller01:~# openstack compute service list
+--------------------------------------+----------------+--------------+----------+---------+-------+----------------------------+
| ID                                   | Binary         | Host         | Zone     | Status  | State | Updated At                 |
+--------------------------------------+----------------+--------------+----------+---------+-------+----------------------------+
| 016d7dc2-f2f5-4f8e-8305-9851fdfa5709 | nova-scheduler | controller01 | internal | enabled | up    | 2023-05-23T09:04:36.000000 |
| 4934b88f-49fb-4989-a5fd-58d4dda78ed2 | nova-conductor | controller01 | internal | enabled | up    | 2023-05-23T09:04:36.000000 |
| f60ad50f-7145-4b82-abbb-cda2b1d6ca63 | nova-compute   | compute01    | nova     | enabled | up    | 2023-05-23T09:04:37.000000 |
+--------------------------------------+----------------+--------------+----------+---------+-------+----------------------------+

15. Neutron on the controller node

  • controller01 node

  • Create the database and user for Neutron

# Create the database
CREATE DATABASE neutron;

# Create the user
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutronang';
  • Create the neutron user
openstack user create --domain default --password neutron neutron
  • Add the admin role to the neutron user
openstack role add --project service --user neutron admin
  • Create the neutron service entity
openstack service create --name neutron --description "OpenStack Networking" network
  • Create the neutron API endpoints
openstack endpoint create --region RegionOne network public http://openstack.vip.org:9696
  
openstack endpoint create --region RegionOne network internal http://openstack.vip.org:9696
  
openstack endpoint create --region RegionOne network admin http://openstack.vip.org:9696
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen neutron
   bind 172.21.48.100:9696
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:9696 check inter 3000 fall 2 rise 5

systemctl reload haproxy.service
  • controller01 node

  • Configure kernel forwarding

cat >> /etc/sysctl.conf << EOF
# Disable reverse-path source address validation
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
# Pass bridged (layer-2) traffic through iptables
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF
  • Load the kernel module
    • Purpose: forward bridged traffic to the iptables chains
modprobe br_netfilter
  • Apply the kernel configuration
sysctl -p
  • Install the Neutron OVS services
apt install -y neutron-server neutron-plugin-ml2  neutron-l3-agent neutron-dhcp-agent  neutron-metadata-agent neutron-openvswitch-agent
  • Configure the neutron.conf file
    • Provides the main Neutron service
# Back up the configuration file
cp /etc/neutron/neutron.conf{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf


# Full configuration
vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
auth_strategy = keystone
state_path = /var/lib/neutron
dhcp_agent_notification = true
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
transport_url = rabbit://openstack:[email protected]
[agent]
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
[cache]
[cors]
[database]
connection = mysql+pymysql://neutron:[email protected]/neutron
[healthcheck]
[ironic]
[keystone_authtoken]
www_authenticate_uri = http://openstack.vip.org:5000
auth_url = http://openstack.vip.org:5000
memcached_servers = controller01:11211,controller02:11211,compute01:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[nova]
auth_url = http://openstack.vip.org:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[placement]
[privsep]
[quotas]
[ssl]
  • Configure the ml2_conf.ini file
    • Provides the layer-2 network plugin service
# Back up the configuration file
cp  /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/neutron/plugins/ml2/ml2_conf.ini.bak > /etc/neutron/plugins/ml2/ml2_conf.ini

# Full configuration
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[ml2]
type_drivers = flat,vlan,vxlan,gre
tenant_network_types = vxlan
mechanism_drivers = openvswitch,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = physnet1
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
vni_ranges = 1:1000
[ovs_driver]
[securitygroup]
enable_ipset = true
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[sriov_driver]
  • Configure the openvswitch_agent.ini file
    • Provides the OVS agent service
# Back up the file
cp /etc/neutron/plugins/ml2/openvswitch_agent.ini{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/neutron/plugins/ml2/openvswitch_agent.ini.bak > /etc/neutron/plugins/ml2/openvswitch_agent.ini

# Full configuration
vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
[DEFAULT]
[agent]
l2_population = True
tunnel_types = vxlan
prevent_arp_spoofing = True
[dhcp]
[network_log]
[ovs]
local_ip = 172.21.48.12
bridge_mappings = physnet1:br-eth1
[securitygroup]
  • Configure the l3_agent.ini file
    • Provides the layer-3 network service
# Back up the file
cp /etc/neutron/l3_agent.ini{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/neutron/l3_agent.ini.bak > /etc/neutron/l3_agent.ini

# Full configuration
vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
external_network_bridge =
[agent]
[network_log]
[ovs]
  • Configure the dhcp_agent file
    • Provides the DHCP dynamic addressing service
# Back up the file
cp /etc/neutron/dhcp_agent.ini{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/neutron/dhcp_agent.ini.bak > /etc/neutron/dhcp_agent.ini

# Full configuration
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
[agent]
[ovs]
  • Configure the metadata_agent.ini file
    • Provides the metadata service
    • What is metadata?
      • Data that describes other data: it supports functions such as indicating storage locations, history, resource lookup and record keeping. Metadata works like an electronic catalogue; by describing the content or characteristics of the data it indexes, it makes retrieval possible. In OpenStack, instances use the metadata service to fetch their own configuration (hostname, SSH keys and so on); see the example after the configuration below.
# Back up the file
cp  /etc/neutron/metadata_agent.ini{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/neutron/metadata_agent.ini.bak > /etc/neutron/metadata_agent.ini

# Full configuration
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = openstack.vip.org
metadata_proxy_shared_secret = ws
[agent]
[cache]
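  • Rough illustration of what the metadata service provides (run inside a guest instance, not on the hosts; 169.254.169.254 is the standard link-local metadata endpoint): neutron-metadata-agent proxies the request to the Nova metadata API on port 8775, authenticated with the shared secret configured above
curl http://169.254.169.254/latest/meta-data/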
  • Configure the nova file
    • Lets Nova pick up the Neutron configuration so it can use the network service
vim /etc/nova/nova.conf
'''
[DEFAULT]
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver

[neutron]
auth_url = http://openstack.vip.org:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = ws
'''
  • Populate the database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
  • Restart the nova-api service to apply the Neutron configuration
service nova-api restart
  • Create a new external network bridge
ovs-vsctl add-br br-eth1
  • Map the external network bridge to a NIC
    • Bind the second NIC here; it is the provider (business) NIC
ovs-vsctl add-port br-eth1 eth1
  • Restart the Neutron services to apply the configuration
# Neutron server
service neutron-server restart
# OVS agent
service neutron-openvswitch-agent restart
# DHCP agent
service neutron-dhcp-agent restart
# Metadata agent
service neutron-metadata-agent restart
# L3 agent
service neutron-l3-agent restart

16. Neutron on the compute node

  • compute01 node

  • Configure kernel forwarding

cat >> /etc/sysctl.conf << EOF
# Disable reverse-path source address validation
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
# Pass bridged (layer-2) traffic through iptables
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF
  • Load the kernel module
    • Purpose: forward bridged traffic to the iptables chains
modprobe br_netfilter
  • Apply the kernel configuration
sysctl -p
  • Install the Neutron OVS agent
apt install -y neutron-openvswitch-agent
  • Configure the neutron file
    • Provides the main Neutron service
# Back up the file
cp /etc/neutron/neutron.conf{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf

# Full configuration
vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
auth_strategy = keystone
state_path = /var/lib/neutron
allow_overlapping_ips = true
transport_url = rabbit://openstack:[email protected]
[agent]
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
[cache]
[cors]
[database]
[healthcheck]
[ironic]
[keystone_authtoken]
www_authenticate_uri = http://openstack.vip.org:5000
auth_url = http://openstack.vip.org:5000
memcached_servers = controller01:11211,controller02:11211,compute01:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[placement]
[privsep]
[quotas]
[ssl]
  • Configure the openvswitch_agent.ini file
    • Provides the OVS network service
# Back up the file
cp /etc/neutron/plugins/ml2/openvswitch_agent.ini{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/neutron/plugins/ml2/openvswitch_agent.ini.bak > /etc/neutron/plugins/ml2/openvswitch_agent.ini

# Full configuration
vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
[DEFAULT]
[agent]
l2_population = True
tunnel_types = vxlan
prevent_arp_spoofing = True
[dhcp]
[network_log]
[ovs]
local_ip = 172.21.48.14
bridge_mappings = physnet1:br-eth1
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
  • Configure the nova file to recognize the Neutron configuration
vim /etc/nova/nova.conf
'''
[DEFAULT]
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
vif_plugging_is_fatal = true
vif_plugging_timeout = 300

[neutron]
auth_url = http://openstack.vip.org:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
'''
  • Restart the Nova service to pick up the network configuration
service nova-compute restart
  • Create a new external network bridge
ovs-vsctl add-br br-eth1
  • Map the external network bridge to a NIC
    • Bind the second NIC here; it is the provider (business) NIC
ovs-vsctl add-port br-eth1 eth1
  • Restart the service to load the OVS configuration
service neutron-openvswitch-agent restart

17. Dashboard

  • Install the service
apt install -y openstack-dashboard
  • Configure the local_settings.py file
vim /etc/openstack-dashboard/local_settings.py
'''
# Configure the dashboard to use the OpenStack services on the controller node
OPENSTACK_HOST = "controller01"

# In the Dashboard configuration section, allow hosts to access the dashboard
ALLOWED_HOSTS = ["*"]

# Configure the memcached session storage service
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'controller01:11211,controller02:11211,compute01:11211',
    }
}

# Enable Identity API version 3
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

# Enable support for domains
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

# Configure the API versions
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}

# Configure Default as the default domain for users created through the dashboard
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"

# Configure user as the default role for users created through the dashboard
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

# Enable volume backups
OPENSTACK_CINDER_FEATURES = {
    'enable_backup': True,
}

# Configure the time zone
TIME_ZONE = "Asia/Shanghai"
'''
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen dashboard
   bind 172.21.48.100:80
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:80 check inter 3000 fall 2 rise 5

systemctl reload haproxy
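  • Optional check (assumes the Ubuntu package's default web root of /horizon): confirm the dashboard answers through the VIP
curl -I http://172.21.48.100/horizon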

18. Cinder on the controller node

  • controller01 node

  • Create the database and user for the Cinder component

# Create the cinder database
CREATE DATABASE cinder;

# Create the cinder database user
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinderang';
  • Create the cinder user
openstack user create --domain default --password cinder cinder
  • Add the cinder user to the admin role
openstack role add --project service --user cinder admin
  • Create the cinder service entity
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
  • Create the cinder service API endpoints
openstack endpoint create --region RegionOne volumev3 public http://openstack.vip.org:8776/v3/%\(project_id\)s
  
openstack endpoint create --region RegionOne volumev3 internal http://openstack.vip.org:8776/v3/%\(project_id\)s
  
openstack endpoint create --region RegionOne volumev3 admin http://openstack.vip.org:8776/v3/%\(project_id\)s
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg

listen cinder
   bind 172.21.48.100:8776
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:8776 check inter 3000 fall 2 rise 5

systemctl reload haproxy.service
  • controller01 node

  • Install the Cinder services

apt install -y cinder-api cinder-scheduler
  • Configure the cinder.conf file
# Back up the file
cp  /etc/cinder/cinder.conf{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf

# Full configuration
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:[email protected]
auth_strategy = keystone
my_ip = 172.21.48.12
[database]
connection = mysql+pymysql://cinder:[email protected]/cinder
[keystone_authtoken]
www_authenticate_uri = http://openstack.vip.org:5000
auth_url = http://openstack.vip.org:5000
memcached_servers = controller01:11211,controller02:11211,compute01:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
  • Populate the database
su -s /bin/sh -c "cinder-manage db sync" cinder
  • Configure Nova so it can call the Cinder service
vim /etc/nova/nova.conf
'''
[cinder]
os_region_name = RegionOne
'''
  • Restart the Nova service to enable the Cinder integration
service nova-api restart
  • Restart the block storage service
service cinder-scheduler restart
  • Gracefully reload Apache so it picks up the Cinder API
service apache2 reload

19. Cinder on the compute node

  • compute01 node

  • Install the supporting utility packages

apt install -y lvm2 thin-provisioning-tools
  • Create the LVM physical volume
    • Substitute your own disk device name
pvcreate /dev/nvme0n1
  • Create the LVM volume group cinder-volumes
vgcreate cinder-volumes /dev/nvme0n1
  • Modify the lvm.conf file
    • Purpose: add a filter that accepts the /dev/nvme0n1 device and rejects all other devices
vim /etc/lvm/lvm.conf
devices {
...
filter = [ "a/nvme0n1/", "r/.*/"]
  • Install the Cinder packages
apt install -y cinder-volume tgt
  • Configure the cinder.conf file
# Back up the configuration file
cp /etc/cinder/cinder.conf{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf

# Full configuration
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:[email protected]
auth_strategy = keystone
my_ip = 172.21.48.14
enabled_backends = lvm
glance_api_servers = http://openstack.vip.org:9292
[database]
connection = mysql+pymysql://cinder:[email protected]/cinder
[keystone_authtoken]
www_authenticate_uri = http://openstack.vip.org:5000
auth_url = http://openstack.vip.org:5000
memcached_servers = controller01:11211,controller02:11211,compute01:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = tgtadm
volume_backend_name = lvm
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
  • Specify the volume path
vim /etc/tgt/conf.d/tgt.conf
include /var/lib/cinder/volumes/*
  • Restart the block storage volume service, including its dependencies
service tgt restart

service cinder-volume restart
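  • Optional verification, not part of the original steps: run on controller01 with admin-openrc.sh loaded; cinder-scheduler on controller01 and cinder-volume on compute01 (listed as compute01@lvm) should report state up
openstack volume service list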

20. Component high availability

20.1 Keystone high availability

  • controller02 node

  • Install the service

apt install -y keystone
  • controller01 node

cd /etc/keystone/

tar cvf keystone.tar.gz ./*

scp keystone.tar.gz root@controller02:/etc/keystone/
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen keystone
   bind 172.21.48.100:5000
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:5000 check inter 3000 fall 2 rise 5
   server controller02 172.21.48.13:5000 check inter 3000 fall 2 rise 5

systemctl reload haproxy.service
  • controller02 node

cd /etc/keystone/

tar xvf keystone.tar.gz
  • Edit the /etc/apache2/apache2.conf file and set the ServerName option to reference the controller node
echo "ServerName controller02" >> /etc/apache2/apache2.conf 
  • Restart the Apache service to apply the configuration
service apache2 restart
  • Verify
openstack token issue

20.2 Glance high availability

  • controller02 node

apt install -y glance
  • controller01 node

cd /etc/glance/

tar cvf glance.tar.gz ./*

scp glance.tar.gz root@controller02:/etc/glance/
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen glance
   bind 172.21.48.100:9292
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:9292 check inter 3000 fall 2 rise 5
   server controller02 172.21.48.13:9292 check inter 3000 fall 2 rise 5
   
systemctl reload haproxy.service

20.2.1 Configure NFS shared storage

  • Without Ceph, NFS is the only option here; Ceph is strongly recommended so that a single failed node does not cause data loss

  • haproxy01 and all controller nodes

  • Configure the offline NFS package source

tar xvf nfs.tar.gz -C /opt/

vim /etc/apt/sources.list
deb [trusted=yes] file:// /opt/nfs/debs/
  • Install the NFS server on haproxy01

apt-get install nfs-kernel-server -y

  • Install the NFS client on the controller nodes

apt-get install nfs-common -y
  • haproxy01 server configuration

  • Create the glance user

    • The id values may differ; check the glance id (64062 here) in /etc/passwd on the controller nodes and make them match (see the check after the commands below)
useradd glance

usermod -u 64062 glance

groupmod -g 64062 glance
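  • Quick consistency check (a hedged addition; 64062 is just the value from this deployment): compare the ids on haproxy01 with those on the controllers and adjust the numbers above if they differ
id glance
grep glance /etc/passwd /etc/group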
  • Configure NFS
mkdir /glance/images -pv

vim /etc/exports
# Add the directory to share at the end of the file
/glance/images *(rw,sync,no_root_squash)

chown -R glance.glance /glance/
  • Restart the service to apply
/etc/init.d/nfs-kernel-server restart
  • Controller node operations

  • The delete may fail if the request is scheduled to a glance node that does not hold the image; run it a couple of times, and no output means it succeeded

openstack image delete cirros
  • Mount the share
mount -t nfs 172.21.48.10:/glance/images /var/lib/glance/images/ -o nolock
  • Configure a permanent mount
vim /etc/fstab
172.21.48.10:/glance/images /var/lib/glance/images/ nfs defaults,_netdev 0 0
  • controller02 node

cd /etc/glance/

tar xvf glance.tar.gz

systemctl restart glance-api.service

20.3 Placement high availability

  • controller02 node

apt install -y placement-api
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen placement
   bind 172.21.48.100:8778
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:8778 check inter 3000 fall 2 rise 5
   server controller02 172.21.48.13:8778 check inter 3000 fall 2 rise 5

systemctl reload haproxy.service
  • controller01 node

cd /etc/placement/

tar cvf placement.tar.gz ./*

scp placement.tar.gz  root@controller02:/etc/placement/
  • controller02 node

cd /etc/placement/

tar xvf placement.tar.gz

systemctl restart apache2.service

20.4 Nova high availability

  • controller02 node

apt install -y nova-api nova-conductor nova-novncproxy nova-scheduler
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen nova_vnc
   bind 172.21.48.100:6080
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:6080 check inter 3000 fall 2 rise 5
   server controller02 172.21.48.13:6080 check inter 3000 fall 2 rise 5

listen nova
   bind 172.21.48.100:8774
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:8774 check inter 3000 fall 2 rise 5
   server controller02 172.21.48.13:8774 check inter 3000 fall 2 rise 5

listen nova_metadata
   bind 172.21.48.100:8775
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:8775 check inter 3000 fall 2 rise 5
   server controller02 172.21.48.13:8775 check inter 3000 fall 2 rise 5

systemctl reload haproxy.service
  • controller01 node

cd /etc/nova/

tar cvf nova.tar.gz ./*

scp nova.tar.gz  root@controller02:/etc/nova/
  • controller02 node

cd /etc/nova/

tar xvf nova.tar.gz

vim nova.conf
'''
my_ip = 172.21.48.13
'''
  • Restart the services to apply
# API service
service nova-api restart
# Scheduler service
service nova-scheduler restart
# Conductor (database) service
service nova-conductor restart
# noVNC proxy service
service nova-novncproxy restart

20.5 Neutron high availability

  • controller02 node

apt install -y neutron-server neutron-plugin-ml2  neutron-l3-agent neutron-dhcp-agent  neutron-metadata-agent neutron-openvswitch-agent
  • controller01 node

cd /etc/neutron/

tar cvf neutron.tar.gz ./*

scp neutron.tar.gz  root@controller02:/etc/neutron/
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen neutron
   bind 172.21.48.100:9696
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:9696 check inter 3000 fall 2 rise 5
   server controller02 172.21.48.13:9696 check inter 3000 fall 2 rise 5


systemctl reload haproxy.service
  • controller02 node

cd /etc/neutron/

tar xvf neutron.tar.gz

vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
'''
local_ip = 172.21.48.13
'''
  • Configure kernel forwarding
cat >> /etc/sysctl.conf << EOF
# Disable reverse-path source address validation
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
# Pass bridged (layer-2) traffic through iptables
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF
  • Load the kernel module
    • Purpose: forward bridged traffic to the iptables chains
modprobe br_netfilter
  • Apply the kernel configuration
sysctl -p
  • Create a new external network bridge
ovs-vsctl add-br br-eth1
  • Map the external network bridge to a NIC
    • Bind the second NIC here; it is the provider (business) NIC
ovs-vsctl add-port br-eth1 eth1
  • Restart the Neutron services to apply the configuration
# Neutron server
service neutron-server restart
# OVS agent
service neutron-openvswitch-agent restart
# DHCP agent
service neutron-dhcp-agent restart
# Metadata agent
service neutron-metadata-agent restart
# L3 agent
service neutron-l3-agent restart

20.6 Dashboard high availability

  • controller02 node

  • Install the service

apt install -y openstack-dashboard
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen dashboard
   bind 172.21.48.100:80
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:80 check inter 3000 fall 2 rise 5
   server controller02 172.21.48.13:80 check inter 3000 fall 2 rise 5

systemctl reload haproxy.service
  • controller01 node

cd /etc/openstack-dashboard/

scp local_settings.py   root@controller02:/etc/openstack-dashboard/
  • controller02 node

vim local_settings.py
'''
OPENSTACK_HOST = "controller02"
'''

systemctl restart apache2.service

20.7 Cinder high availability

  • controller02 node

apt install -y cinder-api cinder-scheduler
  • haproxy01 node

vim /etc/haproxy/haproxy.cfg
listen cinder
   bind 172.21.48.100:8776
   mode tcp
   log global
   balance random
   server controller01 172.21.48.12:8776 check inter 3000 fall 2 rise 5
   server controller02 172.21.48.13:8776 check inter 3000 fall 2 rise 5

systemctl reload haproxy.service
  • controller02 node

# Back up the file
cp  /etc/cinder/cinder.conf{,.bak}

# Strip comments and blank lines into a fresh file
grep -Ev "^$|#" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf

# Full configuration
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:[email protected]
auth_strategy = keystone
my_ip = 172.21.48.13
[database]
connection = mysql+pymysql://cinder:[email protected]/cinder
[keystone_authtoken]
www_authenticate_uri = http://openstack.vip.org:5000
auth_url = http://openstack.vip.org:5000
memcached_servers = controller01:11211,controller02:11211,compute01:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
  • Configure Nova so it can call the Cinder service
vim /etc/nova/nova.conf
'''
[cinder]
os_region_name = RegionOne
'''
  • Restart the Nova service to enable the Cinder integration
service nova-api restart
  • Restart the block storage service
service cinder-scheduler restart
  • Gracefully reload Apache so it picks up the Cinder API
service apache2 reload

21. Bringing haproxy02 online

  • haproxy02 node

scp root@haproxy01:/etc/haproxy/haproxy.cfg /etc/haproxy/

systemctl restart haproxy
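  • Optional failover test (not in the original write-up): stop Keepalived on haproxy01, confirm the VIP and traffic move to haproxy02, then restore it
# On haproxy01
systemctl stop keepalived

# On haproxy02 the VIP should appear within a few advert intervals
ip addr show eth0 | grep 172.21.48.100

# Any OpenStack node should still authenticate through the VIP
openstack token issue

# Restore haproxy01 afterwards
systemctl start keepalived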
