目录
(2)修改ip为10.6.20.100/24,SSH 服务端口为 2025
(3)安装 qemu-kvm、libvirt、virt-install、sshpass(重要)
(5)创建桥网卡名字为br0,root用户密码为Key-1122
(4)利用bind给Linux1配置主dns给Linux2配置备用dns
1,系统安装
(1)!(由于系统安装没有可演示性就此省略)
(2)修改ip为10.6.20.100/24,SSH 服务端口为 2025
nmcli con m enp1s0 ipv4.method manual ipv4.addresses 10.4.220.100/24
我这里的ip不对应是因为我的互通没有做20网段,下面所有的ip以实际为准
修改ssh端口号为2025
sudo vim /etc/ssh/sshd_config
#修改port值,默认为22
Port 2025
(3)安装 qemu-kvm、libvirt、virt-install、sshpass(重要)
yum install -y qemu* libvirt* virt*
#我这里是通过yum安装,正式比赛时候可能是给个包
通过包安装也是一样,先解压在安装
(4)虚拟机存储目录由默认目录改为/home/vmfs/
这里就要求我们在给虚拟机划分磁盘的时候给home多划一点,要不然到后面用kvm创虚拟机会报错
virt-install --name linux0 --ram 4096 --disk path=/home/vmfs/linux0.qcow2,size=40 --vcpus 2 --os-variant auto --network bridge=[桥接网卡] --cdrom=/opt/[镜像名称] --graphics vnc,listen=0.0.0.0
(5)创建桥网卡名字为br0,root用户密码为Key-1122
[root@servwe2 ~] nmcli c add type bridge con-name br0 ifname br0
Connection 'br0' (63473856-0f1e-4c7b-b42d-edf684d40058) successfully added.
[root@servwe2 ~] nmcli c add type bridge-slave ifname ens160 master br0
Connection 'bridge-slave-ens160' (2d8e5035-782e-490b-a944-d5bbe8555c0d) successfully added.
/*开始网络桥接*/
[root@servwe2 ~]# cd /etc/NetworkManager/system-connections/
[root@servwe2 system-connections]# vim ens160.nmconnection
[ipv4]
method=auto
[root@servwe2 system-connections]# vim br0.nmconnection
[ipv4]
method=manual
address1=10.4.220.100/24,10.4.220.1
[root@servwe2 ~] systemctl start libvirtd
[root@servwe2 ~] systemctl enable libvirtd
[root@servwe2 ~] systemctl status libvirtd
[root@servwe2 ~] reboot
(6)创建快照
virsh snapshot-create-as linux0 linux0-snapshot
(7)克隆虚拟机并设置为开机自启
#克隆
virt-clone --original linux0 --name linux1 --file /home/vmfs/linux1.qcow2 --auto-clone
#开机自启
sudo virsh edit [虚拟机名称]
#<on_poweroff>内改为restart
<devices>
<!-- 其他设备配置 -->
</devices>
<on_poweroff>restart</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
#启用开机自启
sudo virsh autostart vm_name
2,配置默认软件仓库
(1)使用httpd82端口提供服务
yum install -y httpd*
[root@server2 ~]# vim /etc/httpd/conf/httpd.conf
Listen 82
[root@server2 ~]# systemctl restart httpd
[root@server2 ~]# systemctl --now enable httpd
禁止禁用防火墙
可以通过屏蔽防火墙服务达到禁用防火墙效果
sudo systemctl mask firewalld
确保SELinux保护机制在Enforcing模式
sudo vi /etc/selinux/config
SELINUX=disabled
改为enforcing
SELINUX=enforcing
(2)server2上配置软件仓库共虚拟机使用
将iso文件挂载到apache根目录下
vim /etc/fstab
/opt/Rocky-x86_64-dvd.iso /var/www/html iso9660 defaults 0 0
systemctl daemon-reload
mount -a
[root@server2 ~]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 4.0M 0 4.0M 0% /dev
tmpfs tmpfs 872M 0 872M 0% /dev/shm
tmpfs tmpfs 349M 5.2M 344M 2% /run
/dev/mapper/rl-root xfs 39G 13G 27G 32% /
/dev/loop0 iso9660 11G 11G 0 100% /var/www/html #挂载成功
/dev/mapper/rl-home xfs 19G 166M 19G 1% /home
/dev/sda1 xfs 960M 262M 699M 28% /boot
tmpfs tmpfs 175M 0 175M 0% /run/user/0
[root@server2 ~]# ls /var/www/html/
AppStream BaseOS EFI images isolinux LICENSE media.repo
[root@server2 ~]# vim /etc/yum.repos.d/rocky.repo
[BaseOS]
name=BaseOS
baseurl=http://server2.sdskills.lan:82/BaseOS
enabled=1
gpgcheck=0
[AppStream]
name=AppStream
baseurl=http://server2.sdskills.lan:82/AppStream
enabled=1
gpgcheck=0
[root@server2 ~]# yum clean all
0 files removed
[root@server2 ~]# yum makecache
BaseOS 22 MB/s | 2.2 MB 00:00 #yum配置成功
AppStream 59 MB/s | 7.9 MB 00:00
Metadata cache created.
[root@server2 ~]#
3,dns服务
(1)修改ip
nmcli con m enp1s0 ipv4.method manual ipv4.addresses 10.4.220.101 ipv4.gateway 10.4.220.1 ipv4.dns 10.4.220.101,10.4.220.102 #根据赛题修改
1.1修改主机名称和时钟同步
[root@linux0 ~]# for i in {1..9}
> do
> ssh 10.4.220.10$i "hostnamectl set-hostname linux$i"
> ssh 10.4.220.10$i "timedatectl set-timezone Asia/Shanghai"
> done
[root@linux0 ~]# bash
测试ssh免密和时钟
[root@linux1 ~]# ssh root@10.4.220.102
Activate the web console with: systemctl enable --now cockpit.socket ssh免密成功
Last login: Thu Dec 19 16:39:05 2024
[root@linux2 ~]# timedatectl 时钟同步成功
Local time: Thu 2024-12-19 18:21:15 CST
Universal time: Thu 2024-12-19 10:21:15 UTC
RTC time: Thu 2024-12-19 10:21:16
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: no
NTP service: active
RTC in local TZ: no
[root@linux2 ~]#
(2)端口
在做每一个服务的时候开启
(3)linux主机之间root用户实现密钥ssh认证
[root@linux0 ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa
Your public key has been saved in /root/.ssh/id_rsa.pub
The key fingerprint is:
SHA256:mZRNmGfYf4/p5niYJNfeKhvwc3esAuwQ/6KAxrfvkpw root@linux0
The key's randomart image is:
+---[RSA 3072]----+
| =. |
| +++ |
| oo.. |
| ..o . . |
| S+. o + | #linux1上生成密钥
| . . . =oo +..|
| +.oo o *+=..+|
| . .Eo o *==oo|
| .++. .oB+. |
+----[SHA256]-----+
[root@linux0 ~]# for i in {1..9}
> do
> ssh-copy-id 10.4.220.10$i #将密钥文件传给其他主机
> scp -r /root/.ssh/ 10.4.220.10$i:/root/
> done
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host '10.4.220.101 (10.4.220.101)' can't be established.
ED25519 key fingerprint is SHA256:YyuzF7mDk/ibBXDvijWBwX7IKCWJYrRZ+4PklDH29Og.
This key is not known by any other names
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@10.4.220.101's password:
(4)利用bind给Linux1配置主dns给Linux2配置备用dns
允许连接
options {
listen-on port 53 { any; };
listen-on-v6 port 53 { ::1; };
directory "/var/named";
dump-file "/var/named/data/cache_dump.db";
statistics-file "/var/named/data/named_stats.txt";
memstatistics-file "/var/named/data/named_mem_stats.txt";
secroots-file "/var/named/data/named.secroots";
recursing-file "/var/named/data/named.recursing";
allow-query { any; };
vim /etc/named.rfc1912.zones
添加正反项配置文件
zone "sdskills.lan" IN {
type master;
file "named.sdskills"; #正向区域
allow-update { none; };
};
zone "220.4.10.in-addr.arpa" IN {
type master;
file "named.2024"; #反向区域
allow-update { none; };
};
"/etc/named.rfc1912.zones" 55L, 1216B
添加正反向区域
[root@linux1 named]# cp -p named.localhost named.sdskills
[root@linux1 named]# cp -p named.loopback named.2024
正向配置文件
named.sdskills
$TTL 1D
@ IN SOA linux1.sdskills.lan. rname.invalid. (
0 ; serial
1D ; refresh
1H ; retry
1W ; expire
3H ) ; minimum
NS linux1.sdskills.lan.
A 127.0.0.1
AAAA ::1
linux1 A 10.4.220.101
linux2 A 10.4.220.102
linux3 A 10.4.220.103
linux4 A 10.4.220.104
linux5 A 10.4.220.105
linux6 A 10.4.220.106
linux7 A 10.4.220.107
linux8 A 10.4.220.108
linux9 A 10.4.220.109
www A 10.4.220.90
反向配置文件
$TTL 1D
@ IN SOA linux1.sdskills.lan. rname.invalid. (
0 ; serial
1D ; refresh
1H ; retry
1W ; expire
3H ) ; minimum
NS linux1.sdskills.lan.
A 127.0.0.1
AAAA ::1
PTR sdskills.lan.
101 PTR linux1.sdskills.lan.
102 PTR linux2.sdskills.lan.
103 PTR linux3.sdskills.lan.
104 PTR linux4.sdskills.lan.
105 PTR linux5.sdskills.lan.
106 PTR linux6.sdskills.lan.
107 PTR linux7.sdskills.lan.
108 PTR linux8.sdskills.lan.
109 PTR linux9.sdskills.lan.
90 PTR www.sdskills.lan.
重启服务开放53端口
Linux2配置辅助dns
辅助dns(linux2)
zone "sdskills.lan" IN {
type slave;
file "named.sdskills";
masters { 10.4.220.101; };
};
zone "220.4.10.in-addr.arpa" IN {
type slave;
file "named.2024";
masters { 10.4.220.101; };
};
4,ansible
安装ansible
yum install -y ansible
(1)在 linux1 上安装并配置 Ansible 服务,~/ansible 创建 ansible.cfg 的配置文件并配置名为 inventory 的静态 inventory 文件,以满足以下要求:linuxl是master 主机组的成员;linux2-linux7是slave 主机组的成员;
~/ansible 创建 ansible.cfg 的配置文件并配置名为 inventory 的静态 inventory 文件
[root@linux1 ~]# mkdir ~/ansible
配置名为inventory的inventory的静态文件
[root@linux1 ~]# vim ~/ansible/ansible.cfg
[defaults]
inventory = ~/ansible/inventory
[root@linux1 ]# vim ~/ansible/inventory
[defaults]
inventory = ~/ansible/inventory
Linux1是master组成员
Linux2-Linux7是slave组成员
[root@linux1 ]# vim ~/ansible/inventory
[master]
10.4.220.101
[slave]
10.4.220.102
10.4.220.103
10.4.220.104
10.4.220.105
10.4.220.106
10.4.220.107
(2)安装系统角色,~/ansible 下创建符合以下条件的timesync.yml的playbook:在所有受管节点上运行且使用 timesync 角色;配置该角色使用 linuxl.sdskills.lan 为时间服务器;配置该角色启用 iburst 参数;
[root@linux1 ]# vim ~/ansible/timesync.yml
---
- name: Configure time synchronization on all nodes
hosts: all
  roles:
    - role: timesync #调用timesync这个角色
      vars:
        timesync_ntp_servers: #定义NTP时间服务器变量
          - hostname: linux1.sdskills.lan #NTP时间主服务器
            iburst: true #启用iburst参数
[root@linux1 ~]# ansible-playbook /root/ansible/timesync.yml
PLAY [Configure time synchronization on all nodes] ***************************************************************************************************************************************************
TASK [Gathering Facts] *******************************************************************************************************************************************************************************
ok: [10.4.220.104]
ok: [10.4.220.102]
ok: [10.4.220.103]
ok: [10.4.220.105]
ok: [10.4.220.108]
ok: [10.4.220.106]
ok: [10.4.220.109]
ok: [10.4.220.107]
ok: [10.4.220.101]
PLAY RECAP *******************************************************************************************************************************************************************************************
10.4.220.101 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.102 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.103 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.104 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.105 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.106 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.107 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.108 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.109 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
(3)(3)根据以下要求,在~/ansible/roles 中创建名为 apache 的角色:安装 httpd 软件包,并启动 httpd 服务且下次开机启动;创建 index.html.j2 文件,该模板文件用于输出如下内容:Welcome to HOSTNAME ON IPADDRESSHOSTNAME 是受控节点的 FODN,IPADDRESS 则是受控节点的IP 地址创建 playbook~/ansible/apache.yml,在 slave 主机组使用 apache 的角色
创建需要的文件夹
[root@linux1 ]# mkdir -p ~/ansible/roles/apache/tasks
[root@linux1 ]# mkdir -p ~/ansible/roles/apache/templates
[root@linux1 ]# vim ~/ansible/roles/apache/tasks/main.yml
---
- name: install httpd package
yum:
name: httpd #用yum安装httpd
state: present
- name: start and enable httpd service
service:
name: httpd
state: started #设置开机自启动
enabled: true
在这个目录下面创建一个index.html.j2文件
[root@linux1 ]# vim ~/ansible/roles/apache/templates/index.html.j2
#按题目编写文件
Welcome to {{ ansible_hostname }} on {{ ansible_default_ipv4.address}}
定义FQDN全名 定义受控节点ip地址
创建 playbook~/ansible/apache.yml,在 slave 主机组使用 apache 的角色
[root@linux1 ]# vi /root/ansible/apache.yml
---
- name: Deploy apache on slave hosts
hosts: slave
roles:
- apache 指定apache角色
执行剧本验证对错
[root@linux1 tasks]# ansible-playbook /root/ansible/apache.yml
PLAY [Deploy apache on slave hosts] ******************************************************************************************************************************************************************
TASK [Gathering Facts] *******************************************************************************************************************************************************************************
ok: [10.4.220.102]
ok: [10.4.220.103]
ok: [10.4.220.104]
ok: [10.4.220.106]
ok: [10.4.220.105]
ok: [10.4.220.108]
ok: [10.4.220.109]
ok: [10.4.220.107]
PLAY RECAP *******************************************************************************************************************************************************************************************
10.4.220.102 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.103 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.104 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.105 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.106 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.107 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.108 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
10.4.220.109 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
5,keepalive
(1)iscsi服务端
给Linux3添加4块硬盘,每块5G,名称为Linux3-1.qcow2
[root@Server2 images]# for i in {1..4}; do qemu-img create -f qcow2 /var/lib/libvirt/images/linux3-$i.qcow2 5G; done
Formatting '/var/lib/libvirt/images/linux3-1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=5368709120 lazy_refcounts=off refcount_bits=16
Formatting '/var/lib/libvirt/images/linux3-2.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=5368709120 lazy_refcounts=off refcount_bits=16
Formatting '/var/lib/libvirt/images/linux3-3.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=5368709120 lazy_refcounts=off refcount_bits=16
Formatting '/var/lib/libvirt/images/linux3-4.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compession_type=zlib size=5368709120 lazy_refcounts=off refcount_bits=16
[root@Server2 images]# ls
linux0.qcow2 linux2.qcow2 linux3-2.qcow2 linux3-4.qcow2 linux4.qcow2 linux6.qcow2 linux8.qcow2
linux1.qcow2 linux3-1.qcow2 linux3-3.qcow2 linux3.qcow2 linux5.qcow2 linux7.qcow2 linux9.qcow2
#用 virsh edit linux3将磁盘添加进去
#安装target*
yum install -y target*
[linux3]
[root@linux3 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sr0 11:0 1 1024M 0 rom
vda 252:0 0 100G 0 disk
├─vda1 252:1 0 600M 0 part /boot/efi
├─vda2 252:2 0 1G 0 part /boot
└─vda3 252:3 0 98.4G 0 part
├─rl-root 253:0 0 63.5G 0 lvm /
├─rl-swap 253:1 0 3.9G 0 lvm [SWAP]
└─rl-home 253:2 0 31G 0 lvm /home
vdb 252:16 0 5G 0 disk
vdc 252:32 0 5G 0 disk
vdd 252:48 0 5G 0 disk 添加4块5G磁盘
vde 252:64 0 5G 0 disk
[root@linux3 ~]# pvcreate /dev/vd{b..e}
Physical volume "/dev/vdb" successfully created.
Physical volume "/dev/vdc" successfully created.
Physical volume "/dev/vdd" successfully created. #创建pv卷
Physical volume "/dev/vde" successfully created.
[root@linux3 ~]# vgcreate vg1 /dev/vd{b..e}
Volume group "vg1" successfully created #创建vg卷
[root@linux3 ~]# lvcreate -n lv1 -l 100%FREE vg1 #创建lv卷
Logical volume "lv1" created.
[root@linux3 ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/vda3 rl lvm2 a-- 98.41g 0
/dev/vdb vg1 lvm2 a-- <5.00g 0
/dev/vdc vg1 lvm2 a-- <5.00g 0 #pv卷创建成功
/dev/vdd vg1 lvm2 a-- <5.00g 0
/dev/vde vg1 lvm2 a-- <5.00g 0
[root@linux3 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
rl 1 3 0 wz--n- 98.41g 0
vg1 4 1 0 wz--n- 19.98g 0 #vg卷创建成功
[root@linux3 ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
home rl -wi-ao---- 30.99g
root rl -wi-ao---- <63.48g
swap rl -wi-ao---- 3.94g
lv1 vg1 -wi-a----- 19.98g #lv卷创建成功
格式化为ext4
[root@linux3 ~]# mkfs.ext
mkfs.ext2 mkfs.ext3 mkfs.ext4
[root@linux3 ~]# mkfs.ext4 /dev/vg1/lv1 #格式化为ext4格式
mke2fs 1.46.5 (30-Dec-2021)
Discarding device blocks: done
Creating filesystem with 5238784 4k blocks and 1310720 inodes
Filesystem UUID: 633b5792-6c50-429a-9d80-2c298d85eac9
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000
Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done
服务端配置iSCSI
[root@linux3 ~]# targetcli
Warning: Could not load preferences file /root/.targetcli/prefs.bin.
targetcli shell version 2.1.53
Copyright 2011-2013 by Datera, Inc and others.
For help on commands, type 'help'.
/> ls
o- / ......................................................................................................................... [...]
o- backstores .............................................................................................................. [...]
| o- block .................................................................................................. [Storage Objects: 0]
| o- fileio ................................................................................................. [Storage Objects: 0]
| o- pscsi .................................................................................................. [Storage Objects: 0]
| o- ramdisk ................................................................................................ [Storage Objects: 0]
o- iscsi ............................................................................................................ [Targets: 0]
o- loopback ......................................................................................................... [Targets: 0]
/> /backstores/block create dev=/dev/vg1/lv1 name=d1
Created block storage object d1 using /dev/vg1/lv1. 导入卷组
/> /iscsi create iqn.2024-12.lan.sdskills:server
Created target iqn.2024-12.lan.sdskills:server. #创建服务端发起程序
Created TPG 1.
Global pref auto_add_default_portal=true
Created default portal listening on all IPs (0.0.0.0), port 3260.
/> /iscsi/iqn.2024-12.lan.sdskills:server/tpg1/acls create iqn
.2024-12.lan.sdskills:client #客户端发起程序
Created Node ACL for iqn.2024-12.lan.sdskills:client
/> /iscsi/iqn.2024-12.lan.sdskills:server/tpg1/luns create /backstores/block/d1
Created LUN 0. 服务端连接磁盘
Created LUN 0->0 mapping in node ACL iqn.2024-12.lan.sdskills:client
/> /iscsi/iqn.2024-12.lan.sdskills:server/tpg1/ set attribute authentication=1
Parameter authentication is now '1'. #启用连接
/> /iscsi/ set
discovery_auth global group=
/> /iscsi/ set discovery_auth enable=1 userid=IncomingUser password=IncomingPass mutual_userid=OutgoingUser mutual_password=OutgoingPass 创建CHAP双向验证
Parameter enable is now 'True'.
Parameter userid is now 'IncomingUser'.
Parameter password is now 'IncomingPass'.
Parameter mutual_userid is now 'OutgoingUser'.
Parameter mutual_password is now 'OutgoingPass'.
/> /iscsi/iqn.2024-12.lan.sdskills:server/tpg1/acls/iqn.2024-12.lan.sdskills:client/ set auth
mutual_password= mutual_userid= password= userid=
/> /iscsi/iqn.2024-12.lan.sdskills:server/tpg1/acls/iqn.2024-12.lan.sdskills:client/ set auth userid=IncomingUser password=IncomingPass mutual_userid=OutgoingUser mutual_password=OutgoingPass
Parameter userid is now 'IncomingUser'.
Parameter password is now 'IncomingPass'.
Parameter mutual_userid is now 'OutgoingUser'.
Parameter mutual_password is now 'OutgoingPass'.
/> exit
#重启服务
[root@linux3 ~]# systemctl --now enable target
Created symlink /etc/systemd/system/multi-user.target.wants/target.service → /usr/lib/systemd/system/target.service.
[root@linux3 ~]# systemctl restart target
#开放3260端口
[root@linux3 ~]# firewall-cmd --add-port=3260/tcp --add-port=3260/udp --permanent
success
[root@linux3 ~]# firewall-cmd --reload
success
(2)配置客户端链接
[root@linux4 ~]# systemctl --now enable iscsid.service 启用服务
Created symlink /etc/systemd/system/multi-user.target.wants/iscsid.service → /usr/lib/systemd/system/iscsid.service.
[root@linux4 ~]# vim /etc/iscsi/initiatorname.iscsi 修改客户端发起程序
InitiatorName=iqn.2024-12.lan.sdskills:client
CHAP双向验证
node.session.auth.authmethod = CHAP 修改CHAP双向验证密码
# To configure which CHAP algorithms to enable set
# node.session.auth.chap_algs to a comma seperated list.
# The algorithms should be listen with most prefered first.
# Valid values are MD5, SHA1, SHA256, and SHA3-256.
# The default is MD5.
#node.session.auth.chap_algs = SHA3-256,SHA256,SHA1,MD5
# To set a CHAP username and password for initiator
# authentication by the target(s), uncomment the following lines:
node.session.auth.username = IncomingUser
node.session.auth.password = IncomingPass
# To set a CHAP username and password for target(s)
# authentication by the initiator, uncomment the following lines:
node.session.auth.username_in = OutgoingUser
node.session.auth.password_in = OutgoingPass
# To enable CHAP authentication for a discovery session to the target
# set discovery.sendtargets.auth.authmethod to CHAP. The default is None.
discovery.sendtargets.auth.authmethod = CHAP
# To set a discovery session CHAP username and password for the initiator
# authentication by the target(s), uncomment the following lines:
discovery.sendtargets.auth.username = IncomingUser
discovery.sendtargets.auth.password = IncomingPass
# To set a discovery session CHAP username and password for target(s)
# authentication by the initiator, uncomment the following lines:
discovery.sendtargets.auth.username_in = OutgoingUser
discovery.sendtargets.auth.password_in = OutgoingPass
[root@linux4 ~]# systemctl restart iscsid.service 重启服务
[root@linux4 ~]# iscsiadm -m discovery -t st -p 10.4.220.103
10.4.220.103:3260,1 iqn.2024-12.lan.sdskills:server #发现门户
[root@linux4 ~]# iscsiadm -m node -l #连接服务端
Logging in to [iface: default, target: iqn.2024-12.lan.sdskills:server, portal: 10.4.220.103,3260]
Login to [iface: default, target: iqn.2024-12.lan.sdskills:server, portal: 10.4.220.103,3260] successful.
[root@linux4 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
loop0 7:0 0 8.2G 0 loop /mnt
sda 8:0 0 20G 0 disk #连接成功
sr0 11:0 1 1024M 0 rom
vda 252:0 0 100G 0 disk
├─vda1 252:1 0 600M 0 part /boot/efi
├─vda2 252:2 0 1G 0 part /boot
└─vda3 252:3 0 98.4G 0 part
├─rl-root 253:0 0 63.5G 0 lvm /
├─rl-swap 253:1 0 3.9G 0 lvm [SWAP]
└─rl-home 253:2 0 31G 0 lvm /home
[root@linux4 ~]#
linux5同样的操作
(3)keepalived
Linux4和Linux5安装keepalive和httpd
【Linux4】
[root@linux4 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
#vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state MASTER
interface enp1s0 网卡名称
virtual_router_id 51
priority 100 优先级
advert_int 1
authentication {
auth_type PASS
auth_pass 1122 密码
}
virtual_ipaddress {
10.4.220.90 虚拟地址
}
}
【Linux5】
[root@linux5 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state BACKUP
interface enp1s0 网卡
virtual_router_id 51
priority 80 优先级低于服务端
advert_int 1
authentication {
auth_type PASS
auth_pass 1122 密码
}
virtual_ipaddress {
10.4.220.90 虚拟地址
}
}
重启服务
[root@linux4 ~]# systemctl --now enable keepalived.service
Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.
[root@linux4 ~]# systemctl restart keepalived.service
[root@linux4 ~]# nmcli
enp1s0: connected to enp1s0
"Red Hat Virtio"
ethernet (virtio_net), 52:54:00:46:F2:B3, hw, mtu 1500
ip4 default
inet4 10.4.220.90/32 =========配置虚拟地址会被指定的网卡识别到
inet4 10.4.220.104/24
route4 default via 10.4.220.1 metric 100
route4 10.4.220.0/24 metric 100
inet6 fe80::5054:ff:fe46:f2b3/64
route6 fe80::/64 metric 1024
Linux4和Linux5配置apache服务
<VirtualHost *:80>
ServerName www.sdskills.lan
DocumentRoot /var/www/html
<Location />
require all granted
</Location>
</VirtualHost>
访问
[root@linux4 ~]# curl http://www.sdskills.lan =====这个域名在前面dns配置过
HelloLinuxCluster #访问成功
6,Kubernetes
!由于K8s的内容较多较难,在这里不再多写(主要是因为作者懒)
7,Base Shell脚本
generate_password() {
openssl rand -base64 8 #调用openssl函数生成一个Base64编码字符串 6字节随机数据
} #Base编码会将3个字节转换成4个字符,题目要求共8位密码所以在这
里设置6个字节刚好对应8个字符
for i in {1..10}; do
USERNAME="user$i" 创建用户user1-user10
PASSWORD=$(generate_password) #调用上面的函数
useradd -m -s /bin/bash "$USERNAME" #分配主组
echo "$USERNAME:$PASSWORD" | chpasswd #写入用户名和密码
echo "Created $USERNAME with password: $PASSWORD" #在终端输输出信息
done
标签:lan,ok,17,运维,2024,ansible,sdskills,root,10.4 From: https://blog.csdn.net/2301_79365266/article/details/144599686