本文共 12699 字,大约阅读时间需要 42 分钟。
实验环境
server7 nginx主机
server8 http
server9 http
server10 nginx
[root@server7 ~]# tar zxf nginx-1.12.0.tar.gz
[root@server7 ~]# lsnginx-1.12.0 nginx-1.12.0.tar.gz varnish[root@server7 ~]# cd nginx-1.12.0[root@server7 nginx-1.12.0]# lsauto CHANGES.ru configure html man srcCHANGES conf contrib LICENSE README[root@server7 nginx-1.12.0]# cd src/core/[root@server7 core]# vim nginx.h #去掉版本号[root@server7 core]# cd /root/nginx-1.12.0/auto/cc/[root@server7 cc]# vim gcc 注释debug[root@server7 cc]# yum install -y pcre-devel openssl-devel zlib-devel gcc#安装依赖性[root@server7 nginx-1.12.0]# ./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module[root@server7 nginx-1.12.0]# make && make install #编译完成[root@server7 nginx-1.12.0]# cd /usr/local/nginx/sbin/[root@server7 sbin]# lsnginx[root@server7 sbin]# ./nginx 查看端口:浏览器访问
添加用户[root@server7 sbin]# useradd -u 800 nginx[root@server7 sbin]# id nginx uid=800(nginx) gid=800(nginx) 组=800(nginx)[root@server7 sbin]# cd /usr/local/nginx/conf/[root@server7 conf]# vim nginx.conf做软连接[root@server7 conf]# cd /usr/local/nginx/sbin/
[root@server7 sbin]# lsnginx[root@server7 sbin]# ln -s /usr/local/nginx/sbin/nginx /usr/local/sbin/[root@server7 sbin]# which nginx /usr/local/sbin/nginx[root@server7 sbin]# nginx -s reload[root@server7 ~]# vim /usr/local/nginx/conf/nginx.conf[root@server7 ~]# vim /etc/security/limits.conf
[nginx@server7 ~]$ ulimit -a
core file size (blocks, -c) 0data seg size (kbytes, -d) unlimitedscheduling priority (-e) 0file size (blocks, -f) unlimitedpending signals (-i) 7812max locked memory (kbytes, -l) 64max memory size (kbytes, -m) unlimitedopen files (-n) 65535pipe size (512 bytes, -p) 8POSIX message queues (bytes, -q) 819200real-time priority (-r) 0stack size (kbytes, -s) 10240cpu time (seconds, -t) unlimitedmax user processes (-u) 1024virtual memory (kbytes, -v) unlimitedfile locks (-x) unlimited[root@server7 ~]# vim /usr/local/nginx/conf/nginx.conf制作证书[root@server7 ~]# cd /etc/pki/tls/certs/[root@server7 certs]# lsca-bundle.crt ca-bundle.trust.crt make-dummy-cert Makefile renew-dummy-cert[root@server7 certs]# pwd/etc/pki/tls/certs[root@server7 certs]# make cert.pem[root@server7 tls]# mv -r cert.pem /usr/local/nginx/conf/[root@server7 tls]# nginx -s reload #发现报错nginx: [emerg] SSL_CTX_use_PrivateKey_file("/usr/local/nginx/conf/cert.key") failed (SSL: error:02001002:system library:fopen:No such file or directory:fopen('/usr/local/nginx/conf/cert.key','r') error:20074002:BIO routines:FILE_CTRL:system lib error:140B0002:SSL routines:SSL_CTX_use_PrivateKey_file:system lib)nginx: configuration file /usr/local/nginx/conf/nginx.conf test failed原来配置文件证书中默认是cert.key 这里生成cert.pem 故改之[root@server7 tls]# nginx -tnginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is oknginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful[root@server7 tls]# nginx -s reload #成功浏览器访问加模块[root@server7 tls]# cd /usr/local/nginx/conf/[root@server7 conf]# vim nginx.conf[root@server7 conf]# nginx -s reload加入http虚拟主机[root@server7 conf]# vim nginx.conf新建目录,重新加载服务
[root@server7 conf]# mkdir /www1[root@server7 conf]# cd /www1/[root@server7 www1]# ls[root@server7 www1]# vim index.html[root@server7 www1]# nginx -s reload物理机加解析测试:[root@localhost ~]# curl www.cara.orgcara1.............[root@server7 conf]# vim nginx.conf[root@server7 conf]# nginx -s reload物理机加解析测试:[root@localhost ~]# curl bbs.cara.orgcara2.............#负载均衡[root@server7 conf]# nginx -s reloadserver9.server8 安装httpd服务:[root@server8 ~]# vim /var/www/html/index.html [root@server8 ~]# cat /var/www/html/index.html <h1>server8<h1>[root@server9 html]# cat /var/www/html/index.html <h1>server 9 </h1>物理机测试:可通过加入不同的参数,实现不同的需求高可用[root@server7 local]# scp -r nginx/ server10:/usr/local
[root@server10 ~]# cd /usr/local/[root@server10 local]# lsbin etc games include lib lib64 libexec nginx sbin share src[root@server10 local]# useradd -u 800 nginx[root@server10 local]# id nginxuid=800(nginx) gid=800(nginx) 组=800(nginx)server7.10均安装ricci服务(系统自带高可用包,需yum源添加),设置密码,设置为开机启动[root@server7 local]# yum install -y ricci
[root@server7 local]# passwd ricci更改用户 ricci 的密码 。新的 密码:无效的密码: 它基于字典单词无效的密码: 过于简单重新输入新的 密码:passwd: 所有的身份验证令牌已经成功更新。[root@server7 local]# /etc/init.d/ricci startStarting oddjobd: [ OK ]generating SSL certificates... doneGenerating NSS database... done启动 ricci: [确定][root@server7 local]# chkconfig ricci on[root@server7 local]# yum install -y luci [root@server7 local]# /etc/init.d/luci startAdding following auto-detected host IDs (IP addresses/domain names), corresponding toserver7' address, to the configuration of self-managed certificate
`/var/lib/luci/etc/cacert.config' (you can change them by editing `/var/lib/luci/etc/cacert.config', removing the generated certificate
`/var/lib/luci/certs/host.pem' and restarting luci): (none suitable found, you can still do it manually as mentioned above) Generating a 2048 bit RSA private key
writing new private key to '/var/lib/luci/certs/host.pem'Start luci... [确定]Point your web browser to (or equivalent) to access luci[root@server7 local]# chkconfig luci on浏览器访问,做好解析,用root用户进入,添加节点 查看集群用物理机安装fence 控制断电。物理机安装:[root@localhost ~]# rpm -qa |grep fencelibxshmfence-1.2-1.el7.x86_64fence-virtd-multicast-0.3.0-16.el7.x86_64fence-virtd-libvirt-0.3.0-16.el7.x86_64fence-virtd-0.3.0-16.el7.x86_64[root@localhost ~]# fence_virtd -cModule search path [/usr/lib64/fence-virt]:Available backends:
libvirt 0.1Available listeners:multicast 1.2Listener modules are responsible for accepting requests
from fencing clients.Listener module [multicast]:
The multicast listener module is designed for use environments
where the guests and hosts may communicate over a network usingmulticast.The multicast address is the address that a client will use to
send fencing requests to fence_virtd.Multicast IP Address [225.0.0.12]:
Using ipv4 as family.
Multicast IP Port [1229]:
Setting a preferred interface causes fence_virtd to listen only
on that interface. Normally, it listens on all interfaces.In environments where the virtual machines are using the hostmachine as a gateway, this must be set (typically to virbr0).Set to 'none' for no interface.Interface [virbr0]: br0
The key file is the shared key information which is used to
authenticate fencing requests. The contents of this file mustbe distributed to each physical host and virtual machine withina cluster.Key File [/etc/cluster/fence_xvm.key]:
Backend modules are responsible for routing requests to
the appropriate hypervisor or management layer.Backend module [libvirt]:
Configuration complete.
=== Begin Configuration ===
backends { libvirt { uri = "qemu:///system";}}
listeners {
multicast { port = "1229";family = "ipv4";interface = "br0";address = "225.0.0.12";key_file = "/etc/cluster/fence_xvm.key";}}
fence_virtd {
module_path = "/usr/lib64/fence-virt";backend = "libvirt";listener = "multicast";}=== End Configuration ===
Replace /etc/fence_virt.conf with the above [y/N]? y[root@localhost ~]# mkdir -p /etc/cluster/ [root@localhost ~]# cd /etc/cluster/[root@localhost cluster]# dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=128 count=1 记录了1+0 的读入记录了1+0 的写出128字节(128 B)已复制,0.000668548 秒,191 kB/秒[root@localhost cluster]# lsfence_xvm.key[root@localhost cluster]# scp fence_xvm.key server7:/etc/cluster/root@server7's password: fence_xvm.key 100% 128 0.1KB/s 00:00 [root@localhost cluster]# scp fence_xvm.key server10:/etc/cluster/root@server10's password: fence_xvm.key 100% 128 0.1KB/s 00:00 建立fence:回到 Nodes,并选择 server7粘贴uuidserver10同server7[root@server7 cluster]# cd [root@server7 ~]# vim /etc/init.d/nginx#!/bin/sh##
#
# Red Hat-style SysV init script for a source-built nginx.
# Provides: start|stop|status|restart|condrestart|try-restart|
#           reload|force-reload|upgrade|configtest
# NOTE(review): the blog transcript lost the backquotes around the
# readlink/basename command substitutions; restored here as $(...).
. /etc/rc.d/init.d/functions

# Resolve the real script path when invoked through a symlink.
if [ -L "$0" ]; then
    initscript=$(/bin/readlink -f "$0")
else
    initscript=$0
fi

#sysconfig=$(/bin/basename "$initscript")
#if [ -f /etc/sysconfig/$sysconfig ]; then
#    . /etc/sysconfig/$sysconfig
#fi

# Paths and tunables; each can be overridden from the environment.
nginx=${NGINX-/usr/local/nginx/sbin/nginx}
prog=$(/bin/basename "$nginx")
conffile=${CONFFILE-/usr/local/nginx/conf/nginx.conf}
lockfile=${LOCKFILE-/var/lock/subsys/nginx}
pidfile=${PIDFILE-/usr/local/nginx/logs/nginx.pid}
SLEEPMSEC=${SLEEPMSEC-200000}           # usleep interval while waiting for upgrade
UPGRADEWAITLOOPS=${UPGRADEWAITLOOPS-5}  # how many intervals to wait before failing
RETVAL=0

# Start the nginx master process and drop a subsys lock file.
start() {
    echo -n $"Starting $prog: "
    daemon --pidfile=${pidfile} ${nginx} -c ${conffile}
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && touch ${lockfile}
    return $RETVAL
}

# Stop the nginx master process and clean up lock/pid files.
stop() {
    echo -n $"Stopping $prog: "
    killproc -p ${pidfile} ${prog}
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && rm -f ${lockfile} ${pidfile}
}

# Re-read the configuration without dropping connections (SIGHUP).
reload() {
    echo -n $"Reloading $prog: "
    killproc -p ${pidfile} ${prog} -HUP
    RETVAL=$?
    echo
}

# On-the-fly binary upgrade: USR2 starts a new master with the new
# binary; once both the old and new pid files exist, QUIT gracefully
# shuts down the old master.
upgrade() {
    oldbinpidfile=${pidfile}.oldbin

    configtest -q || return
    echo -n $"Starting new master $prog: "
    killproc -p ${pidfile} ${prog} -USR2
    echo

    for i in $(/usr/bin/seq $UPGRADEWAITLOOPS); do
        /bin/usleep $SLEEPMSEC
        if [ -f ${oldbinpidfile} -a -f ${pidfile} ]; then
            echo -n $"Graceful shutdown of old $prog: "
            killproc -p ${oldbinpidfile} ${prog} -QUIT
            RETVAL=$?
            echo
            return
        fi
    done

    echo $"Upgrade failed!"
    RETVAL=1
}

# Validate the configuration file; "-q" suppresses nginx's own output.
configtest() {
    if [ "$#" -ne 0 ]; then
        case "$1" in
            -q)
                FLAG=$1
                ;;
            *)
                ;;
        esac
        shift
    fi
    ${nginx} -t -c ${conffile} $FLAG
    RETVAL=$?
    return $RETVAL
}

# Report running status based on the pid file.
rh_status() {
    status -p ${pidfile} ${nginx}
}

case "$1" in
    start)
        rh_status >/dev/null 2>&1 && exit 0
        start
        ;;
    stop)
        stop
        ;;
    status)
        rh_status
        RETVAL=$?
        ;;
    restart)
        configtest -q || exit $RETVAL
        stop
        start
        ;;
    upgrade)
        rh_status >/dev/null 2>&1 || exit 0
        upgrade
        ;;
    condrestart|try-restart)
        if rh_status >/dev/null 2>&1; then
            stop
            start
        fi
        ;;
    force-reload|reload)
        reload
        ;;
    configtest)
        configtest
        ;;
    *)
        echo $"Usage: $prog {start|stop|restart|condrestart|try-restart|force-reload|upgrade|reload|status|help|configtest}"
        RETVAL=2
esac

exit $RETVAL
[root@server7 ~]# chmod +x /etc/init.d/nginx
[root@server7 ~]# /etc/init.d/nginx start正在启动 nginx: [确定][root@server7 init.d]# scp nginx server10:/etc/init.d/[root@server7 ~]# clustatCluster Status for luci @ Wed Jul 4 22:00:02 2018Member Status: QuorateMember Name ID Status
server7 1 Online, Local, rgmanager
server10 2 Online, rgmanagerService Name Owner (Last) State
service:nginx server7 started
[root@server7 ~]# clusvcadm -r nginx -m server10 Trying to relocate service:nginx to server10...Successservice:nginx is now running on server10##将服务转移到server10上 粗体nginx为建立时的组名[root@server7 ~]# clustat
Cluster Status for luci @ Wed Jul 4 22:07:31 2018Member Status: QuorateMember Name ID Status
server7 1 Online, Local, rgmanager
server10 2 Online, rgmanagerService Name Owner (Last) State
service:nginx server10 started
网页来回测试[root@server10 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00inet 127.0.0.1/8 scope host loinet6 ::1/128 scope host valid_lft forever preferred_lft forever2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000link/ether 52:54:00:6d:4b:07 brd ff:ff:ff:ff:ff:ffinet 172.25.35.10/24 brd 172.25.35.255 scope global eth0inet 172.25.35.200/24 scope global secondary eth0inet6 fe80::5054:ff:fe6d:4b07/64 scope link valid_lft forever preferred_lft forever私有vip地址会随master漂移##使内核崩溃,测试fence是否生效
[root@server10 ~]# echo c > /proc/sysrq-trigger 自动回到7上[root@server7 ~]# clustat Cluster Status for luci @ Wed Jul 4 22:22:25 2018Member Status: QuorateMember Name ID Status
server7 1 Online, Local, rgmanager
server10 2 Online, rgmanagerService Name Owner (Last) State
service:nginx server7 started
[root@server7 ~]# ip ad1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00inet 127.0.0.1/8 scope host loinet6 ::1/128 scope host valid_lft forever preferred_lft forever2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000link/ether 52:54:00:09:2d:4d brd ff:ff:ff:ff:ff:ffinet 172.25.35.7/24 brd 172.25.35.255 scope global eth0inet 172.25.35.200/24 scope global secondary eth0inet6 fe80::5054:ff:fe09:2d4d/64 scope link valid_lft forever preferred_lft forevervip地址也漂移过来了转载于:https://blog.51cto.com/13810716/2134044