salt
[[email protected] images]# cp -r rhel6 /var/www/html/
server1,server2,server3:
[[email protected] ~]# vim /etc/yum.repos.d/rhel-source.repo
[salt]
baseurl=http://172.25.61.250/rhel6
gpgcheck=0
[[email protected] ~]# yum install salt-minion -y
[[email protected] ~]# cd /etc/salt/
[[email protected] salt]# vim minion
master: 172.25.61.1
[[email protected] ~]# /etc/init.d/salt-minion start
Server1:
[[email protected] ~]# vim /etc/yum.repos.d/rhel-source.repo
[salt]
baseurl=http://172.25.61.250/rhel6
gpgcheck=0
[[email protected] ~]# yum install -y salt-master
[[email protected] ~]# /etc/init.d/salt-master start
[[email protected] ~]# salt-key -L
Accepted Keys:
Denied Keys:
Unaccepted Keys:
server2
Rejected Keys:
[[email protected] ~]# salt-key -A
The following keys are going to be accepted:
Unaccepted Keys:
server2
Proceed? [n/Y] Y
Key for minion server2 accepted.
[[email protected] ~]# salt-key -L
Accepted Keys:
server2
Denied Keys:
Unaccepted Keys:
Rejected Keys:
[[email protected] ~]# salt server2 test.ping
server2:
True
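Any minion whose key has been accepted can be reached the same way. For example (assuming the keys for server1 and server3 have also been accepted), all minions or an explicit list can be pinged:
salt '*' test.ping
salt -L 'server2,server3' test.ping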
[[email protected] srv]# vim /etc/salt/master
file_roots:
  base:
    - /srv/salt
[[email protected] srv]# mkdir /srv/salt/
[[email protected] srv]# /etc/init.d/salt-master restart
Stopping salt-master daemon: [ OK ]
Starting salt-master daemon: [ OK ]
[[email protected] pkgs]# vim /srv/salt/pkgs/make.sls
gcc:
  pkg.installed:
    - pkgs:
      - gcc
      - pcre-devel
      - openssl-devel
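This pkgs.make state is included by the haproxy and nginx builds later on; as a quick sketch, it can also be applied on its own to verify it, for example:
salt server1 state.sls pkgs.make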
apache:
[[email protected] apache]# vim /srv/salt/apache/install.sls
apache-install:
  pkg.installed:
    - pkgs:
      - httpd
      - php
  file.managed:
    - name: /var/www/html/index.php
    - source: salt://apache/files/index.php
    - mode: 644
    - user: root
    - group: root
[[email protected] apache]# vim /srv/salt/apache/service.sls
include:
  - apache.install

apache-service:
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://apache/files/httpd.conf
  service.running:
    - name: httpd
    - enable: True
    - reload: True
    - watch:
      - file: apache-service
[[email protected] apache]# vim /srv/salt/apache/files/httpd.conf
[[email protected] apache]# vim /srv/salt/apache/files/index.php
<?php
phpinfo()
?>
[[email protected] apache]# salt server3 state.sls apache.service
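If the state applies cleanly, the PHP test page should answer on the minion. A minimal check from the master, assuming server3 is 172.25.61.3 (as listed in the haproxy backends below):
curl http://172.25.61.3/index.php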
haproxy:
[[email protected] haproxy]# vim /srv/salt/users/haproxy.sls
haproxy-group:
  group.present:
    - name: haproxy
    - gid: 200

haproxy:
  user.present:
    - uid: 200
    - gid: 200
    - home: /usr/local/haproxy
    - createhome: False
    - shell: /sbin/nologin
[[email protected] haproxy]# vim /srv/salt/haproxy/install.sls
include:
  - pkgs.make
  - users.haproxy

haproxy-install:
  file.managed:
    - name: /mnt/haproxy-1.6.11.tar.gz
    - source: salt://haproxy/files/haproxy-1.6.11.tar.gz
  cmd.run:
    - name: cd /mnt && tar zxf haproxy-1.6.11.tar.gz && cd haproxy-1.6.11 && make TARGET=linux2628 USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 PREFIX=/usr/local/haproxy && make TARGET=linux2628 USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 PREFIX=/usr/local/haproxy install
    - creates: /usr/local/haproxy

/etc/haproxy:
  file.directory:
    - mode: 755

/usr/sbin/haproxy:
  file.symlink:
    - target: /usr/local/haproxy/sbin/haproxy
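Once haproxy.install (or haproxy.service, which includes it) has been applied to server1, the compiled binary can be sanity-checked through the symlink, for example:
salt server1 cmd.run 'haproxy -v'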
[[email protected] haproxy]# vim /srv/salt/haproxy/service.sls
include:
  - haproxy.install

/etc/haproxy/haproxy.cfg:
  file.managed:
    - source: salt://haproxy/files/haproxy.cfg

haproxy-service:
  file.managed:
    - name: /etc/init.d/haproxy
    - source: salt://haproxy/files/haproxy.init
    - mode: 755
  service.running:
    - name: haproxy
    - enable: True
    - reload: True
    - watch:
      - file: /etc/haproxy/haproxy.cfg
[[email protected] haproxy]# vim /srv/salt/haproxy/files/haproxy.init
#!/bin/sh
#
# chkconfig: - 85 15
# description: HA-Proxy is a TCP/HTTP reverse proxy which is particularly suited \
# for high availability environments.
# processname: haproxy
# config: /etc/haproxy/haproxy.cfg
# pidfile: /var/run/haproxy.pid
# Script Author: Simon Matter <[email protected]>
# Version: 2004060600
# Source function library.
if [ -f /etc/init.d/functions ]; then
  . /etc/init.d/functions
elif [ -f /etc/rc.d/init.d/functions ] ; then
  . /etc/rc.d/init.d/functions
else
  exit 0
fi
# Source networking configuration.
. /etc/sysconfig/network
# Check that networking is up.
[ "${NETWORKING}" = "no" ] && exit 0
# This is our service name
BASENAME=`basename $0`
if [ -L $0 ]; then
  BASENAME=`find $0 -name $BASENAME -printf %l`
  BASENAME=`basename $BASENAME`
fi
BIN=/usr/sbin/$BASENAME
CFG=/etc/$BASENAME/$BASENAME.cfg
[ -f $CFG ] || exit 1
PIDFILE=/var/run/$BASENAME.pid
LOCKFILE=/var/lock/subsys/$BASENAME
RETVAL=0
start() {
  quiet_check
  if [ $? -ne 0 ]; then
    echo "Errors found in configuration file, check it with '$BASENAME check'."
    return 1
  fi
  echo -n "Starting $BASENAME: "
  daemon $BIN -D -f $CFG -p $PIDFILE
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && touch $LOCKFILE
  return $RETVAL
}

stop() {
  echo -n "Shutting down $BASENAME: "
  killproc $BASENAME -USR1
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && rm -f $LOCKFILE
  [ $RETVAL -eq 0 ] && rm -f $PIDFILE
  return $RETVAL
}

restart() {
  quiet_check
  if [ $? -ne 0 ]; then
    echo "Errors found in configuration file, check it with '$BASENAME check'."
    return 1
  fi
  stop
  start
}

reload() {
  if ! [ -s $PIDFILE ]; then
    return 0
  fi
  quiet_check
  if [ $? -ne 0 ]; then
    echo "Errors found in configuration file, check it with '$BASENAME check'."
    return 1
  fi
  $BIN -D -f $CFG -p $PIDFILE -sf $(cat $PIDFILE)
}

check() {
  $BIN -c -q -V -f $CFG
}

quiet_check() {
  $BIN -c -q -f $CFG
}

rhstatus() {
  status $BASENAME
}

condrestart() {
  [ -e $LOCKFILE ] && restart || :
}
# See how we were called.
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
restart
;;
reload)
reload
;;
condrestart)
condrestart
;;
status)
rhstatus
;;
check)
check
;;
*)
echo $"Usage: $BASENAME {start|stop|restart|reload|condrestart|status|check}"
exit 1
esac
exit $?
[[email protected] haproxy]# vim /srv/salt/haproxy/files/haproxy.cfg
#
# This is a sample configuration. It illustrates how to separate static objects
# traffic from dynamic traffic, and how to dynamically regulate the server load.
#
# It listens on 192.168.1.10:80, and directs all requests for Host 'img' or
# URIs starting with /img or /css to a dedicated group of servers. URIs
# starting with /admin/stats deliver the stats page.
#
global
    maxconn 10000
    stats socket /var/run/haproxy.stat mode 600 level admin
    log 127.0.0.1 local0
    uid 200
    gid 200
    chroot /var/empty
    daemon

defaults
    mode http
    log global
    option httplog
    option dontlognull
    monitor-uri /monitoruri
    maxconn 8000
    timeout client 30s
    stats uri /admin/stats
    retries 2
    option redispatch
    timeout connect 5s
    timeout server 5s
    timeout queue 30s

# The public 'www' address in the DMZ
frontend public
    bind *:80 name clear
    #bind 192.168.1.10:443 ssl crt /etc/haproxy/haproxy.pem
    #use_backend static if { hdr_beg(host) -i img }
    #use_backend static if { path_beg /img /css }
    default_backend static

# The static backend for 'Host: img', /img and /css.
backend static
    balance roundrobin
    #option httpchk HEAD /favicon.ico
    server statsrv1 172.25.61.2:80 check inter 1000
    server statsrv2 172.25.61.3:80 check inter 1000

# the application servers go here
backend dynamic
    mode http
    balance roundrobin
    retries 2
    option redispatch
    timeout connect 5s
    timeout server 30s
    timeout queue 30s
    option httpchk HEAD /login.php
    cookie DYNSRV insert indirect nocache
    fullconn 4000 # the servers will be used at full load above this number of connections
    server dynsrv1 192.168.1.1:80 minconn 50 maxconn 500 cookie s1 check inter 1000
    server dynsrv2 192.168.1.2:80 minconn 50 maxconn 500 cookie s2 check inter 1000
    server dynsrv3 192.168.1.3:80 minconn 50 maxconn 500 cookie s3 check inter 1000
    server dynsrv4 192.168.1.4:80 minconn 50 maxconn 500 cookie s4 check inter 1000
[[email protected] files]# salt server1 state.sls haproxy.service
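With the state applied, the monitor-uri defined in haproxy.cfg gives a quick health check (assuming server1 is 172.25.61.1):
curl -I http://172.25.61.1/monitoruri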
nginx:
[[email protected] nginx]# vim /srv/salt/nginx/install.sls
include:
  - pkgs.make

nginx-install:
  pkg.installed:
    - pkgs:
      - gcc
      - pcre-devel
      - openssl-devel
  file.managed:
    - name: /mnt/nginx-1.14.0.tar.gz
    - source: salt://nginx/files/nginx-1.14.0.tar.gz
  cmd.run:
    - name: cd /mnt && tar zxf nginx-1.14.0.tar.gz && cd nginx-1.14.0 && sed -i.bak 's/CFLAGS="$CFLAGS -g"/#CFLAGS="$CFLAGS -g"/g' auto/cc/gcc && sed -i.bak 's/#define NGINX_VER "nginx\/" NGINX_VERSION/#define NGINX_VER "nginx"/g' src/core/nginx.h && ./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module --with-threads --with-file-aio &> /dev/null && make &> /dev/null && make install &> /dev/null
    - creates: /usr/local/nginx
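As with haproxy, once this install state has run on server3, the build can be verified before wiring up the service, for example:
salt server3 cmd.run 'ls /usr/local/nginx/sbin'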
[[email protected] nginx]# vim /srv/salt/nginx/service.sls
include:
  - nginx.install

/usr/local/nginx/conf/nginx.conf:
  file.managed:
    - source: salt://nginx/files/nginx.conf

nginx-service:
  file.managed:
    - name: /etc/init.d/nginx
    - source: salt://nginx/files/nginx
    - mode: 755
  service.running:
    - name: nginx
    - enable: True
    - reload: True
    - watch:
      - file: /usr/local/nginx/conf/nginx.conf
[[email protected] nginx]# vim /srv/salt/nginx/files/nginx
#!/bin/sh
#
# nginx - this script starts and stops the nginx daemon
#
# chkconfig: - 85 15
# description: Nginx is an HTTP(S) server, HTTP(S) reverse \
# proxy and IMAP/POP3 proxy server
# processname: nginx
# config: /usr/local/nginx/conf/nginx.conf
# pidfile: /usr/local/nginx/logs/nginx.pid
# Source function library.
. /etc/rc.d/init.d/functions
# Source networking configuration.
. /etc/sysconfig/network
# Check that networking is up.
[ "$NETWORKING" = "no" ] && exit 0
nginx="/usr/local/nginx/sbin/nginx"
prog=$(basename $nginx)
lockfile="/var/lock/subsys/nginx"
pidfile="/usr/local/nginx/logs/${prog}.pid"
NGINX_CONF_FILE="/usr/local/nginx/conf/nginx.conf"
start() {
  [ -x $nginx ] || exit 5
  [ -f $NGINX_CONF_FILE ] || exit 6
  echo -n $"Starting $prog: "
  daemon $nginx -c $NGINX_CONF_FILE
  retval=$?
  echo
  [ $retval -eq 0 ] && touch $lockfile
  return $retval
}

stop() {
  echo -n $"Stopping $prog: "
  killproc -p $pidfile $prog
  retval=$?
  echo
  [ $retval -eq 0 ] && rm -f $lockfile
  return $retval
}

restart() {
  configtest_q || return 6
  stop
  start
}

reload() {
  configtest_q || return 6
  echo -n $"Reloading $prog: "
  killproc -p $pidfile $prog -HUP
  echo
}

configtest() {
  $nginx -t -c $NGINX_CONF_FILE
}

configtest_q() {
  $nginx -t -q -c $NGINX_CONF_FILE
}

rh_status() {
  status $prog
}

rh_status_q() {
  rh_status >/dev/null 2>&1
}
# Upgrade the binary with no downtime.
upgrade() {
  local oldbin_pidfile="${pidfile}.oldbin"
  configtest_q || return 6
  echo -n $"Upgrading $prog: "
  killproc -p $pidfile $prog -USR2
  retval=$?
  sleep 1
  if [[ -f ${oldbin_pidfile} && -f ${pidfile} ]]; then
    killproc -p $oldbin_pidfile $prog -QUIT
    success $"$prog online upgrade"
    echo
    return 0
  else
    failure $"$prog online upgrade"
    echo
    return 1
  fi
}
# Tell nginx to reopen logs
reopen_logs() {
  configtest_q || return 6
  echo -n $"Reopening $prog logs: "
  killproc -p $pidfile $prog -USR1
  retval=$?
  echo
  return $retval
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart|configtest|reopen_logs)
$1
;;
force-reload|upgrade)
rh_status_q || exit 7
upgrade
;;
reload)
rh_status_q || exit 7
$1
;;
status|status_q)
rh_$1
;;
condrestart|try-restart)
rh_status_q || exit 7
restart
;;
*)
echo $"Usage: $0 {start|stop|reload|configtest|status|force-reload|upgrade|restart|reopen_logs}"
exit 2
esac
[[email protected] nginx]# vim /srv/salt/nginx/files/nginx.conf
[[email protected] files]# salt server3 state.sls nginx.service
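A quick check that nginx is answering, and that the version string was stripped by the sed edits in install.sls (assuming server3 is 172.25.61.3):
curl -I http://172.25.61.3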
[[email protected] salt]# vim /srv/salt/top.sls
base:
  'server1':
    - haproxy.service
  'server3':
    - nginx.service
  'server2':
    - apache.service
[[email protected] salt]# salt '*' state.highstate
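The same run can be previewed without applying any changes by adding test=True:
salt '*' state.highstate test=True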
grains:
[[email protected] files]# salt server2 grains.item os
server2:
----------
os:
RedHat
[[email protected] files]# salt server3 grains.item fqdn
server3:
----------
fqdn:
server3
[[email protected] files]# salt server3 grains.item roles
server3:
----------
roles:
[[email protected] ~]# vim /etc/salt/minion
grains:
  roles:
    - apache
[[email protected] ~]# /etc/init.d/salt-minion restart
Stopping salt-minion:root:server3 daemon: OK
Starting salt-minion:root:server3 daemon: OK
[[email protected] files]# salt server3 grains.item roles
server3:
----------
roles:
- apache
[[email protected] files]# salt server2 grains.item roles
server2:
----------
roles:
[[email protected] ~]# cd /etc/salt/
[[email protected] salt]# vim grains
roles: nginx
[[email protected] salt]# /etc/init.d/salt-minion restart
Stopping salt-minion:root:server2 daemon: OK
Starting salt-minion:root:server2 daemon: OK
[[email protected] files]# salt server2 grains.item roles
server2:
----------
roles:
nginx
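With role grains set on both minions, they can be targeted by grain instead of by minion ID, for example:
salt -G 'roles:apache' test.ping
salt -G 'roles:nginx' cmd.run hostname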
[[email protected] salt]# mkdir _grains
[[email protected] salt]# cd _grains/
[[email protected] _grains]# ls
[[email protected] _grains]# vim my_grain.py
#!/usr/bin/env python
def my_grain():
    grains = {}
    grains['hello'] = 'world'
    return grains
[[email protected] _grains]# salt '*' state.highstate
[[email protected] _grains]# salt server1 grains.item hello
server1:
----------
hello:
world
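The highstate run above also syncs custom modules down to the minions; when only the grain module needs pushing, it can be synced explicitly instead:
salt '*' saltutil.sync_grains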
pillar:
[[email protected] _grains]# vim /etc/salt/master
pillar_roots:
  base:
    - /srv/pillar
[[email protected] _grains]# /etc/init.d/salt-master restart
Stopping salt-master daemon: [ OK ]
Starting salt-master daemon: [ OK ]
[[email protected] _grains]# mkdir /srv/pillar
[[email protected] _grains]# cd /srv/pillar/
[[email protected] pillar]# mkdir web
[[email protected] pillar]# cd web/
[[email protected] pillar]# vim install.sls
{% if grains['fqdn'] == 'server2' %}
webserver: nginx
{% elif grains['fqdn'] == 'server3' %}
webserver: apache
{% endif %}
[[email protected] pillar]# vim /srv/pillar/top.sls
base:
  '*':
    - web.install
[[email protected] pillar]# salt '*' saltutil.refresh_pillar
server1:
True
server2:
True
server3:
True
[[email protected] pillar]# salt '*' pillar.item webserver
server1:
----------
webserver:
server3:
----------
webserver:
apache
server2:
----------
webserver:
nginx
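Pillar values can also be read with pillar.get (which accepts a default), and consumed from state files through Jinja. A minimal, hypothetical sketch of the latter (the state ID and file name are illustrative, not part of the setup above):
salt '*' pillar.get webserver
# e.g. /srv/salt/web/demo.sls
{% if pillar.get('webserver', 'apache') == 'apache' %}
web-pkg:
  pkg.installed:
    - name: httpd
{% endif %}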