# This file can be used directly by 'phd', see 'build-all.sh' in this
# directory for how it can be invoked. The only requirement is a list
# of nodes you'd like it to modify.
#
# The scope of each command-block is controlled by the preceding
# 'target' line.
#
# - target=all
# The commands are executed on every node provided
#
# - target=local
# The commands are executed from the node hosting phd. When not
# using phd, they should be run from some other independent host
# (such as the puppet master)
#
# - target=$PHD_ENV_nodes{N}
# The commands are executed on the Nth node provided.
# For example, to run only on the first node, use target=$PHD_ENV_nodes1
#
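#
# As an illustrative (not prescriptive) example, a minimal command-block
# might look like the following; the 'hostname' command is just a placeholder:
#
#   target=all
#   ....
#   hostname
#   ....
#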
# Tasks to be performed at this step include:
#################################
# Scenario Requirements Section #
#################################
= REQUIREMENTS =
nodes: 1
= VARIABLES =
PHD_VAR_deployment
PHD_VAR_osp_configdir
PHD_VAR_network_domain
PHD_VAR_network_internal
######################
# Deployment Scripts #
######################
= SCRIPTS =
target=all
....
if [ $PHD_VAR_deployment = segregated ]; then
echo "We don't document managed compute nodes in a segregated environment yet"
# Certainly none of the location constraints would work and the
# resource-discovery options are mostly redundant
exit 1
fi
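# pacemaker_remote (used further down for the compute nodes) authenticates with the
# same /etc/pacemaker/authkey as the full cluster nodes, so distribute the key everywhere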
mkdir -p /etc/pacemaker
cp $PHD_VAR_osp_configdir/pcmk-authkey /etc/pacemaker/authkey
....
target=$PHD_ENV_nodes1
....
# Take down the OpenStack control plane
pcs resource disable keystone
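# (assuming the constraints from the earlier scenarios are in place, most other
# OpenStack services are ordered after keystone, so disabling it brings the rest
# of the control plane down with it)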
# Force services to run only on nodes with osprole = controller
#
# Importantly, the resource-discovery=exclusive option also tells Pacemaker not to
# even look for the services on other nodes. This helps reduce noise and avoids
# collisions with services that fill the same role on compute nodes.
for i in $(cibadmin -Q --xpath //primitive --node-path | tr ' ' '\n' | awk -F "id='" '{print $2}' | awk -F "'" '{print $1}' | uniq | grep -v "\-fence"); do
    pcs constraint location $i rule resource-discovery=exclusive score=0 osprole eq controller
done
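# For example, for a resource named 'vip-db' (an illustrative name only), the loop
# above would run:
#   pcs constraint location vip-db rule resource-discovery=exclusive score=0 osprole eq controller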
# Now (because the compute nodes have roles assigned to them and keystone is
# stopped) it is safe to define the services that will run on the compute nodes
# We use --force to allow the resources to be created even if the services don't
# exist on the local machine, since we know they do exist on the compute nodes
pcs resource create neutron-openvswitch-agent-compute systemd:neutron-openvswitch-agent --clone interleave=true --disabled --force
pcs constraint location neutron-openvswitch-agent-compute-clone rule resource-discovery=exclusive score=0 osprole eq compute
pcs resource create libvirtd-compute systemd:libvirtd --clone interleave=true --disabled --force
pcs constraint location libvirtd-compute-clone rule resource-discovery=exclusive score=0 osprole eq compute
pcs resource create ceilometer-compute systemd:openstack-ceilometer-compute --clone interleave=true --disabled --force
pcs constraint location ceilometer-compute-clone rule resource-discovery=exclusive score=0 osprole eq compute
# NovaCompute will attempt to suck in common authorization details from nova.conf
#
# Depending on how your environment is set up, you may need to supply
# these details as parameters to the nova-compute resource instead
pcs resource create nova-compute ocf:openstack:NovaCompute user_name=admin tenant_name=admin password=keystonetest domain=${PHD_VAR_network_domain} --clone interleave=true notify=true --disabled --force
pcs resource op add nova-compute notify timeout=600s
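# (600s gives the agent's notify action, which may have a lot of work to do when a
# peer fails, a generous 10 minutes to complete; adjust to taste)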
pcs constraint location nova-compute-clone rule resource-discovery=exclusive score=0 osprole eq compute
pcs constraint order start neutron-server-clone then neutron-openvswitch-agent-compute-clone require-all=false
pcs constraint order start neutron-openvswitch-agent-compute-clone then libvirtd-compute-clone
pcs constraint colocation add libvirtd-compute-clone with neutron-openvswitch-agent-compute-clone
pcs constraint order start libvirtd-compute-clone then ceilometer-compute-clone
pcs constraint colocation add ceilometer-compute-clone with libvirtd-compute-clone
pcs constraint order start ceilometer-notification-clone then ceilometer-compute-clone require-all=false
pcs constraint order start ceilometer-compute-clone then nova-compute-clone
pcs constraint colocation add nova-compute-clone with ceilometer-compute-clone
pcs constraint order start nova-conductor-clone then nova-compute-clone require-all=false
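# The net effect of the ordering/colocation constraints above is a start sequence of:
#   neutron-server -> neutron-openvswitch-agent-compute -> libvirtd-compute -> ceilometer-compute -> nova-compute
# (the require-all=false settings keep the compute clones from waiting on every
# controller-side instance of the clones they depend on)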
# Take advantage of the fact that control nodes will already be part of the cluster
# At this step, we need to teach the cluster about the compute nodes
#
# This requires running commands on the cluster based on the names of the compute nodes
controllers=$(cibadmin -Q -o nodes | grep uname | sed s/.*uname..// | awk -F\" '{print $1}')
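# 'controllers' now holds the short hostnames of every node already in the cluster,
# i.e. the existing control plane nodes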
#pcs stonith create fence-compute fence_apc ipaddr=east-apc login=apc passwd=apc pcmk_host_map="east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;" --force
pcs stonith create fence-compute fence_apc_snmp ipaddr=apc-ap7941-l2h3.mgmt.lab.eng.bos.redhat.com power_wait=10 pcmk_host_map="mrg-07:10;mrg-08:12;mrg-09:14"
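# (The single fence device above is specific to this lab's APC power switch; with
# per-node fencing hardware you would instead create one device per compute node,
# as sketched in the loop below.)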
for node in ${PHD_ENV_nodes}; do
found=0
short_node=$(echo ${node} | sed s/\\..*//g)
for controller in ${controllers}; do
if [ ${short_node} = ${controller} ]; then
pcs property set --node ${short_node} osprole=controller
found=1
fi
done
if [ $found = 0 ]; then
# We only want to execute the following _for_ the compute nodes, not _on_ the compute nodes
# Rather annoying
pcs resource create ${short_node} ocf:pacemaker:remote
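# An ocf:pacemaker:remote resource named after the host turns it into a
# pacemaker_remote node, able to run resources without joining corosync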
pcs property set --node ${short_node} osprole=compute
# This step will /entirely/ depend on the hardware you have
# If you have IPMI, it might be like so:
# pcs stonith create fence-compute-${short_node} fence_ipmilan login="root" passwd="supersecret" ipaddr="192.168.1.1" pcmk_host_list="$node"
# Personally, I have an APC switch that can control all the compute nodes at once (so I created it outside of this loop already)
fi
done
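# Everything is now defined, so bring the control plane back and enable the new compute resources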
pcs resource enable keystone
pcs resource enable neutron-openvswitch-agent-compute
pcs resource enable libvirtd-compute
pcs resource enable ceilometer-compute
pcs resource enable nova-compute
....