# Pre-requisites: user=flannery && smbuser=flannery
---
- hosts: all
  gather_facts: no
  tasks:
    - pause:
        prompt: "NEXT STEP - Install Gluster RPMs"
      tags: [pause]
    - name: "Verify the Gluster RPMs are installed"
      command: rpm -q redhat-storage-server
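    # The install itself is assumed to have been done before the demo; on a
    # subscribed RHGS node that would look roughly like:
    #   yum -y install redhat-storage-server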
    - pause:
        prompt: |
          Verify with: rpm -q redhat-storage-server
          NEXT STEP - Configure firewall
      tags: [pause]
    - systemd:
        name: firewalld
        enabled: yes
        state: started
    - firewalld:
        service: "{{ item }}"
        permanent: true
        immediate: true
        state: enabled
      with_items:
        - ssh
        - glusterfs
        - nfs
        - samba
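    # Rough shell equivalent on each node (assumes firewalld is the active firewall):
    #   firewall-cmd --permanent --add-service={ssh,glusterfs,nfs,samba}
    #   firewall-cmd --reload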
    - pause:
        prompt: |
          Verify with: firewall-cmd --list-all
          NEXT STEP - Start Gluster daemon
      tags: [pause]
    - systemd:
        name: glusterd
        enabled: yes
        state: started
    - pause:
        prompt: |
          Verify with: systemctl status glusterd
          NEXT STEP - Create the Gluster Cluster (Trusted Storage Pool)
      tags: [pause]
    - name: "Create the Gluster Cluster (Trusted Storage Pool)"
      command: gluster peer probe {{ item }}
      run_once: yes
      with_items:
        - "{{ play_hosts }}"
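    # Manual equivalent from any one node (peer names per this demo's inventory):
    #   gluster peer probe gluster2
    #   gluster peer probe gluster3
    #   gluster peer status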
    - pause:
        prompt: |
          Verify with: gluster pool list
          NEXT STEP - Create our PVs, VGs, and LVs
      tags: [pause]
    - lvg:
        vg: vg_{{ item }}
        pvs: /dev/{{ item }}
        #TODO vg_options:
        #TODO pv_options:
      with_items: [sdb,sdc,sdd]
    - lvol:
        vg: vg_{{ item }}
        lv: tp_{{ item }}
        size: 100%FREE
        opts: --thin
      with_items: [sdb,sdc,sdd]
    #TODO - name: Create lvmcache device
    - command: lvcreate --virtualsize 10GB -T vg_{{ item }}/tp_{{ item }} -n lv_{{ item }}
      with_items: [sdb,sdc,sdd]
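    # Rough shell equivalent for a single disk (sdb shown; same layout as the tasks above):
    #   pvcreate /dev/sdb
    #   vgcreate vg_sdb /dev/sdb
    #   lvcreate -l 100%FREE --thin vg_sdb/tp_sdb
    #   lvcreate --virtualsize 10GB -T vg_sdb/tp_sdb -n lv_sdb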
    - pause:
        prompt: |
          Verify with: pvs && vgs && lvs
          NEXT STEP - Format and mount the bricks
      tags: [pause]
    - filesystem:
        fstype: xfs
        dev: /dev/vg_{{ item }}/lv_{{ item }}
        opts: -i size=512
      with_items: [sdb,sdc,sdd]
    - mount:
        src: /dev/vg_{{ item }}/lv_{{ item }}
        path: /rhgs/{{ item }}
        fstype: xfs
        opts: inode64,noatime,nodiratime
        state: mounted
      with_items: [sdb,sdc,sdd]
    - file:
        path: /rhgs/{{ item }}/{{ item }}
        state: directory
      with_items: [sdb,sdc,sdd]
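    # Rough shell equivalent for a single brick (sdb shown):
    #   mkfs.xfs -i size=512 /dev/vg_sdb/lv_sdb
    #   mkdir -p /rhgs/sdb && mount -o inode64,noatime,nodiratime /dev/vg_sdb/lv_sdb /rhgs/sdb
    #   mkdir -p /rhgs/sdb/sdb   # the brick directory lives inside the mount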
    - pause:
        prompt: |
          Verify with: mount | grep rhgs && tree /rhgs
          NEXT STEP - Create a 3x replicated volume
      tags: [pause]

- hosts: gluster1
  gather_facts: no
  tasks:
    - name: "Create a 3x replicated volume"
      gluster_volume:
        state: present
        name: vol1
        # jcall - Disable auto-sharing for vol1, set 3x variables per Admin Guide
        options:
          features.uss: "on"
          server.allow-insecure: "on"
          performance.cache-samba-metadata: "on"
          storage.batch-fsync-delay-usec: "0"
          features.show-snapshot-directory: "on"
          performance.stat-prefetch: "off"
        replicas: 3
        bricks: /rhgs/sdb/sdb
        cluster: [ gluster1, gluster2, gluster3 ]
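    # CLI equivalent of the volume creation (only one of the volume options shown):
    #   gluster volume create vol1 replica 3 \
    #     gluster1:/rhgs/sdb/sdb gluster2:/rhgs/sdb/sdb gluster3:/rhgs/sdb/sdb
    #   gluster volume set vol1 features.uss on
    #   gluster volume start vol1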
    - name: "Set global snapshot variables"
      command: gluster snapshot config snap-max-hard-limit 4 --mode=script
    - command: gluster snapshot config auto-delete enable --mode=script
    - command: gluster snapshot config activate-on-create enable --mode=script
    - name: "Update /etc/samba/smb.conf with 'vol1' details"
      blockinfile:
        path: /etc/samba/smb.conf
        insertafter: EOF
        block: |
          [gluster-vol1]
          comment = Created by John Call
          guest ok = Yes
          path = /
          read only = No
          vfs objects = shadow_copy2 glusterfs
          shadow:basedir = /
          shadow:snapdir = /.snaps
          shadow:snapprefix = ^.*$
          shadow:format = _GMT-%Y.%m.%d-%H.%M.%S
          glusterfs:loglevel = 7
          glusterfs:logfile = /var/log/samba/glusterfs-vol1.%M.log
          glusterfs:volume = vol1
        backup: no
    - systemd: unit=smb.service state=restarted
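    # Optional sanity check of the share definition with standard Samba tooling:
    #   testparm -s /etc/samba/smb.conf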
    - pause:
        prompt: |
          Verify with: gluster vol list
          NEXT STEP - Mount our volume
      tags: [pause]
    - mount:
        src: gluster1:vol1
        path: /gluster/vol1
        fstype: glusterfs
        opts: acl
        state: mounted
    - acl:
        path: /gluster/vol1
        entry: "user:flannery:rwX"
        state: present
    - acl:
        path: /gluster/vol1
        entry: "default:user:flannery:rwX"
        state: present
    - file:
        owner: flannery
        group: flannery
        path: /gluster/vol1
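    # Shell equivalent of the ACL and ownership changes above:
    #   setfacl -m u:flannery:rwX /gluster/vol1
    #   setfacl -d -m u:flannery:rwX /gluster/vol1
    #   chown flannery:flannery /gluster/vol1
    #   getfacl /gluster/vol1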
    - pause:
        prompt: |
          Verify with: mount | grep gluster && df -h /gluster/vol1
          Go to Windows and map the network drive \\gluster1\gluster-vol1
          NEXT STEP - Copy our pictures into the volume
      tags: [pause]
    - copy:
        src: /root/pictures
        dest: /gluster/vol1
        owner: flannery
        group: flannery
    - pause:
        prompt: |
          Verify with: ls -al /gluster/vol1/pictures/
          NEXT STEP - Make our volume bigger
      tags: [pause]
    - name: "Make our volume bigger"
      command: |
        gluster vol add-brick vol1 \
          gluster1:/rhgs/sdc/sdc \
          gluster2:/rhgs/sdc/sdc \
          gluster3:/rhgs/sdc/sdc
    - command: gluster vol stop vol1 --mode=script
    - command: gluster vol start vol1 --mode=script
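    # After add-brick, a rebalance would spread the existing data onto the new
    # bricks (not run in this demo):
    #   gluster volume rebalance vol1 start
    #   gluster volume rebalance vol1 status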
    - name: "Update /etc/samba/smb.conf with 'vol1' details"
      blockinfile:
        path: /etc/samba/smb.conf
        insertafter: EOF
        block: |
          [gluster-vol1]
          comment = Created by John Call
          guest ok = Yes
          path = /
          read only = No
          vfs objects = shadow_copy2 glusterfs
          shadow:basedir = /
          shadow:snapdir = /.snaps
          shadow:snapprefix = ^.*$
          shadow:format = _GMT-%Y.%m.%d-%H.%M.%S
          glusterfs:loglevel = 7
          glusterfs:logfile = /var/log/samba/glusterfs-vol1.%M.log
          glusterfs:volume = vol1
        backup: no
    - systemd: unit=smb.service state=restarted
    - pause:
        prompt: |
          Verify with: df -h /gluster/vol1
          NEXT STEP - Create a snapshot
      tags: [pause]
    - name: "Create a snapshot"
      command: gluster snapshot create demo vol1
      register: result
    - set_fact:
        snapName: "{{ result.stdout | regex_search('(demo.*[0-9])') }}"
    - debug: var=snapName
    - name: "Mount the snapshot"
      mount:
        src: gluster1:/snaps/{{ snapName }}/vol1
        path: /gluster/vol1-snaps
        fstype: glusterfs
        opts: acl
        state: mounted
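    # Handy snapshot CLI commands while exploring this step:
    #   gluster snapshot list vol1
    #   gluster snapshot info <snapname>
    #   gluster snapshot status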
    - pause:
        prompt: |
          Verify with: gluster snapshot list vol1 && tree /gluster/vol1*
          rm -f /gluster/vol1/pictures/N* && tree /gluster/vol1*
          NEXT STEP - Enable self-service snapshots
      tags: [pause]
    - name: "Enable self-service snapshots"
      command: gluster vol set vol1 features.uss enable
    - name: "Un-mount the volume"
      mount: src=gluster1:vol1 path=/gluster/vol1 fstype=glusterfs state=unmounted
    - name: "Re-mount the volume"
      mount: src=gluster1:vol1 path=/gluster/vol1 fstype=glusterfs state=mounted
    - systemd: unit=smb.service state=restarted
      delegate_to: gluster1
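    # With uniform snapshots (USS) enabled, clients see a virtual .snaps directory
    # at the root of the mounted volume, e.g.:
    #   ls /gluster/vol1/.snaps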
    - pause:
        prompt: |
          Verify with: tree /gluster/vol1/.snaps/{{ snapName }}
          Windows Explorer: open \\gluster1\gluster-vol1\.snaps\{{ snapName }}
          Windows Explorer: right-click 'pictures' and choose 'Restore previous versions'
          NEXT STEP - Convert the snapshot to a writeable clone
      tags: [pause]
    - name: "Convert the snapshot to a writeable clone"
      command: gluster snapshot clone vol1-clone {{ snapName }}
      register: result
      until: "'success' in result.stdout"
      retries: 3
      delay: 10
    - name: "Enable SMB/CIFS auto-share on clone"
      command: gluster vol set vol1-clone user.cifs enable
    - name: "Start the clone"
      command: gluster vol start vol1-clone
    - name: "Mount the clone"
      mount:
        src: gluster1:vol1-clone
        path: /gluster/vol1-clone
        fstype: glusterfs
        state: mounted
    - systemd: unit=smb.service state=restarted
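    # Manual equivalent of the clone workflow:
    #   gluster snapshot clone vol1-clone <snapname>
    #   gluster volume start vol1-clone
    #   mount -t glusterfs gluster1:vol1-clone /gluster/vol1-clone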
    - pause:
        prompt: |
          Verify with: tree /gluster/vol1-clone
          Windows: add a network location for \\gluster1\gluster-vol1-clone
          ALL DONE!
      tags: [pause]
#### TODO ####
# grow cluster
# show failover with glusterfs
# add ctdb and show node failover for CIFS
# show disk failure
# add geo-replication
# add compression/dedupe?
## Cleanup smb.conf
# egrep -v '^;|^$|^#|^[[:space:]]#' /etc/samba/smb.conf
## Configure RHVM to look for bricks in our directory
# engine-config -g GlusterDefaultBrickMountPoint (default: /rhgs)
# engine-config -s GlusterDefaultBrickMountPoint=/gluster_bricks
# systemctl restart ovirt-engine