From f7037a4a83cb026609a64e43047f99253975a644 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 2 Dec 2019 13:27:01 -0600 Subject: [PATCH 01/55] [kvm_tacc] Add config keystone DB replication --- kolla/node_custom_config/galera.cnf | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 kolla/node_custom_config/galera.cnf diff --git a/kolla/node_custom_config/galera.cnf b/kolla/node_custom_config/galera.cnf new file mode 100644 index 00000000..b459ee1a --- /dev/null +++ b/kolla/node_custom_config/galera.cnf @@ -0,0 +1,13 @@ +[mysqld] +server-id = {{ db_replication_id }} +binlog-do-db = keystone +binlog-format = ROW +replicate-ignore-table = keystone.revocation_event +replicate-ignore-table = keystone.trust +replicate-ignore-table = keystone.trust_role +replicate-ignore-table = keystone.endpoint +replicate-ignore-table = keystone.endpoint_group +replicate-ignore-table = keystone.project_endpoint_group +replicate-ignore-table = keystone.service +replicate-ignore-table = keystone.region +replicate-ignore-db = mysql From dc72dd929cd87dad2d5b6aa3e5474ce8d5316e08 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 2 Dec 2019 14:01:01 -0600 Subject: [PATCH 02/55] [kvm] Add toggles for blazar and serialconsole options in Nova --- kolla/node_custom_config/nova.conf | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kolla/node_custom_config/nova.conf b/kolla/node_custom_config/nova.conf index cf7c662b..1b2801f5 100644 --- a/kolla/node_custom_config/nova.conf +++ b/kolla/node_custom_config/nova.conf @@ -64,10 +64,12 @@ rpc_conn_pool_size = 300 max_age = 0 until_refresh = 0 reservation_expire = 86400 +{% if enable_blazar | bool %} # Remove quota limits for hosts; these are handled via Blazar instances = -1 cores = -1 ram = -1 +{% endif %} # https://docs.openstack.org/nova/rocky/configuration/config.html#quota.recheck_quota # > This defaults to True (recheck quota after resource creation) but can be set # to False to avoid additional load if allowing quota to be exceeded because @@ -85,6 +87,7 @@ max_attempts = 50 # service polls the Bare Metal service for node information. discover_hosts_in_cells_interval = 120 +{% if enable_nova_serialconsole_proxy | bool %} [serial_console] base_url = wss://{{ kolla_external_fqdn }}:{{ nova_serialproxy_port }}/ @@ -92,6 +95,7 @@ base_url = wss://{{ kolla_external_fqdn }}:{{ nova_serialproxy_port }}/ [console] allowed_origins = "{{ nova_console_allowed_origins }}" {% endif %} +{% endif %} {% if service_name == "nova-api" %} # Custom vendordata service From 158e357c116d0f814f4961876d5f29dab0635042 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 2 Dec 2019 14:04:27 -0600 Subject: [PATCH 03/55] [kvm] Remove "create_volume" as default for instance launch --- kolla/node_custom_config/horizon/custom_local_settings | 6 ------ 1 file changed, 6 deletions(-) diff --git a/kolla/node_custom_config/horizon/custom_local_settings b/kolla/node_custom_config/horizon/custom_local_settings index 4a0b343f..2eb5e5af 100644 --- a/kolla/node_custom_config/horizon/custom_local_settings +++ b/kolla/node_custom_config/horizon/custom_local_settings @@ -74,12 +74,6 @@ SSO_CALLBACK_HOST = '{{ kolla_external_fqdn }}' # A dictionary of settings which can be used to provide the default values for # properties found in the Launch Instance modal. 
LAUNCH_INSTANCE_DEFAULTS = { - 'config_drive': False, - 'enable_scheduler_hints': True, - 'disable_image': False, - 'disable_instance_snapshot': True, - 'disable_volume': True, - 'disable_volume_snapshot': True, 'create_volume': False, } From c9712c55b2b69c72480d6a46a365be1162429030 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 2 Dec 2019 14:07:42 -0600 Subject: [PATCH 04/55] [kvm] Add detection for Neutron options tunnel_types and mechanism_drivers to support KVM --- kolla/node_custom_config/neutron/ml2_conf.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kolla/node_custom_config/neutron/ml2_conf.ini b/kolla/node_custom_config/neutron/ml2_conf.ini index 9b8261a4..4f12cea9 100644 --- a/kolla/node_custom_config/neutron/ml2_conf.ini +++ b/kolla/node_custom_config/neutron/ml2_conf.ini @@ -1,8 +1,8 @@ [agent] -tunnel_types = +tunnel_types = {% if not enable_ironic | bool %}vxlan{% endif %} [ml2] -mechanism_drivers = openvswitch,genericswitch{% if enable_ironic_neutron_agent | bool %},baremetal{% endif %} +mechanism_drivers = openvswitch{% if neutron_ml2_generic_switch_configs is defined %},genericswitch{%endif %}{% if enable_ironic_neutron_agent | bool %},baremetal{% endif %},l2population [ml2_type_vlan] {% if neutron_networks is defined %} From ebf172e20043bae936eaf5f97a737ae4f5198f98 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 5 Dec 2019 10:36:40 -0600 Subject: [PATCH 05/55] [kvm] Add Ceph keyring files. --- .../cinder/cinder-backup/ceph.client.cinder-backup.keyring | 2 ++ .../cinder/cinder-volume/ceph.client.cinder.keyring | 2 ++ .../node_custom_config/glance/ceph.client.kvm-images.keyring | 2 ++ kolla/node_custom_config/nova/ceph.client.cinder.keyring | 4 ++++ kolla/node_custom_config/nova/ceph.client.nova.keyring | 2 ++ 5 files changed, 12 insertions(+) create mode 100644 kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring create mode 100644 kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring create mode 100644 kolla/node_custom_config/glance/ceph.client.kvm-images.keyring create mode 100644 kolla/node_custom_config/nova/ceph.client.cinder.keyring create mode 100644 kolla/node_custom_config/nova/ceph.client.nova.keyring diff --git a/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring b/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring new file mode 100644 index 00000000..5f152077 --- /dev/null +++ b/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring @@ -0,0 +1,2 @@ +[client.cinder-backup] + key = {% cephx_key_cinder_backup %} diff --git a/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring b/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring new file mode 100644 index 00000000..634263ab --- /dev/null +++ b/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring @@ -0,0 +1,2 @@ +[client.cinder] + key = {% cephx_key_cinder_volume %} \ No newline at end of file diff --git a/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring b/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring new file mode 100644 index 00000000..ac7e483e --- /dev/null +++ b/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring @@ -0,0 +1,2 @@ +[client.kvm-images] + key = {% cephx_key_kvm_images %} diff --git a/kolla/node_custom_config/nova/ceph.client.cinder.keyring b/kolla/node_custom_config/nova/ceph.client.cinder.keyring new file mode 100644 index 00000000..a8f5f9bc 
--- /dev/null +++ b/kolla/node_custom_config/nova/ceph.client.cinder.keyring @@ -0,0 +1,4 @@ +[client.cinder] + key = {% cephx_key_cinder_volume %} + caps mon = "profile rbd" + caps osd = "profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=kvm-images" diff --git a/kolla/node_custom_config/nova/ceph.client.nova.keyring b/kolla/node_custom_config/nova/ceph.client.nova.keyring new file mode 100644 index 00000000..676d7899 --- /dev/null +++ b/kolla/node_custom_config/nova/ceph.client.nova.keyring @@ -0,0 +1,2 @@ +[client.nova] + key = {% cephx_key_nova %} From 496689c71b38bd1d94119d7f7ba9e7fbf41734d1 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 18 Dec 2019 11:13:45 -0600 Subject: [PATCH 06/55] Add ceph volume config to cinder --- kolla/node_custom_config/cinder.conf | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/kolla/node_custom_config/cinder.conf b/kolla/node_custom_config/cinder.conf index 7993ecce..217a1303 100644 --- a/kolla/node_custom_config/cinder.conf +++ b/kolla/node_custom_config/cinder.conf @@ -1,2 +1,22 @@ [DEFAULT] -default_volume_type = iscsi \ No newline at end of file +default_volume_type = iscsi +{% if cinder_backend_ceph | bool %} +enabled_backends = rbd-1 +{% endif %} + +{% if cinder_backend_ceph | bool %} +[rbd-1] +volume_driver = cinder.volume.drivers.rbd.RBDDriver +volume_backend_name = rbd-1 +rbd_pool = {{ ceph_cinder_pool_name }} +rbd_ceph_conf = /etc/ceph/ceph.conf +rbd_flatten_volume_from_snapshot = false +rbd_max_clone_depth = 5 +rbd_store_chunk_size = 4 +rados_connect_timeout = 5 +rbd_user = cinder +rbd_secret_uuid = {{ cinder_rbd_secret_uuid }} +report_discard_supported = True +image_upload_use_cinder_backend = True +#glance_api_version = 2 +{% endif %} From 93d6765bc29b8f83dda8e6cf530a4b19373c4a5e Mon Sep 17 00:00:00 2001 From: root Date: Mon, 6 Jan 2020 13:23:45 -0600 Subject: [PATCH 07/55] Fix ceph config in nova --- kolla/node_custom_config/nova.conf | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/kolla/node_custom_config/nova.conf b/kolla/node_custom_config/nova.conf index 1b2801f5..e15e5c8a 100644 --- a/kolla/node_custom_config/nova.conf +++ b/kolla/node_custom_config/nova.conf @@ -117,3 +117,28 @@ password = {{ nova_keystone_password }} os_region_name = "{{ openstack_region_name }}" {% endif %} {% endif %} + + +# TODO: [codyhammock 2019-09-18] +# Override some options that otherwise assume "enable_ceph" is true +{% if nova_compute_virt_type in ['kvm', 'qemu'] %} +[libvirt] +connection_uri = "qemu+tcp://{{ api_interface_address }}/system" +{% if nova_backend == "rbd" %} +images_type = rbd +images_rbd_pool = {{ ceph_nova_pool_name }} +images_rbd_ceph_conf = /etc/ceph/ceph.conf +rbd_user = nova +disk_cachemodes="network=writeback" +{% if nova_hw_disk_discard != '' %} +hw_disk_discard = {{ nova_hw_disk_discard }} +{% endif %} +{% endif %} +{% if nova_backend == "rbd" and external_ceph_cephx_enabled | bool %} +rbd_secret_uuid = {{ rbd_secret_uuid }} +{% endif %} +virt_type = {{ nova_compute_virt_type }} +{% endif %} +{% if nova_libvirt_cpu_mode %} +cpu_mode = {{ nova_libvirt_cpu_mode }} +{% endif %} From 9a6b8830f8df5ccdd44d9ccdec62ae6e9f33cc31 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 20 Feb 2020 08:30:57 -0600 Subject: [PATCH 08/55] [kvm] Add encrypted ceph client key files --- .../cinder/ceph.client.cinder.keyring | 9 +++++++++ .../ceph.client.cinder-backup.keyring | 11 ++++++++-- .../cinder-volume/ceph.client.cinder.keyring | 11 ++++++++-- 
.../glance/ceph.client.kvm-images.keyring | 11 ++++++++-- .../nova/ceph.client.cinder.keyring | 20 +++++++++++++++---- .../nova/ceph.client.nova.keyring | 11 ++++++++-- 6 files changed, 61 insertions(+), 12 deletions(-) create mode 100644 kolla/node_custom_config/cinder/ceph.client.cinder.keyring diff --git a/kolla/node_custom_config/cinder/ceph.client.cinder.keyring b/kolla/node_custom_config/cinder/ceph.client.cinder.keyring new file mode 100644 index 00000000..014eb4aa --- /dev/null +++ b/kolla/node_custom_config/cinder/ceph.client.cinder.keyring @@ -0,0 +1,9 @@ +$ANSIBLE_VAULT;1.1;AES256 +32376162313135653430333733376535633530353862636665373430336236363561623031386531 +6134613333303165663363383261623362616338616263640a636561656139663065373861326664 +36663965626265666339653166653935313738393730313962353038356464626631326239353738 +6365323564396436320a373437336339633233316239653330306564336462646230353730366666 +33623461366166323734653533383736613539653934646134356634656531623365623735353836 +39373433633961646238363739366536636336323033306236316263346362393933663436333336 +38656134363461626461373832663934333333343764323637343763373234656430646564373561 +62626631396432353062 diff --git a/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring b/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring index 5f152077..69d78732 100644 --- a/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring +++ b/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring @@ -1,2 +1,9 @@ -[client.cinder-backup] - key = {% cephx_key_cinder_backup %} +$ANSIBLE_VAULT;1.1;AES256 +35626237353166396233653633613766303033663366363635653337306530663361636635333332 +3931303337366565393764393430393366663439643663330a333665323833393031366432353062 +64323036363838616261356662613135326264373039636331643634623561366133363935613336 +6266616332643033360a306262343766643633343266666261343764636263386332376232353532 +33626661333963623437383064303565376531656261666166306365366431623464363336626237 +31356434363332636435373033353266343735646435643537613937626662366661393764636261 +31383062626564663061396464343537303936366531646530393561353763386337626133393864 +36646465383765333637 diff --git a/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring b/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring index 634263ab..fb810a8e 100644 --- a/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring +++ b/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring @@ -1,2 +1,9 @@ -[client.cinder] - key = {% cephx_key_cinder_volume %} \ No newline at end of file +$ANSIBLE_VAULT;1.1;AES256 +32393735303036616434633236623635326533373536663537393937333465363963383064306432 +3063383930333233643937356166386664383862623566370a643736663764306534666334633036 +32373738326430313566363630353666636133343333623037373334313531326662623036373965 +6136343562326637340a313030323761356263303933363136393230633434356665663035353039 +39633737663733666533363461313262656534663933336130393661343561373937363633616233 +34383861646632343334393064663362616135393738666536616266303264633330326334636137 +62353939666666383439646533346637363435363961316630356333616564656461346365343937 +62396235363638376230 diff --git a/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring b/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring index ac7e483e..26053889 100644 --- 
a/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring +++ b/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring @@ -1,2 +1,9 @@ -[client.kvm-images] - key = {% cephx_key_kvm_images %} +$ANSIBLE_VAULT;1.1;AES256 +65363435636632386261666565333262316337653864626465663935393361326231636135333131 +6463626363393231366161626261363362336262363031620a323331326431666535343335343839 +38636362613539356164663835656436376262653034316466363962316437653366646436383636 +3866643830376238610a636130363335343730343261346530326337316164613233356263333734 +66343536373330313631356332343736303233383636653466346462306666313564323565333935 +61653739636333353138373332643838623734623939633564336532636462313963313632656564 +32383038663536383430373137393033626636333830383830363133613438623032376162616637 +35386633306365323166 diff --git a/kolla/node_custom_config/nova/ceph.client.cinder.keyring b/kolla/node_custom_config/nova/ceph.client.cinder.keyring index a8f5f9bc..51212964 100644 --- a/kolla/node_custom_config/nova/ceph.client.cinder.keyring +++ b/kolla/node_custom_config/nova/ceph.client.cinder.keyring @@ -1,4 +1,16 @@ -[client.cinder] - key = {% cephx_key_cinder_volume %} - caps mon = "profile rbd" - caps osd = "profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=kvm-images" +$ANSIBLE_VAULT;1.1;AES256 +38393064616566346637373034336537323933383265653037346662303135326539303331663162 +3538613638396465613534323737363337643831653462630a303166363461633235666563646432 +38366237316630643335323136656136656362343033623965633234646233626135333366646637 +3864623466326164360a613436623236666234663037666633363265333466343663616165613937 +65323233653534646337626565393433323139316431613265383930303735373461313839336635 +38303566666563366632633733373262323832383732643432633535656462383431356563643264 +63666536643732326237643438343830653262626339363637636162656537343039613764636232 +63363934643038616633616138353664363939623165623639663936396262663938393430333337 +35626662623165646439636536393762373861643666613665333163616538613938653962393538 +63393236363034663839366364613637396461663630373836613262336638346330643630666439 +61383464363136356135656339343931373835396435636664303839356133396534373563386532 +62643361616330363839633234396338613131393137613538313065653430393132333533383561 +65306166633063616664393533323132636630386365616362383332393664376335353961323564 +31336139313533613835303963633330316664643033373837333538653537383839623233386161 +646661346164376131323763303964353664 diff --git a/kolla/node_custom_config/nova/ceph.client.nova.keyring b/kolla/node_custom_config/nova/ceph.client.nova.keyring index 676d7899..1472ab5f 100644 --- a/kolla/node_custom_config/nova/ceph.client.nova.keyring +++ b/kolla/node_custom_config/nova/ceph.client.nova.keyring @@ -1,2 +1,9 @@ -[client.nova] - key = {% cephx_key_nova %} +$ANSIBLE_VAULT;1.1;AES256 +36386335353136303231363762633034353162333864636532323337323038306433643239663562 +3338373335353664303733306564366463373736643064660a636634366534303263653232323832 +32363466306364303935313561366531656666383633353436333164633730666636353763323932 +3434393161316531330a613838336333633835356265313834633936343863386139623039653737 +63626262386131663038653466353137386433666136356237643939613762623737343834663464 +34326437376433333262323964636136313132626132363739316239653266306232323032616263 +30323838353737656665383962333765666666663164663336636466653062626431326236333265 +35316232366262353038 From e2113c1915265f19296b2abad295c70b514d2b1c Mon Sep 17 
00:00:00 2001 From: codyhammock Date: Mon, 4 May 2020 13:49:54 -0500 Subject: [PATCH 09/55] Update to allow prometheus server and alertmanager to use haproxy for SSL termination --- kolla/defaults.yml | 2 +- playbooks/prometheus.yml | 96 +------------------ roles/chameleon_prometheus/defaults/main.yml | 64 ++++++++++--- roles/chameleon_prometheus/tasks/main.yml | 9 +- .../templates/prometheus.yml.j2 | 2 +- 5 files changed, 63 insertions(+), 110 deletions(-) diff --git a/kolla/defaults.yml b/kolla/defaults.yml index a6b3681c..dfb8f716 100644 --- a/kolla/defaults.yml +++ b/kolla/defaults.yml @@ -141,7 +141,7 @@ nova_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ nova_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ nova_api_port }}/v2.1" # Prometheus -enable_prometheus: no +enable_prometheus: "{{ inventory_hostname in groups['prometheus'] }}" prometheus_bind_address: "{{ lookup('vars', 'ansible_' + network_interface).ipv4.address }}" # Redfish Monitor diff --git a/playbooks/prometheus.yml b/playbooks/prometheus.yml index d0a09cda..99b5b6a7 100644 --- a/playbooks/prometheus.yml +++ b/playbooks/prometheus.yml @@ -11,99 +11,11 @@ - chameleon_prometheus tasks: - set_fact: - prometheus_alertmanager_port: "{{ prometheus_services['alertmanager'].port }}" + prometheus_alertmanager_port: "{{ prometheus_services['prometheus-alertmanager'].port }}" prometheus_alertmanager_users: "{{ prometheus_alertmanager_users }}" - when: inventory_hostname in groups[prometheus_services['alertmanager'].group] + when: inventory_hostname in groups[prometheus_services['prometheus-alertmanager'].group] - set_fact: - prometheus_server_port: "{{ prometheus_services['server'].port }}" + prometheus_server_port: "{{ prometheus_services['prometheus-server'].port }}" prometheus_server_users: "{{ prometheus_server_users }}" prometheus_monitoring_user: "{{ prometheus_monitoring_user }}" - when: inventory_hostname in groups[prometheus_services['server'].group] - -- hosts: frontends - tasks: - - name: Configure virtual host for Prometheus server. - block: - - set_fact: - prometheus_server: "{{ hostvars[groups['prometheus'][0]] }}" - - name: Create password for readonly user. - htpasswd: - path: "/etc/httpd/prometheus_server.htpasswd" - name: "{{ prometheus_server.prometheus_monitoring_user }}" - password: "{{ prometheus_monitoring_password }}" - mode: 0640 - - name: Configure virtual host. 
- include_role: - name: apache - tasks_from: host - vars: - apache_server_alias: prometheus_server - apache_server_name: "{{ prometheus_server_hostname }}" - apache_server_https_port: 9090 - apache_server_http_port: null - apache_server_conf: | - - Order deny,allow - Allow from all - - AuthType Basic - AuthName "Prometheus authentication" - AuthBasicProvider file socache external - AuthUserFile /etc/httpd/prometheus_server.htpasswd - AuthExternal keystone-user - AuthExternalProvideCache On - AuthnCacheProvideFor keystone-user - - # preserve Host header to avoid cross-origin problems - ProxyPreserveHost on - # proxy to Alertmanager - ProxyPass http://{{ groups['prometheus'][0] }}:{{ prometheus_server.prometheus_server_port }}/ - ProxyPassReverse http://{{ groups['prometheus'][0] }}:{{ prometheus_server.prometheus_server_port }}/ - - # Read access to monitoring user + admins - - Require user {{ prometheus_server.prometheus_monitoring_user }} {{ prometheus_server.prometheus_server_users | join(' ') }} - - - # TODO: Write access to operators by group membership - - Require user {{ prometheus_server.prometheus_server_users | join(' ') }} - - - when: - - prometheus_server_hostname is defined - - external_vip_address is defined - - name: Configure virtual host for Prometheus Alertmanager. - block: - - set_fact: - prometheus_alertmanager: "{{ hostvars[groups['prometheus-alertmanager'][0]] }}" - - name: Configure virtual host. - include_role: - name: apache - tasks_from: host - vars: - apache_server_alias: prometheus_alertmanager - apache_server_name: "{{ prometheus_alertmanager_hostname }}" - apache_server_conf: | - - Order deny,allow - Allow from all - - AuthType Basic - AuthName "Alertmanager authentication" - AuthBasicProvider socache external - AuthExternal keystone-user - AuthExternalProvideCache On - AuthnCacheProvideFor keystone-user - - Require user {{ prometheus_alertmanager.prometheus_alertmanager_users | join(' ') }} - - # preserve Host header to avoid cross-origin problems - ProxyPreserveHost on - # proxy to Alertmanager - ProxyPass http://{{ groups['prometheus-alertmanager'][0] }}:{{ prometheus_alertmanager.prometheus_alertmanager_port }}/ - ProxyPassReverse http://{{ groups['prometheus-alertmanager'][0] }}:{{ prometheus_alertmanager.prometheus_alertmanager_port }}/ - - when: - - prometheus_alertmanager_hostname is defined - - external_vip_address is defined + when: inventory_hostname in groups[prometheus_services['prometheus-server'].group] diff --git a/roles/chameleon_prometheus/defaults/main.yml b/roles/chameleon_prometheus/defaults/main.yml index 6337e271..5a49616a 100644 --- a/roles/chameleon_prometheus/defaults/main.yml +++ b/roles/chameleon_prometheus/defaults/main.yml @@ -1,3 +1,6 @@ +--- +project_name: "chameleon_prometheus" + prometheus_docker_network_name: prometheus prometheus_docker_network_subnet: 172.18.0.0/24 @@ -7,7 +10,6 @@ prometheus_users: [] prometheus_monitoring_user: monitoring prometheus_server_users: "{{ prometheus_users }}" -enable_prometheus_alertmanager: yes # This should be encrypted! It is a secret value. 
prometheus_alertmanager_slack_api_url: "{{ slack_api_url }}" prometheus_alertmanager_users: "{{ prometheus_users }}" @@ -17,41 +19,69 @@ prometheus_mysql_exporter_user: mysqld-exporter prometheus_openstack_exporter_user: admin prometheus_services: - server: + prometheus-server: service_name: prometheus_server image: prom/prometheus:v2.10.0 group: prometheus + enabled: "{{ enable_prometheus }}" restart_handler: restart prometheus port: 9090 config_dir: /etc/prometheus scrape_target: yes - service_args: - - "--config.file=/etc/prometheus/prometheus.yml" - - "--storage.tsdb.path=/prometheus" - - "--web.console.libraries=/usr/share/prometheus/console_libraries" - - "--web.console.templates=/usr/share/prometheus/consoles" - - "--web.enable-lifecycle" - - "--web.external-url=https://{{ prometheus_server_hostname }}:9090" +# service_args: +# - "--config.file=/etc/prometheus/prometheus.yml" +# - "--storage.tsdb.path=/prometheus" +# - "--web.console.libraries=/usr/share/prometheus/console_libraries" +# - "--web.console.templates=/usr/share/prometheus/consoles" +# - "--web.enable-lifecycle" +# - "--web.external-url=https://{{ prometheus_server_hostname }}:9090" volumes: - "prometheus-data:/prometheus" - "/etc/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml" - "/etc/prometheus/rules:/etc/prometheus/rules" - "/etc/prometheus/secrets:/etc/prometheus/secrets" - alertmanager: + haproxy: + prometheus_server: + enabled: "{{ enable_prometheus }}" + mode: "http" + external: false + port: 9090 + prometheus_server_external: + enabled: "{{ enable_prometheus }}" + mode: "http" + external: true + port: 9090 + prometheus-alertmanager: service_name: prometheus_alertmanager image: prom/alertmanager:v0.15.2 group: prometheus-alertmanager restart_handler: restart alertmanager port: 9093 config_dir: /etc/alertmanager - service_args: - - "--web.external-url=https://{{ prometheus_alertmanager_hostname }}" - scrape_target: no + container_name: "prometheus_alertmanager" + enabled: "{{ enable_prometheus_alertmanager }}" volumes: - "alertmanager-data:/alertmanager" - "/etc/alertmanager:/etc/alertmanager" + scrape_target: no + haproxy: + prometheus_alertmanager: + enabled: "{{ enable_prometheus_alertmanager }}" + mode: "http" + external: false + port: "{{ prometheus_alertmanager_port }}" + auth_user: "{{ prometheus_alertmanager_user }}" + auth_pass: "{{ prometheus_alertmanager_password }}" + prometheus_alertmanager_external: + enabled: "{{ enable_prometheus_alertmanager }}" + mode: "http" + external: true + port: "{{ prometheus_alertmanager_port }}" + auth_user: "{{ prometheus_alertmanager_user }}" + auth_pass: "{{ prometheus_alertmanager_password }}" jupyterhub-exporter: group: jupyterhub + enabled: true port: 8075 scrape_target: yes bearer_token_file: /etc/prometheus/secrets/jupyterhub @@ -59,6 +89,7 @@ prometheus_services: service_name: prometheus_mysql_exporter image: prom/mysqld-exporter:v0.11.0 group: prometheus-mysqld-exporter + enabled: true restart_handler: restart mysql exporter port: 9104 config_dir: /etc/prometheus/mysqld-exporter @@ -68,6 +99,7 @@ prometheus_services: node-exporter: service_name: prometheus_node_exporter group: prometheus-node-exporter + enabled: true restart_handler: restart node exporter custom_service: yes port: 9100 @@ -78,6 +110,7 @@ prometheus_services: service_name: prometheus_openstack_exporter image: docker.chameleoncloud.org/chameleoncloud/prometheus-openstack-exporter:latest group: prometheus-openstack-exporter + enabled: true restart_handler: restart openstack exporter port: 
9103 config_dir: /etc/prometheus @@ -90,6 +123,7 @@ prometheus_services: image: prom/snmp-exporter:v0.15.0 generator_image: prom/snmp-generator:master group: prometheus-snmp-exporter + enabled: true restart_handler: restart snmp exporter port: 9116 config_dir: /etc/prometheus/snmp-exporter @@ -99,21 +133,25 @@ prometheus_services: # The Ceph exporters use the built-in Prometheus metrics agent Ceph provides ceph-exporter: group: prometheus-ceph-exporter + enabled: true port: 9283 scrape_target: yes push-gateway: service_name: prometheus_pushgateway image: prom/pushgateway:v0.8.0 group: prometheus-push-gateway + enabled: true restart_handler: restart pushgateway port: 9091 config_dir: /etc/prometheus scrape_target: yes precis-event-listener-exporter: group: precis + enabled: true port: 8913 scrape_target: yes precis-endpoint-exporter: group: precis + enabled: true port: 8912 scrape_target: yes diff --git a/roles/chameleon_prometheus/tasks/main.yml b/roles/chameleon_prometheus/tasks/main.yml index 6b73e4b3..2358e10b 100644 --- a/roles/chameleon_prometheus/tasks/main.yml +++ b/roles/chameleon_prometheus/tasks/main.yml @@ -7,6 +7,7 @@ | map(attribute='value') | selectattr('image', 'defined') | map(attribute='group') | list }} + enable_prometheus_alertmanager: inventory_hostname in groups['prometheus-alertmanager'] - name: Create Docker network. docker_network: @@ -44,11 +45,11 @@ file: name: "/etc/prometheus/secrets" state: directory - when: inventory_hostname in groups[prometheus_services['server'].group] + when: inventory_hostname in groups[prometheus_services['prometheus-server'].group] - include_tasks: alertmanager.yml vars: - service: "{{ prometheus_services['alertmanager'] }}" + service: "{{ prometheus_services['prometheus-alertmanager'] }}" when: inventory_hostname in groups[service.group] - include_tasks: jupyterhub_exporter.yml @@ -76,12 +77,14 @@ service: "{{ prometheus_services['snmp-exporter'] }}" when: inventory_hostname in groups[service.group] +- include_tasks: loadbalancer.yml + # Prometheus server task should go last as it may be using # credentials generated in above steps. - include_tasks: server.yml vars: - service: "{{ prometheus_services['server'] }}" + service: "{{ prometheus_services['prometheus-server'] }}" when: inventory_hostname in groups[service.group] - name: Create Docker services. 
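(Side note on the filter chain near the top of this tasks file: assuming the full expression begins with prometheus_services | dict2items, the group derivation can be sketched as follows; the example dict is illustrative only, not the real prometheus_services.)

{# Illustrative sketch only: 'example_services' is a made-up stand-in for prometheus_services #}
{% set example_services = {
     'prometheus-server': {'image': 'prom/prometheus:v2.10.0', 'group': 'prometheus'},
     'node-exporter': {'group': 'prometheus-node-exporter'}
   } %}
{{ example_services | dict2items
     | map(attribute='value')
     | selectattr('image', 'defined')
     | map(attribute='group')
     | list }}
{# Renders as ['prometheus']: only services that define an image contribute their group to the derived list #}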
diff --git a/roles/chameleon_prometheus/templates/prometheus.yml.j2 b/roles/chameleon_prometheus/templates/prometheus.yml.j2 index 275d09ff..3c828d81 100644 --- a/roles/chameleon_prometheus/templates/prometheus.yml.j2 +++ b/roles/chameleon_prometheus/templates/prometheus.yml.j2 @@ -1,4 +1,4 @@ -{% set alertmanager_service = prometheus_services['alertmanager'] %} +{% set alertmanager_service = prometheus_services['prometheus-alertmanager'] %} {% set alertmanager_vars = hostvars[groups[alertmanager_service.group][0]] %} {% set snmp_exporter_service = prometheus_services['snmp-exporter'] %} {% set snmp_exporter_vars = hostvars[groups[snmp_exporter_service.group][0]] %} From 16e30c2ddd6d802e38edfbca4e07e74aeff703b5 Mon Sep 17 00:00:00 2001 From: codyhammock Date: Wed, 9 Sep 2020 09:50:14 -0500 Subject: [PATCH 10/55] [kvm] Remove BlazarFilter --- kolla/node_custom_config/nova.conf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kolla/node_custom_config/nova.conf b/kolla/node_custom_config/nova.conf index 7e6407c1..a4ebc76b 100644 --- a/kolla/node_custom_config/nova.conf +++ b/kolla/node_custom_config/nova.conf @@ -24,7 +24,7 @@ workers = 10 [filter_scheduler] # Override default filters (just remove filters not relevant to baremetal-only) # default: AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter -enabled_filters = ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,BlazarFilter +enabled_filters = ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter # https://docs.openstack.org/ironic/train/install/configure-compute.html # > Enables querying of individual hosts for instance information. # Not possible for bare metal nodes, so set it to False. @@ -59,6 +59,7 @@ reservation_expire = 86400 instances = -1 cores = -1 ram = -1 +{% endif %} # https://docs.openstack.org/nova/train/configuration/config.html#quota.recheck_quota # > This defaults to True (recheck quota after resource creation) but can be set # to False to avoid additional load if allowing quota to be exceeded because From 19b51bb444d4f916fab6abcbf30926d0ba42d6f4 Mon Sep 17 00:00:00 2001 From: codyhammock Date: Wed, 9 Sep 2020 09:51:33 -0500 Subject: [PATCH 11/55] [kvm] Correct default volume type --- kolla/node_custom_config/cinder.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kolla/node_custom_config/cinder.conf b/kolla/node_custom_config/cinder.conf index 217a1303..257d040c 100644 --- a/kolla/node_custom_config/cinder.conf +++ b/kolla/node_custom_config/cinder.conf @@ -1,5 +1,5 @@ [DEFAULT] -default_volume_type = iscsi +default_volume_type = ceph-rbd {% if cinder_backend_ceph | bool %} enabled_backends = rbd-1 {% endif %} From b5dd16317c3981beef02394a412d080f18fc4721 Mon Sep 17 00:00:00 2001 From: codyhammock Date: Wed, 9 Sep 2020 09:52:43 -0500 Subject: [PATCH 12/55] [kvm] Enable federated auth --- kolla/defaults.yml | 47 +++++++++++++- .../horizon/custom_local_settings | 64 ++++++++++++++----- 2 files changed, 94 insertions(+), 17 deletions(-) diff --git a/kolla/defaults.yml b/kolla/defaults.yml index dfb8f716..73ae10ec 100644 --- a/kolla/defaults.yml +++ b/kolla/defaults.yml @@ -11,6 +11,10 @@ kolla_install_type: source virtualenv: /etc/ansible/venv chameleon_portal_url: https://www.chameleoncloud.org +chameleon_reference_api_url: https://api.chameleoncloud.org +# Whether to show a dropdown in the Horizon GUI that provides links to other +# Chameleon testbed sites. 
+enable_chameleon_multisite: yes enable_mariadb: yes enable_chrony: yes @@ -66,9 +70,12 @@ enable_heat: yes # Horizon enable_horizon: yes enable_horizon_chameleon_websso: yes +# Keep this to 'no' until we have switched from old multi-region to new +# multi-keystone model (with federation). +horizon_use_keystone_internal_url: no horizon_help_url: https://chameleoncloud.readthedocs.io/en/latest/technical/baremetal.html horizon_redirect_root: /dashboard -horizon_chameleon_websso_host: https://www.chameleoncloud.org +horizon_chameleon_websso_host: "{{ chameleon_portal_url }}" horizon_regions: - region_name: "{{ openstack_region_name }}" chameleon_site_name: "{{ chameleon_site_name }}" @@ -78,6 +85,22 @@ horizon_regions: blazar_database_user: "{{ blazar_database_user }}" blazar_database_password: "{{ blazar_database_password }}" +# Keystone IdP federation +identity_provider_url: "{{ keycloak_url }}/auth/realms/{{ keycloak_realm_name }}" +identity_provider_name: chameleon +identity_provider_domain_name: chameleon +keystone_identity_providers: + - name: "{{ identity_provider_name }}" + protocol: openid + identifier: "{{ identity_provider_url }}" + public_name: Login with Chameleon + client_id: "{{ keystone_idp_client_id }}" + client_secret: "{{ keystone_idp_client_secret }}" + attribute_mapping: chameleon_mapping +keystone_identity_mappings: + - name: chameleon_mapping + file: "{{ node_custom_config }}/keystone/idp_mapping.json" + # Ironic enable_ironic: yes ironic_provisioning_network: ironic-provisioning @@ -97,8 +120,12 @@ ironic_pxe_append_params: "nofb nomodeset vga=normal console=tty0 console=ttyS0, # Keycloak enable_keycloak: no +enable_keycloak_tas_sync: no enable_keycloak_external: "{{ enable_keycloak }}" enable_keycloak_external_frontend: no +keycloak_realm_name: chameleon +keycloak_hostname: auth.chameleoncloud.org +keycloak_url: "https://{{ keycloak_hostname }}" # Keystone enable_keystone: yes @@ -140,8 +167,24 @@ nova_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ nova_a nova_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ nova_api_port }}/v2.1" nova_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ nova_api_port }}/v2.1" +# Portal +enable_portal: no +# This should be in defaults b/c it may be used by other roles, +# not just the portal role +portal_nginx_conf_dir: /opt/chameleon/conf.d +# The docker-compose network name for the Portal stack +portal_network: portal +chameleon_portal_mysql_host: udcc-db01.tacc.utexas.edu +chameleon_portal_mysql_user: chameleon_ro +chameleon_portal_mysql_database: chameleon_prod + # Prometheus -enable_prometheus: no +enable_prometheus: no +prometheus_port: "9090" +prometheus_jupyterhub_exporter_token: +prometheus_server_external_url: "{{ public_protocol }}://{{ prometheus_external_fqdn }}:{{ prometheus_port }}" +prometheus_alertmanager_external_url: "{{ public_protocol }}://{{ prometheus_external_fqdn }}:{{ prometheus_alertmanager_port }}" +# Legacy chameleon_prometheus role vars prometheus_bind_address: "{{ lookup('vars', 'ansible_' + network_interface).ipv4.address }}" # Redfish Monitor diff --git a/kolla/node_custom_config/horizon/custom_local_settings b/kolla/node_custom_config/horizon/custom_local_settings index a93f63e4..4bc03606 100644 --- a/kolla/node_custom_config/horizon/custom_local_settings +++ b/kolla/node_custom_config/horizon/custom_local_settings @@ -10,9 +10,27 @@ OPENSTACK_ENDPOINT_TYPE = 'publicURL' # Default user to
specific region on login. DEFAULT_SERVICE_REGIONS = { - '{{ horizon_keystone_url }}': '{{ openstack_region_name }}', + '*': '{{ openstack_region_name }}', } +# Used by server instance page to create links from the instance overview +# to the resource catalog. +CHAMELEON_PORTAL_API_BASE_URL = '{{ chameleon_portal_url }}' +CHAMELEON_REFERENCE_API_URL = '{{ chameleon_reference_api_url }}' +CHAMELEON_SITE_ID = '{{ chameleon_site_name }}' +{% if enable_chameleon_multisite | bool %} +# Hide the region dropdown +OPENSTACK_KEYSTONE_MULTIREGION_SUPPORT = False +# Show the site dropdown +CHAMELEON_MULTISITE_SUPPORT = False +{% else %} +CHAMELEON_SITES = { +{% for conf in horizon_regions %} + '{{ conf.region_name }}': '{{ conf.chameleon_site_name }}', +{% endfor %} +} +{% endif %} + SITE_BRANDING = 'ChameleonCloud' # Override help menu to point to our docs. @@ -52,23 +70,39 @@ AVAILABLE_THEMES = [ ] DEFAULT_THEME = 'chameleoncloud' -# Used by server instance page to create links from the instance overview -# to the resource catalog. -CHAMELEON_PORTAL_API_BASE_URL = 'https://www.chameleoncloud.org' -CHAMELEON_SITES = { -{% for conf in horizon_regions %} - '{{ conf.region_name }}': '{{ conf.chameleon_site_name }}', +# NOTE(jason): This is lifted from local_settings.j2 in Kolla-Ansible; we have +# to override it to add in a kludgy entry for Portal SSO, which isn't a "true" +# SSO in the same way as the others, which have e.g. Keystone mappings and +# assume an identity provider is registered in Keystone. We also remove the +# local login. +{% if enable_keystone_federation | bool %} +WEBSSO_ENABLED = True +WEBSSO_KEYSTONE_URL = "{{ keystone_public_url }}/v3" +WEBSSO_CHOICES = ( + ("portal", "Log in with TACC account (Legacy)"), +{% for idp in keystone_identity_providers %} + ("{{ idp.name }}", "{{ idp.public_name }}"), +{% endfor %} +) +WEBSSO_IDP_MAPPING = { + "portal": (None, None), +{% for idp in keystone_identity_providers %} + "{{ idp.name }}": ("{{ idp.name }}", "{{ idp.protocol }}"), {% endfor %} } - +WEBSSO_DEFAULT_REDIRECT = True +# This really shouldn't have to be set, but it's set at configuration parse +# time and derived from a value (OPENSTACK_KEYSTONE_URL) that is overriden by +# our configuration. 
+WEBSSO_DEFAULT_REDIRECT_REGION = '{{ keystone_public_url }}/v3' +WEBSSO_DEFAULT_REDIRECT_PROTOCOL = 'openid' {% if enable_horizon_chameleon_websso | bool %} -# Single-sign on support -WEBSSO_ENABLED = True -AUTHENTICATION_URLS = ['openstack_dashboard.cc_web_sso_urls','openstack_auth.urls',] -CHAMELEON_PORTAL_SSO_BASE_URL = '{{ horizon_chameleon_websso_host }}' -CHAMELEON_PORTAL_SSO_LOGIN_PATH = '/sso/horizon/' -CHAMELEON_PORTAL_SSO_LOGOUT_PATH = '/logout/' -SSO_CALLBACK_HOST = '{{ kolla_external_fqdn }}' +AUTHENTICATION_URLS = ['openstack_dashboard.cc_web_sso_urls', 'openstack_auth.urls'] +WEBSSO_DEFAULT_REDIRECT_URL = '{{ horizon_chameleon_websso_host }}/sso/horizon' +WEBSSO_DEFAULT_REDIRECT_LOGOUT = '{{ horizon_chameleon_websso_host }}/logout' +# Show users interstitial page upon logout instead of logging them out of Portal immediately +WEBSSO_DEFAULT_REDIRECT_LOGOUT_CONFIRM = True +{% endif %} {% endif %} # A dictionary of settings which can be used to provide the default values for From 7405d33c55d448d72a1fe5a9e3016150819faaba Mon Sep 17 00:00:00 2001 From: codyhammock Date: Wed, 9 Sep 2020 09:57:06 -0500 Subject: [PATCH 13/55] [kvm] Configure Glance image conversion --- .../node_custom_config/glance/glance-api.conf | 23 +++++++++++++++++++ .../glance/glance-image-import.conf | 5 ++++ 2 files changed, 28 insertions(+) create mode 100644 kolla/node_custom_config/glance/glance-api.conf create mode 100644 kolla/node_custom_config/glance/glance-image-import.conf diff --git a/kolla/node_custom_config/glance/glance-api.conf b/kolla/node_custom_config/glance/glance-api.conf new file mode 100644 index 00000000..060a36ad --- /dev/null +++ b/kolla/node_custom_config/glance/glance-api.conf @@ -0,0 +1,23 @@ +[DEFAULT] +show_image_direct_url = True +client_socket_timeout = 0 + +[glance_store] +stores = rbd,file +default_store = rbd +rbd_store_pool = kvm-images +rbd_store_user = kvm-images +rbd_store_ceph_conf = /etc/ceph/ceph.conf +rbd_store_chunk_size = 8 + +[image_import_opts] +image_import_plugins = ['image_conversion'] + +[image_conversion] +output_format = raw + +[keystone_authtoken] +service_token_roles_required = True + +[taskflow_executor] +max_workers = 2 diff --git a/kolla/node_custom_config/glance/glance-image-import.conf b/kolla/node_custom_config/glance/glance-image-import.conf new file mode 100644 index 00000000..3f0efffe --- /dev/null +++ b/kolla/node_custom_config/glance/glance-image-import.conf @@ -0,0 +1,5 @@ +[image_import_opts] +image_import_plugins = ['image_conversion'] + +[image_conversion] +output_format = raw From 7d0e3a86597143d171d2ff901581c2009dd36a6f Mon Sep 17 00:00:00 2001 From: codyhammock Date: Wed, 9 Sep 2020 09:58:03 -0500 Subject: [PATCH 14/55] [kvm] More federated auth configs --- .../keystone/idp_mapping.json | 40 +++++++++++++++++++ kolla/node_custom_config/keystone/policy.yaml | 8 ++++ 2 files changed, 48 insertions(+) create mode 100644 kolla/node_custom_config/keystone/idp_mapping.json create mode 100644 kolla/node_custom_config/keystone/policy.yaml diff --git a/kolla/node_custom_config/keystone/idp_mapping.json b/kolla/node_custom_config/keystone/idp_mapping.json new file mode 100644 index 00000000..c5674f65 --- /dev/null +++ b/kolla/node_custom_config/keystone/idp_mapping.json @@ -0,0 +1,40 @@ +[ + { + "local": [ + { + "user": { + "name": "{0}", + "email": "{1}" + } + }, + { + "projects": [ + { + "name": "{2}", + "roles": [ + { + "name": "member" + } + ] + } + ] + } + ], + "remote": [ + { + "type": "OIDC-preferred_username" + }, + { + "type": 
"OIDC-email" + }, + { + "type": "OIDC-groups", + "blacklist": [ + ".*-admins$", + ".*-managers$" + ], + "regex": true + } + ] + } +] diff --git a/kolla/node_custom_config/keystone/policy.yaml b/kolla/node_custom_config/keystone/policy.yaml new file mode 100644 index 00000000..e3947b7a --- /dev/null +++ b/kolla/node_custom_config/keystone/policy.yaml @@ -0,0 +1,8 @@ +#identity:change_password: '!' +#identity:update_user: '!' +#identity:list_endpoints: role:reader and system_scope:all + +#identity:list_roles: 'role:reader and system_scope:all' +#identity:list_roles: 'role:reader' + +#identity:list_services: role:reader and system_scope:all From 09aa0b080aee5dbeffe76c59fe4984bccc4ca94a Mon Sep 17 00:00:00 2001 From: codyhammock Date: Wed, 9 Sep 2020 10:29:43 -0500 Subject: [PATCH 15/55] [kvm] Ceph configs for Cinder, Glance, Nova --- kolla/node_custom_config/cinder/ceph.conf | 17 +++++++++++++++++ kolla/node_custom_config/glance/ceph.conf | 7 +++++++ kolla/node_custom_config/nova/ceph.conf | 17 +++++++++++++++++ 3 files changed, 41 insertions(+) create mode 100644 kolla/node_custom_config/cinder/ceph.conf create mode 100644 kolla/node_custom_config/glance/ceph.conf create mode 100644 kolla/node_custom_config/nova/ceph.conf diff --git a/kolla/node_custom_config/cinder/ceph.conf b/kolla/node_custom_config/cinder/ceph.conf new file mode 100644 index 00000000..c333b3bb --- /dev/null +++ b/kolla/node_custom_config/cinder/ceph.conf @@ -0,0 +1,17 @@ +[global] +log file = /var/log/kolla/ceph/$cluster-$name.log +log to syslog = false +err to syslog = false +log to stderr = false +err to stderr = false + +fsid = {{ ceph_fsid }} +mon_initial_members = {{ ceph_mon_hostname }} +mon_host = {{ ceph_mon_address }} +auth_cluster_required = cephx +auth_service_required = cephx +auth_client_required = cephx + +osd pool default size = 1 +osd pool default min size = 1 + diff --git a/kolla/node_custom_config/glance/ceph.conf b/kolla/node_custom_config/glance/ceph.conf new file mode 100644 index 00000000..d9b73926 --- /dev/null +++ b/kolla/node_custom_config/glance/ceph.conf @@ -0,0 +1,7 @@ +[global] +fsid = {{ ceph_fsid }} +#mon_initial_members = {{ ceph_mon_hostname }} +mon_host = {{ ceph_mon_address }} +auth_cluster_required = cephx +auth_service_required = cephx +auth_client_required = cephx diff --git a/kolla/node_custom_config/nova/ceph.conf b/kolla/node_custom_config/nova/ceph.conf new file mode 100644 index 00000000..c333b3bb --- /dev/null +++ b/kolla/node_custom_config/nova/ceph.conf @@ -0,0 +1,17 @@ +[global] +log file = /var/log/kolla/ceph/$cluster-$name.log +log to syslog = false +err to syslog = false +log to stderr = false +err to stderr = false + +fsid = {{ ceph_fsid }} +mon_initial_members = {{ ceph_mon_hostname }} +mon_host = {{ ceph_mon_address }} +auth_cluster_required = cephx +auth_service_required = cephx +auth_client_required = cephx + +osd pool default size = 1 +osd pool default min size = 1 + From 93a87a8b9b2c5fb61417b1b266cb2cdd16683aaf Mon Sep 17 00:00:00 2001 From: codyhammock Date: Thu, 17 Sep 2020 08:47:02 -0500 Subject: [PATCH 16/55] [kvm] Merge from master --- kolla/node_custom_config/nova.conf | 12 ++--- roles/chameleon_prometheus/defaults/main.yml | 57 ++++++++------------ 2 files changed, 27 insertions(+), 42 deletions(-) diff --git a/kolla/node_custom_config/nova.conf b/kolla/node_custom_config/nova.conf index a4ebc76b..06f0d888 100644 --- a/kolla/node_custom_config/nova.conf +++ b/kolla/node_custom_config/nova.conf @@ -23,9 +23,9 @@ workers = 10 [filter_scheduler] # Override 
default filters (just remove filters not relevant to baremetal-only) -# default: AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter -enabled_filters = ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter -# https://docs.openstack.org/ironic/train/install/configure-compute.html +# default: RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter +enabled_filters = {{ nova_enabled_filters }} +# https://docs.openstack.org/ironic/rocky/install/configure-compute.html # > Enables querying of individual hosts for instance information. # Not possible for bare metal nodes, so set it to False. track_instance_changes = false @@ -129,6 +129,6 @@ rbd_secret_uuid = {{ rbd_secret_uuid }} {% endif %} virt_type = {{ nova_compute_virt_type }} {% endif %} -{% if nova_libvirt_cpu_mode %} -cpu_mode = {{ nova_libvirt_cpu_mode }} -{% endif %} +#{% if nova_libvirt_cpu_mode %} +#cpu_mode = {{ nova_libvirt_cpu_mode }} +#{% endif %} diff --git a/roles/chameleon_prometheus/defaults/main.yml b/roles/chameleon_prometheus/defaults/main.yml index 5a49616a..8df55c60 100644 --- a/roles/chameleon_prometheus/defaults/main.yml +++ b/roles/chameleon_prometheus/defaults/main.yml @@ -28,13 +28,13 @@ prometheus_services: port: 9090 config_dir: /etc/prometheus scrape_target: yes -# service_args: -# - "--config.file=/etc/prometheus/prometheus.yml" -# - "--storage.tsdb.path=/prometheus" -# - "--web.console.libraries=/usr/share/prometheus/console_libraries" -# - "--web.console.templates=/usr/share/prometheus/consoles" -# - "--web.enable-lifecycle" -# - "--web.external-url=https://{{ prometheus_server_hostname }}:9090" + service_args: + - "--config.file=/etc/prometheus/prometheus.yml" + - "--storage.tsdb.path=/prometheus" + - "--web.console.libraries=/usr/share/prometheus/console_libraries" + - "--web.console.templates=/usr/share/prometheus/consoles" + - "--web.enable-lifecycle" + - "--web.external-url=https://{{ prometheus_server_hostname }}:9090" volumes: - "prometheus-data:/prometheus" - "/etc/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml" @@ -80,7 +80,7 @@ prometheus_services: auth_user: "{{ prometheus_alertmanager_user }}" auth_pass: "{{ prometheus_alertmanager_password }}" jupyterhub-exporter: - group: jupyterhub + group: prometheus-jupyterhub-exporter enabled: true port: 8075 scrape_target: yes @@ -99,7 +99,6 @@ prometheus_services: node-exporter: service_name: prometheus_node_exporter group: prometheus-node-exporter - enabled: true restart_handler: restart node exporter custom_service: yes port: 9100 @@ -110,7 +109,6 @@ prometheus_services: service_name: prometheus_openstack_exporter image: docker.chameleoncloud.org/chameleoncloud/prometheus-openstack-exporter:latest group: prometheus-openstack-exporter - enabled: true restart_handler: restart openstack exporter port: 9103 config_dir: /etc/prometheus @@ -123,35 +121,22 @@ prometheus_services: image: prom/snmp-exporter:v0.15.0 generator_image: prom/snmp-generator:master group: prometheus-snmp-exporter - enabled: true restart_handler: restart snmp exporter port: 9116 config_dir: /etc/prometheus/snmp-exporter scrape_target: no volumes: - "/etc/prometheus/snmp-exporter:/etc/snmp_exporter" - # The Ceph exporters use the built-in Prometheus metrics agent Ceph provides - ceph-exporter: - group: prometheus-ceph-exporter - enabled: true - port: 9283 - 
scrape_target: yes - push-gateway: - service_name: prometheus_pushgateway - image: prom/pushgateway:v0.8.0 - group: prometheus-push-gateway - enabled: true - restart_handler: restart pushgateway - port: 9091 - config_dir: /etc/prometheus - scrape_target: yes - precis-event-listener-exporter: - group: precis - enabled: true - port: 8913 - scrape_target: yes - precis-endpoint-exporter: - group: precis - enabled: true - port: 8912 - scrape_target: yes +# # The Ceph exporters use the built-in Prometheus metrics agent Ceph provides +# ceph-exporter: +# group: prometheus-ceph-exporter +# port: 9283 +# scrape_target: yes +# push-gateway: +# service_name: prometheus_pushgateway +# image: prom/pushgateway:v0.8.0 +# group: prometheus-push-gateway +# restart_handler: restart pushgateway +# port: 9091 +# config_dir: /etc/prometheus +# scrape_target: yes From 711b66495d01fd1ef4c05c313f70370ba5e11541 Mon Sep 17 00:00:00 2001 From: codyhammock Date: Thu, 17 Sep 2020 12:06:12 -0500 Subject: [PATCH 17/55] [kvm] Update requirements --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 627ed068..a54e0355 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -ansible~=2.8.0 +ansible~=2.8 docker From 2a108f95a78fd9487dbcf97f77d6e8c555dde47e Mon Sep 17 00:00:00 2001 From: root Date: Mon, 2 Dec 2019 13:27:01 -0600 Subject: [PATCH 18/55] [kvm_tacc] Add config keystone DB replication --- kolla/node_custom_config/galera.cnf | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 kolla/node_custom_config/galera.cnf diff --git a/kolla/node_custom_config/galera.cnf b/kolla/node_custom_config/galera.cnf new file mode 100644 index 00000000..b459ee1a --- /dev/null +++ b/kolla/node_custom_config/galera.cnf @@ -0,0 +1,13 @@ +[mysqld] +server-id = {{ db_replication_id }} +binlog-do-db = keystone +binlog-format = ROW +replicate-ignore-table = keystone.revocation_event +replicate-ignore-table = keystone.trust +replicate-ignore-table = keystone.trust_role +replicate-ignore-table = keystone.endpoint +replicate-ignore-table = keystone.endpoint_group +replicate-ignore-table = keystone.project_endpoint_group +replicate-ignore-table = keystone.service +replicate-ignore-table = keystone.region +replicate-ignore-db = mysql From 61b2a7836345cc2d4a06683ee089977d0597ed20 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 2 Dec 2019 14:01:01 -0600 Subject: [PATCH 19/55] [kvm] Add toggles for blazar and serialconsole options in Nova --- kolla/node_custom_config/nova.conf | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kolla/node_custom_config/nova.conf b/kolla/node_custom_config/nova.conf index 8fe98ef1..147c9bed 100644 --- a/kolla/node_custom_config/nova.conf +++ b/kolla/node_custom_config/nova.conf @@ -54,11 +54,13 @@ rpc_conn_pool_size = 300 max_age = 0 until_refresh = 0 reservation_expire = 86400 +{% if enable_blazar | bool %} # Remove quota limits for hosts; these are handled via Blazar instances = -1 cores = -1 ram = -1 -# https://docs.openstack.org/nova/train/configuration/config.html#quota.recheck_quota +{% endif %} +# https://docs.openstack.org/nova/rocky/configuration/config.html#quota.recheck_quota # > This defaults to True (recheck quota after resource creation) but can be set # to False to avoid additional load if allowing quota to be exceeded because # of racing requests is considered acceptable. @@ -75,6 +77,7 @@ max_attempts = 50 # service polls the Bare Metal service for node information. 
discover_hosts_in_cells_interval = 120 +{% if enable_nova_serialconsole_proxy | bool %} [serial_console] base_url = wss://{{ kolla_external_fqdn }}:{{ nova_serialproxy_port }}/ @@ -82,6 +85,7 @@ base_url = wss://{{ kolla_external_fqdn }}:{{ nova_serialproxy_port }}/ [console] allowed_origins = "{{ nova_console_allowed_origins }}" {% endif %} +{% endif %} {% if service_name == "nova-api" %} # Custom vendordata service From 51bfdb45e1b1b668105a3c6b40ab02ef8cf08415 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 2 Dec 2019 14:04:27 -0600 Subject: [PATCH 20/55] [kvm] Remove "create_volume" as default for instance launch --- kolla/node_custom_config/horizon/custom_local_settings | 6 ------ 1 file changed, 6 deletions(-) diff --git a/kolla/node_custom_config/horizon/custom_local_settings b/kolla/node_custom_config/horizon/custom_local_settings index 6ba2acce..2a7b6208 100644 --- a/kolla/node_custom_config/horizon/custom_local_settings +++ b/kolla/node_custom_config/horizon/custom_local_settings @@ -109,12 +109,6 @@ WEBSSO_DEFAULT_REDIRECT_LOGOUT_CONFIRM = True # A dictionary of settings which can be used to provide the default values for # properties found in the Launch Instance modal. LAUNCH_INSTANCE_DEFAULTS = { - 'config_drive': False, - 'enable_scheduler_hints': True, - 'disable_image': False, - 'disable_instance_snapshot': True, - 'disable_volume': True, - 'disable_volume_snapshot': True, 'create_volume': False, } From 592ed0d16685a9eb5c31c840f05dc1f9153ae925 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 2 Dec 2019 14:07:42 -0600 Subject: [PATCH 21/55] [kvm] Add detection for Neutron options tunnel_types and mechanism_drivers to support KVM --- kolla/node_custom_config/neutron/ml2_conf.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kolla/node_custom_config/neutron/ml2_conf.ini b/kolla/node_custom_config/neutron/ml2_conf.ini index c640a53f..15b3a0e0 100644 --- a/kolla/node_custom_config/neutron/ml2_conf.ini +++ b/kolla/node_custom_config/neutron/ml2_conf.ini @@ -1,8 +1,8 @@ [agent] -tunnel_types = +tunnel_types = {% if not enable_ironic | bool %}vxlan{% endif %} [ml2] -mechanism_drivers = openvswitch,genericswitch{% if enable_ironic_neutron_agent | bool %},baremetal{% endif %} +mechanism_drivers = openvswitch{% if neutron_ml2_generic_switch_configs is defined %},genericswitch{%endif %}{% if enable_ironic_neutron_agent | bool %},baremetal{% endif %},l2population [ml2_type_vlan] {% if neutron_networks is defined %} From c6a00389e24e3d23f0d51ef2431a3ca20196de4b Mon Sep 17 00:00:00 2001 From: root Date: Thu, 5 Dec 2019 10:36:40 -0600 Subject: [PATCH 22/55] [kvm] Add Ceph keyring files. 
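(For context: once the cephx key variables are supplied, each keyring template below is expected to render to a plain-text Ceph keyring stanza like the following, shown here for the nova-side cinder keyring with a placeholder in place of the real base64 key.)

[client.cinder]
    key = <base64-encoded cephx key>
    caps mon = "profile rbd"
    caps osd = "profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=kvm-images"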
--- .../cinder/cinder-backup/ceph.client.cinder-backup.keyring | 2 ++ .../cinder/cinder-volume/ceph.client.cinder.keyring | 2 ++ .../node_custom_config/glance/ceph.client.kvm-images.keyring | 2 ++ kolla/node_custom_config/nova/ceph.client.cinder.keyring | 4 ++++ kolla/node_custom_config/nova/ceph.client.nova.keyring | 2 ++ 5 files changed, 12 insertions(+) create mode 100644 kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring create mode 100644 kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring create mode 100644 kolla/node_custom_config/glance/ceph.client.kvm-images.keyring create mode 100644 kolla/node_custom_config/nova/ceph.client.cinder.keyring create mode 100644 kolla/node_custom_config/nova/ceph.client.nova.keyring diff --git a/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring b/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring new file mode 100644 index 00000000..5f152077 --- /dev/null +++ b/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring @@ -0,0 +1,2 @@ +[client.cinder-backup] + key = {% cephx_key_cinder_backup %} diff --git a/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring b/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring new file mode 100644 index 00000000..634263ab --- /dev/null +++ b/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring @@ -0,0 +1,2 @@ +[client.cinder] + key = {% cephx_key_cinder_volume %} \ No newline at end of file diff --git a/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring b/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring new file mode 100644 index 00000000..ac7e483e --- /dev/null +++ b/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring @@ -0,0 +1,2 @@ +[client.kvm-images] + key = {% cephx_key_kvm_images %} diff --git a/kolla/node_custom_config/nova/ceph.client.cinder.keyring b/kolla/node_custom_config/nova/ceph.client.cinder.keyring new file mode 100644 index 00000000..a8f5f9bc --- /dev/null +++ b/kolla/node_custom_config/nova/ceph.client.cinder.keyring @@ -0,0 +1,4 @@ +[client.cinder] + key = {% cephx_key_cinder_volume %} + caps mon = "profile rbd" + caps osd = "profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=kvm-images" diff --git a/kolla/node_custom_config/nova/ceph.client.nova.keyring b/kolla/node_custom_config/nova/ceph.client.nova.keyring new file mode 100644 index 00000000..676d7899 --- /dev/null +++ b/kolla/node_custom_config/nova/ceph.client.nova.keyring @@ -0,0 +1,2 @@ +[client.nova] + key = {% cephx_key_nova %} From aafe071e2d00b0e4c6a1fe2e66baae49d5d98b6d Mon Sep 17 00:00:00 2001 From: root Date: Wed, 18 Dec 2019 11:13:45 -0600 Subject: [PATCH 23/55] Add ceph volume config to cinder --- kolla/node_custom_config/cinder.conf | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/kolla/node_custom_config/cinder.conf b/kolla/node_custom_config/cinder.conf index 7993ecce..217a1303 100644 --- a/kolla/node_custom_config/cinder.conf +++ b/kolla/node_custom_config/cinder.conf @@ -1,2 +1,22 @@ [DEFAULT] -default_volume_type = iscsi \ No newline at end of file +default_volume_type = iscsi +{% if cinder_backend_ceph | bool %} +enabled_backends = rbd-1 +{% endif %} + +{% if cinder_backend_ceph | bool %} +[rbd-1] +volume_driver = cinder.volume.drivers.rbd.RBDDriver +volume_backend_name = rbd-1 +rbd_pool = {{ ceph_cinder_pool_name }} +rbd_ceph_conf = 
/etc/ceph/ceph.conf +rbd_flatten_volume_from_snapshot = false +rbd_max_clone_depth = 5 +rbd_store_chunk_size = 4 +rados_connect_timeout = 5 +rbd_user = cinder +rbd_secret_uuid = {{ cinder_rbd_secret_uuid }} +report_discard_supported = True +image_upload_use_cinder_backend = True +#glance_api_version = 2 +{% endif %} From ca63ea2804b2b58f3e0f66d838acc1606de770f2 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 6 Jan 2020 13:23:45 -0600 Subject: [PATCH 24/55] Fix ceph config in nova --- kolla/node_custom_config/nova.conf | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/kolla/node_custom_config/nova.conf b/kolla/node_custom_config/nova.conf index 147c9bed..474bb809 100644 --- a/kolla/node_custom_config/nova.conf +++ b/kolla/node_custom_config/nova.conf @@ -107,3 +107,28 @@ password = {{ nova_keystone_password }} os_region_name = "{{ openstack_region_name }}" {% endif %} {% endif %} + + +# TODO: [codyhammock 2019-09-18] +# Override some options that otherwise assume "enable_ceph" is true +{% if nova_compute_virt_type in ['kvm', 'qemu'] %} +[libvirt] +connection_uri = "qemu+tcp://{{ api_interface_address }}/system" +{% if nova_backend == "rbd" %} +images_type = rbd +images_rbd_pool = {{ ceph_nova_pool_name }} +images_rbd_ceph_conf = /etc/ceph/ceph.conf +rbd_user = nova +disk_cachemodes="network=writeback" +{% if nova_hw_disk_discard != '' %} +hw_disk_discard = {{ nova_hw_disk_discard }} +{% endif %} +{% endif %} +{% if nova_backend == "rbd" and external_ceph_cephx_enabled | bool %} +rbd_secret_uuid = {{ rbd_secret_uuid }} +{% endif %} +virt_type = {{ nova_compute_virt_type }} +{% endif %} +{% if nova_libvirt_cpu_mode %} +cpu_mode = {{ nova_libvirt_cpu_mode }} +{% endif %} From b39adb81d285b28119f930986f9561a32d7e0bcf Mon Sep 17 00:00:00 2001 From: root Date: Thu, 20 Feb 2020 08:30:57 -0600 Subject: [PATCH 25/55] [kvm] Add encrypted ceph client key files --- .../cinder/ceph.client.cinder.keyring | 9 +++++++++ .../ceph.client.cinder-backup.keyring | 11 ++++++++-- .../cinder-volume/ceph.client.cinder.keyring | 11 ++++++++-- .../glance/ceph.client.kvm-images.keyring | 11 ++++++++-- .../nova/ceph.client.cinder.keyring | 20 +++++++++++++++---- .../nova/ceph.client.nova.keyring | 11 ++++++++-- 6 files changed, 61 insertions(+), 12 deletions(-) create mode 100644 kolla/node_custom_config/cinder/ceph.client.cinder.keyring diff --git a/kolla/node_custom_config/cinder/ceph.client.cinder.keyring b/kolla/node_custom_config/cinder/ceph.client.cinder.keyring new file mode 100644 index 00000000..014eb4aa --- /dev/null +++ b/kolla/node_custom_config/cinder/ceph.client.cinder.keyring @@ -0,0 +1,9 @@ +$ANSIBLE_VAULT;1.1;AES256 +32376162313135653430333733376535633530353862636665373430336236363561623031386531 +6134613333303165663363383261623362616338616263640a636561656139663065373861326664 +36663965626265666339653166653935313738393730313962353038356464626631326239353738 +6365323564396436320a373437336339633233316239653330306564336462646230353730366666 +33623461366166323734653533383736613539653934646134356634656531623365623735353836 +39373433633961646238363739366536636336323033306236316263346362393933663436333336 +38656134363461626461373832663934333333343764323637343763373234656430646564373561 +62626631396432353062 diff --git a/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring b/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring index 5f152077..69d78732 100644 --- 
a/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring +++ b/kolla/node_custom_config/cinder/cinder-backup/ceph.client.cinder-backup.keyring @@ -1,2 +1,9 @@ -[client.cinder-backup] - key = {% cephx_key_cinder_backup %} +$ANSIBLE_VAULT;1.1;AES256 +35626237353166396233653633613766303033663366363635653337306530663361636635333332 +3931303337366565393764393430393366663439643663330a333665323833393031366432353062 +64323036363838616261356662613135326264373039636331643634623561366133363935613336 +6266616332643033360a306262343766643633343266666261343764636263386332376232353532 +33626661333963623437383064303565376531656261666166306365366431623464363336626237 +31356434363332636435373033353266343735646435643537613937626662366661393764636261 +31383062626564663061396464343537303936366531646530393561353763386337626133393864 +36646465383765333637 diff --git a/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring b/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring index 634263ab..fb810a8e 100644 --- a/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring +++ b/kolla/node_custom_config/cinder/cinder-volume/ceph.client.cinder.keyring @@ -1,2 +1,9 @@ -[client.cinder] - key = {% cephx_key_cinder_volume %} \ No newline at end of file +$ANSIBLE_VAULT;1.1;AES256 +32393735303036616434633236623635326533373536663537393937333465363963383064306432 +3063383930333233643937356166386664383862623566370a643736663764306534666334633036 +32373738326430313566363630353666636133343333623037373334313531326662623036373965 +6136343562326637340a313030323761356263303933363136393230633434356665663035353039 +39633737663733666533363461313262656534663933336130393661343561373937363633616233 +34383861646632343334393064663362616135393738666536616266303264633330326334636137 +62353939666666383439646533346637363435363961316630356333616564656461346365343937 +62396235363638376230 diff --git a/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring b/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring index ac7e483e..26053889 100644 --- a/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring +++ b/kolla/node_custom_config/glance/ceph.client.kvm-images.keyring @@ -1,2 +1,9 @@ -[client.kvm-images] - key = {% cephx_key_kvm_images %} +$ANSIBLE_VAULT;1.1;AES256 +65363435636632386261666565333262316337653864626465663935393361326231636135333131 +6463626363393231366161626261363362336262363031620a323331326431666535343335343839 +38636362613539356164663835656436376262653034316466363962316437653366646436383636 +3866643830376238610a636130363335343730343261346530326337316164613233356263333734 +66343536373330313631356332343736303233383636653466346462306666313564323565333935 +61653739636333353138373332643838623734623939633564336532636462313963313632656564 +32383038663536383430373137393033626636333830383830363133613438623032376162616637 +35386633306365323166 diff --git a/kolla/node_custom_config/nova/ceph.client.cinder.keyring b/kolla/node_custom_config/nova/ceph.client.cinder.keyring index a8f5f9bc..51212964 100644 --- a/kolla/node_custom_config/nova/ceph.client.cinder.keyring +++ b/kolla/node_custom_config/nova/ceph.client.cinder.keyring @@ -1,4 +1,16 @@ -[client.cinder] - key = {% cephx_key_cinder_volume %} - caps mon = "profile rbd" - caps osd = "profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=kvm-images" +$ANSIBLE_VAULT;1.1;AES256 +38393064616566346637373034336537323933383265653037346662303135326539303331663162 
+3538613638396465613534323737363337643831653462630a303166363461633235666563646432 +38366237316630643335323136656136656362343033623965633234646233626135333366646637 +3864623466326164360a613436623236666234663037666633363265333466343663616165613937 +65323233653534646337626565393433323139316431613265383930303735373461313839336635 +38303566666563366632633733373262323832383732643432633535656462383431356563643264 +63666536643732326237643438343830653262626339363637636162656537343039613764636232 +63363934643038616633616138353664363939623165623639663936396262663938393430333337 +35626662623165646439636536393762373861643666613665333163616538613938653962393538 +63393236363034663839366364613637396461663630373836613262336638346330643630666439 +61383464363136356135656339343931373835396435636664303839356133396534373563386532 +62643361616330363839633234396338613131393137613538313065653430393132333533383561 +65306166633063616664393533323132636630386365616362383332393664376335353961323564 +31336139313533613835303963633330316664643033373837333538653537383839623233386161 +646661346164376131323763303964353664 diff --git a/kolla/node_custom_config/nova/ceph.client.nova.keyring b/kolla/node_custom_config/nova/ceph.client.nova.keyring index 676d7899..1472ab5f 100644 --- a/kolla/node_custom_config/nova/ceph.client.nova.keyring +++ b/kolla/node_custom_config/nova/ceph.client.nova.keyring @@ -1,2 +1,9 @@ -[client.nova] - key = {% cephx_key_nova %} +$ANSIBLE_VAULT;1.1;AES256 +36386335353136303231363762633034353162333864636532323337323038306433643239663562 +3338373335353664303733306564366463373736643064660a636634366534303263653232323832 +32363466306364303935313561366531656666383633353436333164633730666636353763323932 +3434393161316531330a613838336333633835356265313834633936343863386139623039653737 +63626262386131663038653466353137386433666136356237643939613762623737343834663464 +34326437376433333262323964636136313132626132363739316239653266306232323032616263 +30323838353737656665383962333765666666663164663336636466653062626431326236333265 +35316232366262353038 From 87dc1907a4deae13728e4697727ea2ab523e59a0 Mon Sep 17 00:00:00 2001 From: codyhammock Date: Mon, 4 May 2020 13:49:54 -0500 Subject: [PATCH 26/55] Update to allow prometheus server and alertmanager to use haproxy for SSL termination --- kolla/defaults.yml | 3 +- playbooks/prometheus.yml | 96 +------------------ roles/chameleon_prometheus/defaults/main.yml | 64 ++++++++++--- roles/chameleon_prometheus/tasks/main.yml | 9 +- .../templates/prometheus.yml.j2 | 2 +- 5 files changed, 63 insertions(+), 111 deletions(-) diff --git a/kolla/defaults.yml b/kolla/defaults.yml index a3bbc794..d0331507 100644 --- a/kolla/defaults.yml +++ b/kolla/defaults.yml @@ -183,12 +183,11 @@ chameleon_portal_mysql_user: chameleon_ro chameleon_portal_mysql_database: chameleon_prod # Prometheus -enable_prometheus: no +enable_prometheus: "{{ inventory_hostname in groups['prometheus'] }}" prometheus_port: "9090" prometheus_jupyterhub_exporter_token: prometheus_server_external_url: "{{ public_protocol }}://{{ prometheus_external_fqdn }}:{{ prometheus_port }}" prometheus_alertmanager_external_url: "{{ public_protocol }}://{{ proemtheus_external_fqdn }}:{{ prometheus_alertmanager_port }}" -# Legacy chameleon_prometheus role vars prometheus_bind_address: "{{ lookup('vars', 'ansible_' + network_interface).ipv4.address }}" # Redfish Monitor diff --git a/playbooks/prometheus.yml b/playbooks/prometheus.yml index 9cbf5172..ab02de4c 100644 --- a/playbooks/prometheus.yml +++ b/playbooks/prometheus.yml 
@@ -12,99 +12,11 @@ - chameleon_prometheus tasks: - set_fact: - prometheus_alertmanager_port: "{{ prometheus_services['alertmanager'].port }}" + prometheus_alertmanager_port: "{{ prometheus_services['prometheus-alertmanager'].port }}" prometheus_alertmanager_users: "{{ prometheus_alertmanager_users }}" - when: inventory_hostname in groups[prometheus_services['alertmanager'].group] + when: inventory_hostname in groups[prometheus_services['prometheus-alertmanager'].group] - set_fact: - prometheus_server_port: "{{ prometheus_services['server'].port }}" + prometheus_server_port: "{{ prometheus_services['prometheus-server'].port }}" prometheus_server_users: "{{ prometheus_server_users }}" prometheus_monitoring_user: "{{ prometheus_monitoring_user }}" - when: inventory_hostname in groups[prometheus_services['server'].group] - -- hosts: frontends - tasks: - - name: Configure virtual host for Prometheus server. - block: - - set_fact: - prometheus_server: "{{ hostvars[groups['prometheus'][0]] }}" - - name: Create password for readonly user. - htpasswd: - path: "/etc/httpd/prometheus_server.htpasswd" - name: "{{ prometheus_server.prometheus_monitoring_user }}" - password: "{{ prometheus_monitoring_password }}" - mode: 0640 - - name: Configure virtual host. - include_role: - name: apache - tasks_from: host - vars: - apache_server_alias: prometheus_server - apache_server_name: "{{ prometheus_server_hostname }}" - apache_server_https_port: 9090 - apache_server_http_port: null - apache_server_conf: | - - Order deny,allow - Allow from all - - AuthType Basic - AuthName "Prometheus authentication" - AuthBasicProvider file socache external - AuthUserFile /etc/httpd/prometheus_server.htpasswd - AuthExternal keystone-user - AuthExternalProvideCache On - AuthnCacheProvideFor keystone-user - - # preserve Host header to avoid cross-origin problems - ProxyPreserveHost on - # proxy to Alertmanager - ProxyPass http://{{ groups['prometheus'][0] }}:{{ prometheus_server.prometheus_server_port }}/ - ProxyPassReverse http://{{ groups['prometheus'][0] }}:{{ prometheus_server.prometheus_server_port }}/ - - # Read access to monitoring user + admins - - Require user {{ prometheus_server.prometheus_monitoring_user }} {{ prometheus_server.prometheus_server_users | join(' ') }} - - - # TODO: Write access to operators by group membership - - Require user {{ prometheus_server.prometheus_server_users | join(' ') }} - - - when: - - prometheus_server_hostname is defined - - external_vip_address is defined - - name: Configure virtual host for Prometheus Alertmanager. - block: - - set_fact: - prometheus_alertmanager: "{{ hostvars[groups['prometheus-alertmanager'][0]] }}" - - name: Configure virtual host. 
- include_role: - name: apache - tasks_from: host - vars: - apache_server_alias: prometheus_alertmanager - apache_server_name: "{{ prometheus_alertmanager_hostname }}" - apache_server_conf: | - - Order deny,allow - Allow from all - - AuthType Basic - AuthName "Alertmanager authentication" - AuthBasicProvider socache external - AuthExternal keystone-user - AuthExternalProvideCache On - AuthnCacheProvideFor keystone-user - - Require user {{ prometheus_alertmanager.prometheus_alertmanager_users | join(' ') }} - - # preserve Host header to avoid cross-origin problems - ProxyPreserveHost on - # proxy to Alertmanager - ProxyPass http://{{ groups['prometheus-alertmanager'][0] }}:{{ prometheus_alertmanager.prometheus_alertmanager_port }}/ - ProxyPassReverse http://{{ groups['prometheus-alertmanager'][0] }}:{{ prometheus_alertmanager.prometheus_alertmanager_port }}/ - - when: - - prometheus_alertmanager_hostname is defined - - external_vip_address is defined + when: inventory_hostname in groups[prometheus_services['prometheus-server'].group] diff --git a/roles/chameleon_prometheus/defaults/main.yml b/roles/chameleon_prometheus/defaults/main.yml index 758b2fa9..cc8b17fc 100644 --- a/roles/chameleon_prometheus/defaults/main.yml +++ b/roles/chameleon_prometheus/defaults/main.yml @@ -1,3 +1,6 @@ +--- +project_name: "chameleon_prometheus" + prometheus_docker_network_name: prometheus prometheus_docker_network_subnet: 172.18.0.0/24 @@ -7,7 +10,6 @@ prometheus_users: [] prometheus_monitoring_user: monitoring prometheus_server_users: "{{ prometheus_users }}" -enable_prometheus_alertmanager: yes # This should be encrypted! It is a secret value. prometheus_alertmanager_slack_api_url: "{{ slack_api_url }}" prometheus_alertmanager_users: "{{ prometheus_users }}" @@ -17,41 +19,69 @@ prometheus_mysql_exporter_user: mysqld-exporter prometheus_openstack_exporter_user: admin prometheus_services: - server: + prometheus-server: service_name: prometheus_server image: prom/prometheus:v2.10.0 group: prometheus + enabled: "{{ enable_prometheus }}" restart_handler: restart prometheus port: 9090 config_dir: /etc/prometheus scrape_target: yes - service_args: - - "--config.file=/etc/prometheus/prometheus.yml" - - "--storage.tsdb.path=/prometheus" - - "--web.console.libraries=/usr/share/prometheus/console_libraries" - - "--web.console.templates=/usr/share/prometheus/consoles" - - "--web.enable-lifecycle" - - "--web.external-url=https://{{ prometheus_server_hostname }}:9090" +# service_args: +# - "--config.file=/etc/prometheus/prometheus.yml" +# - "--storage.tsdb.path=/prometheus" +# - "--web.console.libraries=/usr/share/prometheus/console_libraries" +# - "--web.console.templates=/usr/share/prometheus/consoles" +# - "--web.enable-lifecycle" +# - "--web.external-url=https://{{ prometheus_server_hostname }}:9090" volumes: - "prometheus-data:/prometheus" - "/etc/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml" - "/etc/prometheus/rules:/etc/prometheus/rules" - "/etc/prometheus/secrets:/etc/prometheus/secrets" - alertmanager: + haproxy: + prometheus_server: + enabled: "{{ enable_prometheus }}" + mode: "http" + external: false + port: 9090 + prometheus_server_external: + enabled: "{{ enable_prometheus }}" + mode: "http" + external: true + port: 9090 + prometheus-alertmanager: service_name: prometheus_alertmanager image: prom/alertmanager:v0.15.2 group: prometheus-alertmanager restart_handler: restart alertmanager port: 9093 config_dir: /etc/alertmanager - service_args: - - "--web.external-url=https://{{ 
prometheus_alertmanager_hostname }}" - scrape_target: no + container_name: "prometheus_alertmanager" + enabled: "{{ enable_prometheus_alertmanager }}" volumes: - "alertmanager-data:/alertmanager" - "/etc/alertmanager:/etc/alertmanager" + scrape_target: no + haproxy: + prometheus_alertmanager: + enabled: "{{ enable_prometheus_alertmanager }}" + mode: "http" + external: false + port: "{{ prometheus_alertmanager_port }}" + auth_user: "{{ prometheus_alertmanager_user }}" + auth_pass: "{{ prometheus_alertmanager_password }}" + prometheus_alertmanager_external: + enabled: "{{ enable_prometheus_alertmanager }}" + mode: "http" + external: true + port: "{{ prometheus_alertmanager_port }}" + auth_user: "{{ prometheus_alertmanager_user }}" + auth_pass: "{{ prometheus_alertmanager_password }}" jupyterhub-exporter: group: jupyterhub + enabled: true port: 8075 scrape_target: yes bearer_token_file: /etc/prometheus/secrets/jupyterhub @@ -59,6 +89,7 @@ prometheus_services: service_name: prometheus_mysql_exporter image: prom/mysqld-exporter:v0.11.0 group: prometheus-mysqld-exporter + enabled: true restart_handler: restart mysql exporter port: 9104 config_dir: /etc/prometheus/mysqld-exporter @@ -68,6 +99,7 @@ prometheus_services: node-exporter: service_name: prometheus_node_exporter group: prometheus-node-exporter + enabled: true restart_handler: restart node exporter custom_service: yes port: 9100 @@ -78,6 +110,7 @@ prometheus_services: service_name: prometheus_openstack_exporter image: docker.chameleoncloud.org/chameleoncloud/prometheus-openstack-exporter:latest group: prometheus-openstack-exporter + enabled: true restart_handler: restart openstack exporter port: 9103 config_dir: /etc/prometheus @@ -102,6 +135,7 @@ prometheus_services: image: prom/snmp-exporter:v0.15.0 generator_image: prom/snmp-generator:master group: prometheus-snmp-exporter + enabled: true restart_handler: restart snmp exporter port: 9116 config_dir: /etc/prometheus/snmp-exporter @@ -111,21 +145,25 @@ prometheus_services: # The Ceph exporters use the built-in Prometheus metrics agent Ceph provides ceph-exporter: group: prometheus-ceph-exporter + enabled: true port: 9283 scrape_target: yes push-gateway: service_name: prometheus_pushgateway image: prom/pushgateway:v0.8.0 group: prometheus-push-gateway + enabled: true restart_handler: restart pushgateway port: 9091 config_dir: /etc/prometheus scrape_target: yes precis-event-listener-exporter: group: precis + enabled: true port: 8913 scrape_target: yes precis-endpoint-exporter: group: precis + enabled: true port: 8912 scrape_target: yes diff --git a/roles/chameleon_prometheus/tasks/main.yml b/roles/chameleon_prometheus/tasks/main.yml index 6b73e4b3..2358e10b 100644 --- a/roles/chameleon_prometheus/tasks/main.yml +++ b/roles/chameleon_prometheus/tasks/main.yml @@ -7,6 +7,7 @@ | map(attribute='value') | selectattr('image', 'defined') | map(attribute='group') | list }} + enable_prometheus_alertmanager: inventory_hostname in groups['prometheus-alertmanager'] - name: Create Docker network. 
docker_network: @@ -44,11 +45,11 @@ file: name: "/etc/prometheus/secrets" state: directory - when: inventory_hostname in groups[prometheus_services['server'].group] + when: inventory_hostname in groups[prometheus_services['prometheus-server'].group] - include_tasks: alertmanager.yml vars: - service: "{{ prometheus_services['alertmanager'] }}" + service: "{{ prometheus_services['prometheus-alertmanager'] }}" when: inventory_hostname in groups[service.group] - include_tasks: jupyterhub_exporter.yml @@ -76,12 +77,14 @@ service: "{{ prometheus_services['snmp-exporter'] }}" when: inventory_hostname in groups[service.group] +- include_tasks: loadbalancer.yml + # Prometheus server task should go last as it may be using # credentials generated in above steps. - include_tasks: server.yml vars: - service: "{{ prometheus_services['server'] }}" + service: "{{ prometheus_services['prometheus-server'] }}" when: inventory_hostname in groups[service.group] - name: Create Docker services. diff --git a/roles/chameleon_prometheus/templates/prometheus.yml.j2 b/roles/chameleon_prometheus/templates/prometheus.yml.j2 index 8fbbb790..04faffac 100644 --- a/roles/chameleon_prometheus/templates/prometheus.yml.j2 +++ b/roles/chameleon_prometheus/templates/prometheus.yml.j2 @@ -1,4 +1,4 @@ -{% set alertmanager_service = prometheus_services['alertmanager'] %} +{% set alertmanager_service = prometheus_services['prometheus-alertmanager'] %} {% set alertmanager_vars = hostvars[groups[alertmanager_service.group][0]] %} {% set snmp_exporter_service = prometheus_services['snmp-exporter'] %} {% set snmp_exporter_vars = hostvars[groups[snmp_exporter_service.group][0]] %} From 90f9a5c0b89b2244a3a5f47e35d0dcedb2637e1f Mon Sep 17 00:00:00 2001 From: codyhammock Date: Wed, 9 Sep 2020 09:50:14 -0500 Subject: [PATCH 27/55] [kvm] Remove BlazarFilter --- kolla/node_custom_config/nova.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kolla/node_custom_config/nova.conf b/kolla/node_custom_config/nova.conf index 474bb809..a4ebc76b 100644 --- a/kolla/node_custom_config/nova.conf +++ b/kolla/node_custom_config/nova.conf @@ -24,7 +24,7 @@ workers = 10 [filter_scheduler] # Override default filters (just remove filters not relevant to baremetal-only) # default: AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter -enabled_filters = ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,BlazarFilter +enabled_filters = ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter # https://docs.openstack.org/ironic/train/install/configure-compute.html # > Enables querying of individual hosts for instance information. # Not possible for bare metal nodes, so set it to False. @@ -60,7 +60,7 @@ instances = -1 cores = -1 ram = -1 {% endif %} -# https://docs.openstack.org/nova/rocky/configuration/config.html#quota.recheck_quota +# https://docs.openstack.org/nova/train/configuration/config.html#quota.recheck_quota # > This defaults to True (recheck quota after resource creation) but can be set # to False to avoid additional load if allowing quota to be exceeded because # of racing requests is considered acceptable. 
From bd96c02384d79d9a83586ea5232953e44b453d05 Mon Sep 17 00:00:00 2001 From: codyhammock Date: Wed, 9 Sep 2020 09:51:33 -0500 Subject: [PATCH 28/55] [kvm] Correct default volume type --- kolla/node_custom_config/cinder.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kolla/node_custom_config/cinder.conf b/kolla/node_custom_config/cinder.conf index 217a1303..257d040c 100644 --- a/kolla/node_custom_config/cinder.conf +++ b/kolla/node_custom_config/cinder.conf @@ -1,5 +1,5 @@ [DEFAULT] -default_volume_type = iscsi +default_volume_type = ceph-rbd {% if cinder_backend_ceph | bool %} enabled_backends = rbd-1 {% endif %} From 960f075b49f9f620567cacf52a6460c04468c853 Mon Sep 17 00:00:00 2001 From: codyhammock Date: Wed, 9 Sep 2020 09:52:43 -0500 Subject: [PATCH 29/55] [kvm] Enable federated auth --- kolla/node_custom_config/horizon/custom_local_settings | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kolla/node_custom_config/horizon/custom_local_settings b/kolla/node_custom_config/horizon/custom_local_settings index 2a7b6208..182a23eb 100644 --- a/kolla/node_custom_config/horizon/custom_local_settings +++ b/kolla/node_custom_config/horizon/custom_local_settings @@ -22,7 +22,7 @@ CHAMELEON_SITE_ID = '{{ chameleon_site_name }}' # Hide the region dropdown OPENSTACK_KEYSTONE_MULTIREGION_SUPPORT = False # Show the site dropdown -CHAMELEON_MULTISITE_SUPPORT = True +CHAMELEON_MULTISITE_SUPPORT = False {% else %} CHAMELEON_SITES = { {% for conf in horizon_regions %} @@ -96,7 +96,6 @@ WEBSSO_DEFAULT_REDIRECT = True # our configuration. WEBSSO_DEFAULT_REDIRECT_REGION = '{{ keystone_public_url }}/v3' WEBSSO_DEFAULT_REDIRECT_PROTOCOL = 'openid' - {% if enable_horizon_chameleon_websso | bool %} AUTHENTICATION_URLS = ['openstack_dashboard.cc_web_sso_urls', 'openstack_auth.urls'] WEBSSO_DEFAULT_REDIRECT_URL = '{{ horizon_chameleon_websso_host }}/sso/horizon' From 6bb770c061bc6d8f785301b497aaf68a8c6214ff Mon Sep 17 00:00:00 2001 From: codyhammock Date: Wed, 9 Sep 2020 09:57:06 -0500 Subject: [PATCH 30/55] [kvm] Configure Glance image conversion --- .../node_custom_config/glance/glance-api.conf | 23 +++++++++++++++++++ .../glance/glance-image-import.conf | 5 ++++ 2 files changed, 28 insertions(+) create mode 100644 kolla/node_custom_config/glance/glance-api.conf create mode 100644 kolla/node_custom_config/glance/glance-image-import.conf diff --git a/kolla/node_custom_config/glance/glance-api.conf b/kolla/node_custom_config/glance/glance-api.conf new file mode 100644 index 00000000..060a36ad --- /dev/null +++ b/kolla/node_custom_config/glance/glance-api.conf @@ -0,0 +1,23 @@ +[DEFAULT] +show_image_direct_url = True +client_socket_timeout = 0 + +[glance_store] +stores = rbd,file +default_store = rbd +rbd_store_pool = kvm-images +rbd_store_user = kvm-images +rbd_store_ceph_conf = /etc/ceph/ceph.conf +rbd_store_chunk_size = 8 + +[image_import_opts] +image_import_plugins = ['image_conversion'] + +[image_conversion] +output_format = raw + +[keystone_authtoken] +service_token_roles_required = True + +[taskflow_executor] +max_workers = 2 diff --git a/kolla/node_custom_config/glance/glance-image-import.conf b/kolla/node_custom_config/glance/glance-image-import.conf new file mode 100644 index 00000000..3f0efffe --- /dev/null +++ b/kolla/node_custom_config/glance/glance-image-import.conf @@ -0,0 +1,5 @@ +[image_import_opts] +image_import_plugins = ['image_conversion'] + +[image_conversion] +output_format = raw From 919fa842ce945c1c8b500a23dc9fbd87c271ac0e Mon Sep 17 00:00:00 2001 
From: codyhammock Date: Wed, 9 Sep 2020 09:58:03 -0500 Subject: [PATCH 31/55] [kvm] More federated auth configs --- kolla/node_custom_config/keystone/policy.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 kolla/node_custom_config/keystone/policy.yaml diff --git a/kolla/node_custom_config/keystone/policy.yaml b/kolla/node_custom_config/keystone/policy.yaml new file mode 100644 index 00000000..e3947b7a --- /dev/null +++ b/kolla/node_custom_config/keystone/policy.yaml @@ -0,0 +1,8 @@ +#identity:change_password: '!' +#identity:update_user: '!' +#identity:list_endpoints: role:reader and system_scope:all + +#identity:list_roles: 'role:reader and system_scope:all' +#identity:list_roles: 'role:reader' + +#identity:list_services: role:reader and system_scope:all From b9df6716d99bf606a629cdd9ae4fc8db5c25431a Mon Sep 17 00:00:00 2001 From: codyhammock Date: Wed, 9 Sep 2020 10:29:43 -0500 Subject: [PATCH 32/55] [kvm] Ceph configs for Cinder, Glance, Nova --- kolla/node_custom_config/cinder/ceph.conf | 17 +++++++++++++++++ kolla/node_custom_config/glance/ceph.conf | 7 +++++++ kolla/node_custom_config/nova/ceph.conf | 17 +++++++++++++++++ 3 files changed, 41 insertions(+) create mode 100644 kolla/node_custom_config/cinder/ceph.conf create mode 100644 kolla/node_custom_config/glance/ceph.conf create mode 100644 kolla/node_custom_config/nova/ceph.conf diff --git a/kolla/node_custom_config/cinder/ceph.conf b/kolla/node_custom_config/cinder/ceph.conf new file mode 100644 index 00000000..c333b3bb --- /dev/null +++ b/kolla/node_custom_config/cinder/ceph.conf @@ -0,0 +1,17 @@ +[global] +log file = /var/log/kolla/ceph/$cluster-$name.log +log to syslog = false +err to syslog = false +log to stderr = false +err to stderr = false + +fsid = {{ ceph_fsid }} +mon_initial_members = {{ ceph_mon_hostname }} +mon_host = {{ ceph_mon_address }} +auth_cluster_required = cephx +auth_service_required = cephx +auth_client_required = cephx + +osd pool default size = 1 +osd pool default min size = 1 + diff --git a/kolla/node_custom_config/glance/ceph.conf b/kolla/node_custom_config/glance/ceph.conf new file mode 100644 index 00000000..d9b73926 --- /dev/null +++ b/kolla/node_custom_config/glance/ceph.conf @@ -0,0 +1,7 @@ +[global] +fsid = {{ ceph_fsid }} +#mon_initial_members = {{ ceph_mon_hostname }} +mon_host = {{ ceph_mon_address }} +auth_cluster_required = cephx +auth_service_required = cephx +auth_client_required = cephx diff --git a/kolla/node_custom_config/nova/ceph.conf b/kolla/node_custom_config/nova/ceph.conf new file mode 100644 index 00000000..c333b3bb --- /dev/null +++ b/kolla/node_custom_config/nova/ceph.conf @@ -0,0 +1,17 @@ +[global] +log file = /var/log/kolla/ceph/$cluster-$name.log +log to syslog = false +err to syslog = false +log to stderr = false +err to stderr = false + +fsid = {{ ceph_fsid }} +mon_initial_members = {{ ceph_mon_hostname }} +mon_host = {{ ceph_mon_address }} +auth_cluster_required = cephx +auth_service_required = cephx +auth_client_required = cephx + +osd pool default size = 1 +osd pool default min size = 1 + From 389bda6f4496b6a10eb51c2e044fa7cad7afb83b Mon Sep 17 00:00:00 2001 From: codyhammock Date: Thu, 17 Sep 2020 08:47:02 -0500 Subject: [PATCH 33/55] [kvm] Merge from master --- kolla/node_custom_config/nova.conf | 12 ++--- roles/chameleon_prometheus/defaults/main.yml | 57 ++++++++------------ 2 files changed, 27 insertions(+), 42 deletions(-) diff --git a/kolla/node_custom_config/nova.conf b/kolla/node_custom_config/nova.conf index a4ebc76b..06f0d888 100644 --- 
a/kolla/node_custom_config/nova.conf +++ b/kolla/node_custom_config/nova.conf @@ -23,9 +23,9 @@ workers = 10 [filter_scheduler] # Override default filters (just remove filters not relevant to baremetal-only) -# default: AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter -enabled_filters = ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter -# https://docs.openstack.org/ironic/train/install/configure-compute.html +# default: RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter +enabled_filters = {{ nova_enabled_filters }} +# https://docs.openstack.org/ironic/rocky/install/configure-compute.html # > Enables querying of individual hosts for instance information. # Not possible for bare metal nodes, so set it to False. track_instance_changes = false @@ -129,6 +129,6 @@ rbd_secret_uuid = {{ rbd_secret_uuid }} {% endif %} virt_type = {{ nova_compute_virt_type }} {% endif %} -{% if nova_libvirt_cpu_mode %} -cpu_mode = {{ nova_libvirt_cpu_mode }} -{% endif %} +#{% if nova_libvirt_cpu_mode %} +#cpu_mode = {{ nova_libvirt_cpu_mode }} +#{% endif %} diff --git a/roles/chameleon_prometheus/defaults/main.yml b/roles/chameleon_prometheus/defaults/main.yml index cc8b17fc..99e0c6bc 100644 --- a/roles/chameleon_prometheus/defaults/main.yml +++ b/roles/chameleon_prometheus/defaults/main.yml @@ -28,13 +28,13 @@ prometheus_services: port: 9090 config_dir: /etc/prometheus scrape_target: yes -# service_args: -# - "--config.file=/etc/prometheus/prometheus.yml" -# - "--storage.tsdb.path=/prometheus" -# - "--web.console.libraries=/usr/share/prometheus/console_libraries" -# - "--web.console.templates=/usr/share/prometheus/consoles" -# - "--web.enable-lifecycle" -# - "--web.external-url=https://{{ prometheus_server_hostname }}:9090" + service_args: + - "--config.file=/etc/prometheus/prometheus.yml" + - "--storage.tsdb.path=/prometheus" + - "--web.console.libraries=/usr/share/prometheus/console_libraries" + - "--web.console.templates=/usr/share/prometheus/consoles" + - "--web.enable-lifecycle" + - "--web.external-url=https://{{ prometheus_server_hostname }}:9090" volumes: - "prometheus-data:/prometheus" - "/etc/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml" @@ -80,7 +80,7 @@ prometheus_services: auth_user: "{{ prometheus_alertmanager_user }}" auth_pass: "{{ prometheus_alertmanager_password }}" jupyterhub-exporter: - group: jupyterhub + group: prometheus-jupyterhub-exporter enabled: true port: 8075 scrape_target: yes @@ -99,7 +99,6 @@ prometheus_services: node-exporter: service_name: prometheus_node_exporter group: prometheus-node-exporter - enabled: true restart_handler: restart node exporter custom_service: yes port: 9100 @@ -110,7 +109,6 @@ prometheus_services: service_name: prometheus_openstack_exporter image: docker.chameleoncloud.org/chameleoncloud/prometheus-openstack-exporter:latest group: prometheus-openstack-exporter - enabled: true restart_handler: restart openstack exporter port: 9103 config_dir: /etc/prometheus @@ -135,35 +133,22 @@ prometheus_services: image: prom/snmp-exporter:v0.15.0 generator_image: prom/snmp-generator:master group: prometheus-snmp-exporter - enabled: true restart_handler: restart snmp exporter port: 9116 config_dir: /etc/prometheus/snmp-exporter scrape_target: no volumes: - "/etc/prometheus/snmp-exporter:/etc/snmp_exporter" - # The Ceph exporters use 
the built-in Prometheus metrics agent Ceph provides - ceph-exporter: - group: prometheus-ceph-exporter - enabled: true - port: 9283 - scrape_target: yes - push-gateway: - service_name: prometheus_pushgateway - image: prom/pushgateway:v0.8.0 - group: prometheus-push-gateway - enabled: true - restart_handler: restart pushgateway - port: 9091 - config_dir: /etc/prometheus - scrape_target: yes - precis-event-listener-exporter: - group: precis - enabled: true - port: 8913 - scrape_target: yes - precis-endpoint-exporter: - group: precis - enabled: true - port: 8912 - scrape_target: yes +# # The Ceph exporters use the built-in Prometheus metrics agent Ceph provides +# ceph-exporter: +# group: prometheus-ceph-exporter +# port: 9283 +# scrape_target: yes +# push-gateway: +# service_name: prometheus_pushgateway +# image: prom/pushgateway:v0.8.0 +# group: prometheus-push-gateway +# restart_handler: restart pushgateway +# port: 9091 +# config_dir: /etc/prometheus +# scrape_target: yes From 86def66fd539c2d18a258d44a5897c2735c066f9 Mon Sep 17 00:00:00 2001 From: codyhammock Date: Thu, 17 Sep 2020 12:06:12 -0500 Subject: [PATCH 34/55] [kvm] Update requirements --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 627ed068..a54e0355 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -ansible~=2.8.0 +ansible~=2.8 docker From 0a5cdeb1fa7da68fd1d1e20d560005fdc5e26ceb Mon Sep 17 00:00:00 2001 From: Cody Hammock Date: Tue, 3 Nov 2020 16:45:17 -0600 Subject: [PATCH 35/55] [kvm] Fixup cinder.conf if block --- kolla/node_custom_config/cinder.conf | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/kolla/node_custom_config/cinder.conf b/kolla/node_custom_config/cinder.conf index 257d040c..7e0ab69f 100644 --- a/kolla/node_custom_config/cinder.conf +++ b/kolla/node_custom_config/cinder.conf @@ -1,10 +1,8 @@ +{% if cinder_backend_ceph | bool %} [DEFAULT] default_volume_type = ceph-rbd -{% if cinder_backend_ceph | bool %} enabled_backends = rbd-1 -{% endif %} -{% if cinder_backend_ceph | bool %} [rbd-1] volume_driver = cinder.volume.drivers.rbd.RBDDriver volume_backend_name = rbd-1 From 08ee346539165f7970e6d96f96065b68b193f5f6 Mon Sep 17 00:00:00 2001 From: Jason Anderson Date: Tue, 3 Nov 2020 13:22:38 -0600 Subject: [PATCH 36/55] [keystone] Set project/project_names to optional --- kolla/node_custom_config/keystone/idp_mapping.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kolla/node_custom_config/keystone/idp_mapping.json b/kolla/node_custom_config/keystone/idp_mapping.json index 25c798e0..0452eac3 100644 --- a/kolla/node_custom_config/keystone/idp_mapping.json +++ b/kolla/node_custom_config/keystone/idp_mapping.json @@ -32,6 +32,7 @@ }, { "type": "projects", + "optional": true, "blacklist": { "id": [ ".*-admins$", @@ -72,6 +73,7 @@ }, { "type": "OIDC-project_names", + "optional": true, "blacklist": [ ".*-admins$", ".*-managers$" From 0600d962d4d7b310df4d6eee956920c7d7e22947 Mon Sep 17 00:00:00 2001 From: Zhuo Zhen Date: Mon, 15 Feb 2021 16:11:50 -0600 Subject: [PATCH 37/55] [chameleon_usage] add kvm usage report --- playbooks/chameleon_usage.yml | 25 -------- roles/chameleon_usage/defaults/main.yml | 9 +-- roles/chameleon_usage/tasks/config.yml | 19 ------ roles/chameleon_usage/tasks/heat_template.yml | 19 ------ roles/chameleon_usage/tasks/jupyter.yml | 38 ------------ roles/chameleon_usage/tasks/main.yml | 59 ++++++++++++++++++- roles/chameleon_usage/tasks/mysql_init.yml | 25 -------- 
roles/chameleon_usage/tasks/node.yml | 41 ------------- roles/chameleon_usage/tasks/user_project.yml | 24 -------- .../templates/chameleon-usage-extract-data.j2 | 3 +- ...chameleon-usage-heat-template-downloads.j2 | 8 --- ...meleon-usage-jupyter-usage-report-cache.j2 | 6 -- .../templates/chameleon-usage-jupyter.j2 | 7 --- .../chameleon-usage-sanity-checks.j2 | 7 --- .../chameleon-usage-user-project-report.j2 | 6 -- roles/chameleon_usage/templates/slack.json.j2 | 7 --- .../user_project_report_credentials.j2 | 6 -- 17 files changed, 59 insertions(+), 250 deletions(-) delete mode 100644 roles/chameleon_usage/tasks/config.yml delete mode 100644 roles/chameleon_usage/tasks/heat_template.yml delete mode 100644 roles/chameleon_usage/tasks/jupyter.yml delete mode 100644 roles/chameleon_usage/tasks/mysql_init.yml delete mode 100644 roles/chameleon_usage/tasks/node.yml delete mode 100644 roles/chameleon_usage/tasks/user_project.yml delete mode 100644 roles/chameleon_usage/templates/chameleon-usage-heat-template-downloads.j2 delete mode 100644 roles/chameleon_usage/templates/chameleon-usage-jupyter-usage-report-cache.j2 delete mode 100644 roles/chameleon_usage/templates/chameleon-usage-jupyter.j2 delete mode 100644 roles/chameleon_usage/templates/chameleon-usage-sanity-checks.j2 delete mode 100644 roles/chameleon_usage/templates/chameleon-usage-user-project-report.j2 delete mode 100644 roles/chameleon_usage/templates/slack.json.j2 delete mode 100644 roles/chameleon_usage/templates/user_project_report_credentials.j2 diff --git a/playbooks/chameleon_usage.yml b/playbooks/chameleon_usage.yml index 3a6c193e..c6db1680 100644 --- a/playbooks/chameleon_usage.yml +++ b/playbooks/chameleon_usage.yml @@ -2,28 +2,3 @@ - hosts: chameleon_usage roles: - role: chameleon_usage - vars: - action: 'mysql_init' - - role: chameleon_usage - vars: - action: 'config' - - role: chameleon_usage - vars: - action: 'node' - - role: chameleon_usage - when: enable_usage_appliance_report - vars: - action: 'heat_template' - - role: chameleon_usage - when: enable_user_project_report - vars: - action: 'user_project' - -- hosts: chameleon_usage_jupyter - roles: - - role: chameleon_usage - vars: - action: 'config' - - role: chameleon_usage - vars: - action: 'jupyter' diff --git a/roles/chameleon_usage/defaults/main.yml b/roles/chameleon_usage/defaults/main.yml index e6ffb992..fdd6fad4 100644 --- a/roles/chameleon_usage/defaults/main.yml +++ b/roles/chameleon_usage/defaults/main.yml @@ -2,12 +2,5 @@ chameleon_usage_docker_image: docker.chameleoncloud.org/chameleon_usage:latest chameleon_usage_config_dir: /etc/chameleon_usage chameleon_usage_mysql_user: cc_usage -ga_reporting_private_key_json_location: /root/ga-service-account-private-key.json -google_analytics_view_id: 90514617 -user_project_report_cron_name: user_project_report -user_project_report_cron_script: /usr/local/sbin/user_project_report.py -user_project_report_credentials: /usr/local/sbin/user_project_report_credentials -cron_notification: /usr/local/sbin/notification - -chameleon_usage_keycloak_client_id: user-group-import +usage_extract_command: extract_kvm_data diff --git a/roles/chameleon_usage/tasks/config.yml b/roles/chameleon_usage/tasks/config.yml deleted file mode 100644 index 88b16a86..00000000 --- a/roles/chameleon_usage/tasks/config.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Pull Docker image. - docker_image: - source: pull - name: "{{ chameleon_usage_docker_image }}" - force_source: yes - -- name: Create config directory. 
- file: - path: "{{ chameleon_usage_config_dir }}" - state: directory - -- name: Configure my.cnf - template: - src: my.cnf.j2 - dest: "{{ chameleon_usage_config_dir }}/my.cnf" - vars: - mysql_host: "{{ chameleon_usage_mysql_host }}" - root_password: "{{ database_password }}" diff --git a/roles/chameleon_usage/tasks/heat_template.yml b/roles/chameleon_usage/tasks/heat_template.yml deleted file mode 100644 index 5c018610..00000000 --- a/roles/chameleon_usage/tasks/heat_template.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Configure chameleon usage heat template download docker bash wapper - template: - src: chameleon-usage-heat-template-downloads.j2 - mode: a+x - dest: /usr/local/sbin/chameleon-usage-heat-template-downloads - -- name: Fill in up-to-date data to chameleon_usage - command: "/usr/local/sbin/chameleon-usage-heat-template-downloads" - -- name: Set up heat template downloads cron job - cron: - name: "heat_template_downloads" - minute: "0" - hour: "5" - day: "*" - month: "*" - weekday: "*" - job: "/usr/local/sbin/chameleon-usage-heat-template-downloads | /usr/bin/logger -t \"heat_template_downloads\"" diff --git a/roles/chameleon_usage/tasks/jupyter.yml b/roles/chameleon_usage/tasks/jupyter.yml deleted file mode 100644 index 511900e0..00000000 --- a/roles/chameleon_usage/tasks/jupyter.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- name: Configure chameleon jupyter usage docker bash wapper - template: - src: chameleon-usage-jupyter.j2 - mode: a+x - dest: /usr/local/sbin/chameleon-usage-jupyter - -- name: Fill in current data to chameleon_usage - command: "/usr/local/sbin/chameleon-usage-jupyter" - -- name: Set up jupyter usage cron job - cron: - name: "chameleon-usage-jupyter" - minute: "*/5" - hour: "*" - day: "*" - month: "*" - weekday: "*" - job: "/usr/local/sbin/chameleon-usage-jupyter | /usr/bin/logger -t \"chameleon-usage-jupyter\"" - -- name: Configure chameleon jupyter usage report cache docker bash wrapper - template: - src: chameleon-usage-jupyter-usage-report-cache.j2 - mode: a+x - dest: /usr/local/sbin/chameleon-usage-jupyter-usage-report-cache - -- name: Create jupyter usage report cache table and fill in data - command: "/usr/local/sbin/chameleon-usage-jupyter-usage-report-cache" - -- name: Set up jupyter usage report cache cron job - cron: - name: "chameleon-usage-jupyter-usage-report-cache" - minute: "*" - hour: "6" - day: "*" - month: "*" - weekday: "*" - job: "/usr/local/sbin/chameleon-usage-jupyter-usage-report-cache | /usr/bin/logger -t \"chameleon-usage-jupyter-usage-report-cache\"" diff --git a/roles/chameleon_usage/tasks/main.yml b/roles/chameleon_usage/tasks/main.yml index bd8db0e4..65fde773 100644 --- a/roles/chameleon_usage/tasks/main.yml +++ b/roles/chameleon_usage/tasks/main.yml @@ -1,2 +1,57 @@ ---- -- include_tasks: "{{ action }}.yml" \ No newline at end of file +--- +- name: Pull Docker image. + docker_image: + source: pull + name: "{{ chameleon_usage_docker_image }}" + force_source: yes + +- name: Create config directory. 
+ file: + path: "{{ chameleon_usage_config_dir }}" + state: directory + +- name: Configure my.cnf + template: + src: my.cnf.j2 + dest: "{{ chameleon_usage_config_dir }}/my.cnf" + vars: + mysql_host: "{{ chameleon_usage_mysql_host }}" + root_password: "{{ database_password }}" + +- name: Create MySQL user + local_action: + module: mysql_user + login_host: "{{ groups.mariadb[0] }}" + login_password: "{{ database_password }}" + name: "{{ chameleon_usage_mysql_user }}" + host: "%" + password: "{{ chameleon_usage_mysql_password }}" + priv: "{{ item }}" + append_privs: 'yes' + with_items: + - 'chameleon_usage.*:CREATE,SELECT,INSERT,UPDATE,DELETE' + - 'nova.*:SELECT' + - 'keystone.*:SELECT' + +- name: Initialize chameleon_usage database + shell: | + docker run --rm --net=host \ + -v "{{ chameleon_usage_config_dir }}/my.cnf:/etc/mysql/my.cnf" \ + {{ chameleon_usage_docker_image }} setup_database --mysql-conf /etc/mysql/my.cnf 2>&1 + +- name: Configure chameleon usage data extract docker bash wrapper + template: + src: chameleon-usage-extract-data.j2 + mode: a+x + dest: /usr/local/sbin/chameleon-usage-extract-kvm-data + +- name: Fill in up-to-date data to chameleon_usage + command: "/usr/local/sbin/chameleon-usage-extract-kvm-data" + +- name: Set up node data extract periodic task + include_role: + name: chameleon.periodic_task + vars: + task_name: "kvm_usage" + task_command: "/usr/local/sbin/chameleon-usage-extract-kvm-data" + task_calendar: "09:00" diff --git a/roles/chameleon_usage/tasks/mysql_init.yml b/roles/chameleon_usage/tasks/mysql_init.yml deleted file mode 100644 index 5d19eb3f..00000000 --- a/roles/chameleon_usage/tasks/mysql_init.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Create MySQL user - local_action: - module: mysql_user - login_host: "{{ groups.mariadb[0] }}" - login_password: "{{ database_password }}" - name: "{{ chameleon_usage_mysql_user }}" - host: "%" - password: "{{ chameleon_usage_mysql_password }}" - priv: "{{ item }}" - append_privs: 'yes' - with_items: - - 'blazar_backup\_%.*:CREATE,DROP,SELECT,INSERT' - - 'keystone_backup\_%.*:CREATE,DROP,SELECT,INSERT' - - 'nova_backup\_%.*:CREATE,DROP,SELECT,INSERT' - - 'ironic_backup\_%.*:CREATE,DROP,SELECT,INSERT' - - 'chameleon_usage.*:CREATE,SELECT,INSERT,UPDATE,DELETE' - - 'blazar.*:SELECT' - - 'ironic.*:SELECT' - -- name: Initialize chameleon_usage database - shell: | - docker run --rm --net=host \ - -v "{{ chameleon_usage_config_dir }}/my.cnf:/etc/mysql/my.cnf" \ - {{ chameleon_usage_docker_image }} setup_database --mysql-conf /etc/mysql/my.cnf 2>&1 diff --git a/roles/chameleon_usage/tasks/node.yml b/roles/chameleon_usage/tasks/node.yml deleted file mode 100644 index 86563557..00000000 --- a/roles/chameleon_usage/tasks/node.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -- name: Configure chameleon usage data extract docker bash wrapper - template: - src: chameleon-usage-extract-data.j2 - mode: a+x - dest: /usr/local/sbin/chameleon-usage-extract-data - -- name: Fill in up-to-date data to chameleon_usage - command: "/usr/local/sbin/chameleon-usage-extract-data" - -- name: Set up data extract cron job - cron: - name: "chameleon_usage" - minute: "0" - hour: "9" - day: "*" - month: "*" - weekday: "*" - job: "/usr/local/sbin/chameleon-usage-extract-data | /usr/bin/logger -t \"chameleon_usage\"" - -- name: Configure slack webhook file - template: - src: slack.json.j2 - mode: 0600 - dest: "{{ chameleon_usage_config_dir }}/slack.json" - -- name: Configure chameleon node usage sanity checks docker bash wrapper - template: - src: 
chameleon-usage-sanity-checks.j2 - mode: a+x - dest: /usr/local/sbin/chameleon-usage-sanity-checks - -- name: Set up data extract cron job - cron: - name: "chameleon_usage_sanity_checks" - minute: "0" - hour: "10" - day: "*" - month: "*" - weekday: "*" - job: "/usr/local/sbin/chameleon-usage-sanity-checks | /usr/bin/logger -t \"chameleon_usage_sanity_checks\"" diff --git a/roles/chameleon_usage/tasks/user_project.yml b/roles/chameleon_usage/tasks/user_project.yml deleted file mode 100644 index fbbbe475..00000000 --- a/roles/chameleon_usage/tasks/user_project.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: Configure credentials file - template: - src: user_project_report_credentials.j2 - mode: 0700 - dest: "{{ chameleon_usage_config_dir }}/user_project_report_credentials" - vars: - mysql_host: "{{ chameleon_usage_mysql_host }}" - -- name: Configure chameleon usage user project report docker bash wrapper - template: - src: chameleon-usage-user-project-report.j2 - mode: a+x - dest: /usr/local/sbin/chameleon-usage-user-project-report - -- name: Add glance backup cron jobs. - cron: - name: "{{ user_project_report_cron_name }}" - minute: "0" - hour: "3" - day: "*" - month: "*" - weekday: "*" - job: "/usr/local/sbin/chameleon-usage-user-project-report | /usr/bin/logger -t \"chameleon_usage_user_project\"" diff --git a/roles/chameleon_usage/templates/chameleon-usage-extract-data.j2 b/roles/chameleon_usage/templates/chameleon-usage-extract-data.j2 index 5f5dd5dc..3ca66e66 100644 --- a/roles/chameleon_usage/templates/chameleon-usage-extract-data.j2 +++ b/roles/chameleon_usage/templates/chameleon-usage-extract-data.j2 @@ -1,7 +1,6 @@ #! /bin/sh docker run --rm --net=host \ - -v "{{ backup_dir }}:{{ backup_dir }}" \ -v "{{ chameleon_usage_config_dir }}/my.cnf:/etc/mysql/my.cnf" \ {{ chameleon_usage_docker_image }} \ - extract_data --site {{chameleon_site_name}} --backup-location {{ backup_dir }} --mysql-conf /etc/mysql/my.cnf 2>&1 + {{ usage_extract_command }} --mysql-conf /etc/mysql/my.cnf 2>&1 diff --git a/roles/chameleon_usage/templates/chameleon-usage-heat-template-downloads.j2 b/roles/chameleon_usage/templates/chameleon-usage-heat-template-downloads.j2 deleted file mode 100644 index 594718d4..00000000 --- a/roles/chameleon_usage/templates/chameleon-usage-heat-template-downloads.j2 +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/sh - -docker run --rm --net=host \ - -v "{{ ga_reporting_private_key_json_location }}:/etc/google-service-account-private-key.json" \ - -v "{{ chameleon_usage_config_dir }}/my.cnf:/etc/mysql/my.cnf" \ - {{ chameleon_usage_docker_image }} \ - heat_template_downloads --google-key-file-location /etc/google-service-account-private-key.json \ - --google-profile-id {{google_analytics_view_id}} --mysql-conf /etc/mysql/my.cnf 2>&1 diff --git a/roles/chameleon_usage/templates/chameleon-usage-jupyter-usage-report-cache.j2 b/roles/chameleon_usage/templates/chameleon-usage-jupyter-usage-report-cache.j2 deleted file mode 100644 index e7fb38ad..00000000 --- a/roles/chameleon_usage/templates/chameleon-usage-jupyter-usage-report-cache.j2 +++ /dev/null @@ -1,6 +0,0 @@ -#! 
/bin/sh - -docker run --rm --net=host \ - -v "{{ chameleon_usage_config_dir }}/my.cnf:/etc/mysql/my.cnf" \ - {{ chameleon_usage_docker_image }} \ - jupyter_usage_report_cache --mysql-conf /etc/mysql/my.cnf 2>&1 diff --git a/roles/chameleon_usage/templates/chameleon-usage-jupyter.j2 b/roles/chameleon_usage/templates/chameleon-usage-jupyter.j2 deleted file mode 100644 index 6fd51f91..00000000 --- a/roles/chameleon_usage/templates/chameleon-usage-jupyter.j2 +++ /dev/null @@ -1,7 +0,0 @@ -#! /bin/sh - -docker run --rm --net=host \ - -v "/var/run/docker.sock:/var/run/docker.sock" \ - -v "{{ chameleon_usage_config_dir }}/my.cnf:/etc/mysql/my.cnf" \ - {{ chameleon_usage_docker_image }} \ - jupyter_usage --mysql-conf /etc/mysql/my.cnf 2>&1 diff --git a/roles/chameleon_usage/templates/chameleon-usage-sanity-checks.j2 b/roles/chameleon_usage/templates/chameleon-usage-sanity-checks.j2 deleted file mode 100644 index f3835dee..00000000 --- a/roles/chameleon_usage/templates/chameleon-usage-sanity-checks.j2 +++ /dev/null @@ -1,7 +0,0 @@ -#! /bin/sh - -docker run --rm --net=host \ - -v "{{ chameleon_usage_config_dir }}/slack.json:/etc/chameleon_usage/slack.json" \ - -v "{{ chameleon_usage_config_dir }}/my.cnf:/etc/mysql/my.cnf" \ - {{ chameleon_usage_docker_image }} \ - node_usage_sanity_checks --mysql-conf /etc/mysql/my.cnf --slack /etc/chameleon_usage/slack.json 2>&1 diff --git a/roles/chameleon_usage/templates/chameleon-usage-user-project-report.j2 b/roles/chameleon_usage/templates/chameleon-usage-user-project-report.j2 deleted file mode 100644 index 06447e41..00000000 --- a/roles/chameleon_usage/templates/chameleon-usage-user-project-report.j2 +++ /dev/null @@ -1,6 +0,0 @@ -#! /bin/sh - -docker run --rm --net=host \ - --env-file "{{ chameleon_usage_config_dir }}/user_project_report_credentials" \ - {{ chameleon_usage_docker_image }} \ - user_project_report 2>&1 diff --git a/roles/chameleon_usage/templates/slack.json.j2 b/roles/chameleon_usage/templates/slack.json.j2 deleted file mode 100644 index e74a7a02..00000000 --- a/roles/chameleon_usage/templates/slack.json.j2 +++ /dev/null @@ -1,7 +0,0 @@ -{ - "webhook": "{{ slack_hammers_webhook }}", - "hostname_names": { - "m01-07.chameleon.tacc.utexas.edu": "CHI@TACC", - "fx2-01.uc.chameleoncloud.org": "CHI@UC" - } -} diff --git a/roles/chameleon_usage/templates/user_project_report_credentials.j2 b/roles/chameleon_usage/templates/user_project_report_credentials.j2 deleted file mode 100644 index 493e5384..00000000 --- a/roles/chameleon_usage/templates/user_project_report_credentials.j2 +++ /dev/null @@ -1,6 +0,0 @@ -CHAMELEON_USAGE_DB_HOST={{ mysql_host }} -CHAMELEON_USAGE_DB_USER={{ chameleon_usage_mysql_user }} -CHAMELEON_USAGE_DB_PASSWORD={{ chameleon_usage_mysql_password }} -KEYCLOAK_SERVER_URL={{ keycloak_url }} -KEYCLOAK_CLIENT_ID={{ chameleon_usage_keycloak_client_id }} -KEYCLOAK_CLIENT_SECRET={{ chameleon_usage_keycloak_client_secret }} From 1e0af348d49879a9107764be4b5e5c50b5eb0a05 Mon Sep 17 00:00:00 2001 From: Cody Hammock Date: Fri, 12 Mar 2021 15:44:03 -0600 Subject: [PATCH 38/55] Adjust requirements for ansible to be more precisely locked to 2.8.x --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a54e0355..627ed068 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -ansible~=2.8 +ansible~=2.8.0 docker From f820b0880c4d4a0df3e9335801aa3673113e8f49 Mon Sep 17 00:00:00 2001 From: Cody Hammock Date: Wed, 7 Apr 2021 09:27:30 -0500 Subject: [PATCH 39/55] 
Add conditionals to bridge_mappings that account for KVM hypervisors --- kolla/node_custom_config/neutron/ml2_conf.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kolla/node_custom_config/neutron/ml2_conf.ini b/kolla/node_custom_config/neutron/ml2_conf.ini index 4b22b8a5..65653b4c 100644 --- a/kolla/node_custom_config/neutron/ml2_conf.ini +++ b/kolla/node_custom_config/neutron/ml2_conf.ini @@ -20,7 +20,7 @@ network_vlan_ranges = {# DEPRECATED: neutron_ovs_bridge_mappings still takes priority if in use #} {% if neutron_ovs_bridge_mappings is defined %} bridge_mappings = {{ neutron_ovs_bridge_mappings }} -{% elif neutron_networks is defined %} +{% elif neutron_networks is defined and (inventory_hostname in groups["network"] or (inventory_hostname in groups["compute"] and computes_need_external_bridge | bool )) %} bridge_mappings = {% for config in neutron_networks %}{{ config.name }}:{{ config.bridge_name }}{% if not loop.last %},{% endif %}{% endfor %} {% endif %} From a8b82df90129a5590df26773514ae8fc76ee60ac Mon Sep 17 00:00:00 2001 From: Cody Hammock Date: Wed, 7 Apr 2021 09:28:24 -0500 Subject: [PATCH 40/55] Remove galera.cnf template; no longer necessary because we're no longer using DB replication --- kolla/node_custom_config/galera.cnf | 13 ------------- 1 file changed, 13 deletions(-) delete mode 100644 kolla/node_custom_config/galera.cnf diff --git a/kolla/node_custom_config/galera.cnf b/kolla/node_custom_config/galera.cnf deleted file mode 100644 index b459ee1a..00000000 --- a/kolla/node_custom_config/galera.cnf +++ /dev/null @@ -1,13 +0,0 @@ -[mysqld] -server-id = {{ db_replication_id }} -binlog-do-db = keystone -binlog-format = ROW -replicate-ignore-table = keystone.revocation_event -replicate-ignore-table = keystone.trust -replicate-ignore-table = keystone.trust_role -replicate-ignore-table = keystone.endpoint -replicate-ignore-table = keystone.endpoint_group -replicate-ignore-table = keystone.project_endpoint_group -replicate-ignore-table = keystone.service -replicate-ignore-table = keystone.region -replicate-ignore-db = mysql From 071fa684768939cc6b50b6f8a8ec1245470c748d Mon Sep 17 00:00:00 2001 From: Cody Hammock Date: Mon, 21 Jun 2021 10:46:37 -0500 Subject: [PATCH 41/55] [KVM] Enable rbd_flatten_volume_from_snapshot in Cinder --- kolla/node_custom_config/cinder.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/kolla/node_custom_config/cinder.conf b/kolla/node_custom_config/cinder.conf index 7e0ab69f..509c745a 100644 --- a/kolla/node_custom_config/cinder.conf +++ b/kolla/node_custom_config/cinder.conf @@ -16,5 +16,6 @@ rbd_user = cinder rbd_secret_uuid = {{ cinder_rbd_secret_uuid }} report_discard_supported = True image_upload_use_cinder_backend = True +rbd_flatten_volume_from_snapshot = True #glance_api_version = 2 {% endif %} From c64c8a04cd82d549b674d78f5454190ba2b0e1f4 Mon Sep 17 00:00:00 2001 From: Cody Hammock Date: Wed, 28 Jul 2021 11:16:00 -0500 Subject: [PATCH 42/55] [kvm] Add galera.cnf to configure table_open_cache --- kolla/node_custom_config/galera.cnf | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 kolla/node_custom_config/galera.cnf diff --git a/kolla/node_custom_config/galera.cnf b/kolla/node_custom_config/galera.cnf new file mode 100644 index 00000000..dd58092d --- /dev/null +++ b/kolla/node_custom_config/galera.cnf @@ -0,0 +1,2 @@ +[mysqld] +table_open_cache = 20000 From 1b270186aaa0a1c0cd85e65be127586d797780e8 Mon Sep 17 00:00:00 2001 From: Cody Hammock Date: Thu, 2 Jun 2022 14:29:01 -0500 Subject: [PATCH 43/55] Add SSD 
cinder store --- kolla/node_custom_config/cinder.conf | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/kolla/node_custom_config/cinder.conf b/kolla/node_custom_config/cinder.conf index 509c745a..8fda2b0c 100644 --- a/kolla/node_custom_config/cinder.conf +++ b/kolla/node_custom_config/cinder.conf @@ -1,7 +1,7 @@ {% if cinder_backend_ceph | bool %} [DEFAULT] -default_volume_type = ceph-rbd -enabled_backends = rbd-1 +default_volume_type = ceph-hdd +enabled_backends = rbd-1,rbd-ssd [rbd-1] volume_driver = cinder.volume.drivers.rbd.RBDDriver @@ -18,4 +18,11 @@ report_discard_supported = True image_upload_use_cinder_backend = True rbd_flatten_volume_from_snapshot = True #glance_api_version = 2 + +[rbd-ssd] +volume_driver = cinder.volume.drivers.rbd.RBDDriver +volume_backend_name = rbd-ssd +rbd_pool = {{ ceph_cinder_ssd_pool_name }} +rbd_user = cinder +rbd_secret_uuid = {{ cinder_rbd_secret_uuid }} {% endif %} From 2d3a9fb2dddc15a8d3811e542da368aa36f6dee9 Mon Sep 17 00:00:00 2001 From: Cody Hammock Date: Thu, 2 Jun 2022 14:30:05 -0500 Subject: [PATCH 44/55] Fix up nova for non-baremetal configs --- kolla/node_custom_config/nova.conf | 35 +++++++++++++++++++----------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/kolla/node_custom_config/nova.conf b/kolla/node_custom_config/nova.conf index 06f0d888..9be11d27 100644 --- a/kolla/node_custom_config/nova.conf +++ b/kolla/node_custom_config/nova.conf @@ -1,9 +1,10 @@ [DEFAULT] # TODO: [jca 2019-02-08] # This might not be necessary; it is unclear why we have this setting disabled. -vif_plugging_is_fatal = false -vif_plugging_timeout = 0 -max_concurrent_builds = 0 +# commented out -- codyhammock +#vif_plugging_is_fatal = false +#vif_plugging_timeout = 0 +#max_concurrent_builds = 0 [compute] # https://docs.openstack.org/ironic/train/install/configure-compute.html @@ -16,7 +17,8 @@ # compute service, will be remapped to a different one. That may cause # the second compute service to also be disabled, and so on, until no # compute services are active. -consecutive_build_service_disable_threshold = 0 +# commented out -- codyhammock +#consecutive_build_service_disable_threshold = 0 [conductor] workers = 10 @@ -28,19 +30,22 @@ enabled_filters = {{ nova_enabled_filters }} # https://docs.openstack.org/ironic/rocky/install/configure-compute.html # > Enables querying of individual hosts for instance information. # Not possible for bare metal nodes, so set it to False. -track_instance_changes = false +# commented out -- codyhammock +# track_instance_changes = false # https://docs.openstack.org/ironic/train/install/configure-compute.html # > Enabling this option is beneficial as it reduces re-scheduling events # for ironic nodes when scheduling is based on resource classes, # especially for mixed hypervisor case with host_subset_size = 1. # However enabling it will also make packing of VMs on hypervisors # less dense even when scheduling weights are completely disabled. -shuffle_best_same_weighed_hosts = true +# commented out -- codyhammock +# shuffle_best_same_weighed_hosts = true [neutron] # Increase timeout for Neutron to reduce probability of error during launches # of a lot of nodes at once. -timeout = 300 +# commented out -- codyhammock +#timeout = 300 [oslo_messaging_notifications] # Experiment Precis requires 2.0 message format, i.e.
set driver to messagingv2 @@ -56,26 +61,30 @@ until_refresh = 0 reservation_expire = 86400 {% if enable_blazar | bool %} # Remove quota limits for hosts; these are handled via Blazar -instances = -1 -cores = -1 -ram = -1 +# commented out -- codyhammock +#instances = -1 +#cores = -1 +#ram = -1 {% endif %} # https://docs.openstack.org/nova/train/configuration/config.html#quota.recheck_quota # > This defaults to True (recheck quota after resource creation) but can be set # to False to avoid additional load if allowing quota to be exceeded because # of racing requests is considered acceptable. # Disable re-checking because we manage quota in Blazar. -recheck_quota = false +# commented out -- codyhammock +#recheck_quota = false [scheduler] # TODO: [jca 2019-02-08] # This seems quite high. We potentially don't need to keep it this high. # This is the value we have used in the past however. -max_attempts = 50 +# commented out -- codyhammock +# max_attempts = 50 # https://docs.openstack.org/ironic/train/install/configure-compute.html # > The recommended value of 2 minutes matches how often the Compute # service polls the Bare Metal service for node information. -discover_hosts_in_cells_interval = 120 +# commented out -- codyhammock +# discover_hosts_in_cells_interval = 120 {% if enable_nova_serialconsole_proxy | bool %} [serial_console] From 36987f196c9d59c81b90f17d122350c92f8c83e8 Mon Sep 17 00:00:00 2001 From: Cody Hammock Date: Thu, 2 Jun 2022 14:30:22 -0500 Subject: [PATCH 45/55] disable mitogen --- ansible.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index e484606c..4e171682 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -14,8 +14,8 @@ fact_caching_connection = .facts # dictionary, without overriding everything. hash_behaviour = merge # Use Mitogen for a higher-performance task execution strategy -strategy_plugins = ./venv/lib/mitogen-latest/ansible_mitogen/plugins/strategy -strategy = mitogen_linear +#strategy_plugins = ./venv/lib/mitogen-latest/ansible_mitogen/plugins/strategy +#strategy = mitogen_linear [inventory] # Ensure we fail if the inventory is malformed; this is important # for automation, so it does not proceed on failures. From f9a1193ff7df5fb80deab5ded4da8061e3612fe8 Mon Sep 17 00:00:00 2001 From: Jason Anderson Date: Thu, 2 Jun 2022 14:42:44 -0500 Subject: [PATCH 46/55] Enable/disable hammers depending on site config This at least prevents hammers from being installed if they are not going to be useful/relevant. However, it will not disable already-existing hammers.
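For orientation before the diff: the per-hammer toggle this patch introduces follows the pattern sketched below in a minimal, illustrative form. The cmd, calendar, and enabled keys and the enable_neutron toggle are taken verbatim from the diff; treating the flag as something a site could also override explicitly is an assumption.

    hammers:
      conflict_macs:
        cmd: conflict-macs delete
        calendar: daily
        # Installed only when the matching service is enabled for the site;
        # a site could also hard-code `enabled: false` to opt out (assumed).
        enabled: "{{ enable_neutron | bool }}"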
--- roles/hammers/defaults/main.yml | 25 ++++++++++++++++++++++--- roles/hammers/tasks/main.yml | 2 ++ 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/roles/hammers/defaults/main.yml b/roles/hammers/defaults/main.yml index 7a7ae6fc..f41ba3aa 100644 --- a/roles/hammers/defaults/main.yml +++ b/roles/hammers/defaults/main.yml @@ -6,41 +6,60 @@ hammers_mysql_user: cc_hammers hammers_mysql_host: "{{ database_address }}" hammers: + # Networking/Neutron floating_ip_reaper: cmd: floatingip-reaper --grace-days 7 calendar: daily + enabled: "{{ enable_neutron | bool }}" conflict_macs: cmd: conflict-macs delete calendar: daily + enabled: "{{ enable_neutron | bool }}" + + # Compute/Nova undead_instances: cmd: undead-instances delete calendar: daily + enabled: "{{ enable_nova | bool }}" + + # Bare metal/Ironic dirty_ports: cmd: dirty-ports clean --multiport calendar: daily + enabled: "{{ enable_ironic | bool }}" ironic_error_resetter: cmd: ironic-error-resetter reset calendar: hourly + enabled: "{{ enable_ironic | bool }}" + enforce_node_retirement: + cmd: "retirement-enforcer" + calendar: daily + enabled: "{{ enable_ironic | bool }}" + + # Reservation/Blazar orphan_resource_providers: cmd: orphan-resource-providers update calendar: daily + enabled: "{{ enable_blazar | bool }}" reservation_usage_notification: cmd: "reservation-usage-notification" calendar: daily + enabled: "{{ enable_blazar | bool }}" orphans_detector: cmd: "orphans-detector" calendar: daily + enabled: "{{ enable_blazar | bool }}" lease_stacking: cmd: "lease-stack-reaper delete" calendar: daily + enabled: "{{ enable_blazar | bool }}" clean_old_aggregates: cmd: "clean-old-aggregates" calendar: daily + enabled: "{{ enable_blazar | bool }}" unutilized_leases: cmd: "unutilized-lease-reaper delete" calendar: hourly - enforce_node_retirement: - cmd: "retirement-enforcer" - calendar: daily + enabled: "{{ enable_blazar | bool }}" hammers_slack_webhook: "{{ slack_api_url }}" diff --git a/roles/hammers/tasks/main.yml b/roles/hammers/tasks/main.yml index 200140e7..25d759f6 100644 --- a/roles/hammers/tasks/main.yml +++ b/roles/hammers/tasks/main.yml @@ -65,3 +65,5 @@ loop: "{{ hammers | dict2items }}" loop_control: label: "{{ item.key }}" + when: + - "{{ item.enabled | bool }}" From 32e27b45f220bf254c343a98899484399f7250ff Mon Sep 17 00:00:00 2001 From: Jason Anderson Date: Thu, 2 Jun 2022 14:52:08 -0500 Subject: [PATCH 47/55] Use default keystone admin user/pw for hammers We already defaulted the username to the default admin user, but in that case we need to also use the same password, otherwise it will think the admin user has 2 passwords. 
--- kolla/defaults.yml | 3 ++- site-config.example/passwords.yml | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kolla/defaults.yml b/kolla/defaults.yml index 1eb5b7d7..42f911e1 100644 --- a/kolla/defaults.yml +++ b/kolla/defaults.yml @@ -70,7 +70,8 @@ enable_glance: yes enable_gnocchi: yes # Hammers -hammers_openstack_user: admin +hammers_openstack_user: "{{ keystone_admin_user }}" +hammers_openstack_password: "{{ keystone_admin_password }}" hammers_openstack_project_name: "{{ keystone_admin_project }}" # HAProxy diff --git a/site-config.example/passwords.yml b/site-config.example/passwords.yml index e49820d0..762c48eb 100644 --- a/site-config.example/passwords.yml +++ b/site-config.example/passwords.yml @@ -21,7 +21,6 @@ neutron_database_password: neutron_keystone_password: metadata_secret: hammers_mysql_password: -hammers_openstack_password: heat_database_password: heat_keystone_password: heat_domain_admin_password: From eb8c8fa5888ae12076cb2684120e6ac6aeeda11b Mon Sep 17 00:00:00 2001 From: Jason Anderson Date: Thu, 2 Jun 2022 15:03:19 -0500 Subject: [PATCH 48/55] allow disabling hammers --- roles/hammers/tasks/main.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/roles/hammers/tasks/main.yml b/roles/hammers/tasks/main.yml index 25d759f6..a838b8db 100644 --- a/roles/hammers/tasks/main.yml +++ b/roles/hammers/tasks/main.yml @@ -62,8 +62,7 @@ task_name: "hammer_{{ item.key }}" task_command: "/usr/local/sbin/cc-hammer {{ item.value.cmd }}" task_calendar: "{{ item.value.calendar }}" + task_enabled: "{{ item.value.enabled | bool }}" loop: "{{ hammers | dict2items }}" loop_control: label: "{{ item.key }}" - when: - - "{{ item.enabled | bool }}" From 6f7a1f7326ace6b4d0fae37758caf3a179c98ebf Mon Sep 17 00:00:00 2001 From: Michael Sherman Date: Thu, 9 Jan 2025 19:22:23 +0000 Subject: [PATCH 49/55] handle dependency drift --- ansible.cfg | 3 +++ requirements.txt | 2 ++ 2 files changed, 5 insertions(+) diff --git a/ansible.cfg b/ansible.cfg index 4e171682..2923779f 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -21,3 +21,6 @@ hash_behaviour = merge # Ensure we fail if the inventory is malformed; this is important # for automation, so it does not proceed on failures. 
unparsed_is_failed = true + +[galaxy] +server = https://old-galaxy.ansible.com/ diff --git a/requirements.txt b/requirements.txt index 919e35e6..d121f650 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,6 @@ ansible~=2.8.0 +jinja2==3.0.3 +pyopenssl docker openstacksdk # See https://github.com/weldr/lorax/commit/c56d57ef7ee1c329161158ea8867064f74c0cffa From 061dd5962797636991c97111ad6724c33b48a458 Mon Sep 17 00:00:00 2001 From: Michael Sherman Date: Thu, 9 Jan 2025 19:22:33 +0000 Subject: [PATCH 50/55] fix slack alerting --- roles/hammers/templates/slack.json.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/hammers/templates/slack.json.j2 b/roles/hammers/templates/slack.json.j2 index 1e5a8c09..26b6ba92 100644 --- a/roles/hammers/templates/slack.json.j2 +++ b/roles/hammers/templates/slack.json.j2 @@ -3,7 +3,7 @@ "hostname_names": { "m01-07.chameleon.tacc.utexas.edu": "CHI@TACC", "chi01.tacc.chameleoncloud.org": "CHI@TACC", - "m01-03.chameleon.tacc.utexas.edu": "KVM@TACC", + "kvm01.chameleon.tacc.utexas.edu": "KVM@TACC", "admin01.uc.chameleoncloud.org": "CHI@UC" } } From d56f72e35323a6863470b0395c1dc6d95a8bb380 Mon Sep 17 00:00:00 2001 From: Michael Sherman Date: Thu, 9 Jan 2025 20:56:02 +0000 Subject: [PATCH 51/55] disable caching --- ansible.cfg | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index 2923779f..bf2da924 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -2,20 +2,6 @@ # Ensure that ansible can find roles relative to its working directory # (instead of looking within playbooks directory or default paths) roles_path = ./roles:./roles/galaxy.ansible.com -# Turn on fact caching. 'smart' means that if there are no facts found -# for the host, they are fetched the first time. Subsequent reads come -# from the cache. -gathering = smart -fact_caching = jsonfile -fact_caching_connection = .facts -# NOTE(jason): this is only needed for certain sites that need to -# write in to a 'primary' Keystone service when registering endpoints. -# That requires overriding a few specific things in the openstack_auth -# dictionary, without overriding everything. 
-hash_behaviour = merge -# Use Mitogen for a higher-performance task execution strategy -#strategy_plugins = ./venv/lib/mitogen-latest/ansible_mitogen/plugins/strategy -#strategy = mitogen_linear [inventory] # Ensure we fail if the inventory is malformed; this is important From 65cf7508813708e13a9da61af000a3764f8c1ddb Mon Sep 17 00:00:00 2001 From: Michael Sherman Date: Thu, 9 Jan 2025 21:11:17 +0000 Subject: [PATCH 52/55] add missing idp password --- site-config.example/passwords.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/site-config.example/passwords.yml b/site-config.example/passwords.yml index 762c48eb..9f645800 100644 --- a/site-config.example/passwords.yml +++ b/site-config.example/passwords.yml @@ -34,6 +34,7 @@ ironic_inspector_database_password: ironic_inspector_keystone_password: ironic_pxe_root_password: keystone_admin_token: +keystone_federation_openid_crypto_password: keystone_fernet_token_list: [] keystone_idp_client_secret: telemetry_secret_key: From 080f5d0a9f903f0cab8ec58445ba47527d450e5c Mon Sep 17 00:00:00 2001 From: Michael Sherman Date: Thu, 9 Jan 2025 21:19:47 +0000 Subject: [PATCH 53/55] fix unset defaults --- kolla/node_custom_config/nova.conf | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kolla/node_custom_config/nova.conf b/kolla/node_custom_config/nova.conf index 9be11d27..7859983c 100644 --- a/kolla/node_custom_config/nova.conf +++ b/kolla/node_custom_config/nova.conf @@ -26,7 +26,9 @@ workers = 10 [filter_scheduler] # Override default filters (just remove filters not relevant to baremetal-only) # default: RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter +{% if nova_enabled_filters is defined %} enabled_filters = {{ nova_enabled_filters }} +{% endif %} # https://docs.openstack.org/ironic/rocky/install/configure-compute.html # > Enables querying of individual hosts for instance information. # Not possible for bare metal nodes, so set it to False. @@ -138,6 +140,3 @@ rbd_secret_uuid = {{ rbd_secret_uuid }} {% endif %} virt_type = {{ nova_compute_virt_type }} {% endif %} -#{% if nova_libvirt_cpu_mode %} -#cpu_mode = {{ nova_libvirt_cpu_mode }} -#{% endif %} From 575417ac355cb301e0030eb7afe0651681f1432c Mon Sep 17 00:00:00 2001 From: Michael Sherman Date: Tue, 14 Jan 2025 14:29:53 -0600 Subject: [PATCH 54/55] print better errors --- ansible.cfg | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ansible.cfg b/ansible.cfg index bf2da924..7919bac6 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -3,6 +3,9 @@ # (instead of looking within playbooks directory or default paths) roles_path = ./roles:./roles/galaxy.ansible.com +# print better error messages +stdout_callback = yaml + [inventory] # Ensure we fail if the inventory is malformed; this is important # for automation, so it does not proceed on failures. 
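A side note on the enabled_filters guard added in [PATCH 53/55]: because the template now only renders the option when nova_enabled_filters is defined, a site that wants to pin the scheduler filters sets the variable itself, roughly as sketched below. The filter names are copied from the default list quoted in the nova.conf comment; placing the override in a site-level defaults file is an assumption.

    # Hypothetical site-level override; leave it undefined to omit
    # enabled_filters entirely and fall back to Nova's built-in defaults.
    nova_enabled_filters: "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter"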
From c80aadad24a7d4e2b1c4ac65fae6401ee14244fc Mon Sep 17 00:00:00 2001 From: Michael Sherman Date: Wed, 22 Jan 2025 15:31:38 +0000 Subject: [PATCH 55/55] add missing prometheus params --- kolla/defaults.yml | 1 + site-config.example/inventory/hosts | 2 +- site-config.example/passwords.yml | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/kolla/defaults.yml b/kolla/defaults.yml index 42f911e1..b3c283ec 100644 --- a/kolla/defaults.yml +++ b/kolla/defaults.yml @@ -224,6 +224,7 @@ prometheus_server_external_url: "{{ public_protocol }}://{{ prometheus_external_ prometheus_alertmanager_external_url: "{{ public_protocol }}://{{ prometheus_external_fqdn }}:{{ prometheus_alertmanager_port }}" # Legacy chameleon_prometheus role vars prometheus_bind_address: "{{ lookup('vars', 'ansible_' + network_interface).ipv4.address }}" +prometheus_user: prometheus # Redfish Monitor redfish_monitor_openstack_user: "{{ keystone_admin_username }}" diff --git a/site-config.example/inventory/hosts b/site-config.example/inventory/hosts index 54191720..0c753535 100644 --- a/site-config.example/inventory/hosts +++ b/site-config.example/inventory/hosts @@ -818,7 +818,7 @@ ceph-mgr [prometheus-openstack-exporter:children] monitoring -[prometheus-push-gateway:children] +[prometheus-pushgateway:children] monitoring [prometheus-redis-exporter:children] diff --git a/site-config.example/passwords.yml b/site-config.example/passwords.yml index 9f645800..bee454b0 100644 --- a/site-config.example/passwords.yml +++ b/site-config.example/passwords.yml @@ -59,6 +59,7 @@ prometheus_mysql_exporter_password: prometheus_alertmanager_password: prometheus_monitoring_password: prometheus_openstack_exporter_password: +prometheus_password: prometheus_jupyterhub_password: changeme rabbitmq_cluster_cookie: rabbitmq_monitoring_password:
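To close out [PATCH 55/55]: the new prometheus_user default and the prometheus_password entry pair up as site-level settings, roughly as below. The variable names come from the patch itself; the placeholder value and the idea of overriding the user per site are assumptions.

    # site passwords file -- placeholder only, generate a real secret
    prometheus_password: "CHANGE_ME"

    # optional per-site override; kolla/defaults.yml already defaults this
    prometheus_user: prometheus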