diff --git a/ansible/roles/dingo-command/defaults/main.yml b/ansible/roles/dingo-command/defaults/main.yml index 95c1b21e5c..8f54534428 100644 --- a/ansible/roles/dingo-command/defaults/main.yml +++ b/ansible/roles/dingo-command/defaults/main.yml @@ -28,6 +28,36 @@ dingo_command_services: tls_backend: "{{ dingo_command_enable_tls_backend }}" backend_http_extra: - "option httpchk GET /docs" + +dingo_bear_services: + dingo-bear: + container_name: dingo-bear + group: dingo-bear + enabled: true + image: "{{ dingo_bear_image_full }}" + volumes: "{{ dingo_bear_default_volumes + dingo_bear_extra_volumes }}" + dimensions: "{{ dingo_bear_dimensions }}" + healthcheck: "{{ dingo_bear_healthcheck }}" + haproxy: + dingo-bear: + enabled: "{{ enable_dingo_bear }}" + mode: "http" + external: false + port: "{{ dingo_bear_port }}" + listen_port: "{{ dingo_bear_listen_port }}" + tls_backend: "{{ dingo_bear_enable_tls_backend }}" + backend_http_extra: + - "option httpchk GET /docs" + dingo_bear_external: + enabled: "{{ enable_dingo_bear }}" + mode: "http" + external: true + external_fqdn: "{{ dingo_bear_external_fqdn }}" + port: "{{ dingo_bear_port }}" + listen_port: "{{ dingo_bear_listen_port }}" + tls_backend: "{{ dingo_bear_enable_tls_backend }}" + backend_http_extra: + - "option httpchk GET /docs" dingo_command_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}" dingo_command_tag: "{{ openstack_tag }}" @@ -140,4 +170,54 @@ aliyun_clickhouse_host: "10.220.244.176" aliyun_clickhouse_port: 8123 aliyun_clickhouse_user: "user01" aliyun_clickhouse_password: "YourStrongPassword123!" 
-aliyun_clickhouse_database: "bsm_monitor" \ No newline at end of file +aliyun_clickhouse_database: "bsm_monitor" + +metric_write_dingo: true +metric_write_clickhouse: true +openstack_lb_method: "ROUND_ROBIN" +openstack_lb_provider: "amphora" + +dingo_bear_tag: "{{ openstack_tag }}" +dingo_bear_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/dingo-bear" +dingo_bear_image_full: "{{ dingo_bear_image }}:{{ dingo_bear_tag }}" + + +dingo_bear_database_user: "{{ dingo_command_database_user }}" +dingo_bear_database_password: "{{ dingo_command_database_password }}" +dingo_bear_database_name: "dingo-bear" +dingo_bear_port: 38887 +dingo_bear_frontend_port: 30000 +dingo_bear_listen_port: "{{ dingo_bear_port }}" +dingo_bear_skyline_url: "http://{{ kolla_internal_vip_address }}:9999" +dingo_bear_database_address: "{{ database_address }}:{{ database_port }}" +dingo_bear_enable_tls_backend: "{{ kolla_enable_tls_backend }}" +dingo_bear_external_fqdn: "{{ dingo_command_external_fqdn }}" +dingo_bear_services: + dingo-bear: + container_name: dingo-bear + group: dingo-bear + enabled: true + image: "{{ dingo_bear_image_full }}" + volumes: "{{ dingo_bear_default_volumes + dingo_bear_extra_volumes }}" + dimensions: "{{ dingo_bear_dimensions }}" + healthcheck: "{{ dingo_bear_healthcheck }}" + haproxy: + dingo-bear: + enabled: "{{ enable_dingo_bear }}" + mode: "http" + external: false + port: "{{ dingo_bear_port }}" + listen_port: "{{ dingo_bear_listen_port }}" + tls_backend: "{{ dingo_bear_enable_tls_backend }}" + backend_http_extra: + - "option httpchk GET /docs" + dingo_bear_external: + enabled: "{{ enable_dingo_bear }}" + mode: "http" + external: true + external_fqdn: "{{ dingo_bear_external_fqdn }}" + port: "{{ dingo_bear_port }}" + listen_port: "{{ dingo_bear_listen_port }}" + tls_backend: "{{ dingo_bear_enable_tls_backend }}" + backend_http_extra: + - "option httpchk GET /docs" \ No newline at end of file diff --git 
a/ansible/roles/dingo-command/tasks/ceph.yml b/ansible/roles/dingo-command/tasks/ceph.yml index 2289d4d3a3..69c5e14395 100644 --- a/ansible/roles/dingo-command/tasks/ceph.yml +++ b/ansible/roles/dingo-command/tasks/ceph.yml @@ -74,3 +74,4 @@ -o name={{ dingo_command_ceph_client_name }},secretfile=/etc/ceph/ceph.client.{{ dingo_command_ceph_client_name }}.keyring,acl,noatime,_netdev,mds_namespace={{ dingo_command_ceph_mds_namespace }} when: ceph_mount_status.stdout == "" with_dict: "{{ dingo_command_services | select_services_enabled_and_mapped_to_host }}" + when: kolla_action != "upgrade" \ No newline at end of file diff --git a/ansible/roles/dingo-command/tasks/deploy-bear.yml b/ansible/roles/dingo-command/tasks/deploy-bear.yml new file mode 100644 index 0000000000..e48bed1eb7 --- /dev/null +++ b/ansible/roles/dingo-command/tasks/deploy-bear.yml @@ -0,0 +1,145 @@ +--- + +- name: Set dingo_bear fact + set_fact: + dingo_bear: "{{ dingo_bear_services['dingo-bear'] }}" + +- name: Creating dingo-bear database + become: true + kolla_toolbox: + container_engine: "{{ kolla_container_engine }}" + module_name: mysql_db + module_args: + login_host: "{{ database_address }}" + login_port: "{{ database_port }}" + login_user: "{{ dingo_command_database_shard_root_user }}" + login_password: "{{ database_password }}" + name: "{{ dingo_bear_database_name }}" + run_once: True + delegate_to: "{{ groups['dingo-command'][0] }}" + when: + - not use_preconfigured_databases | bool + - dingo_bear.enabled | bool + +- name: Creating dingo-bear database user and setting permissions + become: true + kolla_toolbox: + container_engine: "{{ kolla_container_engine }}" + module_name: mysql_user + module_args: + login_host: "{{ database_address }}" + login_port: "{{ database_port }}" + login_user: "{{ dingo_command_database_shard_root_user }}" + login_password: "{{ database_password }}" + name: "{{ dingo_bear_database_user }}" + password: "{{ dingo_bear_database_password }}" + host: "%" + priv: "{{ 
dingo_bear_database_name }}.*:ALL" + append_privs: "yes" + run_once: True + delegate_to: "{{ groups['dingo-command'][0] }}" + when: + - not use_preconfigured_databases | bool + - dingo_bear.enabled | bool + +- name: Pull dingo-bear image + become: true + kolla_container: + action: "pull_image" + common_options: "{{ docker_common_options }}" + image: "{{ dingo_bear_image_full }}" + when: + - dingo_bear.enabled | bool + +- name: Ensuring /var/log/dingo-bear directory exists + file: + path: "/var/log/kolla/dingo-bear" + state: "directory" + owner: "{{ config_owner_user }}" + group: "{{ config_owner_group }}" + mode: "0770" + become: true + when: + - dingo_bear.enabled | bool + +- name: Ensuring /etc/kolla/dingo-bear directory exists + file: + path: "{{ node_config_directory }}/dingo-bear" + state: "directory" + owner: "{{ config_owner_user }}" + group: "{{ config_owner_group }}" + mode: "0770" + become: true + when: + - dingo_bear.enabled | bool + + +- name: Copying over dingo-bear.conf + template: + src: "{{ role_path }}/templates/dingo-bear/dingo-bear.conf.j2" + dest: "{{ node_config_directory }}/dingo-bear/dingo-bear.conf" + mode: "0660" + become: true + when: + - dingo_bear.enabled | bool + +- name: Copying over supervisord.conf + template: + src: "{{ role_path }}/templates/dingo-bear/supervisord.conf" + dest: "{{ node_config_directory }}/dingo-bear/supervisord.conf" + mode: "0660" + become: true + when: + - dingo_bear.enabled | bool + + +- name: Copying over gunicorn.py + template: + src: "{{ role_path }}/templates/dingo-bear/gunicorn.py" + dest: "{{ node_config_directory }}/dingo-bear/gunicorn.py" + mode: "0660" + become: true + when: + - dingo_bear.enabled | bool + +- name: Copying over nginx.conf + template: + src: "{{ role_path }}/templates/dingo-bear/nginx.conf.j2" + dest: "{{ node_config_directory }}/dingo-bear/nginx.conf" + mode: "0660" + become: true + when: + - dingo_bear.enabled | bool + + +- name: Copy deploy.sh to target node + template: + src: "{{ 
role_path }}/templates/dingo-bear/deploy.sh.j2" + dest: "/tmp/deploy-bear.sh" + mode: "0755" + become: true + when: + - dingo_bear.enabled | bool + +- name: Execute deploy-bear.sh + command: > + bash /tmp/deploy-bear.sh + "{{ dingo_bear_image_full }}" + become: true + register: deploy_bear_result + when: + - dingo_bear.enabled | bool + +- name: Show deploy-bear.sh output + debug: + msg: "{{ deploy_bear_result.stdout_lines }}" + when: + - dingo_bear.enabled | bool + + + + + + + + diff --git a/ansible/roles/dingo-command/tasks/loadbalancer.yml b/ansible/roles/dingo-command/tasks/loadbalancer.yml index f126ec5b42..1d2e3184ce 100644 --- a/ansible/roles/dingo-command/tasks/loadbalancer.yml +++ b/ansible/roles/dingo-command/tasks/loadbalancer.yml @@ -5,3 +5,10 @@ vars: project_services: "{{ dingo_command_services }}" tags: always + +- name: "Configure loadbalancer for dingo-bear" + import_role: + name: loadbalancer-config + vars: + project_services: "{{ dingo_bear_services }}" + tags: always \ No newline at end of file diff --git a/ansible/roles/dingo-command/tasks/main.yml b/ansible/roles/dingo-command/tasks/main.yml index bc5d1e6257..913a1637f1 100644 --- a/ansible/roles/dingo-command/tasks/main.yml +++ b/ansible/roles/dingo-command/tasks/main.yml @@ -1,2 +1,6 @@ --- - include_tasks: "{{ kolla_action }}.yml" + when: "'dingo-bear' not in ansible_run_tags" + +- include_tasks: deploy-bear.yml + when: "'dingo-bear' in ansible_run_tags" diff --git a/ansible/roles/dingo-command/templates/dingo-bear/deploy.sh.j2 b/ansible/roles/dingo-command/templates/dingo-bear/deploy.sh.j2 new file mode 100644 index 0000000000..c0650d5f95 --- /dev/null +++ b/ansible/roles/dingo-command/templates/dingo-bear/deploy.sh.j2 @@ -0,0 +1,56 @@ +#!/bin/bash +set -e +set -o pipefail + +image_full_url=${1:-"docker.io/dongshany"} +node_port=38887 +frontend_port=30000 +module_name=dingo-bear +skyline_url="{{ dingo_bear_skyline_url }}" + +echo "开始检查并移除旧的容器" +if podman ps -f "name=${module_name}$" -f "status=running" -q |
grep -q .; then + printf "停止容器: " + podman stop "${module_name}" +fi + +if podman ps -a -f "name=${module_name}$" -q | grep -q .; then + printf "删除容器: " + podman rm "${module_name}" +fi + +# 确保必要目录存在 +mkdir -p /etc/kolla/dingo-bear /var/log/kolla/dingo-bear /var/lib/dingo-command + +# 构建运行命令(使用数组避免 eval) +run_args=( + run -d + --privileged + -p "${node_port}:${node_port}" + -p "${frontend_port}:${frontend_port}" + -e "SKYLINE_URL=${skyline_url}" + -v /etc/kolla/dingo-bear/:/etc/dingo-bear/:ro + -v /etc/localtime:/etc/localtime:ro + -v /var/log/kolla/dingo-bear/:/var/log/dingo-bear/ + --mount type=bind,source=/run/netns,target=/run/netns,bind-propagation=shared + -v /var/lib/dingo-command/:/var/lib/dingo-command/ + -e TZ=Asia/Shanghai + --health-cmd 'curl -sf http://localhost:38887/v1 || exit 1' + --health-interval=30s + --health-timeout=10s + --health-retries=3 + --health-start-period=30s + --name "${module_name}" + "${image_full_url}" +) + +printf "要执行的命令为:\npodman %s\n" "${run_args[*]}" +container_id=$(podman "${run_args[@]}") + +inspect_format='{{ '{{' }}.State.Running{{ '}}' }}' +if [[ "$(podman inspect -f "$inspect_format" "${container_id}")" == "true" ]]; then + printf "\033[32m%s 容器启动成功, 容器id: %s\033[0m\n" "${module_name}" "${container_id:0:12}" +else + printf "\033[31m%s 容器启动失败, 容器id: %s\033[0m\n" "${module_name}" "${container_id:0:12}" + exit 1 +fi diff --git a/ansible/roles/dingo-command/templates/dingo-bear/dingo-bear.conf.j2 b/ansible/roles/dingo-command/templates/dingo-bear/dingo-bear.conf.j2 new file mode 100644 index 0000000000..ae195a97e0 --- /dev/null +++ b/ansible/roles/dingo-command/templates/dingo-bear/dingo-bear.conf.j2 @@ -0,0 +1,198 @@ +[DEFAULT] +# region名称 黄海是RegionOne 其他环境按照部署长上去 +region_name = {{ dingo_command_region_name }} +aidc_name = {{ dingo_command_aidc_name }} +# 是否是中心region的标志 黄海是True 其他是False +center_region_flag = {{ dingo_command_center_region_flag }} +cluster_work_dir = /var/lib/dingo-command +my_ip = {{ api_interface_address 
}} +vip = {{ kolla_internal_fqdn }} +transport_url = {{ rpc_transport_url }} +# 中心region的mq地址,从黄海那边拿transport_url,如果是独立部署的不需要赋值 +center_transport_url = {{ center_rpc_transport_url }} +auth_url = http://{{ kolla_internal_fqdn }}:5000 +harbor_url = {{ harbor_url }} +fileserver_url = {{ fileserver_url }} +k8s_master_image = {{ k8s_master_image }} +k8s_master_flavor = {{ k8s_master_flavor }} +task_timeout = {{ task_timeout }} +soft_task_timeout = {{ soft_task_timeout }} +pushgateway_url = {{ kolla_internal_fqdn }}:19091 +chart_harbor_url = {{ chart_harbor_url }} +chart_harbor_user = {{ chart_harbor_user }} +chart_harbor_passwd = {{ chart_harbor_passwd }} +controller_password = {{ controller_password }} +controller_nodes = {{ controller_nodes }} +nameservers = {{ nameservers }} +kubeconfig_path = /etc/dingo-command/management.kubeconfig +datastore = {{ dingo_command_datastore }} +consul_address = {{ consul_address }} +consul_token = {{ consul_token }} +region_name = {{ dingo_command_region_name }} +prometheus_url = {{ prometheus_url }} +openstack_lb_provider = {{ openstack_lb_provider }} +openstack_lb_method = {{ openstack_lb_method }} +maas_base_url = {{ maas_base_url }} +maas_api_key = {{ maas_api_key }} +maas_llm_url = {{ maas_llm_url }} + +[database] +connection = mysql+pymysql://{{ dingo_bear_database_user }}:{{ dingo_bear_database_password }}@{{ dingo_bear_database_address }}/{{ dingo_bear_database_name }}?charset=utf8mb3 + + +[bigscreen] +prometheus_query_url = {{ bigscreen_prometheus_query_url }} +metrics_fetch_interval = 1800 +metrics_expiration_time = 3600 +memcached_address = {{ api_interface_address }}:11211 +memcached_key_prefix = bigscreen_metrics_ + +[redis] +redis_ip = {{ api_interface_address }} +redis_port = 6379 +redis_password = {{ redis_master_password }} +sentinel_url = "sentinel://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}:{{ redis_master_password }}@{{ 'api' | kolla_address(host) }}:{{ redis_sentinel_port }}{% else %};sentinel://:{{
redis_master_password }}@{{ 'api' | kolla_address(host) }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}" + + +[ironic] +auth_url = http://{{ kolla_internal_fqdn }}:5000/ +auth_type = password +project_domain = Default +user_domain = Default +project_name = service +user_name = {{ ironic_keystone_user }} +password = {{ ironic_keystone_password }} +region_name = RegionOne + +[nova] +auth_url = http://{{ kolla_internal_fqdn }}:5000/ +auth_type = password +project_domain = Default +user_domain = Default +project_name = service +user_name = {{ nova_keystone_user }} +password = {{ nova_keystone_password }} +region_name = RegionOne + +[neutron] +metadata_proxy_shared_secret = {{ metadata_secret }} +service_metadata_proxy = true +auth_url = {{ keystone_internal_url }} +auth_type = password +project_domain_name = {{ default_project_domain_name }} +user_domain_id = {{ default_user_domain_id }} +project_name = service +username = {{ neutron_keystone_user }} +password = {{ neutron_keystone_password }} +region_name = {{ openstack_region_name }} +valid_interfaces = internal +cafile = {{ openstack_cacert }} + +[cloudkitty] +auth_url = http://{{ kolla_internal_fqdn }}:5000 +auth_type = password +project_domain = {{ default_project_domain_name }} +user_domain = {{ default_project_domain_name }} +project_name = service +username = "cloudkitty" +password = {{ cloudkitty_keystone_password }} +region_name = {{ openstack_region_name }} + +[aliyun_dingodb] +host = {{ aliyun_dingodb_host }} +port = {{ aliyun_dingodb_port }} +user = {{ aliyun_dingodb_user }} +read_user = {{ aliyun_dingodb_read_user }} +password = {{ aliyun_dingodb_password }} +read_password = {{ aliyun_dingodb_read_password }} +report_database = {{ aliyun_dingodb_report_database }} + +[cinder] +auth_url = http://{{ kolla_internal_fqdn }}:5000/ +auth_type = password +project_domain = Default +user_domain = Default +project_name = service +user_name = {{ cinder_keystone_user }} +password = {{ cinder_keystone_password }} 
+region_name = {{ openstack_region_name }} + +[harbor] +base_url={{ dingo_command_harbor_url }} +robot_username={{ dingo_command_harbor_robot_username }} +robot_token={{ dingo_command_harbor_robot_token }} +verify_ssl={{ dingo_command_harbor_verify_ssl }} +storage_limit={{ dingo_command_harbor_storage_limit }} + +[METRIC] +write_dingo = {{ metric_write_dingo }} +write_clickhouse = {{ metric_write_clickhouse }} + +[CLICKHOUSE] +host = {{ aliyun_clickhouse_host }} +port = {{ aliyun_clickhouse_port }} +username = {{ aliyun_clickhouse_user }} +password = {{ aliyun_clickhouse_password }} +database = {{ aliyun_clickhouse_database }} + +[manila] +auth_url = http://{{ kolla_internal_fqdn }}:5000/ +auth_type = password +project_domain = Default +user_domain = Default +project_name = service +user_name = {{ keystone_admin_user }} +password = {{ keystone_admin_password }} +region_name = RegionOne + +[keystone] +auth_url = http://{{ kolla_internal_fqdn }}:5000/ +auth_type = password +project_domain = Default +user_domain = Default +project_name = service +user_name = {{ keystone_alayanew_admin_user }} +password = {{ keystone_alayanew_admin_password }} +region_name = RegionOne + +[cephfs] +url = {{ dingo_command_cephfs_url }} +vol_name = {{ dingo_command_cephfs_vol_name }} +user_name = {{ dingo_command_cephfs_user_name }} +password = {{ dingo_command_cephfs_password }} +admin_username= {{ dingo_command_cephfs_admin_username }} +admin_password= {{ dingo_command_cephfs_admin_password }} +ssh_host={{ dingo_command_cephfs_ssh_host }} +ssh_user={{ dingo_command_cephfs_ssh_user }} +ssh_password={{ dingo_command_cephfs_ssh_password }} +ssh_port={{ dingo_command_cephfs_ssh_port }} +file_system_name={{ dingo_command_cephfs_file_system_name }} +file_limit = {{ dingo_command_cephfs_file_limit }} +mount_host={{ dingo_command_cephfs_mount_host }} +group_name={{ dingo_command_cephfs_group_name }} + + +[gpfs] +url = {{ dingo_command_gpfs_url }} +user_name = {{ dingo_command_gpfs_user_name }} 
+password = {{ dingo_command_gpfs_password }} +file_system_name = {{ dingo_command_gpfs_file_system_name }} +baremetal_filesystem_name = {{ dingo_command_gpfs_baremetal_filesystem_name }} +file_limit = {{ dingo_command_gpfs_file_limit }} +mount_host = {{ dingo_command_gpfs_mount_host }} + +[kafka] +kafka_broker_list = {{ kafka_broker_list }} + +[openclaw] +image_id = {{ openclaw_openstack_image_id }} +project_id = {{ openclaw_openstack_project_id }} +user_id = {{ openclaw_openstack_user_id }} +password = {{ openclaw_openstack_password }} +security_group_id = {{ openclaw_openstack_security_group_id }} +netowrk_id = {{ openclaw_openstack_network_id }} +subnet_id = {{ openclaw_openstack_subnet_id }} +ssh_user = {{ openclaw_ssh_user }} + +[alert_platform] +alert_platform_url = {{ alert_platform_url }} diff --git a/ansible/roles/dingo-command/templates/dingo-bear/gunicorn.py b/ansible/roles/dingo-command/templates/dingo-bear/gunicorn.py new file mode 100644 index 0000000000..4ecccd17fd --- /dev/null +++ b/ansible/roles/dingo-command/templates/dingo-bear/gunicorn.py @@ -0,0 +1,55 @@ +# Copyright 2022 99cloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import multiprocessing +import os +os.environ['OPENBLAS_NUM_THREADS'] = '1' + +bind = "0.0.0.0:38887" +workers = 4 +worker_class = "uvicorn.workers.UvicornWorker" +timeout = 300 +keepalive = 5 +reuse_port = True +proc_name = "dingo-command" + +# Use gunicorn native log file settings instead of logconfig_dict file handlers, +# which are unreliable with RotatingFileHandler in gunicorn's worker model. +accesslog = "/var/log/dingo-bear/dingo-bear-access.log" +errorlog = "/var/log/dingo-bear/dingo-bear-error.log" +loglevel = "info" + +logconfig_dict = { + "version": 1, + "disable_existing_loggers": False, + "root": {"level": "INFO", "handlers": ["console"]}, + "loggers": { + "gunicorn.error": {"level": "INFO", "handlers": ["console"], "propagate": 0}, + "gunicorn.access": {"level": "INFO", "handlers": ["console"], "propagate": 0}, + }, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "level": "INFO", + "formatter": "generic", + }, + }, + "formatters": { + "generic": { + "format": "%(asctime)s.%(msecs)03d %(process)d %(levelname)s [-] %(message)s", + "datefmt": "[%Y-%m-%d %H:%M:%S %z]", + "class": "logging.Formatter", + } + }, +} diff --git a/ansible/roles/dingo-command/templates/dingo-bear/nginx.conf.j2 b/ansible/roles/dingo-command/templates/dingo-bear/nginx.conf.j2 new file mode 100644 index 0000000000..4410f1ecf0 --- /dev/null +++ b/ansible/roles/dingo-command/templates/dingo-bear/nginx.conf.j2 @@ -0,0 +1,28 @@ +server { + listen 30000; + root /opt/dingo-aurora/frontend/dist; + index index.html; + + # SPA fallback + location / { + try_files $uri $uri/ /index.html; + } + + # Proxy API requests to FastAPI backend + location /v1 { + proxy_pass http://127.0.0.1:38887; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_read_timeout 600s; + proxy_send_timeout 600s; + } + + # Proxy Skyline for login (strip /skyline prefix) + location /skyline/ { + proxy_pass 
{{ dingo_bear_skyline_url }}/; + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } +} diff --git a/ansible/roles/dingo-command/templates/dingo-bear/supervisord.conf b/ansible/roles/dingo-command/templates/dingo-bear/supervisord.conf new file mode 100644 index 0000000000..0a2f5d6262 --- /dev/null +++ b/ansible/roles/dingo-command/templates/dingo-bear/supervisord.conf @@ -0,0 +1,43 @@ +[supervisord] +nodaemon=true +logfile=/var/log/supervisord.log +pidfile=/var/run/supervisord.pid + +[unix_http_server] +file=/var/run/supervisor.sock + +[program:fastapi] +command=/usr/local/bin/gunicorn -c /etc/dingo-bear/gunicorn.py dingo_command.main:app +directory=/opt/dingo-aurora +autostart=true +autorestart=true +stdout_logfile=/var/log/dingo-bear/dingo-bear.log +stderr_logfile=/var/log/dingo-bear/dingo-bear.err + +[program:celery] +command=celery -A dingo_command.celery_api.workers worker -n dingo-bear@%(host_node_name)s -Q dingo-bear --loglevel=info --pool=threads --concurrency=16 +directory=/opt/dingo-aurora +autostart=true +autorestart=true +stdout_logfile=/var/log/dingo-bear/celery.log +stderr_logfile=/var/log/dingo-bear/celery-err.log + +[program:nginx] +command=nginx -g "daemon off;" +autostart=true +autorestart=true +stdout_logfile=/var/log/dingo-bear/nginx-access.log +stderr_logfile=/var/log/dingo-bear/nginx-error.log + +[program:flower] +command=celery -A dingo_command.celery_api.workers flower --port=5555 +directory=/opt/dingo-aurora +stdout_logfile=/var/log/dingo-bear/flower.log +stderr_logfile=/var/log/dingo-bear/flower-err.log + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[supervisorctl] +serverurl=unix:///var/run/supervisor.sock +prompt=dingoops-supervisor diff --git a/ansible/roles/dingo-command/templates/dingo-command.conf.j2 b/ansible/roles/dingo-command/templates/dingo-command.conf.j2 index 
35a4d3e6e4..e01df9b34c 100644 --- a/ansible/roles/dingo-command/templates/dingo-command.conf.j2 +++ b/ansible/roles/dingo-command/templates/dingo-command.conf.j2 @@ -1,6 +1,7 @@ [DEFAULT] # region名称 黄海是RegionOne 其他环境按照部署长上去 region_name = {{ dingo_command_region_name }} +aidc_name = {{ dingo_command_aidc_name }} # 是否是中心region的标志 黄海是True 其他是False center_region_flag = {{ dingo_command_center_region_flag }} cluster_work_dir = /var/lib/dingo-command @@ -25,6 +26,15 @@ controller_nodes = {{ controller_nodes }} nameservers = {{ nameservers }} kubeconfig_path = /etc/dingo-command/management.kubeconfig datastore = {{ dingo_command_datastore }} +consul_address = {{ consul_address }} +consul_token = {{ consul_token }} +region_name = {{ dingo_command_region_name }} +prometheus_url = {{ prometheus_url }} +openstack_lb_provider = {{ openstack_lb_provider }} +openstack_lb_method = {{ openstack_lb_method }} +maas_base_url = {{ maas_base_url }} +maas_api_key = {{ maas_api_key }} +maas_llm_url = {{ maas_llm_url }} [database] connection = mysql+pymysql://{{ dingo_command_database_user }}:{{ dingo_command_database_password }}@{{ dingo_command_database_address }}/dingoops?charset=utf8mb3 @@ -41,7 +51,7 @@ memcached_key_prefix = bigscreen_metrics_ redis_ip = {{ api_interface_address }} redis_port = 6379 redis_password = {{ redis_master_password }} -sentinel_url = "sentinel://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% else %};sentinel://:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}" +sentinel_url = "sentinel://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}:{{ redis_master_password }}@{{ 'api' | kolla_address(host) }}:{{ redis_sentinel_port }}{% else %};sentinel://:{{ redis_master_password }}@{{ 'api' | kolla_address(host) 
}}:{{ redis_sentinel_port }}{% endif %}{% endfor %}" [ironic] @@ -115,8 +125,8 @@ verify_ssl={{ dingo_command_harbor_verify_ssl }} storage_limit={{ dingo_command_harbor_storage_limit }} [METRIC] -write_dingo = true -write_clickhouse = true +write_dingo = {{ metric_write_dingo }} +write_clickhouse = {{ metric_write_clickhouse }} [CLICKHOUSE] host = {{ aliyun_clickhouse_host }} @@ -141,8 +151,8 @@ auth_type = password project_domain = Default user_domain = Default project_name = service -user_name = {{ keystone_admin_user }} -password = {{ keystone_admin_password }} +user_name = {{ keystone_alayanew_admin_user }} +password = {{ keystone_alayanew_admin_password }} region_name = RegionOne [cephfs] @@ -150,3 +160,39 @@ url = {{ dingo_command_cephfs_url }} vol_name = {{ dingo_command_cephfs_vol_name }} user_name = {{ dingo_command_cephfs_user_name }} password = {{ dingo_command_cephfs_password }} +admin_username= {{ dingo_command_cephfs_admin_username }} +admin_password= {{ dingo_command_cephfs_admin_password }} +ssh_host={{ dingo_command_cephfs_ssh_host }} +ssh_user={{ dingo_command_cephfs_ssh_user }} +ssh_password={{ dingo_command_cephfs_ssh_password }} +ssh_port={{ dingo_command_cephfs_ssh_port }} +file_system_name={{ dingo_command_cephfs_file_system_name }} +file_limit = {{ dingo_command_cephfs_file_limit }} +mount_host={{ dingo_command_cephfs_mount_host }} +group_name={{ dingo_command_cephfs_group_name }} + + +[gpfs] +url = {{ dingo_command_gpfs_url }} +user_name = {{ dingo_command_gpfs_user_name }} +password = {{ dingo_command_gpfs_password }} +file_system_name = {{ dingo_command_gpfs_file_system_name }} +baremetal_filesystem_name = {{ dingo_command_gpfs_baremetal_filesystem_name }} +file_limit = {{ dingo_command_gpfs_file_limit }} +mount_host = {{ dingo_command_gpfs_mount_host }} + +[kafka] +kafka_broker_list = {{ kafka_broker_list }} + +[openclaw] +image_id = {{ openclaw_openstack_image_id }} +project_id = {{ openclaw_openstack_project_id }} +user_id = {{ 
openclaw_openstack_user_id }} +password = {{ openclaw_openstack_password }} +security_group_id = {{ openclaw_openstack_security_group_id }} +netowrk_id = {{ openclaw_openstack_network_id }} +subnet_id = {{ openclaw_openstack_subnet_id }} +ssh_user = {{ openclaw_ssh_user }} + +[alert_platform] +alert_platform_url = {{ alert_platform_url }} diff --git a/ansible/roles/octavia-certificates/defaults/main.yml b/ansible/roles/octavia-certificates/defaults/main.yml index 2061dbe438..283f69bf9f 100644 --- a/ansible/roles/octavia-certificates/defaults/main.yml +++ b/ansible/roles/octavia-certificates/defaults/main.yml @@ -35,7 +35,7 @@ octavia_certs_client_ca_organizational_unit: "{{ octavia_certs_organizational_un octavia_certs_client_ca_common_name: client-ca.example.org # Client certificate. -octavia_certs_client_expiry: 365 +octavia_certs_client_expiry: 3650 octavia_certs_client_req_country: "{{ octavia_certs_country }}" octavia_certs_client_req_state: "{{ octavia_certs_state }}" octavia_certs_client_req_organization: "{{ octavia_certs_organization }}" diff --git a/ansible/roles/octavia/defaults/main.yml b/ansible/roles/octavia/defaults/main.yml index eb88790b98..9f2952ace6 100644 --- a/ansible/roles/octavia/defaults/main.yml +++ b/ansible/roles/octavia/defaults/main.yml @@ -370,7 +370,7 @@ octavia_amp_network: enable_dhcp: yes # Octavia management network subnet CIDR. 
-octavia_amp_network_cidr: 10.1.0.0/24 +octavia_amp_network_cidr: 10.1.0.0/18 octavia_amp_router: name: lb-mgmt-router diff --git a/ansible/site.yml b/ansible/site.yml index 2d63556192..daa1ac6ea6 100644 --- a/ansible/site.yml +++ b/ansible/site.yml @@ -352,6 +352,11 @@ tasks_from: loadbalancer tags: dingo-command when: enable_dingo_command | bool + - include_role: + name: dingo-command + tasks_from: loadbalancer + tags: dingo-bear + when: enable_dingo_command | bool - include_role: name: skyline tasks_from: loadbalancer @@ -1140,4 +1145,13 @@ serial: '{{ kolla_serial|default("0") }}' roles: - { role: dingo-command, - tags: dingo-command } \ No newline at end of file + tags: dingo-command } + +- name: Apply role dingo-command (dingo-bear) + gather_facts: false + hosts: + - dingo-command + serial: '{{ kolla_serial|default("0") }}' + roles: + - { role: dingo-command, + tags: dingo-bear } diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml index 8ab0b06a50..b4a08c1bce 100644 --- a/etc/kolla/globals.yml +++ b/etc/kolla/globals.yml @@ -65,6 +65,9 @@ dingo_command_ceph_mds_namespace: "dingoops-cephfs-hdd" dingo_command_ceph_mount_path: "/var/lib/dingo-command" dingo_command_ceph_fs_name: "/dingoops-test" + + + #dingo_command_keystone_user: "" #dingo_command_keystone_password: "" diff --git a/tests/templates/global.conf.j2 b/tests/templates/global.conf.j2 index 64f8aa2107..86e90dbb25 100644 --- a/tests/templates/global.conf.j2 +++ b/tests/templates/global.conf.j2 @@ -5,3 +5,5 @@ # is no backup to respond, most often with Placement, but also # with Neutron and Nova. http_request_max_retries = 9 + +