diff --git a/README.md b/README.md
index 6d9fe88694..6419a781c3 100644
--- a/README.md
+++ b/README.md
@@ -57,7 +57,7 @@ $ docker volume create pmm-data
 3. Run PMM server container
 ```bash
 $ docker run --detach --restart always \
---publish 443:443 \
+--publish 443:8443 \
 --volume pmm-data:/srv \
 --name pmm-server \
 percona/pmm-server:3
@@ -99,4 +99,4 @@ As a general rule of thumb, please try to create bug reports that are:

 ## Licensing

-Percona is dedicated to **keeping open source open**. Wherever possible, we strive to include permissive licensing for both our software and documentation. For this project, we are using the [GNU AGPLv3](https://github.com/percona/pmm/blob/main/LICENSE) license.
+Percona is dedicated to **keeping open source open**. Wherever possible, we strive to include permissive licensing for both our software and documentation. For this project, we are using the [GNU AGPLv3](./LICENSE) license.
diff --git a/api-tests/server/version_test.go b/api-tests/server/version_test.go
index 55294af65c..b0e2e1c141 100644
--- a/api-tests/server/version_test.go
+++ b/api-tests/server/version_test.go
@@ -34,7 +34,6 @@ import (
 func TestVersion(t *testing.T) {
 	t.Parallel()
 	paths := []string{
-		"managed/v1/version",
 		"v1/version",
 	}
 	for _, path := range paths {
diff --git a/api/nginx/nginx.conf b/api/nginx/nginx.conf
index 2e385864ba..730479d2a9 100644
--- a/api/nginx/nginx.conf
+++ b/api/nginx/nginx.conf
@@ -3,7 +3,7 @@
 daemon off;

-error_log stderr info;
+error_log /dev/stderr info;
 # error_log stderr debug;

 events {
diff --git a/build/Makefile b/build/Makefile
index 4bf4ea0d83..8d87e996e2 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -66,36 +66,39 @@ pmm-ami:
 		-var 'pmm_client_repo_name=percona-experimental-x86_64' \
 		-var 'pmm_server_repo=experimental' \
 		-only amazon-ebs -color=false \
-		packer/pmm.json
+		packer/pmm.json

 pmm-ami-rc:
 	docker run --rm -v ${HOME}/.aws:/root/.aws -v `pwd`:/build -w /build hashicorp/packer:${PACKER_VERSION} \
 		build -var 'pmm_client_repos=original testing' \
-		-var 'pmm_client_repo_name=percona-testing-x86_64' \
-		-var 'pmm_server_repo=testing' \
-		-only amazon-ebs '-color=false' \
-		packer/pmm.json
+		-var 'pmm_client_repo_name=percona-testing-x86_64' \
+		-var 'pmm_server_repo=testing' \
+		-only amazon-ebs '-color=false' \
+		packer/pmm.json

 pmm-ami-el9:
 	mkdir -p update && \
-	cp -r ../update/ansible/playbook/* update/ && \
-	sed -i 's|become_method: su|become_method: sudo|g' update/tasks/roles/postgres/tasks/main.yml && \
+	sed -i 's|become_method: su|become_method: sudo|g' ./roles/postgres/tasks/main.yml && \
 	docker run --rm -v ${HOME}/.aws:/root/.aws -v `pwd`:/build -w /build hashicorp/packer:${PACKER_VERSION} \
 		build -var 'pmm_client_repos=original experimental' \
 		-var 'pmm_client_repo_name=percona-experimental-x86_64' \
 		-var 'pmm_server_repo=experimental' \
 		-only amazon-ebs -color=false \
-		packer/pmm.el9.json
+		packer/pmm.el9.json

 pmm-ami-el9-rc:
 	mkdir -p update && \
-	cp -r ../update/ansible/playbook/* update/ && \
-	sed -i 's|become_method: su|become_method: sudo|g' update/tasks/roles/postgres/tasks/main.yml && \
-	docker run --rm -v ${HOME}/.aws:/root/.aws -v `pwd`:/build -w /build hashicorp/packer:${PACKER_VERSION} \
-	build -var 'pmm_client_repos=original testing' \
-	-var 'pmm_client_repo_name=percona-testing-x86_64' \
+	sed -i 's|become_method: su|become_method: sudo|g' ./roles/postgres/tasks/main.yml && \
+	docker run --rm -v ${HOME}/.aws:/root/.aws -v `pwd`:/build -w /build hashicorp/packer:${PACKER_VERSION} \
+		build -var 'pmm_client_repos=original testing' \
+		-var 'pmm_client_repo_name=percona-testing-x86_64' \
 		-var 'pmm_server_repo=testing' \
-		-only amazon-ebs '-color=false' \
-		packer/pmm.el9.json
+		-only amazon-ebs '-color=false' \
+		packer/pmm.el9.json

 ## ----------------- PACKER ------------------
+
+check: ## Run required checkers and linters
+	ansible-playbook --syntax-check ansible/pmm-docker/update.yml
+	ansible-playbook --check ansible/pmm-docker/update.yml
+	ansible-lint ansible/pmm-docker/update.yml
diff --git a/update/.ansible-lint b/build/ansible/.ansible-lint
similarity index 100%
rename from update/.ansible-lint
rename to build/ansible/.ansible-lint
diff --git a/build/ansible/ansible.cfg b/build/ansible/ansible.cfg
new file mode 100644
index 0000000000..60507f8031
--- /dev/null
+++ b/build/ansible/ansible.cfg
@@ -0,0 +1,11 @@
+# This is the default ansible.cfg file.
+# It is necessary for Ansible to work properly when it acts as the 'pmm' user.
+# Otherwise, it will fail with a 'Permission denied' error since the default paths are '/root/.ansible/tmp'
+# Ref: https://github.com/ansible/ansible/blob/stable-2.9/examples/ansible.cfg
+[defaults]
+
+remote_tmp = /tmp
+local_tmp = /tmp
+
+# additional paths to search for roles in, colon separated
+roles_path = /opt/ansible/roles
diff --git a/update/ansible/playbook/tasks/files/maintenance.html b/build/ansible/pmm-docker/files/maintenance.html
similarity index 100%
rename from update/ansible/playbook/tasks/files/maintenance.html
rename to build/ansible/pmm-docker/files/maintenance.html
diff --git a/update/ansible/playbook/tasks/init.yml b/build/ansible/pmm-docker/init.yml
similarity index 58%
rename from update/ansible/playbook/tasks/init.yml
rename to build/ansible/pmm-docker/init.yml
index 5c27f29b2d..4eba8ceb66 100644
--- a/update/ansible/playbook/tasks/init.yml
+++ b/build/ansible/pmm-docker/init.yml
@@ -2,8 +2,10 @@
 # This playbook contains tasks executed during initialization PMM Server
 - hosts: localhost
   become: true
+  become_method: su
+  become_user: pmm
   gather_facts: true
-  tasks:
-    - name: Run initialization role
-      include_role:
-        name: initialization
+
+
+  roles:
+    - initialization
diff --git a/update/ansible/playbook/tasks/update.yml b/build/ansible/pmm-docker/update.yml
similarity index 50%
rename from update/ansible/playbook/tasks/update.yml
rename to build/ansible/pmm-docker/update.yml
index 745d55cf6a..8189a721dd 100644
--- a/update/ansible/playbook/tasks/update.yml
+++ b/build/ansible/pmm-docker/update.yml
@@ -9,19 +9,19 @@
     PATH: /usr/local/bin:{{ ansible_env.PATH }}

   pre_tasks:
-    - name: detect /srv/pmm-distribution
+    - name: Detect /srv/pmm-distribution
       stat:
         path: /srv/pmm-distribution
       no_log: true
       register: srv_pmm_distribution

-    - name: detect containers
+    - name: Detect container environment
      set_fact:
         is_docker: '{{ lookup("file", "/srv/pmm-distribution") == "docker" }}'
       no_log: true
       when: srv_pmm_distribution.stat.exists

-    - name: force container
+    - name: Set the variable to true if undefined
       set_fact:
         is_docker: true
       when: is_docker is undefined
@@ -31,19 +31,10 @@
       copy:
         src: maintenance.html
         dest: /usr/share/pmm-server/maintenance/
+        owner: pmm
+        group: pmm
         mode: 0644

-    - name: Cleanup yum metadata
-      command: yum clean metadata
-      become: true
-      tags:
-        - skip_ansible_lint
-
-    - name: Upgrade supervisor config
-      copy:
-        src: pmm.ini
-        dest: /etc/supervisord.d/pmm.ini
-
     # restart pmm-managed-init and pmm-managed first as they may update supervisord configuration on start
     - name: Generate new supervisor config
       command: pmm-managed-init
@@ -57,59 +48,17 @@
         option: autostart
         value: "false"

-    - name: Upgrade supervisord config
-      copy:
-        src: supervisord.ini
-        dest: /etc/supervisord.d/supervisord.ini
-
-    - name: Remove supervisord
-      file:
-        state: absent
-        path: /etc/supervisord.d/supervisord.ini
-      when: not is_docker
-
-    # Set forking type to 'simple'
-    - name: Configure systemd
-      when: not is_docker
-      copy:
-        src: supervisord.service
-        dest: /usr/lib/systemd/system/supervisord.service
-        mode: 0644
-
-    - name: Remove old supervisord service configuration
-      when: not is_docker
-      file:
-        path: /etc/systemd/system/supervisord.service
-        state: absent
-
-    # Start the services
-    - name: Enable supervisord | Make the service persist between reboots
-      when: not is_docker
-      systemd:
-        name: supervisord
-        enabled: yes
-
-    - name: Supervisord start | Start supervisord service for AMI/OVF
-      when: not is_docker
-      systemd:
-        name: supervisord
-        state: started # supervisord may already be running
-        daemon_reload: yes
-
     - name: Check that supervisor socket exists
       stat:
         path: /run/supervisor/supervisor.sock
-      register: is_supervisor_running
-
-    - name: Start supervisord for docker
-      when:
-        - is_docker
-        - not is_supervisor_running.stat.exists
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
+      register: supervisor_socket
+
+    # During build time, this will be the first start of supervisord.
+    - name: Start supervisord
+      when: not supervisor_socket.stat.exists
       shell: supervisord -c /etc/supervisord.conf &

-    - name: Wait until postgres port is present before continuing
+    - name: Wait until postgres port is present
       wait_for:
         host: localhost
         port: 5432
@@ -118,80 +67,38 @@
     - name: Run initialization playbook
       include_role:
         name: initialization
-      vars:
-        ui_upgrade: True
-
-    - name: Enable crond service
-      when: not is_docker
-      service:
-        name: crond
-        state: started
-        enabled: yes
-
-    - name: Increase number of open files for jobs
-      when: not is_docker
-      ini_file:
-        dest: /etc/supervisord.conf
-        section: supervisord
-        option: minfds
-        value: "800000"

     # See https://github.com/Supervisor/supervisor/issues/1264 for explanation
     # why we do reread + stop/remove/add instead of using supervisorctl Ansible module.
-    - name: Reread supervisord configuration EL9
-      when:
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
+    - name: Reread supervisord configuration
       command: supervisorctl reread
+      become: true
+      become_user: pmm
+      become_method: su
       register: reread_result
       changed_when: "'No config updates to processes' not in reread_result.stdout"

     - name: Check reread results
       debug: var=reread_result.stdout_lines

-    - name: Restart pmm-managed EL9
-      when:
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
-      command: supervisorctl {{ item }} pmm-managed
+    - name: Restart pmm-managed
+      command: "supervisorctl {{ item }} pmm-managed"
       become: true
-      changed_when: true
-      with_items: ["stop", "remove", "add"]
-
-    # give pmm-managed time to update supervisord configuration,
+      become_user: pmm
+      become_method: su
+      loop:
+        - stop
+        - remove
+        - add
+
+    # Give pmm-managed time to update supervisord configuration,
     # and give update UI time to catch up after pmm-managed restart
     - name: Wait for pmm-managed
       pause: seconds=10

     # Fix things that should be fixed before restarts.
-    - name: Stop systemd pmm-agent service, if running
-      systemd:
-        name: pmm-agent
-        state: stopped
-        enabled: no
-      when: not is_docker
-
-    # https://jira.percona.com/browse/PMM-9298
-    - name: Copy rezise-xfs file for lvm
-      copy:
-        src: resize-xfs-lvm
-        dest: /var/lib/cloud/scripts/per-boot/resize-xfs
-        mode: 0755
-      when: not is_docker
-
-    # https://jira.percona.com/browse/PMM-5271
-    - name: Check volume size
-      when: not is_docker
-      replace:
-        dest: /var/lib/cloud/scripts/per-boot/resize-xfs
-        regexp: "set -o errexit"
-        replace: ""
-
-    - name: Reread supervisord configuration again EL9
-      when:
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
+    - name: Reread supervisord configuration again
       command: supervisorctl reread
       register: reread_result
       changed_when: "'No config updates to processes' not in reread_result.stdout"
@@ -199,14 +106,12 @@
     - name: Check reread results
       debug: var=reread_result.stdout_lines

-    - name: Restart services EL9
-      when:
-        - is_docker
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
+    - name: Restart services
       command: supervisorctl {{ item.1 }} {{ item.0 }}
       become: true
-      changed_when: true
+      become_user: pmm
+      become_method: su
+      # changed_when: true
       with_nested:
         - - nginx
           - grafana
@@ -237,31 +142,27 @@
         query: UPDATE "user" SET id='1' WHERE login='admin';
       when: not ansible_check_mode

-    # we need to put this step as one of the last steps, because it removes pmm.ini
-    - name: Remove redundant packages
-      yum:
-        state: absent
-        name:
-          - logrotate # https://jira.percona.com/browse/PMM-7627
+    # - name: Remove redundant packages
+    #   yum:
+    #     state: absent
+    #     name:
+    #       - logrotate # https://jira.percona.com/browse/PMM-7627

     # Regenerating pmm.ini and enabling pmm-update-perform-init
     - name: Generate new supervisor config
       command: pmm-managed-init
+      become: true
+      become_user: pmm
+      become_method: su
       register: managed_init_result
       changed_when: True

-    - name: Reread pmm-update-perform-init supervisor config EL9
-      when:
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
+    - name: Reread pmm-update-perform-init supervisor config
      command: supervisorctl reread
       register: reread_init__result
       changed_when: "'No config updates to processes' not in reread_init__result.stdout"

-    - name: Update/restart other services EL9
-      when:
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
+    - name: Update/restart other services
       command: supervisorctl update
       register: update_result
       changed_when: "'updated' in update_result.stdout"
@@ -281,10 +182,7 @@
     # SIGUSR2 is sent to supervisord by pmm-managed right before the update for logging to work correctly.
     # We use that fact to show what was restarted during the update.
-    - name: Get supervisord logs EL9
-      when:
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
+    - name: Get supervisord logs
       shell: supervisorctl maintail -100000 | tac | awk '!flag; /received SIGUSR2/{flag = 1};' | tac
       register: maintail_result
       changed_when: False
diff --git a/update/ansible/playbook/tasks/create-lvm.yml b/build/ansible/pmm/create-lvm.yml
similarity index 95%
rename from update/ansible/playbook/tasks/create-lvm.yml
rename to build/ansible/pmm/create-lvm.yml
index 93a072b38f..0126562c0a 100644
--- a/update/ansible/playbook/tasks/create-lvm.yml
+++ b/build/ansible/pmm/create-lvm.yml
@@ -1,3 +1,4 @@
+# TODO: This role seems to no longer be used. Verify and remove.
 - hosts: localhost
   become: true
   gather_facts: true
diff --git a/update/ansible/playbook/tasks/files/cloud.cfg b/build/ansible/pmm/files/cloud.cfg
similarity index 100%
rename from update/ansible/playbook/tasks/files/cloud.cfg
rename to build/ansible/pmm/files/cloud.cfg
diff --git a/update/ansible/playbook/tasks/files/resize-xfs-lvm b/build/ansible/pmm/files/resize-xfs-lvm
similarity index 100%
rename from update/ansible/playbook/tasks/files/resize-xfs-lvm
rename to build/ansible/pmm/files/resize-xfs-lvm
diff --git a/update/ansible/playbook/tasks/supervisord.service b/build/ansible/pmm/files/supervisord.service
similarity index 100%
rename from update/ansible/playbook/tasks/supervisord.service
rename to build/ansible/pmm/files/supervisord.service
diff --git a/build/ansible/pmm/post-build-actions.yml b/build/ansible/pmm/post-build-actions.yml
index e2cdbfdcd8..038e951b6a 100644
--- a/build/ansible/pmm/post-build-actions.yml
+++ b/build/ansible/pmm/post-build-actions.yml
@@ -1,67 +1,32 @@
 ---
-# This playbook is used as a post build actions for all pmm images (AMI/OVF/Docker).
+# This playbook runs post-build tasks for all pmm distributions (AMI/OVF/Docker/DigitalOcean).

-- hosts: localhost
+- hosts: all
   become: yes
   gather_facts: yes
   vars:
     pmm_client_repos: "original testing"
     pmm_client_repos_final: "original release"
+    pmm_server_distribution: "docker"

   tasks:
-    # pmm-managed checks that if /srv/pmm-distribution exist, it contains "docker", "ovf", or "ami" (all lowercase)
-    - name: Detect distribution | Create '/srv/pmm-distribution' file for Docker
-      when: ansible_virtualization_type == "docker"
+    # pmm-managed checks that if /srv/pmm-distribution exists, it contains "docker", "ovf", "ami" or "digitalocean" - all lowercase.
+    # TODO: refactor the build pipelines to call post-build.yml with the distribution name provided in the variable (above).
+    # https://jira.percona.com/browse/PMM-4991
+    - name: Create a distribution file for Docker
       copy:
-        content: "docker"
-        dest: /srv/pmm-distribution
-
-    - name: Detect distribution | Create '/srv/pmm-distribution' file for OVF
-      when: ansible_virtualization_type == "virtualbox"
-      copy:
-        content: "ovf"
-        dest: /srv/pmm-distribution
-
-    # TODO https://jira.percona.com/browse/PMM-4991
-    - name: Detect distribution | Create '/srv/pmm-distribution' file for AMI
-      when: >
-        ( ansible_virtualization_type == "xen"
-        or ansible_virtualization_type == "kvm" )
-        and ansible_system_vendor != "DigitalOcean"
-      copy:
-        content: "ami"
-        dest: /srv/pmm-distribution
-
-    - name: Detect distribution | Create '/srv/pmm-distribution' file for DigitalOcean
-      when: ansible_system_vendor == "DigitalOcean"
-      copy:
-        content: "digitalocean"
+        content: "{{ pmm_server_distribution }}"
         dest: /srv/pmm-distribution
+        owner: pmm
+        group: pmm

-    - name: Disable repo | Disable testing repo for pmm-client
+    - name: Disable testing repo for pmm-client
       command: percona-release disable {{ pmm_client_repos }}

-    - name: Enable repo | Enable release repo for pmm-client
+    - name: Enable release repo for pmm-client
       command: percona-release enable {{ pmm_client_repos_final }}

-    - name: Install glibc-langpack-en | EL9
-      dnf:
-        name: glibc-langpack-en
-        state: present
-        update_cache: yes
-      when:
-        - ansible_virtualization_type != "docker"
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
-
-    - name: Set locale to en_US.utf8 | EL9
-      command: localectl set-locale LANG=en_US.utf8
-      when:
-        - ansible_virtualization_type != "docker"
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
-
-    - name: pmm-agent | Setup pmm-agent
+    - name: Set up pmm-agent
       command: >
         pmm-agent setup
         --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml
@@ -70,29 +35,26 @@
         --server-address=127.0.0.1:8443
         --server-insecure-tls

-    - name: Reread supervisord configuration EL9
-      when:
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
-      command: /usr/local/bin/supervisorctl reread
+    - name: Reread supervisord configuration
+      command: supervisorctl reread
+      become: true
+      become_user: pmm
+      become_method: su
       register: reread_result
       changed_when: "'No config updates to processes' not in reread_result.stdout"

     - name: See which service configs changed
       debug: var=reread_result.stdout_lines

-    - name: Stop pmm-managed before deleting the database EL9
-      when:
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
+    - name: Stop pmm-managed before deleting the database
       supervisorctl:
         name: pmm-managed
         state: stopped
+      become: true
+      become_user: pmm
+      become_method: su

-    - name: Remove pmm-managed database EL9
-      when:
-        - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-        - ansible_distribution_major_version == '9'
+    - name: Remove pmm-managed database
       postgresql_db:
         login_user: postgres
         name: pmm-managed
@@ -104,16 +66,14 @@
         name: pmm-managed
         state: absent

-    - name: Stop supervisord service for AMI/OVF
-      when: ansible_virtualization_type != "docker"
-      service: name=supervisord state=stopped enabled=yes
-
     - name: Stop supervisord service for docker
-      when: ansible_virtualization_type == "docker"
-      shell: supervisorctl shutdown
+      command: supervisorctl shutdown
+      become: true
+      become_user: pmm
+      become_method: su

     - name: Cleanup dnf cache
-      shell: dnf clean all
+      command: dnf clean all

     # "yum clean all" function will only remove cache from configured yum repositories
     # Details: https://bugzilla.redhat.com/show_bug.cgi?id=1357083
@@ -123,8 +83,10 @@
         path: /var/cache/dnf

     - name: Cleanup build logs and data
-      file: path={{ item }} state=absent
-      with_items:
+      file:
+        path: "{{ item }}"
+        state: absent
+      loop:
         - /srv/logs
         - /tmp/RPMS
         - /var/log/dnf.log
@@ -141,6 +103,7 @@
       loop:
         - postgres
        - clickhouse
+        - nginx

     - name: Clean Clickhouse dir
       shell: find /srv/clickhouse -mindepth 1 -maxdepth 1 -print0 | xargs -0 rm -rf --
@@ -158,7 +121,7 @@
         owner: pmm
         group: pmm
         mode: 0775
-      with_items:
+      loop:
         - absent
         - directory
@@ -170,11 +133,11 @@
         group: pmm
         mode: 0775

-    # This is a temp workaround to make sure that the file exists and has the correct permissions.
-    # TODO: Remove, as it won't be needed once the main process is run as `pmm` user.
-    - name: Create nginx log file
+    # nginx needs to be able to write to /var/lib/nginx, but it's owned by root.
+    - name: Change ownership of nginx dirs
       file:
-        path: /srv/logs/nginx.log
-        state: touch
+        path: /var/lib/nginx
+        state: directory
         group: pmm
         owner: pmm
+        recurse: yes
diff --git a/build/ansible/pmm/systemd.yml b/build/ansible/pmm/systemd.yml
new file mode 100644
index 0000000000..8d16d20a2f
--- /dev/null
+++ b/build/ansible/pmm/systemd.yml
@@ -0,0 +1,43 @@
+---
+# This playbook contains tasks executed during PMM Server update in non-docker environments.
+# TODO: refactor from supervisord to systemd if necessary.
+# NOTE: it's currently unused, just a placeholder for future use.
+- hosts: all
+  become: true
+  remote_user: root
+  gather_facts: true
+
+  # TODO: replace supervisord.service with pmm.service
+  tasks:
+    # Note: forking type must be set to 'simple'
+    - name: Configure supervisord
+      copy:
+        src: supervisord.service
+        dest: /usr/lib/systemd/system/supervisord.service
+        mode: 0644
+
+    # Start the services
+    - name: Enable supervisord service to persist between reboots
+      systemd:
+        name: supervisord
+        enabled: yes
+
+    - name: Start supervisord service for AMI/OVF
+      systemd:
+        name: supervisord
+        state: started # supervisord may already be running
+        daemon_reload: yes
+
+    - name: Enable crond service
+      service:
+        name: crond
+        state: started
+        enabled: yes
+
+    # https://jira.percona.com/browse/PMM-9298
+    - name: Copy resize-xfs file for lvm
+      copy:
+        src: resize-xfs-lvm
+        dest: /var/lib/cloud/scripts/per-boot/resize-xfs
+        mode: 0755
+        force: true
diff --git a/update/ansible/playbook/tasks/roles/dashboards_upgrade/tasks/main.yml b/build/ansible/roles/dashboards/tasks/main.yml
similarity index 58%
rename from update/ansible/playbook/tasks/roles/dashboards_upgrade/tasks/main.yml
rename to build/ansible/roles/dashboards/tasks/main.yml
index 3e1745e998..e0c51bd75e 100644
--- a/update/ansible/playbook/tasks/roles/dashboards_upgrade/tasks/main.yml
+++ b/build/ansible/roles/dashboards/tasks/main.yml
@@ -1,12 +1,12 @@
 ---
 - name: Get plugin list
-  register: plugin_list
   find:
     paths: /usr/share/percona-dashboards/panels/
     depth: 2
     file_type: directory
+  register: plugin_list

-- name: Delete redundant dist folders
+- name: Delete older plugins
   file:
     path: "/srv/grafana/plugins/{{ item['path'].split('/')[-1] }}"
     state: absent
@@ -26,14 +26,12 @@
     mode: 0775
     recurse: yes

-- name: Restart grafana with new plugins EL9
-  supervisorctl:
-    name: grafana
-    state: restarted
+- name: Restart grafana with new plugins
shell: "supervisorctl {{ item }} grafana" become: true - when: - - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux' - - ansible_distribution_major_version == '9' - ignore_errors: true - # TODO: fix the race condition. - # We generate grafana supervisor config in pmm-managed and it may not exist at this stage + become_user: pmm + become_method: su + loop: + - stop + - remove + - add diff --git a/build/ansible/roles/grafana/tasks/main.yml b/build/ansible/roles/grafana/tasks/main.yml index 6a84218c05..e214262520 100644 --- a/build/ansible/roles/grafana/tasks/main.yml +++ b/build/ansible/roles/grafana/tasks/main.yml @@ -48,7 +48,7 @@ - plugins - dashboards -- name: Create change-admin-password command +- name: Copy change-admin-password command copy: src: change-admin-password dest: /usr/local/sbin/change-admin-password diff --git a/update/ansible/playbook/tasks/roles/initialization/tasks/main.yml b/build/ansible/roles/initialization/tasks/main.yml similarity index 68% rename from update/ansible/playbook/tasks/roles/initialization/tasks/main.yml rename to build/ansible/roles/initialization/tasks/main.yml index 3bf62e9910..935f2b9d51 100644 --- a/update/ansible/playbook/tasks/roles/initialization/tasks/main.yml +++ b/build/ansible/roles/initialization/tasks/main.yml @@ -1,10 +1,5 @@ --- # This role contains tasks executed during initialization of PMM Server -- name: Determine type of upgrade - set_fact: - ui_upgrade: False - when: ui_upgrade is undefined - - name: Get current version slurp: src: /srv/grafana/PERCONA_DASHBOARDS_VERSION @@ -38,33 +33,17 @@ debug: msg: "Current version: {{ pmm_current_version }} Image Version: {{ pmm_image_version }}" -# We use current_version_file['failed'] because we don't want to run this on creating container -# and we use pmm_current_version is version(pmm_image_version, '>=') to run it only if upgrade is required -- name: Determine type of upgrade - set_fact: - docker_upgrade: "{{ not ui_upgrade and current_version_file['failed'] != true and not pmm_current_version is version(pmm_image_version, '>=') }}" - -- name: Print Docker upgrade fact - debug: - msg: "Docker upgrade: {{ docker_upgrade }}" - -- name: Enable maintenance mode only for docker upgrade +- name: Enable maintenance mode before upgrade copy: src: maintenance.html dest: /usr/share/pmm-server/maintenance/ owner: pmm group: pmm mode: 0644 - when: docker_upgrade - name: Upgrade dashboards include_role: - name: dashboards_upgrade - when: need_upgrade - -- name: Clickhouse migration - include_role: - name: clickhouse + name: dashboards when: need_upgrade - name: Copy file with image version @@ -77,10 +56,13 @@ remote_src: yes when: need_upgrade -- name: Create backup directory +- name: Create a backup directory file: path: /srv/backup state: directory + owner: pmm + group: pmm + mode: 0775 - name: Wait for PMM to be ready ansible.builtin.uri: @@ -89,7 +71,6 @@ method: GET retries: 120 delay: 1 - when: docker_upgrade - name: Disable maintenance mode file: diff --git a/build/ansible/roles/nginx/files/conf.d/pmm.conf b/build/ansible/roles/nginx/files/conf.d/pmm.conf index 91f077d71e..fca411bbe4 100644 --- a/build/ansible/roles/nginx/files/conf.d/pmm.conf +++ b/build/ansible/roles/nginx/files/conf.d/pmm.conf @@ -179,8 +179,8 @@ } # Swagger UI - rewrite ^/swagger/swagger.json$ /swagger.json permanent; - rewrite ^(/swagger)/(.*)$ /swagger permanent; + rewrite ^/swagger/swagger.json$ $scheme://$http_host/swagger.json permanent; + rewrite ^(/swagger)/(.*)$ 
+    rewrite ^(/swagger)/(.*)$ $scheme://$http_host/swagger permanent;
     location /swagger {
       auth_request off;
       root /usr/share/pmm-managed/swagger;
@@ -222,10 +222,8 @@
     # for minimal compatibility with PMM 1.x
     rewrite ^/ping$ /v1/readyz;
-    rewrite ^/managed/v1/version$ /v1/version;

     # logs.zip in both PMM 1.x and 2.x variants
-    rewrite ^/managed/logs.zip$ /logs.zip;
     location /logs.zip {
       proxy_pass http://managed-json;
       proxy_http_version 1.1;
@@ -253,7 +251,7 @@
       set $feed https://www.percona.com/blog/feed/;
       proxy_pass $feed;

-      proxy_set_header User-Agent "$http_user_agent pmm-server/2.x";
+      proxy_set_header User-Agent "$http_user_agent pmm-server/3.x";
       error_page 500 502 503 504 /pmm-static/local-rss.xml;
     }
 }
diff --git a/build/ansible/roles/nginx/files/nginx.conf b/build/ansible/roles/nginx/files/nginx.conf
index ea60c7bbe5..49f57aa5da 100644
--- a/build/ansible/roles/nginx/files/nginx.conf
+++ b/build/ansible/roles/nginx/files/nginx.conf
@@ -3,7 +3,7 @@
 worker_processes 2;
 daemon off;

-error_log /srv/logs/nginx.log warn;
+error_log /dev/stderr warn;
 pid /run/nginx.pid;

 events {
@@ -23,7 +23,7 @@ http {
                     '$status $body_bytes_sent "$http_referer" '
                     '"$http_user_agent" "$http_x_forwarded_for"';

-    access_log /srv/logs/nginx.log main;
+    access_log /dev/stdout main;

     sendfile on;
     gzip on;
diff --git a/build/ansible/roles/nginx/tasks/main.yml b/build/ansible/roles/nginx/tasks/main.yml
index d3cea0753a..d57a142e73 100644
--- a/build/ansible/roles/nginx/tasks/main.yml
+++ b/build/ansible/roles/nginx/tasks/main.yml
@@ -1,9 +1,6 @@
 ---
 # We already have nginx package in epel repo
 - name: Add Nginx repository
-  when:
-    - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-    - ansible_distribution_major_version == '9'
   yum_repository:
     name: nginx
     description: nginx repo
@@ -60,10 +57,18 @@
   when: not certificate_file.stat.exists
   command: /var/lib/cloud/scripts/per-boot/generate-ssl-certificate

+- name: Find the default nginx config files
+  find:
+    paths: /etc/nginx
+    patterns: "*.default"
+  register: default_config
+
 - name: Remove the default nginx config files
   file:
-    path: /etc/nginx/*.default
+    path: "{{ item }}"
     state: absent
+  loop: "{{ default_config.files | map(attribute='path') | list }}"
+  when: default_config.files | length > 0

 - name: Change ownership of nginx dirs
   file:
diff --git a/build/ansible/roles/pmm-images/tasks/main.yml b/build/ansible/roles/pmm-images/tasks/main.yml
index 44d15d24a6..5a423b6ddf 100644
--- a/build/ansible/roles/pmm-images/tasks/main.yml
+++ b/build/ansible/roles/pmm-images/tasks/main.yml
@@ -5,9 +5,6 @@
     key: https://downloads.percona.com/downloads/RPM-GPG-KEY-percona

 - name: Add PMM3 Server YUM repository
-  when:
-    - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-    - ansible_distribution_major_version == '9'
   yum_repository:
     name: pmm-server
     description: PMM Server YUM repository - x86_64
@@ -16,8 +13,8 @@
     enabled: yes
     gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY

-# local yum repo for building pmm server docker image in autobuild jobs
-- name: Packages | Add a local YUM repository
+# Local yum repo for building pmm server docker image in autobuild jobs
+- name: Add a local YUM repository
   when: ansible_virtualization_type == "docker"
   yum_repository:
     name: local
@@ -26,41 +23,31 @@
     gpgcheck: no
     enabled: no

-# we use it for pmm-client (TODO we'll need switch to pmm-client client repo)
+# we use it for pmm-client (TODO we'll need to switch to pmm-client repo)
 # To workaround the package's incompatibility with RHEL9, we have to ignore errors :(
 # Error: `Failed to validate GPG signature for percona-release-1.0-27.noarch`
 # Despite the error, this will still install the repo and the GPG key
-- name: Packages | Install percona-release rpm
+- name: Add percona-release repository
   yum:
     name: https://repo.percona.com/yum/percona-release-latest.noarch.rpm
     state: installed
   ignore_errors: True

-- name: Packages | Update OS EL9
-  when:
-    - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-    - ansible_distribution_major_version == '9'
+- name: Update OS packages
   yum:
     name: "*"
     state: latest
     disablerepo: percona-release-x86_64

-- name: Packages | Install OS tools for EL9
-  when:
-    - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-    - ansible_distribution_major_version == '9'
+- name: Install OS tools
   yum:
     name:
       - python3-pip
       - python3.11-pip
       - python3.11-psycopg2
       - rsync
-      - libsqlite3x-devel # package does not come pre-installed on EL9

 - name: Install ansible-core and ansible collections
-  when:
-    - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux'
-    - ansible_distribution_major_version == '9'
   yum:
     name:
       - ansible-core
@@ -69,11 +56,6 @@
       - ansible-collection-ansible-posix
     state: present

-- name: Create users for non-docker images | Create users
-  user:
-    name: "pmm"
-  when: ansible_virtualization_type != "docker"
-
 - name: Create users and groups in docker container
   block:
     - name: Ensure groups exist with correct gid
      group:
         gid: "{{ item.gid }}"
       loop:
         - { name: pmm, gid: 1000 }
-        - { name: nginx, gid: 1002 }
-        - { name: clickhouse, gid: 1001 }
+        - { name: nginx, gid: 1001 }
+        - { name: clickhouse, gid: 1002 }

-    - name: Create users | Create users
+    # Note: nginx and clickhouse users will get removed in post-build.
+    - name: Create users
       user:
         name: "{{ item.name }}"
         uid: "{{ item.uid }}"
@@ -95,17 +78,24 @@
         group: "{{ item.group }}"
       loop:
         - { name: pmm, uid: 1000, comment: "PMM Server", shell: "/usr/bin/bash", home: "/home/pmm", group: pmm, }
-        - { name: nginx, uid: 1002, comment: "nginx user", shell: "/sbin/nologin", home: "/dev/null", group: nginx, }
-        - { name: clickhouse, uid: 1001, comment: "Clickhouse server", shell: "/sbin/nologin", home: "/dev/null", group: clickhouse, }
+        - { name: nginx, uid: 1001, comment: "Nginx user", shell: "/sbin/nologin", home: "/dev/null", group: nginx, }
+        - { name: clickhouse, uid: 1002, comment: "Clickhouse server", shell: "/sbin/nologin", home: "/dev/null", group: clickhouse, }
   when: ansible_virtualization_type == "docker"

-- name: Create directories | Create dirs
-  file: path={{ item }} state=directory owner=pmm group=pmm
-  with_items:
+- name: Create directories (mask 022)
+  file:
+    path: "{{ item }}"
+    state: directory
+    owner: pmm
+    group: pmm
+  loop:
+    - /srv # otherwise it's owned by root
     - /srv/prometheus/rules
     - /etc/grafana
+    - /srv/clickhouse

+# Note: a special mode is required, mainly by nginx
 - name: Create a directory for logs
   file:
     path: /srv/logs
@@ -114,22 +104,16 @@
     group: pmm
     mode: 0775

-- name: Create clickhouse data directory
-  file:
-    path: /srv/clickhouse
-    state: directory
-    owner: pmm
-    group: pmm
-    mode: 0755
-
-- name: Create dirs | Create dirs
+- name: Create dirs
   when: ansible_virtualization_type == "docker"
-  file: path={{ item }} state=directory
-  with_items:
+  file:
+    path: "{{ item }}"
+    state: directory
+  loop:
     - /var/lib/cloud/scripts/per-once
     - /var/lib/cloud/scripts/per-boot

-- name: Install RPMs | Install RPMs for PMM3 server
+- name: Install PMM Server components
   yum:
     name:
       - percona-grafana
@@ -162,33 +146,22 @@
 - name: Install supervisord
   include_role:
-    name: supervisord-init
+    name: supervisord

-- name: PMM | Enable repo for pmm-client
+- name: Enable repo for pmm-client
   command: percona-release enable {{ pmm_client_repos }}

 - name: Install pmm-client
   include_role:
     name: pmm-client

-- name: Disable pmm-agent service | Disable pmm-agent
-  when: ansible_virtualization_type != "docker"
-  service: name=pmm-agent state=stopped enabled=no
-
-- name: Create tmp dirs | Create tmp dirs
-  when: ansible_virtualization_type != "docker"
-  command: /usr/bin/systemd-tmpfiles --create --remove --boot --exclude-prefix=/dev
-
-- name: Copy grafana.ini file for the first run
-  copy:
-    src: grafana.ini
-    dest: /etc/supervisord.d/grafana.ini
-    mode: 0644
-
-- name: Create backup directory
+- name: Create a backup directory
   file:
     path: /srv/backup
     state: directory
+    owner: pmm
+    group: pmm
+    mode: 0775

 - name: Create a working directory for VictoriaMetrics
   file:
diff --git a/update/ansible/playbook/tasks/roles/postgres/tasks/backup.yml b/build/ansible/roles/postgres/tasks/backup.yml
similarity index 85%
rename from update/ansible/playbook/tasks/roles/postgres/tasks/backup.yml
rename to build/ansible/roles/postgres/tasks/backup.yml
index 80eae4447e..2daabb37a9 100644
--- a/update/ansible/playbook/tasks/roles/postgres/tasks/backup.yml
+++ b/build/ansible/roles/postgres/tasks/backup.yml
@@ -1,7 +1,7 @@
 ---
 # Backup Postgres Databases
 # This playbook can be run on its own as follows:
-# ansible-playbook -i localhost /usr/share/pmm-update/ansible/playbook/tasks/roles/postgres/tasks/backup.yml
+# ansible-playbook -i localhost /opt/ansible/roles/postgres/tasks/backup.yml
 # or using `include_role` from another playbook with `tasks_from: backup.yml`
 - name: Check if supervisor socket exists
@@ -28,6 +28,8 @@
     - pmm-agent
     - postgresql
   become: true
+  become_user: pmm
+  become_method: su

 - name: Run Postgres database without supervisor
   command: /usr/pgsql-14/bin/pg_ctl start -D /srv/postgres14 -o "-c logging_collector=off"
@@ -35,14 +37,6 @@
   become_user: pmm
   become_method: su

-- name: Create the backup directory if it does not exist
-  file:
-    path: /srv/backup
-    state: directory
-    owner: pmm
-    group: pmm
-    mode: 0775
-
 - name: Dump pmm-managed database
   postgresql_db:
     name: pmm-managed
@@ -71,6 +65,8 @@
     - postgresql
     - pmm-managed
     - pmm-agent
-  become: true
+  become: true
+  become_user: pmm
+  become_method: su
   when: is_supervisor_running
diff --git a/update/ansible/playbook/tasks/roles/postgres/tasks/restore.yml b/build/ansible/roles/postgres/tasks/restore.yml
similarity index 90%
rename from update/ansible/playbook/tasks/roles/postgres/tasks/restore.yml
rename to build/ansible/roles/postgres/tasks/restore.yml
index f29b16e874..c3485f4dcf 100644
--- a/update/ansible/playbook/tasks/roles/postgres/tasks/restore.yml
+++ b/build/ansible/roles/postgres/tasks/restore.yml
@@ -1,7 +1,7 @@
 ---
 # Restore Postgres Databases
 # This playbook can be run on its own as follows:
-# ansible-playbook -i localhost /usr/share/pmm-update/ansible/playbook/tasks/roles/postgres/tasks/restore.yml
+# ansible-playbook -i localhost /opt/ansible/roles/postgres/tasks/restore.yml
 # or using `include_role` from another playbook with `tasks_from: restore.yml`

 - name: Check if supervisor socket exists
@@ -28,6 +28,8 @@
     - pmm-agent
     - postgresql
   become: true
+  become_user: pmm
+  become_method: su

 - name: Run Postgres database without supervisor
   command: /usr/pgsql-14/bin/pg_ctl start -D /srv/postgres
@@ -68,6 +70,9 @@
     - postgresql
     - pmm-managed
     - pmm-agent
-  become: true
+  become: true
+  become_user: pmm
+  become_method: su
+  when: is_supervisor_running
diff --git a/build/ansible/roles/supervisord-init/files/supervisord.service b/build/ansible/roles/supervisord-init/files/supervisord.service
deleted file mode 100644
index fb8a92c84f..0000000000
--- a/build/ansible/roles/supervisord-init/files/supervisord.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=Process Monitoring and Control Daemon
-After=rc-local.service nss-user-lookup.target
-After=network.target
-RequiresMountsFor=/srv
-
-[Service]
-Type=simple
-# we need to wait till time is synchronized
-ExecStartPre=/usr/bin/sleep 10
-ExecStart=/usr/bin/supervisord -c /etc/supervisord.conf
-TimeoutStartSec=120
-
-[Install]
-WantedBy=multi-user.target
diff --git a/build/ansible/roles/pmm-images/files/grafana.ini b/build/ansible/roles/supervisord/files/grafana.ini
similarity index 100%
rename from build/ansible/roles/pmm-images/files/grafana.ini
rename to build/ansible/roles/supervisord/files/grafana.ini
diff --git a/update/ansible/playbook/tasks/files/pmm.ini b/build/ansible/roles/supervisord/files/pmm.ini
similarity index 93%
rename from update/ansible/playbook/tasks/files/pmm.ini
rename to build/ansible/roles/supervisord/files/pmm.ini
index 0215623ccf..dec1638e1a 100644
--- a/update/ansible/playbook/tasks/files/pmm.ini
+++ b/build/ansible/roles/supervisord/files/pmm.ini
@@ -9,7 +9,8 @@ password = dummy

 ; we rewrite autostart to true during update or build.
 [program:pmm-update-perform-init]
-command = /usr/sbin/pmm-update -run-playbook -playbook=/usr/share/pmm-update/ansible/playbook/tasks/init.yml
+command = /usr/sbin/pmm-update -run-playbook -playbook=/opt/ansible/pmm-docker/init.yml
+user = pmm
 directory = /
 autorestart = unexpected
 priority=-1
@@ -68,6 +69,7 @@ redirect_stderr = true
 [program:nginx]
 priority = 4
 command = nginx
+user = pmm
 autorestart = true
 autostart = true
 startretries = 10
@@ -89,6 +91,7 @@ command =
     --postgres-username=pmm-managed
     --postgres-password=pmm-managed
     --supervisord-config-dir=/etc/supervisord.d
+user = pmm
 autorestart = true
 autostart = true
 startretries = 1000
@@ -116,7 +119,8 @@ stdout_logfile_backups = 2
 redirect_stderr = true

 [program:pmm-update-perform]
-command = /usr/sbin/pmm-update -perform -playbook=/usr/share/pmm-update/ansible/playbook/tasks/update.yml
+command = /usr/sbin/pmm-update -perform -playbook=/opt/ansible/pmm-docker/update.yml
+user = pmm
 directory = /
 autorestart = unexpected
 exitcodes = 0
diff --git a/update/ansible/playbook/tasks/files/supervisord.ini b/build/ansible/roles/supervisord/files/supervisord.ini
similarity index 85%
rename from update/ansible/playbook/tasks/files/supervisord.ini
rename to build/ansible/roles/supervisord/files/supervisord.ini
index adf3ed726d..c9eeb67347 100644
--- a/update/ansible/playbook/tasks/files/supervisord.ini
+++ b/build/ansible/roles/supervisord/files/supervisord.ini
@@ -1,4 +1,5 @@
 [supervisord]
+user = pmm
 logfile = /srv/logs/supervisord.log
 logfile_maxbytes = 5MB
 logfile_backups = 1
@@ -6,5 +7,4 @@ loglevel = info
 pidfile = /tmp/supervisord.pid
 nodaemon = true
 nocleanup = false
-user = root
-strip_ansi = false
\ No newline at end of file
+strip_ansi = false
diff --git a/build/ansible/roles/supervisord-init/tasks/main.yml b/build/ansible/roles/supervisord/tasks/main.yml
similarity index 62%
rename from build/ansible/roles/supervisord-init/tasks/main.yml
rename to build/ansible/roles/supervisord/tasks/main.yml
index 878485fa03..43d7286f96 100644
--- a/build/ansible/roles/supervisord-init/tasks/main.yml
+++ b/build/ansible/roles/supervisord/tasks/main.yml
@@ -11,7 +11,6 @@
 - name: Create a default configuration file for supervisord
   shell: /usr/local/bin/echo_supervisord_conf > /etc/supervisord.conf
-  ignore_errors: True

 - name: Modify supervisord.conf
   ini_file:
@@ -32,14 +31,7 @@
     dest: /etc/supervisord.conf
     section: supervisord
     option: logfile
-    value: "/srv/logs/supervisord.log"
-
-- name: Modify supervisord.conf
-  ini_file:
-    dest: /etc/supervisord.conf
-    section: supervisord
-    option: pidfile
-    value: /run/supervisord.pid
+    value: /srv/logs/supervisord.log

 - name: Modify supervisord.conf
   ini_file:
@@ -48,25 +40,33 @@
     option: serverurl
     value: unix:///run/supervisor/supervisor.sock

-- name: Create dirs
+- name: Create a directory for socket
   file:
     path: /run/supervisor
     state: directory
+    owner: pmm
+    group: pmm
     mode: 0770

 - name: Create /etc/supervisord.d dir
   file:
     path: /etc/supervisord.d
-    mode: 0755
     state: directory
+    owner: pmm
+    group: pmm
+    mode: 0755

-- name: Add /etc/tmpfiles.d/supervisor.conf config
-  when: ansible_virtualization_type != "docker"
+- name: Copy *.ini files for PMM components
   copy:
-    content: |
-      D /var/run/supervisor 0775 root root -
-    dest: /etc/tmpfiles.d/supervisor.conf
+    src: "{{ item }}"
+    dest: "/etc/supervisord.d/{{ item }}"
+    owner: pmm
+    group: pmm
     mode: 0644
+  loop:
+    - supervisord.ini
+    - grafana.ini
+    - pmm.ini

 - name: Fix credentials
   ini_file:
@@ -82,24 +82,6 @@
     option: password
     value: dummy

-- name: Increase number of open files for jobs
-  when: ansible_virtualization_type != "docker"
-  ini_file:
-    dest: /etc/supervisord.conf
-    section: supervisord
-    option: minfds
-    value: "800000"
-
-- name: Add supervisord.service
-  when: ansible_virtualization_type != "docker"
-  copy:
-    src: supervisord.service
-    dest: /usr/lib/systemd/system/
-    mode: 0644
-
-- name: Fix motd
-  shell: echo "Welcome to PMM Server!" > /etc/motd; echo "Welcome to PMM Server!" > /etc/motd.conf
-
 - name: Print the contents of supervisord.conf
   debug:
     msg:
diff --git a/build/docker/server/Dockerfile b/build/docker/server/Dockerfile
index 65acb5826f..057baf88ca 100644
--- a/build/docker/server/Dockerfile
+++ b/build/docker/server/Dockerfile
@@ -23,9 +23,10 @@
 COPY pmm-client.tar.gz /tmp/
 COPY ansible /opt/ansible
 RUN cp -r /opt/ansible/roles /opt/ansible/pmm-docker/roles
-RUN ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm-docker/main.yml \
-    && ansible-playbook -vvv -i 'localhost,' -c local /usr/share/pmm-update/ansible/playbook/tasks/update.yml \
-    && ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm/post-build-actions.yml
+
+RUN ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm-docker/main.yml && \
+    ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm-docker/update.yml && \
+    ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm/post-build-actions.yml

 COPY entrypoint.sh /opt/entrypoint.sh
 HEALTHCHECK --interval=3s --timeout=2s --start-period=10s --retries=3 CMD curl -f http://127.0.0.1/v1/readyz || exit 1
diff --git a/build/docker/server/Dockerfile.el9 b/build/docker/server/Dockerfile.el9
index a7ed9285a5..35dc8ac340 100644
--- a/build/docker/server/Dockerfile.el9
+++ b/build/docker/server/Dockerfile.el9
@@ -27,19 +27,21 @@ RUN microdnf -y install epel-release && \
     yum \
     vi

+COPY entrypoint.sh /opt/entrypoint.sh
+COPY ansible /opt/ansible
 COPY RPMS /tmp/RPMS
 COPY gitCommit /tmp/gitCommit
 # Use COPY as we want to unarchive it with ansible
 COPY pmm-client.tar.gz /tmp/
-COPY ansible /opt/ansible
-# NOTE: this needs to be refactored, since some of the playbooks are duplicates
 RUN cp -r /opt/ansible/roles /opt/ansible/pmm-docker/roles
-RUN ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm-docker/main.yml \
-    && ansible-playbook -vvv -i 'localhost,' -c local /usr/share/pmm-update/ansible/playbook/tasks/update.yml \
-    && ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm/post-build-actions.yml
+RUN install -T -p -m 644 /opt/ansible/ansible.cfg /etc/ansible/ansible.cfg && \
+    ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm-docker/main.yml && \
+    ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm-docker/update.yml && \
+    ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm/post-build-actions.yml
+
+USER pmm
+
+HEALTHCHECK --interval=3s --timeout=2s --start-period=10s --retries=3 CMD curl -sf http://127.0.0.1:8080/v1/readyz

-COPY entrypoint.sh /opt/entrypoint.sh
-HEALTHCHECK --interval=3s --timeout=2s --start-period=10s --retries=3 CMD curl -f http://127.0.0.1:8080/v1/readyz || exit 1
 CMD ["/opt/entrypoint.sh"]
diff --git a/build/docker/server/README.md b/build/docker/server/README.md
index ca805a4442..d49a8136c2 100644
--- a/build/docker/server/README.md
+++ b/build/docker/server/README.md
@@ -12,12 +12,12 @@ Percona Monitoring and Management (PMM) is an open source database observability
 ```
 docker pull percona/pmm-server:3
 docker volume create pmm-data
-docker run --detach --restart always --publish 443:443 -v pmm-data:/srv --name pmm-server percona/pmm-server:3
+docker run --detach --restart always --publish 443:8443 -v pmm-data:/srv --name pmm-server percona/pmm-server:3
 ```

 Point your browser to https://hostname:443

-This example uses the tag `:2` to pull the latest PMM 2.x version, but other, [more specific tags](https://hub.docker.com/r/percona/pmm-server/tags), are also available.
+This example uses the tag `:3` to pull the latest PMM 3.x version, but other, [more specific tags](https://hub.docker.com/r/percona/pmm-server/tags), are also available.

 ## Environment variables
diff --git a/build/docker/server/entrypoint.sh b/build/docker/server/entrypoint.sh
index fe09758328..51f79746dd 100755
--- a/build/docker/server/entrypoint.sh
+++ b/build/docker/server/entrypoint.sh
@@ -1,28 +1,35 @@
 #!/bin/bash
 set -o errexit

-# init /srv if empty
+if [ ! -w /srv ]; then
+    echo "FATAL: /srv is not writable by $(whoami) user." >&2
+    echo "Please make sure that /srv is owned by uid $(id -u) and gid $(id -g) and try again." >&2
+    echo "You can change ownership by running: sudo chown -R $(id -u):$(id -g) /srv" >&2
+    exit 1
+fi
+
+# Initialize /srv if empty
 DIST_FILE=/srv/pmm-distribution
 if [ ! -f $DIST_FILE ]; then
-    echo "File $DIST_FILE doesn't exist. Initialize /srv..."
+    echo "File $DIST_FILE doesn't exist. Initializing /srv..."
     echo docker > $DIST_FILE
-    mkdir -p /srv/{clickhouse,grafana,logs,postgres14,prometheus,nginx,victoriametrics}
-    echo "Copying plugins and VERSION file"
+    mkdir -p /srv/{backup,clickhouse,grafana,logs,nginx,postgres14,prometheus,victoriametrics}
+    echo "Copying grafana plugins and the VERSION file..."
+    cp -r /usr/share/percona-dashboards/panels/* /srv/grafana/plugins
     cp /usr/share/percona-dashboards/VERSION /srv/grafana/PERCONA_DASHBOARDS_VERSION
-    cp -r /usr/share/percona-dashboards/panels/ /srv/grafana/plugins
-    chown -R pmm:pmm /srv/grafana
-    chown pmm:pmm /srv/{victoriametrics,prometheus,logs}
-    chown pmm:pmm /srv/postgres14
-    echo "Generating self-signed certificates for nginx"
+
+    echo "Generating self-signed certificates for nginx..."
     bash /var/lib/cloud/scripts/per-boot/generate-ssl-certificate
-    echo "Initializing Postgres"
-    su pmm -c "/usr/pgsql-14/bin/initdb -D /srv/postgres14 --auth=trust --username=postgres"
-    echo "Enable pg_stat_statements extension"
-    su pmm -c "/usr/pgsql-14/bin/pg_ctl start -D /srv/postgres14 -o '-c logging_collector=off'"
+
+    echo "Initializing Postgres..."
+    /usr/pgsql-14/bin/initdb -D /srv/postgres14 --auth=trust --username=postgres
+
+    echo "Enabling pg_stat_statements extension for PostgreSQL..."
+    /usr/pgsql-14/bin/pg_ctl start -D /srv/postgres14 -o '-c logging_collector=off'
     # We create the postgres user with superuser privileges to not break the code that connects pmm-managed to postgres.
-    su pmm -c "/usr/pgsql-14/bin/createuser --echo --superuser --host=/run/postgresql --no-password postgres"
-    su pmm -c "/usr/bin/psql postgres postgres -c 'CREATE EXTENSION pg_stat_statements SCHEMA public'"
-    su pmm -c "/usr/pgsql-14/bin/pg_ctl stop -D /srv/postgres14"
+    /usr/pgsql-14/bin/createuser --echo --superuser --host=/run/postgresql --no-password postgres
+    /usr/bin/psql postgres postgres -c 'CREATE EXTENSION pg_stat_statements SCHEMA public'
+    /usr/pgsql-14/bin/pg_ctl stop -D /srv/postgres14
 fi

 # pmm-managed-init validates environment variables.
diff --git a/build/packer/pmm.el9.json b/build/packer/pmm.el9.json
index b6ded35a72..536e466bb8 100644
--- a/build/packer/pmm.el9.json
+++ b/build/packer/pmm.el9.json
@@ -131,22 +131,22 @@
         "ansible/roles/cloud-node",
         "ansible/roles/lvm-init",
         "ansible/roles/pmm-images",
-        "ansible/roles/supervisord-init",
+        "ansible/roles/supervisord",
         "ansible/roles/ami-ovf"
       ]
     },
     {
       "type": "ansible-local",
-      "playbook_dir": "update/tasks",
-      "playbook_file": "update/tasks/update.yml",
+      "playbook_dir": "ansible/pmm-docker",
+      "playbook_file": "ansible/pmm-docker/update.yml",
       "extra_arguments": ["-vvv", "-u root"],
       "role_paths": [
-        "update/tasks/roles/clickhouse",
-        "update/tasks/roles/dashboards_upgrade",
-        "update/tasks/roles/grafana",
-        "update/tasks/roles/initialization",
-        "update/tasks/roles/nginx",
-        "update/tasks/roles/postgres"
+        "ansible/roles/clickhouse",
+        "ansible/roles/dashboards",
+        "ansible/roles/grafana",
+        "ansible/roles/initialization",
+        "ansible/roles/nginx",
+        "ansible/roles/postgres"
       ]
     },
     {
diff --git a/build/packer/pmm.json b/build/packer/pmm.json
index 3312dcbae5..f816657ef1 100644
--- a/build/packer/pmm.json
+++ b/build/packer/pmm.json
@@ -123,14 +123,14 @@
         "ansible/roles/cloud-node",
         "ansible/roles/lvm-init",
         "ansible/roles/pmm-images",
-        "ansible/roles/supervisord-init",
+        "ansible/roles/supervisord",
         "ansible/roles/ami-ovf"
       ]
     },
     {
       "type": "shell",
       "inline": [
-        "sudo ansible-playbook -vvv -i 'localhost,' -c local /usr/share/pmm-update/ansible/playbook/tasks/update.yml"
+        "sudo ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm-docker/tasks/update.yml"
       ]
     },
     {
diff --git a/get-pmm.sh b/get-pmm.sh
index bb04db2d18..b76f411d83 100755
--- a/get-pmm.sh
+++ b/get-pmm.sh
@@ -13,12 +13,12 @@ set -Eeuo pipefail
 trap cleanup SIGINT SIGTERM ERR EXIT

 # Set defaults.
-tag=${PMM_TAG:-"2"}
-repo=${PMM_REPO:-"percona/pmm-server"}
+tag=${PMM_TAG:-3}
+repo=${PMM_REPO:-percona/pmm-server}
 port=${PMM_PORT:-443}
-container_name=${CONTAINER_NAME:-"pmm-server"}
+container_name=${CONTAINER_NAME:-pmm-server}
 interactive=0
-root_is_needed='no'
+root_is_needed=no

 #######################################
 # Show script usage info.
@@ -85,9 +85,9 @@ msg() {
 # writes message to stderr.
 #######################################
 die() {
-  local msg=$1
+  local message=$1
   local code=${2-1} # default exit status 1
-  msg "$msg"
+  msg "$message"
   exit "$code"
 }

@@ -136,7 +136,7 @@ gather_info() {
  : ${port:=$default_port}
   read -p "  PMM Server Container Name (default: $default_container_name): " container_name
   : ${container_name:="$default_container_name"}
-  read -p "  Override specific version (container tag) (default: $default_tag in 2.x series) format: 2.x.y: " tag
+  read -p "  Override specific version (container tag) (default: $default_tag in 3.x series) format: 3.x.y: " tag
   : ${tag:=$default_tag}
 }

@@ -179,7 +179,7 @@ install_docker() {
   if ! check_command docker; then
     if is_darwin; then
       echo
-      echo "ERROR: Cannot auto-install components on macOS"
+      echo "ERROR: Cannot auto-install components on MacOS"
       echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop and rerun installer after starting"
       echo
       exit 1
@@ -198,7 +198,7 @@ install_docker() {
   if ! run_root 'docker ps > /dev/null'; then
     if is_darwin; then
       run_root 'open --background -a Docker'
-      echo "Giving docker desktop time to start"
+      echo "Giving Docker Desktop time to start"
       sleep 30
     else
       die "${RED}ERROR: cannot run "docker ps" command${NOFORMAT}"
@@ -219,15 +219,17 @@ run_docker() {
 }

 #######################################
-# Starts PMM server container with give repo, tag, name and port.
-# If any PMM server instance is run - stop and backup it.
+# Starts PMM Server container with given repo, tag, name and port.
+# If a PMM Server instance is running - stop and back it up.
 #######################################
 start_pmm() {
-  msg "Starting PMM server..."
+  msg "Starting PMM Server..."
   run_docker "pull $repo:$tag 1> /dev/null"

   if ! run_docker "inspect pmm-data 1> /dev/null 2> /dev/null"; then
-    run_docker "create -v /srv/ --name pmm-data $repo:$tag /bin/true 1> /dev/null"
+    if ! run_docker "volume create pmm-data 1> /dev/null"; then
+      die "${RED}ERROR: cannot create PMM Data Volume${NOFORMAT}"
+    fi
     msg "Created PMM Data Volume: pmm-data"
   fi
@@ -237,17 +239,17 @@ start_pmm() {
     run_docker 'stop pmm-server' || :
     run_docker "rename pmm-server $pmm_archive\n"
   fi
-  run_pmm="run -d -p $port:8443 --volumes-from pmm-data --name $container_name --restart always $repo:$tag"
+  run_pmm="run -d -p $port:8443 --volume pmm-data:/srv --name $container_name --restart always $repo:$tag"

   run_docker "$run_pmm 1> /dev/null"
   msg "Created PMM Server: $container_name"
-  msg "\tUse the following command if you ever need to update your container by hand:"
+  msg "\nUse the following command if you ever need to update your container manually:"
   msg "\tdocker $run_pmm \n"
 }

 #######################################
 # Shows final message.
-# Shows a list of addresses on which PMM server available.
+# Shows a list of addresses on which PMM Server is available.
 #######################################
 show_message() {
   msg "PMM Server has been successfully setup on this system!\n"
@@ -257,7 +259,7 @@ show_message() {
   elif check_command ip; then
     ips=$(ip -f inet a | awk -F"[/ ]+" '/inet / {print $3}')
   else
-    die "${RED}ERROR: cannot detect PMM server address${NOFORMAT}"
+    die "${RED}ERROR: cannot detect PMM Server address${NOFORMAT}"
   fi

   msg "You can access your new server using one of the following web addresses:"
@@ -271,7 +273,7 @@
 main() {
   setup_colors

-  if [[ $interactive == 1 ]]; then
+  if [[ "$interactive" == 1 ]]; then
     gather_info
   fi
   msg "Gathering/downloading required components, this may take a moment\n"
diff --git a/managed/services/grafana/auth_server.go b/managed/services/grafana/auth_server.go
index 0a83233659..0c1b086d69 100644
--- a/managed/services/grafana/auth_server.go
+++ b/managed/services/grafana/auth_server.go
@@ -78,8 +78,7 @@ var rules = map[string]role{
 	"/ping": none, // PMM 1.x variant

 	// must not be available without authentication as it can leak data
-	"/v1/version":         viewer,
-	"/managed/v1/version": viewer, // PMM 1.x variant
+	"/v1/version": viewer,

 	"/v0/qan/": viewer,
diff --git a/managed/services/grafana/auth_server_test.go b/managed/services/grafana/auth_server_test.go
index 09cb5cd319..e4efdca3d9 100644
--- a/managed/services/grafana/auth_server_test.go
+++ b/managed/services/grafana/auth_server_test.go
@@ -228,8 +228,7 @@ func TestAuthServerAuthenticate(t *testing.T) {
 		"/v1/readyz": none,
 		"/ping":      none,

-		"/v1/version":         viewer,
-		"/managed/v1/version": viewer,
+		"/v1/version": viewer,

 		"/v0/qan/ObjectDetails/GetQueryExample": viewer,
diff --git a/managed/services/supervisord/pmm_config.go b/managed/services/supervisord/pmm_config.go
index 78940e13e2..4ef71c0fdd 100644
--- a/managed/services/supervisord/pmm_config.go
+++ b/managed/services/supervisord/pmm_config.go
@@ -88,7 +88,8 @@ username = dummy
 password = dummy

 [program:pmm-update-perform-init]
-command = /usr/sbin/pmm-update -run-playbook -playbook=/usr/share/pmm-update/ansible/playbook/tasks/init.yml
+command = /usr/sbin/pmm-update -run-playbook -playbook=/opt/ansible/pmm-docker/init.yml
+user = pmm
 directory = /
 autorestart = unexpected
 priority=-1
@@ -169,6 +170,7 @@
 command = /usr/sbin/pmm-managed
     --victoriametrics-config=/etc/victoriametrics-promscrape.yml
     --supervisord-config-dir=/etc/supervisord.d
+user = pmm
 autorestart = true
 autostart = true
 startretries = 1000
@@ -196,7 +198,8 @@ stdout_logfile_backups = 2
 redirect_stderr = true

 [program:pmm-update-perform]
-command = /usr/sbin/pmm-update -perform -playbook=/usr/share/pmm-update/ansible/playbook/tasks/update.yml
+command = /usr/sbin/pmm-update -perform -playbook=/opt/ansible/pmm-docker/update.yml
+user = pmm
 directory = /
 autorestart = unexpected
 exitcodes = 0
diff --git a/managed/testdata/supervisord.d/pmm-db_disabled.ini b/managed/testdata/supervisord.d/pmm-db_disabled.ini
index 4d7693ee99..a9191eb4b5 100644
--- a/managed/testdata/supervisord.d/pmm-db_disabled.ini
+++ b/managed/testdata/supervisord.d/pmm-db_disabled.ini
@@ -9,7 +9,8 @@ username = dummy
 password = dummy

 [program:pmm-update-perform-init]
-command = /usr/sbin/pmm-update -run-playbook -playbook=/usr/share/pmm-update/ansible/playbook/tasks/init.yml
+command = /usr/sbin/pmm-update -run-playbook -playbook=/opt/ansible/pmm-docker/init.yml
+user = pmm
 directory = /
 autorestart = unexpected
 priority=-1
@@ -62,6 +63,7 @@
 command = /usr/sbin/pmm-managed
     --victoriametrics-config=/etc/victoriametrics-promscrape.yml
     --supervisord-config-dir=/etc/supervisord.d
+user = pmm
 autorestart = true
 autostart = true
 startretries = 1000
@@ -89,7 +91,8 @@ stdout_logfile_backups = 2
 redirect_stderr = true

 [program:pmm-update-perform]
-command = /usr/sbin/pmm-update -perform -playbook=/usr/share/pmm-update/ansible/playbook/tasks/update.yml
+command = /usr/sbin/pmm-update -perform -playbook=/opt/ansible/pmm-docker/update.yml
+user = pmm
 directory = /
 autorestart = unexpected
 exitcodes = 0
diff --git a/managed/testdata/supervisord.d/pmm-db_enabled.ini b/managed/testdata/supervisord.d/pmm-db_enabled.ini
index 891ae16f6e..bb28208342 100644
--- a/managed/testdata/supervisord.d/pmm-db_enabled.ini
+++ b/managed/testdata/supervisord.d/pmm-db_enabled.ini
@@ -9,7 +9,8 @@ username = dummy
 password = dummy

 [program:pmm-update-perform-init]
-command = /usr/sbin/pmm-update -run-playbook -playbook=/usr/share/pmm-update/ansible/playbook/tasks/init.yml
+command = /usr/sbin/pmm-update -run-playbook -playbook=/opt/ansible/pmm-docker/init.yml
+user = pmm
 directory = /
 autorestart = unexpected
 priority=-1
@@ -86,6 +87,7 @@
 command = /usr/sbin/pmm-managed
     --victoriametrics-config=/etc/victoriametrics-promscrape.yml
     --supervisord-config-dir=/etc/supervisord.d
+user = pmm
 autorestart = true
 autostart = true
 startretries = 1000
@@ -113,7 +115,8 @@ stdout_logfile_backups = 2
 redirect_stderr = true

 [program:pmm-update-perform]
-command = /usr/sbin/pmm-update -perform -playbook=/usr/share/pmm-update/ansible/playbook/tasks/update.yml
+command = /usr/sbin/pmm-update -perform -playbook=/opt/ansible/pmm-docker/update.yml
+user = pmm
 directory = /
 autorestart = unexpected
 exitcodes = 0
diff --git a/managed/utils/envvars/parser.go b/managed/utils/envvars/parser.go
index b9a669e491..58266da869 100644
--- a/managed/utils/envvars/parser.go
+++ b/managed/utils/envvars/parser.go
@@ -83,7 +83,7 @@ func ParseEnvVars(envs []string) (*models.ChangeSettingsParams, []error, []strin
 		var err error

 		switch k {
-		case "_", "HOME", "HOSTNAME", "LANG", "PATH", "PWD", "SHLVL", "TERM", "LC_ALL":
+		case "_", "HOME", "HOSTNAME", "LANG", "PATH", "PWD", "SHLVL", "TERM", "LC_ALL", "SHELL", "LOGNAME", "USER", "PS1":
 			// skip default environment variables
 			continue
 		case "PMM_DEBUG", "PMM_TRACE":
diff --git a/qan-api2/.github/CONTRIBUTING.md b/qan-api2/CONTRIBUTING.md
similarity index 100%
rename from qan-api2/.github/CONTRIBUTING.md
rename to qan-api2/CONTRIBUTING.md
diff --git a/update/.devcontainer/install-dev-tools.sh b/update/.devcontainer/install-dev-tools.sh
index 7dc710b135..442a2e8bb6 100755
--- a/update/.devcontainer/install-dev-tools.sh
+++ b/update/.devcontainer/install-dev-tools.sh
@@ -8,7 +8,7 @@ set -o errexit
 set -o xtrace

 # download (in the background) the same verison as used by PMM build process
-curl -sS https://dl.google.com/go/go1.21.1.linux-amd64.tar.gz -o /tmp/golang.tar.gz &
+curl -sS https://dl.google.com/go/go1.21.5.linux-amd64.tar.gz -o /tmp/golang.tar.gz &

 # to install man pages
 sed -i '/nodocs/d' /etc/yum.conf
@@ -36,14 +36,14 @@ yum install -y gcc git make pkgconfig \
     bash-completion \
     man man-pages

-if [ "$RHEL" = '7' ]; then
+if [ "$RHEL" = "7" ]; then
     yum install -y ansible-lint glibc-static bash-completion-extras
 else
     yum install -y ansible-lint glibc-static --enablerepo=ol9_codeready_builder
 fi

 fg || true
-tar -C /usr/local -xzf /tmp/golang.tar.gz
+tar -C /usr/local -xzf /tmp/golang.tar.gz && rm -f /tmp/golang.tar.gz
 update-alternatives --install "/usr/bin/go" "go" "/usr/local/go/bin/go" 0
 update-alternatives --set go /usr/local/go/bin/go
 update-alternatives --install "/usr/bin/gofmt" "gofmt" "/usr/local/go/bin/gofmt" 0
diff --git a/update/Makefile b/update/Makefile
index fb3afa5c60..a4a3ac4551 100644
--- a/update/Makefile
+++ b/update/Makefile
@@ -71,8 +71,3 @@ env-down: ## Stop development environment

 install-dev-tools:
 	docker exec pmm-update-server /root/go/src/github.com/percona/pmm/update/.devcontainer/install-dev-tools.sh
-
-check: ## Run required checkers and linters
-	ansible-playbook --syntax-check ansible/playbook/tasks/update.yml
-	ansible-playbook --check ansible/playbook/tasks/update.yml
-	ansible-lint ansible/playbook/tasks/update.yml
diff --git a/update/ansible/playbook/templates/.gitkeep b/update/ansible/.gitkeep
similarity index 100%
rename from update/ansible/playbook/templates/.gitkeep
rename to update/ansible/.gitkeep
diff --git a/update/ansible/playbook/tasks/roles/clickhouse/main.yml b/update/ansible/playbook/tasks/roles/clickhouse/main.yml
deleted file mode 100644
index bb2f660bfd..0000000000
--- a/update/ansible/playbook/tasks/roles/clickhouse/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# This role contains tasks executed during the migration of PMM Server from v2 to v3
-- name: Create a feature flag directory
-  file:
-    path: /srv/clickhouse/flags
-    state: directory
-    owner: pmm
-    group: pmm
-    mode: 0755
-    recurse: true
-
-- name: Create a feature flag
-  file:
-    path: /srv/clickhouse/flags/convert_ordinary_to_atomic
-    state: touch
-    owner: pmm
-    group: pmm
-    mode: 0755