Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: export corosync configuration #231

Open
wants to merge 6 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 14 additions & 8 deletions .github/workflows/python-unit-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,20 @@ jobs:
python:
strategy:
matrix:
os:
- ubuntu-22.04
- ubuntu-latest
pcs_version:
- v0.11.4
- v0.11.5
- v0.11.6
- main
include:
- os: ubuntu-22.04
pcs_version: v0.11.4
- os: ubuntu-22.04
pcs_version: v0.11.5
- os: ubuntu-22.04
pcs_version: v0.11.6
- os: ubuntu-22.04
pcs_version: v0.11.7
- os: ubuntu-22.04
pcs_version: v0.11.8
- os: ubuntu-24.04
pcs_version: main
# pcs_version 0.12.x will go to ubuntu 24.04 only
runs-on: ${{ matrix.os }}
steps:
- name: Update git
Expand Down
13 changes: 13 additions & 0 deletions .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,19 @@ unit_tests:
- pcs --version
- PYTHONPATH="./library:./module_utils:$PYTHONPATH" python -m unittest --verbose tests/unit/*.py

unit_tests_rhel8:
parallel:
matrix:
- BASE_IMAGE_NAME: *ALL_IMAGES
rules:
- if: $BASE_IMAGE_NAME =~ /^LsrRhel8.*/
stage: tier0
script:
- dnf install -y pcs
- *symlink_pcs_to_pyenv
- pcs --version
- PYTHONPATH="./library:./module_utils:$PYTHONPATH" python -m unittest --verbose tests/unit/test_ha_cluster_info.py

# tier 1
build_tier1_ci_yml:
variables:
Expand Down
8 changes: 8 additions & 0 deletions .sanity-ansible-ignore-2.13.txt
Original file line number Diff line number Diff line change
Expand Up @@ -20,3 +20,11 @@ plugins/modules/pcs_qdevice_certs.py import-2.7!skip
plugins/modules/pcs_qdevice_certs.py import-3.6!skip
plugins/modules/pcs_qdevice_certs.py import-3.7!skip
plugins/modules/pcs_qdevice_certs.py import-3.8!skip
plugins/modules/ha_cluster_info.py compile-2.7!skip
plugins/modules/ha_cluster_info.py import-2.7!skip
plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license
plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/exporter.py import-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py compile-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py import-2.7!skip
tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip
14 changes: 14 additions & 0 deletions .sanity-ansible-ignore-2.14.txt
Original file line number Diff line number Diff line change
Expand Up @@ -26,3 +26,17 @@ plugins/modules/pcs_qdevice_certs.py import-3.5!skip
plugins/modules/pcs_qdevice_certs.py import-3.6!skip
plugins/modules/pcs_qdevice_certs.py import-3.7!skip
plugins/modules/pcs_qdevice_certs.py import-3.8!skip
plugins/modules/ha_cluster_info.py compile-2.7!skip
plugins/modules/ha_cluster_info.py compile-3.5!skip
plugins/modules/ha_cluster_info.py import-2.7!skip
plugins/modules/ha_cluster_info.py import-3.5!skip
plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license
plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-3.5!skip
plugins/module_utils/ha_cluster_lsr/info/exporter.py import-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/exporter.py import-3.5!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py compile-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py compile-3.5!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py import-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py import-3.5!skip
tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip
14 changes: 14 additions & 0 deletions .sanity-ansible-ignore-2.15.txt
Original file line number Diff line number Diff line change
Expand Up @@ -26,3 +26,17 @@ plugins/modules/pcs_qdevice_certs.py import-3.5!skip
plugins/modules/pcs_qdevice_certs.py import-3.6!skip
plugins/modules/pcs_qdevice_certs.py import-3.7!skip
plugins/modules/pcs_qdevice_certs.py import-3.8!skip
plugins/modules/ha_cluster_info.py compile-2.7!skip
plugins/modules/ha_cluster_info.py compile-3.5!skip
plugins/modules/ha_cluster_info.py import-2.7!skip
plugins/modules/ha_cluster_info.py import-3.5!skip
plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license
plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-3.5!skip
plugins/module_utils/ha_cluster_lsr/info/exporter.py import-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/exporter.py import-3.5!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py compile-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py compile-3.5!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py import-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py import-3.5!skip
tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip
8 changes: 8 additions & 0 deletions .sanity-ansible-ignore-2.16.txt
Original file line number Diff line number Diff line change
Expand Up @@ -20,3 +20,11 @@ plugins/modules/pcs_qdevice_certs.py import-2.7!skip
plugins/modules/pcs_qdevice_certs.py import-3.6!skip
plugins/modules/pcs_qdevice_certs.py import-3.7!skip
plugins/modules/pcs_qdevice_certs.py import-3.8!skip
plugins/modules/ha_cluster_info.py compile-2.7!skip
plugins/modules/ha_cluster_info.py import-2.7!skip
plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license
plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/exporter.py import-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py compile-2.7!skip
plugins/module_utils/ha_cluster_lsr/info/loader.py import-2.7!skip
tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip
2 changes: 2 additions & 0 deletions .sanity-ansible-ignore-2.17.txt
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,5 @@ plugins/module_utils/ha_cluster_lsr/pcs_api_v2_utils.py compile-3.7!skip
plugins/module_utils/ha_cluster_lsr/pcs_api_v2_utils.py compile-3.8!skip
plugins/modules/pcs_qdevice_certs.py import-3.7!skip
plugins/modules/pcs_qdevice_certs.py import-3.8!skip
plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license
tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip
79 changes: 79 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,12 @@ An Ansible role for managing High Availability Clustering.
* Pacemaker Access Control Lists (ACLs)
* node and resource utilization
* Pacemaker Alerts
* The role provides the `ha_cluster_info` module, which exports the current
cluster configuration. The module is capable of exporting:
* single-link or multi-link cluster
* Corosync transport options including compression and encryption
* Corosync totem options
* Corosync quorum options

## Requirements

Expand Down Expand Up @@ -1545,6 +1551,79 @@ all:
monitoring. Defaults to empty list if not set. Always refer to the devices
using the long, stable device name (`/dev/disk/by-id/`).

## Export current cluster configuration

The role provides the `ha_cluster_info` module, which exports the current
cluster configuration in a dictionary matching the structure of this role's
variables. If the role is run with these variables, it recreates the same
cluster.

Note that the dictionary of variables may not be complete and manual
modification of it is expected. Most notably, you need to set
[`ha_cluster_hacluster_password`](#ha_cluster_hacluster_password).

Note that depending on pcs version installed on managed nodes, certain variables
may not be present in the export.

* Following variables are present in the export:
* [`ha_cluster_cluster_present`](#ha_cluster_cluster_present)
* [`ha_cluster_start_on_boot`](#ha_cluster_start_on_boot)
* [`ha_cluster_cluster_name`](#ha_cluster_cluster_name)
* [`ha_cluster_transport`](#ha_cluster_transport)
* [`ha_cluster_totem`](#ha_cluster_totem)
* [`ha_cluster_quorum`](#ha_cluster_quorum)
* [`ha_cluster_node_options`](#ha_cluster_node_options) - currently only
`node_name`, `corosync_addresses` and `pcs_address` are present

* Following variables are never present in the export (consult the role
documentation for impact of the variables missing when running the role):
* [`ha_cluster_hacluster_password`](#ha_cluster_hacluster_password) - This is
a mandatory variable for the role but it cannot be extracted from existing
clusters.
* [`ha_cluster_corosync_key_src`](#ha_cluster_corosync_key_src),
[`ha_cluster_pacemaker_key_src`](#ha_cluster_pacemaker_key_src) and
[`ha_cluster_fence_virt_key_src`](#ha_cluster_fence_virt_key_src) - These
are supposed to contain paths to files with the keys. Since the keys
themselves are not exported, these variables are not present in the export
either. Corosync and pacemaker keys are supposed to be unique for each
cluster.
* [`ha_cluster_regenerate_keys`](#ha_cluster_regenerate_keys) - It is your
responsibility to decide if you want to use existing keys or generate new
ones.

To export current cluster configuration and store it in
`ha_cluster_info_result` variable, write a task like this:

```yaml
- name: Get current cluster configuration
linux-system-roles.ha_cluster.ha_cluster_info:
register: ha_cluster_info_result
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

System roles, by convention, do not support users using modules directly. In every other role that does something like this, users use the role with either no arguments like https://github.com/linux-system-roles/firewall?tab=readme-ov-file#gathering-firewall-ansible-facts:

- name: Get current cluster configuration
  include_role:
    name: linux-system-roles.ha_cluster

or with some special variable

- name: Get current cluster configuration
  include_role:
    name: linux-system-roles.ha_cluster
  vars:
    ha_cluster_get_info: true

I think the ha_cluster role will have to do something like the latter, since there are numerous public api variables, as opposed to the firewall role which just has the one main firewall variable. The latter also makes it possible for the role to

  • set the state of the cluster and return the cluster configuration
  • return a subset of the information

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The role would then set a global variable e.g. ha_cluster_info that users would use. This return variable will be declared in the README.md in the section Variables Exported by the Role e.g. https://github.com/linux-system-roles/kernel_settings?tab=readme-ov-file#variables-exported-by-the-role

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ping

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

bootloader and snapshot roles export info with <rolename>_facts variable. Let's be consistent with this naming.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry, I've been busy with other projects.

I wasn't sure what would be the correct way to expose the export functionality. So I'm glad you pointed me in the right direction. I'm going to implement this change, hopefully in a couple of weeks, once I finish tasks that require my immediate attention.

```

Then you may use the `ha_cluster_info_result` variable in your playbook
depending on your needs.

If you just want to see the content of the variable, use the ansible debug
module like this:

```yaml
- name: Print ha_cluster_info_result variable
debug:
var: ha_cluster_info_result
```

Or you may want to save the configuration to a file on your controller node in
YAML format with a task similar to this one, so that you can write a playbook
around it:

```yaml
- name: Save current cluster configuration to a file
delegate_to: localhost
copy:
content: "{{
ha_cluster_info_result.ha_cluster | to_nice_yaml(sort_keys=false) }}"
dest: /path/to/file
```

## Example Playbooks

Following examples show what the structure of the role variables looks like.
Expand Down
178 changes: 178 additions & 0 deletions library/ha_cluster_info.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,178 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (C) 2024 Red Hat, Inc.
# Author: Tomas Jelinek <[email protected]>
# SPDX-License-Identifier: MIT

# make ansible-test happy, even though the module requires Python 3
from __future__ import absolute_import, division, print_function

# make ansible-test happy, even though the module requires Python 3
# pylint: disable=invalid-name
__metaclass__ = type

DOCUMENTATION = r"""
---
module: ha_cluster_info
short_description: Export HA cluster configuration
description:
This module exports live cluster configuration in form of variables which
recreate the same configuration when passed to ha_cluster role. Note that
the set of variables may not be complete and manual modification of the
result is expected (at least setting ha_cluster_hacluster_password is
required).
author:
- Tomas Jelinek (@tomjelinek)
requirements:
- pcs-0.10.8 or newer installed on managed nodes
- pcs-0.10.8 or newer for exporting corosync configuration
- python 3.6 or newer
"""

EXAMPLES = r"""
- name: Get HA cluster configuration
ha_cluster_info:
register: my_ha_cluster_info
"""

RETURN = r"""
ha_cluster:
returned: success
type: dict
description:
- Information about existing cluster on the node. If passed to
ha_cluster role, the role recreates the same cluster. Note that the
set of variables may not be complete and manual modification of the
result is expected. The variables are documented in the role.
- Note that depending on pcs version present on the managed node,
certain variables may not be exported.
- HORIZONTALLINE
- Following variables are present in the output
- ha_cluster_cluster_present
- ha_cluster_start_on_boot
- ha_cluster_cluster_name
- ha_cluster_transport
- ha_cluster_totem
- ha_cluster_quorum
- ha_cluster_node_options - currently only node_name,
corosync_addresses and pcs_address are present
- HORIZONTALLINE
- Following variables are required for running ha_cluster role but are
never present in this module output
- ha_cluster_hacluster_password
- HORIZONTALLINE
- Following variables are never present in this module output (consult
the role documentation for impact of the variables missing)
- ha_cluster_corosync_key_src
- ha_cluster_pacemaker_key_src
- ha_cluster_fence_virt_key_src
- ha_cluster_regenerate_keys
- HORIZONTALLINE
"""

from typing import Any, Dict, List, Optional, Tuple

from ansible.module_utils.basic import AnsibleModule

# pylint: disable=no-name-in-module
from ansible.module_utils.ha_cluster_lsr.info import exporter, loader


def get_cmd_runner(module: AnsibleModule) -> loader.CommandRunner:
    """
    Provide a function responsible for running external processes

    The returned callable matches the loader.CommandRunner interface:
    it takes a command argv list and optional extra environment variables,
    and returns the (return code, stdout, stderr) triple.
    """

    def run_command(
        args: List[str], environ_update: Optional[Dict[str, str]] = None
    ) -> Tuple[int, str, str]:
        # Delegate to Ansible's own process runner. Callers inspect the
        # return code themselves, hence check_rc=False.
        return module.run_command(
            args, check_rc=False, environ_update=environ_update
        )

    return run_command


def export_cluster_configuration(module: AnsibleModule) -> Dict[str, Any]:
    """
    Export existing HA cluster configuration

    Returns a dict of ha_cluster role variables describing the live cluster,
    assembled from the separate pieces of information pcs can provide.
    """
    # Until pcs is able to export the whole configuration in one go, we need to
    # put it together from separate parts provided by pcs. Some parts are only
    # available in recent pcs versions. Check pcs capabilities.
    # Use typing.Dict for the annotation, consistently with the rest of this
    # module; the builtin-generic form dict[...] is only valid as an evaluated
    # annotation on Python 3.9+, while the module declares support for 3.6+.
    result: Dict[str, Any] = dict()
    cmd_runner = get_cmd_runner(module)

    corosync_enabled = loader.is_service_enabled(cmd_runner, "corosync")
    pacemaker_enabled = loader.is_service_enabled(cmd_runner, "pacemaker")
    result["ha_cluster_start_on_boot"] = exporter.export_start_on_boot(
        corosync_enabled, pacemaker_enabled
    )

    # Corosync config is available via CLI since pcs-0.10.8, via API v2 since
    # pcs-0.12.0 and pcs-0.11.9. For old pcs versions, CLI must be used, and
    # there is no benefit in implementing access via API on top of that.
    # No need to check pcs capabilities. If this is not supported by pcs,
    # exporting anything else is pointless (and not supported by pcs anyway).
    corosync_conf_pcs = loader.get_corosync_conf(cmd_runner)
    # known-hosts file is available since pcs-0.10, but is not exported by pcs
    # in any version.
    # No need to check pcs capabilities.
    known_hosts_pcs = loader.get_pcsd_known_hosts()

    # Convert corosync config to role format
    result["ha_cluster_cluster_name"] = exporter.export_corosync_cluster_name(
        corosync_conf_pcs
    )
    result["ha_cluster_transport"] = exporter.export_corosync_transport(
        corosync_conf_pcs
    )
    # Optional sections are only emitted when they actually contain data, so
    # the exported variables stay minimal.
    exported_totem = exporter.export_corosync_totem(corosync_conf_pcs)
    if exported_totem:
        result["ha_cluster_totem"] = exported_totem
    exported_quorum = exporter.export_corosync_quorum(corosync_conf_pcs)
    if exported_quorum:
        result["ha_cluster_quorum"] = exported_quorum

    # Convert nodes definition to role format
    result["ha_cluster_node_options"] = exporter.export_cluster_nodes(
        corosync_conf_pcs, known_hosts_pcs
    )

    return result


def main() -> None:
    """
    Top level module function

    Builds the module result under the "ha_cluster" key and maps known
    exporter/loader errors to fail_json calls with structured details.
    """
    module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)

    ha_cluster_result: Dict[str, Any] = dict()
    module_result: Dict[str, Any] = dict(ha_cluster=ha_cluster_result)

    try:
        if not loader.has_corosync_conf():
            # No corosync.conf means there is no cluster on this node.
            ha_cluster_result["ha_cluster_cluster_present"] = False
        else:
            # Keep insertion order: exported configuration first, then the
            # presence flag, matching the module's documented output.
            ha_cluster_result.update(**export_cluster_configuration(module))
            ha_cluster_result["ha_cluster_cluster_present"] = True
        module.exit_json(**module_result)
    except exporter.JsonMissingKey as e:
        module.fail_json(
            msg=f"Missing key {e.key} in pcs {e.data_desc} JSON output",
            error_details=e.kwargs,
        )
    except loader.JsonParseError as e:
        module.fail_json(
            msg="Error while parsing pcs JSON output", error_details=e.kwargs
        )
    except loader.CliCommandError as e:
        module.fail_json(msg="Error while running pcs", error_details=e.kwargs)
Empty file.
Loading
Loading