From e8640f261333d006abd93f79de418a8b6a16685e Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Thu, 22 Apr 2021 11:07:26 +0530 Subject: [PATCH 01/50] Cloned the 4.0 docset to form 3.10 docset Former-commit-id: 5d5b399853becb1437cf2cd9f99724649390e5b9 --- .../efm_pgpool_ha_guide/01_introduction.mdx | 60 + .../efm_pgpool_ha_guide/02_architecture.mdx | 32 + .../03_components_ha_pgpool.mdx | 161 +++ .../efm_pgpool_ha_guide/04_appendix_a.mdx | 70 + .../efm_pgpool_ha_guide/05_appendix_b.mdx | 17 + .../efm_pgpool_ha_guide/images/EDB_logo.png | 3 + .../images/EFM_PgPool_Azure.png | 3 + .../images/backend_pools.png | 3 + .../images/edb_ha_architecture.png | 3 + .../images/edb_ha_architecture1.png | 3 + .../edb_ha_architecture_separate_VM.png | 3 + .../efm_pgpool_ha_guide/images/edb_logo.svg | 56 + .../images/failover_manager_overview.png | 3 + .../images/health_probes.png | 3 + .../images/load_balancing_rules.png | 3 + .../images/placeholder.png | 3 + .../images/rule_port_9898.png | 3 + .../images/rule_port_9999.png | 3 + .../efm/3.10/efm_pgpool_ha_guide/index.mdx | 18 + .../3.10/efm_quick_start/images/edb_logo.png | 3 + .../3.10/efm_quick_start/images/edb_logo.svg | 56 + .../images/failover_manager_overview.png | 3 + .../efm_quick_start/images/placeholder.png | 3 + .../docs/efm/3.10/efm_quick_start/index.mdx | 146 ++ .../docs/efm/3.10/efm_user/01_whats_new.mdx | 17 + .../01_prerequisites.mdx | 102 ++ .../02_failover_manager_overview/index.mdx | 37 + .../efm/3.10/efm_user/03_installing_efm.mdx | 315 +++++ .../01_encrypting_database_password.mdx | 77 ++ .../01_cluster_properties/index.mdx | 1186 +++++++++++++++++ .../02_encrypting_database_password.mdx | 81 ++ .../04_configuring_efm/03_cluster_members.mdx | 35 + .../04_extending_efm_permissions.mdx | 116 ++ .../05_using_vip_addresses.mdx | 148 ++ .../efm_user/04_configuring_efm/index.mdx | 20 + .../docs/efm/3.10/efm_user/05_using_efm.mdx | 318 +++++ .../efm_user/06_monitoring_efm_cluster.mdx | 142 ++ .../3.10/efm_user/07_using_efm_utility.mdx | 213 +++ .../efm_user/08_controlling_efm_service.mdx | 55 + .../3.10/efm_user/09_controlling_logging.mdx | 90 ++ .../efm/3.10/efm_user/10_notifications.mdx | 157 +++ .../3.10/efm_user/11_supported_scenarios.mdx | 117 ++ .../12_upgrading_existing_cluster.mdx | 108 ++ .../efm/3.10/efm_user/13_troubleshooting.mdx | 47 + .../14_configuring_streaming_replication.mdx | 44 + .../15_configuring_ssl_authentication.mdx | 69 + .../efm_user/images/cascading_replication.png | 3 + .../images/cascading_replication1.png | 3 + .../efm/3.10/efm_user/images/edb_logo.png | 3 + .../images/failover_manager_overview.png | 3 + .../efm/3.10/efm_user/images/placeholder.png | 3 + .../str_replication_dashboard_master.png | 3 + .../str_replication_dashboard_standby.png | 3 + ...supported_scenarios_master_agent_exits.png | 3 + .../supported_scenarios_master_db_down.png | 3 + ...ported_scenarios_node_becomes_isolated.png | 3 + ...upported_scenarios_standby_agent_exits.png | 3 + .../supported_scenarios_standby_db_down.png | 3 + ...upported_scenarios_witness_agent_exits.png | 3 + product_docs/docs/efm/3.10/efm_user/index.mdx | 26 + 60 files changed, 4220 insertions(+) create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/04_appendix_a.mdx create mode 100644 
product_docs/docs/efm/3.10/efm_pgpool_ha_guide/05_appendix_b.mdx create mode 100755 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EDB_logo.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/backend_pools.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture.png create mode 100755 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture1.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png create mode 100755 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_logo.svg create mode 100755 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/failover_manager_overview.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/health_probes.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/load_balancing_rules.png create mode 100755 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/placeholder.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9898.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9999.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx create mode 100755 product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.png create mode 100755 product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.svg create mode 100755 product_docs/docs/efm/3.10/efm_quick_start/images/failover_manager_overview.png create mode 100755 product_docs/docs/efm/3.10/efm_quick_start/images/placeholder.png create mode 100644 product_docs/docs/efm/3.10/efm_quick_start/index.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_cluster_members.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_extending_efm_permissions.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/05_using_vip_addresses.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/10_notifications.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx create mode 100644 
product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/images/cascading_replication.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/cascading_replication1.png create mode 100755 product_docs/docs/efm/3.10/efm_user/images/edb_logo.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/failover_manager_overview.png create mode 100755 product_docs/docs/efm/3.10/efm_user/images/placeholder.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_master.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_standby.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_agent_exits.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_db_down.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_node_becomes_isolated.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_agent_exits.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_db_down.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_witness_agent_exits.png create mode 100644 product_docs/docs/efm/3.10/efm_user/index.mdx diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx new file mode 100644 index 00000000000..1d582eeb107 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx @@ -0,0 +1,60 @@ +--- +title: "Architecture Overview" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/introduction.html" +--- + +This guide explains how to configure Failover Manager and Pgpool best to leverage the benefits that they provide for Advanced Server. Using the reference architecture described in the Architecture section, you can learn how to achieve high availability by implementing an automatic failover mechanism (with Failover Manager) while scaling the system for larger workloads and an increased number of concurrent clients with read-intensive or mixed workloads to achieve horizontal scaling/read-scalability (with Pgpool). + +The architecture described in this document has been developed and tested for EFM 4.1, EDB Pgpool 4.1, and Advanced Server 13. + +Documentation for Advanced Server and Failover Manager are available from EnterpriseDB at: + + + +Documentation for pgPool-II can be found at: + + + +## Failover Manager Overview + +Failover Manager is a high-availability module that monitors the health of a Postgres streaming replication cluster and verifies failures quickly. When a database failure occurs, Failover Manager can automatically promote a streaming replication Standby node into a writable Primary node to ensure continued performance and protect against data loss with minimal service interruption. 
+ +**Basic EFM Architecture Terminology** + +A Failover Manager cluster is comprised of EFM processes that reside on the following hosts on a network: + +- A **Primary** node is the Primary database server that is servicing database clients. +- One or more **Standby nodes** are streaming replication servers associated with the Primary node. +- The **Witness node** confirms assertions of either the Primary or a Standby in a failover scenario. If, during a failure situation, the Primary finds itself in a partition with half or more of the nodes, it will stay Primary. As such, EFM supports running in a cluster with an even number of agents. + +## Pgpool-II Overview + +Pgpool-II (Pgpool) is an open-source application that provides connection pooling and load balancing for horizontal scalability of SELECT queries on multiple Standbys in EPAS and community Postgres clusters. For every backend, a backend_weight parameter can set the ratio of read traffic to be directed to the backend node. To prevent read traffic on the Primary node, the backend_weight parameter can be set to 0. In such cases, data modification language (DML) queries (i.e., INSERT, UPDATE, and DELETE) will still be sent to the Primary node, while read queries are load-balanced to the Standbys, providing scalability with mixed and read-intensive workloads. + +EnterpriseDB supports the following Pgpool functionality: + +- Load balancing +- Connection pooling +- High availability +- Connection limits + +### PCP Overview + +Pgpool provides an interface called PCP for administrators that performs management operations such as retrieving the status of Pgpool or terminating Pgpool processes remotely. PCP commands are UNIX commands that manipulate Pgpool via the network. + +### Pgpool Watchdog + +`watchdog` is an optional sub process of Pgpool that provides a high availability feature. Features added by `watchdog` include: + +- Health checking of the pgpool service +- Mutual monitoring of other watchdog processes +- Changing leader/Standby state if certain faults are detected +- Automatic virtual IP address assigning synchronous to server switching +- Automatic registration of a server as a Standby during recovery + +More information about the `Pgpool watchdog` component can be found at: + + diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx new file mode 100644 index 00000000000..8526284e6b5 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx @@ -0,0 +1,32 @@ +--- +title: "Architecture" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/architecture.html" +--- + +![A typical EFM and Pgpool configuration](images/edb_ha_architecture.png) + +The sample architecture diagram shows four nodes as described in the table below: + +| **Systems** | **Components** | +| ------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Primary Pgpool/EFM witness node | The Primary Pgpool node will only run Pgpool, and EFM witness, as such leaving as much resources available to Pgpool as possible. During normal runmode (no Pgpool Failovers), the Primary Pgpool node has attached the Virtual IP address, and all applications connect through the Virtual IP address to Pgpool. Pgpool will forward all write traffic to the Primary Database node, and will balance all read across all Standby nodes.On the Primary Pgpool node, the EFM witness process ensures that a minimum quota of three EFM agents remains available even if one of the database nodes fails. Some examples are when a node is already unavailable due to maintenance, or failure, and another failure occurs. | +| Primary Database node | The Primary Database node will only run Postgres (Primary)and EFM, leaving all resources to Postgres. Read/Write traffic (i.e., INSERT, UPDATE, DELETE) is forwarded to this node by the Primary Pgpool node. | +| Standby nodes | The Standby nodes are running Postgres (Standby), EFM and an inactive Pgpool process. In case of a Primary database failure, EFM will promote Postgres on one of these Standby nodes to handle read-write traffic. In case of a Primary Pgpool failure, the Pgpool watchdog will activate Pgpool on one of the Standby nodes which will attach the VIP, and handle the forwarding of the application connections to the Database nodes. Note that in a double failure situation (both the Primary Pgpool node and the Primary Database node are in failure), both of these Primary processes might end up on the same node. | + +This architecture: + +- Achieves high availability by providing two Standbys that can be promoted in case of a Primary Postgres node failure. +- Achieves high availability by providing at least three Pgpool processes in a watchdog configuration. +- Increases performance with mixed and read-intensive workloads by introducing increased read scalability with more than one Standby for load balancing. +- Reduces load on the Primary database node by redirecting read-only traffic with the Primary pgpool node. +- Prevents resource contention between Pgpool and Postgres on the Primary Database node. By not running Pgpool on the Primary database node, the Primary Postgres process can utilize as much resources as possible. +- Prevents resource contention between pgpool and Postgres on the Primary Pgpool node. 
By not running Standby databases on the Primary Pgpool node, Pgpool can utilize as many resources as possible.
+- Optionally, synchronous replication can be set up to achieve near-zero data loss in a failure event.
+
+!!! Note
+    The architecture also allows you to completely separate the three virtual machines running Postgres from the three virtual machines running Pgpool. This kind of setup requires two extra virtual machines, but it is a better choice if you want to prevent resource contention between Pgpool and Postgres in failover scenarios. In this setup, the architecture can run without an extra seventh node running the EFM witness process. To improve failure resolution, EFM witness agents could be deployed on the Pgpool servers.
+
+![Deployment of EFM and Pgpool on separate virtual machines](images/edb_ha_architecture_separate_VM.png)
diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx
new file mode 100644
index 00000000000..8487a402cf1
--- /dev/null
+++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx
@@ -0,0 +1,161 @@
+---
+title: "Implementing High Availability with Pgpool"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/components_ha_pgpool.html"
+---
+
+Failover Manager monitors the health of Postgres nodes; in the event of a database failure, Failover Manager performs an automatic failover to a Standby node. Note that Pgpool does not monitor the health of backend nodes and will not perform failover to any Standby nodes.
+
+## Configuring Failover Manager
+
+Failover Manager provides functionality that will remove failed database nodes from Pgpool load balancing; it can also re-attach nodes to Pgpool when they are returned to the Failover Manager cluster. To configure EFM for high availability using Pgpool, you must set the following properties in the cluster properties file:
+
+pgpool.enable = <true/false>
+
+pcp.user = <User that will be invoking PCP commands>
+
+pcp.host = <Virtual IP that will be used by pgpool; same as the pgpool parameter 'delegate_IP'>
+
+pcp.port = <The port on which pgpool listens for pcp commands>
+
+pcp.pass.file = <Absolute path of PCPPASSFILE>
+
+pgpool.bin = <Absolute path of the pgpool bin directory>
+
+## Configuring Pgpool
+
+This section lists the configuration of some important parameters in the `pgpool.conf` file to integrate Pgpool-II with EFM.
+
+**Backend node setting**
+
+There are three PostgreSQL backend nodes: one Primary and two Standby nodes. Configure them using the `backend_*` configuration parameters in `pgpool.conf`, and use equal backend weights for all nodes so that read queries are distributed equally among the nodes.
+
+```text
+backend_hostname0 = 'server1_IP'
+backend_port0 = 5444
+backend_weight0 = 1
+backend_flag0 = 'DISALLOW_TO_FAILOVER'
+
+backend_hostname1 = 'server2_IP'
+backend_port1 = 5444
+backend_weight1 = 1
+backend_flag1 = 'DISALLOW_TO_FAILOVER'
+
+backend_hostname2 = 'server3_IP'
+backend_port2 = 5444
+backend_weight2 = 1
+backend_flag2 = 'DISALLOW_TO_FAILOVER'
+```
+
+**Enable load balancing and streaming replication mode**
+
+Set the following configuration parameters in the `pgpool.conf` file to enable load balancing and streaming replication mode:
+
+```text
+master_slave_mode = on
+master_slave_sub_mode = 'stream'
+load_balance_mode = on
+```
+
+**Disable health checking and failover**
+
+Health checking and failover must be handled by EFM, so they must be disabled on the Pgpool-II side. To disable health checking and failover in Pgpool-II, assign the following values:
+
+```text
+health_check_period = 0
+fail_over_on_backend_error = off
+failover_if_affected_tuples_mismatch = off
+failover_command = ''
+failback_command = ''
+```
+
+Ensure the following while setting up the values in the `pgpool.conf` file:
+
+- Keep the value of `wd_priority` in `pgpool.conf` different on each node. The node with the highest value gets the highest priority.
+- The properties `backend_hostname0`, `backend_hostname1`, `backend_hostname2`, and so on are shared properties (in EFM terms) and should hold the same value in the `pgpool.conf` file on all nodes.
+- Update the correct network interface value in the `if_up_cmd`, `if_down_cmd`, and arping command properties in the `pgpool.conf` file.
+- Add the properties `heartbeat_destination0`, `heartbeat_destination1`, `heartbeat_destination2`, and so on, according to the number of nodes, in the `pgpool.conf` file on every node. Here, `heartbeat_destination0` should be the IP address or hostname of the local node.
+
+**Setting up PCP**
+
+The integration scripts use the PCP interface, so you must set up PCP and the `.pcppass` file (PCPPASSFILE) to allow PCP connections without a password prompt.
+
+setup PCP:
+
+setup PCPPASS:
+
+Note that load balancing is turned on to ensure read scalability by distributing read traffic across the standby nodes.
+
+Health checking and error-triggered backend failover have been turned off, as Failover Manager will be responsible for performing health checks and triggering failover. It is not advisable for Pgpool to perform health checking in this case, so as not to create a conflict with Failover Manager or prematurely perform failover.
+
+Finally, `search_primary_node_timeout` has been set to a low value to ensure prompt recovery of Pgpool services after a Failover Manager-triggered failover.
+
+## Virtual IP Addresses
+
+Both Pgpool-II and Failover Manager provide functionality to employ a virtual IP for seamless failover. In this design, the Pgpool-II leader is the process that receives the application connections through the virtual IP, so virtual IP management is performed by the Pgpool-II watchdog system. The EFM VIP feature has no beneficial effect in this design and must be disabled.
+
+Note that in a failure situation of the active Pgpool instance (the Primary Pgpool server in our sample architecture), the next available Standby Pgpool instance (according to watchdog priority) is activated and takes over as the leader Pgpool instance.
+
+## Configuring Pgpool-II Watchdog
+
+The watchdog provides high availability for the Pgpool-II nodes. This section lists the configuration required for the watchdog on each Pgpool-II node.
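+
+Once the watchdog settings shown below are in place on every Pgpool node, you can verify the state of the watchdog cluster through the PCP interface. The following check is only an illustration (it assumes the default PCP port 9898, a PCP user named `pcpuser`, and a configured `.pcppass` file); substitute the values used in your environment:
+
+```text
+# show the current watchdog leader and the status of the other Pgpool nodes
+pcp_watchdog_info -h <Virtual IP or Pgpool host> -p 9898 -U pcpuser -w
+```
+
+The `-w` option makes the PCP command read the password from the `.pcppass` file instead of prompting for it.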
+ +**Common watchdog configurations on all Pgpool nodes** + +The following configuration parameters enable and configure the watchdog. The interval and retry values can be adjusted depending upon the requirements and testing results. + +```text +use_watchdog = on # enable watchdog +wd_port = 9000 # watchdog port, can be changed +delegate_IP = ‘Virtual IP address’ +wd_lifecheck_method = 'heartbeat' +wd_interval = 10 # we can lower this value for quick detection +wd_life_point = 3 +# virtual IP control +ifconfig_path = '/sbin' # ifconfig command path +if_up_cmd = 'ifconfig eth0:0 inet $_IP_$ netmask 255.255.255.0' + # startup delegate IP command +if_down_cmd = 'ifconfig eth0:0 down' # shutdown delegate IP command +arping_path = '/usr/sbin' # arping command path +``` + +!!! Note + Replace the value of eth0 with the network interface on your system. See [Chapter 5](05_appendix_b/#configuration-for-number-of-connections-and-pooling) for tuning the number of connections, and pooling configuration. + +**Watchdog configurations on server 2** + +```text +other_pgpool_hostname0 = 'server 3 IP/hostname' +other_pgpool_port0 = 9999 +other_wd_port0 = 9000 +other_pgpool_hostname1 = 'server 4 IP/hostname' +other_pgpool_port1 = 9999 +other_wd_port1 = 9000 +wd_priority = 1 +``` + +**Watchdog configurations on server 3** + +```text +other_pgpool_hostname0 = 'server 2 IP/hostname' +other_pgpool_port0 = 9999 +other_wd_port0 = 9000 +other_pgpool_hostname1 = 'server 4 IP/hostname' +other_pgpool_port1 = 9999 +other_wd_port1 = 9000 +wd_priority = 3 +``` + +**Watchdog configurations on server 4** + +```text +other_pgpool_hostname0 = 'server 2 IP/hostname' +other_pgpool_port0 = 9999 +other_wd_port0 = 9000 +other_pgpool_hostname1 = 'server 3 IP/hostname' +other_pgpool_port1 = 9999 +other_wd_port1 = 9000 +wd_priority = 5 # use high watchdog priority on server 4 +``` diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/04_appendix_a.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/04_appendix_a.mdx new file mode 100644 index 00000000000..5e3d3bb11e3 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/04_appendix_a.mdx @@ -0,0 +1,70 @@ +--- +title: "EFM Pgpool Integration Using Azure Network Load Balancer" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/appendix_a.html" +--- + + + +This section describes a specific use case for EFM Pgpool integration, where the database, EFM, and Pgpool are installed on CentOS 8 Virtual Machines in Azure. For this specific use case, Azure Load Balancer (LNB) has been used to distribute the traffic amongst all the active Pgpool Instances instead of directing the traffic using Pgpool VIP. + +![Architecture diagram for EFM and Pgpool integration using Azure Load Balancer](images/EFM_PgPool_Azure.png) + +**Step 1 (Installation)**: + +Install and configure Advanced Server database, EFM, and Pgpool on Azure Virtual Machines as following: + +| **Systems** | **Components** | +| ----------- | ------------------------------------------------------------------------------ | +| Primary | Primary node running Advanced Server 13 and Failover Manager 4.1 | +| Standby 1 | Standby node running Advanced Server 13, Failover Manager 4.1, and Pgpool 4.1. | +| Standby 2 | Standby node running Advanced Server 13, Failover Manager 4.1, and Pgpool 4.1. 
|
+| Witness   | Witness node running Failover Manager 4.1 and Pgpool 4.1.                      |
+
+**Step 2 (Pgpool configuration)**:
+
+Configure Pgpool as per the steps given in chapter 3 (except for `delegate_ip`, which should be left empty in this architecture).
+
+**Step 3 (Azure Load Balancer configuration)**:
+
+You need to do the following configuration to use the Azure Network Load Balancer (NLB):
+
+**Networking**: Ensure the following settings for the Network Load Balancer and for each of the virtual machines: assign both a public IP and a private IP to the NLB, and only a private IP to each virtual machine. The application server should connect to the NLB over its public IP, and the NLB in turn should connect to the virtual machines over their private IPs.
+
+In the current scenario, the following IP addresses are assigned to each component:
+
+- Public IP of NLB : 40.76.240.33 (pcp.host)
+- Private IP of Primarydb : 172.16.1.3 (note that this is not part of the backend pool of the Load Balancer)
+- Private IP of Standby 1 : 172.16.1.4
+- Private IP of Standby 2 : 172.16.1.5
+- Private IP of witness node: 172.16.1.6
+
+Ensure that the ports required to run the database, EFM, and Pgpool are open for communication. The following is the list of default ports for each of these components (you can customize the ports for your environment):
+
+- Database: 5444
+- EFM: 7800 (bind.address)
+- Pgpool: 9000, 9694, 9898, 9999
+
+**Backend pool**: Create a Backend pool consisting of all three virtual machines running Pgpool instances. Use the private IPs of the virtual machines to create the Backend pool.
+
+![Backend pool in Azure console](images/backend_pools.png)
+
+**Health Probe**: Add a health probe to check if the Pgpool instance is available on the virtual machines. The health probe periodically pings the virtual machines of the Backend pool on port 9999. If it does not receive any response from a virtual machine, it assumes that the Pgpool instance is not available and stops sending traffic towards that particular machine.
+
+![Health probes in Azure console](images/health_probes.png)
+
+**Load balancing rules**: Add two Load balancing rules - one each for port 9898 and port 9999. These rules should ensure that the network traffic coming towards that particular port gets distributed evenly among all the virtual machines present in the Backend pool.
+
+![Load balancing rules in Azure console](images/load_balancing_rules.png)
+
+1. Rule created for port 9898 (i.e., the PCP port)
+
+![Load balancing rule for port 9898](images/rule_port_9898.png)
+
+2. Rule created for port 9999 (i.e., the Pgpool port)
+
+![Load balancing rule for port 9999](images/rule_port_9999.png)
+
+After completing the above setup, you can connect to Postgres on the IP address of the Network Load Balancer on port 9999. If a failure occurs on the Primary database server, EFM will promote a new Primary and then reconfigure Pgpool to redistribute traffic. If any one of the Pgpool processes is no longer available to accept traffic, the Network Load Balancer will redistribute all the traffic to the remaining two Pgpool processes. Make sure that `listen_backlog_multiplier` is tuned to compensate for the higher number of connections in case of failover.
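+
+As a quick check of the completed setup, you can connect through the load balancer and ask Pgpool for the status of each backend. The commands below are only an illustration: they assume the public IP and Pgpool port shown above, an `enterprisedb` database user, and the default `edb` database, so adjust them to your environment:
+
+```text
+# connect to Postgres through the Azure load balancer on the Pgpool port
+psql -h 40.76.240.33 -p 9999 -U enterprisedb -d edb
+
+# inside the psql session, ask Pgpool for the status of each backend node
+SHOW pool_nodes;
+```
+
+The `SHOW pool_nodes` output lists each backend node with its status, role, and load-balancing weight, which makes it easy to confirm that read traffic is being balanced across the Standby nodes.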
diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/05_appendix_b.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/05_appendix_b.mdx
new file mode 100644
index 00000000000..042804c6718
--- /dev/null
+++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/05_appendix_b.mdx
@@ -0,0 +1,17 @@
+---
+title: "Configuration for Number of Connections and Pooling"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/appendix_b.html"
+---
+
+Pgpool provides several configuration parameters to tune pooling and connection processing. Depending on this configuration, the Postgres `max_connections` setting must also be high enough to ensure that all required connections can be accepted. Furthermore, note that the cloud architecture works with active/active instances, which requires spreading `num_init_children` over all Pgpool instances (divide the value you would normally use by the number of active instances). The text below describes the effect of changing each setting and advises values for both the on-premise and the cloud architecture.
+
+**max_pool**: Generally, it is advised to set `max_pool` to 1. Alternatively, for applications that reconnect frequently, `max_pool` can be set to the number of distinct combinations of users, databases, and connection options used by the application connections. All but one connection in the pool would be stale connections, which consume a connection slot from Postgres without adding to performance. It is therefore advised not to configure `max_pool` beyond 4, to preserve a healthy ratio between active and stale connections. As an example, for an application that constantly reconnects and uses two distinct users, each connecting to its own database, set it to 2. If both users can connect to both databases, set it to 4. Note that increasing `max_pool` requires tuning down `num_init_children` in Pgpool, or tuning up `max_connections` in Postgres.
+
+**num_init_children**: It is advised to set `num_init_children` to the number of connections that could be active in parallel, divided by the number of active Pgpool-II instances (one with the on-premise architecture, and all instances for the cloud architecture). As an example, in an architecture with three Pgpool instances, to allow the application to have 100 active connections in parallel, set `num_init_children` to 100 for the on-premise architecture and to 33 for the cloud architecture. Note that increasing `num_init_children` generally requires tuning up `max_connections` in Postgres.
+
+**listen_backlog_multiplier**: Can be set to scale the number of open connections (as perceived by the application) relative to the number of active connections (`num_init_children`). As an example, when the application might open 500 connections of which 100 should be active in parallel, with the on-premise architecture `num_init_children` should be set to 100 and `listen_backlog_multiplier` to 4. This setup can process 100 connections in parallel, and another 400 (`listen_backlog_multiplier * num_init_children`) connections will be queued before connections are blocked. The application would perceive a total of 500 open connections, while Postgres would process the load of at most 100 connections at all times.
Note that increasing `listen_backlog_multiplier` only causes the application to perceive more connections, but will not increase the number of parallel active connections (which is determined by `num_init_children`). + +**max_connections**: It is advised to set `max_connections` in Postgres higher than `[number of active pgpool instances]*[max_pool]*[num_init_children] + [superuser_reserved_connections] (Postgres)`. As an example: in the on-premise setup with 3 instances active/passive, `max_pool` set to 2, `num_init_children` set to 100, and `superuser_reserved_connections (Postgres)` set to 5, Postgres `max_connections` should be set equal or higher then `[1*2*100+5]` which is 205 connections or higher. A similar setup in the cloud setup would run with 3 active instances, `max_pool` set to 2, `num_init_children` set to 33, and `superuser_reserved_connections (Postgres)` set to 5, in which case Postgres `max_connections` should be set equal or higher than `[3*2*33+5]` which is 203 or higher. Note that configuring below the advised setting can cause issues opening new connections, and in a combination with `max_pool` can cause unexpected behaviour (low or no active connections but still connection issues due to stale pooled connections using connection slots from Postgres. For more information on the relation between `num_init_children`, `max_pool` and `max_connections`, see this background information. diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EDB_logo.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EDB_logo.png new file mode 100755 index 00000000000..9ec76139f63 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EDB_logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d3f95f25c7493174f25102604b286ceb5116b7b41c15a0dc232c8fd852536de +size 13356 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png new file mode 100644 index 00000000000..5bde6798c07 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f96dc8dad8fb1514127e410dbe6bd668691a0138b731e150afb8b5cffb2f9e65 +size 38838 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/backend_pools.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/backend_pools.png new file mode 100644 index 00000000000..927dbdbc997 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/backend_pools.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6674dda03b836ac7e5e06cb059a15650f966f3d816263a04ddbb7fba4ec74436 +size 147475 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture.png new file mode 100644 index 00000000000..cd42278ac4d --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a08834d26e39190da4f533032ad9f78ec5f253c97167f504aee92da9ec9ce76 +size 35314 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture1.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture1.png new file mode 100755 index 00000000000..547cbf01a6e --- /dev/null +++ 
b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:950a1df9ad74895e52417a738a64014eed2203d7d98a1ee95c5aa86ba3078577 +size 116023 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png new file mode 100644 index 00000000000..826dfbabc8b --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c7ad7caf3ea611ac0d56dbdfdc3c67513863e0efd1b88dec306a77caa8d127c +size 39576 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_logo.svg b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_logo.svg new file mode 100755 index 00000000000..74babf2f8da --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_logo.svg @@ -0,0 +1,56 @@ + + + + +logo + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/failover_manager_overview.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/failover_manager_overview.png new file mode 100755 index 00000000000..0a3389950c6 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/failover_manager_overview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5784411bf1d038252baba457c643c00d59a9ea67d3eaaab73b04b8025a62249 +size 87850 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/health_probes.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/health_probes.png new file mode 100644 index 00000000000..d68d6e41fd9 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/health_probes.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16026cb626476565b516885fd5dadc3dbceb933964d0189bb22a992cb4de8229 +size 114669 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/load_balancing_rules.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/load_balancing_rules.png new file mode 100644 index 00000000000..081db02c30e --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/load_balancing_rules.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f26fa44740e64aed35044635b87629b4561f083dd6ce950a88ba6a38c3008daa +size 138639 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/placeholder.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/placeholder.png new file mode 100755 index 00000000000..3c3bf2a4365 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/placeholder.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 +size 16849 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9898.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9898.png new file mode 100644 index 00000000000..290825aeeb3 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9898.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:593aa7ddebe937d7fb837b4784658abfa1733389cd09873a150b5ea66778a2d4 +size 118143 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9999.png 
b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9999.png new file mode 100644 index 00000000000..8d19389dd7a --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9999.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:738e8fad910a66ce32c087cd410c6b9b06a7eff0b9388bc553b021b08f085301 +size 117221 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx new file mode 100644 index 00000000000..645be9cabc6 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx @@ -0,0 +1,18 @@ +--- +title: "EDB Postgres High Availability & Horizontal Read Scaling Architecture" + +#legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/genindex.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/introduction.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/conclusion.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/index.html" +--- + +Since high-availability and read scalability are not part of the core feature set of EDB Postgres Advanced Server, Advanced Server relies on external tools to provide this functionality. This document focuses on the functionality provided by EDB Failover Manager and Pgpool-II, and discusses the implications of a high-availability architecture formed around these tools. + +
+ +introduction architecture components_ha_pgpool appendix_a appendix_b conclusion + +
diff --git a/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.png b/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.png new file mode 100755 index 00000000000..3c3bf2a4365 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 +size 16849 diff --git a/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.svg b/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.svg new file mode 100755 index 00000000000..74babf2f8da --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.svg @@ -0,0 +1,56 @@ + + + + +logo + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/product_docs/docs/efm/3.10/efm_quick_start/images/failover_manager_overview.png b/product_docs/docs/efm/3.10/efm_quick_start/images/failover_manager_overview.png new file mode 100755 index 00000000000..0a3389950c6 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_quick_start/images/failover_manager_overview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5784411bf1d038252baba457c643c00d59a9ea67d3eaaab73b04b8025a62249 +size 87850 diff --git a/product_docs/docs/efm/3.10/efm_quick_start/images/placeholder.png b/product_docs/docs/efm/3.10/efm_quick_start/images/placeholder.png new file mode 100755 index 00000000000..3c3bf2a4365 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_quick_start/images/placeholder.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 +size 16849 diff --git a/product_docs/docs/efm/3.10/efm_quick_start/index.mdx b/product_docs/docs/efm/3.10/efm_quick_start/index.mdx new file mode 100644 index 00000000000..f196c7ac5c3 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_quick_start/index.mdx @@ -0,0 +1,146 @@ +--- +title: "Creating a Failover Manager Cluster" + +#legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/4.0/genindex.html" + #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/4.0/index.html" +--- + +EDB Postgres Failover Manager (Failover Manager) is a high-availability module from EnterpriseDB that enables a Postgres Primary node to automatically failover to a Standby node in the event of a software or hardware failure on the Primary. + +This quick start guide describes configuring a Failover Manager cluster in a test environment. You should read and understand the [EDB Failover Manager User's Guide](/efm/latest/) before configuring Failover Manager for a production deployment. + +You must perform some basic installation and configuration steps before performing this tutorial: + +- You must install and initialize a database server on one primary and one or two standby nodes; for information about installing Advanced Server, visit: + + [https://www.enterprisedb.com/docs/p/edb-postgres-advanced-server](/epas/latest/) + +- Postgres streaming replication must be configured and running between the primary and standby nodes. For detailed information about configuring streaming replication, visit: + + . + +- You must also install Failover Manager on each primary and standby node. 
During Advanced Server installation, you configured an EnterpriseDB repository on each database host. You can use the EnterpriseDB repository and the `yum install` command to install Failover Manager on each node of the cluster: + + ```text + yum install edb-efm41 + ``` + +During the installation process, the installer will create a user named `efm` that has sufficient privileges to invoke scripts that control the Failover Manager service for clusters owned by `enterprisedb` or `postgres`. The example that follows creates a cluster named `efm`. + +Start the configuration process on a primary or standby node. Then, copy the configuration files to other nodes to save time. + +**Step 1: Create Working Configuration Files** + +Copy the provided sample files to create EFM configuration files, and correct the ownership: + +```text +cd /etc/edb/efm-4.0 + +cp efm.properties.in efm.properties + +cp efm.nodes.in efm.nodes + +chown efm:efm efm.properties + +chown efm:efm efm.nodes +``` + +**Step 2: Create an Encrypted Password** + +Create the [encrypted password](/efm/latest/efm_user/04_configuring_efm/02_encrypting_database_password/) needed for the properties file: + +```text +/usr/edb/efm-4.0/bin/efm encrypt efm +``` + +Follow the onscreen instructions to produce the encrypted version of your database password. + +**Step 3: Update the efm.properties File** + +The `.properties` file (efm.properties file in this example) contains parameters that specify connection properties and behaviors for your Failover Manager cluster. Modifications to property settings are applied when Failover Manager starts. + +The properties mentioned in this tutorial are the minimal properties required to configure a Failover Manager cluster. If you are configuring a production system, please review the *EDB Failover Manager Guide* for detailed information about Failover Manager options. + +Provide values for the following properties on all cluster nodes: + +| Property | Description | +| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------- | +| `db.user` | The name of the database user. | +| `db.password.encrypted` | The encrypted password of the database user. | +| `db.port` | The port monitored by the database. | +| `db.database` | The name of the database. | +| `db.service.owner` | The owner of the `data` directory (usually `postgres` or `enterprisedb`). Required only if the database is running as a service. | +| `db.service.name` | The name of the database service (used to restart the server). Required only if the database is running as a service. | +| `db.bin` | The path to the `bin` directory (used for calls to `pg_ctl`). | +| `db.recovery.dir` | The `data` directory in which EFM will find or create the `recovery.conf` file or the `standby.signal` file. | +| `user.email` | An email address at which to receive email notifications (notification text is also in the agent log file). | +| `bind.address` | The local address of the node and the port to use for EFM. The format is: `bind.address=1.2.3.4:7800` | +| `is.witness` | `true` on a witness node and `false` if it is a primary or standby. | +| `ping.server.ip` | If you are running on a network without Internet access, set `ping.server.ip` to an address that is available on your network. | +| `auto.allow.hosts` | On a test cluster, set to `true` to simplify startup; for production usage, consult the user's guide. 
| +| `stable.nodes.file` | On a test cluster, set to `true` to simplify startup; for production usage, consult the user's guide. | + +**Step 4: Update the efm.nodes File** + +The `.nodes` file (efm.nodes file in this example) is read at startup to tell an agent how to find the rest of the cluster or, in the case of the first node started, can be used to simplify authorization of subsequent nodes. Add the addresses and ports of each node in the cluster to this file. One node will act as the membership coordinator; the list should include at least the membership coordinator's address. For example: + + `1.2.3.4:7800` + + `1.2.3.5:7800` + + `1.2.3.6:7800` + +Please note that the Failover Manager agent will not verify the content of the `efm.nodes` file; the agent expects that some of the addresses in the file cannot be reached (e.g. that another agent hasn’t been started yet). + +**Step 5: Configure the Other Nodes** + +Copy the `efm.properties` and `efm.nodes` files to the `/etc/edb/efm-4.0` directory on the other nodes in your sample cluster. After copying the files, change the file ownership so the files are owned by `efm:efm`. The `efm.properties` file can be the same on every node, except for the following properties: + +- Modify the `bind.address` property to use the node’s local address. +- Set `is.witness` to `true` if the node is a witness node. If the node is a witness node, the properties relating to a local database installation will be ignored. + +**Step 6: Start the EFM Cluster** + +On any node, start the Failover Manager agent. The agent is named `edb-efm-4.0`; you can use your platform-specific service command to control the service. For example, on a CentOS/RHEL 7.x or CentOS/RHEL 8.x host use the command: + +```text +systemctl start edb-efm-4.0 +``` + +On a a CentOS or RHEL 6.x host use the command: + +```text +service edb-efm-4.0 start +``` + +After the agent starts, run the following command to see the status of the single-node cluster. You should see the addresses of the other nodes in the `Allowed node host` list. + +```text +/usr/edb/efm-4.0/bin/efm cluster-status efm +``` + +Start the agent on the other nodes. Run the `efm cluster-status efm` command on any node to see the cluster status. + +If any agent fails to start, see the startup log for information about what went wrong: + +```text +cat /var/log/efm-4.0/startup-efm.log +``` + +**Performing a Switchover** + +If the cluster status output shows that the primary and standby(s) are in sync, you can perform a switchover with the following command: + +```text +/usr/edb/efm-4.0/bin/efm promote efm -switchover +``` + +The command will promote a standby and reconfigure the primary database as a new standby in the cluster. To switch back, run the command again. + +For quick access to online help, you can invoke the following command: + +```text +/usr/edb/efm-4.0/bin/efm --help +``` diff --git a/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx b/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx new file mode 100644 index 00000000000..33bc9b3f8f5 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx @@ -0,0 +1,17 @@ +--- +title: "What’s New" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.1/whats_new.html" +--- + + + +The following changes have been made to EDB Postgres Failover Manager to create version 4.0: + +- Encryption for database password has been improved. Encryption has also been enabled for communication between the agents. +- Standby servers are no longer stopped while selecting the new primary. This enhancement significantly speeds up the promotion process. +- To be consistent with community naming guidelines, the term Master has been replaced with Primary in the Failover Manager product and documentation. The upgrade-conf tool will handle the task of renaming the impacted properties post-upgrade. The load balancer scripts such as `script.load.balancer.attach`, `script.load. balancer.detach` will now accept character `p` instead of character `m` as an argument. +- Support has been added to delay the restart of standbys after a promotion. You can increase the availability by staggering the restart of standbys. +- A primary agent now attempts to resume health monitoring in a situation where the agent can not reach its local database but other agents can. diff --git a/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx new file mode 100644 index 00000000000..dff775c99ff --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx @@ -0,0 +1,102 @@ +--- +title: "Prerequisites" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.1/prerequisites.html" +--- + + + +Before configuring a Failover Manager cluster, you must satisfy the prerequisites described below. + +**Install Java 1.8 (or later)** + +Before using Failover Manager, you must first install Java (version 1.8 or later). Failover Manager is tested with OpenJDK, and we strongly recommend installing that version of Java. [Installation instructions for Java](https://openjdk.java.net/install/) are platform specific. + +**Provide an SMTP Server** + +You can receive notifications from Failover Manager as specified by a user-defined notification script, by email, or both. + +- If you are using email notifications, an SMTP server must be running on each node of the Failover Manager scenario. +- If you provide a value in the script.notification property, you can leave the user.email field blank; an SMTP server is not required. + +If an event occurs, Failover Manager invokes the script (if provided), and can also send a notification email to any email addresses specified in the user.email parameter of the cluster properties file. For more information about using an SMTP server, visit: + +[https://access.redhat.com/site/documentation](https://access.redhat.com/site/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Deployment_Guide/s1-email-mta.html) + +**Configure Streaming Replication** + +Failover Manager requires that PostgreSQL streaming replication be configured between the Primary node and the Standby node or nodes. Failover Manager does not support other types of replication. + +On database versions 11 (or prior), unless specified with the `-sourcenode` option, a `recovery.conf` file is copied from a random standby node to the stopped primary during switchover. 
You should ensure that the paths within the `recovery.conf` file on your standby nodes are consistent before performing a switchover. For more information about the `-sourcenode` option, please see [Promoting a Failover Manager Node](../05_using_efm/#promote_node). + +On database version 12 or later, the `primary_conninfo` and `restore_command` properties are copied from a random standby node to the stopped primary during switchover (unless otherwise specified with the `-sourcenode` option). + +**Modify the pg_hba.conf File** + +You must modify the `pg_hba.conf` file on the Primary and Standby nodes, adding entries that allow communication between the all of the nodes in the cluster. The following example demonstrates entries that might be made to the pg_hba.conf file on the Primary node: + +```text +# access for itself +host fmdb efm 127.0.0.1/32 md5 +# access for standby +host fmdb efm 192.168.27.1/32 md5 +# access for witness +host fmdb efm 192.168.27.34/32 md5 +``` + +Where: + + `efm` specifies the name of a valid database user. + + `fmdb` specifies the name of a database to which the efm user may connect. + +By default, the `pg_hba.conf` file resides in the `data` directory, under your Postgres installation. After modifying the `pg_hba.conf` file, you must reload the configuration file on each node for the changes to take effect. You can use the following command: + + `# systemctl reload edb-as-x` + +Where `x` specifies the Postgres version. + +**Using Autostart for the Database Servers** + +If a Primary node reboots, Failover Manager may detect the database is down on the Primary node and promote a Standby node to the role of Primary. If this happens, the Failover Manager agent on the (rebooted) Primary node will not get a chance to write the `recovery.conf` file; the `recovery.conf` file prevents the database server from starting. If this happens, the rebooted Primary node will return to the cluster as a second Primary node. + +To prevent this, ensure that the Failover Manager agent auto starts before the database server. The agent will start in idle mode, and check to see if there is already a primary in the cluster. If there is a primary node, the agent will verify that a `recovery.conf` or `standby.signal` file exists, and the database will not start as a second primary. + +**Ensure Communication Through Firewalls** + +If a Linux firewall (i.e. iptables) is enabled on the host of a Failover Manager node, you may need to add rules to the firewall configuration that allow tcp communication between the Failover Manager processes in the cluster. For example: + +```text +# iptables -I INPUT -p tcp --dport 7800 -j ACCEPT +/sbin/service iptables save +``` + +The command shown above opens the port 7800. Failover Manager will connect via the port that corresponds to the port specified in the cluster properties file. + +**Ensure that the Database user has Sufficient Privileges** + +The database user specified by the `db.user` property in the `efm.properties` file must have sufficient privileges to invoke the following functions on behalf of Failover Manager: + + `pg_current_wal_lsn()` + + `pg_last_wal_replay_lsn()` + + `pg_wal_replay_resume()` + + `pg_wal_replay_pause()` + + `pg_reload_conf()` + +The `pg_reload_conf()` privilege is required only if you have the `reconfigure.num.sync` or `reconfigure.sync.primary` property set to `true`. 
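+
+If the database user is not a superuser, one way to provide access to the restricted functions above is to grant EXECUTE on them directly. The statements below are only a sketch (they assume the database user is named `efm`); adapt them to your own security policy:
+
+```text
+-- example: allow a non-superuser EFM database user to call the restricted functions
+GRANT EXECUTE ON FUNCTION pg_catalog.pg_wal_replay_pause() TO efm;
+GRANT EXECUTE ON FUNCTION pg_catalog.pg_wal_replay_resume() TO efm;
+GRANT EXECUTE ON FUNCTION pg_catalog.pg_reload_conf() TO efm;
+```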
+ +For detailed information about each of these functions, please see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/index.html). + +The user must also have permissions to read the values of configuration variables; a database superuser can use the PostgreSQL `GRANT` command to provide the permissions needed: + +```text +GRANT pg_read_all_settings TO user_name; +``` + +For more information about `pg_read_all_settings`, please see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/default-roles.html). diff --git a/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx new file mode 100644 index 00000000000..fac4ec73d30 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx @@ -0,0 +1,37 @@ +--- +title: "Failover Manager Overview" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.1/failover_manager_overview.html" +--- + + + +An EDB Postgres Failover Manager (EFM) cluster is comprised of Failover Manager processes that reside on the following hosts on a network: + +- A Primary node - The Primary node is the primary database server that is servicing database clients. +- One or more Standby nodes - A Standby node is a streaming replication server associated with the Primary node. +- A Witness node - The Witness node confirms assertions of either the Primary or a Standby in a failover scenario. A cluster does not need a dedicated witness node if the cluster contains three or more nodes. If you do not have a third cluster member that is a database host, you can add a dedicated Witness node. A cluster may include more than one witness node. + +Traditionally, a *cluster* is a single instance of Postgres managing multiple databases. In this document, the term cluster refers to a Failover Manager cluster. A Failover Manager cluster consists of a Primary agent, one or more Standby agents, and an optional Witness agent that reside on servers in a cloud or on a traditional network and communicate using the JGroups toolkit. + +![An EFM scenario employing a Virtual IP address.](../images/failover_manager_overview.png) + +When a non-witness agent starts, it connects to the local database and checks the state of the database: + +- If the agent cannot reach the database, it will start in idle mode. +- If it finds that the database is in recovery, the agent assumes the role of standby. +- If the database is not in recovery, the agent assumes the role of primary. + +In the event of a failover, Failover Manager attempts to ensure that the promoted standby is the most up-to-date standby in the cluster; please note that data loss is possible if the standby node is not in sync with the primary node. + +[JGroups](http://www.jgroups.org/) provides technology that allows Failover Manager to create clusters whose member nodes can communicate with each other and detect node failures. + +The figure shown above illustrates a Failover Manager cluster that employs a virtual IP address. You can use a load balancer in place of a [virtual IP address](../04_configuring_efm/05_using_vip_addresses/#using_vip_addresses) if you provide your own [script](../04_configuring_efm/01_cluster_properties/#cluster_properties) to re-configure the load balancer whenever databases are added or removed. 
You can also choose to enable native EFM-Pgpool integration for high availability. + +
+ +prerequisites + +
diff --git a/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx b/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx new file mode 100644 index 00000000000..add844dddfe --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx @@ -0,0 +1,315 @@ +--- +title: "Installing Failover Manager" +legacyRedirects: + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_debian_ubuntu.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_sles.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/post_install_tasks.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_rhel_centos_oel.html" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_efm.html" +--- + + + + + +To request credentials that allow you to access an EnterpriseDB repository, visit the EDB website at: + + + +## RedHat or CentOS Host + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter y, and press Return to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + +Failover Manager must be installed by root. During the installation process, the installer will also create a user named efm that has sufficient privileges to invoke scripts that control the Failover Manager service for clusters owned by enterprisedb or postgres. + +After receiving your credentials, you must create the EnterpriseDB repository configuration file on each node of the cluster, and then modify the file to enable access. The following steps provide detailed information about accessing the EnterpriseDB repository; the steps must be performed on each node of the cluster. + +### RHEL or CentOS 7 PPCLE Host + +1. Use the following command to create a configuration file and install Advance Toolchain: + + ```text + rpm --import https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b + + cat > /etc/yum.repos.d/advance-toolchain.repo <:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` + +4. Install the EPEL repository: + + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` + +5. On RHEL 7 PPCLE, enable the additional repositories to resolve EPEL dependencies: + + ```text + subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" + ``` + +6. Install the selected package: + + ```text + yum -y install edb-efm40 + ``` + +### RHEL or CentOS 7 Host + +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: + + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` + +3. 
Install the EPEL repository: + + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` + +4. On RHEL 7, enable the additional repositories to resolve dependencies: + + ```text + subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" + ``` + +5. Install the selected package: + + ```text + yum -y install edb-efm40 + ``` + +### RHEL or CentOS 8 Host + +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: + ```text + dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: + + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` + +3. Install the EPEL repository: + +- On CentOS 8 + ```text + dnf -y install epel-release + ``` +- On RHEL 8 + ```text + dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + ``` + +4. Enable the additional repositories to resolve dependencies: + +- On CentOS 8 + ```text + dnf config-manager --set-enabled PowerTools + ``` +- On RHEL 8 + + ```text + ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" + ``` + +5. Disable the built-in PostgreSQL module: + + ```text + dnf -qy module disable postgresql + ``` +6. Install the selected package: + ```text + dnf -y install edb-efm40 + ``` + +## Debian or Ubuntu Host + +To install Failover Manager, you must have credentials that allow access to the EnterpriseDB repository. To request credentials for the repository, visit the EnterpriseDB website at: + + + +The following steps will walk you through using the EnterpriseDB apt repository to install Failover Manager. + +### Debian Host + +1. Assume superuser privileges: + ```text + sudo su – + ``` +2. Configure the EnterpriseDB repository by substituting your EnterpriseDB credentials for the username and password placeholders in the following commands: + +- On Debian 9 + ```text + sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + ``` +- On Debian 10 + + ```text + sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + + sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' + ``` + +3. Add support to your system for secure APT repositories: + ```text + apt-get install apt-transport-https + ``` +4. Add the EDB signing key: + ```text + wget -q -O - https://:@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - + ``` +5. Update the repository meta data: + ```text + apt-get update + ``` +6. Install Failover Manager: + ```text + apt-get -y install edb-efm40 + ``` + +### Ububtu Host + +1. Assume superuser privileges: + ```text + sudo su – + ``` +2. 
Configure the EnterpriseDB repository by substituting your EnterpriseDB credentials for the username and password placeholders in the following commands: + +- On Ubuntu 18.04 + ```text + sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + ``` +- On Ubuntu 20.4 + + ```text + sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + + sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' + ``` + +3. Add support to your system for secure APT repositories: + ```text + apt-get install apt-transport-https + ``` +4. Add the EDB signing key: + ```text + wget -q -O - https://:@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - + ``` +5. Update the repository meta data: + ```text + apt-get update + ``` +6. Install Failover Manager: + ```text + apt-get -y install edb-efm40 + ``` + +## SLES Host + +To install Failover Manager, you must have credentials that allow access to the EnterpriseDB repository. To request credentials for the repository, visit the EnterpriseDB website at: + + + +You can use the zypper package manager to install a Failover Manager agent on an SLES 12 host. zypper will attempt to satisfy package dependencies as it installs a package, but requires access to specific repositories that are not hosted at EnterpriseDB. + +1. You must assume superuser privileges and stop any firewalls before installing Failover Manager. Then, use the following commands to add EnterpriseDB repositories to your system: + + ```text + zypper addrepo https://zypp.enterprisedb.com/suse/edb-sles.repo + ``` + +2. The commands create the repository configuration files in the /etc/zypp/repos.d directory. Then, use the following command to refresh the metadata on your SLES host to include the EnterpriseDB repository: + + ```text + zypper refresh + ``` + + When prompted, provide credentials for the repository, and specify a to always trust the provided key, and update the metadata to include the EnterpriseDB repository. + +3. You must also add SUSEConnect and the SUSE Package Hub extension to the SLES host, and register the host with SUSE, allowing access to SUSE repositories. Use the commands: + + ```text + zypper install SUSEConnect + SUSEConnect -r -e + SUSEConnect -p PackageHub/12.4/x86_64 + SUSEConnect -p sle-sdk/12.4/x86_64 + ``` + +4. Install SUSEConnect to register the host with SUSE, allowing access to SUSE repositories: + + ```text + zypper addrepo https://download.opensuse.org/repositories/Apache:/Modules/SLE_12_SP4/Apache:Modules.repo + ``` + +5. Install OpenJDK (version 1.8) for Java based components: + + ```text + zypper -n install java-1_8_0-openjdk + ``` + +6. Now you can use the zypper utility to install a Failover Manager agent: + + ```text + zypper -n install edb-efm40 + ``` + +## Performing post-installation tasks + +If you are using Failover Manager to monitor a cluster owned by a user other than `enterprisedb` or `postgres`, see [Extending Failover Manager Permissions](04_configuring_efm/04_extending_efm_permissions/#extending_efm_permissions) . + +After installing on each node of the cluster, you must: + +1. Modify the [cluster properties file](04_configuring_efm/01_cluster_properties/#cluster_properties) on each node. +2. Modify the [cluster members file](04_configuring_efm/03_cluster_members/#cluster_members) on each node. +3. 
If applicable, configure and test virtual IP address settings and any scripts that are identified in the cluster properties file. +4. Start the agent on each node of the cluster. For more information about controlling the service, see [Section 5](08_controlling_efm_service/#controlling-the-failover-manager-service). + +### Installation Locations + +components are installed in the following locations: + +| Component | Location | +| --------------------------------- | --------------------------- | +| Executables | /usr/edb/efm-4.0/bin | +| Libraries | /usr/edb/efm-4.0/lib | +| Cluster configuration files | /etc/edb/efm-4.0 | +| Logs | /var/log/efm- 4.0 | +| Lock files | /var/lock/efm-4.0 | +| Log rotation file | /etc/logrotate.d/efm-4.0 | +| sudo configuration file | /etc/sudoers.d/efm-40 | +| Binary to access VIP without sudo | /usr/edb/efm-4.0/bin/secure | diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx new file mode 100644 index 00000000000..77f0a39f8c9 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx @@ -0,0 +1,77 @@ +--- +title: "Encrypting Your Database Password" +--- + + + +Failover Manager requires you to encrypt your database password before including it in the cluster properties file. Use the [efm utility](../../07_using_efm_utility/#efm_encrypt) (located in the `/usr/edb/efm-4.0/bin` directory) to encrypt the password. When encrypting a password, you can either pass the password on the command line when you invoke the utility, or use the `EFMPASS` environment variable. + +To encrypt a password, use the command: + +```text +# efm encrypt [ --from-env ] +``` + +Where `` specifies the name of the Failover Manager cluster. + +If you include the `--from-env` option, you must export the value you wish to encrypt before invoking the encryption utility. For example: + +```text +export EFMPASS=password +``` + +If you do not include the `--from-env` option, Failover Manager will prompt you to enter the database password twice before generating an encrypted password for you to place in your cluster property file. When the utility shares the encrypted password, copy and paste the encrypted password into the cluster property files. + +!!! Note + Many Java vendors ship their version of Java with full-strength encryption included, but not enabled due to export restrictions. If you encounter an error that refers to an illegal key size when attempting to encrypt the database password, you should download and enable a Java Cryptography Extension (JCE) that provides an unlimited policy for your platform. + +The following example demonstrates using the encrypt utility to encrypt a password for the `acctg` cluster: + +```text +# efm encrypt acctg +This utility will generate an encrypted password for you to place in + your EFM cluster property file: +/etc/edb/efm-4.0/acctg.properties +Please enter the password and hit enter: +Please enter the password again to confirm: +The encrypted password is: 516b36fb8031da17cfbc010f7d09359c +Please paste this into your acctg.properties file +db.password.encrypted=516b36fb8031da17cfbc010f7d09359c +``` + +!!! Note + The utility will notify you if a properties file does not exist. 
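+
+When you receive the encrypted value, it goes into the cluster properties file as shown below; the cluster name `acctg` and the hash are the illustrative values from the sample output above:
+
+```text
+# In /etc/edb/efm-4.0/acctg.properties
+db.password.encrypted=516b36fb8031da17cfbc010f7d09359c
+```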
+ +After receiving your encrypted password, paste the password into the properties file and start the Failover Manager service. If there is a problem with the encrypted password, the Failover Manager service will not start: + +```text +[witness@localhost ~]# systemctl start edb-efm-4.0 +Job for edb-efm-4.0.service failed because the control process exited with error code. See "systemctl status edb-efm-4.0.service" and "journalctl -xe" for details. +``` + +If you receive this message when starting the Failover Manager service, please see the startup log (located in `/var/log/efm-4.0/startup-efm.log`) for more information. + +If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, startup information is also available with the following command: + +```text +systemctl status edb-efm-4.0 +``` + +To prevent a cluster from inadvertently connecting to the database of another cluster, the cluster name is incorporated into the encrypted password. If you modify the cluster name, you will need to re-encrypt the database password and update the cluster properties file. + +**Using the EFMPASS Environment Variable** + +The following example demonstrates using the --from-env environment variable when encrypting a password. Before invoking the `efm encrypt` command, set the value of `EFMPASS` to the password (`1safepassword`): + +```text +# export EFMPASS=1safepassword +``` + +Then, invoke `efm encrypt`, specifying the `--from-env` option: + +```text +# efm encrypt acctg --from-env +# 7ceecd8965fa7a5c330eaa9e43696f83 +``` + +The encrypted password (`7ceecd8965fa7a5c330eaa9e43696f83`) is returned as a text value; when using a script, you can check the exit code of the command to confirm that the command succeeded. A successful execution returns `0`. diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx new file mode 100644 index 00000000000..a447ad897d6 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx @@ -0,0 +1,1186 @@ +--- +title: "The Cluster Properties File" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/cluster_properties.html" +--- + + + +Each node in a Failover Manager cluster has a properties file (by default, named `efm.properties`) that contains the properties of the individual node on which it resides. The Failover Manager installer creates a file template for the properties file named `efm.properties.in` in the `/etc/edb/efm-4.0` directory. + +After completing the Failover Manager installation, you must make a working copy of the template before modifying the file contents: + +```text +# cp /etc/edb/efm-4.0/efm.properties.in /etc/edb/efm-4.0/efm.properties +``` + +After copying the template file, change the owner of the file to `efm`: + +```text +# chown efm:efm efm.properties +``` + +!!! Note + By default, Failover Manager expects the cluster properties file to be named `efm.properties`. If you name the properties file something other than `efm.properties`, you must modify the service script or unit file to instruct Failover Manager to use a different name. + +After creating the cluster properties file, add (or modify) configuration parameter values as required. 
For detailed information about each property, see [Specifying Cluster Properties](#specifying-cluster-properties). + +The property files are owned by `root`. The Failover Manager service script expects to find the files in the `/etc/edb/efm-4.0 directory`. If you move the property file to another location, you must create a symbolic link that specifies the new location. + +!!! Note + All user scripts referenced in the properties file will be invoked as the Failover Manager user. + + + +## Specifying Cluster Properties + +You can use the properties listed in the cluster properties file to specify connection properties and behaviors for your Failover Manager cluster. Modifications to property settings will be applied when Failover Manager starts. If you modify a property value you must restart Failover Manager to apply the changes. + +Property values are case-sensitive. Note that while Postgres uses quoted strings in parameter values, Failover Manager does not allow quoted strings in property values. For example, while you might specify an IP address in a Postgres configuration parameter as: + +> `listen_addresses='192.168.2.47'` + +Failover Manager requires that the value *not* be enclosed in quotes: + +> `bind.address=192.168.2.54:7800` + +Use the properties in the `efm.properties` file to specify connection, administrative, and operational details for Failover Manager. + +**Legends**: In the following table: + +- `A`: Required on Primary or Standby node +- `W`: Required on Witness node +- `Y` : Yes + +| **Property Name** | **A** | **W** | **Default Value** | **Comments** | +| ------------------------------------------------------------- | ----- | ----- | ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [db.user](#db_user) | Y | Y | | Username for the database | +| [db.password.encrypted](#db_password_encrypted) | Y | Y | | Password encrypted using 'efm encrypt' | +| [db.port](#db_port) | Y | Y | | This value must be same for all the agents | +| [db.database](#db_database) | Y | Y | | Database name | +| [db.service.owner](#db_service_owner) | Y | | | Owner of $PGDATA dir for db.database | +| [db.service.name](#db_service_name) | | | | Required if running the database as a service | +| [db.bin](#db_bin) | Y | | | Directory containing the pg_controldata/pg_ctl commands such as '/usr/edb/as12/bin' | +| [db.data.dir](#db_data_dir) | Y | | | Same as the output of query 'show data_directory;' | +| [db.config.dir](#db_config_dir) | | | | Same as the output of query 'show config_file;'. 
Should be specified if it is not same as *db.data.dir* | +| [jdbc.sslmode](#jdbc_sslmode) | Y | Y | disable | See the [note](#jdbc_note) | +| [user.email](#user_email) | | | | This value must be same for all the agents; can be left blank if using a notification script | +| [from.email](#from_email) | | | [efm@localhost](mailto:efm@localhost) | Leave blank to use the default [efm@localhost](mailto:efm@localhost) | +| [notification.level](#notification_level) | Y | Y | INFO | See the [list of notifications](../../10_notifications/#notifications) | +| [notification.text.prefix](#notification_text_prefix) | | | | | +| [script.notification](#script_notification) | | | | Required if user.email property is not used; both parameters can be used together | +| [bind.address](#bind_address) | Y | Y | | Example: <ip_address>:<port> | +| [external.address](#external_address) | | | | Example: <ip_address/hostname> | +| [admin.port](#admin_port) | Y | Y | 7809 | Modify if the default port is already in use | +| [is.witness](#is_witness) | Y | Y | | See description | +| [local.period](#local_period) | Y | | 10 | | +| [local.timeout](#local_timeout) | Y | | 60 | | +| [local.timeout.final](#local_timeout_final) | Y | | 10 | | +| [remote.timeout](#remote_timeout) | Y | Y | 10 | | +| [node.timeout](#node_timeout) | Y | Y | 50 | This value must be same for all the agents | +| [encrypt.agent.messages](#encrypt_agent_messages) | Y | Y | false | This value must be same for all the agents | +| [stop.isolated.primary](#stop_isolated_primary) | Y | | true | | +| [stop.failed.primary](#stop_failed_primary) | Y | | true | | +| [primary.shutdown.as.failure](#primary_shutdown_as_failure) | Y | Y | false | | +| [update.physical.slots.period](#update_physical_slots_period) | Y | | 0 | | +| [ping.server.ip](#ping_server_ip) | Y | Y | 8.8.8.8 | | +| [ping.server.command](#ping_server_command) | Y | Y | /bin/ping -q -c3 -w5 | | +| [auto.allow.hosts](#auto_allow_hosts) | Y | Y | false | | +| [stable.nodes.file](#stable_nodes_file) | Y | Y | false | | +| [db.reuse.connection.count](#db_reuse_connection_count) | Y | | 0 | | +| [auto.failover](#auto_failover) | Y | Y | true | | +| [auto.reconfigure](#auto_reconfigure) | Y | | true | This value must be same for all the agents | +| [promotable](#promotable) | Y | | true | | +| [use.replay.tiebreaker](#use_replay_tiebreaker) | Y | Y | true | This value must be same for all the agents | +| [standby.restart.delay](#standby_restart_delay) | | | 0 | | +| [application.name](#application_name) | | | | Set to replace the application_name portion of the primary_conninfo entry with this property value before starting the original primary database as a standby. 
| +| [restore.command](#restore_command) | | | | Example: restore.command=scp <db_service_owner>@%h: <archive_path>/%f %p | +| [reconfigure.num.sync](#reconfigure_num_sync) | Y | | false | | +| [reconfigure.sync.primary](#reconfigure_sync_primary) | Y | | false | | +| [minimum.standbys](#minimum_standbys) | Y | Y | 0 | This value must be same for all the nodes | +| [recovery.check.period](#recovery_check_period) | Y | | 1 | | +| [restart.connection.timeout](#restart_connection_timeout) | | | 60 | | +| [auto.resume.period](#auto_resume_period) | Y | | 0 | | +| [virtual.ip](#virtual_ip) | | | (see virtual.ip.single) | Leave blank if you do not specify a VIP | +| [virtual.ip.interface](#virtual_ip) | | | | Required if you specify a VIP | +| [virtual.ip.prefix](#virtual_ip) | | | | Required if you specify a VIP | +| [virtual.ip.single](#virtual_ip) | Y | Y | Yes | This value must be same for all the nodes | +| [check.vip.before.promotion](#check_vip_before_promotion) | Y | Y | Yes | | +| [script.load.balancer.attach](#script_load_balancer) | | | | Example: script.load.balancer.attach= /<path>/<attach_script> %h %t | +| [script.load.balancer.detach](#script_load_balancer) | | | | Example: script.load.balancer.detach= /<path>/<detach_script> %h %t | +| [script.fence](#script_fence) | | | | Example: script.fence= /<path>/<script_name> %p %f | +| [script.post.promotion](#script_post_promotion) | | | | Example: script.post.promotion= /<path>/<script_name> %f %p | +| [script.resumed](#script_resumed) | | | | Example: script.resumed= /<path>/<script_name> | +| [script.db.failure](#script_db_failure) | | | | Example: script.db.failure= /<path>/<script_name> | +| [script.primary.isolated](#script_primary_isolated) | | | | Example: script.primary.isolated= /<path>/<script_name> | +| [script.remote.pre.promotion](#script_remote_pre_promotion) | | | | Example: script.remote.pre.promotion= /<path>/<script_name> %p | +| [script.remote.post.promotion](#script_remote_post_promotion) | | | | Example: script.remote.post.promotion= /<path>/<script_name> %p | +| [script.custom.monitor](#script_custom_monitor) | | | | Example: script.custom.monitor= /<path>/<script_name> | +| [custom.monitor.interval](#script_custom_monitor) | | | | Required if a custom monitoring script is specified | +| [custom.monitor.timeout](#script_custom_monitor) | | | | Required if a custom monitoring script is specified | +| [custom.monitor.safe.mode](#script_custom_monitor) | | | | Required if a custom monitoring script is specified | +| [sudo.command](#sudo_command) | Y | Y | sudo | | +| [sudo.user.command](#sudo_command) | Y | Y | sudo -u %u | | +| [lock.dir](#lock_dir) | | | | If not specified, defaults to '/var/lock/efm-<version>' | +| [log.dir](#log_dir) | | | | If not specified, defaults to '/var/log/efm-<version>' | +| [syslog.host](#syslog_logging) | | | localhost | | +| [syslog.port](#syslog_logging) | | | 514 | | +| [syslog.protocol](#syslog_logging) | | | | | +| [syslog.facility](#syslog_logging) | | | UDP | | +| [file.log.enabled](#logtype_enabled) | Y | Y | true | | +| [syslog.enabled](#logtype_enabled) | Y | Y | false | | +| [jgroups.loglevel](#loglevel) | | | info | | +| [efm.loglevel](#loglevel) | | | info | | +| [jvm.options](#jvm_options) | | | -Xmx128m | | + +**Cluster Properties** + + + + + + + + + +Use the following properties to specify connection details for the Failover Manager cluster: + +```text +# The value for the password property should be the output from +# 'efm encrypt' -- do not include a cleartext 
password here. To +# prevent accidental sharing of passwords among clusters, the +# cluster name is incorporated into the encrypted password. If +# you change the cluster name (the name of this file), you must +# encrypt the password again with the new name. +# The db.port property must be the same for all nodes. +db.user= +db.password.encrypted= +db.port= +db.database= +``` + +The `db.user` specified must have sufficient privileges to invoke selected PostgreSQL commands on behalf of Failover Manager. For more information, please see [Prerequisites](../../02_failover_manager_overview/01_prerequisites/#prerequisites). + +For information about encrypting the password for the database user, see [Encrypting Your Database Password](01_encrypting_database_password/#encrypting_database_password). + + + +Use the `db.service.owner` property to specify the name of the operating system user that owns the cluster that is being managed by Failover Manager. This property is not required on a dedicated witness node. + +```text +# This property tells EFM which OS user owns the $PGDATA dir for +# the 'db.database'. By default, the owner is either 'postgres' +# for PostgreSQL or 'enterprisedb' for EDB Postgres Advanced +# Server. However, if you have configured your db to run as a +# different user, you will need to copy the /etc/sudoers.d/efm-XX +# conf file to grant the necessary permissions to your db owner. +# +# This username must have write permission to the +# 'db.data.dir' specified below. +db.service.owner= +``` + + + +Specify the name of the database service in the `db.service.name` property if you use the service or systemctl command when starting or stopping the service. + +```text +# Specify the proper service name in order to use service commands +# rather than pg_ctl to start/stop/restart a database. For example, if +# this property is set, then 'service restart' or 'systemctl +# restart ' +# (depending on OS version) will be used to restart the database rather +# than pg_ctl. +# This property is required if running the database as a service. +db.service.name= +``` + + + +You should use the same service control mechanism (pg_ctl, service, or systemctl) each time you start or stop the database service. If you use the `pg_ctl` program to control the service, specify the location of the `pg_ctl` program in the `db.bin` property. + +```text +# Specify the directory containing the pg_controldata/pg_ctl commands, +# for example: +# /usr/edb/as11/bin. Unless the db.service.name property is used, the +# pg_ctl command is used to start/stop/restart databases as needed +# after a failover or switchover. This property is required. +db.bin= +``` + + + +Use the `db.data.dir` property to specify the location to which a recovery file will be written on the Primary node of the cluster during promotion. This property is required on primary and standby nodes; it is not required on a dedicated witness node. + +```text +# For database version 12 and up, this is the directory where a +# standby.signal file will exist for a standby node. For previous +# versions, this is the location of the db recovery.conf file on +# the node. +# After a failover, the recovery.conf files on remaining standbys are +# changed to point to the new primary db (a copy of the original is made +# first). On a primary node, a recovery.conf file will be written during +# failover and promotion to ensure that the primary node can not be +# restarted as the primary database. 
+# This corresponds to database environment variable PGDATA and should +# be same as the output of query 'show data_directory;' on respective +# database. +db.data.dir= +``` + + + +Use the `db.config.dir` property to specify the location of database configuration files if they are not stored in the same directory as the `recovery.conf` or `standby.signal` file. This should be the value specified by the `config_file` parameter directory of your Advanced Server or PostgreSQL installation. This value will be used as the location of the Postgres `data` directory when stopping, starting, or restarting the database. + +```text +# Specify the location of database configuration files if they are +# not contained in the same location as the recovery.conf or +# standby.signal file. This is most likely the case for Debian +# installations. The location specified will be used as the -D value +# (the location of the data directory for the cluster) when calling +# pg_ctl to start or stop the database. If this property is blank, +# the db.data.dir location specified by the db.data.dir property will +# be used. This corresponds to the output of query 'show config_file;' +# on respective database. +db.config.dir= +``` + +For more information about database configuration files, visit the [PostgreSQL website](https://www.postgresql.org/docs/current/runtime-config-file-locations.html). + + + +Use the `jdbc.sslmode` property to instruct Failover Manager to use SSL connections; by default, SSL is disabled. + +```text +# Use the jdbc.sslmode property to enable ssl for EFM +# connections. Setting this property to anything but 'disable' +# will force the agents to use 'ssl=true' for all JDBC database +# connections (to both local and remote databases). +# Valid values are: +# +# disable - Do not use ssl for connections. +# verify-ca - EFM will perform CA verification before allowing +# the certificate. +# require - Verification will not be performed on the server +# certificate. +jdbc.sslmode=disable +``` + + + +!!! Note + If you set the value of `jdbc.sslmode` to `verify-ca` and you want to use Java trust store for certificate validation, you need to set the following value: + + `jdbc.properties=sslfactory=org.postgresql.ssl.DefaultJavaSSLFactory` + +For information about configuring and using SSL, please see: + +> + +and + +> + + + +Use the `user.email` property to specify an email address (or multiple email addresses) that will receive any notifications sent by Failover Manager. + +```text +# Email address(es) for notifications. The value of this +# property must be the same across all agents. Multiple email +# addresses must be separated by space. If using a notification +# script instead, this property can be left blank. +user.email= +``` + + + +The `from.email` property specifies the value that will be used as the sender's address on any email notifications from Failover Manager. You can: + +- leave `from.email` blank to use the default value (`efm@localhost`). +- specify a custom value for the email address. +- specify a custom email address, using the `%h` placeholder to represent the name of the node host (e.g., [example@%h](mailto:example@%h)). The placeholder will be replaced with the name of the host as returned by the Linux hostname utility. + +For more information about notifications, see [Notifications](../../10_notifications/#notifications). + +```text +# Use the from.email property to specify the from email address that +# will be used for email notifications. 
Use the %h placeholder to +# represent the name of the node host (e.g. example@%h). The +# placeholder will be replaced with the name of the host as returned +# by the hostname command. +# Leave blank to use the default, efm@localhost. +from.email= +``` + + + +Use the `notification.level` property to specify the minimum severity level at which Failover Manager will send user notifications or when a notification script is called. For a complete list of notifications, please see [Notifications](../../10_notifications/#notifications). + +```text +# Minimum severity level of notifications that will be sent by +# the agent. The minimum level also applies to the notification +# script (below). Valid values are INFO, WARNING, and SEVERE. +# A list of notifications is grouped by severity in the user's +# guide. +notification.level=INFO +``` + + + +Use the `notification.text.prefix` property to specify the text to be added to the beginning of every notification. + +```text +# Text to add to the beginning of every notification. This could +# be used to help identify what the cluster is used for, the role +# of this node, etc. To use multiple lines, add a backslash \ to +# the end of a line of text. To include a newline use \n. +# Example: +# notification.text.prefix=Development cluster for Example dept.\n\ +# Used by Dev and QA \ +# See Example group for questions. +notification.text.prefix= +``` + + + +Use the `script.notification` property to specify the path to a user-supplied script that acts as a notification service; the script will be passed a message subject and a message body. The script will be invoked each time Failover Manager generates a user notification. + +```text +# Absolute path to script run for user notifications. +# +# This is an optional user-supplied script that can be used for +# notifications instead of email. This is required if not using +# email notifications. Either/both can be used. The script will +# be passed two parameters: the message subject and the message +# body. +script.notification= +``` + + + +The `bind.address` property specifies the IP address and port number of the agent on the current node of the Failover Manager cluster. + +```text +# This property specifies the ip address and port that jgroups +# will bind to on this node. The value is of the form +# :. +# Note that the port specified here is used for communicating +# with other nodes, and is not the same as the admin.port below, +# used only to communicate with the local agent to send control +# signals. +# For example, :7800 +bind.address= +``` + + + +Use the `external.address` property to specify the IP address or hostname that should be used for communication with all other Failover Manager agents in a NAT environment. + +```text +# This is the ip address/hostname to be used for communication with all +# other Failover Manager agents. All traffic towards this address +# should be routed by the network to the bind.address of the node. +# The value is in the ip/hostname format only. This address will be +# used in scenarios where nodes are on different networks and broadcast +# an IP address other than the bind.address to the external world. +external.address= +``` + + + +Use the `admin.port` property to specify a port on which Failover Manager listens for administrative commands. + +```text +# This property controls the port binding of the administration +# server which is used for some commands (ie cluster-status). The +# default is 7809; you can modify this value if the port is +# already in use. 
+admin.port=7809 +``` + + + +Set the `is.witness` property to true to indicate that the current node is a witness node. If is.witness is true, the local agent will not check to see if a local database is running. + +```text +# Specifies whether or not this is a witness node. Witness nodes +# do not have local databases running. +is.witness= +``` + +The Postgres `pg_is_in_recovery()` function is a boolean function that reports the recovery state of a database. The function returns `true` if the database is in recovery, or false if the database is not in recovery. When an agent starts, it connects to the local database and invokes the `pg_is_in_recovery()` function. If the server responds true, the agent assumes the role of standby; if the server responds false, the agent assumes the role of primary. If there is no local database, the agent will assume an idle state. + +!!! Note + If `is.witness` is `true`, Failover Manager will not check the recovery state. + + + + + + + +The following properties specify properties that apply to the local server: + +- The `local.period` property specifies how many seconds between attempts to contact the database server. +- The `local.timeout` property specifies how long an agent will wait for a positive response from the local database server. +- The `local.timeout.final` property specifies how long an agent will wait after the above-mentioned previous checks have failed to contact the database server on the current node. If a response is not received from the database within the number of seconds specified by the `local.timeout.final` property, the database is assumed to have failed. + +For example, given the default values of these properties, a check of the local database happens once every 10 seconds. If an attempt to contact the local database does not come back positive within 60 seconds, Failover Manager makes a final attempt to contact the database. If a response is not received within 10 seconds, Failover Manager declares database failure and notifies the administrator listed in the user.email property. These properties are not required on a dedicated witness node. + +```text +# These properties apply to the connection(s) EFM uses to monitor +# the local database. Every 'local.period' seconds, a database +# check is made in a background thread. If the main monitoring +# thread does not see that any checks were successful in +# 'local.timeout' seconds, then the main thread makes a final +# check with a timeout value specified by the +# 'local.timeout.final' value. All values are in seconds. +# Whether EFM uses single or multiple connections for database +# checks is controlled by the 'db.reuse.connection.count' +# property. +local.period=10 +local.timeout=60 +local.timeout.final=10 +``` + +If necessary, you should modify these values to suit your business model. + + + +Use the `remote.timeout` property to specify how many seconds an agent waits for a response from a remote database server (i.e., how long a standby agent waits to verify that the primary database is actually down before performing failover). The `remote.timeout` property value specifies a timeout value for agent-to-agent communication; other timeout properties in the cluster properties file specify values for agent-to-database communication. + +```text +# Timeout for a call to check if a remote database is responsive. +# For example, this is how long a standby would wait for a +# DB ping request from itself and the witness to the primary DB +# before performing failover. 
+remote.timeout=10 +``` + + + +Use the `node.timeout` property to specify the number of seconds that an agent will wait for a response from a node when determining if a node has failed. + +```text +# The total amount of time in seconds to wait before determining +# that a node has failed or been disconnected from this node. +# +# The value of this property must be the same across all agents. +node.timeout=50 +``` + + + +Use the `encrypt.agent.messages` property to specify if the messages sent between agents should be encrypted. + +```text +# Set to true to encrypt messages that are sent between agents. +# This property must be the same on all agents or else the agents +# will not be able to connect. +encrypt.agent.messages=false +``` + + + +Use the `stop.isolated.primary` property to instruct Failover Manager to shut down the database if a primary agent detects that it is isolated. When true (the default), Failover Manager will stop the database before invoking the script specified in the `script.primary.isolated` property. + +```text +# Shut down the database after a primary agent detects that it has +# been isolated from the majority of the efm cluster. If set to +# true, efm will stop the database before running the +# 'script.primary.isolated' script, if a script is specified. +stop.isolated.primary=true +``` + + + +Use the `stop.failed.primary` property to instruct Failover Manager to attempt to shut down a primary database if it can not reach the database. If `true`, Failover Manager will run the script specified in the `script.db.failure` property after attempting to shut down the database. + +```text +# Attempt to shut down a failed primary database after EFM can no +# longer connect to it. This can be used for added safety in the +# case a failover is caused by a failure of the network on the +# primary node. +# If specified, a 'script.db.failure' script is run after this attempt. +stop.failed.primary=true +``` + + + +Use the `primary.shutdown.as.failure` parameter to indicate that any shutdown of the Failover Manager agent on the primary node should be treated as a failure. If this parameter is set to `true` and the primary agent stops (for any reason), the cluster will attempt to confirm if the database on the primary node is running: + +- If the database is reached, a notification will be sent informing you of the agent status. +- If the database is not reached, a failover will occur. + +```text +# Treat a primary agent shutdown as an agent failure. This can be set +# to true to treat a primary agent shutdown as a failure situation, +# e.g. during the shutdown of a node, accidental or otherwise. +# Caution should be used when using this feature, as it could +# cause an unwanted promotion in the case of performing primary +# database maintenance. +# Please see the user's guide for more information. +primary.shutdown.as.failure=false +``` + +The `primary.shutdown.as.failure` property is meant to catch user error, rather than failures, such as the accidental shutdown of a primary node. The proper shutdown of a node can appear to the rest of the cluster like a user has stopped the primary Failover Manager agent (for example to perform maintenance on the primary database). If you set the `primary.shutdown.as.failure` property to `true`, care must be taken when performing maintenance. 
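+
+The next paragraph spells out the safe maintenance sequence; as a hedged sketch using the packaged service-unit names (the database unit name is an example only, adjust it for your installation):
+
+```text
+# On the Primary node; unit names are illustrative.
+systemctl stop edb-efm-4.0   # stop the primary agent first
+# wait for the notification that the agent failed but the database is still running
+systemctl stop edb-as-12     # then it is safe to stop the primary database
+```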
+ +To perform maintenance on the primary database when `primary.shutdown.as.failure` is `true`, you should stop the primary agent and wait to receive a notification that the primary agent has failed but the database is still running. Then it is safe to stop the primary database. Alternatively, you can use the `efm stop-cluster` command to stop all of the agents without failure checks being performed. + + + +Use the `update.physical.slots.period` property to define the slot advance frequency for database version 12 and above. When `update.physical.slots.period` is set to a non-zero value, the primary agent will read the current `restart_lsn` of the physical replication slots after every `update.physical.slots.period` seconds, and send this information with its `pg_current_wal_lsn` and `primary_slot_name` (If it is set in the postgresql.conf file) to the standbys. If physical slots do not already exist, setting this parameter to a non-zero value will create the slots and then update the `restart_lsn parameter` for these slots. A non-promotable standby will not create new slots but will update them if they exist. + +```text +# Period in seconds between having the primary agent update promotable +# standbys with physical replication slot information so that +# the cluster will continue to use replication slots after a failover. +# Set to zero to turn off. +update.physical.slots.period=0 +``` + + + +Use the `ping.server.ip` property to specify the IP address of a server that Failover Manager can use to confirm that network connectivity is not a problem. + +```text +# This is the address of a well-known server that EFM can ping +# in an effort to determine network reachability issues. It +# might be the IP address of a nameserver within your corporate +# firewall or another server that *should* always be reachable +# via a 'ping' command from each of the EFM nodes. +# +# There are many reasons why this node might not be considered +# reachable: firewalls might be blocking the request, ICMP might +# be filtered out, etc. +# +# Do not use the IP address of any node in the EFM cluster +# (primary, standby, or witness) because this ping server is meant +# to provide an additional layer of information should the EFM +# nodes lose sight of each other. +# +# The installation default is Google's DNS server. +ping.server.ip=8.8.8.8 +``` + + + +Use the `ping.server.command` property to specify the command used to test network connectivity. + +```text +# This command will be used to test the reachability of certain +# nodes. +# +# Do not include an IP address or hostname on the end of +# this command - it will be added dynamically at runtime with the +# values contained in 'virtual.ip' and 'ping.server.ip'. +# +# Make sure this command returns reasonably quickly - test it +# from a shell command line first to make sure it works properly. +ping.server.command=/bin/ping -q -c3 -w5 +``` + + + +Use the `auto.allow.hosts` property to instruct the server to use the addresses specified in the .nodes file of the first node started to update the allowed host list. Enabling this property (setting `auto.allow.hosts` to true) can simplify cluster start-up. + +```text +# Have the first node started automatically add the addresses +# from its .nodes file to the allowed host list. This will make +# it faster to start the cluster when the initial set of hosts +# is already known. 
+auto.allow.hosts=false +``` + + + +Use the `stable.nodes.file` property to instruct the server to not rewrite the nodes file when a node joins or leaves the cluster. This property is most useful in clusters with unchanging IP addresses. + +```text +# When set to true, EFM will not rewrite the .nodes file whenever +# new nodes join or leave the cluster. This can help starting a +# cluster in the cases where it is expected for member addresses +# to be mostly static, and combined with 'auto.allow.hosts' makes +# startup easier when learning failover manager. +stable.nodes.file=false +``` + + + +The `db.reuse.connection.count` property allows the administrator to specify the number of times Failover Manager reuses the same database connection to check the database health. The default value is 0, indicating that Failover Manager will create a fresh connection each time. This property is not required on a dedicated witness node. + +```text +# This property controls how many times a database connection is +# reused before creating a new one. If set to zero, a new +# connection will be created every time an agent pings its local +# database. +db.reuse.connection.count=0 +``` + + + +The `auto.failover` property enables automatic failover. By default, auto.failover is set to true. + +```text +# Whether or not failover will happen automatically when the primary +# fails. Set to false if you want to receive the failover notifications +# but not have EFM actually perform the failover steps. +# The value of this property must be the same across all agents. +auto.failover=true +``` + + + +Use the `auto.reconfigure` property to instruct Failover Manager to enable or disable automatic reconfiguration of remaining Standby servers after the primary standby is promoted to Primary. Set the property to `true` to enable automatic reconfiguration (the default) or `false` to disable automatic reconfiguration. This property is not required on a dedicated witness node. If you are using Advanced Server or PostgreSQL version 11 or earlier, the `recovery.conf` file will be backed up during the reconfiguration process. + +```text +# After a standby is promoted, Failover Manager will attempt to +# update the remaining standbys to use the new primary. For database +# versions before 12, Failover Manager will back up recovery.conf. +# Then it will change the host parameter of the primary_conninfo entry +# in recovery.conf or postgresql.auto.conf, and restart the database. +# The restart command is contained in either the efm_db_functions or +# efm_root_functions file; default when not running db as an os +# service is: "pg_ctl restart -m fast -w -t -D " +# where the timeout is the local.timeout property value and the +# directory is specified by db.data.dir. To turn off +# automatic reconfiguration, set this property to false. +auto.reconfigure=true +``` + +!!! Note + `primary_conninfo` is a space-delimited list of keyword=value pairs. + + + +Use the `promotable` property to indicate that a node should not be promoted. The `promotable` property is ignored when a primary agent is started. This simplifies switching back to the original primary after a switchover or failover. To override the setting, use the efm set-priority command at runtime; for more information about the efm set-priority command, see [Using the efm Utility](../../07_using_efm_utility/#using_efm_utility). + +```text +# A standby with this set to false will not be added to the +# failover priority list, and so will not be available for +# promotion. 
The property will be used whenever an agent starts +# as a standby or resumes as a standby after being idle. After +# startup/resume, the node can still be added or removed from the +# priority list with the 'efm set-priority' command. This +# property is required for all non-witness nodes. +promotable=true +``` + + + +If the same amount of data has been written to more than one standby node, and a failover occurs, the `use.replay.tiebreaker` value will determine how Failover Manager selects a replacement primary. Set the `use.replay.tiebreaker` property to `true` to instruct Failover Manager to failover to the node that will come out of recovery faster, as determined by the log sequence number. To ignore the log sequence number and promote a node based on user preference, set `use.replay.tiebreaker` to `false`. + +```text +# Use replay LSN value for tiebreaker when choosing a standby to +# promote before using failover priority. Set this property to true to +# consider replay location as more important than failover priority +# (as seen in cluster-status command) when choosing the "most ahead" +# standby to promote. +use.replay.tiebreaker=true +``` + + + +Use the `standby.restart.delay` property to specify the time in seconds that the standby should wait before it gets reconfigured (stopped/started) to follow the new primary after a promotion. + +```text +# Time in seconds for this standby to delay restarting to follow the +# primary after a promotion. This can be used to have standbys restart +# at different times to increase availability. Caution should be used +# when using this feature, as a delayed standby will not be following +# the new primary and care must be taken that the new primary retains +# enough WAL for the standby to follow it. +# Please see the user's guide for more information. +standby.restart.delay=0 +``` + + + +You can use the `application.name` property to provide the name of an application that will be copied to the `primary_conninfo` parameter before restarting an old primary node as a standby. + +```text +# During a switchover, recovery settings are copied from a standby +# to the original primary. If the application.name property is set, +# Failover Manager will replace the application_name portion of the +# primary_conninfo entry with this property value before starting +# the original primary database as a standby. If this property is +# not set, Failover Manager will remove the parameter value +# from primary_conninfo. +application.name= +``` + +!!! Note + You should set the `application.name` property on the primary and any promotable standby; in the event of a failover/switchover, the primary node could potentially become a standby node again. + + + +Use the `restore.command` property to instruct Failover Manager to update the `restore_command` when a new primary is promoted. `%h` represents the address of the new primary; Failover Manager will replace `%h` with the address of the new primary. `%f` and `%p` are placeholders used by the server. If the property is left blank, Failover Manager will not update the `restore_command` values on the standbys after a promotion. + +See the PostgreSQL documentation for more information about using a [restore_command](https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-ARCHIVE-RECOVERY). + +```text +# If the restore_command on a standby restores directly from the +# primary node, use this property to have Failover Manager change +# the command when a new primary is promoted. 
+# +# Use the %h placeholder to represent the address of the new primary. +# During promotion it will be replaced with the address of the new +# primary. +# +# If not specified, failover manager will not change the +# restore_command value, if any, on standby nodes. +# +# Example: +# restore.command=scp @%h:/var/lib/edb/as12/data/archive/%f %p +restore.command= +``` + + + +The database parameter `synchronous_standby_names` on the primary node specifies the names and count of the synchronous standby servers that will confirm receipt of data, to ensure that the primary nodes can accept write transactions. When `reconfigure.num.sync` property is set to true, Failover Manager will reduce the number of synchronous standby servers and reload the configuration of the primary node to reflect the current value. + +```text +# Reduce num_sync when the number of synchronous standbys drops below +# the value required by the primary database. If set to true, Failover +# Manager will reduce the number of standbys needed in the primary's +# synchronous_standby_names property and reload the primary +# configuration. Failover Manager will not reduce the number below 1, +# taking the primary out of synchronous replication, unless the +# reconfigure.sync.primary property is also set to true. +# To raise num_sync, see the reconfigure.num.sync.max property below. +reconfigure.num.sync=false +``` + + + +Set the `reconfigure.sync.primary` property to `true` to take the primary database out of synchronous replication mode if the number of standby nodes drops below the level required. Set `reconfigure.sync.primary` to `false` to send a notification if the standby count drops, but not interrupt synchronous replication. + +```text +# Take the primary database out of synchronous replication mode when +# needed. If set to true, Failover Manager will clear the +# synchronous_standby_names configuration parameter on the primary +# if the number of synchronous standbys drops below the required +# level for the primary to accept writes. +# If set to false, Failover Manager will detect the situation but +# will only send a notification if the standby count drops below the +# required level. +# +# CAUTION: TAKING THE PRIMARY DATABASE OUT OF SYNCHRONOUS MODE MEANS +# THERE MAY ONLY BE ONE COPY OF DATA. DO NOT MAKE THIS CHANGE UNLESS +# YOU ARE SURE THIS IS OK. +reconfigure.sync.primary=false +``` + + + +Use the `minimum.standbys` property to specify the minimum number of standby nodes that will be retained on a cluster; if the standby count drops to the specified minimum, a replica node will not be promoted in the event of a failure of the primary node. + +```text +# Instead of setting specific standbys as being unavailable for +# promotion, this property can be used to set a minimum number +# of standbys that will not be promoted. Set to one, for +# example, promotion will not happen if it will drop the number +# of standbys below this value. This property must be the same on +# each node. +minimum.standbys=0 +``` + + + +Use the `recovery.check.period` property to specify the number of seconds that Failover Manager will wait before checks to see if a database is out of recovery. + +```text +# Time in seconds between checks to see if a promoting database +# is out of recovery. 
+recovery.check.period=1 +``` + + + +Use the `restart.connection.timeout` property to specify the number of seconds that Failover Manager will attempt to connect to a newly reconfigured primary or standby node while the database on that node prepares to accept connections. + +```text +# Time in seconds to keep trying to connect to a database after a +# start or restart command returns successfully but the database +# is not ready to accept connections yet (a rare occurance). This +# applies to standby databases that are restarted when being +# reconfigured for a new primary, and to primary databases that +# are stopped and started as standbys during a switchover. +# This retry mechanism is unrelated to the auto.resume.period +# parameter. +restart.connection.timeout=60 +``` + + + +Use the `auto.resume.period` property to specify the number of seconds (after a monitored database fails and an agent has assumed an idle state, or when starting in IDLE mode) during which an agent will attempt to resume monitoring that database. + +```text +# Period in seconds for IDLE agents to try to resume monitoring +# after a database failure or when starting in IDLE mode. Set to +# 0 for agents to not try to resume (in which case the +# 'efm resume ' command is used after bringing a +# database back up). +auto.resume.period=0 +``` + + + +Failover Manager provides support for clusters that use a virtual IP. If your cluster uses a virtual IP, provide the host name or IP address in the `virtual.ip` property; specify the corresponding prefix in the `virtual.ip.prefix` property. If `virtual.ip` is left blank, virtual IP support is disabled. + +Use the `virtual.ip.interface` property to provide the network interface used by the VIP. + +The specified virtual IP address is assigned only to the primary node of the cluster. If you specify `virtual.ip.single=true`, the same VIP address will be used on the new primary in the event of a failover. Specify a value of false to provide a unique IP address for each node of the cluster. + +For information about using a virtual IP address, see [Using Failover Manager with Virtual IP Addresses](../05_using_vip_addresses/#using_vip_addresses). + +```text +# These properties specify the IP and prefix length that will be +# remapped during failover. If you do not use a VIP as part of +# your failover solution, leave the virtual.ip property blank to +# disable Failover Manager support for VIP processing (assigning, +# releasing, testing reachability, etc). +# +# If you specify a VIP, the interface and prefix are required. +# +# If you specify a host name, it will be resolved to an IP address +# when acquiring or releasing the VIP. If the host name resolves +# to more than one IP address, there is no way to predict which +# address Failover Manager will use. +# +# By default, the virtual.ip and virtual.ip.prefix values must be +# the same across all agents. If you set virtual.ip.single to +# false, you can specify unique values for virtual.ip and +# virtual.ip.prefix on each node. +# +# If you are using an IPv4 address, the virtual.ip.interface value +# should not contain a secondary virtual ip id (do not include +# ":1", etc). +virtual.ip= +virtual.ip.interface= +virtual.ip.prefix= +virtual.ip.single=true +``` + +!!! Note + If a primary agent is started and the node does not currently have the VIP, the EFM agent will acquire it. Stopping a primary agent does not drop the VIP from the node. 
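+
+As an example, a cluster in which all nodes share one IPv4 VIP on interface `eth0` might use settings similar to the following. The address, interface, and prefix shown here are illustrative (they match the VIP testing example elsewhere in this guide) and must be replaced with values from your own network:
+
+```text
+virtual.ip=172.24.38.239
+virtual.ip.interface=eth0
+virtual.ip.prefix=24
+virtual.ip.single=true
+```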
+
+
+
+Set the `check.vip.before.promotion` property to false to indicate that Failover Manager will not check whether a VIP is in use before assigning it to a new primary in the event of a failure. Note that this could result in multiple nodes broadcasting on the same VIP address; unless the primary node is isolated or can be shut down via another process, you should set this property to true.
+
+```text
+# Whether to check if the VIP (when used) is still in use before
+# promoting after a primary failure. Turning this off may allow
+# the new primary to have the VIP even though another node is also
+# broadcasting it. This should only be used in environments where
+# it is known that the failed primary node will be isolated or
+# shut down through other means.
+check.vip.before.promotion=true
+```
+
+
+Use the following properties to provide paths to scripts that reconfigure your load balancer in the event of a switchover or primary failure scenario. The scripts will also be invoked in the event of a standby failure. If you are using these properties, they should be provided on every node of the cluster (primary, standby, and witness) to ensure that if a database node fails, another node will call the detach script with the failed node's address.
+
+You do not need to set these properties if you are using Pgpool as your load balancer solution and have set the Pgpool integration properties.
+
+Provide a script name after the `script.load.balancer.attach` property to identify a script that will be invoked when a node should be attached to the load balancer. Use the `script.load.balancer.detach` property to specify the name of a script that will be invoked when a node should be detached from the load balancer. Include the `%h` placeholder to represent the IP address of the node that is being attached or removed from the cluster. Include the `%t` placeholder to instruct Failover Manager to include a p (for a primary node) or an s (for a standby node) in the string.
+
+```text
+# Absolute path to load balancer scripts
+# The attach script is called when a node should be attached to
+# the load balancer, for example after a promotion. The detach
+# script is called when a node should be removed, for example
+# when a database has failed or is about to be stopped. Use %h to
+# represent the IP/hostname of the node that is being
+# attached/detached. Use %t to represent the type of node being
+# attached or detached: the letter m will be passed in for primary nodes
+# and the letter s for standby nodes.
+#
+# Example:
+# script.load.balancer.attach=/somepath/attachscript %h %t
+script.load.balancer.attach=
+script.load.balancer.detach=
+```
+
+
+`script.fence` specifies the path to an optional user-supplied script that will be invoked during the promotion of a standby node to a primary node.
+
+```text
+# absolute path to fencing script run during promotion
+#
+# This is an optional user-supplied script that will be run
+# during failover on the standby database node. If left blank,
+# no action will be taken. If specified, EFM will execute this
+# script before promoting the standby.
+#
+# Parameters can be passed into this script for the failed primary
+# and new primary node addresses. Use %p for new primary and %f
+# for failed primary. On a node that has just been promoted, %p
+# should be the same as the node's efm binding address.
+#
+# Example:
+# script.fence=/somepath/myscript %p %f
+#
+# NOTE: FAILOVER WILL NOT OCCUR IF THIS SCRIPT RETURNS A NON-ZERO EXIT
+# CODE.
+script.fence= +``` + +
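+
+For illustration only, a minimal fencing script might simply verify that the failed primary is no longer reachable before allowing the promotion to continue. The path `/somepath/myscript` and the ping-based check below are assumptions; a production script would typically isolate or power off the failed node instead:
+
+```text
+#!/bin/bash
+# Hypothetical fencing script registered as:
+#   script.fence=/somepath/myscript %p %f
+# Failover Manager substitutes the new primary address (%p) and the
+# failed primary address (%f), so they arrive here as $1 and $2.
+NEW_PRIMARY="$1"
+FAILED_PRIMARY="$2"
+
+# A non-zero exit code stops the failover, so refuse to proceed while
+# the failed primary still answers pings.
+if /bin/ping -q -c3 -w5 "$FAILED_PRIMARY" > /dev/null 2>&1; then
+    echo "Failed primary $FAILED_PRIMARY is still reachable; blocking promotion of $NEW_PRIMARY"
+    exit 1
+fi
+exit 0
+```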
+ +Use the `script.post.promotion` property to specify the path to an optional user-supplied script that will be invoked after a standby node has been promoted to primary. + +```text +# Absolute path to fencing script run after promotion +# +# This is an optional user-supplied script that will be run after +# failover on the standby node after it has been promoted and +# is no longer in recovery. The exit code from this script has +# no effect on failover manager, but will be included in a +# notification sent after the script executes. +# +# Parameters can be passed into this script for the failed primary +# and new primary node addresses. Use %p for new primary and %f +# for failed primary. On a node that has just been promoted, %p +# should be the same as the node's efm binding address. +# +# Example: +# script.post.promotion=/somepath/myscript %f %p +script.post.promotion= +``` + + + +Use the `script.resumed property` to specify an optional path to a user-supplied script that will be invoked when an agent resumes monitoring of a database. + +```text +# Absolute path to resume script +# +# This script is run before an IDLE agent resumes +# monitoring its local database. +script.resumed= +``` + + + +Use the `script.db.failure` property to specify the complete path to an optional user-supplied script that Failover Manager will invoke if an agent detects that the database that it monitors has failed. + +```text +# Absolute path to script run after database failure +# This is an optional user-supplied script that will be run after +# an agent detects that its local database has failed. +script.db.failure= +``` + + + +Use the `script.primary.isolated` property to specify the complete path to an optional user-supplied script that Failover Manager will invoke if the agent monitoring the primary database detects that the primary is isolated from the majority of the Failover Manager cluster. This script is called immediately after the VIP is released (if a VIP is in use). + +```text +# Absolute path to script run on isolated primary +# This is an optional user-supplied script that will be run after +# a primary agent detects that it has been isolated from the +# majority of the efm cluster. +script.primary.isolated= +``` + + + +Use the `script.remote.pre.promotion` property to specify the path and name of a script that will be invoked on any agent nodes not involved in the promotion when a node is about to promote its database to primary. + +Include the %p placeholder to identify the address of the new primary node. + +```text +# Absolute path to script invoked on non-promoting agent nodes +# before a promotion. +# +# This optional user-supplied script will be invoked on other +# agents when a node is about to promote its database. The exit +# code from this script has no effect on Failover Manager, but +# will be included in a notification sent after the script +# executes. +# +# Pass a parameter (%p) with the script to identify the new +# primary node address. +# +# Example: +# script.remote.pre.promotion=/path_name/script_name %p +script.remote.pre.promotion= +``` + +
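+
+As a sketch of how such a script could be used, the hypothetical example below simply records the upcoming promotion in the local system log on each non-promoting node. The path `/path_name/script_name` follows the example in the comment above, and the use of `logger` is an assumption rather than a Failover Manager requirement:
+
+```text
+#!/bin/bash
+# Hypothetical script registered as:
+#   script.remote.pre.promotion=/path_name/script_name %p
+# The new primary address (%p) arrives as the first argument.
+NEW_PRIMARY="$1"
+
+# Record the event; the exit code of this script does not affect the
+# promotion itself.
+logger -t efm "Promotion in progress; new primary will be ${NEW_PRIMARY}"
+exit 0
+```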
+ +Use the `script.remote.post.promotion` property to specify the path and name of a script that will be invoked on any non-primary nodes after a promotion occurs. + +Include the %p placeholder to identify the address of the new primary node. + +```text +# Absolute path to script invoked on non-primary agent nodes +# after a promotion. +# +# This optional user-supplied script will be invoked on nodes +# (except the new primary) after a promotion occurs. The exit code +# from this script has no effect on Failover Manager, but will be +# included in a notification sent after the script executes. +# +# Pass a parameter (%p) with the script to identify the new +# primary node address. +# +# Example: +# script.remote.post.promotion=/path_name/script_name %p +script.remote.post.promotion= +``` + + + +Use the `script.custom.monitor` property to provide the name and location of an optional script that will be invoked on regular intervals (specified in seconds by the `custom.monitor.interval` property). + +Use `custom.monitor.timeout` to specify the maximum time that the script will be allowed to run; if script execution does not complete within the time specified, Failover Manager will send a notification. + +Set `custom.monitor.safe.mode` to `true` to instruct Failover Manager to report non-zero exit codes from the script, but not promote a standby as a result of an exit code. + +```text +# Absolute path to a custom monitoring script. +# +# Use script.custom.monitor to specify the location and name of +# an optional user-supplied script that will be invoked +# periodically to perform custom monitoring tasks. A non-zero +# exit value means that a check has failed; this will be treated +# as a database failure. On a primary node, script failure will +# cause a promotion. On a standby node script failure will +# generate a notification and the agent will become IDLE. +# +# The custom.monitor.\* properties are required if a custom +# monitoring script is specified: +# +# custom.monitor.interval is the time in seconds between executions +# of the script. +# +# custom.monitor.timeout is a timeout value in seconds for how +# long the script will be allowed to run. If script execution +# exceeds the specified time, the task will be stopped and a +# notification sent. Subsequent runs will continue. +# +# If custom.monitor.safe.mode is set to true, non-zero exit codes +# from the script will be reported but will not cause a promotion +# or be treated as a database failure. This allows testing of the +# script without affecting EFM. +# +script.custom.monitor= +custom.monitor.interval= +custom.monitor.timeout= +custom.monitor.safe.mode= +``` + + + +Use the `sudo.command` property to specify a command that will be invoked by Failover Manager when performing tasks that require extended permissions. Use this option to include command options that might be specific to your system authentication. + +Use the `sudo.user.command` property to specify a command that will be invoked by Failover Manager when executing commands that will be performed by the database owner. + +```text +# Command to use in place of 'sudo' if desired when efm runs +# the efm_db_functions or efm_root_functions, or efm_address +# scripts. +# Sudo is used in the following ways by efm: +# +# sudo /usr/edb/efm-/bin/efm_address +# sudo /usr/edb/efm-/bin/efm_root_functions +# sudo -u /usr/edb/efm-/bin/efm_db_functions +# +# 'sudo' in the first two examples will be replaced by the value +# of the sudo.command property. 
'sudo -u ' will +# be replaced by the value of the sudo.user.command property. +# The '%u' field will be replaced with the db owner. +sudo.command=sudo +sudo.user.command=sudo -u %u +``` + + + +Use the `lock.dir` property to specify an alternate location for the Failover Manager lock file; the file prevents Failover Manager from starting multiple (potentially orphaned) agents for a single cluster on the node. + +```text +# Specify the directory of lock file on the node. Failover +# Manager creates a file named .lock at this location to +# avoid starting multiple agents for same cluster. If the path +# does not exist, Failover Manager will attempt to create it. If +# not specified defaults to '/var/lock/efm-' +lock.dir= +``` + + + +Use the `log.dir` property to specify the location to which agent log files will be written; Failover Manager will attempt to create the directory if the directory does not exist. + +```text +# Specify the directory of agent logs on the node. If the path +# does not exist, Failover Manager will attempt to create it. If +# not specified defaults to '/var/log/efm-'. (To store +# Failover Manager startup logs in a custom location, modify the +# path in the service script to point to an existing, writable +# directory.) +# If using a custom log directory, you must configure +# logrotate separately. Use 'man logrotate' for more information. +log.dir= +``` + + + +After enabling the UDP or TCP protocol on a Failover Manager host, you can enable logging to syslog. Use the `syslog.protocol` parameter to specify the protocol type (UDP or TCP) and the `syslog.port` parameter to specify the listener port of the syslog host. The `syslog.facility` value may be used as an identifier for the process that created the entry; the value must be between LOCAL0 and LOCAL7. + +```text +# Syslog information. The syslog service must be listening on +# the port for the given protocol, which can be UDP or TCP. +# The facilities supported are LOCAL0 through LOCAL7. +syslog.host=localhost +syslog.port=514 +syslog.protocol=UDP +syslog.facility=LOCAL1 +``` + + + +Use the `file.log.enabled` and `syslog.enabled` properties to specify the type of logging that you wish to implement. Set `file.log.enabled` to `true` to enable logging to a file; enable the UDP protocol or TCP protocol and set `syslog.enabled` to `true` to enable logging to syslog. You can enable logging to both a file and syslog. + +```text +# Which logging is enabled. +file.log.enabled=true +syslog.enabled=false +``` + +For more information about configuring syslog logging, see [Enabling syslog Log File Entries](../../09_controlling_logging/#enabling_syslog). + + + +Use the `jgroups.loglevel` and `efm.loglevel` parameters to specify the level of detail logged by Failover Manager. The default value is INFO. For more information about logging, see [Controlling Logging](../../09_controlling_logging/#controlling_logging). + +```text +# Logging levels for JGroups and EFM. +# Valid values are: TRACE, DEBUG, INFO, WARN, ERROR +# Default value: INFO +# It is not necessary to increase these values unless debugging a +# specific issue. If nodes are not discovering each other at +# startup, increasing the jgroups level to DEBUG will show +# information about the TCP connection attempts that may help +# diagnose the connection failures. +jgroups.loglevel=INFO +efm.loglevel=INFO +``` + + + +Use the `jvm.options` property to pass JVM-related configuration information. 
The default setting specifies the amount of memory that the Failover Manager agent will be allowed to use. + +```text +# Extra information that will be passed to the JVM when starting +# the agent. +jvm.options=-Xmx128m +``` + diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx new file mode 100644 index 00000000000..c0456f97d8c --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx @@ -0,0 +1,81 @@ +--- +title: "Encrypting Your Database Password" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/encrypting_database_password.html" +--- + + + +Failover Manager requires you to encrypt your database password before including it in the cluster properties file. Use the [efm utility](../07_using_efm_utility/#efm_encrypt) (located in the `/usr/edb/efm-4.0/bin` directory) to encrypt the password. When encrypting a password, you can either pass the password on the command line when you invoke the utility, or use the `EFMPASS` environment variable. + +To encrypt a password, use the command: + +```text +# efm encrypt [ --from-env ] +``` + +Where `` specifies the name of the Failover Manager cluster. + +If you include the `--from-env` option, you must export the value you wish to encrypt before invoking the encryption utility. For example: + +```text +export EFMPASS=password +``` + +If you do not include the `--from-env` option, Failover Manager will prompt you to enter the database password twice before generating an encrypted password for you to place in your cluster property file. When the utility shares the encrypted password, copy and paste the encrypted password into the cluster property files. + +!!! Note + Many Java vendors ship their version of Java with full-strength encryption included, but not enabled due to export restrictions. If you encounter an error that refers to an illegal key size when attempting to encrypt the database password, you should download and enable a Java Cryptography Extension (JCE) that provides an unlimited policy for your platform. + +The following example demonstrates using the encrypt utility to encrypt a password for the `acctg` cluster: + +```text +# efm encrypt acctg +This utility will generate an encrypted password for you to place in + your EFM cluster property file: +/etc/edb/efm-4.0/acctg.properties +Please enter the password and hit enter: +Please enter the password again to confirm: +The encrypted password is: 516b36fb8031da17cfbc010f7d09359c +Please paste this into your acctg.properties file +db.password.encrypted=516b36fb8031da17cfbc010f7d09359c +``` + +!!! Note + The utility will notify you if a properties file does not exist. + +After receiving your encrypted password, paste the password into the properties file and start the Failover Manager service. If there is a problem with the encrypted password, the Failover Manager service will not start: + +```text +[witness@localhost ~]# systemctl start edb-efm-4.0 +Job for edb-efm-4.0.service failed because the control process exited with error code. See "systemctl status edb-efm-4.0.service" and "journalctl -xe" for details. 
+``` + +If you receive this message when starting the Failover Manager service, please see the startup log (located in `/var/log/efm-4.0/startup-efm.log`) for more information. + +If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, startup information is also available with the following command: + +```text +systemctl status edb-efm-4.0 +``` + +To prevent a cluster from inadvertently connecting to the database of another cluster, the cluster name is incorporated into the encrypted password. If you modify the cluster name, you will need to re-encrypt the database password and update the cluster properties file. + +**Using the EFMPASS Environment Variable** + +The following example demonstrates using the --from-env environment variable when encrypting a password. Before invoking the `efm encrypt` command, set the value of `EFMPASS` to the password (`1safepassword`): + +```text +# export EFMPASS=1safepassword +``` + +Then, invoke `efm encrypt`, specifying the `--from-env` option: + +```text +# efm encrypt acctg --from-env +# 7ceecd8965fa7a5c330eaa9e43696f83 +``` + +The encrypted password (`7ceecd8965fa7a5c330eaa9e43696f83`) is returned as a text value; when using a script, you can check the exit code of the command to confirm that the command succeeded. A successful execution returns `0`. diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_cluster_members.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_cluster_members.mdx new file mode 100644 index 00000000000..5b7b5b030bf --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_cluster_members.mdx @@ -0,0 +1,35 @@ +--- +title: "The Cluster Members File" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/cluster_members.html" +--- + + + +Each node in a Failover Manager cluster has a cluster members file (by default, named efm.nodes) that contains a list of the current Failover Manager cluster members. When an agent starts, it uses the file to locate other cluster members. The Failover Manager installer creates a file template for the cluster members file named `efm.nodes.in` in the `/etc/edb/efm-4.0` directory. + +After completing the Failover Manager installation, you must make a working copy of the template: + +```text +cp /etc/edb/efm-4.0/efm.nodes.in /etc/edb/efm-4.0/efm.nodes +``` + +After copying the template file, change the owner of the file to `efm`: + +```text +chown efm:efm efm.nodes +``` + +By default, Failover Manager expects the cluster members file to be named `efm.nodes`. If you name the cluster members file something other than `efm.nodes`, you must modify the Failover Manager service script to instruct Failover Manager to use the new name. + +The cluster members file on the first node started can be empty; this node will become the Membership Coordinator. On each subsequent node, the cluster member file must contain the address and port number of the Membership Coordinator. Each entry in the cluster members file must be listed in an address:port format, with multiple entries separated by white space. + +The agents will update the contents of the `efm.nodes` file to match the current members of the cluster. As agents join or leave the cluster, the `efm.nodes` files on other agents are updated to reflect the current cluster membership. 
If you invoke the [efm stop-cluster](../07_using_efm_utility/#efm_stop_cluster) command, Failover Manager does not modify the file. + +If the Membership Coordinator leaves the cluster, another node will assume the role. You can use the [efm cluster-status](../07_using_efm_utility/#efm_cluster_status) command to find the address of the Membership Coordinator. If a node joins or leaves a cluster while an agent is down, before starting that agent you must manually ensure that the file includes at least the current Membership Coordinator's address and port. + +If you know the addresses and ports of the nodes that will be joining the cluster, you can include the addresses in the cluster members file at any time. At startup, any addresses that do not identify cluster members will be ignored unless the `auto.allow.hosts` property (in the [cluster properties file](01_cluster_properties/#auto_allow_hosts)) is set to `true`. + +If the `stable.nodes.file` property (located in the [cluster properties file](01_cluster_properties/#auto_allow_hosts)) is set to `true`, the agent will not update the `.nodes` file when cluster members join or leave the cluster; this behavior is most useful when the IP addresses of cluster members do not change often. diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_extending_efm_permissions.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_extending_efm_permissions.mdx new file mode 100644 index 00000000000..f49e8489cfb --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_extending_efm_permissions.mdx @@ -0,0 +1,116 @@ +--- +title: "Extending Failover Manager Permissions" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/extending_efm_permissions.html" +--- + + + +During the Failover Manager installation, the installer creates a user named `efm`. `efm` does not have sufficient privileges to perform management functions that are normally limited to the database owner or operating system superuser. + +- When performing management functions requiring database superuser privileges, `efm` invokes the `efm_db_functions` script. +- When performing management functions requiring operating system superuser privileges, `efm` invokes the `efm_root_functions` script. +- When assigning or releasing a virtual IP address, `efm` invokes the `efm_address` script. +- When enabling Pgpool integration, `efm` invokes the `efm_pgpool_functions` script. + +The `efm_db_functions` or `efm_root_functions` scripts perform management functions on behalf of the `efm` user. + +The sudoers file contains entries that allow the user `efm` to control the Failover Manager service for clusters owned by `postgres` or `enterprisedb`. You can modify a copy of the sudoers file to grant permission to manage Postgres clusters owned by other users to `efm`. + +The `efm-41` file is located in `/etc/sudoers.d`, and contains the following entries: + +```text +# Copyright EnterpriseDB Corporation, 2014-2020. All Rights Reserved. +# +# Do not edit this file. Changes to the file may be overwritten +# during an upgrade. +# +# This file assumes you are running your efm cluster as user 'efm'. If not, +# then you will need to copy this file. + +# Allow user 'efm' to sudo efm_db_functions as either 'postgres' or 'enterprisedb'. 
+# If you run your db service under a non-default account, you will need to copy +# this file to grant the proper permissions and specify the account in your efm +# cluster properties file by changing the 'db.service.owner' property. +efm ALL=(postgres) NOPASSWD: /usr/edb/efm-4.0/bin/efm_db_functions +efm ALL=(enterprisedb) NOPASSWD: /usr/edb/efm-4.0/bin/efm_db_functions + +# Allow user 'efm' to sudo efm_root_functions as 'root' to write/delete the PID file, +# validate the db.service.owner property, etc. +efm ALL=(ALL) NOPASSWD: /usr/edb/efm-4.0/bin/efm_root_functions + +# Allow user 'efm' to sudo efm_address as root for VIP tasks. +efm ALL=(ALL) NOPASSWD: /usr/edb/efm-4.0/bin/efm_address + +# Allow user 'efm' to sudo efm_pgpool_functions as root for pgpool tasks. +efm ALL=(ALL) NOPASSWD: /usr/edb/efm-4.0/bin/efm_pgpool_functions + +# relax tty requirement for user 'efm' +Defaults:efm !requiretty +``` + +If you are using Failover Manager to monitor clusters that are owned by users other than `postgres` or `enterprisedb`, make a copy of the `efm-41` file, and modify the content to allow the user to access the `efm_functions` script to manage their clusters. + +If an agent cannot start because of permission problems, make sure the default `/etc/sudoers` file contains the following line at the end of the file: + +```text +## Read drop-in files from /etc/sudoers.d (the # here does not # mean a comment) + +#includedir /etc/sudoers.d +``` + + + +## Running Failover Manager without sudo + +By default, Failover Manager uses sudo to securely manage access to system functionality. If you choose to configure Failover Manager to run without sudo access, Note that root access is still required to: + +- install the Failover Manager RPM. +- perform Failover Manager setup tasks. + +To run Failover Manager without sudo, you must select a database process owner that will have privileges to perform management functions on behalf of Failover Manager. The user could be the default database superuser (for example, enterprisedb or postgres) or another privileged user. After selecting the user: + +1. Use the following command to add the user to the `efm` group: + + ```text + usermod -a -G efm enterprisedb + ``` + + This should allow the user to write to `/var/run/efm-4.0` and `/var/lock/efm-4.0`. + +2. If you are reusing a cluster name, remove any previously created log files; the new user will not be able to write to log files created by the default (or other) owner. + +3. Copy the cluster properties template file and the nodes template file: + + ```text + su - enterprisedb + + cp /etc/edb/efm-4.0/efm.properties.in .properties + + cp /etc/edb/efm-4.0/efm.nodes.in /.nodes + ``` + +Then, modify the cluster properties file, providing the name of the user in the `db.service.owner` property. You must also ensure that the `db.service.name` property is blank; without sudo, you cannot run services without root access. + +After modifying the configuration, the new user can control Failover Manager with the following command: + +```text +/usr/edb/efm-4.0/bin/runefm.sh start|stop .properties +``` + +Where `` specifies the full path of the cluster properties file. Note that the user must ensure that the full path to the properties file must be provided whenever the non-default user is controlling agents or using the efm script. + +To allow the new user to manage Failover Manager as a service, you must provide a custom script or unit file. 
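+
+A minimal sketch of such a unit file is shown below. The unit name, the `enterprisedb` account, and the properties-file path are assumptions for this example; the stock `edb-efm-4.0` unit file shipped with Failover Manager is the better starting point for the remaining service options:
+
+```text
+# Hypothetical unit file, e.g. /etc/systemd/system/efm-acctg-nosudo.service
+[Unit]
+Description=EDB Failover Manager (acctg cluster, non-default owner, no sudo)
+After=network.target
+
+[Service]
+# Run as the database owner, which was added to the efm group above.
+User=enterprisedb
+Group=efm
+# Type=oneshot with RemainAfterExit assumes runefm.sh returns after
+# launching or stopping the agent; adjust to match your environment.
+Type=oneshot
+RemainAfterExit=yes
+# runefm.sh must be given the full path to the cluster properties file.
+ExecStart=/usr/edb/efm-4.0/bin/runefm.sh start /home/enterprisedb/acctg.properties
+ExecStop=/usr/edb/efm-4.0/bin/runefm.sh stop /home/enterprisedb/acctg.properties
+
+[Install]
+WantedBy=multi-user.target
+```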
+ +Failover Manager uses a binary named `manage-vip` that resides in `/usr/edb/efm-4.0/bin/secure/` to perform VIP management operations without sudo privileges. This script uses setuid to acquire with the privileges needed to manage Virtual IP addresses. + +- This directory is only accessible to root and users in the `efm` group. +- The binary is only executable by root and the `efm` group. + +For security reasons, we recommend against modifying the access privileges of the `/usr/edb/efm-4.0/bin/secure/` directory or the `manage-vip` script. + +For more information about using Failover Manager without sudo, visit: + + diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/05_using_vip_addresses.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/05_using_vip_addresses.mdx new file mode 100644 index 00000000000..99f5a68f4a2 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/05_using_vip_addresses.mdx @@ -0,0 +1,148 @@ +--- +title: "Using Failover Manager with Virtual IP Addresses" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/using_vip_addresses.html" +--- + + + +Failover Manager uses the `efm_address` script to assign or release a virtual IP address. + +!!! Note + Virtual IP addresses are not supported by many cloud providers. In those environments, another mechanism should be used (such as an Elastic IP Address on AWS), which can be changed when needed by a fencing or post-promotion script. + +By default, the script resides in: + + `/usr/edb/efm-4.0/bin/efm_address` + +Failover Manager uses the following command variations to assign or release an IPv4 or IPv6 IP address. + +To assign a virtual IPv4 IP address: + +```text +# efm_address add4 / +``` + +To assign a virtual IPv6 IP address: + +```text +# efm_address add6 / +``` + +To release a virtual address: + +```text +# efm_address del +``` + +Where: + + `` matches the name specified in the `virtual.ip.interface` property in the cluster properties file. + + `` or `` matches the value specified in the `virtual.ip` property in the cluster properties file. + + `prefix` matches the value specified in the `virtual.ip.prefix` property in the cluster properties file. + +For more information about properties that describe a virtual IP address, see [The Cluster Properties File](01_cluster_properties/#virtual_ip). + +You must invoke the `efm_address` script as the root user. The `efm` user is created during the installation, and is granted privileges in the sudoers file to run the `efm_address` script. For more information about the sudoers file, see [Extending Failover Manager Permissions](04_extending_efm_permissions/#extending_efm_permissions). + +!!! Note + If a VIP address (or any address other than the `bind.address`) is assigned to a node, the operating system can choose the source address used when contacting the database. Be sure that you modify the `pg_hba.conf` file on all monitored databases to allow contact from all addresses within your replication scenario. + +**Testing the VIP** + +When using a virtual IP (VIP) address with Failover Manager, it is important to test the VIP functionality manually before starting Failover manager. This will catch any network-related issues before they cause a problem during an actual failover. While testing the VIP, ensure that Failover Manager is not running. 
+ +The following steps test the actions that Failover Manager will take. The example uses the following property values: + +```text +virtual.ip=172.24.38.239 +virtual.ip.interface=eth0 +virtual.ip.prefix=24 +ping.server.command=/bin/ping -q -c3 -w5 +``` + +!!! Note + The `virtual.ip.prefix` specifies the number of significant bits in the virtual Ip address. + +When instructed to ping the VIP from a node, use the command defined by the `ping.server.command` property. + +1. Ping the VIP from all nodes to confirm that the address is not already in use: + +```text +# /bin/ping -q -c3 -w5 172.24.38.239 +PING 172.24.38.239 (172.24.38.239) 56(84) bytes of data. +--- 172.24.38.239 ping statistics --- +4 packets transmitted, 0 received, +3 errors, 100% packet loss, + time 3000ms +``` + +You should see 100% packet loss. + +2. Run the `efm_address add4` command on the Primary node to assign the VIP and then confirm with ip address: + +```text +# efm_address add4 eth0 172.24.38.239/24 +# ip address + +eth0 Link encap:Ethernet HWaddr 36:AA:A4:F4:1C:40 +inet addr:172.24.38.239 Bcast:172.24.38.255 +... +``` + +3. Ping the VIP from the other nodes to verify that they can reach the VIP: + +```text +# /bin/ping -q -c3 -w5 172.24.38.239 +PING 172.24.38.239 (172.24.38.239) 56(84) bytes of data. +--- 172.24.38.239 ping statistics --- +3 packets transmitted, 3 received, 0% packet loss, time 1999ms +rtt min/avg/max/mdev = 0.023/0.025/0.029/0.006 ms +``` + +You should see no packet loss. + +4. Use the `efm_address del` command to release the address on the primary node and confirm the node has been released with ip address: + +```text +# efm_address del eth0 172.24.38.239/24 +# ip address +eth0 Link encap:Ethernet HWaddr 22:00:0A:89:02:8E +inet addr:10.137.2.142 Bcast:10.137.2.191 +... +``` + +The output from this step should not show an eth0 interface + +5. Repeat step 3, this time verifying that the Standby and Witness do not see the VIP in use: + +```text +# /bin/ping -q -c3 -w5 172.24.38.239 +PING 172.24.38.239 (172.24.38.239) 56(84) bytes of data. +--- 172.24.38.239 ping statistics --- +4 packets transmitted, 0 received, +3 errors, 100% packet loss, + time 3000ms +``` + +You should see 100% packet loss. Repeat this step on all nodes. + +6. Repeat step 2 on all Standby nodes to assign the VIP to every node. You can ping the VIP from any node to verify that it is in use. + +```text +# efm_address add4 eth0 172.24.38.239/24 +# ip address + +eth0 Link encap:Ethernet HWaddr 36:AA:A4:F4:1C:40 +inet addr:172.24.38.239 Bcast:172.24.38.255 +... +``` + +After the test steps above, release the VIP from any non-Primary node before attempting to start Failover Manager. + +!!! Note + The network interface used for the VIP does not have to be the same interface used for the Failover Manager agent's `bind.address` value. The primary agent will drop the VIP as needed during a failover, and Failover Manager will verify that the VIP is no longer available before promoting a standby. A failure of the bind address network will lead to primary isolation and failover. + +If the VIP uses a different interface, you may encounter a timing condition where the rest of the cluster checks for a reachable VIP before the primary agent has dropped it. In this case, EFM will retry the VIP check for the number of seconds specified in the `node.timeout` property to help ensure that a failover happens as expected. 
diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx new file mode 100644 index 00000000000..a89e7f687b9 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx @@ -0,0 +1,20 @@ +--- +title: "Configuring Failover Manager" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/configuring_efm.html" +--- + + + +Configurable Failover Manager properties are specified in two user-modifiable files: + +- [efm.properties](01_cluster_properties/#cluster_properties) +- [efm.nodes](03_cluster_members/#cluster_members) + +
+ +cluster_properties encrypting_database_password cluster_members extending_efm_permissions using_vip_addresses + +
diff --git a/product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx b/product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx new file mode 100644 index 00000000000..25fb74d736d --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx @@ -0,0 +1,318 @@ +--- +title: "Using Failover Manager" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/using_efm.html" +--- + + + +Failover Manager offers support for monitoring and failover of clusters with one or more Standby servers. You can add or remove nodes from the cluster as your demand for resources grows or shrinks. + +If a primary node reboots, Failover Manager may detect the database is down on the Primary node and promote a Standby node to the role of Primary. If this happens, the Failover Manager agent on the (rebooted) Primary node will not get a chance to write the `recovery.conf` file; the rebooted Primary node will return to the cluster as a second Primary node. To prevent this, start the Failover Manager agent before starting the database server. The agent will start in idle mode, and check to see if there is already a primary in the cluster. If there is a primary node, the agent will verify that a `recovery.conf` or `standby.signal` file exists, and the database will not start as a second primary. + +## Managing a Failover Manager Cluster + +Once configured, a Failover Manager cluster requires no regular maintenance. The following sections provide information about performing the management tasks that may occasionally be required by a Failover Manager Cluster. + +By default, [some of the efm commands](07_using_efm_utility/#using_efm_utility) must be invoked by `efm` or an OS superuser; an administrator can selectively permit users to invoke these commands by adding the user to the `efm` group. The commands are: + +- [efm allow-node](07_using_efm_utility/#efm_allow_node) +- [efm disallow-node](07_using_efm_utility/#efm_disallow_node) +- [efm promote](07_using_efm_utility/#efm_promote) +- [efm resume](07_using_efm_utility/#efm_resume) +- [efm set-priority](07_using_efm_utility/#efm_set_priority) +- [efm stop-cluster](07_using_efm_utility/#efm_stop_cluster) +- [efm upgrade-conf](07_using_efm_utility/#efm_upgrade_conf) + + + +### Starting the Failover Manager Cluster + +You can start the nodes of a Failover Manager cluster in any order. + +To start the Failover Manager cluster on RHEL/CentOS 7.x or RHEL/CentOS 8.x, assume superuser privileges, and invoke the command: + +```text +systemctl start edb-efm-4.0 +``` + +If the cluster properties file for the node specifies that `is.witness` is `true`, the node will start as a Witness node. + +If the node is not a dedicated Witness node, Failover Manager will connect to the local database and invoke the `pg_is_in_recovery()` function. If the server responds `false`, the agent assumes the node is a Primary node, and assigns a virtual IP address to the node (if applicable). If the server responds `true`, the Failover Manager agent assumes that the node is a Standby server. If the server does not respond, the agent will start in an idle state. + +After joining the cluster, the Failover Manager agent checks the supplied database credentials to ensure that it can connect to all of the databases within the cluster. If the agent cannot connect, the agent will shut down. 
+ +If a new primary or standby node joins a cluster, all of the existing nodes will also confirm that they can connect to the database on the new node. + + + +!!! Note + If you are running `/var/lock` or `/var/run` on `tmpfs` (Temporary File System), make sure that the systemd service file for Failover Manager has a dependency on `systemd-tmpfiles-setup.service`. + +### Adding Nodes to a Cluster + +You can add a node to a Failover Manager cluster at any time. When you add a node to a cluster, you must modify the cluster to allow the new node, and then tell the new node how to find the cluster. The following steps detail adding a node to a cluster: + +1. Unless `auto.allow.hosts` is set to `true`, use the `efm allow-node` command, to add the address of the new node to the Failover Manager allowed node host list. When invoking the command, specify the cluster name and the address of the new node: + + ```text + efm allow-node
+ ``` + + For more information about using the `efm allow-node` command or controlling a Failover Manager service, see [Using the EFM Utility](07_using_efm_utility/#efm_allow_node). + + Install a Failover Manager agent and configure the cluster properties file on the new node. For more information about modifying the properties file, see [The Cluster Properties File](04_configuring_efm/01_cluster_properties/#cluster_properties). + +2. Configure the cluster members file on the new node, adding an entry for the Membership Coordinator. For more information about modifying the cluster members file, see [The Cluster Members File](04_configuring_efm/03_cluster_members/#cluster_members). + +3. Assume superuser privileges on the new node, and start the Failover Manager agent. To start the Failover Manager cluster on RHEL/CentOS 7.x or RHEL/CentOS 8.x, invoke the command: + + ```text + systemctl start edb-efm-4.0 + ``` + +When the new node joins the cluster, Failover Manager will send a notification to the administrator email provided in the `user.email` property, and/or will invoke the specified notification script. + + + +!!! Note + To be a useful Standby for the current node, the node must be a standby in the PostgreSQL Streaming Replication scenario. + +### Changing the Priority of a Standby + +If your Failover Manager cluster includes more than one Standby server, you can use the `efm set-priority` command to influence the promotion priority of a Standby node. Invoke the command on any existing member of the Failover Manager cluster, and specify a priority value after the IP address of the member. + +For example, the following command instructs Failover Manager that the `acctg` cluster member that is monitoring `10.0.1.9` is the primary Standby `(1)`: + +```text +efm set-priority acctg 10.0.1.9 1 +``` + +You can set the priority of a standby to `0` to make the standby non-promotable. Setting the priority of a standby to a value greater than `0` overrides a property value of `promotable=false`. + +For example, if the properties file on node `10.0.1.10` includes a setting of `promotable=false` and you use `efm set-priority` to set the promotion priority of `10.0.1.10` to be the standby used in the event of a failover, the value designated by the `efm set-priority` command will override the value in the property file: + +```text +efm set-priority acctg 10.0.1.10 1 +``` + +In the event of a failover, Failover Manager will first retrieve information from Postgres streaming replication to confirm which Standby node has the most recent data, and promote the node with the least chance of data loss. If two Standby nodes contain equally up-to-date data, the node with a higher user-specified priority value will be promoted to Primary unless [use.replay.tiebreaker](04_configuring_efm/01_cluster_properties/#use_replay_tiebreaker) is set to `false` . To check the priority value of your Standby nodes, use the command: + +```text +efm cluster-status +``` + + + +!!! Note + The promotion priority may change if a node becomes isolated from the cluster, and later re-joins the cluster. + +### Promoting a Failover Manager Node + +You can invoke `efm promote` on any node of a Failover Manager cluster to start a manual promotion of a Standby database to Primary database. + +Manual promotion should only be performed during a maintenance window for your database cluster. If you do not have an up-to-date Standby database available, you will be prompted before continuing. 
To start a manual promotion, assume the identity of `efm` or the OS superuser, and invoke the command:
+
+```text
+efm promote [-switchover] [-sourcenode ] [-quiet] [-noscripts]
+```
+
+Where:
+
+ `` is the name of the Failover Manager cluster.
+
+ Include the `-switchover` option to reconfigure the original Primary as a Standby. If you include the `-switchover` keyword, the cluster must include a primary node and at least one standby, and the nodes must be in sync.
+
+ Include the `-sourcenode` keyword to specify the node from which the recovery settings will be copied to the primary.
+
+ Include the `-quiet` keyword to suppress notifications during switchover.
+
+ Include the `-noscripts` keyword to instruct Failover Manager not to invoke fencing and post-promotion scripts.
+
+During switchover:
+
+- For server versions 11 and prior, the `recovery.conf` file is copied from an existing standby to the primary node. For server version 12 and later, the `primary_conninfo` and `restore_command` parameters are copied and stored in memory.
+- The primary database is stopped.
+- If you are using a VIP, the address is released from the primary node.
+- A standby is promoted to replace the primary node, and acquires the VIP.
+- The address of the new primary node is added to the `recovery.conf` file or the `primary_conninfo` details are stored in memory.
+- If the `application.name` property is set for this node, the application_name property will be added to the `recovery.conf` file or the `primary_conninfo` information will be stored in memory.
+- If you are using server version 12 or later, the recovery settings that have been stored in memory are written to the `postgresql.auto.conf` file. A `standby.signal` file is created.
+- The old primary is started; the agent will resume monitoring it as a standby.
+
+During a promotion, the Primary agent releases the virtual IP address. If it is not a switchover, a `recovery.conf` file is created in the directory specified by the `db.data.dir` property. The `recovery.conf` file is used to prevent the old primary database from starting until the file is removed, preventing the node from starting as a second primary in the cluster. If the promotion is part of a switchover, recovery settings are handled as described above.
+
+The Primary agent remains running, and assumes a status of Idle.
+
+The Standby agent confirms that the virtual IP address is no longer in use before pinging a well-known address to ensure that the agent is not isolated from the network. The Standby agent runs the fencing script and promotes the Standby database to Primary. The Standby agent then assigns the virtual IP address to the Standby node, and runs the post-promotion script (if applicable).
+
+Note that this command instructs the service to ignore the value specified in the `auto.failover` parameter in the cluster properties file.
+
+To return a node to the role of primary, place the node first in the promotion list:
+
+```text
+efm set-priority
+```
+
+Then, perform a manual promotion:
+
+```text
+efm promote -switchover
+```
+
+For more information about the efm utility, see [Using the EFM Utility](07_using_efm_utility/#using_efm_utility).
+
+
+
+### Stopping a Failover Manager Agent
+
+When you stop an agent, Failover Manager will remove the node's address from the cluster members list on all of the running nodes of the cluster, but will not remove the address from the Failover Manager Allowed node host list.
+
+To stop the Failover Manager agent on RHEL/CentOS 7.x or RHEL/CentOS 8.x, assume superuser privileges, and invoke the command:
+
+```text
+systemctl stop edb-efm-4.0
+```
+
+Until you invoke the `efm disallow-node` command (removing the node's address from the Allowed node host list), you can use the `service edb-efm-4.0 start` command to restart the node at a later date without first running the `efm allow-node` command again.
+
+
+Note that stopping an agent does not signal the cluster that the agent has failed unless the [primary.shutdown.as.failure](04_configuring_efm/01_cluster_properties/#primary_shutdown_as_failure) property is set to `true`.
+
+### Stopping a Failover Manager Cluster
+
+To stop a Failover Manager cluster, connect to any node of a Failover Manager cluster, assume the identity of `efm` or the OS superuser, and invoke the command:
+
+```text
+efm stop-cluster
+```
+
+The command will cause *all* Failover Manager agents to exit. Terminating the Failover Manager agents completely disables all failover functionality.
+
+!!! Note
+    When you invoke the `efm stop-cluster` command, all authorized node information is lost from the Allowed node host list.
+
+### Removing a Node from a Cluster
+
+The `efm disallow-node` command removes the IP address of a node from the Failover Manager Allowed Node host list. Assume the identity of `efm` or the OS superuser on any existing node (that is currently part of the running cluster), and invoke the `efm disallow-node` command, specifying the cluster name and the IP address of the node:
+
+```text
+efm disallow-node
+``` + +The `efm disallow-node` command will not stop a running agent; the service will continue to run on the node until you [stop the agent](#stop_efm_agent). If the agent or cluster is subsequently stopped, the node will not be allowed to rejoin the cluster, and will be removed from the failover priority list (and will be ineligible for promotion). + +After invoking the `efm disallow-node` command, you must use the [efm allow-node](07_using_efm_utility/#efm_allow_node) command to add the node to the cluster again. + + + +## Running Multiple Agents on a Single Node + +You can monitor multiple database clusters that reside on the same host by running multiple Primary or Standby agents on that Failover Manager node. You may also run multiple Witness agents on a single node. To configure Failover Manager to monitor more than one database cluster, while ensuring that Failover Manager agents from different clusters do not interfere with each other, you must: + +1. Create a cluster properties file for each member of each cluster that defines a unique set of properties and the role of the node within the cluster. +2. Create a cluster members file for each member of each cluster that lists the members of the cluster. +3. Customize the unit file (on a RHEL/CentOS 7.x or RHEL/CentOS 8.x system) for each cluster to specify the names of the cluster properties and the cluster members files. +4. Start the services for each cluster. + +The examples that follow uses two database clusters (acctg and sales) running on the same node: + +- Data for `acctg` resides in `/opt/pgdata1`; its server is monitoring port `5444`. +- Data for `sales` resides in `/opt/pgdata2`; its server is monitoring port `5445`. + +To run a Failover Manager agent for both of these database clusters, use the `efm.properties.in` template to create two properties files. Each cluster properties file must have a unique name. For this example, we create `acctg.properties` and `sales.properties` to match the `acctg` and `sales` database clusters. + +The following parameters must be unique in each cluster properties file: + + `admin.port` + + `bind.address` + + `db.port` + + `db.data.dir` + + `virtual.ip` (if used) + + `virtual.ip.interface` (if used) + +Within each cluster properties file, the `db.port` parameter should specify a unique value for each cluster, while the `db.user` and `db.database` parameter may have the same value or a unique value. For example, the `acctg.properties` file may specify: + + `db.user=efm_user` + + `db.password.encrypted=7c801b32a05c0c5cb2ad4ffbda5e8f9a` + + `db.port=5444` + + `db.database=acctg_db` + +While the `sales.properties` file may specify: + + `db.user=efm_user` + + `db.password.encrypted=e003fea651a8b4a80fb248a22b36f334` + + `db.port=5445` + + `db.database=sales_db` + +Some parameters require special attention when setting up more than one Failover Manager cluster agent on the same node. If multiple agents reside on the same node, each port must be unique. Any two ports will work, but it may be easier to keep the information clear if using ports that are not too close to each other. + +When creating the cluster properties file for each cluster, the `db.data.dir` parameters must also specify values that are unique for each respective database cluster. + +The following parameters are used when assigning the virtual IP address to a node. If your Failover Manager cluster does not use a virtual IP address, leave these parameters blank. 
+
+ `virtual.ip`
+
+ `virtual.ip.interface`
+
+ `virtual.ip.prefix`
+
+These parameter values are determined by the virtual IP addresses being used, and may or may not be the same for both `acctg.properties` and `sales.properties`.
+
+After creating the `acctg.properties` and `sales.properties` files, create a service script or unit file for each cluster that points to the respective property files; this step is platform specific. If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, see [RHEL/CentOS 7.x or RHEL/CentOS 8.x](#rhelcentos-7x-or-rhelcentos-8x).
+
+!!! Note
+    If you are using a unit file, you must manually update the file to reflect the new service name when you upgrade Failover Manager.
+
+### RHEL/CentOS 7.x or RHEL/CentOS 8.x
+
+If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, you should copy the `edb-efm-4.0` unit file to a new file with a name that is unique for each cluster. For example, if you have two clusters (named acctg and sales), the unit file names might be:
+
+```text
+/usr/lib/systemd/system/efm-acctg.service
+
+/usr/lib/systemd/system/efm-sales.service
+```
+
+Then, edit the `CLUSTER` variable within each unit file, changing the specified cluster name from `efm` to the new cluster name. For example, for a cluster named `acctg`, the value would specify:
+
+```text
+Environment=CLUSTER=acctg
+```
+
+You must also update the value of the `PIDFile` parameter to specify the new cluster name. For example:
+
+```text
+PIDFile=/var/run/efm-4.0/acctg.pid
+```
+
+After copying the service scripts, use the following commands to enable the services:
+
+```text
+# systemctl enable efm-acctg.service
+
+# systemctl enable efm-sales.service
+```
+
+Then, use the new service scripts to start the agents. For example, you can start the `acctg` agent with the command:
+
+```text
+# systemctl start efm-acctg
+```
+
+For information about customizing a unit file, please visit:
+
+
 diff --git a/product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx b/product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx
new file mode 100644
index 00000000000..fe731b8548d
--- /dev/null
+++ b/product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx
@@ -0,0 +1,142 @@
+---
+title: "Monitoring a Failover Manager Cluster"
+
+legacyRedirectsGenerated:
+ # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/monitoring_efm_cluster.html"
+---
+
+
+
+You can use either the Failover Manager `efm cluster-status` command or the PEM Client graphical interface to check the current status of a monitored node of a Failover Manager cluster.
+
+## Reviewing the Cluster Status Report
+
+The [efm cluster-status](07_using_efm_utility/#efm_cluster_status) command returns a report that contains information about the status of the Failover Manager cluster.
To invoke the command, enter: + +```text +# efm cluster-status +``` + +The following status report is for a cluster named edb that has three nodes running: + +```text +Agent Type Address Agent DB VIP +----------------------------------------------------------------------- +Standby 172.19.10.2 UP UP 192.168.225.190 +Standby 172.19.12.163 UP UP 192.168.225.190 +Primary 172.19.14.9 UP UP 192.168.225.190* + + +Allowed node host list: +172.19.14.9 172.19.12.163 172.19.10.2 + + +Membership coordinator: 172.19.14.9 + + +Standby priority host list: +172.19.12.163 172.19.10.2 + +Promote Status: + +DB Type Address WAL Received LSN WAL Replayed LSN Info +-------------------------------------------------------------------- +Primary 172.19.14.9 0/4000638 +Standby 172.19.12.163 0/4000638 0/4000638 +Standby 172.19.10.2 0/4000638 0/4000638 + + +Standby database(s) in sync with primary. It is safe to promote. +``` + +The cluster status section provides an overview of the status of the agents that reside on each node of the cluster: + +```text +Agent Type Address Agent DB VIP +----------------------------------------------------------------------- +Standby 172.19.10.2 UP UP 192.168.225.190 +Standby 172.19.12.163 UP UP 192.168.225.190 +Primary 172.19.14.9 UP UP 192.168.225.190* +``` + +The asterisk (\*) after the VIP address indicates that the address is available for connections. If a VIP address is not followed by an asterisk, the address has been associated with the node (in the properties file), but the address is not currently in use. + +Failover Manager agents provide the information displayed in the Cluster Status section. + +The `Allowed node host list` and `Standby priority host list` provide an easy way to tell which nodes are allowed to join the cluster, and the promotion order of the nodes. The IP address of the Membership coordinator is also displayed in the report: + +```text +Allowed node host list: +172.19.14.9 172.19.12.163 172.19.10.2 +Membership coordinator: 172.19.14.9 +Standby priority host list: +172.19.12.163 172.19.10.2 +``` + +The `Promote Status` section of the report is the result of a direct query from the node on which you are invoking the cluster-status command to each database in the cluster; the query also returns the transaction log location of each database. Because the queries to each database return at different points in time, the LSNs may not match even if streaming replication is working normally for the cluster. + +```text +Promote Status: + +DB Type Address WAL Received LSN WAL Replayed LSN Info +------------------------------------------------------------------- +Primary 172.19.14.9 0/4000638 +Standby 172.19.12.163 0/4000638 0/4000638 +Standby 172.19.10.2 0/4000638 0/4000638 +``` + +If a database is down (or if the database has been restarted, but the resume command has not yet been invoked), the state of the agent that resides on that host will be Idle. If an agent is idle, the cluster status report will include a summary of the condition of the idle node. For example: + +```text +Agent Type Address Agent DB VIP +----------------------------------------------------- +Idle 172.19.18.105 UP UP 172.19.13.105 +``` + +**Exit Codes** + +The cluster status process returns an exit code that is based on the state of the cluster: + +- An exit code of `0` indicates that all agents are running, and the databases on the Primary and Standby nodes are running and in sync. + +- A non-zero exit code indicates that there is a problem. 
The following problems can trigger a non-zero exit code: + + A database is down or unknown (or has an idle agent). + + Failover Manager cannot decrypt the provided database password. + + There is a problem contacting the databases to get WAL locations. + + There is no Primary agent. + + There are no Standby agents. + + One or more Standby nodes are not in sync with the Primary. + +## Monitoring Streaming Replication with Postgres Enterprise Manager + +If you use Postgres Enterprise Manager (PEM) to monitor your servers, you can configure the Streaming Replication Analysis dashboard (part of the PEM graphical interface) to display the state of a Primary or Standby node that is part of a Streaming Replication scenario. + +![The Streaming Replication dashboard (Primary node)](images/str_replication_dashboard_master.png) + +The Streaming Replication Analysis Dashboard displays statistical information about activity for any monitored server on which streaming replication is enabled. The dashboard header identifies the status of the monitored server (either Replication Primary or Replication Slave), and displays the date and time that the server was last started, the date and time that the page was last updated, and a current count of triggered alerts for the server. + +When reviewing the dashboard for a Replication Slave (a Standby node), a label at the bottom of the dashboard confirms the status of the server. + +![The Streaming Replication dashboard (Standby node)](images/str_replication_dashboard_standby.png) + +By default, the PEM replication probes that provide information for the Streaming Replication Analysis dashboard are disabled. + +To view the Streaming Replication Analysis dashboard for the Primary node of a replication scenario, you must enable the following probes: + +- Streaming Replication +- WAL Archive Status + +To view the Streaming Replication Analysis dashboard for the Standby node of a replication scenario, you must enable the following probes: + +- Streaming Replication Lag Time + +For more information about PEM, please visit the EnterpriseDB website at: + + diff --git a/product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx b/product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx new file mode 100644 index 00000000000..6db8691d3e0 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx @@ -0,0 +1,213 @@ +--- +title: "Using the efm Utility" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/using_efm_utility.html" +--- + + + +Failover Manager provides the efm utility to assist with cluster management. The RPM installer adds the utility to the `/usr/edb/efm-4.0/bin` directory when you install Failover Manager. + +**efm allow-node** + + + +```text +efm allow-node +``` + +Invoke the `efm allow-node` command to allow the specified node to join the cluster. When invoking the command, provide the name of the cluster and the IP address of the joining node. + +This command must be invoked by `efm`, a member of the `efm` group, or root. + +**efm disallow-node** + + + +```text +efm disallow-node
+``` + +Invoke the `efm disallow-node` command to remove the specified node from the allowed hosts list, and prevent the node from joining a cluster. Provide the name of the cluster and the address of the node when calling the `efm disallow-node` command. This command must be invoked by `efm`, a member of the `efm` group, or root. + +**efm cluster-status** + + + +```text +efm cluster-status +``` + +Invoke the `efm cluster-status` command to display the status of a Failover Manager cluster. For more information about the status report, see [Monitoring a Failover Manager Cluster](06_monitoring_efm_cluster/#monitoring_efm_cluster). + +**efm cluster-status-json** + + + +```text +efm cluster-status-json +``` + +Invoke the `efm cluster-status-json` command to display the status of a Failover Manager cluster in json format. While the format of the displayed information is different than the display generated by the efm cluster-status command, the information source is the same. + +The following example is generated by querying the status of a healthy cluster with three nodes: + +```text +{ + "nodes": { + "172.16.144.176": { + "type": "Witness", + "agent": "UP", + "db": "N\/A", + "vip": "", + "vip_active": false + }, + "172.16.144.177": { + "type": "Primary", + "agent": "UP", + "db": "UP", + "vip": "", + "vip_active : false" + "xlogReceive : 0/14001478" + "xlog : 0/14001478" + "xloginfo :" + }, + "172.16.144.180": { + "type": "Standby", + "agent": "UP", + "db": "UP", + "vip": "", + "vip_active : false" + "xlogReceive : 0/14001478" + "xlog : 0/14001478" + "xloginfo :" + } + }, + "allowednodes": [ + "172.16.144.177", + "172.16.144.160", + "172.16.144.180", + "172.16.144.176" + ], + "membershipcoordinator": "172.16.144.177", + "failoverpriority": [ + "172.16.144.180" + ], + "minimumstandbys": 0, + "missingnodes": [], + "messages": [] +} +``` + +**efm encrypt** + + + +```text +efm encrypt [--from-env] +``` + +Invoke the `efm encrypt` command to encrypt the database password before include the password in the cluster properties file. Include the `--from-env` option to instruct Failover Manager to use the value specified in the `EFMPASS` environment variable, and execute without user input. For more information, see [Encrypting Your Database Password](04_configuring_efm/01_cluster_properties/01_encrypting_database_password/#encrypting_database_password). + +**efm promote** + + + +```text +efm promote cluster_name [-switchover [-sourcenode
][-quiet][-noscripts]
+```
+
+The `efm promote` command instructs Failover Manager to perform a manual promotion of a Standby node to Primary.
+
+Manual promotion should only be attempted if the status command reports that the cluster includes a Standby node that is up-to-date with the Primary. If there is no up-to-date Standby, Failover Manager will prompt you before continuing.
+
+Include the `-switchover` clause to promote a standby node and reconfigure the primary node as a standby node. Include the `-sourcenode` keyword, and specify a node address to indicate the node whose recovery settings will be copied to the old primary node (making it a standby). Include the `-quiet` keyword to suppress notifications during the switchover process. Include the `-noscripts` keyword to instruct Failover Manager to not invoke fencing or post-promotion scripts.
+
+This command must be invoked by `efm`, a member of the `efm` group, or root.
+
+!!! Note
+    This command instructs the service to ignore the value specified in the `auto.failover` parameter in the cluster properties file.
+
+**efm resume**
+
+
+
+```text
+efm resume <cluster name>
+```
+
+Invoke the `efm resume` command to resume monitoring a previously stopped database. This command must be invoked by `efm`, a member of the `efm` group, or root.
+
+**efm set-priority**
+
+
+
+```text
+efm set-priority <cluster name> <node address> <priority>
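+
+# Hypothetical example: make the standby at 172.19.12.163 the first promotion candidate in cluster efm
+# efm set-priority efm 172.19.12.163 1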
+``` + +Invoke the `efm set-priority` command to assign a failover priority to a standby node. The value specifies the order in which the node will be used in the event of a failover. This command must be invoked by `efm`, a member of the `efm` group, or root. + +Use the priority option to specify the place for the node in the priority list. For example, specify a value of 1 to indicate that the node is the primary standby, and will be the first node promoted in the event of a failover. A priority value of 0 instructs Failover Manager to not promote the standby. + +**efm stop-cluster** + + + +```text +efm stop-cluster +``` + +Invoke the `efm stop-cluster` command to stop Failover Manager on all nodes. This command instructs Failover Manager to connect to each node on the cluster and instruct the existing members to shut down. The command has no effect on running databases, but when the command completes, there is no failover protection in place. + +!!! Note + When you invoke the `efm stop-cluster` command, all authorized node information is removed from the Allowed node host list. + +This command must be invoked by `efm`, a member of the `efm` group, or root. + +**efm upgrade-conf** + + + +```text +efm upgrade-conf [-source ] +``` + +Invoke the `efm upgrade-conf` command to copy the configuration files from an existing Failover Manager installation, and add parameters required by a Failover Manager installation. Provide the name of the previous cluster when invoking the utility. This command must be invoked with root privileges. + +If you are upgrading from a Failover Manager configuration that does not use sudo, include the `-source` flag and specify the name of the *directory* in which the configuration files reside when invoking upgrade-conf. + +**efm node-status-json** + + + +```text +efm node-status-json +``` + +Invoke the `efm node-status-json` command to display the status of a local node in json format. A successful execution of this command returns `0` as its exit code. In case of a database failure or an agent status becoming IDLE, the command returns `1` as exit code. + +The following is an example output of the `efm node-status-json` command: + +> ```text +> { +> "type":"Standby", +> "address":"172.16.144.130", +> "agent":"UP", +> "db":"UP", +> "vip":"", +> "vip_active":"false" +> } +> ``` + +**efm --help** + + + +```text +efm --help +``` + +Invoke the `efm --help` command to display online help for the Failover Manager utility commands. diff --git a/product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx b/product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx new file mode 100644 index 00000000000..4ab2f2aa6ff --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx @@ -0,0 +1,55 @@ +--- +title: "Controlling the Failover Manager Service" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/controlling_efm_service.html" +--- + + + +Each node in a Failover Manager cluster hosts a Failover Manager agent that is controlled by a service script. By default, the service script expects to find: + +- A configuration file named `efm.properties` that contains the properties used by the Failover Manager service. Each node of a replication scenario must contain a properties file that provides information about the node. 
+- A cluster members file named `efm.nodes` that contains a list of the cluster members. Each node of a replication scenario must contain a cluster members list. + +Note that if you are running multiple clusters on a single node you will need to manually create configuration files with cluster-specific names and modify the service script for the corresponding clusters. + +The commands that control the Failover Manager service are platform-specific. + + + +## Using the systemctl Utility on RHEL/CentOS 7.x and RHEL/CentOS 8.x + +On RHEL/CentOS 7.x and RHEL/CentOS 8.x, Failover Manager runs as a Linux service named (by default) `edb-efm-4.0.service` that is located in `/usr/lib/systemd/system`. Each database cluster monitored by Failover Manager will run a copy of the service on each node of the replication cluster. + +Use the following systemctl commands to control a Failover Manager agent that resides on a RHEL/CentOS 7.x and RHEL/CentOS 8.x host: + +```text +systemctl start edb-efm-4.0 +``` + +The start command starts the Failover Manager agent on the current node. The local Failover Manager agent monitors the local database and communicates with Failover Manager on the other nodes. You can start the nodes in a Failover Manager cluster in any order. This command must be invoked by root. + +```text +systemctl stop edb-efm-4.0 +``` + +Stop the Failover Manager on the current node. This command must be invoked by root. + +```text +systemctl status edb-efm-4.0 +``` + +The status command returns the status of the Failover Manager agent on which it is invoked. You can invoke the status command on any node to instruct Failover Manager to return status and server startup information. + +```text +[root@ONE ~]}> systemctl status edb-efm-4.0 + edb-efm-4.0.service - EnterpriseDB Failover Manager 4.0 + Loaded: loaded (/usr/lib/systemd/system/edb-efm-4.0.service; disabled; vendor preset: disabled) + Active: active (running) since Wed 2013-02-14 14:02:16 EST; 4s ago + Process: 58125 ExecStart=/bin/bash -c /usr/edb/edb-efm-4.0/bin/runefm.sh start ${CLUSTER} (code=exited, status=0/SUCCESS) + Main PID: 58180 (java) + CGroup: /system.slice/edb-efm-4.0.service + └─58180 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.161-0.b14.el7_4.x86_64/jre/bin/java -cp /usr/edb/edb-efm-4.0/lib/EFM-4.0.0.jar -Xmx128m... +``` diff --git a/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx b/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx new file mode 100644 index 00000000000..af66915f9c0 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx @@ -0,0 +1,90 @@ +--- +title: "Controlling Logging" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/controlling_logging.html" +--- + + + +Failover Manager writes and stores one log file per agent and one startup log per agent in `/var/log/-4.0` (where `` specifies the name of the cluster). + +You can control the level of detail written to the agent log by modifying the `jgroups.loglevel` and `efm.loglevel` parameters in the [cluster properties file](04_configuring_efm/01_cluster_properties/#loglevel): + +```text +# Logging levels for JGroups and EFM. +# Valid values are: TRACE, DEBUG, INFO, WARN, ERROR +# Default value: INFO +# It is not necessary to increase these values unless debugging a +# specific issue. 
If nodes are not discovering each other at +# startup, increasing the jgroups level to DEBUG will show +# information about the TCP connection attempts that may help +# diagnose the connection failures. +jgroups.loglevel=INFO +efm.loglevel=INFO +``` + +The logging facilities use the Java logging library and logging levels. The log levels (in order from most logging output to least) are: + +> - `TRACE` +> - `DEBUG` +> - `INFO` +> - `WARN` +> - `ERROR` + +For example, if you set the `efm.loglevel` parameter to `WARN`, Failover Manager will only log messages at the `WARN` level and above (`WARN` and `ERROR`). + +By default, Failover Manager log files are rotated daily, compressed, and stored for a week. You can modify the file rotation schedule by changing settings in the log rotation file (`/etc/logrotate.d/efm-4.0`). For more information about modifying the log rotation schedule, consult the logrotate man page: + +> `$ man logrotate` + + + +## Enabling syslog Log File Entries + +Failover Manager supports syslog logging. To implement syslog logging, you must configure syslog to allow UDP or TCP connections. + +To allow a connection to syslog, edit the `/etc/rsyslog.conf` file and uncomment the protocol you wish to use. You must also ensure that the `UDPServerRun` or `TCPServerRun` entry associated with the protocol includes the port number to which log entries will be sent. For example, the following configuration file entries enable UDP connections to port 514: + +```text +# Provides UDP syslog reception +$ModLoad imudp +$UDPServerRun 514 +``` + +The following configuration file entries enable TCP connections to port 514: + +```text +# Provides TCP syslog reception +$ModLoad imtcp +$InputTCPServerRun 514 +``` + +After modifying the syslog configuration file, restart the `rsyslog` service to enable the connections: + +> `systemctl restart rsyslog.service` + +After modifying the `rsyslog.conf` file on the Failover Manager host, you must modify the Failover Manager properties to enable logging. Use your choice of editor to [modify the properties file](04_configuring_efm/01_cluster_properties/#logtype_enabled) (`/etc/edb/efm-4.1/efm.properties.in`) specifying the type of logging that you wish to implement: + +```text +# Which logging is enabled. +file.log.enabled=true +syslog.enabled=false +``` + +You must also [specify syslog details](04_configuring_efm/01_cluster_properties/#syslog_logging) for your system. Use the `syslog.protocol` parameter to specify the protocol type (UDP or TCP) and the `syslog.port` parameter to specify the listener port of the syslog host. The `syslog.facility` value may be used as an identifier for the process that created the entry; the value must be between `LOCAL0` and `LOCAL7`. + +```text +# Syslog information. The syslog service must be listening # on the + port for the given protocol, which can be UDP or +# TCP. The facilities supported are LOCAL0 through LOCAL7. +# syslog.host=localhost +syslog.port=514 +syslog.protocol=UDP +syslog.facility=LOCAL1 +``` + +For more information about syslog, please see the syslog man page: + +> `syslog man` diff --git a/product_docs/docs/efm/3.10/efm_user/10_notifications.mdx b/product_docs/docs/efm/3.10/efm_user/10_notifications.mdx new file mode 100644 index 00000000000..3087ea24575 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/10_notifications.mdx @@ -0,0 +1,157 @@ +--- +title: "Notifications" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/notifications.html" +--- + + + +Failover Manager will send e-mail notifications and/or invoke a notification script when a notable event occurs that affects the cluster. If you have configured Failover Manager to send an email notification, you must have an SMTP server running on port 25 on each node of the cluster. Use the following parameters to configure notification behavior for Failover Manager: + +```text +user.email +script.notification +from.email +``` + +For more information about editing the configuration properties, see [Specifying Cluster Properties](04_configuring_efm/01_cluster_properties/#cluster_properties). + +The body of the notification contains details about the event that triggered the notification, and about the current state of the cluster. For example: + +```text +EFM node: 10.0.1.11 +Cluster name: acctg +Database name: postgres +VIP: ip_address (Active|Inactive) +Database health is not being monitored. +``` + +The VIP field displays the IP address and state of the virtual IP if implemented for the node. + +Failover Manager assigns a severity level to each notification. The following levels indicate increasing levels of attention required: + +- `INFO` indicates an informational message about the agent and does not require any manual intervention (for example, Failover Manager has started or stopped). See [List of INFO level notifications](#notifications_info) +- `WARNING` indicates that an event has happened that requires the administrator to check on the system (for example, failover has occurred). See [List of WARNING level notifications](#notifications_warning) +- `SEVERE` indicates that a serious event has happened and requires the immediate attention of the administrator (for example, failover was attempted, but was unable to complete). See [List of SEVERE level notifications](#notifications_severe) + +The severity level designates the urgency of the notification. A notification with a severity level of `SEVERE` requires user attention immediately, while a notification with a severity level of `INFO` will call your attention to operational information about your cluster that does not require user action. Notification severity levels are not related to logging levels; all notifications are sent regardless of the log level detail specified in the configuration file. + +You can use the [notification.level](04_configuring_efm/01_cluster_properties/#notification_level) property to specify the minimum severity level that will trigger a notification. + +!!! Note + In addition to sending notices to the administrative email address, all notifications are recorded in the cluster log file (`/var/log/efm-4.0/.log`). 
+ +The conditions listed in the table below will trigger an `INFO` level notification: + + + +| Subject | Description | +| ---------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Executed fencing script | Executed fencing script script_name Results: script_results | +| Executed post-promotion script | Executed post-promotion script script_name Results: script_results | +| Executed remote pre-promotion script | Executed remote pre-promotion script script_name Results: script_results | +| Executed remote post-promotion script | Executed remote post-promotion script script_name Results: script_results | +| Executed post-database failure script | Executed post-database failure script script_name Results: script_results | +| Executed primary isolation script | Executed primary isolation script script_name Results: script_results | +| Witness agent running on node_address for cluster cluster_name | Witness agent is running. | +| Primary agent running on node_address for cluster cluster_name | Primary agent is running and database health is being monitored. | +| Standby agent running on node_address for cluster cluster_name | Standby agent is running and database health is being monitored. | +| Idle agent running on node node_address for cluster cluster_name | Idle agent is running. After starting the local database, the agent can be resumed. | +| Assigning VIP to node node_address | Assigning VIP VIP_address to node node_address Results: script_results | +| Releasing VIP from node node_address | Releasing VIP VIP_address from node node_address Results: script_results | +| Starting auto resume check for cluster cluster_name | The agent on this node will check every auto.resume.period seconds to see if it can resume monitoring the failed database. The cluster should be checked during this time and the agent stopped if the database will not be started again. See the agent log for more details. | +| Executed agent resumed script | Executed agent resumed script script_name Results: script_results | +| WAL logs backed up during promotion | When reconfiguring this standby to follow the new primary, the pg_xlog or pg_wal contents were backed up in the pgdata directory. This backup should be removed when convenient to free up disk space. | + +The conditions listed in the table below will trigger a *WARNING* level notification: + + + +| Subject | Description | +| ------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Witness agent exited on node_address for cluster cluster_name | Witness agent has exited. | +| Primary agent exited on node_address for cluster cluster_name | Database health is not being monitored. | +| Cluster cluster_name notified that primary node has left | Failover is disabled for the cluster until the primary agent is restarted. 
| +| Standby agent exited on node_address for cluster cluster_name | Database health is not being monitored. | +| Agent exited during promotion on node_address for cluster cluster_name | Database health is not being monitored. | +| Agent exited on node_address for cluster cluster_name | The agent has exited. This is generated by an agent in the Idle state. | +| Agent exited for cluster cluster_name | The agent has exited. This notification is usually generated during startup when an agent exits before startup has completed. | +| Virtual IP address assigned to non-primary node | The virtual IP address appears to be assigned to a non-primary node. To avoid any conflicts, Failover Manager will release the VIP. You should confirm that the VIP is assigned to your primary node and manually reassign the address if it is not. | +| Virtual IP address not assigned to primary node. | The virtual IP address appears to not be assigned to a primary node. EDB Postgres Failover Manager will attempt to reacquire the VIP. | +| No standby agent in cluster for cluster cluster_name | The standbys on cluster_name have left the cluster. | +| Standby agent failed for cluster cluster_name | A standby agent on cluster_name has left the cluster, but the coordinator has detected that the standby database is still running. | +| Standby database failed for cluster cluster_name | A standby agent has signaled that its database has failed. The other nodes also cannot reach the standby database. | +| Standby agent cannot reach database for cluster cluster_name | A standby agent has signaled database failure, but the other nodes have detected that the standby database is still running. | +| Cluster cluster_name has dropped below three nodes | At least three nodes are required for full failover protection. Please add witness or agent node to the cluster. | +| Subset of cluster cluster_name disconnected from primary | This node is no longer connected to the majority of the cluster cluster_name. Because this node is part of a subset of the cluster, failover will not be attempted. Current nodes that are visible are: node_address | +| Promotion has started on cluster cluster_name. | The promotion of a standby has started on cluster cluster_name. | +| Witness failure for cluster cluster_name | Witness running at node_address has left the cluster. | +| Idle agent failure for cluster cluster_name. | Idle agent running at node_address has left the cluster. | +| One or more nodes isolated from network for cluster cluster_name | This node appears to be isolated from the network. Other members seen in the cluster are: node_name | +| Node no longer isolated from network for cluster cluster_name. | This node is no longer isolated from the network. | +| Standby agent tried to promote, but primary DB is still running | The standby EFM agent tried to promote itself, but detected that the primary DB is still running on node_address. This usually indicates that the primary EFM agent has exited. Failover has NOT occurred. | +| Standby agent started to promote, but primary has rejoined. | The standby EFM agent started to promote itself, but found that a primary agent has rejoined the cluster. Failover has NOT occurred. | +| Standby agent tried to promote, but could not verify primary DB | The standby EFM agent tried to promote itself, but could not detect whether or not the primary DB is still running on node_address. Failover has NOT occurred. 
| +| Standby agent tried to promote, but VIP appears to still be assigned | The standby EFM agent tried to promote itself, but could not because the virtual IP address (VIP_address) appears to still be assigned to another node. Promoting under these circumstances could cause data corruption. Failover has NOT occurred. | +| Standby agent tried to promote, but appears to be orphaned | The standby EFM agent tried to promote itself, but could not because the well-known server (server_address) could not be reached. This usually indicates a network issue that has separated the standby agent from the other agents. Failover has NOT occurred. | +| Potential manual failover required on cluster cluster_name. | A potential failover situation was detected for cluster cluster_name. Automatic failover has been disabled for this cluster, so manual intervention is required. | +| Failover has completed on cluster cluster_name | Failover has completed on cluster cluster_name. | +| Lock file for cluster cluster_name has been removed | The lock file for cluster cluster_name has been removed from: path_name on node node_address. This lock prevents multiple agents from monitoring the same cluster on the same node. Please restore this file to prevent accidentally starting another agent for cluster. | +| A recovery file for cluster cluster_name has been found on primary node | A recovery file for cluster cluster_name has been found at: path_name on primary node node_address. This may be problematic should you attempt to restart the DB on this node. | +| recovery_target_timeline is not set to latest in recovery settings | The recovery_target_timeline parameter is not set to latest in the recovery settings. The standby server will not be able to follow a timeline change that occurs when a new primary is promoted. | +| Promotion has not occurred for cluster cluster_name | A promotion was attempted but there is already a node being promoted: ip_address. | +| Standby not reconfigured after failover in cluster cluster_name | The auto.reconfigure property has been set to false for this node. The node has not been reconfigured to follow the new primary node after a failover. | +| Could not resume replay for standby standby_id. | Could not resume replay for standby. Manual intervention may be required. Error: error_message. | +| Possible problem with database timeout values | Your remote.timeout value (value) is higher than your local.timeout value (value). If the local database takes too long to respond, the local agent could assume that the database has failed though other agents can connect. While this will not cause a failover, it could force the local agent to stop monitoring, leaving you without failover protection. | +| No standbys available for promotion in cluster cluster_name | The current number of standby nodes in the cluster has dropped to the minimum number: number. There cannot be a failover unless another standby node(s) is added or made promotable. | +| No promotable standby for cluster cluster_name | The current failover priority list in the cluster is empty. You have removed the only promotable standby for the cluster cluster_name. There cannot be a failover unless another promotable standby node(s) is added or made promotable by adding to failover priority list. | +| Synchronous replication has been reconfigured for cluster cluster_name | The number of synchronous standby nodes in the cluster has dropped below number. 
The synchronous standby names on primary has been reconfigured to: new synchronous_standby_names value. | +| Synchronous replication has been disabled for cluster cluster_name. | The number of synchronous standby nodes in the cluster has dropped below count. The primary has been taken out of synchronous replication mode. | +| Could not reload database configuration. | Could not reload database configuration. Manual intervention is required. Error: error_message. | +| Custom monitor timeout for cluster cluster_name | The following custom monitoring script has timed out: script_name | +| Custom monitor 'safe mode' failure for cluster cluster_name | The following custom monitor script has failed, but is being run in "safe mode": script_name. Output: script_results | +| primary.shutdown.as.failure set to true for primary node | The primary.shutdown.as.failure property has been set to true for this cluster. Stopping the primary agent without stopping the entire cluster will be treated by the rest of the cluster as an immediate primary agent failure. If maintenance is required on the primary database, shut down the primary agent and wait for a notification from the remaining nodes that failover will not happen. | +| Primary cannot ping local database for cluster cluster_name | The Primary agent can no longer reach the local database running at node_address. Other nodes are able to access the database remotely, so the primary will become IDLE and attempt to resume monitoring the database. | + + + + +The conditions listed in the table below will trigger a *SEVERE* notification: + +| Subject | Description | +| -------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Standby database restarted but EFM cannot connect | The start or restart command for the database ran successfully but the database is not accepting connections. EFM will keep trying to connect for up to restart.connection.timeout seconds. | +| Unable to connect to DB on node_address | The maximum connections limit has been reached. | +| Unable to connect to DB on node_address | Invalid password for db.user=user_name. | +| Unable to connect to DB on node_address | Invalid authorization specification. | +| Primary cannot resume monitoring local database for cluster cluster_name | The primary agent can no longer reach the local database running at node_address. Other nodes are able to access the database remotely, so the primary will not release the VIP and/or create a recovery.conf file. The primary agent will remain IDLE until the resume command is run to resume monitoring the database. | +| Fencing script error | Fencing script script_name failed to execute successfully. Exit Value: exit_code Results: script_results Failover has NOT occurred. | +| Post-promotion script failed | Post-promotion script script_name failed to execute successfully. 
Exit Value: exit_code Results: script_results | +| Remote post-promotion script failed | Remote post-promotion script script_name failed to execute successfully Exit Value: exit_code Results: script_resultsNode: node_address | +| Remote pre-promotion script failed | Remote pre-promotion script script_name failed to execute successfully Exit Value: exit_code Results: script_resultsNode: node_address | +| Post-database failure script error | Post-database failure script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Agent resumed script error | Agent resumed script script_name failed to execute successfully. Results: script_results | +| Primary isolation script failed | Primary isolation script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Could not promote standby | The promote command failed on node. Could not promote standby. Error details: error_details | +| Error creating recovery.conf file on node_address for cluster cluster_name | There was an error creating the recovery.conf file on primary node node_address during promotion. Promotion has continued, but requires manual intervention to ensure that the old primary node can not be restarted. Error details: message_details | +| An unexpected error has occurred for cluster cluster_name | An unexpected error has occurred on this node. Please check the agent log for more information. Error: error_details | +| Primary database being fenced off for cluster cluster_name | The primary database has been isolated from the majority of the cluster. The cluster is telling the primary agent at ip_address to fence off the primary database to prevent two primarys when the rest of the failover manager cluster promotes a standby. | +| Isolated primary database shutdown. | The isolated primary database has been shutdown by failover manager. | +| Primary database being fenced off for cluster cluster_name | The primary database has been isolated from the majority of the cluster. Before the primary could finish detecting isolation, a standby was promoted and has rejoined this node in the cluster. This node is isolating itself to avoid more than one primary database. | +| Could not assign VIP to node node_address | Failover manager could not assign the VIP address for some reason. | +| primary_or_standby database failure for cluster cluster_name | The database has failed on the specified node. | +| Agent is timing out for cluster cluster_name | This agent has timed out trying to reach the local database. After the timeout, the agent could successfully ping the database and has resumed monitoring. However, the node should be checked to make sure it is performing normally to prevent a possible database or agent failure. | +| Resume timed out for cluster cluster_name | This agent could not resume monitoring after reconfiguring and restarting the local database. See agent log for details. | +| Internal state mismatch for cluster cluster_name | The failover manager cluster's internal state did not match the actual state of the cluster members. This is rare and can be caused by a timing issue of nodes joining the cluster and/or changing their state. The problem should be resolved, but you should check the cluster status as well to verify. Details of the mismatch can be found in the agent log file. 
| +| Failover has not occurred | An agent has detected that the primary database is no longer available in cluster cluster_name, but there are no standby nodes available for failover. | +| Database in wrong state on node_address | The standby agent has detected that the local database is no longer in recovery. The agent will now become idle. Manual intervention is required. | +| Database in wrong state on node_address | The primary agent has detected that the local database is in recovery. The agent will now become idle. Manual intervention is required. | +| Database connection failure for cluster cluster_name | This node is unable to connect to the database running on: node_addressUntil this is fixed, failover may not work properly because this node will not be able to check if the database is running or not. | +| Standby custom monitor failure for cluster cluster_name | The following custom monitor script has failed on a standby node. The agent will stop monitoring the local database. Script location: script_name Script output: script_results | +| Primary custom monitor failure for cluster cluster_name | The following custom monitor script has failed on a primary node. EFM will attempt to promote a standby. Script location: script_name Script output: script_results | +| Loopback address set for ping.server.ip | Loopback address is set for ping.server.ip property. This setting can interfere with the network isolation detection and hence it should be changed. | +| Load balancer attach script error | Load balancer attach script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Load balancer detach script error | Load balancer detach script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Not enough synchronous standbys available in cluster cluster_name. | The number of synchronous standby nodes in the cluster has dropped to count. All write queries on the primary will be blocked until enough synchronous standby nodes are added. | diff --git a/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx b/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx new file mode 100644 index 00000000000..f225d41f915 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx @@ -0,0 +1,117 @@ +--- +title: "Supported Failover and Failure Scenarios" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/supported_scenarios.html" +--- + + + +Failover Manager monitors a cluster for failures that may or may not result in failover. + +Failover Manager supports a very specific and limited set of failover scenarios. Failover can occur: + +- if the Primary database crashes or is shutdown. +- if the node hosting the Primary database crashes or becomes unreachable. + +Failover Manager makes every attempt to verify the accuracy of these conditions. If agents cannot confirm that the Primary database or node has failed, Failover Manager will not perform any failover actions on the cluster. + +Failover Manager also supports a *no* *auto*-*failover* mode for situations where you want Failover Manager to monitor and detect failover conditions, but not perform an automatic failover to a Standby. In this mode, a notification is sent to the administrator when failover conditions are met. 
To disable automatic failover, modify the cluster properties file, setting the [auto.failover](04_configuring_efm/01_cluster_properties/#auto_failover) parameter to false. + +Failover Manager will alert an administrator to situations that require administrator intervention, but that do not merit promoting a Standby database to Primary. + + + +## Primary Database is Down + +If the agent running on the Primary database node detects a failure of the Primary database, Failover Manager begins the process of confirming the failure. + +![Confirming the Failure of the Primary Database.](images/supported_scenarios_master_db_down.png) + +If the agent on the Primary node detects that the Primary database has failed, all agents attempt to connect directly to the Primary database. If an agent can connect to the database, Failover Manager sends a notification about the state of the Primary node. If no agent can connect, the Primary agent declares database failure and releases the VIP (if applicable). + +If no agent can reach the virtual IP address or the database server, Failover Manager starts the failover process. The Standby agent on the most up-to-date node runs a fencing script (if applicable), promotes the Standby database to Primary database, and assigns the virtual IP address to the Standby node. Any additional Standby nodes are configured to replicate from the new primary unless auto.reconfigure is set to false. If applicable, the agent runs a post-promotion script. + +**Returning the Node to the Cluster** + +To recover from this scenario without restarting the entire cluster, you should: + +1. Restart the database on the original Primary node as a Standby database. +2. Invoke the `efm resume` command on the original Primary node. + +**Returning the Node to the Role of Primary** + +After returning the node to the cluster as a Standby, you can easily return the node to the role of Primary: + +1. If the cluster has more than one Standby node, use the `efm set-priority` command to set the node's failover priority to 1. +2. Invoke the [efm promote -switchover](07_using_efm_utility/#efm_promote) command to promote the node to its original role of Primary node. + + + +## Standby Database is Down + +If a Standby agent detects a failure of its database, the agent notifies the other agents; the other agents confirm the state of the database. + +![Confirming the failure of a Standby Database.](images/supported_scenarios_standby_db_down.png) + +After returning the Standby database to a healthy state, invoke the `efm resume` command to return the Standby to the cluster. + + + +## Primary Agent Exits or Node Fails + +If the Failover Manager Primary agent crashes or the node fails, a Standby agent will detect the failure and (if appropriate) initiate a failover. + +![Confirming the failure of the Primary Agent.](images/supported_scenarios_master_agent_exits.png) + +If an agent detects that the Primary agent has left, all agents attempt to connect directly to the Primary database. If any agent can connect to the database, an agent sends a notification about the failure of the Primary agent. If no agent can connect, the agents attempt to ping the virtual IP address to determine if it has been released. + +If no agent can reach the virtual IP address or the database server, Failover Manager starts the failover process. 
The Standby agent on the most up-to-date node runs a fencing script (if applicable), promotes the Standby database to Primary database, and assigns the virtual IP address to the Standby node; if applicable, the agent runs a post-promotion script. Any additional Standby nodes are configured to replicate from the new primary unless `auto.reconfigure` is set to `false`.
+
+If this scenario has occurred because the primary has been isolated from the network, the Primary agent will detect the isolation, release the virtual IP address, and create the recovery.conf file. Failover Manager will perform the previously listed steps on the remaining nodes of the cluster.
+
+To recover from this scenario without restarting the entire cluster, you should:
+
+1. Restart the original Primary node.
+2. Bring the original Primary database up as a Standby node.
+3. Start the service on the original Primary node.
+
+Please note that stopping an agent does not signal the cluster that the agent has failed.
+
+
+
+## Standby Agent Exits or Node Fails
+
+If a Standby agent exits or a Standby node fails, the other agents will detect that it is no longer connected to the cluster.
+
+![Failure of Standby Agent.](images/supported_scenarios_standby_agent_exits.png)
+
+When the failure is detected, the agents attempt to contact the database that resides on the node; if the agents confirm that there is a problem, Failover Manager sends the appropriate notification to the administrator.
+
+If there is only one Primary and one Standby remaining, there is no failover protection in the case of a Primary node failure. In the case of a Primary database failure, the Primary and Standby agents can agree that the database failed and proceed with failover.
+
+
+
+## Dedicated Witness Agent Exits / Node Fails
+
+The following scenario details the actions taken if a dedicated Witness (a node that is not hosting a database) fails.
+
+![Confirming the Failure of a dedicated Witness.](images/supported_scenarios_witness_agent_exits.png)
+
+When an agent detects that the Witness node cannot be reached, Failover Manager notifies the administrator of the state of the Witness.
+
+!!! Note
+    If the witness fails and the cluster only has two nodes, then there is no failover protection because the standby node has no way to know if the primary failed or was disconnected. In a two-node cluster, if the primary database fails but the nodes are still connected, failover will still occur since the standby can confirm the condition of the primary database.
+
+
+
+## Nodes Become Isolated from the Cluster
+
+The following scenario details the actions taken if one or more nodes (a minority of the cluster) become isolated from the majority of the cluster.
+
+![If members of the cluster become isolated.](images/supported_scenarios_node_becomes_isolated.png)
+
+If one or more nodes (but less than half of the cluster) become isolated from the rest of the cluster, the remaining cluster behaves as if the nodes have failed. The agents attempt to discern if the Primary node is among the isolated nodes; if it is, the Primary fences itself off from the cluster, while a Standby node (from within the cluster majority) is promoted to replace it. Other Standby nodes are configured to replicate from the new primary unless `auto.reconfigure` is set to `false`.
+
+Failover Manager then notifies an administrator, and the isolated nodes rejoin the cluster when they are able. When the nodes rejoin the cluster, the failover priority may change.
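+
+If the resulting promotion order is not what you want, you can inspect and adjust it from any node. The commands below are a sketch only; the cluster name (efm) and node address are hypothetical:
+
+```text
+# Review the current Standby priority host list
+efm cluster-status efm
+
+# Move a rejoined standby back to the front of the failover priority list
+efm set-priority efm 172.19.10.2 1
+```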
diff --git a/product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx b/product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx
new file mode 100644
index 00000000000..15455f32cd9
--- /dev/null
+++ b/product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx
@@ -0,0 +1,108 @@
+---
+title: "Upgrading an Existing Cluster"
+
+legacyRedirectsGenerated:
+ # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/upgrading_existing_cluster.html"
+---
+
+
+
+Failover Manager provides a utility to assist you when upgrading a Failover Manager cluster. To upgrade an existing cluster, you must:
+
+1. Install Failover Manager 4.0 on each node of the cluster. For detailed information about installing Failover Manager, see [Installing Failover Manager](03_installing_efm/#installing_efm).
+2. After installing Failover Manager, invoke the `efm upgrade-conf` utility to create the `.properties` and `.nodes` files for Failover Manager 4.0. The Failover Manager installer installs the upgrade utility ([efm upgrade-conf](07_using_efm_utility/#efm_upgrade_conf)) to the `/usr/edb/efm-4.0/bin` directory. To invoke the utility, assume root privileges, and invoke the command:
+
+```text
+efm upgrade-conf <cluster name>
+```
+
+The `efm upgrade-conf` utility locates the `.properties` and `.nodes` files of pre-existing clusters and copies the parameter values to a new configuration file for use by Failover Manager. The utility saves the updated copy of the configuration files in the `/etc/edb/efm-4.0` directory.
+
+3. Modify the `.properties` and `.nodes` files for EFM 4.0, specifying any new preferences. Use your choice of editor to modify any additional properties in the properties file (located in the `/etc/edb/efm-4.0` directory) before starting the service for that node. For detailed information about property settings, see [The Cluster Properties File](04_configuring_efm/01_cluster_properties/#cluster_properties).
+
+!!! Note
+    `db.bin` is a required property. When modifying the properties file, ensure that the `db.bin` property specifies the location of the Postgres `bin` directory.
+
+4. Use a version-specific command to stop the old Failover Manager cluster; for example, you can use the following command to stop a version 3.10 cluster:
+
+```text
+/usr/edb/efm-3.10/bin/efm stop-cluster efm
+```
+
+5. Start the new [Failover Manager service](08_controlling_efm_service/#controlling_efm_service) (`edb-efm-4.0`) on each node of the cluster.
+
+The following example demonstrates invoking the upgrade utility to create the `.properties` and `.nodes` files for a Failover Manager installation:
+
+```text
+[root@localhost efm-4.0]# /usr/edb/efm-4.0/bin/efm upgrade-conf efm
+Checking directory /etc/edb/efm-3.10
+Processing efm.properties file
+The following properties were added in addition to those in previous installed version:
+notification.text.prefix
+encrypt.agent.messages
+standby.restart.delay
+The following properties were renamed from those in previous installed version:
+stop.failed.master => stop.failed.primary
+master.shutdown.as.failure => primary.shutdown.as.failure
+script.master.isolated => script.primary.isolated
+stop.isolated.master => stop.isolated.primary
+reconfigure.sync.master => reconfigure.sync.primary
+Checking directory /etc/edb/efm-3.10
+Processing efm.nodes file
+db.password.encrypted re-encoded with stronger encryption.
+Upgrade of files is finished.
The owner and group for properties and nodes files have been set as 'efm'. +[root@localhost efm-4.0]# +``` + +If you are [using a Failover Manager configuration without sudo](04_configuring_efm/04_extending_efm_permissions/#running_efm_without_sudo), include the `-source` flag and specify the name of the directory in which the configuration files reside when invoking `upgrade-conf`. If the directory is not the configuration default directory, the upgraded files will be created in the directory from which the `upgrade-conf` command was invoked. + +**Please note**: If you are using a unit file, you must manually update the file to reflect the new Failover Manager service name when you perform an upgrade. + +## Un-installing Failover Manager + +After upgrading to Failover Manager 4.0, you can use your native package manager to remove previous installations of Failover Manager. For example, use the following command to remove Failover Manager 3.10 and any unneeded dependencies: + +- On RHEL or CentOS 7.x: + +```text +yum remove edb-efm310 +``` + +- On RHEL or CentOS 8.x: + +```text +dnf remove edb-efm310 +``` + +- On Debian or Ubuntu: + +```text +apt-get remove edb-efm310 +``` + +- On SLES: + +```text +zypper remove edb-efm310 +``` + +## Performing a Database Update (Minor Version) + +This section describes how to perform a quick minor database version upgrade. You can use the steps that follow to upgrade from one minor version to another (for example, from 10.1.5 to version 10.2.7), or to apply a patch release for a version. + +First, update the database server on each Standby node of the Failover Manager cluster. Next, perform a switchover, promoting a Standby node to the role of Primary within the Failover Manager cluster. Finally, perform a database update on the old primary node. + +On each node of the cluster you must perform the following steps to update the database server: + +1. Stop the Failover Manager agent. +2. Stop the database server. +3. Update the database server. +4. Start the database service. +5. Start the Failover Manager agent. + +For detailed information about controlling the Advanced Server service, or upgrading your version of Advanced Server, please see the EDB Postgres Advanced Server Guide, available at: + +[https://www.enterprisedb.com/docs](/epas/latest/) + +When your updates are complete, you can use the [efm set-priority](07_using_efm_utility/#efm_set_priority) command to add the old primary to the front of the standby list (if needed), and then switch over to return the cluster to its original state. diff --git a/product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx b/product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx new file mode 100644 index 00000000000..f2c421c3ce1 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx @@ -0,0 +1,47 @@ +--- +title: "Troubleshooting" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/troubleshooting.html" +--- + + + +**Authorization file not found. Is the local agent running?** + +If you invoke an EFM cluster management command and EFM is not running on the node, the `efm` command will display an error: + +```text +Authorization file not found. Is the local agent running? +``` + +**Not authorized to run this command.
User '<os user>' is not a member of the `efm` group.** + +You must have special privileges to invoke some of the `efm` commands documented in [Using the efm Utility](07_using_efm_utility/#using_efm_utility). If these commands are invoked by a user who isn't authorized to run them, the `efm` command will display an error: + +```text +Not authorized to run this command. User '' is not a member of the `efm` group. +``` + +**Notification; Unexpected error message** + +If you receive a notification message about an unexpected error message, check the [Failover Manager log file](09_controlling_logging/#controlling_logging) for an `OutOfMemory` message. Failover Manager runs with the default memory value set by this property: + +```text +# Extra information that will be passed to the JVM when starting the agent. +jvm.options=-Xmx128m +``` + +If you are running with less than 128 megabytes allocated, you should increase the value and restart the Failover Manager agent. + +**Confirming the OpenJDK version** + +Failover Manager is tested with OpenJDK; we strongly recommend using OpenJDK. You can use the following command to check the type of your Java installation: + +```text +# java -version +openjdk version "1.8.0_191" +OpenJDK Runtime Environment (build 1.8.0_191-b12) +OpenJDK 64-Bit Server VM (build 25.191-b12, mixed mode) +``` diff --git a/product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx b/product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx new file mode 100644 index 00000000000..9b19511aba2 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx @@ -0,0 +1,44 @@ +--- +title: "Configuring Streaming Replication" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/configuring_streaming_replication.html" +--- + + + +Configuring a replication scenario can be complex; for detailed information about configuration options, please see the PostgreSQL core documentation, available at: + + + +You may want to use a `.pgpass` file to enable md5 authentication for the replication user – this may or may not be the safest authentication method for your environment. For more information about the supported authentication options, please see the PostgreSQL core documentation at: + + + +!!! Note + From Version 3.10 onwards, EFM uses the `pg_ctl` utility for standby promotion. You do not need to set the `trigger_file` or `promote_trigger_file` parameter for promotion of a standby server. + +## Limited Support for Cascading Replication + +While Failover Manager does not provide full support for cascading replication, it does provide limited support for simple failover in a cascading replication scenario. Cascading replication allows a Standby node to stream to another Standby node, reducing the number of connections (and processing overhead) to the primary node. + +![Cascading replication.](images/cascading_replication.png) + +For detailed information about configuring cascading replication, please see the PostgreSQL documentation at: + + + +To use Failover Manager in a cascading replication scenario, you should modify the cluster properties file, setting the following property values on Standby Node #2: + +```text +promotable=false +auto.reconfigure=false +``` + +In the event of a failover, Standby Node #1 will be promoted to the role of Primary node.
Standby Node #2 will continue to act as a read-only replica for the new Primary node until you manually reconfigure the replication scenario to contain 3 nodes. + +In the event of a failure of Standby Node #1, you will not have failover protection, but you will receive an email notifying you of the failure of the node. + +!!! Note + Performing a switchover and switching back to the original primary may not preserve the cascading replication scenario. diff --git a/product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx b/product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx new file mode 100644 index 00000000000..61e1a171302 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx @@ -0,0 +1,69 @@ +--- +title: "Configuring SSL Authentication on a Failover Manager Cluster" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/configuring_ssl_authentication.html" +--- + + + +The following steps enable SSL authentication for Failover Manager. Note that all connecting clients will be required to use SSL authentication when connecting to any database server within the cluster; you must modify the connection methods currently used by existing clients. + +To enable SSL on a Failover Manager cluster, you must: + +1. Place a `server.crt` and `server.key` file in the `data` directory (under your Advanced Server installation). You can purchase a certificate signed by an authority, or create your own self-signed certificate. For information about creating a self-signed certificate, see the PostgreSQL core documentation at: + + + +2. Modify the `postgresql.conf` file on each database within the Failover Manager cluster, enabling SSL: + + ```text + ssl=on + ``` + +> After modifying the postgresql.conf file, you must restart the server. + +1. Modify the `pg_hba.conf` file on each node of the Failover Manager cluster, adding the following line to the beginning of the file: + + ```text + hostnossl all all all reject + ``` + +> The line instructs the server to reject any connections that are not using SSL authentication; this enforces SSL authentication for any connecting clients. For information about modifying the pg_hba.conf file, see the PostgreSQL core documentation at: +> +> > + +1. After placing the server.crt and server.key file in the data directory, convert the certificate to a form that Java understands; you can use the command: + + ```text + openssl x509 -in server.crt -out server.crt.der -outform der + ``` + +> For more information, visit: +> +> > + +1. Then, add the certificate to the Java trusted certificates file: + + ```text + keytool -keystore $JAVA_HOME/lib/security/cacerts -alias <alias_name> -import -file server.crt.der + ``` + +> Where +> +> > `$JAVA_HOME` is the home directory of your Java installation. +> > +> > <alias_name> can be any string, but must be unique for each certificate. +> > +> > You can use the `keytool` command to review a list of the available certificates or retrieve information about a specific certificate. For more information about using the keytool command, enter: +> > +> > > ```text
> > > man keytool
> > > ```
> 
> The certificate from each database server must be imported into the trusted certificates file of each agent. Note that the location of the cacerts file may vary on each system.
For more information, visit: +> +> > + +1. Modify the [efm.properties file](04_configuring_efm/01_cluster_properties/#jdbc_sslmode) on each node within the cluster, setting the `jdbc.sslmode` property. diff --git a/product_docs/docs/efm/3.10/efm_user/images/cascading_replication.png b/product_docs/docs/efm/3.10/efm_user/images/cascading_replication.png new file mode 100644 index 00000000000..9f70a4f63fd --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/cascading_replication.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9cc1551b6cb7ea81a2d3cae4593cb13bd34477c2882cc6ca5a63597fdf2af1b +size 53120 diff --git a/product_docs/docs/efm/3.10/efm_user/images/cascading_replication1.png b/product_docs/docs/efm/3.10/efm_user/images/cascading_replication1.png new file mode 100644 index 00000000000..2477de6eba6 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/cascading_replication1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64acefbc9cbf3a086eec5c019e268e463357d6bb0620e7f7be7a34ffd906b49c +size 36920 diff --git a/product_docs/docs/efm/3.10/efm_user/images/edb_logo.png b/product_docs/docs/efm/3.10/efm_user/images/edb_logo.png new file mode 100755 index 00000000000..3c3bf2a4365 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/edb_logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 +size 16849 diff --git a/product_docs/docs/efm/3.10/efm_user/images/failover_manager_overview.png b/product_docs/docs/efm/3.10/efm_user/images/failover_manager_overview.png new file mode 100644 index 00000000000..a15a28d3cf3 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/failover_manager_overview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d17e3763dc0e81372a7377e6cf7578e693cfeef91e21637b85f7e4818a37a03d +size 116126 diff --git a/product_docs/docs/efm/3.10/efm_user/images/placeholder.png b/product_docs/docs/efm/3.10/efm_user/images/placeholder.png new file mode 100755 index 00000000000..3c3bf2a4365 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/placeholder.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 +size 16849 diff --git a/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_master.png b/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_master.png new file mode 100644 index 00000000000..435cc08ba1d --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_master.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4e40f70e02570dc7a8d3f1591f2311f431009719887073ed57585a593a76ac6 +size 327010 diff --git a/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_standby.png b/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_standby.png new file mode 100644 index 00000000000..c8a11e4fa42 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_standby.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22525d4998a13c62071ea69c33eea474b6e773f3af6535bd8c62a2e36d906ca0 +size 337248 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_agent_exits.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_agent_exits.png new file mode 100644 index 
00000000000..f57c544993e --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_agent_exits.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:216f36072c4fe21a71d5277fb2c0868f685b77ea1cb14b3092a16a8a6f3055e8 +size 217408 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_db_down.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_db_down.png new file mode 100644 index 00000000000..df22dc9aa92 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_db_down.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a84eda4b8e16846448db35f2921da3ad6bb2b24ec5f0ebb828b1b3f0cb87fcf +size 266651 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_node_becomes_isolated.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_node_becomes_isolated.png new file mode 100644 index 00000000000..269eeb1ea0f --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_node_becomes_isolated.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5c49c7e6672c7fc897dc156d3899f49efe3cf90087cf4aa9e6e2544f88e9508 +size 148435 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_agent_exits.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_agent_exits.png new file mode 100644 index 00000000000..1b0a90cbe14 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_agent_exits.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07ad86e12732575d5698c652d7caadccc847d3e567f9109270b918b144527cd7 +size 56094 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_db_down.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_db_down.png new file mode 100644 index 00000000000..e5ad35bae7a --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_db_down.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d26fd9fe629f7acff573eddc0a7c81c697b1659345dc951dc1c24a0cc14787a1 +size 64713 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_witness_agent_exits.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_witness_agent_exits.png new file mode 100644 index 00000000000..356b9a3912e --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_witness_agent_exits.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9565b2be9589b2d12536820247f1b5719f63a19d72e8dbe902b3bad5ca093cbd +size 37332 diff --git a/product_docs/docs/efm/3.10/efm_user/index.mdx b/product_docs/docs/efm/3.10/efm_user/index.mdx new file mode 100644 index 00000000000..ec9c73be97d --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/index.mdx @@ -0,0 +1,26 @@ +--- +title: "EDB Failover Manager" + +#legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/whats_new.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/genindex.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/conclusion.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/index.html" +--- + +**EDB Failover Manager** + +EDB Postgres Failover Manager (EFM) is a high-availability module from EnterpriseDB that enables a Postgres primary node to automatically failover to a Standby node in the event of a software or hardware failure on the primary. + +This guide provides information about installing, configuring and using Failover Manager . For information about the platforms and versions supported by Failover Manager , visit the EnterpriseDB website at: + + + +This document uses Postgres to mean either the PostgreSQL or EDB Postgres Advanced Server database. + +
+ +whats_new failover_manager_overview installing_efm configuring_efm using_efm monitoring_efm_cluster using_efm_utility controlling_efm_service controlling_logging notifications supported_scenarios upgrading_existing_cluster troubleshooting configuring_streaming_replication configuring_ssl_authentication conclusion + +
From bd565a35fd0e266a94ff24d2019d2ca5aa62800f Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Thu, 22 Apr 2021 17:44:54 +0530 Subject: [PATCH 02/50] Version update in Quickstart guide Former-commit-id: 767c9694af67a0a5d89189f2478a2bc9dba8c1ab --- .../docs/efm/3.10/efm_quick_start/index.mdx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/product_docs/docs/efm/3.10/efm_quick_start/index.mdx b/product_docs/docs/efm/3.10/efm_quick_start/index.mdx index f196c7ac5c3..683ba65244f 100644 --- a/product_docs/docs/efm/3.10/efm_quick_start/index.mdx +++ b/product_docs/docs/efm/3.10/efm_quick_start/index.mdx @@ -3,8 +3,8 @@ title: "Creating a Failover Manager Cluster" #legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/4.0/genindex.html" - #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/4.0/index.html" + #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/3.10/genindex.html" + #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/3.10/index.html" --- EDB Postgres Failover Manager (Failover Manager) is a high-availability module from EnterpriseDB that enables a Postgres Primary node to automatically failover to a Standby node in the event of a software or hardware failure on the Primary. @@ -24,7 +24,7 @@ You must perform some basic installation and configuration steps before performi - You must also install Failover Manager on each primary and standby node. During Advanced Server installation, you configured an EnterpriseDB repository on each database host. You can use the EnterpriseDB repository and the `yum install` command to install Failover Manager on each node of the cluster: ```text - yum install edb-efm41 + yum install edb-efm310 ``` During the installation process, the installer will create a user named `efm` that has sufficient privileges to invoke scripts that control the Failover Manager service for clusters owned by `enterprisedb` or `postgres`. The example that follows creates a cluster named `efm`. @@ -36,7 +36,7 @@ Start the configuration process on a primary or standby node. Then, copy the con Copy the provided sample files to create EFM configuration files, and correct the ownership: ```text -cd /etc/edb/efm-4.0 +cd /etc/edb/efm-3.10 cp efm.properties.in efm.properties @@ -52,7 +52,7 @@ chown efm:efm efm.nodes Create the [encrypted password](/efm/latest/efm_user/04_configuring_efm/02_encrypting_database_password/) needed for the properties file: ```text -/usr/edb/efm-4.0/bin/efm encrypt efm +/usr/edb/efm-3.10/bin/efm encrypt efm ``` Follow the onscreen instructions to produce the encrypted version of your database password. @@ -96,29 +96,29 @@ Please note that the Failover Manager agent will not verify the content of the ` **Step 5: Configure the Other Nodes** -Copy the `efm.properties` and `efm.nodes` files to the `/etc/edb/efm-4.0` directory on the other nodes in your sample cluster. After copying the files, change the file ownership so the files are owned by `efm:efm`. The `efm.properties` file can be the same on every node, except for the following properties: +Copy the `efm.properties` and `efm.nodes` files to the `/etc/edb/efm-3.10` directory on the other nodes in your sample cluster. After copying the files, change the file ownership so the files are owned by `efm:efm`. 
The `efm.properties` file can be the same on every node, except for the following properties: - Modify the `bind.address` property to use the node’s local address. - Set `is.witness` to `true` if the node is a witness node. If the node is a witness node, the properties relating to a local database installation will be ignored. **Step 6: Start the EFM Cluster** On any node, start the Failover Manager agent. The agent is named `edb-efm-3.10`; you can use your platform-specific service command to control the service. For example, on a CentOS/RHEL 7.x or CentOS/RHEL 8.x host use the command: ```text -systemctl start edb-efm-4.0 +systemctl start edb-efm-3.10 ``` On a CentOS or RHEL 6.x host use the command: ```text -service edb-efm-4.0 start +service edb-efm-3.10 start ``` After the agent starts, run the following command to see the status of the single-node cluster. You should see the addresses of the other nodes in the `Allowed node host` list. ```text -/usr/edb/efm-4.0/bin/efm cluster-status efm +/usr/edb/efm-3.10/bin/efm cluster-status efm ``` Start the agent on the other nodes. Run the `efm cluster-status efm` command on any node to see the cluster status. @@ -126,7 +126,7 @@ If any agent fails to start, see the startup log for information about what went wrong: ```text -cat /var/log/efm-4.0/startup-efm.log +cat /var/log/efm-3.10/startup-efm.log ``` **Performing a Switchover** @@ -134,7 +134,7 @@ If the cluster status output shows that the primary and standby(s) are in sync, you can perform a switchover with the following command: ```text -/usr/edb/efm-4.0/bin/efm promote efm -switchover +/usr/edb/efm-3.10/bin/efm promote efm -switchover ``` The command will promote a standby and reconfigure the primary database as a new standby in the cluster. To switch back, run the command again. @@ -142,5 +142,5 @@ For quick access to online help, you can invoke the following command: ```text -/usr/edb/efm-4.0/bin/efm --help +/usr/edb/efm-3.10/bin/efm --help ``` From 20d353e9e7f0e35802b95dcfd294f350cd8d4236 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 23 Apr 2021 14:30:43 +0530 Subject: [PATCH 03/50] version change Former-commit-id: 4bbc1afbfcd9bedc810b8e229cffab66346f0c16 --- .../docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx | 4 ++-- .../docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx | 2 +- .../3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx | 2 +- product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx | 8 ++++---- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx index 1d582eeb107..1298dfbc6c8 100644 --- a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx @@ -3,12 +3,12 @@ title: "Architecture Overview" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
- - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/introduction.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/introduction.html" --- This guide explains how to configure Failover Manager and Pgpool best to leverage the benefits that they provide for Advanced Server. Using the reference architecture described in the Architecture section, you can learn how to achieve high availability by implementing an automatic failover mechanism (with Failover Manager) while scaling the system for larger workloads and an increased number of concurrent clients with read-intensive or mixed workloads to achieve horizontal scaling/read-scalability (with Pgpool). -The architecture described in this document has been developed and tested for EFM 4.1, EDB Pgpool 4.1, and Advanced Server 13. +The architecture described in this document has been developed and tested for EFM 3.10, EDB Pgpool, and Advanced Server 12. Documentation for Advanced Server and Failover Manager are available from EnterpriseDB at: diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx index 8526284e6b5..100af8abea5 100644 --- a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx @@ -3,7 +3,7 @@ title: "Architecture" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/architecture.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/architecture.html" --- ![A typical EFM and Pgpool configuration](images/edb_ha_architecture.png) diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx index 8487a402cf1..310afb892ed 100644 --- a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx @@ -3,7 +3,7 @@ title: "Implementing High Availability with Pgpool" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/components_ha_pgpool.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/components_ha_pgpool.html" --- Failover Manager monitors the health of Postgres nodes; in the event of a database failure, Failover Manager performs an automatic failover to a Standby node. Note that Pgpool does not monitor the health of backend nodes and will not perform failover to any Standby nodes. diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx index 645be9cabc6..576f8db5f19 100644 --- a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx @@ -3,10 +3,10 @@ title: "EDB Postgres High Availability & Horizontal Read Scaling Architecture" #legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/genindex.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/introduction.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/conclusion.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/index.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/genindex.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/introduction.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/conclusion.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/index.html" --- Since high-availability and read scalability are not part of the core feature set of EDB Postgres Advanced Server, Advanced Server relies on external tools to provide this functionality. This document focuses on the functionality provided by EDB Failover Manager and Pgpool-II, and discusses the implications of a high-availability architecture formed around these tools. From 2259f2fa20f9ae0ef2b9ec2afe75d743a73d49fe Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Thu, 22 Apr 2021 11:07:26 +0530 Subject: [PATCH 04/50] Cloned the 4.0 docset to form 3.10 docset Former-commit-id: ad8be1a1681542e73dafeb98c10feb52d2d181d2 --- .../efm_pgpool_ha_guide/01_introduction.mdx | 60 + .../efm_pgpool_ha_guide/02_architecture.mdx | 32 + .../03_components_ha_pgpool.mdx | 161 +++ .../efm_pgpool_ha_guide/04_appendix_a.mdx | 70 + .../efm_pgpool_ha_guide/05_appendix_b.mdx | 17 + .../efm_pgpool_ha_guide/images/EDB_logo.png | 3 + .../images/EFM_PgPool_Azure.png | 3 + .../images/backend_pools.png | 3 + .../images/edb_ha_architecture.png | 3 + .../images/edb_ha_architecture1.png | 3 + .../edb_ha_architecture_separate_VM.png | 3 + .../efm_pgpool_ha_guide/images/edb_logo.svg | 56 + .../images/failover_manager_overview.png | 3 + .../images/health_probes.png | 3 + .../images/load_balancing_rules.png | 3 + .../images/placeholder.png | 3 + .../images/rule_port_9898.png | 3 + .../images/rule_port_9999.png | 3 + .../efm/3.10/efm_pgpool_ha_guide/index.mdx | 18 + .../3.10/efm_quick_start/images/edb_logo.png | 3 + .../3.10/efm_quick_start/images/edb_logo.svg | 56 + .../images/failover_manager_overview.png | 3 + .../efm_quick_start/images/placeholder.png | 3 + .../docs/efm/3.10/efm_quick_start/index.mdx | 146 ++ .../docs/efm/3.10/efm_user/01_whats_new.mdx | 17 + .../01_prerequisites.mdx | 102 ++ .../02_failover_manager_overview/index.mdx | 37 + .../efm/3.10/efm_user/03_installing_efm.mdx | 315 +++++ .../01_encrypting_database_password.mdx | 77 ++ .../01_cluster_properties/index.mdx | 1186 +++++++++++++++++ .../02_encrypting_database_password.mdx | 81 ++ .../04_configuring_efm/03_cluster_members.mdx | 35 + .../04_extending_efm_permissions.mdx | 116 ++ .../05_using_vip_addresses.mdx | 148 ++ .../efm_user/04_configuring_efm/index.mdx | 20 + .../docs/efm/3.10/efm_user/05_using_efm.mdx | 318 +++++ .../efm_user/06_monitoring_efm_cluster.mdx | 142 ++ .../3.10/efm_user/07_using_efm_utility.mdx | 213 +++ .../efm_user/08_controlling_efm_service.mdx | 55 + .../3.10/efm_user/09_controlling_logging.mdx | 90 ++ .../efm/3.10/efm_user/10_notifications.mdx | 157 +++ .../3.10/efm_user/11_supported_scenarios.mdx | 117 ++ 
.../12_upgrading_existing_cluster.mdx | 108 ++ .../efm/3.10/efm_user/13_troubleshooting.mdx | 47 + .../14_configuring_streaming_replication.mdx | 44 + .../15_configuring_ssl_authentication.mdx | 69 + .../efm_user/images/cascading_replication.png | 3 + .../images/cascading_replication1.png | 3 + .../efm/3.10/efm_user/images/edb_logo.png | 3 + .../images/failover_manager_overview.png | 3 + .../efm/3.10/efm_user/images/placeholder.png | 3 + .../str_replication_dashboard_master.png | 3 + .../str_replication_dashboard_standby.png | 3 + ...supported_scenarios_master_agent_exits.png | 3 + .../supported_scenarios_master_db_down.png | 3 + ...ported_scenarios_node_becomes_isolated.png | 3 + ...upported_scenarios_standby_agent_exits.png | 3 + .../supported_scenarios_standby_db_down.png | 3 + ...upported_scenarios_witness_agent_exits.png | 3 + product_docs/docs/efm/3.10/efm_user/index.mdx | 26 + 60 files changed, 4220 insertions(+) create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/04_appendix_a.mdx create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/05_appendix_b.mdx create mode 100755 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EDB_logo.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/backend_pools.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture.png create mode 100755 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture1.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png create mode 100755 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_logo.svg create mode 100755 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/failover_manager_overview.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/health_probes.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/load_balancing_rules.png create mode 100755 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/placeholder.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9898.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9999.png create mode 100644 product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx create mode 100755 product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.png create mode 100755 product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.svg create mode 100755 product_docs/docs/efm/3.10/efm_quick_start/images/failover_manager_overview.png create mode 100755 product_docs/docs/efm/3.10/efm_quick_start/images/placeholder.png create mode 100644 product_docs/docs/efm/3.10/efm_quick_start/index.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx create mode 100644 
product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_cluster_members.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_extending_efm_permissions.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/05_using_vip_addresses.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/10_notifications.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx create mode 100644 product_docs/docs/efm/3.10/efm_user/images/cascading_replication.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/cascading_replication1.png create mode 100755 product_docs/docs/efm/3.10/efm_user/images/edb_logo.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/failover_manager_overview.png create mode 100755 product_docs/docs/efm/3.10/efm_user/images/placeholder.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_master.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_standby.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_agent_exits.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_db_down.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_node_becomes_isolated.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_agent_exits.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_db_down.png create mode 100644 product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_witness_agent_exits.png create mode 100644 product_docs/docs/efm/3.10/efm_user/index.mdx diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx new file mode 100644 index 00000000000..1d582eeb107 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx @@ -0,0 +1,60 @@ +--- +title: "Architecture Overview" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/introduction.html" +--- + +This guide explains how to configure Failover Manager and Pgpool best to leverage the benefits that they provide for Advanced Server. Using the reference architecture described in the Architecture section, you can learn how to achieve high availability by implementing an automatic failover mechanism (with Failover Manager) while scaling the system for larger workloads and an increased number of concurrent clients with read-intensive or mixed workloads to achieve horizontal scaling/read-scalability (with Pgpool). + +The architecture described in this document has been developed and tested for EFM 4.1, EDB Pgpool 4.1, and Advanced Server 13. + +Documentation for Advanced Server and Failover Manager are available from EnterpriseDB at: + + + +Documentation for pgPool-II can be found at: + + + +## Failover Manager Overview + +Failover Manager is a high-availability module that monitors the health of a Postgres streaming replication cluster and verifies failures quickly. When a database failure occurs, Failover Manager can automatically promote a streaming replication Standby node into a writable Primary node to ensure continued performance and protect against data loss with minimal service interruption. + +**Basic EFM Architecture Terminology** + +A Failover Manager cluster is comprised of EFM processes that reside on the following hosts on a network: + +- A **Primary** node is the Primary database server that is servicing database clients. +- One or more **Standby nodes** are streaming replication servers associated with the Primary node. +- The **Witness node** confirms assertions of either the Primary or a Standby in a failover scenario. If, during a failure situation, the Primary finds itself in a partition with half or more of the nodes, it will stay Primary. As such, EFM supports running in a cluster with an even number of agents. + +## Pgpool-II Overview + +Pgpool-II (Pgpool) is an open-source application that provides connection pooling and load balancing for horizontal scalability of SELECT queries on multiple Standbys in EPAS and community Postgres clusters. For every backend, a backend_weight parameter can set the ratio of read traffic to be directed to the backend node. To prevent read traffic on the Primary node, the backend_weight parameter can be set to 0. In such cases, data modification language (DML) queries (i.e., INSERT, UPDATE, and DELETE) will still be sent to the Primary node, while read queries are load-balanced to the Standbys, providing scalability with mixed and read-intensive workloads. + +EnterpriseDB supports the following Pgpool functionality: + +- Load balancing +- Connection pooling +- High availability +- Connection limits + +### PCP Overview + +Pgpool provides an interface called PCP for administrators that performs management operations such as retrieving the status of Pgpool or terminating Pgpool processes remotely. PCP commands are UNIX commands that manipulate Pgpool via the network. + +### Pgpool Watchdog + +`watchdog` is an optional sub process of Pgpool that provides a high availability feature. 
+ +### Pgpool Watchdog + +`watchdog` is an optional subprocess of Pgpool that provides a high availability feature. Features added by `watchdog` include: + +- Health checking of the pgpool service +- Mutual monitoring of other watchdog processes +- Changing leader/Standby state if certain faults are detected +- Automatic virtual IP address assigning synchronous to server switching +- Automatic registration of a server as a Standby during recovery + +More information about the `Pgpool watchdog` component can be found at: + + diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx new file mode 100644 index 00000000000..8526284e6b5 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx @@ -0,0 +1,32 @@ +--- +title: "Architecture" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/architecture.html" +--- + +![A typical EFM and Pgpool configuration](images/edb_ha_architecture.png) + +The sample architecture diagram shows four nodes as described in the table below: + +| **Systems** | **Components** | +| ------------------------------- | -------------- | +| Primary Pgpool/EFM witness node | The Primary Pgpool node runs only Pgpool and the EFM witness, leaving as many resources as possible available to Pgpool. During normal operation (no Pgpool failovers), the Primary Pgpool node holds the Virtual IP address, and all applications connect to Pgpool through the Virtual IP address. Pgpool will forward all write traffic to the Primary Database node, and will balance all reads across all Standby nodes. On the Primary Pgpool node, the EFM witness process ensures that a minimum quota of three EFM agents remains available even if one of the database nodes fails; for example, when a node is already unavailable due to maintenance or failure and another failure occurs. | +| Primary Database node | The Primary Database node will only run Postgres (Primary) and EFM, leaving all resources to Postgres. Read/Write traffic (i.e., INSERT, UPDATE, DELETE) is forwarded to this node by the Primary Pgpool node. | +| Standby nodes | The Standby nodes are running Postgres (Standby), EFM, and an inactive Pgpool process. In case of a Primary database failure, EFM will promote Postgres on one of these Standby nodes to handle read-write traffic. In case of a Primary Pgpool failure, the Pgpool watchdog will activate Pgpool on one of the Standby nodes, which will attach the VIP and handle the forwarding of application connections to the Database nodes. Note that in a double failure situation (both the Primary Pgpool node and the Primary Database node are in failure), both of these Primary processes might end up on the same node.
| + +This architecture: + +- Achieves high availability by providing two Standbys that can be promoted in case of a Primary Postgres node failure. +- Achieves high availability by providing at least three Pgpool processes in a watchdog configuration. +- Increases performance with mixed and read-intensive workloads by introducing increased read scalability with more than one Standby for load balancing. +- Reduces load on the Primary database node by redirecting read-only traffic through the Primary Pgpool node to the Standby nodes. +- Prevents resource contention between Pgpool and Postgres on the Primary Database node. By not running Pgpool on the Primary database node, the Primary Postgres process can utilize as many resources as possible. +- Prevents resource contention between Pgpool and Postgres on the Primary Pgpool node. By not running Standby databases on the Primary Pgpool node, Pgpool can utilize as many resources as possible. +- Optionally, synchronous replication can be set up to achieve near-zero data loss in a failure event. + +!!! Note + The architecture also allows us to completely separate 3 virtual machines running Postgres from 3 virtual machines running Pgpool. This kind of setup requires 2 extra virtual machines, but it is a better choice if you want to prevent resource contention between Pgpool and Postgres in failover scenarios. In this setup, the architecture can run without an extra 7th node running the EFM witness process. To increase failure resolution, EFM witness agents could be deployed on the Pgpool servers. + +![Deployment of EFM and Pgpool on separate virtual machines](images/edb_ha_architecture_separate_VM.png) diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx new file mode 100644 index 00000000000..8487a402cf1 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx @@ -0,0 +1,161 @@ +--- +title: "Implementing High Availability with Pgpool" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/components_ha_pgpool.html" +--- + +Failover Manager monitors the health of Postgres nodes; in the event of a database failure, Failover Manager performs an automatic failover to a Standby node. Note that Pgpool does not monitor the health of backend nodes and will not perform failover to any Standby nodes. + +## Configuring Failover Manager + +Failover Manager provides functionality that will remove failed database nodes from Pgpool load balancing; it can also re-attach nodes to Pgpool when they are returned to the Failover Manager cluster. To configure EFM for high availability using Pgpool, you must set the following properties in the cluster properties file: + +pgpool.enable = <true/false> + +pcp.user = <User that will be invoking PCP commands> + +pcp.host = <Virtual IP that will be used by pgpool. Same as pgpool parameter 'delegate_IP'> + +pcp.port = <The port on which pgpool listens for pcp commands> + +pcp.pass.file = <Absolute path of PCPPASSFILE> + +pgpool.bin = <Absolute path of pgpool bin directory>
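As a minimal sketch, the properties above might be filled in as follows; the user name, password file location, and Pgpool installation path are illustrative assumptions that must match your own environment:

```text
# Example values only; adjust for your environment
pgpool.enable=true
# PCP user created for Pgpool administration (assumed name)
pcp.user=pcpuser
# Virtual IP used by Pgpool; same value as delegate_IP in pgpool.conf (assumed address)
pcp.host=172.16.1.10
# Default PCP port
pcp.port=9898
# Absolute path of the PCPPASSFILE (assumed location)
pcp.pass.file=/var/lib/efm/.pcppass
# Absolute path of the Pgpool bin directory (assumed installation path)
pgpool.bin=/usr/edb/pgpool4.1/bin
```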
+ +## Configuring Pgpool + +This section lists the configuration of some important parameters in the `pgpool.conf` file to integrate Pgpool-II with EFM. + +**Backend node setting** + +There are three PostgreSQL backend nodes, one Primary and two Standby nodes. Configure them using the `backend_*` configuration parameters in `pgpool.conf`, and use equal backend weights for all nodes. This distributes read queries equally among all nodes. + +```text +backend_hostname0 = 'server1_IP' +backend_port0 = 5444 +backend_weight0 = 1 +backend_flag0 = 'DISALLOW_TO_FAILOVER' + +backend_hostname1 = 'server2_IP' +backend_port1 = 5444 +backend_weight1 = 1 +backend_flag1 = 'DISALLOW_TO_FAILOVER' + +backend_hostname2 = 'server3_IP' +backend_port2 = 5444 +backend_weight2 = 1 +backend_flag2 = 'DISALLOW_TO_FAILOVER' +``` + +**Enable Load-balancing and streaming replication mode** + +Set the following configuration parameters in the `pgpool.conf` file to enable load balancing and streaming replication mode: + +```text +master_slave_mode = on +master_slave_sub_mode = 'stream' +load_balance_mode = on +``` + +**Disable health-checking and failover** + +Health checking and failover must be handled by EFM and hence must be disabled on the Pgpool-II side. To disable health checking and failover on the Pgpool-II side, assign the following values: + +```text +health_check_period = 0 +fail_over_on_backend_error = off +failover_if_affected_tuples_mismatch = off +failover_command = '' +failback_command = '' +``` + +Ensure the following while setting up the values in the `pgpool.conf` file: + +- Keep the value of wd_priority in pgpool.conf different on each node. The node with the highest value gets the highest priority. +- The properties backend_hostname0, backend_hostname1, backend_hostname2 and so on are shared properties (in EFM terms) and should hold the same value for all the nodes in the pgpool.conf file. +- Update the correct interface value in the `if_up_cmd`, `if_down_cmd`, and arping command properties in the pgpool.conf file. +- Add the properties heartbeat_destination0, heartbeat_destination1, heartbeat_destination2, and so on, as per the number of nodes, in the pgpool.conf file on every node. Here, heartbeat_destination0 should be the IP/hostname of the local node. + +**Setting up PCP** + +The script uses the PCP interface, so you need to set up PCP and the .PCPPASS file to allow PCP connections without a password prompt. + +setup PCP: + +setup PCPPASS: + +Note that load balancing is turned on to ensure read scalability by distributing read traffic across the standby nodes. + +The health checking and error-triggered backend failover have been turned off, as Failover Manager will be responsible for performing health checks and triggering failover. It is not advisable for Pgpool to perform health checking in this case, so as not to create a conflict with Failover Manager or prematurely perform failover. + +Finally, `search_primary_node_timeout` has been set to a low value to ensure prompt recovery of Pgpool services upon a Failover Manager-triggered failover. + +## Virtual IP Addresses + +Both Pgpool-II and Failover Manager provide functionality to employ a virtual IP for seamless failover. While both provide this capability, in this design the Pgpool-II leader is the process that receives the application connections through the Virtual IP, and Virtual IP management is performed by the Pgpool-II watchdog system. The EFM VIP has no beneficial effect in this design and must be disabled. + +Note that if the active Pgpool instance fails (the Primary Pgpool server in our sample architecture), the next available Standby Pgpool instance (according to watchdog priority) will be activated and take charge as the leader Pgpool instance.
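As a minimal sketch of the point above, EFM's own virtual IP support can simply be left unconfigured in the cluster properties file so that only the Pgpool-II watchdog (via `delegate_IP`) manages the VIP. The property names below are assumed from the cluster properties file reference; confirm them against your EFM version:

```text
# efm.properties: leave EFM's VIP handling disabled; the Pgpool watchdog owns the VIP
virtual.ip=
virtual.ip.interface=
virtual.ip.prefix=
```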
+ +## Configuring Pgpool-II Watchdog + +Watchdog provides high availability for Pgpool-II nodes. This section lists the configuration required for watchdog on each Pgpool-II node. + +**Common watchdog configurations on all Pgpool nodes** + +The following configuration parameters enable and configure the watchdog. The interval and retry values can be adjusted depending upon the requirements and testing results. + +```text +use_watchdog = on # enable watchdog +wd_port = 9000 # watchdog port, can be changed +delegate_IP = 'Virtual IP address' +wd_lifecheck_method = 'heartbeat' +wd_interval = 10 # we can lower this value for quick detection +wd_life_point = 3 +# virtual IP control +ifconfig_path = '/sbin' # ifconfig command path +if_up_cmd = 'ifconfig eth0:0 inet $_IP_$ netmask 255.255.255.0' + # startup delegate IP command +if_down_cmd = 'ifconfig eth0:0 down' # shutdown delegate IP command +arping_path = '/usr/sbin' # arping command path +``` + +!!! Note + Replace the value of eth0 with the network interface on your system. See [Chapter 5](05_appendix_b/#configuration-for-number-of-connections-and-pooling) for tuning the number of connections and the pooling configuration. + +**Watchdog configurations on server 2** + +```text +other_pgpool_hostname0 = 'server 3 IP/hostname' +other_pgpool_port0 = 9999 +other_wd_port0 = 9000 +other_pgpool_hostname1 = 'server 4 IP/hostname' +other_pgpool_port1 = 9999 +other_wd_port1 = 9000 +wd_priority = 1 +``` + +**Watchdog configurations on server 3** + +```text +other_pgpool_hostname0 = 'server 2 IP/hostname' +other_pgpool_port0 = 9999 +other_wd_port0 = 9000 +other_pgpool_hostname1 = 'server 4 IP/hostname' +other_pgpool_port1 = 9999 +other_wd_port1 = 9000 +wd_priority = 3 +``` + +**Watchdog configurations on server 4** + +```text +other_pgpool_hostname0 = 'server 2 IP/hostname' +other_pgpool_port0 = 9999 +other_wd_port0 = 9000 +other_pgpool_hostname1 = 'server 3 IP/hostname' +other_pgpool_port1 = 9999 +other_wd_port1 = 9000 +wd_priority = 5 # use high watchdog priority on server 4 +``` diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/04_appendix_a.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/04_appendix_a.mdx new file mode 100644 index 00000000000..5e3d3bb11e3 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/04_appendix_a.mdx @@ -0,0 +1,70 @@ +--- +title: "EFM Pgpool Integration Using Azure Network Load Balancer" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/appendix_a.html" +--- + + + +This section describes a specific use case for EFM Pgpool integration, where the database, EFM, and Pgpool are installed on CentOS 8 Virtual Machines in Azure. For this specific use case, the Azure Network Load Balancer (NLB) has been used to distribute the traffic among all the active Pgpool instances instead of directing the traffic using the Pgpool VIP.
![Architecture diagram for EFM and Pgpool integration using Azure Load Balancer](images/EFM_PgPool_Azure.png) + +**Step 1 (Installation)**: + +Install and configure Advanced Server database, EFM, and Pgpool on Azure Virtual Machines as follows: + +| **Systems** | **Components** | +| ----------- | ------------------------------------------------------------------------------ | +| Primary | Primary node running Advanced Server 13 and Failover Manager 4.1 | +| Standby 1 | Standby node running Advanced Server 13, Failover Manager 4.1, and Pgpool 4.1. | +| Standby 2 | Standby node running Advanced Server 13, Failover Manager 4.1, and Pgpool 4.1. | +| Witness | Witness node running Failover Manager 4.1 and Pgpool 4.1. | + +**Step 2 (Pgpool configuration)**: + +Configure Pgpool as per the steps given in chapter 3 (except for delegate_ip, which should be left empty in this architecture). + +**Step 3 (Azure Load Balancer configuration)**: + +You need to do the following configuration to use the Azure NLB: + +**Networking**: You need to ensure the following settings for the Network Load Balancer and for each of the virtual machines: Assign a public IP as well as a private IP to the NLB, and only private IPs to the virtual machines. The application server should connect to the NLB over the public IP, and the NLB in turn should connect to the virtual machines over their private IPs. + +In the current scenario, the following IP addresses are assigned to each component: + +- Public IP of NLB: 40.76.240.33 (pcp.host) +- Private IP of Primary db: 172.16.1.3 (note that this is not part of the backend pool of the Load Balancer) +- Private IP of Standby 1: 172.16.1.4 +- Private IP of Standby 2: 172.16.1.5 +- Private IP of witness node: 172.16.1.6 + +Ensure that the ports required to run the database, EFM, and Pgpool are open for communication. Following is the list of default ports for each of these components (you can customize the ports for your environment): + +- Database: 5444 +- EFM: 7800 (bind.address) +- Pgpool: 9000, 9694, 9898, 9999 + +**Backend pool**: Create a Backend pool consisting of the 3 virtual machines running Pgpool instances. Use the private IPs of the virtual machines to create the Backend pool. + +![Backend pool in Azure console](images/backend_pools.png) + +**Health Probe**: Add a health probe to check if the Pgpool instance is available on the virtual machines. The health probe periodically pings the virtual machines of the Backend pool on port 9999. If it does not receive a response from a virtual machine, it assumes that the Pgpool instance is not available and stops sending traffic to that machine. + +![Health probes in Azure console](images/health_probes.png) + +**Load balancing rules**: Add two Load balancing rules - one each for port 9898 and port 9999. These rules should ensure that the network traffic coming towards that particular port gets distributed evenly among all the virtual machines present in the Backend pool. + +![Load balancing rules in Azure console](images/load_balancing_rules.png) + +1. Rule created for port 9898 (i.e., PCP port) + +![Load balancing rule for port 9898](images/rule_port_9898.png) + +2. Rule created for port 9999 (i.e., Pgpool port) + +![Load balancing rule for port 9999](images/rule_port_9999.png) + +After configuration of the above-mentioned setup, you can connect to Postgres on the IP address of the Network Load Balancer on port 9999.
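For example, a client could connect through the load balancer as sketched below; the user and database names are placeholders for your own environment:

```text
psql -h 40.76.240.33 -p 9999 -U enterprisedb -d edb
```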
+If a failure occurs on the Primary database server, EFM promotes a new Primary and then reconfigures Pgpool to redistribute traffic. If any one of the Pgpool processes is no longer available to accept traffic, the Network Load Balancer redistributes all the traffic to the remaining two Pgpool processes. Make sure that `listen_backlog_multiplier` is tuned to compensate for the higher number of connections in case of failover.
diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/05_appendix_b.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/05_appendix_b.mdx
new file mode 100644
index 00000000000..042804c6718
--- /dev/null
+++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/05_appendix_b.mdx
@@ -0,0 +1,17 @@
+---
+title: "Configuration for Number of Connections and Pooling"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/appendix_b.html"
+---
+
+Pgpool provides several configuration parameters to tune connection pooling and connection processing. Depending on these settings, the Postgres `max_connections` parameter must also be set high enough that all required connections can be accepted. Furthermore, note that the cloud architecture works with active/active instances, which requires spreading `num_init_children` over all Pgpool instances (divide the value you would normally use by the number of active instances). The text below describes the effect of changing each parameter and advises values for both the on-premise and the cloud architecture.
+
+**max_pool**: Generally, it is advised to set `max_pool` to 1. Alternatively, for applications with many reconnects, `max_pool` can be set to the number of distinct combinations of users, databases, and connection options used by the application. All but one connection in each pool would be a stale connection, which consumes a connection slot in Postgres without adding to performance. It is therefore advised not to configure `max_pool` beyond 4, to preserve a healthy ratio between active and stale connections. As an example, for an application that constantly reconnects and uses two distinct users, each connecting to its own database, set it to 2. If both users can connect to both databases, set it to 4. Note that increasing `max_pool` requires tuning down `num_init_children` in Pgpool, or tuning up `max_connections` in Postgres.
+
+**num_init_children**: It is advised to set `num_init_children` to the number of connections that could be running actively in parallel, divided by the number of active Pgpool-II instances (one for the on-premise architecture, and all instances for the cloud architecture). As an example, in an architecture with 3 Pgpool instances, to allow the application to have 100 active connections in parallel, set `num_init_children` to 100 for the on-premise architecture, and to 33 for the cloud architecture. Note that increasing `num_init_children` generally requires tuning up `max_connections` in Postgres.
+
+**listen_backlog_multiplier**: Can be set to queue connections beyond the number of active connections (`num_init_children`), which increases the number of open connections as perceived by the application.
+As an example, if the application might open 500 connections of which 100 should be active in parallel, then with the on-premise architecture `num_init_children` should be set to 100 and `listen_backlog_multiplier` should be set to 4. This setup can process 100 connections in parallel, and another 400 (`listen_backlog_multiplier*num_init_children`) connections will be queued before new connections are blocked. The application would perceive a total of 500 open connections, while Postgres would process the load of at most 100 connections at all times. Note that increasing `listen_backlog_multiplier` only causes the application to perceive more open connections; it does not increase the number of parallel active connections (which is determined by `num_init_children`).
+
+**max_connections**: It is advised to set `max_connections` in Postgres higher than `[number of active Pgpool instances]*[max_pool]*[num_init_children] + [superuser_reserved_connections] (Postgres)`. As an example, in the on-premise setup with 3 instances in active/passive mode (one active instance), `max_pool` set to 2, `num_init_children` set to 100, and `superuser_reserved_connections (Postgres)` set to 5, Postgres `max_connections` should be set equal to or higher than `[1*2*100+5]`, which is 205. A similar setup in the cloud would run with 3 active instances, `max_pool` set to 2, `num_init_children` set to 33, and `superuser_reserved_connections (Postgres)` set to 5, in which case Postgres `max_connections` should be set equal to or higher than `[3*2*33+5]`, which is 203. Note that configuring a value below the advised setting can cause issues opening new connections, and in combination with `max_pool` can cause unexpected behaviour (low or no active connections, but still connection issues due to stale pooled connections using connection slots from Postgres). For more information on the relation between `num_init_children`, `max_pool`, and `max_connections`, see this background information.
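
To tie these recommendations together, the following is a minimal sketch (illustrative values only, based on the cloud example above with 3 active Pgpool-II instances and 100 desired parallel active connections) of how the related settings might be combined:

```text
# pgpool.conf on each of the 3 active Pgpool-II instances
num_init_children = 33            # 100 desired active connections / 3 active instances
max_pool = 2                      # two distinct user/database combinations
listen_backlog_multiplier = 4     # up to 4 * 33 additional connections queued per instance

# postgresql.conf on the database servers
max_connections = 210             # at least 3 * 2 * 33 + 5 = 203
superuser_reserved_connections = 5
```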
diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EDB_logo.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EDB_logo.png new file mode 100755 index 00000000000..9ec76139f63 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EDB_logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d3f95f25c7493174f25102604b286ceb5116b7b41c15a0dc232c8fd852536de +size 13356 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png new file mode 100644 index 00000000000..5bde6798c07 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f96dc8dad8fb1514127e410dbe6bd668691a0138b731e150afb8b5cffb2f9e65 +size 38838 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/backend_pools.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/backend_pools.png new file mode 100644 index 00000000000..927dbdbc997 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/backend_pools.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6674dda03b836ac7e5e06cb059a15650f966f3d816263a04ddbb7fba4ec74436 +size 147475 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture.png new file mode 100644 index 00000000000..cd42278ac4d --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a08834d26e39190da4f533032ad9f78ec5f253c97167f504aee92da9ec9ce76 +size 35314 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture1.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture1.png new file mode 100755 index 00000000000..547cbf01a6e --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:950a1df9ad74895e52417a738a64014eed2203d7d98a1ee95c5aa86ba3078577 +size 116023 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png new file mode 100644 index 00000000000..826dfbabc8b --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c7ad7caf3ea611ac0d56dbdfdc3c67513863e0efd1b88dec306a77caa8d127c +size 39576 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_logo.svg b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_logo.svg new file mode 100755 index 00000000000..74babf2f8da --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/edb_logo.svg @@ -0,0 +1,56 @@ + + + + +logo + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/failover_manager_overview.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/failover_manager_overview.png new file mode 100755 index 00000000000..0a3389950c6 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/failover_manager_overview.png @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:a5784411bf1d038252baba457c643c00d59a9ea67d3eaaab73b04b8025a62249 +size 87850 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/health_probes.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/health_probes.png new file mode 100644 index 00000000000..d68d6e41fd9 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/health_probes.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16026cb626476565b516885fd5dadc3dbceb933964d0189bb22a992cb4de8229 +size 114669 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/load_balancing_rules.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/load_balancing_rules.png new file mode 100644 index 00000000000..081db02c30e --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/load_balancing_rules.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f26fa44740e64aed35044635b87629b4561f083dd6ce950a88ba6a38c3008daa +size 138639 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/placeholder.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/placeholder.png new file mode 100755 index 00000000000..3c3bf2a4365 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/placeholder.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 +size 16849 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9898.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9898.png new file mode 100644 index 00000000000..290825aeeb3 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9898.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:593aa7ddebe937d7fb837b4784658abfa1733389cd09873a150b5ea66778a2d4 +size 118143 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9999.png b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9999.png new file mode 100644 index 00000000000..8d19389dd7a --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/images/rule_port_9999.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:738e8fad910a66ce32c087cd410c6b9b06a7eff0b9388bc553b021b08f085301 +size 117221 diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx new file mode 100644 index 00000000000..645be9cabc6 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx @@ -0,0 +1,18 @@ +--- +title: "EDB Postgres High Availability & Horizontal Read Scaling Architecture" + +#legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/genindex.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/introduction.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/conclusion.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/index.html" +--- + +Since high-availability and read scalability are not part of the core feature set of EDB Postgres Advanced Server, Advanced Server relies on external tools to provide this functionality. 
This document focuses on the functionality provided by EDB Failover Manager and Pgpool-II, and discusses the implications of a high-availability architecture formed around these tools. + +
+ +introduction architecture components_ha_pgpool appendix_a appendix_b conclusion + +
diff --git a/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.png b/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.png new file mode 100755 index 00000000000..3c3bf2a4365 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 +size 16849 diff --git a/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.svg b/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.svg new file mode 100755 index 00000000000..74babf2f8da --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_quick_start/images/edb_logo.svg @@ -0,0 +1,56 @@ + + + + +logo + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/product_docs/docs/efm/3.10/efm_quick_start/images/failover_manager_overview.png b/product_docs/docs/efm/3.10/efm_quick_start/images/failover_manager_overview.png new file mode 100755 index 00000000000..0a3389950c6 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_quick_start/images/failover_manager_overview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5784411bf1d038252baba457c643c00d59a9ea67d3eaaab73b04b8025a62249 +size 87850 diff --git a/product_docs/docs/efm/3.10/efm_quick_start/images/placeholder.png b/product_docs/docs/efm/3.10/efm_quick_start/images/placeholder.png new file mode 100755 index 00000000000..3c3bf2a4365 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_quick_start/images/placeholder.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 +size 16849 diff --git a/product_docs/docs/efm/3.10/efm_quick_start/index.mdx b/product_docs/docs/efm/3.10/efm_quick_start/index.mdx new file mode 100644 index 00000000000..f196c7ac5c3 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_quick_start/index.mdx @@ -0,0 +1,146 @@ +--- +title: "Creating a Failover Manager Cluster" + +#legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/4.0/genindex.html" + #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/4.0/index.html" +--- + +EDB Postgres Failover Manager (Failover Manager) is a high-availability module from EnterpriseDB that enables a Postgres Primary node to automatically failover to a Standby node in the event of a software or hardware failure on the Primary. + +This quick start guide describes configuring a Failover Manager cluster in a test environment. You should read and understand the [EDB Failover Manager User's Guide](/efm/latest/) before configuring Failover Manager for a production deployment. + +You must perform some basic installation and configuration steps before performing this tutorial: + +- You must install and initialize a database server on one primary and one or two standby nodes; for information about installing Advanced Server, visit: + + [https://www.enterprisedb.com/docs/p/edb-postgres-advanced-server](/epas/latest/) + +- Postgres streaming replication must be configured and running between the primary and standby nodes. For detailed information about configuring streaming replication, visit: + + . + +- You must also install Failover Manager on each primary and standby node. 
During Advanced Server installation, you configured an EnterpriseDB repository on each database host. You can use the EnterpriseDB repository and the `yum install` command to install Failover Manager on each node of the cluster: + + ```text + yum install edb-efm41 + ``` + +During the installation process, the installer will create a user named `efm` that has sufficient privileges to invoke scripts that control the Failover Manager service for clusters owned by `enterprisedb` or `postgres`. The example that follows creates a cluster named `efm`. + +Start the configuration process on a primary or standby node. Then, copy the configuration files to other nodes to save time. + +**Step 1: Create Working Configuration Files** + +Copy the provided sample files to create EFM configuration files, and correct the ownership: + +```text +cd /etc/edb/efm-4.0 + +cp efm.properties.in efm.properties + +cp efm.nodes.in efm.nodes + +chown efm:efm efm.properties + +chown efm:efm efm.nodes +``` + +**Step 2: Create an Encrypted Password** + +Create the [encrypted password](/efm/latest/efm_user/04_configuring_efm/02_encrypting_database_password/) needed for the properties file: + +```text +/usr/edb/efm-4.0/bin/efm encrypt efm +``` + +Follow the onscreen instructions to produce the encrypted version of your database password. + +**Step 3: Update the efm.properties File** + +The `.properties` file (efm.properties file in this example) contains parameters that specify connection properties and behaviors for your Failover Manager cluster. Modifications to property settings are applied when Failover Manager starts. + +The properties mentioned in this tutorial are the minimal properties required to configure a Failover Manager cluster. If you are configuring a production system, please review the *EDB Failover Manager Guide* for detailed information about Failover Manager options. + +Provide values for the following properties on all cluster nodes: + +| Property | Description | +| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------- | +| `db.user` | The name of the database user. | +| `db.password.encrypted` | The encrypted password of the database user. | +| `db.port` | The port monitored by the database. | +| `db.database` | The name of the database. | +| `db.service.owner` | The owner of the `data` directory (usually `postgres` or `enterprisedb`). Required only if the database is running as a service. | +| `db.service.name` | The name of the database service (used to restart the server). Required only if the database is running as a service. | +| `db.bin` | The path to the `bin` directory (used for calls to `pg_ctl`). | +| `db.recovery.dir` | The `data` directory in which EFM will find or create the `recovery.conf` file or the `standby.signal` file. | +| `user.email` | An email address at which to receive email notifications (notification text is also in the agent log file). | +| `bind.address` | The local address of the node and the port to use for EFM. The format is: `bind.address=1.2.3.4:7800` | +| `is.witness` | `true` on a witness node and `false` if it is a primary or standby. | +| `ping.server.ip` | If you are running on a network without Internet access, set `ping.server.ip` to an address that is available on your network. | +| `auto.allow.hosts` | On a test cluster, set to `true` to simplify startup; for production usage, consult the user's guide. 
| +| `stable.nodes.file` | On a test cluster, set to `true` to simplify startup; for production usage, consult the user's guide. | + +**Step 4: Update the efm.nodes File** + +The `.nodes` file (efm.nodes file in this example) is read at startup to tell an agent how to find the rest of the cluster or, in the case of the first node started, can be used to simplify authorization of subsequent nodes. Add the addresses and ports of each node in the cluster to this file. One node will act as the membership coordinator; the list should include at least the membership coordinator's address. For example: + + `1.2.3.4:7800` + + `1.2.3.5:7800` + + `1.2.3.6:7800` + +Please note that the Failover Manager agent will not verify the content of the `efm.nodes` file; the agent expects that some of the addresses in the file cannot be reached (e.g. that another agent hasn’t been started yet). + +**Step 5: Configure the Other Nodes** + +Copy the `efm.properties` and `efm.nodes` files to the `/etc/edb/efm-4.0` directory on the other nodes in your sample cluster. After copying the files, change the file ownership so the files are owned by `efm:efm`. The `efm.properties` file can be the same on every node, except for the following properties: + +- Modify the `bind.address` property to use the node’s local address. +- Set `is.witness` to `true` if the node is a witness node. If the node is a witness node, the properties relating to a local database installation will be ignored. + +**Step 6: Start the EFM Cluster** + +On any node, start the Failover Manager agent. The agent is named `edb-efm-4.0`; you can use your platform-specific service command to control the service. For example, on a CentOS/RHEL 7.x or CentOS/RHEL 8.x host use the command: + +```text +systemctl start edb-efm-4.0 +``` + +On a a CentOS or RHEL 6.x host use the command: + +```text +service edb-efm-4.0 start +``` + +After the agent starts, run the following command to see the status of the single-node cluster. You should see the addresses of the other nodes in the `Allowed node host` list. + +```text +/usr/edb/efm-4.0/bin/efm cluster-status efm +``` + +Start the agent on the other nodes. Run the `efm cluster-status efm` command on any node to see the cluster status. + +If any agent fails to start, see the startup log for information about what went wrong: + +```text +cat /var/log/efm-4.0/startup-efm.log +``` + +**Performing a Switchover** + +If the cluster status output shows that the primary and standby(s) are in sync, you can perform a switchover with the following command: + +```text +/usr/edb/efm-4.0/bin/efm promote efm -switchover +``` + +The command will promote a standby and reconfigure the primary database as a new standby in the cluster. To switch back, run the command again. + +For quick access to online help, you can invoke the following command: + +```text +/usr/edb/efm-4.0/bin/efm --help +``` diff --git a/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx b/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx new file mode 100644 index 00000000000..33bc9b3f8f5 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx @@ -0,0 +1,17 @@ +--- +title: "What’s New" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.1/whats_new.html" +--- + + + +The following changes have been made to EDB Postgres Failover Manager to create version 4.0: + +- Encryption for database password has been improved. Encryption has also been enabled for communication between the agents. +- Standby servers are no longer stopped while selecting the new primary. This enhancement significantly speeds up the promotion process. +- To be consistent with community naming guidelines, the term Master has been replaced with Primary in the Failover Manager product and documentation. The upgrade-conf tool will handle the task of renaming the impacted properties post-upgrade. The load balancer scripts such as `script.load.balancer.attach`, `script.load. balancer.detach` will now accept character `p` instead of character `m` as an argument. +- Support has been added to delay the restart of standbys after a promotion. You can increase the availability by staggering the restart of standbys. +- A primary agent now attempts to resume health monitoring in a situation where the agent can not reach its local database but other agents can. diff --git a/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx new file mode 100644 index 00000000000..dff775c99ff --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx @@ -0,0 +1,102 @@ +--- +title: "Prerequisites" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.1/prerequisites.html" +--- + + + +Before configuring a Failover Manager cluster, you must satisfy the prerequisites described below. + +**Install Java 1.8 (or later)** + +Before using Failover Manager, you must first install Java (version 1.8 or later). Failover Manager is tested with OpenJDK, and we strongly recommend installing that version of Java. [Installation instructions for Java](https://openjdk.java.net/install/) are platform specific. + +**Provide an SMTP Server** + +You can receive notifications from Failover Manager as specified by a user-defined notification script, by email, or both. + +- If you are using email notifications, an SMTP server must be running on each node of the Failover Manager scenario. +- If you provide a value in the script.notification property, you can leave the user.email field blank; an SMTP server is not required. + +If an event occurs, Failover Manager invokes the script (if provided), and can also send a notification email to any email addresses specified in the user.email parameter of the cluster properties file. For more information about using an SMTP server, visit: + +[https://access.redhat.com/site/documentation](https://access.redhat.com/site/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Deployment_Guide/s1-email-mta.html) + +**Configure Streaming Replication** + +Failover Manager requires that PostgreSQL streaming replication be configured between the Primary node and the Standby node or nodes. Failover Manager does not support other types of replication. + +On database versions 11 (or prior), unless specified with the `-sourcenode` option, a `recovery.conf` file is copied from a random standby node to the stopped primary during switchover. 
You should ensure that the paths within the `recovery.conf` file on your standby nodes are consistent before performing a switchover. For more information about the `-sourcenode` option, please see [Promoting a Failover Manager Node](../05_using_efm/#promote_node). + +On database version 12 or later, the `primary_conninfo` and `restore_command` properties are copied from a random standby node to the stopped primary during switchover (unless otherwise specified with the `-sourcenode` option). + +**Modify the pg_hba.conf File** + +You must modify the `pg_hba.conf` file on the Primary and Standby nodes, adding entries that allow communication between the all of the nodes in the cluster. The following example demonstrates entries that might be made to the pg_hba.conf file on the Primary node: + +```text +# access for itself +host fmdb efm 127.0.0.1/32 md5 +# access for standby +host fmdb efm 192.168.27.1/32 md5 +# access for witness +host fmdb efm 192.168.27.34/32 md5 +``` + +Where: + + `efm` specifies the name of a valid database user. + + `fmdb` specifies the name of a database to which the efm user may connect. + +By default, the `pg_hba.conf` file resides in the `data` directory, under your Postgres installation. After modifying the `pg_hba.conf` file, you must reload the configuration file on each node for the changes to take effect. You can use the following command: + + `# systemctl reload edb-as-x` + +Where `x` specifies the Postgres version. + +**Using Autostart for the Database Servers** + +If a Primary node reboots, Failover Manager may detect the database is down on the Primary node and promote a Standby node to the role of Primary. If this happens, the Failover Manager agent on the (rebooted) Primary node will not get a chance to write the `recovery.conf` file; the `recovery.conf` file prevents the database server from starting. If this happens, the rebooted Primary node will return to the cluster as a second Primary node. + +To prevent this, ensure that the Failover Manager agent auto starts before the database server. The agent will start in idle mode, and check to see if there is already a primary in the cluster. If there is a primary node, the agent will verify that a `recovery.conf` or `standby.signal` file exists, and the database will not start as a second primary. + +**Ensure Communication Through Firewalls** + +If a Linux firewall (i.e. iptables) is enabled on the host of a Failover Manager node, you may need to add rules to the firewall configuration that allow tcp communication between the Failover Manager processes in the cluster. For example: + +```text +# iptables -I INPUT -p tcp --dport 7800 -j ACCEPT +/sbin/service iptables save +``` + +The command shown above opens the port 7800. Failover Manager will connect via the port that corresponds to the port specified in the cluster properties file. + +**Ensure that the Database user has Sufficient Privileges** + +The database user specified by the `db.user` property in the `efm.properties` file must have sufficient privileges to invoke the following functions on behalf of Failover Manager: + + `pg_current_wal_lsn()` + + `pg_last_wal_replay_lsn()` + + `pg_wal_replay_resume()` + + `pg_wal_replay_pause()` + + `pg_reload_conf()` + +The `pg_reload_conf()` privilege is required only if you have the `reconfigure.num.sync` or `reconfigure.sync.primary` property set to `true`. 
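
For illustration only, assuming a non-superuser database role named `efm` (a hypothetical name; substitute the role specified by `db.user`), a superuser could grant execute privileges on these functions as follows:

```text
GRANT EXECUTE ON FUNCTION pg_catalog.pg_current_wal_lsn() TO efm;
GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO efm;
GRANT EXECUTE ON FUNCTION pg_catalog.pg_wal_replay_resume() TO efm;
GRANT EXECUTE ON FUNCTION pg_catalog.pg_wal_replay_pause() TO efm;
GRANT EXECUTE ON FUNCTION pg_catalog.pg_reload_conf() TO efm;
```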
+ +For detailed information about each of these functions, please see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/index.html). + +The user must also have permissions to read the values of configuration variables; a database superuser can use the PostgreSQL `GRANT` command to provide the permissions needed: + +```text +GRANT pg_read_all_settings TO user_name; +``` + +For more information about `pg_read_all_settings`, please see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/default-roles.html). diff --git a/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx new file mode 100644 index 00000000000..fac4ec73d30 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx @@ -0,0 +1,37 @@ +--- +title: "Failover Manager Overview" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.1/failover_manager_overview.html" +--- + + + +An EDB Postgres Failover Manager (EFM) cluster is comprised of Failover Manager processes that reside on the following hosts on a network: + +- A Primary node - The Primary node is the primary database server that is servicing database clients. +- One or more Standby nodes - A Standby node is a streaming replication server associated with the Primary node. +- A Witness node - The Witness node confirms assertions of either the Primary or a Standby in a failover scenario. A cluster does not need a dedicated witness node if the cluster contains three or more nodes. If you do not have a third cluster member that is a database host, you can add a dedicated Witness node. A cluster may include more than one witness node. + +Traditionally, a *cluster* is a single instance of Postgres managing multiple databases. In this document, the term cluster refers to a Failover Manager cluster. A Failover Manager cluster consists of a Primary agent, one or more Standby agents, and an optional Witness agent that reside on servers in a cloud or on a traditional network and communicate using the JGroups toolkit. + +![An EFM scenario employing a Virtual IP address.](../images/failover_manager_overview.png) + +When a non-witness agent starts, it connects to the local database and checks the state of the database: + +- If the agent cannot reach the database, it will start in idle mode. +- If it finds that the database is in recovery, the agent assumes the role of standby. +- If the database is not in recovery, the agent assumes the role of primary. + +In the event of a failover, Failover Manager attempts to ensure that the promoted standby is the most up-to-date standby in the cluster; please note that data loss is possible if the standby node is not in sync with the primary node. + +[JGroups](http://www.jgroups.org/) provides technology that allows Failover Manager to create clusters whose member nodes can communicate with each other and detect node failures. + +The figure shown above illustrates a Failover Manager cluster that employs a virtual IP address. You can use a load balancer in place of a [virtual IP address](../04_configuring_efm/05_using_vip_addresses/#using_vip_addresses) if you provide your own [script](../04_configuring_efm/01_cluster_properties/#cluster_properties) to re-configure the load balancer whenever databases are added or removed. 
You can also choose to enable native EFM-Pgpool integration for high availability. + +
+ +prerequisites + +
diff --git a/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx b/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx new file mode 100644 index 00000000000..add844dddfe --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx @@ -0,0 +1,315 @@ +--- +title: "Installing Failover Manager" +legacyRedirects: + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_debian_ubuntu.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_sles.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/post_install_tasks.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_rhel_centos_oel.html" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_efm.html" +--- + + + + + +To request credentials that allow you to access an EnterpriseDB repository, visit the EDB website at: + + + +## RedHat or CentOS Host + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter y, and press Return to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + +Failover Manager must be installed by root. During the installation process, the installer will also create a user named efm that has sufficient privileges to invoke scripts that control the Failover Manager service for clusters owned by enterprisedb or postgres. + +After receiving your credentials, you must create the EnterpriseDB repository configuration file on each node of the cluster, and then modify the file to enable access. The following steps provide detailed information about accessing the EnterpriseDB repository; the steps must be performed on each node of the cluster. + +### RHEL or CentOS 7 PPCLE Host + +1. Use the following command to create a configuration file and install Advance Toolchain: + + ```text + rpm --import https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b + + cat > /etc/yum.repos.d/advance-toolchain.repo <:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` + +4. Install the EPEL repository: + + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` + +5. On RHEL 7 PPCLE, enable the additional repositories to resolve EPEL dependencies: + + ```text + subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" + ``` + +6. Install the selected package: + + ```text + yum -y install edb-efm40 + ``` + +### RHEL or CentOS 7 Host + +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: + + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` + +3. 
Install the EPEL repository: + + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` + +4. On RHEL 7, enable the additional repositories to resolve dependencies: + + ```text + subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" + ``` + +5. Install the selected package: + + ```text + yum -y install edb-efm40 + ``` + +### RHEL or CentOS 8 Host + +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: + ```text + dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: + + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` + +3. Install the EPEL repository: + +- On CentOS 8 + ```text + dnf -y install epel-release + ``` +- On RHEL 8 + ```text + dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + ``` + +4. Enable the additional repositories to resolve dependencies: + +- On CentOS 8 + ```text + dnf config-manager --set-enabled PowerTools + ``` +- On RHEL 8 + + ```text + ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" + ``` + +5. Disable the built-in PostgreSQL module: + + ```text + dnf -qy module disable postgresql + ``` +6. Install the selected package: + ```text + dnf -y install edb-efm40 + ``` + +## Debian or Ubuntu Host + +To install Failover Manager, you must have credentials that allow access to the EnterpriseDB repository. To request credentials for the repository, visit the EnterpriseDB website at: + + + +The following steps will walk you through using the EnterpriseDB apt repository to install Failover Manager. + +### Debian Host + +1. Assume superuser privileges: + ```text + sudo su – + ``` +2. Configure the EnterpriseDB repository by substituting your EnterpriseDB credentials for the username and password placeholders in the following commands: + +- On Debian 9 + ```text + sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + ``` +- On Debian 10 + + ```text + sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + + sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' + ``` + +3. Add support to your system for secure APT repositories: + ```text + apt-get install apt-transport-https + ``` +4. Add the EDB signing key: + ```text + wget -q -O - https://:@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - + ``` +5. Update the repository meta data: + ```text + apt-get update + ``` +6. Install Failover Manager: + ```text + apt-get -y install edb-efm40 + ``` + +### Ububtu Host + +1. Assume superuser privileges: + ```text + sudo su – + ``` +2. 
Configure the EnterpriseDB repository by substituting your EnterpriseDB credentials for the username and password placeholders in the following commands: + +- On Ubuntu 18.04 + ```text + sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + ``` +- On Ubuntu 20.4 + + ```text + sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + + sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' + ``` + +3. Add support to your system for secure APT repositories: + ```text + apt-get install apt-transport-https + ``` +4. Add the EDB signing key: + ```text + wget -q -O - https://:@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - + ``` +5. Update the repository meta data: + ```text + apt-get update + ``` +6. Install Failover Manager: + ```text + apt-get -y install edb-efm40 + ``` + +## SLES Host + +To install Failover Manager, you must have credentials that allow access to the EnterpriseDB repository. To request credentials for the repository, visit the EnterpriseDB website at: + + + +You can use the zypper package manager to install a Failover Manager agent on an SLES 12 host. zypper will attempt to satisfy package dependencies as it installs a package, but requires access to specific repositories that are not hosted at EnterpriseDB. + +1. You must assume superuser privileges and stop any firewalls before installing Failover Manager. Then, use the following commands to add EnterpriseDB repositories to your system: + + ```text + zypper addrepo https://zypp.enterprisedb.com/suse/edb-sles.repo + ``` + +2. The commands create the repository configuration files in the /etc/zypp/repos.d directory. Then, use the following command to refresh the metadata on your SLES host to include the EnterpriseDB repository: + + ```text + zypper refresh + ``` + + When prompted, provide credentials for the repository, and specify a to always trust the provided key, and update the metadata to include the EnterpriseDB repository. + +3. You must also add SUSEConnect and the SUSE Package Hub extension to the SLES host, and register the host with SUSE, allowing access to SUSE repositories. Use the commands: + + ```text + zypper install SUSEConnect + SUSEConnect -r -e + SUSEConnect -p PackageHub/12.4/x86_64 + SUSEConnect -p sle-sdk/12.4/x86_64 + ``` + +4. Install SUSEConnect to register the host with SUSE, allowing access to SUSE repositories: + + ```text + zypper addrepo https://download.opensuse.org/repositories/Apache:/Modules/SLE_12_SP4/Apache:Modules.repo + ``` + +5. Install OpenJDK (version 1.8) for Java based components: + + ```text + zypper -n install java-1_8_0-openjdk + ``` + +6. Now you can use the zypper utility to install a Failover Manager agent: + + ```text + zypper -n install edb-efm40 + ``` + +## Performing post-installation tasks + +If you are using Failover Manager to monitor a cluster owned by a user other than `enterprisedb` or `postgres`, see [Extending Failover Manager Permissions](04_configuring_efm/04_extending_efm_permissions/#extending_efm_permissions) . + +After installing on each node of the cluster, you must: + +1. Modify the [cluster properties file](04_configuring_efm/01_cluster_properties/#cluster_properties) on each node. +2. Modify the [cluster members file](04_configuring_efm/03_cluster_members/#cluster_members) on each node. +3. 
If applicable, configure and test virtual IP address settings and any scripts that are identified in the cluster properties file. +4. Start the agent on each node of the cluster. For more information about controlling the service, see [Section 5](08_controlling_efm_service/#controlling-the-failover-manager-service). + +### Installation Locations + +components are installed in the following locations: + +| Component | Location | +| --------------------------------- | --------------------------- | +| Executables | /usr/edb/efm-4.0/bin | +| Libraries | /usr/edb/efm-4.0/lib | +| Cluster configuration files | /etc/edb/efm-4.0 | +| Logs | /var/log/efm- 4.0 | +| Lock files | /var/lock/efm-4.0 | +| Log rotation file | /etc/logrotate.d/efm-4.0 | +| sudo configuration file | /etc/sudoers.d/efm-40 | +| Binary to access VIP without sudo | /usr/edb/efm-4.0/bin/secure | diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx new file mode 100644 index 00000000000..77f0a39f8c9 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx @@ -0,0 +1,77 @@ +--- +title: "Encrypting Your Database Password" +--- + + + +Failover Manager requires you to encrypt your database password before including it in the cluster properties file. Use the [efm utility](../../07_using_efm_utility/#efm_encrypt) (located in the `/usr/edb/efm-4.0/bin` directory) to encrypt the password. When encrypting a password, you can either pass the password on the command line when you invoke the utility, or use the `EFMPASS` environment variable. + +To encrypt a password, use the command: + +```text +# efm encrypt [ --from-env ] +``` + +Where `` specifies the name of the Failover Manager cluster. + +If you include the `--from-env` option, you must export the value you wish to encrypt before invoking the encryption utility. For example: + +```text +export EFMPASS=password +``` + +If you do not include the `--from-env` option, Failover Manager will prompt you to enter the database password twice before generating an encrypted password for you to place in your cluster property file. When the utility shares the encrypted password, copy and paste the encrypted password into the cluster property files. + +!!! Note + Many Java vendors ship their version of Java with full-strength encryption included, but not enabled due to export restrictions. If you encounter an error that refers to an illegal key size when attempting to encrypt the database password, you should download and enable a Java Cryptography Extension (JCE) that provides an unlimited policy for your platform. + +The following example demonstrates using the encrypt utility to encrypt a password for the `acctg` cluster: + +```text +# efm encrypt acctg +This utility will generate an encrypted password for you to place in + your EFM cluster property file: +/etc/edb/efm-4.0/acctg.properties +Please enter the password and hit enter: +Please enter the password again to confirm: +The encrypted password is: 516b36fb8031da17cfbc010f7d09359c +Please paste this into your acctg.properties file +db.password.encrypted=516b36fb8031da17cfbc010f7d09359c +``` + +!!! Note + The utility will notify you if a properties file does not exist. 
+ +After receiving your encrypted password, paste the password into the properties file and start the Failover Manager service. If there is a problem with the encrypted password, the Failover Manager service will not start: + +```text +[witness@localhost ~]# systemctl start edb-efm-4.0 +Job for edb-efm-4.0.service failed because the control process exited with error code. See "systemctl status edb-efm-4.0.service" and "journalctl -xe" for details. +``` + +If you receive this message when starting the Failover Manager service, please see the startup log (located in `/var/log/efm-4.0/startup-efm.log`) for more information. + +If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, startup information is also available with the following command: + +```text +systemctl status edb-efm-4.0 +``` + +To prevent a cluster from inadvertently connecting to the database of another cluster, the cluster name is incorporated into the encrypted password. If you modify the cluster name, you will need to re-encrypt the database password and update the cluster properties file. + +**Using the EFMPASS Environment Variable** + +The following example demonstrates using the --from-env environment variable when encrypting a password. Before invoking the `efm encrypt` command, set the value of `EFMPASS` to the password (`1safepassword`): + +```text +# export EFMPASS=1safepassword +``` + +Then, invoke `efm encrypt`, specifying the `--from-env` option: + +```text +# efm encrypt acctg --from-env +# 7ceecd8965fa7a5c330eaa9e43696f83 +``` + +The encrypted password (`7ceecd8965fa7a5c330eaa9e43696f83`) is returned as a text value; when using a script, you can check the exit code of the command to confirm that the command succeeded. A successful execution returns `0`. diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx new file mode 100644 index 00000000000..a447ad897d6 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx @@ -0,0 +1,1186 @@ +--- +title: "The Cluster Properties File" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/cluster_properties.html" +--- + + + +Each node in a Failover Manager cluster has a properties file (by default, named `efm.properties`) that contains the properties of the individual node on which it resides. The Failover Manager installer creates a file template for the properties file named `efm.properties.in` in the `/etc/edb/efm-4.0` directory. + +After completing the Failover Manager installation, you must make a working copy of the template before modifying the file contents: + +```text +# cp /etc/edb/efm-4.0/efm.properties.in /etc/edb/efm-4.0/efm.properties +``` + +After copying the template file, change the owner of the file to `efm`: + +```text +# chown efm:efm efm.properties +``` + +!!! Note + By default, Failover Manager expects the cluster properties file to be named `efm.properties`. If you name the properties file something other than `efm.properties`, you must modify the service script or unit file to instruct Failover Manager to use a different name. + +After creating the cluster properties file, add (or modify) configuration parameter values as required. 
For detailed information about each property, see [Specifying Cluster Properties](#specifying-cluster-properties). + +The property files are owned by `root`. The Failover Manager service script expects to find the files in the `/etc/edb/efm-4.0 directory`. If you move the property file to another location, you must create a symbolic link that specifies the new location. + +!!! Note + All user scripts referenced in the properties file will be invoked as the Failover Manager user. + + + +## Specifying Cluster Properties + +You can use the properties listed in the cluster properties file to specify connection properties and behaviors for your Failover Manager cluster. Modifications to property settings will be applied when Failover Manager starts. If you modify a property value you must restart Failover Manager to apply the changes. + +Property values are case-sensitive. Note that while Postgres uses quoted strings in parameter values, Failover Manager does not allow quoted strings in property values. For example, while you might specify an IP address in a Postgres configuration parameter as: + +> `listen_addresses='192.168.2.47'` + +Failover Manager requires that the value *not* be enclosed in quotes: + +> `bind.address=192.168.2.54:7800` + +Use the properties in the `efm.properties` file to specify connection, administrative, and operational details for Failover Manager. + +**Legends**: In the following table: + +- `A`: Required on Primary or Standby node +- `W`: Required on Witness node +- `Y` : Yes + +| **Property Name** | **A** | **W** | **Default Value** | **Comments** | +| ------------------------------------------------------------- | ----- | ----- | ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [db.user](#db_user) | Y | Y | | Username for the database | +| [db.password.encrypted](#db_password_encrypted) | Y | Y | | Password encrypted using 'efm encrypt' | +| [db.port](#db_port) | Y | Y | | This value must be same for all the agents | +| [db.database](#db_database) | Y | Y | | Database name | +| [db.service.owner](#db_service_owner) | Y | | | Owner of $PGDATA dir for db.database | +| [db.service.name](#db_service_name) | | | | Required if running the database as a service | +| [db.bin](#db_bin) | Y | | | Directory containing the pg_controldata/pg_ctl commands such as '/usr/edb/as12/bin' | +| [db.data.dir](#db_data_dir) | Y | | | Same as the output of query 'show data_directory;' | +| [db.config.dir](#db_config_dir) | | | | Same as the output of query 'show config_file;'. 
Should be specified if it is not same as *db.data.dir* | +| [jdbc.sslmode](#jdbc_sslmode) | Y | Y | disable | See the [note](#jdbc_note) | +| [user.email](#user_email) | | | | This value must be same for all the agents; can be left blank if using a notification script | +| [from.email](#from_email) | | | [efm@localhost](mailto:efm@localhost) | Leave blank to use the default [efm@localhost](mailto:efm@localhost) | +| [notification.level](#notification_level) | Y | Y | INFO | See the [list of notifications](../../10_notifications/#notifications) | +| [notification.text.prefix](#notification_text_prefix) | | | | | +| [script.notification](#script_notification) | | | | Required if user.email property is not used; both parameters can be used together | +| [bind.address](#bind_address) | Y | Y | | Example: <ip_address>:<port> | +| [external.address](#external_address) | | | | Example: <ip_address/hostname> | +| [admin.port](#admin_port) | Y | Y | 7809 | Modify if the default port is already in use | +| [is.witness](#is_witness) | Y | Y | | See description | +| [local.period](#local_period) | Y | | 10 | | +| [local.timeout](#local_timeout) | Y | | 60 | | +| [local.timeout.final](#local_timeout_final) | Y | | 10 | | +| [remote.timeout](#remote_timeout) | Y | Y | 10 | | +| [node.timeout](#node_timeout) | Y | Y | 50 | This value must be same for all the agents | +| [encrypt.agent.messages](#encrypt_agent_messages) | Y | Y | false | This value must be same for all the agents | +| [stop.isolated.primary](#stop_isolated_primary) | Y | | true | | +| [stop.failed.primary](#stop_failed_primary) | Y | | true | | +| [primary.shutdown.as.failure](#primary_shutdown_as_failure) | Y | Y | false | | +| [update.physical.slots.period](#update_physical_slots_period) | Y | | 0 | | +| [ping.server.ip](#ping_server_ip) | Y | Y | 8.8.8.8 | | +| [ping.server.command](#ping_server_command) | Y | Y | /bin/ping -q -c3 -w5 | | +| [auto.allow.hosts](#auto_allow_hosts) | Y | Y | false | | +| [stable.nodes.file](#stable_nodes_file) | Y | Y | false | | +| [db.reuse.connection.count](#db_reuse_connection_count) | Y | | 0 | | +| [auto.failover](#auto_failover) | Y | Y | true | | +| [auto.reconfigure](#auto_reconfigure) | Y | | true | This value must be same for all the agents | +| [promotable](#promotable) | Y | | true | | +| [use.replay.tiebreaker](#use_replay_tiebreaker) | Y | Y | true | This value must be same for all the agents | +| [standby.restart.delay](#standby_restart_delay) | | | 0 | | +| [application.name](#application_name) | | | | Set to replace the application_name portion of the primary_conninfo entry with this property value before starting the original primary database as a standby. 
| +| [restore.command](#restore_command) | | | | Example: restore.command=scp <db_service_owner>@%h: <archive_path>/%f %p | +| [reconfigure.num.sync](#reconfigure_num_sync) | Y | | false | | +| [reconfigure.sync.primary](#reconfigure_sync_primary) | Y | | false | | +| [minimum.standbys](#minimum_standbys) | Y | Y | 0 | This value must be same for all the nodes | +| [recovery.check.period](#recovery_check_period) | Y | | 1 | | +| [restart.connection.timeout](#restart_connection_timeout) | | | 60 | | +| [auto.resume.period](#auto_resume_period) | Y | | 0 | | +| [virtual.ip](#virtual_ip) | | | (see virtual.ip.single) | Leave blank if you do not specify a VIP | +| [virtual.ip.interface](#virtual_ip) | | | | Required if you specify a VIP | +| [virtual.ip.prefix](#virtual_ip) | | | | Required if you specify a VIP | +| [virtual.ip.single](#virtual_ip) | Y | Y | Yes | This value must be same for all the nodes | +| [check.vip.before.promotion](#check_vip_before_promotion) | Y | Y | Yes | | +| [script.load.balancer.attach](#script_load_balancer) | | | | Example: script.load.balancer.attach= /<path>/<attach_script> %h %t | +| [script.load.balancer.detach](#script_load_balancer) | | | | Example: script.load.balancer.detach= /<path>/<detach_script> %h %t | +| [script.fence](#script_fence) | | | | Example: script.fence= /<path>/<script_name> %p %f | +| [script.post.promotion](#script_post_promotion) | | | | Example: script.post.promotion= /<path>/<script_name> %f %p | +| [script.resumed](#script_resumed) | | | | Example: script.resumed= /<path>/<script_name> | +| [script.db.failure](#script_db_failure) | | | | Example: script.db.failure= /<path>/<script_name> | +| [script.primary.isolated](#script_primary_isolated) | | | | Example: script.primary.isolated= /<path>/<script_name> | +| [script.remote.pre.promotion](#script_remote_pre_promotion) | | | | Example: script.remote.pre.promotion= /<path>/<script_name> %p | +| [script.remote.post.promotion](#script_remote_post_promotion) | | | | Example: script.remote.post.promotion= /<path>/<script_name> %p | +| [script.custom.monitor](#script_custom_monitor) | | | | Example: script.custom.monitor= /<path>/<script_name> | +| [custom.monitor.interval](#script_custom_monitor) | | | | Required if a custom monitoring script is specified | +| [custom.monitor.timeout](#script_custom_monitor) | | | | Required if a custom monitoring script is specified | +| [custom.monitor.safe.mode](#script_custom_monitor) | | | | Required if a custom monitoring script is specified | +| [sudo.command](#sudo_command) | Y | Y | sudo | | +| [sudo.user.command](#sudo_command) | Y | Y | sudo -u %u | | +| [lock.dir](#lock_dir) | | | | If not specified, defaults to '/var/lock/efm-<version>' | +| [log.dir](#log_dir) | | | | If not specified, defaults to '/var/log/efm-<version>' | +| [syslog.host](#syslog_logging) | | | localhost | | +| [syslog.port](#syslog_logging) | | | 514 | | +| [syslog.protocol](#syslog_logging) | | | | | +| [syslog.facility](#syslog_logging) | | | UDP | | +| [file.log.enabled](#logtype_enabled) | Y | Y | true | | +| [syslog.enabled](#logtype_enabled) | Y | Y | false | | +| [jgroups.loglevel](#loglevel) | | | info | | +| [efm.loglevel](#loglevel) | | | info | | +| [jvm.options](#jvm_options) | | | -Xmx128m | | + +**Cluster Properties** + + + + + + + + + +Use the following properties to specify connection details for the Failover Manager cluster: + +```text +# The value for the password property should be the output from +# 'efm encrypt' -- do not include a cleartext 
password here. To +# prevent accidental sharing of passwords among clusters, the +# cluster name is incorporated into the encrypted password. If +# you change the cluster name (the name of this file), you must +# encrypt the password again with the new name. +# The db.port property must be the same for all nodes. +db.user= +db.password.encrypted= +db.port= +db.database= +``` + +The `db.user` specified must have sufficient privileges to invoke selected PostgreSQL commands on behalf of Failover Manager. For more information, please see [Prerequisites](../../02_failover_manager_overview/01_prerequisites/#prerequisites). + +For information about encrypting the password for the database user, see [Encrypting Your Database Password](01_encrypting_database_password/#encrypting_database_password). + + + +Use the `db.service.owner` property to specify the name of the operating system user that owns the cluster that is being managed by Failover Manager. This property is not required on a dedicated witness node. + +```text +# This property tells EFM which OS user owns the $PGDATA dir for +# the 'db.database'. By default, the owner is either 'postgres' +# for PostgreSQL or 'enterprisedb' for EDB Postgres Advanced +# Server. However, if you have configured your db to run as a +# different user, you will need to copy the /etc/sudoers.d/efm-XX +# conf file to grant the necessary permissions to your db owner. +# +# This username must have write permission to the +# 'db.data.dir' specified below. +db.service.owner= +``` + + + +Specify the name of the database service in the `db.service.name` property if you use the service or systemctl command when starting or stopping the service. + +```text +# Specify the proper service name in order to use service commands +# rather than pg_ctl to start/stop/restart a database. For example, if +# this property is set, then 'service restart' or 'systemctl +# restart ' +# (depending on OS version) will be used to restart the database rather +# than pg_ctl. +# This property is required if running the database as a service. +db.service.name= +``` + + + +You should use the same service control mechanism (pg_ctl, service, or systemctl) each time you start or stop the database service. If you use the `pg_ctl` program to control the service, specify the location of the `pg_ctl` program in the `db.bin` property. + +```text +# Specify the directory containing the pg_controldata/pg_ctl commands, +# for example: +# /usr/edb/as11/bin. Unless the db.service.name property is used, the +# pg_ctl command is used to start/stop/restart databases as needed +# after a failover or switchover. This property is required. +db.bin= +``` + + + +Use the `db.data.dir` property to specify the location to which a recovery file will be written on the Primary node of the cluster during promotion. This property is required on primary and standby nodes; it is not required on a dedicated witness node. + +```text +# For database version 12 and up, this is the directory where a +# standby.signal file will exist for a standby node. For previous +# versions, this is the location of the db recovery.conf file on +# the node. +# After a failover, the recovery.conf files on remaining standbys are +# changed to point to the new primary db (a copy of the original is made +# first). On a primary node, a recovery.conf file will be written during +# failover and promotion to ensure that the primary node can not be +# restarted as the primary database. 
+# This corresponds to database environment variable PGDATA and should +# be same as the output of query 'show data_directory;' on respective +# database. +db.data.dir= +``` + + + +Use the `db.config.dir` property to specify the location of database configuration files if they are not stored in the same directory as the `recovery.conf` or `standby.signal` file. This should be the value specified by the `config_file` parameter directory of your Advanced Server or PostgreSQL installation. This value will be used as the location of the Postgres `data` directory when stopping, starting, or restarting the database. + +```text +# Specify the location of database configuration files if they are +# not contained in the same location as the recovery.conf or +# standby.signal file. This is most likely the case for Debian +# installations. The location specified will be used as the -D value +# (the location of the data directory for the cluster) when calling +# pg_ctl to start or stop the database. If this property is blank, +# the db.data.dir location specified by the db.data.dir property will +# be used. This corresponds to the output of query 'show config_file;' +# on respective database. +db.config.dir= +``` + +For more information about database configuration files, visit the [PostgreSQL website](https://www.postgresql.org/docs/current/runtime-config-file-locations.html). + + + +Use the `jdbc.sslmode` property to instruct Failover Manager to use SSL connections; by default, SSL is disabled. + +```text +# Use the jdbc.sslmode property to enable ssl for EFM +# connections. Setting this property to anything but 'disable' +# will force the agents to use 'ssl=true' for all JDBC database +# connections (to both local and remote databases). +# Valid values are: +# +# disable - Do not use ssl for connections. +# verify-ca - EFM will perform CA verification before allowing +# the certificate. +# require - Verification will not be performed on the server +# certificate. +jdbc.sslmode=disable +``` + + + +!!! Note + If you set the value of `jdbc.sslmode` to `verify-ca` and you want to use Java trust store for certificate validation, you need to set the following value: + + `jdbc.properties=sslfactory=org.postgresql.ssl.DefaultJavaSSLFactory` + +For information about configuring and using SSL, please see: + +> + +and + +> + + + +Use the `user.email` property to specify an email address (or multiple email addresses) that will receive any notifications sent by Failover Manager. + +```text +# Email address(es) for notifications. The value of this +# property must be the same across all agents. Multiple email +# addresses must be separated by space. If using a notification +# script instead, this property can be left blank. +user.email= +``` + + + +The `from.email` property specifies the value that will be used as the sender's address on any email notifications from Failover Manager. You can: + +- leave `from.email` blank to use the default value (`efm@localhost`). +- specify a custom value for the email address. +- specify a custom email address, using the `%h` placeholder to represent the name of the node host (e.g., [example@%h](mailto:example@%h)). The placeholder will be replaced with the name of the host as returned by the Linux hostname utility. + +For more information about notifications, see [Notifications](../../10_notifications/#notifications). + +```text +# Use the from.email property to specify the from email address that +# will be used for email notifications. 
Use the %h placeholder to +# represent the name of the node host (e.g. example@%h). The +# placeholder will be replaced with the name of the host as returned +# by the hostname command. +# Leave blank to use the default, efm@localhost. +from.email= +``` + + + +Use the `notification.level` property to specify the minimum severity level at which Failover Manager will send user notifications or when a notification script is called. For a complete list of notifications, please see [Notifications](../../10_notifications/#notifications). + +```text +# Minimum severity level of notifications that will be sent by +# the agent. The minimum level also applies to the notification +# script (below). Valid values are INFO, WARNING, and SEVERE. +# A list of notifications is grouped by severity in the user's +# guide. +notification.level=INFO +``` + + + +Use the `notification.text.prefix` property to specify the text to be added to the beginning of every notification. + +```text +# Text to add to the beginning of every notification. This could +# be used to help identify what the cluster is used for, the role +# of this node, etc. To use multiple lines, add a backslash \ to +# the end of a line of text. To include a newline use \n. +# Example: +# notification.text.prefix=Development cluster for Example dept.\n\ +# Used by Dev and QA \ +# See Example group for questions. +notification.text.prefix= +``` + + + +Use the `script.notification` property to specify the path to a user-supplied script that acts as a notification service; the script will be passed a message subject and a message body. The script will be invoked each time Failover Manager generates a user notification. + +```text +# Absolute path to script run for user notifications. +# +# This is an optional user-supplied script that can be used for +# notifications instead of email. This is required if not using +# email notifications. Either/both can be used. The script will +# be passed two parameters: the message subject and the message +# body. +script.notification= +``` + + + +The `bind.address` property specifies the IP address and port number of the agent on the current node of the Failover Manager cluster. + +```text +# This property specifies the ip address and port that jgroups +# will bind to on this node. The value is of the form +# :. +# Note that the port specified here is used for communicating +# with other nodes, and is not the same as the admin.port below, +# used only to communicate with the local agent to send control +# signals. +# For example, :7800 +bind.address= +``` + + + +Use the `external.address` property to specify the IP address or hostname that should be used for communication with all other Failover Manager agents in a NAT environment. + +```text +# This is the ip address/hostname to be used for communication with all +# other Failover Manager agents. All traffic towards this address +# should be routed by the network to the bind.address of the node. +# The value is in the ip/hostname format only. This address will be +# used in scenarios where nodes are on different networks and broadcast +# an IP address other than the bind.address to the external world. +external.address= +``` + + + +Use the `admin.port` property to specify a port on which Failover Manager listens for administrative commands. + +```text +# This property controls the port binding of the administration +# server which is used for some commands (ie cluster-status). The +# default is 7809; you can modify this value if the port is +# already in use. 
+admin.port=7809 +``` + + + +Set the `is.witness` property to true to indicate that the current node is a witness node. If is.witness is true, the local agent will not check to see if a local database is running. + +```text +# Specifies whether or not this is a witness node. Witness nodes +# do not have local databases running. +is.witness= +``` + +The Postgres `pg_is_in_recovery()` function is a boolean function that reports the recovery state of a database. The function returns `true` if the database is in recovery, or false if the database is not in recovery. When an agent starts, it connects to the local database and invokes the `pg_is_in_recovery()` function. If the server responds true, the agent assumes the role of standby; if the server responds false, the agent assumes the role of primary. If there is no local database, the agent will assume an idle state. + +!!! Note + If `is.witness` is `true`, Failover Manager will not check the recovery state. + + + + + + + +The following properties specify properties that apply to the local server: + +- The `local.period` property specifies how many seconds between attempts to contact the database server. +- The `local.timeout` property specifies how long an agent will wait for a positive response from the local database server. +- The `local.timeout.final` property specifies how long an agent will wait after the above-mentioned previous checks have failed to contact the database server on the current node. If a response is not received from the database within the number of seconds specified by the `local.timeout.final` property, the database is assumed to have failed. + +For example, given the default values of these properties, a check of the local database happens once every 10 seconds. If an attempt to contact the local database does not come back positive within 60 seconds, Failover Manager makes a final attempt to contact the database. If a response is not received within 10 seconds, Failover Manager declares database failure and notifies the administrator listed in the user.email property. These properties are not required on a dedicated witness node. + +```text +# These properties apply to the connection(s) EFM uses to monitor +# the local database. Every 'local.period' seconds, a database +# check is made in a background thread. If the main monitoring +# thread does not see that any checks were successful in +# 'local.timeout' seconds, then the main thread makes a final +# check with a timeout value specified by the +# 'local.timeout.final' value. All values are in seconds. +# Whether EFM uses single or multiple connections for database +# checks is controlled by the 'db.reuse.connection.count' +# property. +local.period=10 +local.timeout=60 +local.timeout.final=10 +``` + +If necessary, you should modify these values to suit your business model. + + + +Use the `remote.timeout` property to specify how many seconds an agent waits for a response from a remote database server (i.e., how long a standby agent waits to verify that the primary database is actually down before performing failover). The `remote.timeout` property value specifies a timeout value for agent-to-agent communication; other timeout properties in the cluster properties file specify values for agent-to-database communication. + +```text +# Timeout for a call to check if a remote database is responsive. +# For example, this is how long a standby would wait for a +# DB ping request from itself and the witness to the primary DB +# before performing failover. 
+remote.timeout=10 +``` + + + +Use the `node.timeout` property to specify the number of seconds that an agent will wait for a response from a node when determining if a node has failed. + +```text +# The total amount of time in seconds to wait before determining +# that a node has failed or been disconnected from this node. +# +# The value of this property must be the same across all agents. +node.timeout=50 +``` + + + +Use the `encrypt.agent.messages` property to specify if the messages sent between agents should be encrypted. + +```text +# Set to true to encrypt messages that are sent between agents. +# This property must be the same on all agents or else the agents +# will not be able to connect. +encrypt.agent.messages=false +``` + + + +Use the `stop.isolated.primary` property to instruct Failover Manager to shut down the database if a primary agent detects that it is isolated. When true (the default), Failover Manager will stop the database before invoking the script specified in the `script.primary.isolated` property. + +```text +# Shut down the database after a primary agent detects that it has +# been isolated from the majority of the efm cluster. If set to +# true, efm will stop the database before running the +# 'script.primary.isolated' script, if a script is specified. +stop.isolated.primary=true +``` + + + +Use the `stop.failed.primary` property to instruct Failover Manager to attempt to shut down a primary database if it can not reach the database. If `true`, Failover Manager will run the script specified in the `script.db.failure` property after attempting to shut down the database. + +```text +# Attempt to shut down a failed primary database after EFM can no +# longer connect to it. This can be used for added safety in the +# case a failover is caused by a failure of the network on the +# primary node. +# If specified, a 'script.db.failure' script is run after this attempt. +stop.failed.primary=true +``` + + + +Use the `primary.shutdown.as.failure` parameter to indicate that any shutdown of the Failover Manager agent on the primary node should be treated as a failure. If this parameter is set to `true` and the primary agent stops (for any reason), the cluster will attempt to confirm if the database on the primary node is running: + +- If the database is reached, a notification will be sent informing you of the agent status. +- If the database is not reached, a failover will occur. + +```text +# Treat a primary agent shutdown as an agent failure. This can be set +# to true to treat a primary agent shutdown as a failure situation, +# e.g. during the shutdown of a node, accidental or otherwise. +# Caution should be used when using this feature, as it could +# cause an unwanted promotion in the case of performing primary +# database maintenance. +# Please see the user's guide for more information. +primary.shutdown.as.failure=false +``` + +The `primary.shutdown.as.failure` property is meant to catch user error, rather than failures, such as the accidental shutdown of a primary node. The proper shutdown of a node can appear to the rest of the cluster like a user has stopped the primary Failover Manager agent (for example to perform maintenance on the primary database). If you set the `primary.shutdown.as.failure` property to `true`, care must be taken when performing maintenance. 
+ +To perform maintenance on the primary database when `primary.shutdown.as.failure` is `true`, you should stop the primary agent and wait to receive a notification that the primary agent has failed but the database is still running. Then it is safe to stop the primary database. Alternatively, you can use the `efm stop-cluster` command to stop all of the agents without failure checks being performed. + + + +Use the `update.physical.slots.period` property to define the slot advance frequency for database version 12 and above. When `update.physical.slots.period` is set to a non-zero value, the primary agent will read the current `restart_lsn` of the physical replication slots after every `update.physical.slots.period` seconds, and send this information with its `pg_current_wal_lsn` and `primary_slot_name` (If it is set in the postgresql.conf file) to the standbys. If physical slots do not already exist, setting this parameter to a non-zero value will create the slots and then update the `restart_lsn parameter` for these slots. A non-promotable standby will not create new slots but will update them if they exist. + +```text +# Period in seconds between having the primary agent update promotable +# standbys with physical replication slot information so that +# the cluster will continue to use replication slots after a failover. +# Set to zero to turn off. +update.physical.slots.period=0 +``` + + + +Use the `ping.server.ip` property to specify the IP address of a server that Failover Manager can use to confirm that network connectivity is not a problem. + +```text +# This is the address of a well-known server that EFM can ping +# in an effort to determine network reachability issues. It +# might be the IP address of a nameserver within your corporate +# firewall or another server that *should* always be reachable +# via a 'ping' command from each of the EFM nodes. +# +# There are many reasons why this node might not be considered +# reachable: firewalls might be blocking the request, ICMP might +# be filtered out, etc. +# +# Do not use the IP address of any node in the EFM cluster +# (primary, standby, or witness) because this ping server is meant +# to provide an additional layer of information should the EFM +# nodes lose sight of each other. +# +# The installation default is Google's DNS server. +ping.server.ip=8.8.8.8 +``` + + + +Use the `ping.server.command` property to specify the command used to test network connectivity. + +```text +# This command will be used to test the reachability of certain +# nodes. +# +# Do not include an IP address or hostname on the end of +# this command - it will be added dynamically at runtime with the +# values contained in 'virtual.ip' and 'ping.server.ip'. +# +# Make sure this command returns reasonably quickly - test it +# from a shell command line first to make sure it works properly. +ping.server.command=/bin/ping -q -c3 -w5 +``` + + + +Use the `auto.allow.hosts` property to instruct the server to use the addresses specified in the .nodes file of the first node started to update the allowed host list. Enabling this property (setting `auto.allow.hosts` to true) can simplify cluster start-up. + +```text +# Have the first node started automatically add the addresses +# from its .nodes file to the allowed host list. This will make +# it faster to start the cluster when the initial set of hosts +# is already known. 
+auto.allow.hosts=false +``` + + + +Use the `stable.nodes.file` property to instruct the server to not rewrite the nodes file when a node joins or leaves the cluster. This property is most useful in clusters with unchanging IP addresses. + +```text +# When set to true, EFM will not rewrite the .nodes file whenever +# new nodes join or leave the cluster. This can help starting a +# cluster in the cases where it is expected for member addresses +# to be mostly static, and combined with 'auto.allow.hosts' makes +# startup easier when learning failover manager. +stable.nodes.file=false +``` + + + +The `db.reuse.connection.count` property allows the administrator to specify the number of times Failover Manager reuses the same database connection to check the database health. The default value is 0, indicating that Failover Manager will create a fresh connection each time. This property is not required on a dedicated witness node. + +```text +# This property controls how many times a database connection is +# reused before creating a new one. If set to zero, a new +# connection will be created every time an agent pings its local +# database. +db.reuse.connection.count=0 +``` + + + +The `auto.failover` property enables automatic failover. By default, auto.failover is set to true. + +```text +# Whether or not failover will happen automatically when the primary +# fails. Set to false if you want to receive the failover notifications +# but not have EFM actually perform the failover steps. +# The value of this property must be the same across all agents. +auto.failover=true +``` + + + +Use the `auto.reconfigure` property to instruct Failover Manager to enable or disable automatic reconfiguration of remaining Standby servers after the primary standby is promoted to Primary. Set the property to `true` to enable automatic reconfiguration (the default) or `false` to disable automatic reconfiguration. This property is not required on a dedicated witness node. If you are using Advanced Server or PostgreSQL version 11 or earlier, the `recovery.conf` file will be backed up during the reconfiguration process. + +```text +# After a standby is promoted, Failover Manager will attempt to +# update the remaining standbys to use the new primary. For database +# versions before 12, Failover Manager will back up recovery.conf. +# Then it will change the host parameter of the primary_conninfo entry +# in recovery.conf or postgresql.auto.conf, and restart the database. +# The restart command is contained in either the efm_db_functions or +# efm_root_functions file; default when not running db as an os +# service is: "pg_ctl restart -m fast -w -t -D " +# where the timeout is the local.timeout property value and the +# directory is specified by db.data.dir. To turn off +# automatic reconfiguration, set this property to false. +auto.reconfigure=true +``` + +!!! Note + `primary_conninfo` is a space-delimited list of keyword=value pairs. + + + +Use the `promotable` property to indicate that a node should not be promoted. The `promotable` property is ignored when a primary agent is started. This simplifies switching back to the original primary after a switchover or failover. To override the setting, use the efm set-priority command at runtime; for more information about the efm set-priority command, see [Using the efm Utility](../../07_using_efm_utility/#using_efm_utility). + +```text +# A standby with this set to false will not be added to the +# failover priority list, and so will not be available for +# promotion. 
The property will be used whenever an agent starts +# as a standby or resumes as a standby after being idle. After +# startup/resume, the node can still be added or removed from the +# priority list with the 'efm set-priority' command. This +# property is required for all non-witness nodes. +promotable=true +``` + + + +If the same amount of data has been written to more than one standby node, and a failover occurs, the `use.replay.tiebreaker` value will determine how Failover Manager selects a replacement primary. Set the `use.replay.tiebreaker` property to `true` to instruct Failover Manager to failover to the node that will come out of recovery faster, as determined by the log sequence number. To ignore the log sequence number and promote a node based on user preference, set `use.replay.tiebreaker` to `false`. + +```text +# Use replay LSN value for tiebreaker when choosing a standby to +# promote before using failover priority. Set this property to true to +# consider replay location as more important than failover priority +# (as seen in cluster-status command) when choosing the "most ahead" +# standby to promote. +use.replay.tiebreaker=true +``` + + + +Use the `standby.restart.delay` property to specify the time in seconds that the standby should wait before it gets reconfigured (stopped/started) to follow the new primary after a promotion. + +```text +# Time in seconds for this standby to delay restarting to follow the +# primary after a promotion. This can be used to have standbys restart +# at different times to increase availability. Caution should be used +# when using this feature, as a delayed standby will not be following +# the new primary and care must be taken that the new primary retains +# enough WAL for the standby to follow it. +# Please see the user's guide for more information. +standby.restart.delay=0 +``` + + + +You can use the `application.name` property to provide the name of an application that will be copied to the `primary_conninfo` parameter before restarting an old primary node as a standby. + +```text +# During a switchover, recovery settings are copied from a standby +# to the original primary. If the application.name property is set, +# Failover Manager will replace the application_name portion of the +# primary_conninfo entry with this property value before starting +# the original primary database as a standby. If this property is +# not set, Failover Manager will remove the parameter value +# from primary_conninfo. +application.name= +``` + +!!! Note + You should set the `application.name` property on the primary and any promotable standby; in the event of a failover/switchover, the primary node could potentially become a standby node again. + + + +Use the `restore.command` property to instruct Failover Manager to update the `restore_command` when a new primary is promoted. `%h` represents the address of the new primary; Failover Manager will replace `%h` with the address of the new primary. `%f` and `%p` are placeholders used by the server. If the property is left blank, Failover Manager will not update the `restore_command` values on the standbys after a promotion. + +See the PostgreSQL documentation for more information about using a [restore_command](https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-ARCHIVE-RECOVERY). + +```text +# If the restore_command on a standby restores directly from the +# primary node, use this property to have Failover Manager change +# the command when a new primary is promoted. 
+# +# Use the %h placeholder to represent the address of the new primary. +# During promotion it will be replaced with the address of the new +# primary. +# +# If not specified, failover manager will not change the +# restore_command value, if any, on standby nodes. +# +# Example: +# restore.command=scp @%h:/var/lib/edb/as12/data/archive/%f %p +restore.command= +``` + + + +The database parameter `synchronous_standby_names` on the primary node specifies the names and count of the synchronous standby servers that will confirm receipt of data, to ensure that the primary nodes can accept write transactions. When `reconfigure.num.sync` property is set to true, Failover Manager will reduce the number of synchronous standby servers and reload the configuration of the primary node to reflect the current value. + +```text +# Reduce num_sync when the number of synchronous standbys drops below +# the value required by the primary database. If set to true, Failover +# Manager will reduce the number of standbys needed in the primary's +# synchronous_standby_names property and reload the primary +# configuration. Failover Manager will not reduce the number below 1, +# taking the primary out of synchronous replication, unless the +# reconfigure.sync.primary property is also set to true. +# To raise num_sync, see the reconfigure.num.sync.max property below. +reconfigure.num.sync=false +``` + + + +Set the `reconfigure.sync.primary` property to `true` to take the primary database out of synchronous replication mode if the number of standby nodes drops below the level required. Set `reconfigure.sync.primary` to `false` to send a notification if the standby count drops, but not interrupt synchronous replication. + +```text +# Take the primary database out of synchronous replication mode when +# needed. If set to true, Failover Manager will clear the +# synchronous_standby_names configuration parameter on the primary +# if the number of synchronous standbys drops below the required +# level for the primary to accept writes. +# If set to false, Failover Manager will detect the situation but +# will only send a notification if the standby count drops below the +# required level. +# +# CAUTION: TAKING THE PRIMARY DATABASE OUT OF SYNCHRONOUS MODE MEANS +# THERE MAY ONLY BE ONE COPY OF DATA. DO NOT MAKE THIS CHANGE UNLESS +# YOU ARE SURE THIS IS OK. +reconfigure.sync.primary=false +``` + + + +Use the `minimum.standbys` property to specify the minimum number of standby nodes that will be retained on a cluster; if the standby count drops to the specified minimum, a replica node will not be promoted in the event of a failure of the primary node. + +```text +# Instead of setting specific standbys as being unavailable for +# promotion, this property can be used to set a minimum number +# of standbys that will not be promoted. Set to one, for +# example, promotion will not happen if it will drop the number +# of standbys below this value. This property must be the same on +# each node. +minimum.standbys=0 +``` + + + +Use the `recovery.check.period` property to specify the number of seconds that Failover Manager will wait before checks to see if a database is out of recovery. + +```text +# Time in seconds between checks to see if a promoting database +# is out of recovery. 
+recovery.check.period=1 +``` + + + +Use the `restart.connection.timeout` property to specify the number of seconds that Failover Manager will attempt to connect to a newly reconfigured primary or standby node while the database on that node prepares to accept connections. + +```text +# Time in seconds to keep trying to connect to a database after a +# start or restart command returns successfully but the database +# is not ready to accept connections yet (a rare occurance). This +# applies to standby databases that are restarted when being +# reconfigured for a new primary, and to primary databases that +# are stopped and started as standbys during a switchover. +# This retry mechanism is unrelated to the auto.resume.period +# parameter. +restart.connection.timeout=60 +``` + + + +Use the `auto.resume.period` property to specify the number of seconds (after a monitored database fails and an agent has assumed an idle state, or when starting in IDLE mode) during which an agent will attempt to resume monitoring that database. + +```text +# Period in seconds for IDLE agents to try to resume monitoring +# after a database failure or when starting in IDLE mode. Set to +# 0 for agents to not try to resume (in which case the +# 'efm resume ' command is used after bringing a +# database back up). +auto.resume.period=0 +``` + + + +Failover Manager provides support for clusters that use a virtual IP. If your cluster uses a virtual IP, provide the host name or IP address in the `virtual.ip` property; specify the corresponding prefix in the `virtual.ip.prefix` property. If `virtual.ip` is left blank, virtual IP support is disabled. + +Use the `virtual.ip.interface` property to provide the network interface used by the VIP. + +The specified virtual IP address is assigned only to the primary node of the cluster. If you specify `virtual.ip.single=true`, the same VIP address will be used on the new primary in the event of a failover. Specify a value of false to provide a unique IP address for each node of the cluster. + +For information about using a virtual IP address, see [Using Failover Manager with Virtual IP Addresses](../05_using_vip_addresses/#using_vip_addresses). + +```text +# These properties specify the IP and prefix length that will be +# remapped during failover. If you do not use a VIP as part of +# your failover solution, leave the virtual.ip property blank to +# disable Failover Manager support for VIP processing (assigning, +# releasing, testing reachability, etc). +# +# If you specify a VIP, the interface and prefix are required. +# +# If you specify a host name, it will be resolved to an IP address +# when acquiring or releasing the VIP. If the host name resolves +# to more than one IP address, there is no way to predict which +# address Failover Manager will use. +# +# By default, the virtual.ip and virtual.ip.prefix values must be +# the same across all agents. If you set virtual.ip.single to +# false, you can specify unique values for virtual.ip and +# virtual.ip.prefix on each node. +# +# If you are using an IPv4 address, the virtual.ip.interface value +# should not contain a secondary virtual ip id (do not include +# ":1", etc). +virtual.ip= +virtual.ip.interface= +virtual.ip.prefix= +virtual.ip.single=true +``` + +!!! Note + If a primary agent is started and the node does not currently have the VIP, the EFM agent will acquire it. Stopping a primary agent does not drop the VIP from the node. 
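+
+For example, a single-VIP configuration for nodes that share the eth0 network interface might look like the following. The address, interface, and prefix length shown here are placeholder values; substitute the values appropriate for your network:
+
+```text
+# Illustrative values only -- replace the VIP, interface, and
+# prefix length with the values used in your environment.
+virtual.ip=172.24.38.239
+virtual.ip.interface=eth0
+virtual.ip.prefix=24
+virtual.ip.single=true
+```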
+ + + +Set the `check.vip.before.promotion` property to false to indicate that Failover Manager will not check to see if a VIP is in use before assigning it to a a new primary in the event of a failure. Note that this could result in multiple nodes broadcasting on the same VIP address; unless the primary node is isolated or can be shut down via another process, you should set this property to true. + +```text +# Whether to check if the VIP (when used) is still in use before +# promoting after a primary failure. Turning this off may allow +# the new primary to have the VIP even though another node is also +# broadcasting it. This should only be used in environments where +# it is known that the failed primary node will be isolated or +# shut down through other means. +check.vip.before.promotion=true +``` + + + +Use the following properties to provide paths to scripts that reconfigure your load balancer in the event of a switchover or primary failure scenario. The scripts will also be invoked in the event of a standby failure. If you are using these properties, they should be provided on every node of the cluster (primary, standby, and witness) to ensure that if a database node fails, another node will call the detach script with the failed node's address. + +You do not need to set the below properties if you are using Pgpool as Load Balancer solution and have set the Pgpool integration properties. + +Provide a script name after the `script.load.balancer.attach` property to identify a script that will be invoked when a node should be attached to the load balancer. Use the `script.load.balancer.detach` property to specify the name of a script that will be invoked when a node should be detached from the load balancer. Include the `%h` placeholder to represent the IP address of the node that is being attached or removed from the cluster. Include the `%t` placeholder to instruct Failover Manager to include an p (for a primary node) or an s (for a standby node) in the string. + +```text +# Absolute path to load balancer scripts +# The attach script is called when a node should be attached to +# the load balancer, for example after a promotion. The detach +# script is called when a node should be removed, for example +# when a database has failed or is about to be stopped. Use %h to +# represent the IP/hostname of the node that is being +# attached/detached. Use %t to represent the type of node being +# attached or detached: the letter m will be passed in for primary nodes +#and the letter s for standby nodes. +# +# Example: +# script.load.balancer.attach=/somepath/attachscript %h %t +script.load.balancer.attach= +script.load.balancer.detach= +``` + + + +`script.fence` specifies the path to an optional user-supplied script that will be invoked during the promotion of a standby node to primary node. + +```text +# absolute path to fencing script run during promotion +# +# This is an optional user-supplied script that will be run +# during failover on the standby database node. If left blank, +# no action will be taken. If specified, EFM will execute this +# script before promoting the standby. +# +# Parameters can be passed into this script for the failed primary +# and new primary node addresses. Use %p for new primary and %f +# for failed primary. On a node that has just been promoted, %p +# should be the same as the node's efm binding address. +# +# Example: +# script.fence=/somepath/myscript %p %f +# +# NOTE: FAILOVER WILL NOT OCCUR IF THIS SCRIPT RETURNS A NON-ZERO EXIT +# CODE. 
+script.fence= +``` + +
+ +Use the `script.post.promotion` property to specify the path to an optional user-supplied script that will be invoked after a standby node has been promoted to primary. + +```text +# Absolute path to fencing script run after promotion +# +# This is an optional user-supplied script that will be run after +# failover on the standby node after it has been promoted and +# is no longer in recovery. The exit code from this script has +# no effect on failover manager, but will be included in a +# notification sent after the script executes. +# +# Parameters can be passed into this script for the failed primary +# and new primary node addresses. Use %p for new primary and %f +# for failed primary. On a node that has just been promoted, %p +# should be the same as the node's efm binding address. +# +# Example: +# script.post.promotion=/somepath/myscript %f %p +script.post.promotion= +``` + + + +Use the `script.resumed property` to specify an optional path to a user-supplied script that will be invoked when an agent resumes monitoring of a database. + +```text +# Absolute path to resume script +# +# This script is run before an IDLE agent resumes +# monitoring its local database. +script.resumed= +``` + + + +Use the `script.db.failure` property to specify the complete path to an optional user-supplied script that Failover Manager will invoke if an agent detects that the database that it monitors has failed. + +```text +# Absolute path to script run after database failure +# This is an optional user-supplied script that will be run after +# an agent detects that its local database has failed. +script.db.failure= +``` + + + +Use the `script.primary.isolated` property to specify the complete path to an optional user-supplied script that Failover Manager will invoke if the agent monitoring the primary database detects that the primary is isolated from the majority of the Failover Manager cluster. This script is called immediately after the VIP is released (if a VIP is in use). + +```text +# Absolute path to script run on isolated primary +# This is an optional user-supplied script that will be run after +# a primary agent detects that it has been isolated from the +# majority of the efm cluster. +script.primary.isolated= +``` + + + +Use the `script.remote.pre.promotion` property to specify the path and name of a script that will be invoked on any agent nodes not involved in the promotion when a node is about to promote its database to primary. + +Include the %p placeholder to identify the address of the new primary node. + +```text +# Absolute path to script invoked on non-promoting agent nodes +# before a promotion. +# +# This optional user-supplied script will be invoked on other +# agents when a node is about to promote its database. The exit +# code from this script has no effect on Failover Manager, but +# will be included in a notification sent after the script +# executes. +# +# Pass a parameter (%p) with the script to identify the new +# primary node address. +# +# Example: +# script.remote.pre.promotion=/path_name/script_name %p +script.remote.pre.promotion= +``` + +
+ +Use the `script.remote.post.promotion` property to specify the path and name of a script that will be invoked on any non-primary nodes after a promotion occurs. + +Include the %p placeholder to identify the address of the new primary node. + +```text +# Absolute path to script invoked on non-primary agent nodes +# after a promotion. +# +# This optional user-supplied script will be invoked on nodes +# (except the new primary) after a promotion occurs. The exit code +# from this script has no effect on Failover Manager, but will be +# included in a notification sent after the script executes. +# +# Pass a parameter (%p) with the script to identify the new +# primary node address. +# +# Example: +# script.remote.post.promotion=/path_name/script_name %p +script.remote.post.promotion= +``` + + + +Use the `script.custom.monitor` property to provide the name and location of an optional script that will be invoked on regular intervals (specified in seconds by the `custom.monitor.interval` property). + +Use `custom.monitor.timeout` to specify the maximum time that the script will be allowed to run; if script execution does not complete within the time specified, Failover Manager will send a notification. + +Set `custom.monitor.safe.mode` to `true` to instruct Failover Manager to report non-zero exit codes from the script, but not promote a standby as a result of an exit code. + +```text +# Absolute path to a custom monitoring script. +# +# Use script.custom.monitor to specify the location and name of +# an optional user-supplied script that will be invoked +# periodically to perform custom monitoring tasks. A non-zero +# exit value means that a check has failed; this will be treated +# as a database failure. On a primary node, script failure will +# cause a promotion. On a standby node script failure will +# generate a notification and the agent will become IDLE. +# +# The custom.monitor.\* properties are required if a custom +# monitoring script is specified: +# +# custom.monitor.interval is the time in seconds between executions +# of the script. +# +# custom.monitor.timeout is a timeout value in seconds for how +# long the script will be allowed to run. If script execution +# exceeds the specified time, the task will be stopped and a +# notification sent. Subsequent runs will continue. +# +# If custom.monitor.safe.mode is set to true, non-zero exit codes +# from the script will be reported but will not cause a promotion +# or be treated as a database failure. This allows testing of the +# script without affecting EFM. +# +script.custom.monitor= +custom.monitor.interval= +custom.monitor.timeout= +custom.monitor.safe.mode= +``` + + + +Use the `sudo.command` property to specify a command that will be invoked by Failover Manager when performing tasks that require extended permissions. Use this option to include command options that might be specific to your system authentication. + +Use the `sudo.user.command` property to specify a command that will be invoked by Failover Manager when executing commands that will be performed by the database owner. + +```text +# Command to use in place of 'sudo' if desired when efm runs +# the efm_db_functions or efm_root_functions, or efm_address +# scripts. +# Sudo is used in the following ways by efm: +# +# sudo /usr/edb/efm-/bin/efm_address +# sudo /usr/edb/efm-/bin/efm_root_functions +# sudo -u /usr/edb/efm-/bin/efm_db_functions +# +# 'sudo' in the first two examples will be replaced by the value +# of the sudo.command property. 
'sudo -u ' will +# be replaced by the value of the sudo.user.command property. +# The '%u' field will be replaced with the db owner. +sudo.command=sudo +sudo.user.command=sudo -u %u +``` + + + +Use the `lock.dir` property to specify an alternate location for the Failover Manager lock file; the file prevents Failover Manager from starting multiple (potentially orphaned) agents for a single cluster on the node. + +```text +# Specify the directory of lock file on the node. Failover +# Manager creates a file named .lock at this location to +# avoid starting multiple agents for same cluster. If the path +# does not exist, Failover Manager will attempt to create it. If +# not specified defaults to '/var/lock/efm-' +lock.dir= +``` + + + +Use the `log.dir` property to specify the location to which agent log files will be written; Failover Manager will attempt to create the directory if the directory does not exist. + +```text +# Specify the directory of agent logs on the node. If the path +# does not exist, Failover Manager will attempt to create it. If +# not specified defaults to '/var/log/efm-'. (To store +# Failover Manager startup logs in a custom location, modify the +# path in the service script to point to an existing, writable +# directory.) +# If using a custom log directory, you must configure +# logrotate separately. Use 'man logrotate' for more information. +log.dir= +``` + + + +After enabling the UDP or TCP protocol on a Failover Manager host, you can enable logging to syslog. Use the `syslog.protocol` parameter to specify the protocol type (UDP or TCP) and the `syslog.port` parameter to specify the listener port of the syslog host. The `syslog.facility` value may be used as an identifier for the process that created the entry; the value must be between LOCAL0 and LOCAL7. + +```text +# Syslog information. The syslog service must be listening on +# the port for the given protocol, which can be UDP or TCP. +# The facilities supported are LOCAL0 through LOCAL7. +syslog.host=localhost +syslog.port=514 +syslog.protocol=UDP +syslog.facility=LOCAL1 +``` + + + +Use the `file.log.enabled` and `syslog.enabled` properties to specify the type of logging that you wish to implement. Set `file.log.enabled` to `true` to enable logging to a file; enable the UDP protocol or TCP protocol and set `syslog.enabled` to `true` to enable logging to syslog. You can enable logging to both a file and syslog. + +```text +# Which logging is enabled. +file.log.enabled=true +syslog.enabled=false +``` + +For more information about configuring syslog logging, see [Enabling syslog Log File Entries](../../09_controlling_logging/#enabling_syslog). + + + +Use the `jgroups.loglevel` and `efm.loglevel` parameters to specify the level of detail logged by Failover Manager. The default value is INFO. For more information about logging, see [Controlling Logging](../../09_controlling_logging/#controlling_logging). + +```text +# Logging levels for JGroups and EFM. +# Valid values are: TRACE, DEBUG, INFO, WARN, ERROR +# Default value: INFO +# It is not necessary to increase these values unless debugging a +# specific issue. If nodes are not discovering each other at +# startup, increasing the jgroups level to DEBUG will show +# information about the TCP connection attempts that may help +# diagnose the connection failures. +jgroups.loglevel=INFO +efm.loglevel=INFO +``` + + + +Use the `jvm.options` property to pass JVM-related configuration information. 
The default setting specifies the amount of memory that the Failover Manager agent will be allowed to use. + +```text +# Extra information that will be passed to the JVM when starting +# the agent. +jvm.options=-Xmx128m +``` + diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx new file mode 100644 index 00000000000..c0456f97d8c --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx @@ -0,0 +1,81 @@ +--- +title: "Encrypting Your Database Password" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/encrypting_database_password.html" +--- + + + +Failover Manager requires you to encrypt your database password before including it in the cluster properties file. Use the [efm utility](../07_using_efm_utility/#efm_encrypt) (located in the `/usr/edb/efm-4.0/bin` directory) to encrypt the password. When encrypting a password, you can either pass the password on the command line when you invoke the utility, or use the `EFMPASS` environment variable. + +To encrypt a password, use the command: + +```text +# efm encrypt [ --from-env ] +``` + +Where `` specifies the name of the Failover Manager cluster. + +If you include the `--from-env` option, you must export the value you wish to encrypt before invoking the encryption utility. For example: + +```text +export EFMPASS=password +``` + +If you do not include the `--from-env` option, Failover Manager will prompt you to enter the database password twice before generating an encrypted password for you to place in your cluster property file. When the utility shares the encrypted password, copy and paste the encrypted password into the cluster property files. + +!!! Note + Many Java vendors ship their version of Java with full-strength encryption included, but not enabled due to export restrictions. If you encounter an error that refers to an illegal key size when attempting to encrypt the database password, you should download and enable a Java Cryptography Extension (JCE) that provides an unlimited policy for your platform. + +The following example demonstrates using the encrypt utility to encrypt a password for the `acctg` cluster: + +```text +# efm encrypt acctg +This utility will generate an encrypted password for you to place in + your EFM cluster property file: +/etc/edb/efm-4.0/acctg.properties +Please enter the password and hit enter: +Please enter the password again to confirm: +The encrypted password is: 516b36fb8031da17cfbc010f7d09359c +Please paste this into your acctg.properties file +db.password.encrypted=516b36fb8031da17cfbc010f7d09359c +``` + +!!! Note + The utility will notify you if a properties file does not exist. + +After receiving your encrypted password, paste the password into the properties file and start the Failover Manager service. If there is a problem with the encrypted password, the Failover Manager service will not start: + +```text +[witness@localhost ~]# systemctl start edb-efm-4.0 +Job for edb-efm-4.0.service failed because the control process exited with error code. See "systemctl status edb-efm-4.0.service" and "journalctl -xe" for details. 
+``` + +If you receive this message when starting the Failover Manager service, please see the startup log (located in `/var/log/efm-4.0/startup-efm.log`) for more information. + +If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, startup information is also available with the following command: + +```text +systemctl status edb-efm-4.0 +``` + +To prevent a cluster from inadvertently connecting to the database of another cluster, the cluster name is incorporated into the encrypted password. If you modify the cluster name, you will need to re-encrypt the database password and update the cluster properties file. + +**Using the EFMPASS Environment Variable** + +The following example demonstrates using the --from-env environment variable when encrypting a password. Before invoking the `efm encrypt` command, set the value of `EFMPASS` to the password (`1safepassword`): + +```text +# export EFMPASS=1safepassword +``` + +Then, invoke `efm encrypt`, specifying the `--from-env` option: + +```text +# efm encrypt acctg --from-env +# 7ceecd8965fa7a5c330eaa9e43696f83 +``` + +The encrypted password (`7ceecd8965fa7a5c330eaa9e43696f83`) is returned as a text value; when using a script, you can check the exit code of the command to confirm that the command succeeded. A successful execution returns `0`. diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_cluster_members.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_cluster_members.mdx new file mode 100644 index 00000000000..5b7b5b030bf --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_cluster_members.mdx @@ -0,0 +1,35 @@ +--- +title: "The Cluster Members File" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/cluster_members.html" +--- + + + +Each node in a Failover Manager cluster has a cluster members file (by default, named efm.nodes) that contains a list of the current Failover Manager cluster members. When an agent starts, it uses the file to locate other cluster members. The Failover Manager installer creates a file template for the cluster members file named `efm.nodes.in` in the `/etc/edb/efm-4.0` directory. + +After completing the Failover Manager installation, you must make a working copy of the template: + +```text +cp /etc/edb/efm-4.0/efm.nodes.in /etc/edb/efm-4.0/efm.nodes +``` + +After copying the template file, change the owner of the file to `efm`: + +```text +chown efm:efm efm.nodes +``` + +By default, Failover Manager expects the cluster members file to be named `efm.nodes`. If you name the cluster members file something other than `efm.nodes`, you must modify the Failover Manager service script to instruct Failover Manager to use the new name. + +The cluster members file on the first node started can be empty; this node will become the Membership Coordinator. On each subsequent node, the cluster member file must contain the address and port number of the Membership Coordinator. Each entry in the cluster members file must be listed in an address:port format, with multiple entries separated by white space. + +The agents will update the contents of the `efm.nodes` file to match the current members of the cluster. As agents join or leave the cluster, the `efm.nodes` files on other agents are updated to reflect the current cluster membership. 
If you invoke the [efm stop-cluster](../07_using_efm_utility/#efm_stop_cluster) command, Failover Manager does not modify the file. + +If the Membership Coordinator leaves the cluster, another node will assume the role. You can use the [efm cluster-status](../07_using_efm_utility/#efm_cluster_status) command to find the address of the Membership Coordinator. If a node joins or leaves a cluster while an agent is down, before starting that agent you must manually ensure that the file includes at least the current Membership Coordinator's address and port. + +If you know the addresses and ports of the nodes that will be joining the cluster, you can include the addresses in the cluster members file at any time. At startup, any addresses that do not identify cluster members will be ignored unless the `auto.allow.hosts` property (in the [cluster properties file](01_cluster_properties/#auto_allow_hosts)) is set to `true`. + +If the `stable.nodes.file` property (located in the [cluster properties file](01_cluster_properties/#auto_allow_hosts)) is set to `true`, the agent will not update the `.nodes` file when cluster members join or leave the cluster; this behavior is most useful when the IP addresses of cluster members do not change often. diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_extending_efm_permissions.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_extending_efm_permissions.mdx new file mode 100644 index 00000000000..f49e8489cfb --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_extending_efm_permissions.mdx @@ -0,0 +1,116 @@ +--- +title: "Extending Failover Manager Permissions" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/extending_efm_permissions.html" +--- + + + +During the Failover Manager installation, the installer creates a user named `efm`. `efm` does not have sufficient privileges to perform management functions that are normally limited to the database owner or operating system superuser. + +- When performing management functions requiring database superuser privileges, `efm` invokes the `efm_db_functions` script. +- When performing management functions requiring operating system superuser privileges, `efm` invokes the `efm_root_functions` script. +- When assigning or releasing a virtual IP address, `efm` invokes the `efm_address` script. +- When enabling Pgpool integration, `efm` invokes the `efm_pgpool_functions` script. + +The `efm_db_functions` or `efm_root_functions` scripts perform management functions on behalf of the `efm` user. + +The sudoers file contains entries that allow the user `efm` to control the Failover Manager service for clusters owned by `postgres` or `enterprisedb`. You can modify a copy of the sudoers file to grant permission to manage Postgres clusters owned by other users to `efm`. + +The `efm-41` file is located in `/etc/sudoers.d`, and contains the following entries: + +```text +# Copyright EnterpriseDB Corporation, 2014-2020. All Rights Reserved. +# +# Do not edit this file. Changes to the file may be overwritten +# during an upgrade. +# +# This file assumes you are running your efm cluster as user 'efm'. If not, +# then you will need to copy this file. + +# Allow user 'efm' to sudo efm_db_functions as either 'postgres' or 'enterprisedb'. 
+# If you run your db service under a non-default account, you will need to copy +# this file to grant the proper permissions and specify the account in your efm +# cluster properties file by changing the 'db.service.owner' property. +efm ALL=(postgres) NOPASSWD: /usr/edb/efm-4.0/bin/efm_db_functions +efm ALL=(enterprisedb) NOPASSWD: /usr/edb/efm-4.0/bin/efm_db_functions + +# Allow user 'efm' to sudo efm_root_functions as 'root' to write/delete the PID file, +# validate the db.service.owner property, etc. +efm ALL=(ALL) NOPASSWD: /usr/edb/efm-4.0/bin/efm_root_functions + +# Allow user 'efm' to sudo efm_address as root for VIP tasks. +efm ALL=(ALL) NOPASSWD: /usr/edb/efm-4.0/bin/efm_address + +# Allow user 'efm' to sudo efm_pgpool_functions as root for pgpool tasks. +efm ALL=(ALL) NOPASSWD: /usr/edb/efm-4.0/bin/efm_pgpool_functions + +# relax tty requirement for user 'efm' +Defaults:efm !requiretty +``` + +If you are using Failover Manager to monitor clusters that are owned by users other than `postgres` or `enterprisedb`, make a copy of the `efm-41` file, and modify the content to allow the user to access the `efm_functions` script to manage their clusters. + +If an agent cannot start because of permission problems, make sure the default `/etc/sudoers` file contains the following line at the end of the file: + +```text +## Read drop-in files from /etc/sudoers.d (the # here does not mean a comment) + +#includedir /etc/sudoers.d +``` + + + +## Running Failover Manager without sudo + +By default, Failover Manager uses sudo to securely manage access to system functionality. If you choose to configure Failover Manager to run without sudo access, note that root access is still required to: + +- install the Failover Manager RPM. +- perform Failover Manager setup tasks. + +To run Failover Manager without sudo, you must select a database process owner that will have privileges to perform management functions on behalf of Failover Manager. The user could be the default database superuser (for example, enterprisedb or postgres) or another privileged user. After selecting the user: + +1. Use the following command to add the user to the `efm` group: + + ```text + usermod -a -G efm enterprisedb + ``` + + This should allow the user to write to `/var/run/efm-4.0` and `/var/lock/efm-4.0`. + +2. If you are reusing a cluster name, remove any previously created log files; the new user will not be able to write to log files created by the default (or other) owner. + +3. Copy the cluster properties template file and the nodes template file: + + ```text + su - enterprisedb + + cp /etc/edb/efm-4.0/efm.properties.in .properties + + cp /etc/edb/efm-4.0/efm.nodes.in /.nodes + ``` + +Then, modify the cluster properties file, providing the name of the user in the `db.service.owner` property. You must also ensure that the `db.service.name` property is blank; without sudo, you cannot run services without root access. + +After modifying the configuration, the new user can control Failover Manager with the following command: + +```text +/usr/edb/efm-4.0/bin/runefm.sh start|stop .properties +``` + +Where `` specifies the full path of the cluster properties file. Note that the full path to the properties file must be provided whenever the non-default user is controlling agents or using the efm script. + +To allow the new user to manage Failover Manager as a service, you must provide a custom script or unit file.
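+The following is only a minimal sketch of what such a unit file might look like for a cluster named `acctg` that is run by the `enterprisedb` user without sudo; the unit name, account, and properties file path are assumptions used for illustration and should be adjusted to match your environment:
+
+```text
+[Unit]
+Description=EDB Failover Manager 4.0 (acctg cluster, run without sudo)
+After=network.target
+
+[Service]
+Type=forking
+User=enterprisedb
+Group=efm
+PIDFile=/var/run/efm-4.0/acctg.pid
+# runefm.sh expects the action followed by the full path to the cluster properties file
+ExecStart=/bin/bash -c '/usr/edb/efm-4.0/bin/runefm.sh start /home/enterprisedb/acctg.properties'
+ExecStop=/bin/bash -c '/usr/edb/efm-4.0/bin/runefm.sh stop /home/enterprisedb/acctg.properties'
+
+[Install]
+WantedBy=multi-user.target
+```
+
+After placing such a file in `/usr/lib/systemd/system/`, the service can be enabled and started with `systemctl` in the usual way.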
+ +Failover Manager uses a binary named `manage-vip` that resides in `/usr/edb/efm-4.0/bin/secure/` to perform VIP management operations without sudo privileges. This script uses setuid to acquire the privileges needed to manage Virtual IP addresses. + +- This directory is only accessible to root and users in the `efm` group. +- The binary is only executable by root and the `efm` group. + +For security reasons, we recommend against modifying the access privileges of the `/usr/edb/efm-4.0/bin/secure/` directory or the `manage-vip` script. + +For more information about using Failover Manager without sudo, visit: + + diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/05_using_vip_addresses.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/05_using_vip_addresses.mdx new file mode 100644 index 00000000000..99f5a68f4a2 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/05_using_vip_addresses.mdx @@ -0,0 +1,148 @@ +--- +title: "Using Failover Manager with Virtual IP Addresses" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/using_vip_addresses.html" +--- + + + +Failover Manager uses the `efm_address` script to assign or release a virtual IP address. + +!!! Note + Virtual IP addresses are not supported by many cloud providers. In those environments, another mechanism should be used (such as an Elastic IP Address on AWS), which can be changed when needed by a fencing or post-promotion script. + +By default, the script resides in: + + `/usr/edb/efm-4.0/bin/efm_address` + +Failover Manager uses the following command variations to assign or release an IPv4 or IPv6 address. + +To assign a virtual IPv4 address: + +```text +# efm_address add4 / +``` + +To assign a virtual IPv6 address: + +```text +# efm_address add6 / +``` + +To release a virtual address: + +```text +# efm_address del +``` + +Where: + + `` matches the name specified in the `virtual.ip.interface` property in the cluster properties file. + + `` or `` matches the value specified in the `virtual.ip` property in the cluster properties file. + + `prefix` matches the value specified in the `virtual.ip.prefix` property in the cluster properties file. + +For more information about properties that describe a virtual IP address, see [The Cluster Properties File](01_cluster_properties/#virtual_ip). + +You must invoke the `efm_address` script as the root user. The `efm` user is created during the installation, and is granted privileges in the sudoers file to run the `efm_address` script. For more information about the sudoers file, see [Extending Failover Manager Permissions](04_extending_efm_permissions/#extending_efm_permissions). + +!!! Note + If a VIP address (or any address other than the `bind.address`) is assigned to a node, the operating system can choose the source address used when contacting the database. Be sure that you modify the `pg_hba.conf` file on all monitored databases to allow contact from all addresses within your replication scenario. + +**Testing the VIP** + +When using a virtual IP (VIP) address with Failover Manager, it is important to test the VIP functionality manually before starting Failover Manager. This will catch any network-related issues before they cause a problem during an actual failover. While testing the VIP, ensure that Failover Manager is not running.
+ +The following steps test the actions that Failover Manager will take. The example uses the following property values: + +```text +virtual.ip=172.24.38.239 +virtual.ip.interface=eth0 +virtual.ip.prefix=24 +ping.server.command=/bin/ping -q -c3 -w5 +``` + +!!! Note + The `virtual.ip.prefix` specifies the number of significant bits in the virtual Ip address. + +When instructed to ping the VIP from a node, use the command defined by the `ping.server.command` property. + +1. Ping the VIP from all nodes to confirm that the address is not already in use: + +```text +# /bin/ping -q -c3 -w5 172.24.38.239 +PING 172.24.38.239 (172.24.38.239) 56(84) bytes of data. +--- 172.24.38.239 ping statistics --- +4 packets transmitted, 0 received, +3 errors, 100% packet loss, + time 3000ms +``` + +You should see 100% packet loss. + +2. Run the `efm_address add4` command on the Primary node to assign the VIP and then confirm with ip address: + +```text +# efm_address add4 eth0 172.24.38.239/24 +# ip address + +eth0 Link encap:Ethernet HWaddr 36:AA:A4:F4:1C:40 +inet addr:172.24.38.239 Bcast:172.24.38.255 +... +``` + +3. Ping the VIP from the other nodes to verify that they can reach the VIP: + +```text +# /bin/ping -q -c3 -w5 172.24.38.239 +PING 172.24.38.239 (172.24.38.239) 56(84) bytes of data. +--- 172.24.38.239 ping statistics --- +3 packets transmitted, 3 received, 0% packet loss, time 1999ms +rtt min/avg/max/mdev = 0.023/0.025/0.029/0.006 ms +``` + +You should see no packet loss. + +4. Use the `efm_address del` command to release the address on the primary node and confirm the node has been released with ip address: + +```text +# efm_address del eth0 172.24.38.239/24 +# ip address +eth0 Link encap:Ethernet HWaddr 22:00:0A:89:02:8E +inet addr:10.137.2.142 Bcast:10.137.2.191 +... +``` + +The output from this step should not show an eth0 interface + +5. Repeat step 3, this time verifying that the Standby and Witness do not see the VIP in use: + +```text +# /bin/ping -q -c3 -w5 172.24.38.239 +PING 172.24.38.239 (172.24.38.239) 56(84) bytes of data. +--- 172.24.38.239 ping statistics --- +4 packets transmitted, 0 received, +3 errors, 100% packet loss, + time 3000ms +``` + +You should see 100% packet loss. Repeat this step on all nodes. + +6. Repeat step 2 on all Standby nodes to assign the VIP to every node. You can ping the VIP from any node to verify that it is in use. + +```text +# efm_address add4 eth0 172.24.38.239/24 +# ip address + +eth0 Link encap:Ethernet HWaddr 36:AA:A4:F4:1C:40 +inet addr:172.24.38.239 Bcast:172.24.38.255 +... +``` + +After the test steps above, release the VIP from any non-Primary node before attempting to start Failover Manager. + +!!! Note + The network interface used for the VIP does not have to be the same interface used for the Failover Manager agent's `bind.address` value. The primary agent will drop the VIP as needed during a failover, and Failover Manager will verify that the VIP is no longer available before promoting a standby. A failure of the bind address network will lead to primary isolation and failover. + +If the VIP uses a different interface, you may encounter a timing condition where the rest of the cluster checks for a reachable VIP before the primary agent has dropped it. In this case, EFM will retry the VIP check for the number of seconds specified in the `node.timeout` property to help ensure that a failover happens as expected. 
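+As a concrete illustration of the situation described in the note above, the following fragment sketches cluster properties in which the agent's `bind.address` uses one network interface while the VIP is managed on another; the addresses, interface names, and timeout value are invented for this example only:
+
+```text
+# Agent-to-agent communication on the first interface
+bind.address=192.168.10.21:7800
+# The VIP is assigned on a second interface
+virtual.ip=172.24.38.239
+virtual.ip.interface=eth1
+virtual.ip.prefix=24
+# Seconds for which the other agents will retry the VIP check during a failover
+node.timeout=50
+```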
diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx new file mode 100644 index 00000000000..a89e7f687b9 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx @@ -0,0 +1,20 @@ +--- +title: "Configuring Failover Manager" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/configuring_efm.html" +--- + + + +Configurable Failover Manager properties are specified in two user-modifiable files: + +- [efm.properties](01_cluster_properties/#cluster_properties) +- [efm.nodes](03_cluster_members/#cluster_members) + +
+ +cluster_properties encrypting_database_password cluster_members extending_efm_permissions using_vip_addresses + +
diff --git a/product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx b/product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx new file mode 100644 index 00000000000..25fb74d736d --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx @@ -0,0 +1,318 @@ +--- +title: "Using Failover Manager" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/using_efm.html" +--- + + + +Failover Manager offers support for monitoring and failover of clusters with one or more Standby servers. You can add or remove nodes from the cluster as your demand for resources grows or shrinks. + +If a primary node reboots, Failover Manager may detect the database is down on the Primary node and promote a Standby node to the role of Primary. If this happens, the Failover Manager agent on the (rebooted) Primary node will not get a chance to write the `recovery.conf` file; the rebooted Primary node will return to the cluster as a second Primary node. To prevent this, start the Failover Manager agent before starting the database server. The agent will start in idle mode, and check to see if there is already a primary in the cluster. If there is a primary node, the agent will verify that a `recovery.conf` or `standby.signal` file exists, and the database will not start as a second primary. + +## Managing a Failover Manager Cluster + +Once configured, a Failover Manager cluster requires no regular maintenance. The following sections provide information about performing the management tasks that may occasionally be required by a Failover Manager Cluster. + +By default, [some of the efm commands](07_using_efm_utility/#using_efm_utility) must be invoked by `efm` or an OS superuser; an administrator can selectively permit users to invoke these commands by adding the user to the `efm` group. The commands are: + +- [efm allow-node](07_using_efm_utility/#efm_allow_node) +- [efm disallow-node](07_using_efm_utility/#efm_disallow_node) +- [efm promote](07_using_efm_utility/#efm_promote) +- [efm resume](07_using_efm_utility/#efm_resume) +- [efm set-priority](07_using_efm_utility/#efm_set_priority) +- [efm stop-cluster](07_using_efm_utility/#efm_stop_cluster) +- [efm upgrade-conf](07_using_efm_utility/#efm_upgrade_conf) + + + +### Starting the Failover Manager Cluster + +You can start the nodes of a Failover Manager cluster in any order. + +To start the Failover Manager cluster on RHEL/CentOS 7.x or RHEL/CentOS 8.x, assume superuser privileges, and invoke the command: + +```text +systemctl start edb-efm-4.0 +``` + +If the cluster properties file for the node specifies that `is.witness` is `true`, the node will start as a Witness node. + +If the node is not a dedicated Witness node, Failover Manager will connect to the local database and invoke the `pg_is_in_recovery()` function. If the server responds `false`, the agent assumes the node is a Primary node, and assigns a virtual IP address to the node (if applicable). If the server responds `true`, the Failover Manager agent assumes that the node is a Standby server. If the server does not respond, the agent will start in an idle state. + +After joining the cluster, the Failover Manager agent checks the supplied database credentials to ensure that it can connect to all of the databases within the cluster. If the agent cannot connect, the agent will shut down. 
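+If you want to confirm manually whether a node will be treated as a Primary or a Standby before starting its agent, you can run the same `pg_is_in_recovery()` check that the agent uses. The query below is only an illustrative example; the port (`5444`), database (`edb`), and account (`enterprisedb`) are assumptions based on a default Advanced Server installation and should be adjusted for your environment:
+
+```text
+$ psql -p 5444 -d edb -U enterprisedb -c "SELECT pg_is_in_recovery();"
+ pg_is_in_recovery
+-------------------
+ f
+(1 row)
+```
+
+A result of `f` corresponds to a Primary database, while `t` indicates that the node is running in recovery as a Standby.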
+ +If a new primary or standby node joins a cluster, all of the existing nodes will also confirm that they can connect to the database on the new node. + + + +!!! Note + If you are running `/var/lock` or `/var/run` on `tmpfs` (Temporary File System), make sure that the systemd service file for Failover Manager has a dependency on `systemd-tmpfiles-setup.service`. + +### Adding Nodes to a Cluster + +You can add a node to a Failover Manager cluster at any time. When you add a node to a cluster, you must modify the cluster to allow the new node, and then tell the new node how to find the cluster. The following steps detail adding a node to a cluster: + +1. Unless `auto.allow.hosts` is set to `true`, use the `efm allow-node` command, to add the address of the new node to the Failover Manager allowed node host list. When invoking the command, specify the cluster name and the address of the new node: + + ```text + efm allow-node
+ ``` + + For more information about using the `efm allow-node` command or controlling a Failover Manager service, see [Using the EFM Utility](07_using_efm_utility/#efm_allow_node). + + Install a Failover Manager agent and configure the cluster properties file on the new node. For more information about modifying the properties file, see [The Cluster Properties File](04_configuring_efm/01_cluster_properties/#cluster_properties). + +2. Configure the cluster members file on the new node, adding an entry for the Membership Coordinator. For more information about modifying the cluster members file, see [The Cluster Members File](04_configuring_efm/03_cluster_members/#cluster_members). + +3. Assume superuser privileges on the new node, and start the Failover Manager agent. To start the Failover Manager cluster on RHEL/CentOS 7.x or RHEL/CentOS 8.x, invoke the command: + + ```text + systemctl start edb-efm-4.0 + ``` + +When the new node joins the cluster, Failover Manager will send a notification to the administrator email provided in the `user.email` property, and/or will invoke the specified notification script. + + + +!!! Note + To be a useful Standby for the current node, the node must be a standby in the PostgreSQL Streaming Replication scenario. + +### Changing the Priority of a Standby + +If your Failover Manager cluster includes more than one Standby server, you can use the `efm set-priority` command to influence the promotion priority of a Standby node. Invoke the command on any existing member of the Failover Manager cluster, and specify a priority value after the IP address of the member. + +For example, the following command instructs Failover Manager that the `acctg` cluster member that is monitoring `10.0.1.9` is the primary Standby `(1)`: + +```text +efm set-priority acctg 10.0.1.9 1 +``` + +You can set the priority of a standby to `0` to make the standby non-promotable. Setting the priority of a standby to a value greater than `0` overrides a property value of `promotable=false`. + +For example, if the properties file on node `10.0.1.10` includes a setting of `promotable=false` and you use `efm set-priority` to set the promotion priority of `10.0.1.10` to be the standby used in the event of a failover, the value designated by the `efm set-priority` command will override the value in the property file: + +```text +efm set-priority acctg 10.0.1.10 1 +``` + +In the event of a failover, Failover Manager will first retrieve information from Postgres streaming replication to confirm which Standby node has the most recent data, and promote the node with the least chance of data loss. If two Standby nodes contain equally up-to-date data, the node with a higher user-specified priority value will be promoted to Primary unless [use.replay.tiebreaker](04_configuring_efm/01_cluster_properties/#use_replay_tiebreaker) is set to `false` . To check the priority value of your Standby nodes, use the command: + +```text +efm cluster-status +``` + + + +!!! Note + The promotion priority may change if a node becomes isolated from the cluster, and later re-joins the cluster. + +### Promoting a Failover Manager Node + +You can invoke `efm promote` on any node of a Failover Manager cluster to start a manual promotion of a Standby database to Primary database. + +Manual promotion should only be performed during a maintenance window for your database cluster. If you do not have an up-to-date Standby database available, you will be prompted before continuing. 
To start a manual promotion, assume the identity of `efm` or the OS superuser, and invoke the command: + +```text +efm promote [-switchover] [-sourcenode
] [-quiet] [-noscripts] +``` + +Where: + + `` is the name of the Failover Manager cluster. + + Include the `-switchover` option to reconfigure the original Primary as a Standby. If you include the `-switchover` keyword, the cluster must include a primary node and at least one standby, and the nodes must be in sync. + + Include the `-sourcenode` keyword to specify the node from which the recovery settings will be copied to the primary. + + Include the `-quiet` keyword to suppress notifications during switchover. + + Include the `-noscripts` keyword to instruct Failover Manager not to invoke fencing and post-promotion scripts. + +During switchover: + +- For server versions 11 and prior, the `recovery.conf` file is copied from an existing standby to the primary node. For server version 12 and later, the `primary_conninfo` and `restore_command` parameters are copied and stored in memory. +- The primary database is stopped. +- If you are using a VIP, the address is released from the primary node. +- A standby is promoted to replace the primary node, and acquires the VIP. +- The address of the new primary node is added to the `recovery.conf` file or the `primary_conninfo` details are stored in memory. +- If the `application.name` property is set for this node, the `application_name` property will be added to the `recovery.conf` file or the `primary_conninfo` information will be stored in memory. +- If you are using server version 12 or later, the recovery settings that have been stored in memory are written to the `postgresql.auto.conf` file. A `standby.signal` file is created. +- The old primary is started; the agent will resume monitoring it as a standby. + +During a promotion, the Primary agent releases the virtual IP address. If it is not a switchover, a `recovery.conf` file is created in the directory specified by the `db.data.dir` property. The `recovery.conf` file is used to prevent the old primary database from starting until the file is removed, preventing the node from starting as a second primary in the cluster. If the promotion is part of a switchover, recovery settings are handled as described above. + +The Primary agent remains running, and assumes a status of Idle. + +The Standby agent confirms that the virtual IP address is no longer in use before pinging a well-known address to ensure that the agent is not isolated from the network. The Standby agent runs the fencing script and promotes the Standby database to Primary. The Standby agent then assigns the virtual IP address to the Standby node, and runs the post-promotion script (if applicable). + +Note that this command instructs the service to ignore the value specified in the `auto.failover` parameter in the cluster properties file. + +To return a node to the role of primary, place the node first in the promotion list: + +```text +efm set-priority
+``` + +Then, perform a manual promotion: + +```text +efm promote -switchover +``` + +For more information about the efm utility, see [Using the EFM Utility](07_using_efm_utility/#using_efm_utility). + + + +### Stopping a Failover Manager Agent + +When you stop an agent, Failover Manager will remove the node's address from the cluster members list on all of the running nodes of the cluster, but will not remove the address from the Failover Manager Allowed node host list. + +To stop the Failover Manager agent on RHEL/CentOS 7.x or RHEL/CentOS 8.x, assume superuser privileges, and invoke the command: + +```text +systemctl stop edb-efm-4.0 +``` + +Until you invoke the `efm disallow-node` command (removing the node's address from the Allowed node host list), you can use the `service edb-efm-4.0 start` command to restart the node at a later date without first running the `efm allow-node` command again. + + +Note that stopping an agent does not signal the cluster that the agent has failed unless the [primary.shutdown.as.failure](04_configuring_efm/01_cluster_properties/#primary_shutdown_as_failure) property is set to `true`. + +### Stopping a Failover Manager Cluster + +To stop a Failover Manager cluster, connect to any node of a Failover Manager cluster, assume the identity of `efm` or the OS superuser, and invoke the command: + +```text +efm stop-cluster +``` + +The command will cause *all* Failover Manager agents to exit. Terminating the Failover Manager agents completely disables all failover functionality. + +!!! Note + When you invoke the `efm stop-cluster` command, all authorized node information is lost from the Allowed node host list. + +### Removing a Node from a Cluster + +The `efm disallow-node` command removes the IP address of a node from the Failover Manager Allowed Node host list. Assume the identity of `efm` or the OS superuser on any existing node (that is currently part of the running cluster), and invoke the `efm disallow-node` command, specifying the cluster name and the IP address of the node: + +```text +efm disallow-node
+``` + +The `efm disallow-node` command will not stop a running agent; the service will continue to run on the node until you [stop the agent](#stop_efm_agent). If the agent or cluster is subsequently stopped, the node will not be allowed to rejoin the cluster, and will be removed from the failover priority list (and will be ineligible for promotion). + +After invoking the `efm disallow-node` command, you must use the [efm allow-node](07_using_efm_utility/#efm_allow_node) command to add the node to the cluster again. + + + +## Running Multiple Agents on a Single Node + +You can monitor multiple database clusters that reside on the same host by running multiple Primary or Standby agents on that Failover Manager node. You may also run multiple Witness agents on a single node. To configure Failover Manager to monitor more than one database cluster, while ensuring that Failover Manager agents from different clusters do not interfere with each other, you must: + +1. Create a cluster properties file for each member of each cluster that defines a unique set of properties and the role of the node within the cluster. +2. Create a cluster members file for each member of each cluster that lists the members of the cluster. +3. Customize the unit file (on a RHEL/CentOS 7.x or RHEL/CentOS 8.x system) for each cluster to specify the names of the cluster properties and the cluster members files. +4. Start the services for each cluster. + +The examples that follow uses two database clusters (acctg and sales) running on the same node: + +- Data for `acctg` resides in `/opt/pgdata1`; its server is monitoring port `5444`. +- Data for `sales` resides in `/opt/pgdata2`; its server is monitoring port `5445`. + +To run a Failover Manager agent for both of these database clusters, use the `efm.properties.in` template to create two properties files. Each cluster properties file must have a unique name. For this example, we create `acctg.properties` and `sales.properties` to match the `acctg` and `sales` database clusters. + +The following parameters must be unique in each cluster properties file: + + `admin.port` + + `bind.address` + + `db.port` + + `db.data.dir` + + `virtual.ip` (if used) + + `virtual.ip.interface` (if used) + +Within each cluster properties file, the `db.port` parameter should specify a unique value for each cluster, while the `db.user` and `db.database` parameter may have the same value or a unique value. For example, the `acctg.properties` file may specify: + + `db.user=efm_user` + + `db.password.encrypted=7c801b32a05c0c5cb2ad4ffbda5e8f9a` + + `db.port=5444` + + `db.database=acctg_db` + +While the `sales.properties` file may specify: + + `db.user=efm_user` + + `db.password.encrypted=e003fea651a8b4a80fb248a22b36f334` + + `db.port=5445` + + `db.database=sales_db` + +Some parameters require special attention when setting up more than one Failover Manager cluster agent on the same node. If multiple agents reside on the same node, each port must be unique. Any two ports will work, but it may be easier to keep the information clear if using ports that are not too close to each other. + +When creating the cluster properties file for each cluster, the `db.data.dir` parameters must also specify values that are unique for each respective database cluster. + +The following parameters are used when assigning the virtual IP address to a node. If your Failover Manager cluster does not use a virtual IP address, leave these parameters blank. 
+ + `virtual.ip` + + `virtual.ip.interface` + + `virtual.ip.prefix` + +These parameter values are determined by the virtual IP addresses being used and may or may not be the same for both `acctg.properties` and `sales.properties`. + +After creating the `acctg.properties` and `sales.properties` files, create a service script or unit file for each cluster that points to the respective property files; this step is platform-specific. If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, see [RHEL/CentOS 7.x or RHEL/CentOS 8.x](#rhelcentos-7x-or-rhelcentos-8x). + +!!! Note + If you are using a unit file, you must manually update the file to reflect the new service name when you upgrade Failover Manager. + +### RHEL/CentOS 7.x or RHEL/CentOS 8.x + +If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, you should copy the `edb-efm-4.0` unit file to a new file with a name that is unique for each cluster. For example, if you have two clusters (named acctg and sales), the unit file names might be: + +```text +/usr/lib/systemd/system/efm-acctg.service + +/usr/lib/systemd/system/efm-sales.service +``` + +Then, edit the `CLUSTER` variable within each unit file, changing the specified cluster name from `efm` to the new cluster name. For example, for a cluster named `acctg`, the value would specify: + +```text +Environment=CLUSTER=acctg +``` + +You must also update the value of the `PIDFile` parameter to specify the new cluster name. For example: + +```text +PIDFile=/var/run/efm-4.0/acctg.pid +``` + +After copying the service scripts, use the following commands to enable the services: + +```text +# systemctl enable efm-acctg.service + +# systemctl enable efm-sales.service +``` + +Then, use the new service scripts to start the agents. For example, you can start the `acctg` agent with the command: + +```text +# systemctl start efm-acctg +``` + +For information about customizing a unit file, please visit: + + diff --git a/product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx b/product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx new file mode 100644 index 00000000000..fe731b8548d --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx @@ -0,0 +1,142 @@ +--- +title: "Monitoring a Failover Manager Cluster" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/monitoring_efm_cluster.html" +--- + + + +You can use either the Failover Manager `efm cluster-status` command or the PEM Client graphical interface to check the current status of a monitored node of a Failover Manager cluster. + +## Reviewing the Cluster Status Report + +The [efm cluster-status](07_using_efm_utility/#efm_cluster_status) command returns a report that contains information about the status of the Failover Manager cluster.
To invoke the command, enter: + +```text +# efm cluster-status +``` + +The following status report is for a cluster named edb that has three nodes running: + +```text +Agent Type Address Agent DB VIP +----------------------------------------------------------------------- +Standby 172.19.10.2 UP UP 192.168.225.190 +Standby 172.19.12.163 UP UP 192.168.225.190 +Primary 172.19.14.9 UP UP 192.168.225.190* + + +Allowed node host list: +172.19.14.9 172.19.12.163 172.19.10.2 + + +Membership coordinator: 172.19.14.9 + + +Standby priority host list: +172.19.12.163 172.19.10.2 + +Promote Status: + +DB Type Address WAL Received LSN WAL Replayed LSN Info +-------------------------------------------------------------------- +Primary 172.19.14.9 0/4000638 +Standby 172.19.12.163 0/4000638 0/4000638 +Standby 172.19.10.2 0/4000638 0/4000638 + + +Standby database(s) in sync with primary. It is safe to promote. +``` + +The cluster status section provides an overview of the status of the agents that reside on each node of the cluster: + +```text +Agent Type Address Agent DB VIP +----------------------------------------------------------------------- +Standby 172.19.10.2 UP UP 192.168.225.190 +Standby 172.19.12.163 UP UP 192.168.225.190 +Primary 172.19.14.9 UP UP 192.168.225.190* +``` + +The asterisk (\*) after the VIP address indicates that the address is available for connections. If a VIP address is not followed by an asterisk, the address has been associated with the node (in the properties file), but the address is not currently in use. + +Failover Manager agents provide the information displayed in the Cluster Status section. + +The `Allowed node host list` and `Standby priority host list` provide an easy way to tell which nodes are allowed to join the cluster, and the promotion order of the nodes. The IP address of the Membership coordinator is also displayed in the report: + +```text +Allowed node host list: +172.19.14.9 172.19.12.163 172.19.10.2 +Membership coordinator: 172.19.14.9 +Standby priority host list: +172.19.12.163 172.19.10.2 +``` + +The `Promote Status` section of the report is the result of a direct query from the node on which you are invoking the cluster-status command to each database in the cluster; the query also returns the transaction log location of each database. Because the queries to each database return at different points in time, the LSNs may not match even if streaming replication is working normally for the cluster. + +```text +Promote Status: + +DB Type Address WAL Received LSN WAL Replayed LSN Info +------------------------------------------------------------------- +Primary 172.19.14.9 0/4000638 +Standby 172.19.12.163 0/4000638 0/4000638 +Standby 172.19.10.2 0/4000638 0/4000638 +``` + +If a database is down (or if the database has been restarted, but the resume command has not yet been invoked), the state of the agent that resides on that host will be Idle. If an agent is idle, the cluster status report will include a summary of the condition of the idle node. For example: + +```text +Agent Type Address Agent DB VIP +----------------------------------------------------- +Idle 172.19.18.105 UP UP 172.19.13.105 +``` + +**Exit Codes** + +The cluster status process returns an exit code that is based on the state of the cluster: + +- An exit code of `0` indicates that all agents are running, and the databases on the Primary and Standby nodes are running and in sync. + +- A non-zero exit code indicates that there is a problem. 
The following problems can trigger a non-zero exit code: + + A database is down or unknown (or has an idle agent). + + Failover Manager cannot decrypt the provided database password. + + There is a problem contacting the databases to get WAL locations. + + There is no Primary agent. + + There are no Standby agents. + + One or more Standby nodes are not in sync with the Primary. + +## Monitoring Streaming Replication with Postgres Enterprise Manager + +If you use Postgres Enterprise Manager (PEM) to monitor your servers, you can configure the Streaming Replication Analysis dashboard (part of the PEM graphical interface) to display the state of a Primary or Standby node that is part of a Streaming Replication scenario. + +![The Streaming Replication dashboard (Primary node)](images/str_replication_dashboard_master.png) + +The Streaming Replication Analysis Dashboard displays statistical information about activity for any monitored server on which streaming replication is enabled. The dashboard header identifies the status of the monitored server (either Replication Primary or Replication Slave), and displays the date and time that the server was last started, the date and time that the page was last updated, and a current count of triggered alerts for the server. + +When reviewing the dashboard for a Replication Slave (a Standby node), a label at the bottom of the dashboard confirms the status of the server. + +![The Streaming Replication dashboard (Standby node)](images/str_replication_dashboard_standby.png) + +By default, the PEM replication probes that provide information for the Streaming Replication Analysis dashboard are disabled. + +To view the Streaming Replication Analysis dashboard for the Primary node of a replication scenario, you must enable the following probes: + +- Streaming Replication +- WAL Archive Status + +To view the Streaming Replication Analysis dashboard for the Standby node of a replication scenario, you must enable the following probes: + +- Streaming Replication Lag Time + +For more information about PEM, please visit the EnterpriseDB website at: + + diff --git a/product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx b/product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx new file mode 100644 index 00000000000..6db8691d3e0 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx @@ -0,0 +1,213 @@ +--- +title: "Using the efm Utility" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/using_efm_utility.html" +--- + + + +Failover Manager provides the efm utility to assist with cluster management. The RPM installer adds the utility to the `/usr/edb/efm-4.0/bin` directory when you install Failover Manager. + +**efm allow-node** + + + +```text +efm allow-node +``` + +Invoke the `efm allow-node` command to allow the specified node to join the cluster. When invoking the command, provide the name of the cluster and the IP address of the joining node. + +This command must be invoked by `efm`, a member of the `efm` group, or root. + +**efm disallow-node** + + + +```text +efm disallow-node
+``` + +Invoke the `efm disallow-node` command to remove the specified node from the allowed hosts list, and prevent the node from joining a cluster. Provide the name of the cluster and the address of the node when calling the `efm disallow-node` command. This command must be invoked by `efm`, a member of the `efm` group, or root. + +**efm cluster-status** + + + +```text +efm cluster-status +``` + +Invoke the `efm cluster-status` command to display the status of a Failover Manager cluster. For more information about the status report, see [Monitoring a Failover Manager Cluster](06_monitoring_efm_cluster/#monitoring_efm_cluster). + +**efm cluster-status-json** + + + +```text +efm cluster-status-json +``` + +Invoke the `efm cluster-status-json` command to display the status of a Failover Manager cluster in JSON format. While the format of the displayed information is different from the display generated by the `efm cluster-status` command, the information source is the same. + +The following example is generated by querying the status of a healthy cluster with three nodes: + +```text +{ + "nodes": { + "172.16.144.176": { + "type": "Witness", + "agent": "UP", + "db": "N\/A", + "vip": "", + "vip_active": false + }, + "172.16.144.177": { + "type": "Primary", + "agent": "UP", + "db": "UP", + "vip": "", + "vip_active": false, + "xlogReceive": "0/14001478", + "xlog": "0/14001478", + "xloginfo": "" + }, + "172.16.144.180": { + "type": "Standby", + "agent": "UP", + "db": "UP", + "vip": "", + "vip_active": false, + "xlogReceive": "0/14001478", + "xlog": "0/14001478", + "xloginfo": "" + } + }, + "allowednodes": [ + "172.16.144.177", + "172.16.144.160", + "172.16.144.180", + "172.16.144.176" + ], + "membershipcoordinator": "172.16.144.177", + "failoverpriority": [ + "172.16.144.180" + ], + "minimumstandbys": 0, + "missingnodes": [], + "messages": [] +} +``` + +**efm encrypt** + + + +```text +efm encrypt [--from-env] +``` + +Invoke the `efm encrypt` command to encrypt the database password before including the password in the cluster properties file. Include the `--from-env` option to instruct Failover Manager to use the value specified in the `EFMPASS` environment variable, and execute without user input. For more information, see [Encrypting Your Database Password](04_configuring_efm/01_cluster_properties/01_encrypting_database_password/#encrypting_database_password). + +**efm promote** + + + +```text +efm promote cluster_name [-switchover] [-sourcenode
] [-quiet] [-noscripts] +``` + +The `efm promote` command instructs Failover Manager to perform a manual failover of a standby to primary. + +Manual promotion should only be attempted if the status command reports that the cluster includes a Standby node that is up-to-date with the Primary. If there is no up-to-date Standby, Failover Manager will prompt you before continuing. + +Include the `-switchover` clause to promote a standby node, and reconfigure a primary node as a standby node. Include the `-sourcenode` keyword, and specify a node address to indicate the node whose recovery settings will be copied to the old primary node (making it a standby). Include the `-quiet` keyword to suppress notifications during the switchover process. Include the `-noscripts` keyword to instruct Failover Manager not to invoke fencing or post-promotion scripts. + +This command must be invoked by `efm`, a member of the `efm` group, or root. + +!!! Note + This command instructs the service to ignore the value specified in the `auto.failover` parameter in the cluster properties file. + +**efm resume** + + + +```text +efm resume +``` + +Invoke the `efm resume` command to resume monitoring a previously stopped database. This command must be invoked by `efm`, a member of the `efm` group, or root. + +**efm set-priority** + + + +```text +efm set-priority
+``` + +Invoke the `efm set-priority` command to assign a failover priority to a standby node. The value specifies the order in which the node will be used in the event of a failover. This command must be invoked by `efm`, a member of the `efm` group, or root. + +Use the priority option to specify the place for the node in the priority list. For example, specify a value of 1 to indicate that the node is the primary standby, and will be the first node promoted in the event of a failover. A priority value of 0 instructs Failover Manager to not promote the standby. + +**efm stop-cluster** + + + +```text +efm stop-cluster +``` + +Invoke the `efm stop-cluster` command to stop Failover Manager on all nodes. This command instructs Failover Manager to connect to each node on the cluster and instruct the existing members to shut down. The command has no effect on running databases, but when the command completes, there is no failover protection in place. + +!!! Note + When you invoke the `efm stop-cluster` command, all authorized node information is removed from the Allowed node host list. + +This command must be invoked by `efm`, a member of the `efm` group, or root. + +**efm upgrade-conf** + + + +```text +efm upgrade-conf [-source ] +``` + +Invoke the `efm upgrade-conf` command to copy the configuration files from an existing Failover Manager installation, and add parameters required by a Failover Manager installation. Provide the name of the previous cluster when invoking the utility. This command must be invoked with root privileges. + +If you are upgrading from a Failover Manager configuration that does not use sudo, include the `-source` flag and specify the name of the *directory* in which the configuration files reside when invoking upgrade-conf. + +**efm node-status-json** + + + +```text +efm node-status-json +``` + +Invoke the `efm node-status-json` command to display the status of a local node in json format. A successful execution of this command returns `0` as its exit code. In case of a database failure or an agent status becoming IDLE, the command returns `1` as exit code. + +The following is an example output of the `efm node-status-json` command: + +> ```text +> { +> "type":"Standby", +> "address":"172.16.144.130", +> "agent":"UP", +> "db":"UP", +> "vip":"", +> "vip_active":"false" +> } +> ``` + +**efm --help** + + + +```text +efm --help +``` + +Invoke the `efm --help` command to display online help for the Failover Manager utility commands. diff --git a/product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx b/product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx new file mode 100644 index 00000000000..4ab2f2aa6ff --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx @@ -0,0 +1,55 @@ +--- +title: "Controlling the Failover Manager Service" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/controlling_efm_service.html" +--- + + + +Each node in a Failover Manager cluster hosts a Failover Manager agent that is controlled by a service script. By default, the service script expects to find: + +- A configuration file named `efm.properties` that contains the properties used by the Failover Manager service. Each node of a replication scenario must contain a properties file that provides information about the node. 
+- A cluster members file named `efm.nodes` that contains a list of the cluster members. Each node of a replication scenario must contain a cluster members list. + +Note that if you are running multiple clusters on a single node you will need to manually create configuration files with cluster-specific names and modify the service script for the corresponding clusters. + +The commands that control the Failover Manager service are platform-specific. + + + +## Using the systemctl Utility on RHEL/CentOS 7.x and RHEL/CentOS 8.x + +On RHEL/CentOS 7.x and RHEL/CentOS 8.x, Failover Manager runs as a Linux service named (by default) `edb-efm-4.0.service` that is located in `/usr/lib/systemd/system`. Each database cluster monitored by Failover Manager will run a copy of the service on each node of the replication cluster. + +Use the following systemctl commands to control a Failover Manager agent that resides on a RHEL/CentOS 7.x and RHEL/CentOS 8.x host: + +```text +systemctl start edb-efm-4.0 +``` + +The start command starts the Failover Manager agent on the current node. The local Failover Manager agent monitors the local database and communicates with Failover Manager on the other nodes. You can start the nodes in a Failover Manager cluster in any order. This command must be invoked by root. + +```text +systemctl stop edb-efm-4.0 +``` + +Stop the Failover Manager on the current node. This command must be invoked by root. + +```text +systemctl status edb-efm-4.0 +``` + +The status command returns the status of the Failover Manager agent on which it is invoked. You can invoke the status command on any node to instruct Failover Manager to return status and server startup information. + +```text +[root@ONE ~]}> systemctl status edb-efm-4.0 + edb-efm-4.0.service - EnterpriseDB Failover Manager 4.0 + Loaded: loaded (/usr/lib/systemd/system/edb-efm-4.0.service; disabled; vendor preset: disabled) + Active: active (running) since Wed 2013-02-14 14:02:16 EST; 4s ago + Process: 58125 ExecStart=/bin/bash -c /usr/edb/edb-efm-4.0/bin/runefm.sh start ${CLUSTER} (code=exited, status=0/SUCCESS) + Main PID: 58180 (java) + CGroup: /system.slice/edb-efm-4.0.service + └─58180 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.161-0.b14.el7_4.x86_64/jre/bin/java -cp /usr/edb/edb-efm-4.0/lib/EFM-4.0.0.jar -Xmx128m... +``` diff --git a/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx b/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx new file mode 100644 index 00000000000..af66915f9c0 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx @@ -0,0 +1,90 @@ +--- +title: "Controlling Logging" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/controlling_logging.html" +--- + + + +Failover Manager writes and stores one log file per agent and one startup log per agent in `/var/log/-4.0` (where `` specifies the name of the cluster). + +You can control the level of detail written to the agent log by modifying the `jgroups.loglevel` and `efm.loglevel` parameters in the [cluster properties file](04_configuring_efm/01_cluster_properties/#loglevel): + +```text +# Logging levels for JGroups and EFM. +# Valid values are: TRACE, DEBUG, INFO, WARN, ERROR +# Default value: INFO +# It is not necessary to increase these values unless debugging a +# specific issue. 
If nodes are not discovering each other at +# startup, increasing the jgroups level to DEBUG will show +# information about the TCP connection attempts that may help +# diagnose the connection failures. +jgroups.loglevel=INFO +efm.loglevel=INFO +``` + +The logging facilities use the Java logging library and logging levels. The log levels (in order from most logging output to least) are: + +> - `TRACE` +> - `DEBUG` +> - `INFO` +> - `WARN` +> - `ERROR` + +For example, if you set the `efm.loglevel` parameter to `WARN`, Failover Manager will only log messages at the `WARN` level and above (`WARN` and `ERROR`). + +By default, Failover Manager log files are rotated daily, compressed, and stored for a week. You can modify the file rotation schedule by changing settings in the log rotation file (`/etc/logrotate.d/efm-4.0`). For more information about modifying the log rotation schedule, consult the logrotate man page: + +> `$ man logrotate` + + + +## Enabling syslog Log File Entries + +Failover Manager supports syslog logging. To implement syslog logging, you must configure syslog to allow UDP or TCP connections. + +To allow a connection to syslog, edit the `/etc/rsyslog.conf` file and uncomment the protocol you wish to use. You must also ensure that the `UDPServerRun` or `TCPServerRun` entry associated with the protocol includes the port number to which log entries will be sent. For example, the following configuration file entries enable UDP connections to port 514: + +```text +# Provides UDP syslog reception +$ModLoad imudp +$UDPServerRun 514 +``` + +The following configuration file entries enable TCP connections to port 514: + +```text +# Provides TCP syslog reception +$ModLoad imtcp +$InputTCPServerRun 514 +``` + +After modifying the syslog configuration file, restart the `rsyslog` service to enable the connections: + +> `systemctl restart rsyslog.service` + +After modifying the `rsyslog.conf` file on the Failover Manager host, you must modify the Failover Manager properties to enable logging. Use your choice of editor to [modify the properties file](04_configuring_efm/01_cluster_properties/#logtype_enabled) (`/etc/edb/efm-4.0/efm.properties.in`) specifying the type of logging that you wish to implement: + +```text +# Which logging is enabled. +file.log.enabled=true +syslog.enabled=false +``` + +You must also [specify syslog details](04_configuring_efm/01_cluster_properties/#syslog_logging) for your system. Use the `syslog.protocol` parameter to specify the protocol type (UDP or TCP) and the `syslog.port` parameter to specify the listener port of the syslog host. The `syslog.facility` value may be used as an identifier for the process that created the entry; the value must be between `LOCAL0` and `LOCAL7`. + +```text +# Syslog information. The syslog service must be listening +# on the port for the given protocol, which can be UDP or +# TCP. The facilities supported are LOCAL0 through LOCAL7. +# syslog.host=localhost +syslog.port=514 +syslog.protocol=UDP +syslog.facility=LOCAL1 +``` + +For more information about syslog, please see the syslog man page: + +> `man syslog` diff --git a/product_docs/docs/efm/3.10/efm_user/10_notifications.mdx b/product_docs/docs/efm/3.10/efm_user/10_notifications.mdx new file mode 100644 index 00000000000..3087ea24575 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/10_notifications.mdx @@ -0,0 +1,157 @@ +--- +title: "Notifications" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/notifications.html" +--- + + + +Failover Manager will send e-mail notifications and/or invoke a notification script when a notable event occurs that affects the cluster. If you have configured Failover Manager to send an email notification, you must have an SMTP server running on port 25 on each node of the cluster. Use the following parameters to configure notification behavior for Failover Manager: + +```text +user.email +script.notification +from.email +``` + +For more information about editing the configuration properties, see [Specifying Cluster Properties](04_configuring_efm/01_cluster_properties/#cluster_properties). + +The body of the notification contains details about the event that triggered the notification, and about the current state of the cluster. For example: + +```text +EFM node: 10.0.1.11 +Cluster name: acctg +Database name: postgres +VIP: ip_address (Active|Inactive) +Database health is not being monitored. +``` + +The VIP field displays the IP address and state of the virtual IP if implemented for the node. + +Failover Manager assigns a severity level to each notification. The following levels indicate increasing levels of attention required: + +- `INFO` indicates an informational message about the agent and does not require any manual intervention (for example, Failover Manager has started or stopped). See [List of INFO level notifications](#notifications_info) +- `WARNING` indicates that an event has happened that requires the administrator to check on the system (for example, failover has occurred). See [List of WARNING level notifications](#notifications_warning) +- `SEVERE` indicates that a serious event has happened and requires the immediate attention of the administrator (for example, failover was attempted, but was unable to complete). See [List of SEVERE level notifications](#notifications_severe) + +The severity level designates the urgency of the notification. A notification with a severity level of `SEVERE` requires user attention immediately, while a notification with a severity level of `INFO` will call your attention to operational information about your cluster that does not require user action. Notification severity levels are not related to logging levels; all notifications are sent regardless of the log level detail specified in the configuration file. + +You can use the [notification.level](04_configuring_efm/01_cluster_properties/#notification_level) property to specify the minimum severity level that will trigger a notification. + +!!! Note + In addition to sending notices to the administrative email address, all notifications are recorded in the cluster log file (`/var/log/efm-4.0/.log`). 
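+As a point of reference, a minimal notification setup in the cluster properties file might look like the following; the addresses, script path, and severity threshold shown here are placeholders for illustration only:
+
+```text
+# Email address(es) that receive cluster notifications
+user.email=dba-team@example.com
+# Address used as the sender of notification email
+from.email=efm@example.com
+# Optional script that is invoked for each notification
+script.notification=/usr/local/bin/efm_notify.sh
+# Minimum severity level that triggers a notification
+notification.level=INFO
+```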
+ +The conditions listed in the table below will trigger an `INFO` level notification: + + + +| Subject | Description | +| ---------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Executed fencing script | Executed fencing script script_name Results: script_results | +| Executed post-promotion script | Executed post-promotion script script_name Results: script_results | +| Executed remote pre-promotion script | Executed remote pre-promotion script script_name Results: script_results | +| Executed remote post-promotion script | Executed remote post-promotion script script_name Results: script_results | +| Executed post-database failure script | Executed post-database failure script script_name Results: script_results | +| Executed primary isolation script | Executed primary isolation script script_name Results: script_results | +| Witness agent running on node_address for cluster cluster_name | Witness agent is running. | +| Primary agent running on node_address for cluster cluster_name | Primary agent is running and database health is being monitored. | +| Standby agent running on node_address for cluster cluster_name | Standby agent is running and database health is being monitored. | +| Idle agent running on node node_address for cluster cluster_name | Idle agent is running. After starting the local database, the agent can be resumed. | +| Assigning VIP to node node_address | Assigning VIP VIP_address to node node_address Results: script_results | +| Releasing VIP from node node_address | Releasing VIP VIP_address from node node_address Results: script_results | +| Starting auto resume check for cluster cluster_name | The agent on this node will check every auto.resume.period seconds to see if it can resume monitoring the failed database. The cluster should be checked during this time and the agent stopped if the database will not be started again. See the agent log for more details. | +| Executed agent resumed script | Executed agent resumed script script_name Results: script_results | +| WAL logs backed up during promotion | When reconfiguring this standby to follow the new primary, the pg_xlog or pg_wal contents were backed up in the pgdata directory. This backup should be removed when convenient to free up disk space. | + +The conditions listed in the table below will trigger a *WARNING* level notification: + + + +| Subject | Description | +| ------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Witness agent exited on node_address for cluster cluster_name | Witness agent has exited. | +| Primary agent exited on node_address for cluster cluster_name | Database health is not being monitored. | +| Cluster cluster_name notified that primary node has left | Failover is disabled for the cluster until the primary agent is restarted. 
| +| Standby agent exited on node_address for cluster cluster_name | Database health is not being monitored. | +| Agent exited during promotion on node_address for cluster cluster_name | Database health is not being monitored. | +| Agent exited on node_address for cluster cluster_name | The agent has exited. This is generated by an agent in the Idle state. | +| Agent exited for cluster cluster_name | The agent has exited. This notification is usually generated during startup when an agent exits before startup has completed. | +| Virtual IP address assigned to non-primary node | The virtual IP address appears to be assigned to a non-primary node. To avoid any conflicts, Failover Manager will release the VIP. You should confirm that the VIP is assigned to your primary node and manually reassign the address if it is not. | +| Virtual IP address not assigned to primary node. | The virtual IP address appears to not be assigned to a primary node. EDB Postgres Failover Manager will attempt to reacquire the VIP. | +| No standby agent in cluster for cluster cluster_name | The standbys on cluster_name have left the cluster. | +| Standby agent failed for cluster cluster_name | A standby agent on cluster_name has left the cluster, but the coordinator has detected that the standby database is still running. | +| Standby database failed for cluster cluster_name | A standby agent has signaled that its database has failed. The other nodes also cannot reach the standby database. | +| Standby agent cannot reach database for cluster cluster_name | A standby agent has signaled database failure, but the other nodes have detected that the standby database is still running. | +| Cluster cluster_name has dropped below three nodes | At least three nodes are required for full failover protection. Please add witness or agent node to the cluster. | +| Subset of cluster cluster_name disconnected from primary | This node is no longer connected to the majority of the cluster cluster_name. Because this node is part of a subset of the cluster, failover will not be attempted. Current nodes that are visible are: node_address | +| Promotion has started on cluster cluster_name. | The promotion of a standby has started on cluster cluster_name. | +| Witness failure for cluster cluster_name | Witness running at node_address has left the cluster. | +| Idle agent failure for cluster cluster_name. | Idle agent running at node_address has left the cluster. | +| One or more nodes isolated from network for cluster cluster_name | This node appears to be isolated from the network. Other members seen in the cluster are: node_name | +| Node no longer isolated from network for cluster cluster_name. | This node is no longer isolated from the network. | +| Standby agent tried to promote, but primary DB is still running | The standby EFM agent tried to promote itself, but detected that the primary DB is still running on node_address. This usually indicates that the primary EFM agent has exited. Failover has NOT occurred. | +| Standby agent started to promote, but primary has rejoined. | The standby EFM agent started to promote itself, but found that a primary agent has rejoined the cluster. Failover has NOT occurred. | +| Standby agent tried to promote, but could not verify primary DB | The standby EFM agent tried to promote itself, but could not detect whether or not the primary DB is still running on node_address. Failover has NOT occurred. 
| +| Standby agent tried to promote, but VIP appears to still be assigned | The standby EFM agent tried to promote itself, but could not because the virtual IP address (VIP_address) appears to still be assigned to another node. Promoting under these circumstances could cause data corruption. Failover has NOT occurred. | +| Standby agent tried to promote, but appears to be orphaned | The standby EFM agent tried to promote itself, but could not because the well-known server (server_address) could not be reached. This usually indicates a network issue that has separated the standby agent from the other agents. Failover has NOT occurred. | +| Potential manual failover required on cluster cluster_name. | A potential failover situation was detected for cluster cluster_name. Automatic failover has been disabled for this cluster, so manual intervention is required. | +| Failover has completed on cluster cluster_name | Failover has completed on cluster cluster_name. | +| Lock file for cluster cluster_name has been removed | The lock file for cluster cluster_name has been removed from: path_name on node node_address. This lock prevents multiple agents from monitoring the same cluster on the same node. Please restore this file to prevent accidentally starting another agent for cluster. | +| A recovery file for cluster cluster_name has been found on primary node | A recovery file for cluster cluster_name has been found at: path_name on primary node node_address. This may be problematic should you attempt to restart the DB on this node. | +| recovery_target_timeline is not set to latest in recovery settings | The recovery_target_timeline parameter is not set to latest in the recovery settings. The standby server will not be able to follow a timeline change that occurs when a new primary is promoted. | +| Promotion has not occurred for cluster cluster_name | A promotion was attempted but there is already a node being promoted: ip_address. | +| Standby not reconfigured after failover in cluster cluster_name | The auto.reconfigure property has been set to false for this node. The node has not been reconfigured to follow the new primary node after a failover. | +| Could not resume replay for standby standby_id. | Could not resume replay for standby. Manual intervention may be required. Error: error_message. | +| Possible problem with database timeout values | Your remote.timeout value (value) is higher than your local.timeout value (value). If the local database takes too long to respond, the local agent could assume that the database has failed though other agents can connect. While this will not cause a failover, it could force the local agent to stop monitoring, leaving you without failover protection. | +| No standbys available for promotion in cluster cluster_name | The current number of standby nodes in the cluster has dropped to the minimum number: number. There cannot be a failover unless another standby node(s) is added or made promotable. | +| No promotable standby for cluster cluster_name | The current failover priority list in the cluster is empty. You have removed the only promotable standby for the cluster cluster_name. There cannot be a failover unless another promotable standby node(s) is added or made promotable by adding to failover priority list. | +| Synchronous replication has been reconfigured for cluster cluster_name | The number of synchronous standby nodes in the cluster has dropped below number. 
The synchronous standby names on primary has been reconfigured to: new synchronous_standby_names value. | +| Synchronous replication has been disabled for cluster cluster_name. | The number of synchronous standby nodes in the cluster has dropped below count. The primary has been taken out of synchronous replication mode. | +| Could not reload database configuration. | Could not reload database configuration. Manual intervention is required. Error: error_message. | +| Custom monitor timeout for cluster cluster_name | The following custom monitoring script has timed out: script_name | +| Custom monitor 'safe mode' failure for cluster cluster_name | The following custom monitor script has failed, but is being run in "safe mode": script_name. Output: script_results | +| primary.shutdown.as.failure set to true for primary node | The primary.shutdown.as.failure property has been set to true for this cluster. Stopping the primary agent without stopping the entire cluster will be treated by the rest of the cluster as an immediate primary agent failure. If maintenance is required on the primary database, shut down the primary agent and wait for a notification from the remaining nodes that failover will not happen. | +| Primary cannot ping local database for cluster cluster_name | The Primary agent can no longer reach the local database running at node_address. Other nodes are able to access the database remotely, so the primary will become IDLE and attempt to resume monitoring the database. | + + + + +The conditions listed in the table below will trigger a *SEVERE* notification: + +| Subject | Description | +| -------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Standby database restarted but EFM cannot connect | The start or restart command for the database ran successfully but the database is not accepting connections. EFM will keep trying to connect for up to restart.connection.timeout seconds. | +| Unable to connect to DB on node_address | The maximum connections limit has been reached. | +| Unable to connect to DB on node_address | Invalid password for db.user=user_name. | +| Unable to connect to DB on node_address | Invalid authorization specification. | +| Primary cannot resume monitoring local database for cluster cluster_name | The primary agent can no longer reach the local database running at node_address. Other nodes are able to access the database remotely, so the primary will not release the VIP and/or create a recovery.conf file. The primary agent will remain IDLE until the resume command is run to resume monitoring the database. | +| Fencing script error | Fencing script script_name failed to execute successfully. Exit Value: exit_code Results: script_results Failover has NOT occurred. | +| Post-promotion script failed | Post-promotion script script_name failed to execute successfully. 
Exit Value: exit_code Results: script_results | +| Remote post-promotion script failed | Remote post-promotion script script_name failed to execute successfully. Exit Value: exit_code Results: script_results Node: node_address | +| Remote pre-promotion script failed | Remote pre-promotion script script_name failed to execute successfully. Exit Value: exit_code Results: script_results Node: node_address | +| Post-database failure script error | Post-database failure script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Agent resumed script error | Agent resumed script script_name failed to execute successfully. Results: script_results | +| Primary isolation script failed | Primary isolation script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Could not promote standby | The promote command failed on the node. Could not promote standby. Error details: error_details | +| Error creating recovery.conf file on node_address for cluster cluster_name | There was an error creating the recovery.conf file on primary node node_address during promotion. Promotion has continued, but requires manual intervention to ensure that the old primary node cannot be restarted. Error details: message_details | +| An unexpected error has occurred for cluster cluster_name | An unexpected error has occurred on this node. Please check the agent log for more information. Error: error_details | +| Primary database being fenced off for cluster cluster_name | The primary database has been isolated from the majority of the cluster. The cluster is telling the primary agent at ip_address to fence off the primary database to prevent two primaries when the rest of the Failover Manager cluster promotes a standby. | +| Isolated primary database shutdown. | The isolated primary database has been shut down by Failover Manager. | +| Primary database being fenced off for cluster cluster_name | The primary database has been isolated from the majority of the cluster. Before the primary could finish detecting isolation, a standby was promoted and has rejoined this node in the cluster. This node is isolating itself to avoid more than one primary database. | +| Could not assign VIP to node node_address | Failover Manager could not assign the VIP address to the node. | +| primary_or_standby database failure for cluster cluster_name | The database has failed on the specified node. | +| Agent is timing out for cluster cluster_name | This agent has timed out trying to reach the local database. After the timeout, the agent could successfully ping the database and has resumed monitoring. However, the node should be checked to make sure it is performing normally to prevent a possible database or agent failure. | +| Resume timed out for cluster cluster_name | This agent could not resume monitoring after reconfiguring and restarting the local database. See agent log for details. | +| Internal state mismatch for cluster cluster_name | The Failover Manager cluster's internal state did not match the actual state of the cluster members. This is rare and can be caused by a timing issue of nodes joining the cluster and/or changing their state. The problem should be resolved, but you should check the cluster status as well to verify. Details of the mismatch can be found in the agent log file.
| +| Failover has not occurred | An agent has detected that the primary database is no longer available in cluster cluster_name, but there are no standby nodes available for failover. | +| Database in wrong state on node_address | The standby agent has detected that the local database is no longer in recovery. The agent will now become idle. Manual intervention is required. | +| Database in wrong state on node_address | The primary agent has detected that the local database is in recovery. The agent will now become idle. Manual intervention is required. | +| Database connection failure for cluster cluster_name | This node is unable to connect to the database running on: node_address. Until this is fixed, failover may not work properly because this node will not be able to check if the database is running or not. | +| Standby custom monitor failure for cluster cluster_name | The following custom monitor script has failed on a standby node. The agent will stop monitoring the local database. Script location: script_name Script output: script_results | +| Primary custom monitor failure for cluster cluster_name | The following custom monitor script has failed on a primary node. EFM will attempt to promote a standby. Script location: script_name Script output: script_results | +| Loopback address set for ping.server.ip | A loopback address is set for the ping.server.ip property. This setting can interfere with network isolation detection and should be changed. | +| Load balancer attach script error | Load balancer attach script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Load balancer detach script error | Load balancer detach script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Not enough synchronous standbys available in cluster cluster_name. | The number of synchronous standby nodes in the cluster has dropped to count. All write queries on the primary will be blocked until enough synchronous standby nodes are added. | diff --git a/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx b/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx new file mode 100644 index 00000000000..f225d41f915 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx @@ -0,0 +1,117 @@ +--- +title: "Supported Failover and Failure Scenarios" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/supported_scenarios.html" +--- + + + +Failover Manager monitors a cluster for failures that may or may not result in failover. + +Failover Manager supports a very specific and limited set of failover scenarios. Failover can occur: + +- if the Primary database crashes or is shut down. +- if the node hosting the Primary database crashes or becomes unreachable. + +Failover Manager makes every attempt to verify the accuracy of these conditions. If agents cannot confirm that the Primary database or node has failed, Failover Manager will not perform any failover actions on the cluster. + +Failover Manager also supports a *no auto-failover* mode for situations where you want Failover Manager to monitor and detect failover conditions, but not perform an automatic failover to a Standby. In this mode, a notification is sent to the administrator when failover conditions are met.
To disable automatic failover, modify the cluster properties file, setting the [auto.failover](04_configuring_efm/01_cluster_properties/#auto_failover) parameter to false. + +Failover Manager will alert an administrator to situations that require administrator intervention, but that do not merit promoting a Standby database to Primary. + + + +## Primary Database is Down + +If the agent running on the Primary database node detects a failure of the Primary database, Failover Manager begins the process of confirming the failure. + +![Confirming the Failure of the Primary Database.](images/supported_scenarios_master_db_down.png) + +If the agent on the Primary node detects that the Primary database has failed, all agents attempt to connect directly to the Primary database. If an agent can connect to the database, Failover Manager sends a notification about the state of the Primary node. If no agent can connect, the Primary agent declares database failure and releases the VIP (if applicable). + +If no agent can reach the virtual IP address or the database server, Failover Manager starts the failover process. The Standby agent on the most up-to-date node runs a fencing script (if applicable), promotes the Standby database to Primary database, and assigns the virtual IP address to the Standby node. Any additional Standby nodes are configured to replicate from the new primary unless auto.reconfigure is set to false. If applicable, the agent runs a post-promotion script. + +**Returning the Node to the Cluster** + +To recover from this scenario without restarting the entire cluster, you should: + +1. Restart the database on the original Primary node as a Standby database. +2. Invoke the `efm resume` command on the original Primary node. + +**Returning the Node to the Role of Primary** + +After returning the node to the cluster as a Standby, you can easily return the node to the role of Primary: + +1. If the cluster has more than one Standby node, use the `efm set-priority` command to set the node's failover priority to 1. +2. Invoke the [efm promote -switchover](07_using_efm_utility/#efm_promote) command to promote the node to its original role of Primary node. + + + +## Standby Database is Down + +If a Standby agent detects a failure of its database, the agent notifies the other agents; the other agents confirm the state of the database. + +![Confirming the failure of a Standby Database.](images/supported_scenarios_standby_db_down.png) + +After returning the Standby database to a healthy state, invoke the `efm resume` command to return the Standby to the cluster. + + + +## Primary Agent Exits or Node Fails + +If the Failover Manager Primary agent crashes or the node fails, a Standby agent will detect the failure and (if appropriate) initiate a failover. + +![Confirming the failure of the Primary Agent.](images/supported_scenarios_master_agent_exits.png) + +If an agent detects that the Primary agent has left, all agents attempt to connect directly to the Primary database. If any agent can connect to the database, an agent sends a notification about the failure of the Primary agent. If no agent can connect, the agents attempt to ping the virtual IP address to determine if it has been released. + +If no agent can reach the virtual IP address or the database server, Failover Manager starts the failover process. 
The Standby agent on the most up-to-date node runs a fencing script (if applicable), promotes the Standby database to Primary database, and assigns the virtual IP address to the Standby node; if applicable, the agent runs a post-promotion script. Any additional Standby nodes are configured to replicate from the new primary unless auto.reconfigure is set to false. + +If this scenario has occurred because the primary has been isolated from the network, the Primary agent will detect the isolation, release the virtual IP address, and create the recovery.conf file. Failover Manager will perform the previously listed steps on the remaining nodes of the cluster. + +To recover from this scenario without restarting the entire cluster, you should: + +1. Restart the original Primary node. +2. Bring the original Primary database up as a Standby node. +3. Start the service on the original Primary node. + +Please note that stopping an agent does not signal the cluster that the agent has failed. + + + +## Standby Agent Exits or Node Fails + +If a Standby agent exits or a Standby node fails, the other agents will detect that it is no longer connected to the cluster. + +![Failure of Standby Agent.](images/supported_scenarios_standby_agent_exits.png) + +When the failure is detected, the agents attempt to contact the database that resides on the node; if the agents confirm that there is a problem, Failover Manager sends the appropriate notification to the administrator. + +If there is only one Primary and one Standby remaining, there is no failover protection in the case of a Primary node failure. In the case of a Primary database failure, the Primary and Standby agents can agree that the database failed and proceed with failover. + + + +## Dedicated Witness Agent Exits / Node Fails + +The following scenario details the actions taken if a dedicated Witness (a node that is not hosting a database) fails. + +![Confirming the Failure of a dedicated Witness.](images/supported_scenarios_witness_agent_exits.png) + +When an agent detects that the Witness node cannot be reached, Failover Manager notifies the administrator of the state of the Witness. + +!!! Note + If the witness fails and the cluster only has two nodes, then there is no failover protection because the standby node has no way to know if the primary failed or was disconnected. In a two-node cluster, if the primary database fails but the nodes are still connected, failover will still occur since the standby can confirm the condition of the primary database. + + + +## Nodes Become Isolated from the Cluster + +The following scenario details the actions taken if one or more nodes (a minority of the cluster) become isolated from the majority of the cluster. + +![If members of the cluster become isolated.](images/supported_scenarios_node_becomes_isolated.png) + +If one or more nodes (but fewer than half of the cluster) become isolated from the rest of the cluster, the remaining cluster behaves as if the nodes have failed. The agents attempt to discern if the Primary node is among the isolated nodes; if it is, the Primary fences itself off from the cluster, while a Standby node (from within the cluster majority) is promoted to replace it. Other Standby nodes are configured to replicate from the new primary unless `auto.reconfigure` is set to `false`. + +Failover Manager then notifies an administrator, and the isolated nodes rejoin the cluster when they are able. When the nodes rejoin the cluster, the failover priority may change.
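+
+For example, after isolated nodes rejoin, you might confirm the cluster state and, if needed, restore the original failover priority with commands like the following sketch; the cluster name `efm`, the standby address, and the installation path are placeholders taken from the examples elsewhere in this guide, so adjust them for your environment:
+
+```text
+# Review agent status and the current standby priority list
+/usr/edb/efm-4.0/bin/efm cluster-status efm
+
+# Move a rejoined standby back to the front of the failover priority list
+/usr/edb/efm-4.0/bin/efm set-priority efm 10.0.1.12 1
+```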
diff --git a/product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx b/product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx new file mode 100644 index 00000000000..15455f32cd9 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx @@ -0,0 +1,108 @@ +--- +title: "Upgrading an Existing Cluster" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/upgrading_existing_cluster.html" +--- + + + +Failover Manager provides a utility to assist you when upgrading a Failover Manager cluster. To upgrade an existing cluster, you must: + +1. Install Failover Manager 4.0 on each node of the cluster. For detailed information about installing Failover Manager, see [Installing Failover Manager](03_installing_efm/#installing_efm). +2. After installing Failover Manager, invoke the `efm upgrade-conf` utility to create the `.properties` and `.nodes` files for Failover Manager 4.0. The Failover Manager installer installs the upgrade utility ([efm upgrade-conf](07_using_efm_utility/#efm_upgrade_conf)) to the `/usr/edb/efm-4.0/bin directory`. To invoke the utility, assume root privileges, and invoke the command: + +```text +efm upgrade-conf +``` + +The efm `upgrade-conf` utility locates the `.properties` and `.nodes` files of pre-existing clusters and copies the parameter values to a new configuration file for use by Failover Manager. The utility saves the updated copy of the configuration files in the `/etc/edb/efm-4.0` directory. + +1. Modify the `.properties` and `.nodes` files for EFM 4.0, specifying any new preferences. Use your choice of editor to modify any additional properties in the properties file (located in the `/etc/edb/efm-4.0` directory) before starting the service for that node. For detailed information about property settings, see [The Cluster Properties File](04_configuring_efm/01_cluster_properties/#cluster_properties). + +!!! Note + `db.bin` is a required property. When modifying the properties file, ensure that the `db.bin` property specifies the location of the Postgres `bin` directory. + +1. Use a version-specific command to stop the old Failover Manager cluster; for example, you can use the following command to stop a version 4.0 cluster: + +```text +/usr/efm-4.0/bin/efm stop-cluster efm +``` + +1. Start the new [Failover manager service](08_controlling_efm_service/#controlling_efm_service) (`edb-efm-4.0`) on each node of the cluster. + +The following example demonstrates invoking the upgrade utility to create the `.properties` and `.nodes` files for a Failover Manager installation: + +```text +[root@localhost efm-4.0]# /usr/edb/efm-4.0/bin/efm upgrade-conf efm +Checking directory /etc/edb/efm-3.10 +Processing efm.properties file +The following properties were added in addition to those in previous ˓→installed version: +notification.text.prefix +encrypt.agent.messages +standby.restart.delay +The following properties were renamed from those in previous installed ˓→version: +stop.failed.master => stop.failed.primary +master.shutdown.as.failure => primary.shutdown.as.failure +script.master.isolated => script.primary.isolated +stop.isolated.master => stop.isolated.primary +reconfigure.sync.master => reconfigure.sync.primary +Checking directory /etc/edb/efm-3.10 +Processing efm.nodes file +db.password.encrypted re-encoded with stronger encryption. +Upgrade of files is finished. 
The owner and group for properties and ˓→nodes files have been set as 'efm'. +[root@localhost efm-4.0]# +``` + +If you are [using a Failover Manager configuration without sudo](04_configuring_efm/04_extending_efm_permissions/#running_efm_without_sudo), include the `-source` flag and specify the name of the directory in which the configuration files reside when invoking `upgrade-conf`. If the directory is not the configuration default directory, the upgraded files will be created in the directory from which the `upgrade-conf` command was invoked. + +**Please note**: If you are using a unit file, you must manually update the file to reflect the new Failover Manager service name when you perform an upgrade. + +## Un-installing Failover Manager + +After upgrading to Failover Manager 4.0, you can use your native package manager to remove previous installations of Failover Manager. For example, use the following command to remove Failover Manager 3.10 and any unneeded dependencies: + +- On RHEL or CentOS 7.x: + +```text +yum remove edb-efm310 +``` + +- On RHEL or CentOS 8.x: + +```text +dnf remove edb-efm310 +``` + +- On Debian or Ubuntu: + +```text +apt-get remove edb-efm310 +``` + +- On SLES: + +```text +zypper remove edb-efm310 +``` + +## Performing a Database Update (Minor Version) + +This section describes how to perform a quick minor database version upgrade. You can use the steps that follow to upgrade from one minor version to another (for example, from 10.1.5 to version 10.2.7), or to apply a patch release for a version. + +You should first update the database server on each Standby node of the Failover Manager cluster. Then, perform a switchover, promoting a Standby node to the role of Primary within the Failover Manager cluster. Then, perform a database update on the old primary node. + +On each node of the cluster you must perform the following steps to update the database server: + +1. Stop the Failover Manager agent. +2. Stop the database server. +3. Update the database server. +4. Start the database service. +5. Start the Failover Manager agent. + +For detailed information about controlling the Advanced Server service, or upgrading your version of Advanced Server, please see the EDB Postgres Advanced Server Guide, available at: + +[https://www.enterprisedb.com/docs](/epas/latest/) + +When your updates are complete, you can use the [efm set-priority](07_using_efm_utility/#efm_set_priority) command to add the old primary to the front of the standby list (if needed), and then switchover to return the cluster to its original state. diff --git a/product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx b/product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx new file mode 100644 index 00000000000..f2c421c3ce1 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx @@ -0,0 +1,47 @@ +--- +title: "Troubleshooting" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/troubleshooting.html" +--- + + + +**Authorization file not found. Is the local agent running?** + +If you invoke an EFM cluster management command and EFM is not running on the node, the `efm` command will display an error: + +```text +Authorization file not found. Is the local agent running? +``` + +**Not authorized to run this command. 
User '<os user>' is not a member of the \`efm\` group.** + +You must have special privileges to invoke some of the `efm` commands documented in [Using the efm Utility](07_using_efm_utility/#using_efm_utility). If these commands are invoked by a user who isn't authorized to run them, the `efm` command will display an error: + +```text +Not authorized to run this command. User '' is not a member of the `efm` group. +``` + +**Notification; Unexpected error message** + +If you receive a notification message about an unexpected error message, check the [Failover Manager log file](09_controlling_logging/#controlling_logging) for an `OutOfMemory` message. Failover Manager runs with the default memory value set by this property: + +```text +# Extra information that will be passed to the JVM when starting the agent. +jvm.options=-Xmx128m +``` + +If you are running with less than 128 megabytes allocated, you should increase the value and restart the Failover Manager agent. + +**Confirming the OpenJDK version** + +Failover Manager is tested with OpenJDK; we strongly recommend using OpenJDK. You can use the following command to check the type of your Java installation: + +```text +# java -version +openjdk version "1.8.0_191" +OpenJDK Runtime Environment (build 1.8.0_191-b12) +OpenJDK 64-Bit Server VM (build 25.191-b12, mixed mode) +``` diff --git a/product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx b/product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx new file mode 100644 index 00000000000..9b19511aba2 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx @@ -0,0 +1,44 @@ +--- +title: "Configuring Streaming Replication" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/configuring_streaming_replication.html" +--- + + + +Configuring a replication scenario can be complex; for detailed information about configuration options, please see the PostgreSQL core documentation, available at: + + + +You may want to use a `.pgpass` file to enable md5 authentication for the replication user – this may or may not be the safest authentication method for your environment. For more information about the supported authentication options, please see the PostgreSQL core documentation at: + + + +!!! Note + From Version 3.10 onwards, EFM uses `pg_ctl` utility for standby promotion. You do not need to set the `trigger_file` or `promote_trigger_file` parameter for promotion of a standby server. + +## Limited Support for Cascading Replication + +While Failover Manager does not provide full support for cascading replication, it does provide limited support for simple failover in a cascading replication scenario. Cascading replication allows a Standby node to stream to another Standby node, reducing the number of connections (and processing overhead) to the primary node. + +![Cascading replication.](images/cascading_replication.png) + +For detailed information about configuring cascading replication, please see the PostgreSQL documentation at: + + + +To use Failover Manager in a cascading replication scenario, you should modify the cluster properties file, setting the following property values on Standby Node #2: + +```text +promotable=false +auto.reconfigure=false +``` + +In the event of a Failover, Standby Node #1 will be promoted to the role of Primary node. 
Should failover occur, Standby Node #2 will continue to act as a read-only replica for the new Primary node until you take action to manually reconfigure the replication scenario to contain 3 nodes. + +In the event of a failure of Standby Node #1, you will not have failover protection, but you will receive an email notifying you of the failure of the node. + +!!! Note + Performing a switchover and switching back to the original primary may not preserve the cascading replication scenario. diff --git a/product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx b/product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx new file mode 100644 index 00000000000..61e1a171302 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx @@ -0,0 +1,69 @@ +--- +title: "Configuring SSL Authentication on a Failover Manager Cluster" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/configuring_ssl_authentication.html" +--- + + + +The following steps enable SSL authentication for Failover Manager. Note that all connecting clients will be required to use SSL authentication when connecting to any database server within the cluster; you will be required to modify the connection methods currently used by existing clients. + +To enable SSL on a Failover Manager cluster, you must: + +1. Place a `server.crt` and `server.key` file in the `data` directory (under your Advanced Server installation). You can purchase a certificate signed by an authority, or create your own self-signed certificate. For information about creating a self-signed certificate, see the PostgreSQL core documentation at: + + + +2. Modify the `postgresql.conf` file on each database within the Failover Manager cluster, enabling SSL: + + ```text + ssl=on + ``` + +> After modifying the postgresql.conf file, you must restart the server. + +1. Modify the `pg_hba.conf` file on each node of the Failover Manager cluster, adding the following line to the beginning of the file: + + ```text + hostnossl all all all reject + ``` + +> The line instructs the server to reject any connections that are not using SSL authentication; this enforces SSL authentication for any connecting clients. For information about modifying the pg_hba.conf file, see the PostgreSQL core documentation at: +> +> > + +1. After placing the server.crt and server.key file in the data directory, convert the certificate to a form that Java understands; you can use the command: + + ```text + openssl x509 -in server.crt -out server.crt.der -outform der + ``` + +> For more information, visit: +> +> > + +1. Then, add the certificate to the Java trusted certificates file: + + ```text + keytool -keystore $JAVA_HOME/lib/security/cacerts -alias <alias_name> -import -file server.crt.der + ``` + +> Where +> +> > `$JAVA_HOME` is the home directory of your Java installation. +> > +> > <alias_name> can be any string, but must be unique for each certificate. +> > +> > You can use the `keytool` command to review a list of the available certificates or retrieve information about a specific certificate. For more information about using the keytool command, enter: +> > +> > > ```text
> > > man keytool
> > > ``` +> +> The certificate from each database server must be imported into the trusted certificates file of each agent. Note that the location of the cacerts file may vary on each system.
For more information, visit: +> +> > + +1. Modify the [efm.properties file](04_configuring_efm/01_cluster_properties/#jdbc_sslmode) on each node within the cluster, setting the `jdbc.sslmode` property. diff --git a/product_docs/docs/efm/3.10/efm_user/images/cascading_replication.png b/product_docs/docs/efm/3.10/efm_user/images/cascading_replication.png new file mode 100644 index 00000000000..9f70a4f63fd --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/cascading_replication.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9cc1551b6cb7ea81a2d3cae4593cb13bd34477c2882cc6ca5a63597fdf2af1b +size 53120 diff --git a/product_docs/docs/efm/3.10/efm_user/images/cascading_replication1.png b/product_docs/docs/efm/3.10/efm_user/images/cascading_replication1.png new file mode 100644 index 00000000000..2477de6eba6 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/cascading_replication1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64acefbc9cbf3a086eec5c019e268e463357d6bb0620e7f7be7a34ffd906b49c +size 36920 diff --git a/product_docs/docs/efm/3.10/efm_user/images/edb_logo.png b/product_docs/docs/efm/3.10/efm_user/images/edb_logo.png new file mode 100755 index 00000000000..3c3bf2a4365 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/edb_logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 +size 16849 diff --git a/product_docs/docs/efm/3.10/efm_user/images/failover_manager_overview.png b/product_docs/docs/efm/3.10/efm_user/images/failover_manager_overview.png new file mode 100644 index 00000000000..a15a28d3cf3 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/failover_manager_overview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d17e3763dc0e81372a7377e6cf7578e693cfeef91e21637b85f7e4818a37a03d +size 116126 diff --git a/product_docs/docs/efm/3.10/efm_user/images/placeholder.png b/product_docs/docs/efm/3.10/efm_user/images/placeholder.png new file mode 100755 index 00000000000..3c3bf2a4365 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/placeholder.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 +size 16849 diff --git a/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_master.png b/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_master.png new file mode 100644 index 00000000000..435cc08ba1d --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_master.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4e40f70e02570dc7a8d3f1591f2311f431009719887073ed57585a593a76ac6 +size 327010 diff --git a/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_standby.png b/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_standby.png new file mode 100644 index 00000000000..c8a11e4fa42 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/str_replication_dashboard_standby.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22525d4998a13c62071ea69c33eea474b6e773f3af6535bd8c62a2e36d906ca0 +size 337248 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_agent_exits.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_agent_exits.png new file mode 100644 index 
00000000000..f57c544993e --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_agent_exits.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:216f36072c4fe21a71d5277fb2c0868f685b77ea1cb14b3092a16a8a6f3055e8 +size 217408 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_db_down.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_db_down.png new file mode 100644 index 00000000000..df22dc9aa92 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_master_db_down.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a84eda4b8e16846448db35f2921da3ad6bb2b24ec5f0ebb828b1b3f0cb87fcf +size 266651 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_node_becomes_isolated.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_node_becomes_isolated.png new file mode 100644 index 00000000000..269eeb1ea0f --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_node_becomes_isolated.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5c49c7e6672c7fc897dc156d3899f49efe3cf90087cf4aa9e6e2544f88e9508 +size 148435 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_agent_exits.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_agent_exits.png new file mode 100644 index 00000000000..1b0a90cbe14 --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_agent_exits.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07ad86e12732575d5698c652d7caadccc847d3e567f9109270b918b144527cd7 +size 56094 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_db_down.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_db_down.png new file mode 100644 index 00000000000..e5ad35bae7a --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_standby_db_down.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d26fd9fe629f7acff573eddc0a7c81c697b1659345dc951dc1c24a0cc14787a1 +size 64713 diff --git a/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_witness_agent_exits.png b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_witness_agent_exits.png new file mode 100644 index 00000000000..356b9a3912e --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/images/supported_scenarios_witness_agent_exits.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9565b2be9589b2d12536820247f1b5719f63a19d72e8dbe902b3bad5ca093cbd +size 37332 diff --git a/product_docs/docs/efm/3.10/efm_user/index.mdx b/product_docs/docs/efm/3.10/efm_user/index.mdx new file mode 100644 index 00000000000..ec9c73be97d --- /dev/null +++ b/product_docs/docs/efm/3.10/efm_user/index.mdx @@ -0,0 +1,26 @@ +--- +title: "EDB Failover Manager" + +#legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/whats_new.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/genindex.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/conclusion.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/index.html" +--- + +**EDB Failover Manager** + +EDB Postgres Failover Manager (EFM) is a high-availability module from EnterpriseDB that enables a Postgres primary node to automatically failover to a Standby node in the event of a software or hardware failure on the primary. + +This guide provides information about installing, configuring and using Failover Manager . For information about the platforms and versions supported by Failover Manager , visit the EnterpriseDB website at: + + + +This document uses Postgres to mean either the PostgreSQL or EDB Postgres Advanced Server database. + +
+ +whats_new failover_manager_overview installing_efm configuring_efm using_efm monitoring_efm_cluster using_efm_utility controlling_efm_service controlling_logging notifications supported_scenarios upgrading_existing_cluster troubleshooting configuring_streaming_replication configuring_ssl_authentication conclusion + +
From a07d192e8b6455cb25df1f9432213914c00e65df Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Thu, 22 Apr 2021 17:44:54 +0530 Subject: [PATCH 05/50] Version update in Quickstart guide Former-commit-id: b421f46dd6466bd7f08e35986465f5010bf9d1e8 --- .../docs/efm/3.10/efm_quick_start/index.mdx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/product_docs/docs/efm/3.10/efm_quick_start/index.mdx b/product_docs/docs/efm/3.10/efm_quick_start/index.mdx index f196c7ac5c3..683ba65244f 100644 --- a/product_docs/docs/efm/3.10/efm_quick_start/index.mdx +++ b/product_docs/docs/efm/3.10/efm_quick_start/index.mdx @@ -3,8 +3,8 @@ title: "Creating a Failover Manager Cluster" #legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/4.0/genindex.html" - #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/4.0/index.html" + #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/3.10/genindex.html" + #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/3.10/index.html" --- EDB Postgres Failover Manager (Failover Manager) is a high-availability module from EnterpriseDB that enables a Postgres Primary node to automatically failover to a Standby node in the event of a software or hardware failure on the Primary. @@ -24,7 +24,7 @@ You must perform some basic installation and configuration steps before performi - You must also install Failover Manager on each primary and standby node. During Advanced Server installation, you configured an EnterpriseDB repository on each database host. You can use the EnterpriseDB repository and the `yum install` command to install Failover Manager on each node of the cluster: ```text - yum install edb-efm41 + yum install edb-efm310 ``` During the installation process, the installer will create a user named `efm` that has sufficient privileges to invoke scripts that control the Failover Manager service for clusters owned by `enterprisedb` or `postgres`. The example that follows creates a cluster named `efm`. @@ -36,7 +36,7 @@ Start the configuration process on a primary or standby node. Then, copy the con Copy the provided sample files to create EFM configuration files, and correct the ownership: ```text -cd /etc/edb/efm-4.0 +cd /etc/edb/efm-3.10 cp efm.properties.in efm.properties @@ -52,7 +52,7 @@ chown efm:efm efm.nodes Create the [encrypted password](/efm/latest/efm_user/04_configuring_efm/02_encrypting_database_password/) needed for the properties file: ```text -/usr/edb/efm-4.0/bin/efm encrypt efm +/usr/edb/efm-3.10/bin/efm encrypt efm ``` Follow the onscreen instructions to produce the encrypted version of your database password. @@ -96,29 +96,29 @@ Please note that the Failover Manager agent will not verify the content of the ` **Step 5: Configure the Other Nodes** -Copy the `efm.properties` and `efm.nodes` files to the `/etc/edb/efm-4.0` directory on the other nodes in your sample cluster. After copying the files, change the file ownership so the files are owned by `efm:efm`. The `efm.properties` file can be the same on every node, except for the following properties: +Copy the `efm.properties` and `efm.nodes` files to the `/etc/edb/efm-3.10` directory on the other nodes in your sample cluster. After copying the files, change the file ownership so the files are owned by `efm:efm`. 
The `efm.properties` file can be the same on every node, except for the following properties: - Modify the `bind.address` property to use the node’s local address. - Set `is.witness` to `true` if the node is a witness node. If the node is a witness node, the properties relating to a local database installation will be ignored. **Step 6: Start the EFM Cluster** -On any node, start the Failover Manager agent. The agent is named `edb-efm-4.0`; you can use your platform-specific service command to control the service. For example, on a CentOS/RHEL 7.x or CentOS/RHEL 8.x host use the command: +On any node, start the Failover Manager agent. The agent is named `edb-efm-3.10`; you can use your platform-specific service command to control the service. For example, on a CentOS/RHEL 7.x or CentOS/RHEL 8.x host use the command: ```text -systemctl start edb-efm-4.0 +systemctl start edb-efm-3.10 ``` On a a CentOS or RHEL 6.x host use the command: ```text -service edb-efm-4.0 start +service edb-efm-3.10 start ``` After the agent starts, run the following command to see the status of the single-node cluster. You should see the addresses of the other nodes in the `Allowed node host` list. ```text -/usr/edb/efm-4.0/bin/efm cluster-status efm +/usr/edb/efm-3.10/bin/efm cluster-status efm ``` Start the agent on the other nodes. Run the `efm cluster-status efm` command on any node to see the cluster status. @@ -126,7 +126,7 @@ Start the agent on the other nodes. Run the `efm cluster-status efm` command on If any agent fails to start, see the startup log for information about what went wrong: ```text -cat /var/log/efm-4.0/startup-efm.log +cat /var/log/efm-3.10/startup-efm.log ``` **Performing a Switchover** @@ -134,7 +134,7 @@ cat /var/log/efm-4.0/startup-efm.log If the cluster status output shows that the primary and standby(s) are in sync, you can perform a switchover with the following command: ```text -/usr/edb/efm-4.0/bin/efm promote efm -switchover +/usr/edb/efm-3.10/bin/efm promote efm -switchover ``` The command will promote a standby and reconfigure the primary database as a new standby in the cluster. To switch back, run the command again. @@ -142,5 +142,5 @@ The command will promote a standby and reconfigure the primary database as a new For quick access to online help, you can invoke the following command: ```text -/usr/edb/efm-4.0/bin/efm --help +/usr/edb/efm-3.10/bin/efm --help ``` From f29c6b96587dbc3c84fa71354ae3507ae6ad9b6e Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 23 Apr 2021 14:30:43 +0530 Subject: [PATCH 06/50] version change Former-commit-id: 38091d7fa136fb843d4df01773ff31dbdc1e437b --- .../docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx | 4 ++-- .../docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx | 2 +- .../3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx | 2 +- product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx | 8 ++++---- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx index 1d582eeb107..1298dfbc6c8 100644 --- a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/01_introduction.mdx @@ -3,12 +3,12 @@ title: "Architecture Overview" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/introduction.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/introduction.html" --- This guide explains how to configure Failover Manager and Pgpool best to leverage the benefits that they provide for Advanced Server. Using the reference architecture described in the Architecture section, you can learn how to achieve high availability by implementing an automatic failover mechanism (with Failover Manager) while scaling the system for larger workloads and an increased number of concurrent clients with read-intensive or mixed workloads to achieve horizontal scaling/read-scalability (with Pgpool). -The architecture described in this document has been developed and tested for EFM 4.1, EDB Pgpool 4.1, and Advanced Server 13. +The architecture described in this document has been developed and tested for EFM 3.10, EDB Pgpool, and Advanced Server 12. Documentation for Advanced Server and Failover Manager are available from EnterpriseDB at: diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx index 8526284e6b5..100af8abea5 100644 --- a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/02_architecture.mdx @@ -3,7 +3,7 @@ title: "Architecture" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/architecture.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/architecture.html" --- ![A typical EFM and Pgpool configuration](images/edb_ha_architecture.png) diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx index 8487a402cf1..310afb892ed 100644 --- a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx @@ -3,7 +3,7 @@ title: "Implementing High Availability with Pgpool" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/components_ha_pgpool.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/components_ha_pgpool.html" --- Failover Manager monitors the health of Postgres nodes; in the event of a database failure, Failover Manager performs an automatic failover to a Standby node. Note that Pgpool does not monitor the health of backend nodes and will not perform failover to any Standby nodes. diff --git a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx index 645be9cabc6..576f8db5f19 100644 --- a/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx +++ b/product_docs/docs/efm/3.10/efm_pgpool_ha_guide/index.mdx @@ -3,10 +3,10 @@ title: "EDB Postgres High Availability & Horizontal Read Scaling Architecture" #legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/genindex.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/introduction.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/conclusion.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/4.1/index.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/genindex.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/introduction.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/conclusion.html" + #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/index.html" --- Since high-availability and read scalability are not part of the core feature set of EDB Postgres Advanced Server, Advanced Server relies on external tools to provide this functionality. This document focuses on the functionality provided by EDB Failover Manager and Pgpool-II, and discusses the implications of a high-availability architecture formed around these tools. From 239eb86b598e583b42d4c5f7520dd9f2d0bf7095 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Thu, 20 May 2021 20:34:39 +0530 Subject: [PATCH 07/50] updates for first 3 topics Former-commit-id: 028417608e7c198eeb3cb5c5b703676b9b14406f --- .../docs/efm/3.10/efm_user/01_whats_new.mdx | 9 +- .../01_prerequisites.mdx | 10 +-- .../efm/3.10/efm_user/03_installing_efm.mdx | 85 +++++-------------- 3 files changed, 26 insertions(+), 78 deletions(-) diff --git a/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx b/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx index 33bc9b3f8f5..e613bbc5635 100644 --- a/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx +++ b/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx @@ -10,8 +10,7 @@ legacyRedirectsGenerated: The following changes have been made to EDB Postgres Failover Manager to create version 4.0: -- Encryption for database password has been improved. Encryption has also been enabled for communication between the agents. -- Standby servers are no longer stopped while selecting the new primary. This enhancement significantly speeds up the promotion process. -- To be consistent with community naming guidelines, the term Master has been replaced with Primary in the Failover Manager product and documentation. The upgrade-conf tool will handle the task of renaming the impacted properties post-upgrade. The load balancer scripts such as `script.load.balancer.attach`, `script.load. balancer.detach` will now accept character `p` instead of character `m` as an argument. -- Support has been added to delay the restart of standbys after a promotion. You can increase the availability by staggering the restart of standbys. -- A primary agent now attempts to resume health monitoring in a situation where the agent can not reach its local database but other agents can. 
+• Support for physical replication slot feature of Postgres +• Support for NAT addresses +• Introduction of a new command to check the status of a local node +• Replace `trigger_file` with `pg_ctl` utility for standby promotion diff --git a/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx index dff775c99ff..adc78655494 100644 --- a/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx +++ b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx @@ -31,7 +31,7 @@ Failover Manager requires that PostgreSQL streaming replication be configured be On database versions 11 (or prior), unless specified with the `-sourcenode` option, a `recovery.conf` file is copied from a random standby node to the stopped primary during switchover. You should ensure that the paths within the `recovery.conf` file on your standby nodes are consistent before performing a switchover. For more information about the `-sourcenode` option, please see [Promoting a Failover Manager Node](../05_using_efm/#promote_node). -On database version 12 or later, the `primary_conninfo` and `restore_command` properties are copied from a random standby node to the stopped primary during switchover (unless otherwise specified with the `-sourcenode` option). +On database version 12, the `primary_conninfo` and `restore_command` properties are copied from a random standby node to the stopped primary during switchover (unless otherwise specified with the `-sourcenode` option). **Modify the pg_hba.conf File** @@ -69,11 +69,11 @@ To prevent this, ensure that the Failover Manager agent auto starts before the d If a Linux firewall (i.e. iptables) is enabled on the host of a Failover Manager node, you may need to add rules to the firewall configuration that allow tcp communication between the Failover Manager processes in the cluster. For example: ```text -# iptables -I INPUT -p tcp --dport 7800 -j ACCEPT +# iptables -I INPUT -p tcp --dport 7800:7810 -j ACCEPT /sbin/service iptables save ``` -The command shown above opens the port 7800. Failover Manager will connect via the port that corresponds to the port specified in the cluster properties file. +The command shown above opens a small range of ports (7800 through 7810). Failover Manager will connect via the port that corresponds to the port specified in the cluster properties file. **Ensure that the Database user has Sufficient Privileges** @@ -85,12 +85,8 @@ The database user specified by the `db.user` property in the `efm.properties` fi `pg_wal_replay_resume()` - `pg_wal_replay_pause()` - `pg_reload_conf()` -The `pg_reload_conf()` privilege is required only if you have the `reconfigure.num.sync` or `reconfigure.sync.primary` property set to `true`. - For detailed information about each of these functions, please see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/index.html). 
The user must also have permissions to read the values of configuration variables; a database superuser can use the PostgreSQL `GRANT` command to provide the permissions needed: diff --git a/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx b/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx index add844dddfe..4f020cbdb51 100644 --- a/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx +++ b/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx @@ -1,14 +1,14 @@ --- title: "Installing Failover Manager" legacyRedirects: - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_debian_ubuntu.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_sles.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/post_install_tasks.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_rhel_centos_oel.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/installing_debian_ubuntu.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/installing_sles.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/post_install_tasks.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/installing_rhel_centos_oel.html" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_efm.html" + - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/installing_efm.html" --- @@ -44,6 +44,7 @@ After receiving your credentials, you must create the EnterpriseDB repository co failovermethod=priority enabled=1 gpgcheck=1 + repo_gpgcheck=1 gpgkey=ftp://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHELX/gpg-pubkey-6976a827-5164221b # End of configuration file EOF @@ -76,7 +77,7 @@ After receiving your credentials, you must create the EnterpriseDB repository co 6. Install the selected package: ```text - yum -y install edb-efm40 + yum -y install edb-efm310 ``` ### RHEL or CentOS 7 Host @@ -108,7 +109,7 @@ After receiving your credentials, you must create the EnterpriseDB repository co 5. Install the selected package: ```text - yum -y install edb-efm40 + yum -y install edb-efm310 ``` ### RHEL or CentOS 8 Host @@ -154,7 +155,7 @@ After receiving your credentials, you must create the EnterpriseDB repository co ``` 6. Install the selected package: ```text - dnf -y install edb-efm40 + dnf -y install edb-efm310 ``` ## Debian or Ubuntu Host @@ -165,63 +166,15 @@ To install Failover Manager, you must have credentials that allow access to the The following steps will walk you through using the EnterpriseDB apt repository to install Failover Manager. -### Debian Host - 1. Assume superuser privileges: ```text sudo su – ``` 2. 
Configure the EnterpriseDB repository by substituting your EnterpriseDB credentials for the username and password placeholders in the following commands: -- On Debian 9 - ```text - sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` -- On Debian 10 - - ```text - sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - - sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' - ``` - -3. Add support to your system for secure APT repositories: - ```text - apt-get install apt-transport-https - ``` -4. Add the EDB signing key: - ```text - wget -q -O - https://:@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - - ``` -5. Update the repository meta data: - ```text - apt-get update - ``` -6. Install Failover Manager: - ```text - apt-get -y install edb-efm40 - ``` - -### Ububtu Host - -1. Assume superuser privileges: - ```text - sudo su – - ``` -2. Configure the EnterpriseDB repository by substituting your EnterpriseDB credentials for the username and password placeholders in the following commands: - -- On Ubuntu 18.04 ```text sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' ``` -- On Ubuntu 20.4 - - ```text - sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - - sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' - ``` - 3. Add support to your system for secure APT repositories: ```text apt-get install apt-transport-https @@ -236,7 +189,7 @@ The following steps will walk you through using the EnterpriseDB apt repository ``` 6. Install Failover Manager: ```text - apt-get -y install edb-efm40 + apt-get -y install edb-efm310 ``` ## SLES Host @@ -285,7 +238,7 @@ You can use the zypper package manager to install a Failover Manager agent on an 6. 
Now you can use the zypper utility to install a Failover Manager agent: ```text - zypper -n install edb-efm40 + zypper -n install edb-efm310 ``` ## Performing post-installation tasks @@ -305,11 +258,11 @@ components are installed in the following locations: | Component | Location | | --------------------------------- | --------------------------- | -| Executables | /usr/edb/efm-4.0/bin | -| Libraries | /usr/edb/efm-4.0/lib | -| Cluster configuration files | /etc/edb/efm-4.0 | -| Logs | /var/log/efm- 4.0 | -| Lock files | /var/lock/efm-4.0 | -| Log rotation file | /etc/logrotate.d/efm-4.0 | -| sudo configuration file | /etc/sudoers.d/efm-40 | -| Binary to access VIP without sudo | /usr/edb/efm-4.0/bin/secure | +| Executables | /usr/edb/efm-3.10/bin | +| Libraries | /usr/edb/efm-3.10/lib | +| Cluster configuration files | /etc/edb/efm-3.10 | +| Logs | /var/log/efm- 3.10 | +| Lock files | /var/lock/efm-3.10 | +| Log rotation file | /etc/logrotate.d/efm-3.10 | +| sudo configuration file | /etc/sudoers.d/efm-310 | +| Binary to access VIP without sudo | /usr/edb/efm-3.10/bin/secure | From 3a13094e9ab0a601f3bf654282591e54dcca2246 Mon Sep 17 00:00:00 2001 From: Manjusha Vaidya Date: Thu, 3 Jun 2021 21:48:50 +0530 Subject: [PATCH 08/50] Pgpool_4.2.3_Updates Former-commit-id: aa1cd750fd5ea2febf722876102ae8ac060a295a --- ...stalling_and_configuring_the_pgpool-II.mdx | 41 +++++++++++-------- .../1.0/03_configuring_connection_pooling.mdx | 2 + product_docs/docs/pgpool/1.0/index.mdx | 13 +++--- 3 files changed, 31 insertions(+), 25 deletions(-) diff --git a/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx b/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx index 6c66a13a72f..562ded57f29 100644 --- a/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx +++ b/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx @@ -8,7 +8,12 @@ legacyRedirectsGenerated: -Pgpool-II runs as a service on Linux systems. Windows systems does not support Pgpool. The Pgpool version required by your EDB Postgres Advanced Server and PostgreSQL installation is version-specific, but the documented and supported functionality of each version is the same. The following table lists the Pgpool version and their corresponding EDB Postgres Advanced Server and PostgreSQL versions. The information in this guide applies to each version listed in the table below. +!!! Note + Pgpool-II runs as a service on Linux systems. Windows systems does not support Pgpool. + +The following table lists the Pgpool version and their corresponding EDB Postgres Advanced Server and PostgreSQL versions. + +The Pgpool version required for your EDB Postgres Advanced Server and PostgreSQL installation is version-specific, but the documented and supported functionality of each version is the same. | **Pgpool Version** | **Postgres Version** | **Supported Platforms** | |--------------------|---------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| @@ -36,7 +41,7 @@ Before installing the repository configuration, you must have credentials that a Perform the following steps to install Pgpool-II on a CentOS host: -1. To install the repository configuration, assume superuser privileges and invoke one of the following platform-specific commands: +1. 
To install the repository configuration, assume superuser privileges and invoke the platform-specific command: On CentOS 7: @@ -56,7 +61,7 @@ Perform the following steps to install Pgpool-II on a CentOS host: sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo ``` -3. Before installing Pgpool-II, execute the following command to install the Extra Packages for Enterprise Linux (EPEL): +3. Invoke the following command to install the Extra Packages for Enterprise Linux (EPEL): On CentOS 7: @@ -70,19 +75,21 @@ Perform the following steps to install Pgpool-II on a CentOS host: dnf -y install epel-release ``` -4. For CentOS 8, enable the PowerTools repository to satisfy additional package dependencies: +4. The following steps are applicable only for CentOS 8: + + a. Enable the PowerTools repository to satisfy additional package dependencies: ```text dnf config-manager --set-enabled PowerTools ``` -5. For CentOS 8, disable the built-in PostgreSQL module: + b. Disable the built-in PostgreSQL module: ```text dnf -qy module disable postgresql ``` -6. Use the platform-specific command to install Pgpool-II: +5. Invoke the platform-specific command to install Pgpool-II: On CentOS 7: @@ -98,7 +105,7 @@ Perform the following steps to install Pgpool-II on a CentOS host: Where <xx> is the Pgpool release version. - For example, to install the latest Pgpool Version 4.2, execute the following command: + For example, to install the latest Pgpool Version 4.2, invoke the following command: On CentOS 7: @@ -114,8 +121,8 @@ Perform the following steps to install Pgpool-II on a CentOS host: When you install an RPM package signed by a source that is not recognized by your system, your permission to import the key to your local server may be asked. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y` and press `Return` to continue. - Pgpool-II is installed in the `/usr/edb/pgpool/` directory, where <x.y> is the installed Pgpool-II version number. The configuration files are created in the `/etc/sysconfig/edb/pgpool` directory, where *<x.y>* is the Pgpool release version. By default, `.sample` is appended to the configuration file name; remove the `.sample` from the configuration file after copying the file to create your custom configuration. - + Pgpool-II is installed in the `/usr/edb/pgpool/` directory, where <x.y> is the installed Pgpool-II version number. + ## Installing Pgpool-II on an RHEL Host @@ -124,7 +131,7 @@ Before creating the repository configuration file, you must have credentials tha Perform the following steps to install Pgpool-II: -1. To create the repository configuration file, assume superuser privileges and invoke one of the following platform-specific commands: +1. To create the repository configuration file, assume superuser privileges and invoke the platform-specific command: On RHEL 7: @@ -144,7 +151,7 @@ Perform the following steps to install Pgpool-II: sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo ``` -3. Before installing Pgpool, execute the following command to install the Extra Packages for Enterprise Linux (EPEL): +3. Invoke the following command to install the Extra Packages for Enterprise Linux (EPEL): On RHEL 7: @@ -180,7 +187,7 @@ Perform the following steps to install Pgpool-II: dnf -qy module disable postgresql ``` -6. Use the platform-specific command to install Pgpool-II: +6. 
Invoke the platform-specific command to install Pgpool-II: On RHEL 7: @@ -196,7 +203,7 @@ Perform the following steps to install Pgpool-II: Where <xx> is the Pgpool release version. - For example, to install the latest Pgpool Version 4.2, execute the following command: + For example, to install the latest Pgpool Version 4.2, invoke the following command: On RHEL 7: @@ -212,8 +219,8 @@ Perform the following steps to install Pgpool-II: When you install an RPM package that is signed by a source that is not recognized by your system, your permission to import the key to your local server may be asked for. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. - Pgpool-II will be installed in the `/usr/edb/pgpool/` directory, where <x.y> is the installed Pgpool-II version number. The configuration files are created in the `/etc/sysconfig/edb/pgpool` directory, where *<x.y>* is the Pgpool release version. By default, `.sample` is appended to the configuration file name; remove the `.sample` from the configuration file after copying the file to create your custom configuration. - + Pgpool-II will be installed in the `/usr/edb/pgpool/` directory, where <x.y> is the installed Pgpool-II version number. + ## Installing Pgpool-II on an RHEL/CentOS 7 PPCLE Host @@ -252,7 +259,7 @@ Perform the following steps to install Pgpool-II on an RHEL/CentOS 7 PPC64LE Hos sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo ``` -4. Before installing Pgpool-II, execute the following command to install the Extra Packages for Enterprise Linux (EPEL): +4. Invoke the following command to install the Extra Packages for Enterprise Linux (EPEL): ```text yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm @@ -348,7 +355,7 @@ You can use the Zypper package manager to install Pgpool-II on a SLES 12 host. Z sudo su - ``` -2. Use the following command to add the EDB repository to your SLES host: +2. Invoke the following command to add the EDB repository to your SLES host: ```text zypper addrepo https://zypp.enterprisedb.com/suse/edb-sles.repo diff --git a/product_docs/docs/pgpool/1.0/03_configuring_connection_pooling.mdx b/product_docs/docs/pgpool/1.0/03_configuring_connection_pooling.mdx index 3f659567e68..b5e03731518 100644 --- a/product_docs/docs/pgpool/1.0/03_configuring_connection_pooling.mdx +++ b/product_docs/docs/pgpool/1.0/03_configuring_connection_pooling.mdx @@ -6,6 +6,8 @@ legacyRedirectsGenerated: - "/edb-docs/d/pgpool-ii/user-guides/pgpool-ii-guide/1.0/configuring_connection_pooling.html" --- +The configuration files are created in the `/etc/sysconfig/edb/pgpool` directory, where *<x.y>* is the Pgpool release version. By default, `.sample` is appended to the configuration file name; remove the `.sample` from the configuration file after copying the file to create your custom configuration. + !!! Note The configuration options for Pgpool-II are extensive; consider the options listed below as a starting point only. For more information about configuring and using Pgpool-II, please consult the [project website](https://www.pgpool.net/docs/latest/en/html/index.html). 
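The note above assumes you have already created working copies of the shipped sample configuration files. A minimal sketch of that step, assuming Pgpool-II 4.2 and the default `/etc/sysconfig/edb/pgpool<x.y>` directory described above (the exact file names and the version portion of the path may differ in your installation):

```text
# Sketch only: copy the samples and drop the .sample suffix to create editable files.
cd /etc/sysconfig/edb/pgpool4.2
cp pgpool.conf.sample pgpool.conf
cp pcp.conf.sample pcp.conf
# Edit the copied files to apply your custom connection pooling settings.
```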
diff --git a/product_docs/docs/pgpool/1.0/index.mdx b/product_docs/docs/pgpool/1.0/index.mdx index d57997a9738..e365dea4148 100644 --- a/product_docs/docs/pgpool/1.0/index.mdx +++ b/product_docs/docs/pgpool/1.0/index.mdx @@ -11,7 +11,7 @@ legacyRedirectsGenerated: - "/edb-docs/p/pgpool-ii/1.0" --- -Pgpool-II acts as a middleman between client applications and a PostgreSQL database server. +Pgpool-II acts as a middleware between client applications and a PostgreSQL database server. Using Pgpool-II adds the following benefits to your application connection infrastructure: @@ -33,16 +33,13 @@ EDB supports the following Pgpool-II functionality: **Conventions Used in this Guide** -In this guide: - -- The term Postgres refers to either PostgreSQL or EDB Postgres Advanced Server. - -!!! Note - If you are using the pdf version of this document, using the cut/paste command may result in extra spaces or carriage returns in the pasted command. If a command fails, check the command carefully for additional characters. +The term Postgres refers to either PostgreSQL or EDB Postgres Advanced Server. **What's New** -1. Upstream merge with Pgpool-II [4.2.0](https://www.pgpool.net/docs/42/en/html/release-4-2-0.html) and [4.2.1](https://www.pgpool.net/docs/42/en/html/release-4-2-1.html). +Upstream merge with Pgpool-II 4.2.3 + +1. Upstream merge with Pgpool-II [4.2.3](https://www.pgpool.net/docs/42/en/html/release-4-2-3.html). 2. 4.2 is a major release. For more details on migrating to 4.2, see the [Migration Section](https://www.pgpool.net/docs/42/en/html/release-4-2-0.html#AEN10359).
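Because Pgpool-II sits between client applications and the database server, clients connect to the Pgpool-II listener rather than to the Postgres port directly. A hedged illustration, assuming Pgpool-II is listening on its default port (9999) on a host named `pgpool-host`; the host, user, and database names are placeholders:

```text
# Connect through Pgpool-II; it forwards the session to a backend Postgres node.
psql -h pgpool-host -p 9999 -U enterprisedb edb
```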
From ad8aef57b9c840edf36a911dca7d8765b43b0f33 Mon Sep 17 00:00:00 2001 From: Manjusha Vaidya Date: Fri, 4 Jun 2021 16:12:44 +0530 Subject: [PATCH 09/50] Pgpool_4.2.3_June4 Former-commit-id: 73065ce820d8aef6c3e79046b46deeb7ce9c7c12 --- ...stalling_and_configuring_the_pgpool-II.mdx | 43 +++++++++++-------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx b/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx index 562ded57f29..6eb35b5045b 100644 --- a/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx +++ b/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx @@ -285,7 +285,7 @@ Perform the following steps to install Pgpool-II on an RHEL/CentOS 7 PPC64LE Hos To install Pgpool-II on a Debian or Ubuntu host, you must have credentials that allow access to the EDB repository. To request credentials for the repository, visit the [EDB website](https://www.enterprisedb.com/user). -The following steps walk you through using the EDB apt repository to install a Debian package. When using the commands, replace the `username` and `password` with the credentials provided by EDB. +Perform the following steps to to install a Debian package using the EDB apt repository. 1. Assume superuser privileges: @@ -295,13 +295,13 @@ The following steps walk you through using the EDB apt repository to install a D 2. Configure access to the EDB repository on your system: - On Debian 9, Ubuntu 18, and Ubuntu 20: + On Debian 9, Ubuntu 18, and Ubuntu 20, replace the `username` and `password` with your EDB credentials: ```text - sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + sh -c 'echo "deb https://:@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' ``` - On Debian 10: + On Debian 10: a. Set up the EDB repository: @@ -309,7 +309,7 @@ The following steps walk you through using the EDB apt repository to install a D sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' ``` - b. Substitute your EDB credentials for the `username` and `password` placeholders in the following command: + b. Replace the `username` and `password` with your EDB credentials: ```text sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' @@ -341,7 +341,7 @@ The following steps walk you through using the EDB apt repository to install a D Where <xx> is the Pgpool release version. -After installing Pgpool-II on a Debian host, the configuration files are located in the `/etc/edb/edb-pgpool` directory, where <x.y> is the Pgpool release version. By default, `.sample` is appended to the configuration file name; `.sample` should be removed after copying the file to create your custom configuration. +After installing Pgpool-II on a Debian host, the configuration files are located in the `/etc/edb/edb-pgpool` directory, where <x.y> is the Pgpool release version. @@ -399,7 +399,7 @@ You can use the Zypper package manager to install Pgpool-II on a SLES 12 host. Z zypper -n install java-1_8_0-openjdk ``` -9. Then, use the Zypper utility to install Pgpool-II: +9. 
Use the Zypper utility to install Pgpool-II: ```text zypper install -n edb-pgpool @@ -411,45 +411,50 @@ You can use the Zypper package manager to install Pgpool-II on a SLES 12 host. Z ## Installing Pgpool-II Using the Linux Graphical Installer -Graphical installers for Pgpool-II are available via StackBuilder Plus (for EDB Postgres Advanced Server hosts) or Stack Builder (on PostgreSQL hosts). You can access StackBuilder Plus through your Linux start menu. Pgpool does not support Windows systems. +Graphical installers for Pgpool-II are available via StackBuilder Plus (on EDB Postgres Advanced Server hosts) or Stack Builder (on PostgreSQL hosts). -Perform the following steps to install Pgpool-II: +!!! Note + Pgpool does not support Windows systems. + +Perform the following steps to install Pgpool-II by accessing StackBuilder Plus through your Linux start menu: -1. Open StackBuilder Plus and select your EDB Postgres Advanced Server installation from the drop-down list on the `Welcome` window. Click `Next` to continue to the application selection page. Expand the `Add-ons, tools and utilities` node, and check the box next to the Pgpool-II version you want to install and download the Pgpool-II installer. Click `Next` to continue. Provide the credentials and click `Next`. -2. The selected packages and the default download directory are displayed. Change the download directory location if required. Click `Next`. -3. Once you have downloaded the installation files, a confirmation message is displayed. Click `Next` to start the Pgpool-II installation. -4. Select an installation language and click `OK`. -5. The Pgpool installer welcomes you to the setup wizard. +1. Open StackBuilder Plus and select your EDB Postgres Advanced Server installation from the drop-down list. Click `Next` to continue to the application selection page. +2. Expand the `Add-ons, tools and utilities` node, and check the box next to the Pgpool-II version you want to install and download the Pgpool-II installer. +3. Click `Next` to continue. Provide the credentials and click `Next`. +4. The selected packages and the default download directory are displayed. Click `Next`. +5. Once you have downloaded the installation files, a confirmation message is displayed. Click `Next` to start the Pgpool-II installation. +6. Select an installation language and click `OK`. +7. The Pgpool installer welcomes you to the setup wizard. ![The Pgpool-II Welcome window](images/pp1.png) Fig. 1: The Pgpool-II Welcome window -6. Use the `Installation Directory` field to specify the directory where you would install the Pgpool-II software (the default installation directory is `/opt/edb`). Then, click `Next` to continue. +8. Use the `Installation Directory` field to specify the directory where you would install the Pgpool-II software (the default installation directory is `/opt/edb`). Then, click `Next` to continue. ![The Pgpool-II Installation Details Window](images/pp2.png) Fig. 2: The Pgpool-II Installation Details Window -7. Use the `Operating System User` field to specify the Linux operating system user's name that Pgpool-II will change to after startup. Then, click `Next` to continue. +9. Use the `Operating System User` field to specify the Linux operating system user's name that Pgpool-II will change to after startup. Then, click `Next` to continue. ![The Pgpool-II Operating User window](images/pgpool3.png) Fig. 3: The Pgpool-II Operating User window -8. 
The `Ready to Install` window notifies you when the installer has all of the information needed to install Pgpool-II on your system. Click `Next` to install Pgpool-II. +10. The `Ready to Install` window notifies you when the installer has all of the information needed to install Pgpool-II on your system. Click `Next` to install Pgpool-II. ![The Ready to Install window](images/pgpool4.png) Fig. 4: The Ready to Install window -9. Progress bars inform you as the installation progresses. +11. Progress bars inform you as the installation progresses. ![The installation progresses](images/pgpool5.png) Fig. 5: The installation progresses -10. The installer notifies you when the setup wizard has completed the Pgpool-II installation. Click `Finish` to exit the installer. +12. The installer notifies you when the setup wizard has completed the Pgpool-II installation. Click `Finish` to exit the installer. ![The installation is complete](images/pp20.png) From 7b3ab257892db639860702d3b070b68be6d44309 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 4 Jun 2021 17:54:58 +0530 Subject: [PATCH 10/50] Updated the files to fix the diff between EFM 3.10 and EFM 4.0 Former-commit-id: 65a05bd50e6b1fd5a88b5cf8a9c3c565b4eefeab --- .../docs/efm/3.10/efm_user/01_whats_new.mdx | 5 +- .../01_prerequisites.mdx | 3 - .../02_failover_manager_overview/index.mdx | 3 - .../efm/3.10/efm_user/03_installing_efm.mdx | 10 +- .../01_encrypting_database_password.mdx | 13 ++- .../01_cluster_properties/index.mdx | 56 +---------- ...ter_members.mdx => 02_cluster_members.mdx} | 7 +- .../02_encrypting_database_password.mdx | 81 ---------------- ...s.mdx => 03_extending_efm_permissions.mdx} | 26 ++---- ...dresses.mdx => 04_using_vip_addresses.mdx} | 5 +- .../efm_user/04_configuring_efm/index.mdx | 3 - .../docs/efm/3.10/efm_user/05_using_efm.mdx | 15 ++- .../efm_user/06_monitoring_efm_cluster.mdx | 4 +- .../3.10/efm_user/07_using_efm_utility.mdx | 5 +- .../efm_user/08_controlling_efm_service.mdx | 23 ++--- .../3.10/efm_user/09_controlling_logging.mdx | 7 +- .../efm/3.10/efm_user/10_notifications.mdx | 93 +++++++++---------- .../3.10/efm_user/11_supported_scenarios.mdx | 3 - .../12_upgrading_existing_cluster.mdx | 51 ++++------ .../efm/3.10/efm_user/13_troubleshooting.mdx | 3 - .../14_configuring_streaming_replication.mdx | 3 - .../15_configuring_ssl_authentication.mdx | 3 - product_docs/docs/efm/3.10/efm_user/index.mdx | 6 -- 23 files changed, 109 insertions(+), 319 deletions(-) rename product_docs/docs/efm/3.10/efm_user/04_configuring_efm/{03_cluster_members.mdx => 02_cluster_members.mdx} (89%) delete mode 100644 product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx rename product_docs/docs/efm/3.10/efm_user/04_configuring_efm/{04_extending_efm_permissions.mdx => 03_extending_efm_permissions.mdx} (79%) rename product_docs/docs/efm/3.10/efm_user/04_configuring_efm/{05_using_vip_addresses.mdx => 04_using_vip_addresses.mdx} (95%) diff --git a/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx b/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx index e613bbc5635..53bebcc410f 100644 --- a/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx +++ b/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx @@ -1,14 +1,11 @@ --- title: "What’s New" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.1/whats_new.html" --- -The following changes have been made to EDB Postgres Failover Manager to create version 4.0: +The following changes have been made to EDB Postgres Failover Manager to create version 3.10: • Support for physical replication slot feature of Postgres • Support for NAT addresses diff --git a/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx index adc78655494..589d66db19c 100644 --- a/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx +++ b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/01_prerequisites.mdx @@ -1,9 +1,6 @@ --- title: "Prerequisites" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.1/prerequisites.html" --- diff --git a/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx index fac4ec73d30..da04067111d 100644 --- a/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx +++ b/product_docs/docs/efm/3.10/efm_user/02_failover_manager_overview/index.mdx @@ -1,9 +1,6 @@ --- title: "Failover Manager Overview" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.1/failover_manager_overview.html" --- diff --git a/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx b/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx index 4f020cbdb51..60310c7e785 100644 --- a/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx +++ b/product_docs/docs/efm/3.10/efm_user/03_installing_efm.mdx @@ -1,14 +1,6 @@ --- title: "Installing Failover Manager" -legacyRedirects: - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/installing_debian_ubuntu.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/installing_sles.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/post_install_tasks.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/installing_rhel_centos_oel.html" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/installing_efm.html" + --- diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx index 77f0a39f8c9..d6f2e7b3ea1 100644 --- a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx @@ -1,10 +1,9 @@ --- title: "Encrypting Your Database Password" --- - -Failover Manager requires you to encrypt your database password before including it in the cluster properties file. Use the [efm utility](../../07_using_efm_utility/#efm_encrypt) (located in the `/usr/edb/efm-4.0/bin` directory) to encrypt the password. 
When encrypting a password, you can either pass the password on the command line when you invoke the utility, or use the `EFMPASS` environment variable. +Failover Manager requires you to encrypt your database password before including it in the cluster properties file. Use the [efm utility](../../07_using_efm_utility/#efm_encrypt) (located in the `/usr/edb/efm-3.10/bin` directory) to encrypt the password. When encrypting a password, you can either pass the password on the command line when you invoke the utility, or use the `EFMPASS` environment variable. To encrypt a password, use the command: @@ -31,7 +30,7 @@ The following example demonstrates using the encrypt utility to encrypt a passwo # efm encrypt acctg This utility will generate an encrypted password for you to place in your EFM cluster property file: -/etc/edb/efm-4.0/acctg.properties +/etc/edb/efm-3.10/acctg.properties Please enter the password and hit enter: Please enter the password again to confirm: The encrypted password is: 516b36fb8031da17cfbc010f7d09359c @@ -45,16 +44,16 @@ db.password.encrypted=516b36fb8031da17cfbc010f7d09359c After receiving your encrypted password, paste the password into the properties file and start the Failover Manager service. If there is a problem with the encrypted password, the Failover Manager service will not start: ```text -[witness@localhost ~]# systemctl start edb-efm-4.0 -Job for edb-efm-4.0.service failed because the control process exited with error code. See "systemctl status edb-efm-4.0.service" and "journalctl -xe" for details. +[witness@localhost ~]# systemctl start edb-efm-3.10 +Job for edb-efm-3.10.service failed because the control process exited with error code. See "systemctl status edb-efm-3.10.service" and "journalctl -xe" for details. ``` -If you receive this message when starting the Failover Manager service, please see the startup log (located in `/var/log/efm-4.0/startup-efm.log`) for more information. +If you receive this message when starting the Failover Manager service, please see the startup log (located in `/var/log/efm-3.10/startup-efm.log`) for more information. If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, startup information is also available with the following command: ```text -systemctl status edb-efm-4.0 +systemctl status edb-efm-3.10 ``` To prevent a cluster from inadvertently connecting to the database of another cluster, the cluster name is incorporated into the encrypted password. If you modify the cluster name, you will need to re-encrypt the database password and update the cluster properties file. diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx index a447ad897d6..6d409b62c0f 100644 --- a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx @@ -1,19 +1,16 @@ --- title: "The Cluster Properties File" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/cluster_properties.html" --- -Each node in a Failover Manager cluster has a properties file (by default, named `efm.properties`) that contains the properties of the individual node on which it resides. 
The Failover Manager installer creates a file template for the properties file named `efm.properties.in` in the `/etc/edb/efm-4.0` directory. +Each node in a Failover Manager cluster has a properties file (by default, named `efm.properties`) that contains the properties of the individual node on which it resides. The Failover Manager installer creates a file template for the properties file named `efm.properties.in` in the `/etc/edb/efm-3.10` directory. After completing the Failover Manager installation, you must make a working copy of the template before modifying the file contents: ```text -# cp /etc/edb/efm-4.0/efm.properties.in /etc/edb/efm-4.0/efm.properties +# cp /etc/edb/efm-3.10/efm.properties.in /etc/edb/efm-3.10/efm.properties ``` After copying the template file, change the owner of the file to `efm`: @@ -27,7 +24,7 @@ After copying the template file, change the owner of the file to `efm`: After creating the cluster properties file, add (or modify) configuration parameter values as required. For detailed information about each property, see [Specifying Cluster Properties](#specifying-cluster-properties). -The property files are owned by `root`. The Failover Manager service script expects to find the files in the `/etc/edb/efm-4.0 directory`. If you move the property file to another location, you must create a symbolic link that specifies the new location. +The property files are owned by `root`. The Failover Manager service script expects to find the files in the `/etc/edb/efm-3.10 directory`. If you move the property file to another location, you must create a symbolic link that specifies the new location. !!! Note All user scripts referenced in the properties file will be invoked as the Failover Manager user. @@ -69,7 +66,6 @@ Use the properties in the `efm.properties` file to specify connection, administr | [user.email](#user_email) | | | | This value must be same for all the agents; can be left blank if using a notification script | | [from.email](#from_email) | | | [efm@localhost](mailto:efm@localhost) | Leave blank to use the default [efm@localhost](mailto:efm@localhost) | | [notification.level](#notification_level) | Y | Y | INFO | See the [list of notifications](../../10_notifications/#notifications) | -| [notification.text.prefix](#notification_text_prefix) | | | | | | [script.notification](#script_notification) | | | | Required if user.email property is not used; both parameters can be used together | | [bind.address](#bind_address) | Y | Y | | Example: <ip_address>:<port> | | [external.address](#external_address) | | | | Example: <ip_address/hostname> | @@ -79,8 +75,7 @@ Use the properties in the `efm.properties` file to specify connection, administr | [local.timeout](#local_timeout) | Y | | 60 | | | [local.timeout.final](#local_timeout_final) | Y | | 10 | | | [remote.timeout](#remote_timeout) | Y | Y | 10 | | -| [node.timeout](#node_timeout) | Y | Y | 50 | This value must be same for all the agents | -| [encrypt.agent.messages](#encrypt_agent_messages) | Y | Y | false | This value must be same for all the agents | +| [node.timeout](#node_timeout) | Y | Y | 50 | This value must be same for all the agents | | [stop.isolated.primary](#stop_isolated_primary) | Y | | true | | | [stop.failed.primary](#stop_failed_primary) | Y | | true | | | [primary.shutdown.as.failure](#primary_shutdown_as_failure) | Y | Y | false | | @@ -94,7 +89,6 @@ Use the properties in the `efm.properties` file to specify connection, administr | [auto.reconfigure](#auto_reconfigure) | Y | | true | 
This value must be same for all the agents | | [promotable](#promotable) | Y | | true | | | [use.replay.tiebreaker](#use_replay_tiebreaker) | Y | Y | true | This value must be same for all the agents | -| [standby.restart.delay](#standby_restart_delay) | | | 0 | | | [application.name](#application_name) | | | | Set to replace the application_name portion of the primary_conninfo entry with this property value before starting the original primary database as a standby. | | [restore.command](#restore_command) | | | | Example: restore.command=scp <db_service_owner>@%h: <archive_path>/%f %p | | [reconfigure.num.sync](#reconfigure_num_sync) | Y | | false | | @@ -328,22 +322,6 @@ Use the `notification.level` property to specify the minimum severity level at w notification.level=INFO ``` - - -Use the `notification.text.prefix` property to specify the text to be added to the beginning of every notification. - -```text -# Text to add to the beginning of every notification. This could -# be used to help identify what the cluster is used for, the role -# of this node, etc. To use multiple lines, add a backslash \ to -# the end of a line of text. To include a newline use \n. -# Example: -# notification.text.prefix=Development cluster for Example dept.\n\ -# Used by Dev and QA \ -# See Example group for questions. -notification.text.prefix= -``` - Use the `script.notification` property to specify the path to a user-supplied script that acts as a notification service; the script will be passed a message subject and a message body. The script will be invoked each time Failover Manager generates a user notification. @@ -472,17 +450,6 @@ Use the `node.timeout` property to specify the number of seconds that an agent w node.timeout=50 ``` - - -Use the `encrypt.agent.messages` property to specify if the messages sent between agents should be encrypted. - -```text -# Set to true to encrypt messages that are sent between agents. -# This property must be the same on all agents or else the agents -# will not be able to connect. -encrypt.agent.messages=false -``` - Use the `stop.isolated.primary` property to instruct Failover Manager to shut down the database if a primary agent detects that it is isolated. When true (the default), Failover Manager will stop the database before invoking the script specified in the `script.primary.isolated` property. @@ -682,21 +649,6 @@ If the same amount of data has been written to more than one standby node, and a use.replay.tiebreaker=true ``` - - -Use the `standby.restart.delay` property to specify the time in seconds that the standby should wait before it gets reconfigured (stopped/started) to follow the new primary after a promotion. - -```text -# Time in seconds for this standby to delay restarting to follow the -# primary after a promotion. This can be used to have standbys restart -# at different times to increase availability. Caution should be used -# when using this feature, as a delayed standby will not be following -# the new primary and care must be taken that the new primary retains -# enough WAL for the standby to follow it. -# Please see the user's guide for more information. -standby.restart.delay=0 -``` - You can use the `application.name` property to provide the name of an application that will be copied to the `primary_conninfo` parameter before restarting an old primary node as a standby. 
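To tie several of these properties together, the following is a brief illustrative excerpt of an `efm.properties` file using the default values discussed above; the bind address and port are placeholders that you would replace with the values for your node:

```text
# Excerpt only; a real efm.properties file contains many more properties.
# bind.address uses a placeholder <ip_address>:<port> for this node.
bind.address=192.168.27.10:7800
notification.level=INFO
local.timeout=60
local.timeout.final=10
remote.timeout=10
# node.timeout must be the same on all agents.
node.timeout=50
auto.reconfigure=true
use.replay.tiebreaker=true
```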
diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_cluster_members.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_cluster_members.mdx similarity index 89% rename from product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_cluster_members.mdx rename to product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_cluster_members.mdx index 5b7b5b030bf..2c82421d305 100644 --- a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_cluster_members.mdx +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_cluster_members.mdx @@ -1,19 +1,16 @@ --- title: "The Cluster Members File" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/cluster_members.html" --- -Each node in a Failover Manager cluster has a cluster members file (by default, named efm.nodes) that contains a list of the current Failover Manager cluster members. When an agent starts, it uses the file to locate other cluster members. The Failover Manager installer creates a file template for the cluster members file named `efm.nodes.in` in the `/etc/edb/efm-4.0` directory. +Each node in a Failover Manager cluster has a cluster members file (by default, named efm.nodes) that contains a list of the current Failover Manager cluster members. When an agent starts, it uses the file to locate other cluster members. The Failover Manager installer creates a file template for the cluster members file named `efm.nodes.in` in the `/etc/edb/efm-3.10` directory. After completing the Failover Manager installation, you must make a working copy of the template: ```text -cp /etc/edb/efm-4.0/efm.nodes.in /etc/edb/efm-4.0/efm.nodes +cp /etc/edb/efm-3.10/efm.nodes.in /etc/edb/efm-3.10/efm.nodes ``` After copying the template file, change the owner of the file to `efm`: diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx deleted file mode 100644 index c0456f97d8c..00000000000 --- a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/02_encrypting_database_password.mdx +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: "Encrypting Your Database Password" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/encrypting_database_password.html" ---- - - - -Failover Manager requires you to encrypt your database password before including it in the cluster properties file. Use the [efm utility](../07_using_efm_utility/#efm_encrypt) (located in the `/usr/edb/efm-4.0/bin` directory) to encrypt the password. When encrypting a password, you can either pass the password on the command line when you invoke the utility, or use the `EFMPASS` environment variable. - -To encrypt a password, use the command: - -```text -# efm encrypt [ --from-env ] -``` - -Where `` specifies the name of the Failover Manager cluster. - -If you include the `--from-env` option, you must export the value you wish to encrypt before invoking the encryption utility. For example: - -```text -export EFMPASS=password -``` - -If you do not include the `--from-env` option, Failover Manager will prompt you to enter the database password twice before generating an encrypted password for you to place in your cluster property file. 
When the utility shares the encrypted password, copy and paste the encrypted password into the cluster property files. - -!!! Note - Many Java vendors ship their version of Java with full-strength encryption included, but not enabled due to export restrictions. If you encounter an error that refers to an illegal key size when attempting to encrypt the database password, you should download and enable a Java Cryptography Extension (JCE) that provides an unlimited policy for your platform. - -The following example demonstrates using the encrypt utility to encrypt a password for the `acctg` cluster: - -```text -# efm encrypt acctg -This utility will generate an encrypted password for you to place in - your EFM cluster property file: -/etc/edb/efm-4.0/acctg.properties -Please enter the password and hit enter: -Please enter the password again to confirm: -The encrypted password is: 516b36fb8031da17cfbc010f7d09359c -Please paste this into your acctg.properties file -db.password.encrypted=516b36fb8031da17cfbc010f7d09359c -``` - -!!! Note - The utility will notify you if a properties file does not exist. - -After receiving your encrypted password, paste the password into the properties file and start the Failover Manager service. If there is a problem with the encrypted password, the Failover Manager service will not start: - -```text -[witness@localhost ~]# systemctl start edb-efm-4.0 -Job for edb-efm-4.0.service failed because the control process exited with error code. See "systemctl status edb-efm-4.0.service" and "journalctl -xe" for details. -``` - -If you receive this message when starting the Failover Manager service, please see the startup log (located in `/var/log/efm-4.0/startup-efm.log`) for more information. - -If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, startup information is also available with the following command: - -```text -systemctl status edb-efm-4.0 -``` - -To prevent a cluster from inadvertently connecting to the database of another cluster, the cluster name is incorporated into the encrypted password. If you modify the cluster name, you will need to re-encrypt the database password and update the cluster properties file. - -**Using the EFMPASS Environment Variable** - -The following example demonstrates using the --from-env environment variable when encrypting a password. Before invoking the `efm encrypt` command, set the value of `EFMPASS` to the password (`1safepassword`): - -```text -# export EFMPASS=1safepassword -``` - -Then, invoke `efm encrypt`, specifying the `--from-env` option: - -```text -# efm encrypt acctg --from-env -# 7ceecd8965fa7a5c330eaa9e43696f83 -``` - -The encrypted password (`7ceecd8965fa7a5c330eaa9e43696f83`) is returned as a text value; when using a script, you can check the exit code of the command to confirm that the command succeeded. A successful execution returns `0`. 
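As a sketch of the scripted usage described above — setting the `EFMPASS` environment variable and checking the exit code — the following hypothetical wrapper captures the encrypted value for later insertion into the cluster properties file; the cluster name `acctg` and the password are placeholders:

```text
#!/bin/bash
# Encrypt the database password non-interactively and confirm success via the exit code.
export EFMPASS=1safepassword
encrypted=$(efm encrypt acctg --from-env)
if [ $? -eq 0 ]; then
    echo "db.password.encrypted=$encrypted"
else
    echo "efm encrypt failed" >&2
fi
```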
diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_extending_efm_permissions.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_extending_efm_permissions.mdx similarity index 79% rename from product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_extending_efm_permissions.mdx rename to product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_extending_efm_permissions.mdx index f49e8489cfb..f016a002f41 100644 --- a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_extending_efm_permissions.mdx +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/03_extending_efm_permissions.mdx @@ -1,9 +1,6 @@ --- title: "Extending Failover Manager Permissions" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/extending_efm_permissions.html" --- @@ -34,18 +31,15 @@ The `efm-41` file is located in `/etc/sudoers.d`, and contains the following ent # If you run your db service under a non-default account, you will need to copy # this file to grant the proper permissions and specify the account in your efm # cluster properties file by changing the 'db.service.owner' property. -efm ALL=(postgres) NOPASSWD: /usr/edb/efm-4.0/bin/efm_db_functions -efm ALL=(enterprisedb) NOPASSWD: /usr/edb/efm-4.0/bin/efm_db_functions +efm ALL=(postgres) NOPASSWD: /usr/edb/efm-3.10/bin/efm_db_functions +efm ALL=(enterprisedb) NOPASSWD: /usr/edb/efm-3.10/bin/efm_db_functions # Allow user 'efm' to sudo efm_root_functions as 'root' to write/delete the PID file, # validate the db.service.owner property, etc. -efm ALL=(ALL) NOPASSWD: /usr/edb/efm-4.0/bin/efm_root_functions +efm ALL=(ALL) NOPASSWD: /usr/edb/efm-3.10/bin/efm_root_functions # Allow user 'efm' to sudo efm_address as root for VIP tasks. -efm ALL=(ALL) NOPASSWD: /usr/edb/efm-4.0/bin/efm_address - -# Allow user 'efm' to sudo efm_pgpool_functions as root for pgpool tasks. -efm ALL=(ALL) NOPASSWD: /usr/edb/efm-4.0/bin/efm_pgpool_functions +efm ALL=(ALL) NOPASSWD: /usr/edb/efm-3.10/bin/efm_address # relax tty requirement for user 'efm' Defaults:efm !requiretty @@ -78,7 +72,7 @@ To run Failover Manager without sudo, you must select a database process owner t usermod -a -G efm enterprisedb ``` - This should allow the user to write to `/var/run/efm-4.0` and `/var/lock/efm-4.0`. + This should allow the user to write to `/var/run/efm-3.10` and `/var/lock/efm-3.10`. 2. If you are reusing a cluster name, remove any previously created log files; the new user will not be able to write to log files created by the default (or other) owner. @@ -87,9 +81,9 @@ To run Failover Manager without sudo, you must select a database process owner t ```text su - enterprisedb - cp /etc/edb/efm-4.0/efm.properties.in .properties + cp /etc/edb/efm-3.10/efm.properties.in .properties - cp /etc/edb/efm-4.0/efm.nodes.in /.nodes + cp /etc/edb/efm-3.10/efm.nodes.in /.nodes ``` Then, modify the cluster properties file, providing the name of the user in the `db.service.owner` property. You must also ensure that the `db.service.name` property is blank; without sudo, you cannot run services without root access. 
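For example, when running without sudo under a non-default account, the relevant properties might look like the sketch below; `enterprisedb` is a placeholder for whichever database process owner you selected:

```text
# Run Failover Manager as the database process owner; leave the service name
# blank because services cannot be managed without root access.
db.service.owner=enterprisedb
db.service.name=
```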
@@ -97,19 +91,19 @@ Then, modify the cluster properties file, providing the name of the user in the After modifying the configuration, the new user can control Failover Manager with the following command: ```text -/usr/edb/efm-4.0/bin/runefm.sh start|stop .properties +/usr/edb/efm-3.10/bin/runefm.sh start|stop .properties ``` Where `` specifies the full path of the cluster properties file. Note that the user must ensure that the full path to the properties file must be provided whenever the non-default user is controlling agents or using the efm script. To allow the new user to manage Failover Manager as a service, you must provide a custom script or unit file. -Failover Manager uses a binary named `manage-vip` that resides in `/usr/edb/efm-4.0/bin/secure/` to perform VIP management operations without sudo privileges. This script uses setuid to acquire with the privileges needed to manage Virtual IP addresses. +Failover Manager uses a binary named `manage-vip` that resides in `/usr/edb/efm-3.10/bin/secure/` to perform VIP management operations without sudo privileges. This script uses setuid to acquire with the privileges needed to manage Virtual IP addresses. - This directory is only accessible to root and users in the `efm` group. - The binary is only executable by root and the `efm` group. -For security reasons, we recommend against modifying the access privileges of the `/usr/edb/efm-4.0/bin/secure/` directory or the `manage-vip` script. +For security reasons, we recommend against modifying the access privileges of the `/usr/edb/efm-3.10/bin/secure/` directory or the `manage-vip` script. For more information about using Failover Manager without sudo, visit: diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/05_using_vip_addresses.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_using_vip_addresses.mdx similarity index 95% rename from product_docs/docs/efm/3.10/efm_user/04_configuring_efm/05_using_vip_addresses.mdx rename to product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_using_vip_addresses.mdx index 99f5a68f4a2..ee8b2457714 100644 --- a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/05_using_vip_addresses.mdx +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/04_using_vip_addresses.mdx @@ -1,9 +1,6 @@ --- title: "Using Failover Manager with Virtual IP Addresses" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/using_vip_addresses.html" --- @@ -15,7 +12,7 @@ Failover Manager uses the `efm_address` script to assign or release a virtual IP By default, the script resides in: - `/usr/edb/efm-4.0/bin/efm_address` + `/usr/edb/efm-3.10/bin/efm_address` Failover Manager uses the following command variations to assign or release an IPv4 or IPv6 IP address. diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx index a89e7f687b9..ff826e281a4 100644 --- a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/index.mdx @@ -1,9 +1,6 @@ --- title: "Configuring Failover Manager" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/configuring_efm.html" --- diff --git a/product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx b/product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx index 25fb74d736d..fa401337400 100644 --- a/product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx +++ b/product_docs/docs/efm/3.10/efm_user/05_using_efm.mdx @@ -1,9 +1,6 @@ --- title: "Using Failover Manager" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/using_efm.html" --- @@ -35,7 +32,7 @@ You can start the nodes of a Failover Manager cluster in any order. To start the Failover Manager cluster on RHEL/CentOS 7.x or RHEL/CentOS 8.x, assume superuser privileges, and invoke the command: ```text -systemctl start edb-efm-4.0 +systemctl start edb-efm-3.10 ``` If the cluster properties file for the node specifies that `is.witness` is `true`, the node will start as a Witness node. @@ -70,7 +67,7 @@ You can add a node to a Failover Manager cluster at any time. When you add a nod 3. Assume superuser privileges on the new node, and start the Failover Manager agent. To start the Failover Manager cluster on RHEL/CentOS 7.x or RHEL/CentOS 8.x, invoke the command: ```text - systemctl start edb-efm-4.0 + systemctl start edb-efm-3.10 ``` When the new node joins the cluster, Failover Manager will send a notification to the administrator email provided in the `user.email` property, and/or will invoke the specified notification script. @@ -173,10 +170,10 @@ When you stop an agent, Failover Manager will remove the node's address from the To stop the Failover Manager agent on RHEL/CentOS 7.x or RHEL/CentOS 8.x, assume superuser privileges, and invoke the command: ```text -systemctl stop edb-efm-4.0 +systemctl stop edb-efm-3.10 ``` -Until you invoke the `efm disallow-node` command (removing the node's address of the node from the Allowed node host list), you can use the `service edb-efm-4.0 start` command to restart the node at a later date without first running the `efm allow-node` command again. +Until you invoke the `efm disallow-node` command (removing the node's address from the Allowed node host list), you can use the `service edb-efm-3.10 start` command to restart the node at a later date without first running the `efm allow-node` command again. Note that stopping an agent does not signal the cluster that the agent has failed unless the [primary.shutdown.as.failure](04_configuring_efm/01_cluster_properties/cluster_properties/#primary_shutdown_as_failure) property is set to `true`. @@ -279,7 +276,7 @@ After creating the `acctg.properties` and `sales.properties` files, create a ser ### RHEL/CentOS 7.x or RHEL/CentOS 8.x -If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, you should copy the `edb-efm-4.0` unit file to new file with a name that is unique for each cluster. For example, if you have two clusters (named acctg and sales), the unit file names might be: +If you are using RHEL/CentOS 7.x or RHEL/CentOS 8.x, you should copy the `edb-efm-3.10` unit file to a new file with a name that is unique for each cluster. For example, if you have two clusters (named acctg and sales), the unit file names might be: ```text /usr/lib/systemd/system/efm-acctg.service @@ -296,7 +293,7 @@ Environment=CLUSTER=acctg You must also update the value of the `PIDFile` parameter to specify the new cluster name.
For example: ```text -PIDFile=/var/run/efm-4.0/acctg.pid +PIDFile=/var/run/efm-3.10/acctg.pid ``` After copying the service scripts, use the following commands to enable the services: diff --git a/product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx b/product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx index fe731b8548d..698302836ef 100644 --- a/product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx +++ b/product_docs/docs/efm/3.10/efm_user/06_monitoring_efm_cluster.mdx @@ -1,9 +1,7 @@ --- title: "Monitoring a Failover Manager Cluster" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/monitoring_efm_cluster.html" + --- diff --git a/product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx b/product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx index 6db8691d3e0..04db0acb13d 100644 --- a/product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx +++ b/product_docs/docs/efm/3.10/efm_user/07_using_efm_utility.mdx @@ -1,14 +1,11 @@ --- title: "Using the efm Utility" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/using_efm_utility.html" --- -Failover Manager provides the efm utility to assist with cluster management. The RPM installer adds the utility to the `/usr/edb/efm-4.0/bin` directory when you install Failover Manager. +Failover Manager provides the efm utility to assist with cluster management. The RPM installer adds the utility to the `/usr/edb/efm-3.10/bin` directory when you install Failover Manager. **efm allow-node** diff --git a/product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx b/product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx index 4ab2f2aa6ff..ae5eea9f89e 100644 --- a/product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx +++ b/product_docs/docs/efm/3.10/efm_user/08_controlling_efm_service.mdx @@ -1,9 +1,6 @@ --- title: "Controlling the Failover Manager Service" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/controlling_efm_service.html" --- @@ -21,35 +18,35 @@ The commands that control the Failover Manager service are platform-specific. ## Using the systemctl Utility on RHEL/CentOS 7.x and RHEL/CentOS 8.x -On RHEL/CentOS 7.x and RHEL/CentOS 8.x, Failover Manager runs as a Linux service named (by default) `edb-efm-4.0.service` that is located in `/usr/lib/systemd/system`. Each database cluster monitored by Failover Manager will run a copy of the service on each node of the replication cluster. +On RHEL/CentOS 7.x and RHEL/CentOS 8.x, Failover Manager runs as a Linux service named (by default) `edb-efm-3.10.service` that is located in `/usr/lib/systemd/system`. Each database cluster monitored by Failover Manager will run a copy of the service on each node of the replication cluster. Use the following systemctl commands to control a Failover Manager agent that resides on a RHEL/CentOS 7.x and RHEL/CentOS 8.x host: ```text -systemctl start edb-efm-4.0 +systemctl start edb-efm-3.10 ``` The start command starts the Failover Manager agent on the current node. 
The local Failover Manager agent monitors the local database and communicates with Failover Manager on the other nodes. You can start the nodes in a Failover Manager cluster in any order. This command must be invoked by root. ```text -systemctl stop edb-efm-4.0 +systemctl stop edb-efm-3.10 ``` Stop the Failover Manager on the current node. This command must be invoked by root. ```text -systemctl status edb-efm-4.0 +systemctl status edb-efm-3.10 ``` The status command returns the status of the Failover Manager agent on which it is invoked. You can invoke the status command on any node to instruct Failover Manager to return status and server startup information. ```text -[root@ONE ~]}> systemctl status edb-efm-4.0 - edb-efm-4.0.service - EnterpriseDB Failover Manager 4.0 - Loaded: loaded (/usr/lib/systemd/system/edb-efm-4.0.service; disabled; vendor preset: disabled) +[root@ONE ~]}> systemctl status edb-efm-3.10 + edb-efm-3.10.service - EnterpriseDB Failover Manager 3.10 + Loaded: loaded (/usr/lib/systemd/system/edb-efm-3.10.service; disabled; vendor preset: disabled) Active: active (running) since Wed 2013-02-14 14:02:16 EST; 4s ago - Process: 58125 ExecStart=/bin/bash -c /usr/edb/edb-efm-4.0/bin/runefm.sh start ${CLUSTER} (code=exited, status=0/SUCCESS) + Process: 58125 ExecStart=/bin/bash -c /usr/edb/edb-efm-3.10/bin/runefm.sh start ${CLUSTER} (code=exited, status=0/SUCCESS) Main PID: 58180 (java) - CGroup: /system.slice/edb-efm-4.0.service - └─58180 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.161-0.b14.el7_4.x86_64/jre/bin/java -cp /usr/edb/edb-efm-4.0/lib/EFM-4.0.0.jar -Xmx128m... + CGroup: /system.slice/edb-efm-3.10.service + └─58180 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.161-0.b14.el7_4.x86_64/jre/bin/java -cp /usr/edb/edb-efm-3.10/lib/EFM-3.10.0.jar -Xmx128m... ``` diff --git a/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx b/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx index af66915f9c0..c7deef128fb 100644 --- a/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx +++ b/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx @@ -1,14 +1,11 @@ --- title: "Controlling Logging" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/controlling_logging.html" --- -Failover Manager writes and stores one log file per agent and one startup log per agent in `/var/log/-4.0` (where `` specifies the name of the cluster). +Failover Manager writes and stores one log file per agent and one startup log per agent in `/var/log/-3.10` (where `` specifies the name of the cluster). You can control the level of detail written to the agent log by modifying the `jgroups.loglevel` and `efm.loglevel` parameters in the [cluster properties file](04_configuring_efm/01_cluster_properties/#loglevel): @@ -35,7 +32,7 @@ The logging facilities use the Java logging library and logging levels. The log For example, if you set the `efm.loglevel` parameter to `WARN`, Failover Manager will only log messages at the `WARN` level and above (`WARN` and `ERROR`). -By default, Failover Manager log files are rotated daily, compressed, and stored for a week. You can modify the file rotation schedule by changing settings in the log rotation file (`/etc/logrotate.d/efm-4.0`). 
For more information about modifying the log rotation schedule, consult the logrotate man page: +By default, Failover Manager log files are rotated daily, compressed, and stored for a week. You can modify the file rotation schedule by changing settings in the log rotation file (`/etc/logrotate.d/efm-3.10`). For more information about modifying the log rotation schedule, consult the logrotate man page: > `$ man logrotate` diff --git a/product_docs/docs/efm/3.10/efm_user/10_notifications.mdx b/product_docs/docs/efm/3.10/efm_user/10_notifications.mdx index 3087ea24575..63bfa1ac308 100644 --- a/product_docs/docs/efm/3.10/efm_user/10_notifications.mdx +++ b/product_docs/docs/efm/3.10/efm_user/10_notifications.mdx @@ -1,9 +1,6 @@ --- title: "Notifications" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/notifications.html" --- @@ -41,7 +38,7 @@ The severity level designates the urgency of the notification. A notification wi You can use the [notification.level](04_configuring_efm/01_cluster_properties/#notification_level) property to specify the minimum severity level that will trigger a notification. !!! Note - In addition to sending notices to the administrative email address, all notifications are recorded in the cluster log file (`/var/log/efm-4.0/.log`). + In addition to sending notices to the administrative email address, all notifications are recorded in the cluster log file (`/var/log/efm-3.10/.log`). The conditions listed in the table below will trigger an `INFO` level notification: @@ -79,16 +76,16 @@ The conditions listed in the table below will trigger a *WARNING* level notifica | Agent exited on node_address for cluster cluster_name | The agent has exited. This is generated by an agent in the Idle state. | | Agent exited for cluster cluster_name | The agent has exited. This notification is usually generated during startup when an agent exits before startup has completed. | | Virtual IP address assigned to non-primary node | The virtual IP address appears to be assigned to a non-primary node. To avoid any conflicts, Failover Manager will release the VIP. You should confirm that the VIP is assigned to your primary node and manually reassign the address if it is not. | -| Virtual IP address not assigned to primary node. | The virtual IP address appears to not be assigned to a primary node. EDB Postgres Failover Manager will attempt to reacquire the VIP. | +| Virtual IP address not assigned to primary node | The virtual IP address appears to not be assigned to a primary node. EDB Postgres Failover Manager will attempt to reacquire the VIP. | | No standby agent in cluster for cluster cluster_name | The standbys on cluster_name have left the cluster. | | Standby agent failed for cluster cluster_name | A standby agent on cluster_name has left the cluster, but the coordinator has detected that the standby database is still running. | | Standby database failed for cluster cluster_name | A standby agent has signaled that its database has failed. The other nodes also cannot reach the standby database. | | Standby agent cannot reach database for cluster cluster_name | A standby agent has signaled database failure, but the other nodes have detected that the standby database is still running. | | Cluster cluster_name has dropped below three nodes | At least three nodes are required for full failover protection. 
Please add witness or agent node to the cluster. | | Subset of cluster cluster_name disconnected from primary | This node is no longer connected to the majority of the cluster cluster_name. Because this node is part of a subset of the cluster, failover will not be attempted. Current nodes that are visible are: node_address | -| Promotion has started on cluster cluster_name. | The promotion of a standby has started on cluster cluster_name. | +| Promotion has started on cluster cluster_name | The promotion of a standby has started on cluster cluster_name. | | Witness failure for cluster cluster_name | Witness running at node_address has left the cluster. | -| Idle agent failure for cluster cluster_name. | Idle agent running at node_address has left the cluster. | +| Idle agent failure for cluster cluster_name | Idle agent running at node_address has left the cluster. | | One or more nodes isolated from network for cluster cluster_name | This node appears to be isolated from the network. Other members seen in the cluster are: node_name | | Node no longer isolated from network for cluster cluster_name. | This node is no longer isolated from the network. | | Standby agent tried to promote, but primary DB is still running | The standby EFM agent tried to promote itself, but detected that the primary DB is still running on node_address. This usually indicates that the primary EFM agent has exited. Failover has NOT occurred. | @@ -96,62 +93,62 @@ The conditions listed in the table below will trigger a *WARNING* level notifica | Standby agent tried to promote, but could not verify primary DB | The standby EFM agent tried to promote itself, but could not detect whether or not the primary DB is still running on node_address. Failover has NOT occurred. | | Standby agent tried to promote, but VIP appears to still be assigned | The standby EFM agent tried to promote itself, but could not because the virtual IP address (VIP_address) appears to still be assigned to another node. Promoting under these circumstances could cause data corruption. Failover has NOT occurred. | | Standby agent tried to promote, but appears to be orphaned | The standby EFM agent tried to promote itself, but could not because the well-known server (server_address) could not be reached. This usually indicates a network issue that has separated the standby agent from the other agents. Failover has NOT occurred. | -| Potential manual failover required on cluster cluster_name. | A potential failover situation was detected for cluster cluster_name. Automatic failover has been disabled for this cluster, so manual intervention is required. | +| Failover has not occurred | An agent has detected that the master database is no longer available in cluster cluster_name, but there are no standby nodes available for failover. | +| Potential manual failover required on cluster cluster_name | A potential failover situation was detected for cluster cluster_name. Automatic failover has been disabled for this cluster, so manual intervention is required. | | Failover has completed on cluster cluster_name | Failover has completed on cluster cluster_name. | | Lock file for cluster cluster_name has been removed | The lock file for cluster cluster_name has been removed from: path_name on node node_address. This lock prevents multiple agents from monitoring the same cluster on the same node. Please restore this file to prevent accidentally starting another agent for cluster. 
| | A recovery file for cluster cluster_name has been found on primary node | A recovery file for cluster cluster_name has been found at: path_name on primary node node_address. This may be problematic should you attempt to restart the DB on this node. | | recovery_target_timeline is not set to latest in recovery settings | The recovery_target_timeline parameter is not set to latest in the recovery settings. The standby server will not be able to follow a timeline change that occurs when a new primary is promoted. | | Promotion has not occurred for cluster cluster_name | A promotion was attempted but there is already a node being promoted: ip_address. | | Standby not reconfigured after failover in cluster cluster_name | The auto.reconfigure property has been set to false for this node. The node has not been reconfigured to follow the new primary node after a failover. | -| Could not resume replay for standby standby_id. | Could not resume replay for standby. Manual intervention may be required. Error: error_message. | +| Could not resume replay for cluster cluster_name | Could not resume replay for standby being promoted. Manual intervention may be required. Error: error_decription This error is returned if the server encounters an error when invoking replay during the promotion of a standby. | +| Could not resume replay for standby standby_id | Could not resume replay for standby. Manual intervention may be required. Error: error_message. | | Possible problem with database timeout values | Your remote.timeout value (value) is higher than your local.timeout value (value). If the local database takes too long to respond, the local agent could assume that the database has failed though other agents can connect. While this will not cause a failover, it could force the local agent to stop monitoring, leaving you without failover protection. | | No standbys available for promotion in cluster cluster_name | The current number of standby nodes in the cluster has dropped to the minimum number: number. There cannot be a failover unless another standby node(s) is added or made promotable. | | No promotable standby for cluster cluster_name | The current failover priority list in the cluster is empty. You have removed the only promotable standby for the cluster cluster_name. There cannot be a failover unless another promotable standby node(s) is added or made promotable by adding to failover priority list. | -| Synchronous replication has been reconfigured for cluster cluster_name | The number of synchronous standby nodes in the cluster has dropped below number. The synchronous standby names on primary has been reconfigured to: new synchronous_standby_names value. | -| Synchronous replication has been disabled for cluster cluster_name. | The number of synchronous standby nodes in the cluster has dropped below count. The primary has been taken out of synchronous replication mode. | +| Synchronous replication has been disabled for cluster cluster_name | The number of synchronous standby nodes in the cluster has dropped below count. The primary has been taken out of synchronous replication mode. | | Could not reload database configuration. | Could not reload database configuration. Manual intervention is required. Error: error_message. 
| | Custom monitor timeout for cluster cluster_name | The following custom monitoring script has timed out: script_name | | Custom monitor 'safe mode' failure for cluster cluster_name | The following custom monitor script has failed, but is being run in "safe mode": script_name. Output: script_results | -| primary.shutdown.as.failure set to true for primary node | The primary.shutdown.as.failure property has been set to true for this cluster. Stopping the primary agent without stopping the entire cluster will be treated by the rest of the cluster as an immediate primary agent failure. If maintenance is required on the primary database, shut down the primary agent and wait for a notification from the remaining nodes that failover will not happen. | -| Primary cannot ping local database for cluster cluster_name | The Primary agent can no longer reach the local database running at node_address. Other nodes are able to access the database remotely, so the primary will become IDLE and attempt to resume monitoring the database. | The conditions listed in the table below will trigger a *SEVERE* notification: -| Subject | Description | -| -------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Standby database restarted but EFM cannot connect | The start or restart command for the database ran successfully but the database is not accepting connections. EFM will keep trying to connect for up to restart.connection.timeout seconds. | -| Unable to connect to DB on node_address | The maximum connections limit has been reached. | -| Unable to connect to DB on node_address | Invalid password for db.user=user_name. | -| Unable to connect to DB on node_address | Invalid authorization specification. | -| Primary cannot resume monitoring local database for cluster cluster_name | The primary agent can no longer reach the local database running at node_address. Other nodes are able to access the database remotely, so the primary will not release the VIP and/or create a recovery.conf file. The primary agent will remain IDLE until the resume command is run to resume monitoring the database. | -| Fencing script error | Fencing script script_name failed to execute successfully. Exit Value: exit_code Results: script_results Failover has NOT occurred. | -| Post-promotion script failed | Post-promotion script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | -| Remote post-promotion script failed | Remote post-promotion script script_name failed to execute successfully Exit Value: exit_code Results: script_resultsNode: node_address | -| Remote pre-promotion script failed | Remote pre-promotion script script_name failed to execute successfully Exit Value: exit_code Results: script_resultsNode: node_address | -| Post-database failure script error | Post-database failure script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | -| Agent resumed script error | Agent resumed script script_name failed to execute successfully. Results: script_results | -| Primary isolation script failed | Primary isolation script script_name failed to execute successfully. 
Exit Value: exit_code Results: script_results | -| Could not promote standby | The promote command failed on node. Could not promote standby. Error details: error_details | -| Error creating recovery.conf file on node_address for cluster cluster_name | There was an error creating the recovery.conf file on primary node node_address during promotion. Promotion has continued, but requires manual intervention to ensure that the old primary node can not be restarted. Error details: message_details | -| An unexpected error has occurred for cluster cluster_name | An unexpected error has occurred on this node. Please check the agent log for more information. Error: error_details | -| Primary database being fenced off for cluster cluster_name | The primary database has been isolated from the majority of the cluster. The cluster is telling the primary agent at ip_address to fence off the primary database to prevent two primarys when the rest of the failover manager cluster promotes a standby. | -| Isolated primary database shutdown. | The isolated primary database has been shutdown by failover manager. | -| Primary database being fenced off for cluster cluster_name | The primary database has been isolated from the majority of the cluster. Before the primary could finish detecting isolation, a standby was promoted and has rejoined this node in the cluster. This node is isolating itself to avoid more than one primary database. | -| Could not assign VIP to node node_address | Failover manager could not assign the VIP address for some reason. | -| primary_or_standby database failure for cluster cluster_name | The database has failed on the specified node. | -| Agent is timing out for cluster cluster_name | This agent has timed out trying to reach the local database. After the timeout, the agent could successfully ping the database and has resumed monitoring. However, the node should be checked to make sure it is performing normally to prevent a possible database or agent failure. | -| Resume timed out for cluster cluster_name | This agent could not resume monitoring after reconfiguring and restarting the local database. See agent log for details. | -| Internal state mismatch for cluster cluster_name | The failover manager cluster's internal state did not match the actual state of the cluster members. This is rare and can be caused by a timing issue of nodes joining the cluster and/or changing their state. The problem should be resolved, but you should check the cluster status as well to verify. Details of the mismatch can be found in the agent log file. | -| Failover has not occurred | An agent has detected that the primary database is no longer available in cluster cluster_name, but there are no standby nodes available for failover. | -| Database in wrong state on node_address | The standby agent has detected that the local database is no longer in recovery. The agent will now become idle. Manual intervention is required. | -| Database in wrong state on node_address | The primary agent has detected that the local database is in recovery. The agent will now become idle. Manual intervention is required. | -| Database connection failure for cluster cluster_name | This node is unable to connect to the database running on: node_addressUntil this is fixed, failover may not work properly because this node will not be able to check if the database is running or not. | -| Standby custom monitor failure for cluster cluster_name | The following custom monitor script has failed on a standby node. 
The agent will stop monitoring the local database. Script location: script_name Script output: script_results | -| Primary custom monitor failure for cluster cluster_name | The following custom monitor script has failed on a primary node. EFM will attempt to promote a standby. Script location: script_name Script output: script_results | -| Loopback address set for ping.server.ip | Loopback address is set for ping.server.ip property. This setting can interfere with the network isolation detection and hence it should be changed. | -| Load balancer attach script error | Load balancer attach script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | -| Load balancer detach script error | Load balancer detach script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | -| Not enough synchronous standbys available in cluster cluster_name. | The number of synchronous standby nodes in the cluster has dropped to count. All write queries on the primary will be blocked until enough synchronous standby nodes are added. | +| Subject | Description | +| -------------------------------------------------------------------------- | -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Standby database restarted but EFM cannot connect | The start or restart command for the database ran successfully but the database is not accepting connections. EFM will keep trying to connect for up to restart.connection.timeout seconds. | +| Unable to connect to DB on node_address | The maximum connections limit has been reached. | +| Unable to connect to DB on node_address | Invalid password for db.user=user_name. | +| Unable to connect to DB on node_address | Invalid authorization specification. | +| Master cannot ping local database for cluster cluster_name | The primary agent can no longer reach the local database running at node_address. Other nodes are able to access the database remotely, so the primary will not release the VIP and/or create a recovery.conf file. The primary agent will remain IDLE until the resume command is run to resume monitoring the database. | +| Fencing script error | Fencing script script_name failed to execute successfully. Exit Value: exit_code Results: script_results Failover has NOT occurred. | +| Post-promotion script failed | Post-promotion script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Remote post-promotion script failed | Remote post-promotion script script_name failed to execute successfully Exit Value: exit_code Results: script_resultsNode: node_address | +| Remote pre-promotion script failed | Remote pre-promotion script script_name failed to execute successfully Exit Value: exit_code Results: script_resultsNode: node_address | +| Post-database failure script error | Post-database failure script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Agent resumed script error | Agent resumed script script_name failed to execute successfully. Results: script_results | +| Primary isolation script failed | Primary isolation script script_name failed to execute successfully. 
Exit Value: exit_code Results: script_results | +| Could not promote standby | The promote command failed on node. Could not promote standby. Error details: error_details | +| Error creating recovery.conf file on node_address for cluster cluster_name | There was an error creating the recovery.conf file on primary node node_address during promotion. Promotion has continued, but requires manual intervention to ensure that the old primary node can not be restarted. Error details: message_details | +| An unexpected error has occurred for cluster cluster_name | An unexpected error has occurred on this node. Please check the agent log for more information. Error: error_details | +| Primary database being fenced off for cluster cluster_name | The primary database has been isolated from the majority of the cluster. The cluster is telling the primary agent at ip_address to fence off the primary database to prevent two primarys when the rest of the failover manager cluster promotes a standby. | +| Isolated primary database shutdown. | The isolated primary database has been shutdown by failover manager. | +| Primary database being fenced off for cluster cluster_name | The primary database has been isolated from the majority of the cluster. Before the primary could finish detecting isolation, a standby was promoted and has rejoined this node in the cluster. This node is isolating itself to avoid more than one primary database. | +| Could not assign VIP to node node_address | Failover manager could not assign the VIP address for some reason. | +| primary_or_standby database failure for cluster cluster_name | The database has failed on the specified node. | +| Agent is timing out for cluster cluster_name | This agent has timed out trying to reach the local database. After the timeout, the agent could successfully ping the database and has resumed monitoring. However, the node should be checked to make sure it is performing normally to prevent a possible database or agent failure. | +| Resume timed out for cluster cluster_name | This agent could not resume monitoring after reconfiguring and restarting the local database. See agent log for details. | +| Internal state mismatch for cluster cluster_name | The failover manager cluster's internal state did not match the actual state of the cluster members. This is rare and can be caused by a timing issue of nodes joining the cluster and/or changing their state. The problem should be resolved, but you should check the cluster status as well to verify. Details of the mismatch can be found in the agent log file. | +| Failover has not occurred | An agent has detected that the primary database is no longer available in cluster cluster_name, but there are no standby nodes available for failover. | +| Database in wrong state on node_address | The standby agent has detected that the local database is no longer in recovery. The agent will now become idle. Manual intervention is required. | +| Database in wrong state on node_address | The primary agent has detected that the local database is in recovery. The agent will now become idle. Manual intervention is required. | +| Database connection failure for cluster cluster_name | This node is unable to connect to the database running on: node_addressUntil this is fixed, failover may not work properly because this node will not be able to check if the database is running or not. | +| Standby custom monitor failure for cluster cluster_name | The following custom monitor script has failed on a standby node. 
The agent will stop monitoring the local database. Script location: script_name Script output: script_results | +| master.shutdown.as.failure set to true for master node | The master.shutdown.as.failure property has been set to true for this cluster. Stopping the primary agent without stopping the entire cluster will be treated by the rest of the cluster as an immediate primary agent failure. If maintenance is required on the primary database, shut down the primary agent and wait for a notification from the remaining nodes that failover will not happen.| +| Primary custom monitor failure for cluster cluster_name | The following custom monitor script has failed on a primary node. EFM will attempt to promote a standby. Script location: script_name Script output: script_results | +| Loopback address set for ping.server.ip | Loopback address is set for ping.server.ip property. This setting can interfere with the network isolation detection and hence it should be changed. | +| Load balancer attach script error | Load balancer attach script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Load balancer detach script error | Load balancer detach script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | +| Not enough synchronous standbys available in cluster cluster_name. | The number of synchronous standby nodes in the cluster has dropped to count. All write queries on the primary will be blocked until enough synchronous standby nodes are added. | diff --git a/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx b/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx index f225d41f915..245654a3481 100644 --- a/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx +++ b/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx @@ -1,9 +1,6 @@ --- title: "Supported Failover and Failure Scenarios" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/supported_scenarios.html" --- diff --git a/product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx b/product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx index 15455f32cd9..2aa9168c5ce 100644 --- a/product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx +++ b/product_docs/docs/efm/3.10/efm_user/12_upgrading_existing_cluster.mdx @@ -1,58 +1,45 @@ --- title: "Upgrading an Existing Cluster" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/upgrading_existing_cluster.html" --- Failover Manager provides a utility to assist you when upgrading a Failover Manager cluster. To upgrade an existing cluster, you must: -1. Install Failover Manager 4.0 on each node of the cluster. For detailed information about installing Failover Manager, see [Installing Failover Manager](03_installing_efm/#installing_efm). -2. After installing Failover Manager, invoke the `efm upgrade-conf` utility to create the `.properties` and `.nodes` files for Failover Manager 4.0. The Failover Manager installer installs the upgrade utility ([efm upgrade-conf](07_using_efm_utility/#efm_upgrade_conf)) to the `/usr/edb/efm-4.0/bin directory`. To invoke the utility, assume root privileges, and invoke the command: +1. 
Install Failover Manager 3.10 on each node of the cluster. For detailed information about installing Failover Manager, see [Installing Failover Manager](03_installing_efm/#installing_efm). +2. After installing Failover Manager, invoke the `efm upgrade-conf` utility to create the `.properties` and `.nodes` files for Failover Manager 3.10. The Failover Manager installer installs the upgrade utility ([efm upgrade-conf](07_using_efm_utility/#efm_upgrade_conf)) to the `/usr/edb/efm-3.10/bin directory`. To invoke the utility, assume root privileges, and invoke the command: ```text efm upgrade-conf ``` -The efm `upgrade-conf` utility locates the `.properties` and `.nodes` files of pre-existing clusters and copies the parameter values to a new configuration file for use by Failover Manager. The utility saves the updated copy of the configuration files in the `/etc/edb/efm-4.0` directory. +The efm `upgrade-conf` utility locates the `.properties` and `.nodes` files of pre-existing clusters and copies the parameter values to a new configuration file for use by Failover Manager. The utility saves the updated copy of the configuration files in the `/etc/edb/efm-3.10` directory. -1. Modify the `.properties` and `.nodes` files for EFM 4.0, specifying any new preferences. Use your choice of editor to modify any additional properties in the properties file (located in the `/etc/edb/efm-4.0` directory) before starting the service for that node. For detailed information about property settings, see [The Cluster Properties File](04_configuring_efm/01_cluster_properties/#cluster_properties). +1. Modify the `.properties` and `.nodes` files for EFM 3.10, specifying any new preferences. Use your choice of editor to modify any additional properties in the properties file (located in the `/etc/edb/efm-3.10` directory) before starting the service for that node. For detailed information about property settings, see [The Cluster Properties File](04_configuring_efm/01_cluster_properties/#cluster_properties). !!! Note `db.bin` is a required property. When modifying the properties file, ensure that the `db.bin` property specifies the location of the Postgres `bin` directory. -1. Use a version-specific command to stop the old Failover Manager cluster; for example, you can use the following command to stop a version 4.0 cluster: +1. Use a version-specific command to stop the old Failover Manager cluster; for example, you can use the following command to stop a version 3.10 cluster: ```text -/usr/efm-4.0/bin/efm stop-cluster efm +/usr/efm-3.10/bin/efm stop-cluster efm ``` -1. Start the new [Failover manager service](08_controlling_efm_service/#controlling_efm_service) (`edb-efm-4.0`) on each node of the cluster. +1. Start the new [Failover manager service](08_controlling_efm_service/#controlling_efm_service) (`edb-efm-3.10`) on each node of the cluster. 
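For example, on a RHEL/CentOS 7.x or RHEL/CentOS 8.x node, starting the new service uses the same systemctl invocation shown earlier in this guide; run it as root on every node of the cluster:

```text
systemctl start edb-efm-3.10
```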
The following example demonstrates invoking the upgrade utility to create the `.properties` and `.nodes` files for a Failover Manager installation: ```text -[root@localhost efm-4.0]# /usr/edb/efm-4.0/bin/efm upgrade-conf efm -Checking directory /etc/edb/efm-3.10 +# /usr/edb/efm-3.10/bin/efm upgrade-conf efm +Checking directory /etc/edb/efm-3.9 Processing efm.properties file -The following properties were added in addition to those in previous ˓→installed version: -notification.text.prefix -encrypt.agent.messages -standby.restart.delay -The following properties were renamed from those in previous installed ˓→version: -stop.failed.master => stop.failed.primary -master.shutdown.as.failure => primary.shutdown.as.failure -script.master.isolated => script.primary.isolated -stop.isolated.master => stop.isolated.primary -reconfigure.sync.master => reconfigure.sync.primary -Checking directory /etc/edb/efm-3.10 +The following properties were added in addition to those in previous installed version: + external.address + update.physical.slots.period +Checking directory /etc/edb/efm-3.9 Processing efm.nodes file -db.password.encrypted re-encoded with stronger encryption. -Upgrade of files is finished. The owner and group for properties and ˓→nodes files have been set as 'efm'. -[root@localhost efm-4.0]# +Upgrade of files is finished. The owner and group for properties and nodes files have been set as 'efm'. ``` If you are [using a Failover Manager configuration without sudo](04_configuring_efm/04_extending_efm_permissions/#running_efm_without_sudo), include the `-source` flag and specify the name of the directory in which the configuration files reside when invoking `upgrade-conf`. If the directory is not the configuration default directory, the upgraded files will be created in the directory from which the `upgrade-conf` command was invoked. @@ -61,30 +48,30 @@ If you are [using a Failover Manager configuration without sudo](04_configuring_ ## Un-installing Failover Manager -After upgrading to Failover Manager 4.0, you can use your native package manager to remove previous installations of Failover Manager. For example, use the following command to remove Failover Manager 3.10 and any unneeded dependencies: +After upgrading to Failover Manager 3.10, you can use your native package manager to remove previous installations of Failover Manager. For example, use the following command to remove Failover Manager 3.9 and any unneeded dependencies: - On RHEL or CentOS 7.x: ```text -yum remove edb-efm310 +yum remove edb-efm39 ``` - On RHEL or CentOS 8.x: ```text -dnf remove edb-efm310 +dnf remove edb-efm39 ``` - On Debian or Ubuntu: ```text -apt-get remove edb-efm310 +apt-get remove edb-efm39 ``` - On SLES: ```text -zypper remove edb-efm310 +zypper remove edb-efm39 ``` ## Performing a Database Update (Minor Version) diff --git a/product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx b/product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx index f2c421c3ce1..c5ad78ecca1 100644 --- a/product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx +++ b/product_docs/docs/efm/3.10/efm_user/13_troubleshooting.mdx @@ -1,9 +1,6 @@ --- title: "Troubleshooting" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/troubleshooting.html" --- diff --git a/product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx b/product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx index 9b19511aba2..a7c05767a4e 100644 --- a/product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx +++ b/product_docs/docs/efm/3.10/efm_user/14_configuring_streaming_replication.mdx @@ -1,9 +1,6 @@ --- title: "Configuring Streaming Replication" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/configuring_streaming_replication.html" --- diff --git a/product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx b/product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx index 61e1a171302..86426644194 100644 --- a/product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx +++ b/product_docs/docs/efm/3.10/efm_user/15_configuring_ssl_authentication.mdx @@ -1,9 +1,6 @@ --- title: "Configuring SSL Authentication on a Failover Manager Cluster" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/configuring_ssl_authentication.html" --- diff --git a/product_docs/docs/efm/3.10/efm_user/index.mdx b/product_docs/docs/efm/3.10/efm_user/index.mdx index ec9c73be97d..aca7638cdf0 100644 --- a/product_docs/docs/efm/3.10/efm_user/index.mdx +++ b/product_docs/docs/efm/3.10/efm_user/index.mdx @@ -1,12 +1,6 @@ --- title: "EDB Failover Manager" -#legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/whats_new.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/genindex.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/conclusion.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/index.html" --- **EDB Failover Manager** From b3ac7cac1298dc5ddff71c678ff8dd5af1dfbd18 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 4 Jun 2021 17:59:16 +0530 Subject: [PATCH 11/50] Updated version Former-commit-id: f3c32d2e0b484312ecb30014d2b7e04bc572d08d --- product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx b/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx index c7deef128fb..bed0ee3df60 100644 --- a/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx +++ b/product_docs/docs/efm/3.10/efm_user/09_controlling_logging.mdx @@ -62,7 +62,7 @@ After modifying the syslog configuration file, restart the `rsyslog` service to > `systemctl restart rsyslog.service` -After modifying the `rsyslog.conf` file on the Failover Manager host, you must modify the Failover Manager properties to enable logging. 
Use your choice of editor to [modify the properties file](04_configuring_efm/01_cluster_properties/#logtype_enabled) (`/etc/edb/efm-4.1/efm.properties.in`) specifying the type of logging that you wish to implement: +After modifying the `rsyslog.conf` file on the Failover Manager host, you must modify the Failover Manager properties to enable logging. Use your choice of editor to [modify the properties file](04_configuring_efm/01_cluster_properties/#logtype_enabled) (`/etc/edb/efm-3.10/efm.properties.in`) specifying the type of logging that you wish to implement: ```text # Which logging is enabled. From 24f2bac87c51f2093e0af58358ddcd24cb90be33 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 4 Jun 2021 18:29:19 +0530 Subject: [PATCH 12/50] Minor fixes Former-commit-id: 1f2fda083e48b7fc1c09a26e8e9654192ad56cce --- product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx | 11 +++++++---- .../01_cluster_properties/index.mdx | 8 ++++++++ 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx b/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx index 53bebcc410f..8afe8b06643 100644 --- a/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx +++ b/product_docs/docs/efm/3.10/efm_user/01_whats_new.mdx @@ -7,7 +7,10 @@ title: "What’s New" The following changes have been made to EDB Postgres Failover Manager to create version 3.10: -• Support for physical replication slot feature of Postgres -• Support for NAT addresses -• Introduction of a new command to check the status of a local node -• Replace `trigger_file` with `pg_ctl` utility for standby promotion +- Support for physical replication slot feature of Postgres + +- Support for NAT addresses + +- Introduction of a new command to check the status of a local node + +- Replace `trigger_file` with `pg_ctl` utility for standby promotion diff --git a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx index 6d409b62c0f..92dc8719c41 100644 --- a/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx +++ b/product_docs/docs/efm/3.10/efm_user/04_configuring_efm/01_cluster_properties/index.mdx @@ -705,6 +705,11 @@ The database parameter `synchronous_standby_names` on the primary node specifies # To raise num_sync, see the reconfigure.num.sync.max property below. reconfigure.num.sync=false ``` +!!! Note + + If you are using the `reconfigure.num.sync` property, ensure that the `wal_sender_timeout` in the primary database is set to at least ten seconds less than the `efm.node.timeout` value. + + @@ -725,6 +730,9 @@ Set the `reconfigure.sync.primary` property to `true` to take the primary databa # YOU ARE SURE THIS IS OK. reconfigure.sync.primary=false ``` +!!! Note + + If you are using the `reconfigure.sync.primary` property, ensure that the `wal_sender_timeout` in the primary database is set to at least ten seconds less than the `efm.node.timeout` value. 
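As a purely illustrative sketch of the relationship described in these notes (the specific values are assumptions, not recommendations): if `efm.node.timeout` is 50 seconds, `wal_sender_timeout` on the primary should be no more than 40 seconds.

```text
# Hypothetical values chosen only to illustrate the "at least ten seconds less" rule.
# In the Failover Manager cluster properties file:
efm.node.timeout=50

# In postgresql.conf on the primary database:
wal_sender_timeout = 40s
```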
From 02f12dc5b9162700f354b136453934c7ec0a1f06 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 4 Jun 2021 18:34:28 +0530 Subject: [PATCH 13/50] Added note Former-commit-id: 1eb801b075553e6b2dfc432cd9e9cdb6c309ac1d --- .../docs/efm/3.10/efm_user/11_supported_scenarios.mdx | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx b/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx index 245654a3481..61f2854729a 100644 --- a/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx +++ b/product_docs/docs/efm/3.10/efm_user/11_supported_scenarios.mdx @@ -44,6 +44,12 @@ After returning the node to the cluster as a Standby, you can easily return the 1. If the cluster has more than one Standby node, use the `efm set-priority` command to set the node's failover priority to 1. 2. Invoke the [efm promote -switchover](07_using_efm_utility/#efm_promote) command to promote the node to its original role of Primary node. +!!! Note + + Failover Manager does not rebuild a failed primary database to become a standby. Before rebuilding, it is important to determine why the primary failed, and ensure that all the data is available on the new primary. + Once the server is ready to be reinstated as a standby, the old data directory can be removed and the server can be reinstated. For more information, refer to the PostgreSQL documentation on [Setting up a standby server](https://www.postgresql.org/docs/current/warm-standby.html#STANDBY-SERVER-SETUP). In some cases, you can also reinstate the server using [pg_rewind](https://www.postgresql.org/docs/current/app-pgrewind.html). + + ## Standby Database is Down From f2d397fc69d317525f3847037671c0bcaa7677ae Mon Sep 17 00:00:00 2001 From: Manjusha Vaidya Date: Mon, 7 Jun 2021 16:20:06 +0530 Subject: [PATCH 14/50] Pgpool_4.2.3_June7 Former-commit-id: 6200094f4fd4540bfdda0154c2a9bcc69ce75f85 --- .../docs/pgpool/1.0/02_extensions.mdx | 56 +++--- .../1.0/03_configuring_connection_pooling.mdx | 190 ++++++++++++------ .../04_connecting_a_client_to_pgpool-II.mdx | 4 +- 3 files changed, 153 insertions(+), 97 deletions(-) diff --git a/product_docs/docs/pgpool/1.0/02_extensions.mdx b/product_docs/docs/pgpool/1.0/02_extensions.mdx index 71249d894d9..6af24286b2f 100644 --- a/product_docs/docs/pgpool/1.0/02_extensions.mdx +++ b/product_docs/docs/pgpool/1.0/02_extensions.mdx @@ -13,10 +13,7 @@ Modules in the extensions directory are additional features to EDB Postgres Adva !!! Note Pgpool-II extensions are only delivered for supported combinations of EDB Postgres Advanced Server versions and operating systems. -Before installing Pgpool-II extensions, install the EDB Postgres Advanced Server on your host system. You can install the following Pgpool-specific extensions using the following command: - -- [Pgpool_adm](#pgpool_adm) -- [Pgpool_recovery](#pgpool_recovery) +Before installing Pgpool-II extensions, install the EDB Postgres Advanced Server on your host system. ## Installing Pgpool-II Extensions @@ -26,7 +23,7 @@ The following section walks you through the steps of installing Pgpool-II extens Assume superuser privileges and perform the following steps to install Pgpool-II extensions on a CentOS host: -1. To install the repository configuration, assume superuser privileges and invoke one of the following platform-specific commands: +1. 
To install the repository configuration, assume superuser privileges and invoke the platform-specific command: On CentOS 7: @@ -40,7 +37,7 @@ Assume superuser privileges and perform the following steps to install Pgpool-II dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm ``` -2. Replace the `USERNAME:PASSWORD` in the following command with the username and password of a registered EDB user: +2. Replace the `USERNAME:PASSWORD` variable with the username and password of a registered EDB user: ```text sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo @@ -96,7 +93,7 @@ Before installing the repository configuration, you must have credentials that a Perform the following steps to install Pgpool-II extensions on an RHEL host: -1. To install the repository configuration, assume superuser privileges and invoke one of the following platform-specific commands: +1. To install the repository configuration, assume superuser privileges and invoke the platform-specific command: On RHEL 7: @@ -110,7 +107,7 @@ Perform the following steps to install Pgpool-II extensions on an RHEL host: dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm ``` -2. Replace the `USERNAME:PASSWORD` in the following command with the username and password of a registered EDB user: +2. Replace the `USERNAME:PASSWORD` variable in the following command with the username and password of a registered EDB user: ```text sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo @@ -200,7 +197,7 @@ Perform the following steps to install Pgpool-II extensions on an RHEL/CentOS 7 yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm ``` -3. Replace the `USERNAME:PASSWORD` in the following command with the username and password of a registered EDB user: +3. Replace the `USERNAME:PASSWORD` variable in the following command with the username and password of a registered EDB user: ```text sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo @@ -232,10 +229,7 @@ Perform the following steps to install Pgpool-II extensions on an RHEL/CentOS 7 To install Pgpool-II extensions on a Debian or Ubuntu host, you must have credentials that allow access to the EDB repository. To request credentials for the repository, visit the [EDB website](https://www.enterprisedb.com/user). -The following steps walk you through using the EDB apt repository to install a DEB package. When using the commands, replace the `username` and `password` with the credentials provided by EDB. - -!!! Note - If you are using the pdf version of this document, using the cut/paste command may result in extra spaces or carriage returns in the pasted command. If a command fails, check the command carefully for additional characters. +The following steps walk you through using the EDB apt repository to install a DEB package. 1. Assume superuser privileges: @@ -243,7 +237,7 @@ The following steps walk you through using the EDB apt repository to install a D sudo su – ``` -2. Configure the EDB repository: +2. Configure the EDB repository. Substitute your EDB credentials for the `username` and `password` placeholders in the following command: On Debian 9 and Ubuntu 18: @@ -289,7 +283,7 @@ The following steps walk you through using the EDB apt repository to install a D apt-get install -y edb-as-pgpool-extensions ``` -In the above command, `` is the EDB Postgres Advanced Server version, and `` is the Pgpool-II extension version you want to install. 
+ In the above command, `` is the EDB Postgres Advanced Server version, and `` is the Pgpool-II extension version you want to install. @@ -363,56 +357,60 @@ You can use the Zypper package manager to install the Pgpool-II extension on a S Perform the following steps to install Pgpool-II extensions: -1. Open StackBuilder Plus and select your EDB Postgres Advanced Server installation from the drop-down list on the `Welcome` window. Click `Next` to continue to the application selection page. Expand the `Add-ons, tools and utilities` node, and check the box next to the Pgpool-II extension to select and download the installer. Click `Next` to continue. Provide the credentials and click `Next`. +1. Open StackBuilder Plus and select your EDB Postgres Advanced Server installation from the drop-down list on the `Welcome` window. Click `Next` to continue to the application selection page. -2. The selected packages and the default download directory where the package will be installed are displayed; change the download directory location if required. Click `Next`. +2. Expand the `Add-ons, tools and utilities` node, and check the box next to the Pgpool-II extension to select and download the installer. Click `Next` to continue. -3. Once you have downloaded the installation files, a confirmation message is displayed. Click `Next` to start the installation. +3. Provide the credentials and click `Next`. -4. Select an installation language and click `OK`. +4. The selected packages and the default download directory where the package will be installed are displayed; change the download directory location if required. Click `Next`. -5. The Pgpool-II extensions installer welcomes you to the setup wizard. +5. Once you have downloaded the installation files, a confirmation message is displayed. Click `Next` to start the installation. + +6. Select an installation language and click `OK`. + +7. The Pgpool-II extensions installer welcomes you to the setup wizard. ![The Pgpool-II Extensions Welcome window](images/pg1.png) Fig. 1: The Pgpool-II Extensions Welcome window -6. Use the `Installation Directory` field to specify the directory in which you wish to install the Pgpool-II extensions software (the default installation directory is `/opt/edb/as`) Then, click `Next` to continue. +8. Use the `Installation Directory` field to specify the directory in which you wish to install the Pgpool-II extensions software (the default installation directory is `/opt/edb/as`) Then, click `Next` to continue. ![The Pgpool-II Extensions Installation Details Window](images/pg2.png) Fig. 2: The Pgpool-II Extensions Installation Details Window -7. The `Ready to Install` window notifies you when the installer has all of the information needed to install Pgpool-II extensions on your system. Click `Next` to install Pgpool-II extensions. +9. The `Ready to Install` window notifies you when the installer has all of the information needed to install Pgpool-II extensions on your system. Click `Next` to install Pgpool-II extensions. ![The Ready to Install window](images/pg3.png) Fig. 3: The Ready to Install window -8. Progress bars inform you as the installation progresses. +10. Progress bars inform you as the installation progresses. ![The installation progresses](images/pg4.png) -Fig. 4: The installation progresses + Fig. 4: The installation progresses -9. The installer notifies you when the setup wizard has completed the Pgpool-II installation. Click `Finish` to exit the installer. +11. 
The installer notifies you when the setup wizard has completed the Pgpool-II installation. Click `Finish` to exit the installer. ![The installation is complete](images/pg5.png) -Fig. 5: The installation is complete + Fig. 5: The installation is complete -10. The extensions will be available in the `/opt/edb/as/share/extension/pgpool*` directory. +12. The extensions will be available in the `/opt/edb/as/share/extension/pgpool*` directory. ## Creating Pgpool-II Extensions -You must install and create the extensions in each database in which you will be using Pgpool-II functionality. To ensure all extensions are available for future databases, you can add the extension to the `template1` database; any extensions installed in the `template1` database will be created in each of the databases that uses `template1` as a template during creation. +You must install and create the extensions in each database where you will be using Pgpool-II functionality. To ensure all extensions are available for future databases, you can add the extension to the `template1` database; any extensions installed in the `template1` database will be created in each of the databases that uses `template1` as a template during creation. **Pgpool_adm Extension** `Pgpool_adm` is a set of extensions that allows SQL access to PCP commands. To view information about PCP commands, see . - + After installing the `Pgpool_adm` extension, use the psql client application to connect to the database, and execute the following SQL command: ```text diff --git a/product_docs/docs/pgpool/1.0/03_configuring_connection_pooling.mdx b/product_docs/docs/pgpool/1.0/03_configuring_connection_pooling.mdx index b5e03731518..114c4696827 100644 --- a/product_docs/docs/pgpool/1.0/03_configuring_connection_pooling.mdx +++ b/product_docs/docs/pgpool/1.0/03_configuring_connection_pooling.mdx @@ -56,27 +56,31 @@ To configure connection pooling with one database server: The following example shows how to connect with the EDB Postgres Advanced Server: - `connection_cache = on` + ```text + connection_cache = on - `backend_hostname0 = 'localhost'` + backend_hostname0 = 'localhost' - `backend_port0 = 5444` + backend_port0 = 5444 - `backend_weight0 = 1` + backend_weight0 = 1 - `backend_data_directory0 = '/var/lib/edb/as13/data'` + backend_data_directory0 = '/var/lib/edb/as13/data' + ``` The following example shows how to connect with the PostgreSQL Server: - `connection_cache = on` + ```text + connection_cache = on - `backend_hostname0 = 'localhost'` + backend_hostname0 = 'localhost' - `backend_port0 = 5432` + backend_port0 = 5432 - `backend_weight0 = 1` + backend_weight0 = 1 - `backend_data_directory0 = '/var/lib/pgsql/13/data'` + backend_data_directory0 = '/var/lib/pgsql/13/data' + ``` !!! Note In the `pgpool.conf` file, connection parameters have an appended digit that specifies a cluster node identifier. Database node `0` specifies values for the primary node. @@ -85,7 +89,7 @@ To configure connection pooling with one database server: 4. Optionally, configure the [PCP administrative interface](#pcp_configuration). -5. Start Pgpool-II and begin using your application using the following command: +5. 
Start Pgpool-II: ```text systemctl start edb-pgpool-.service @@ -97,25 +101,33 @@ To configure connection pooling with one database server: On EDB Postgres Advanced Server for CentOS 7: - ```./psql -d edb -p 9999 -U enterprisedb -h /tmp``` + ```text + ./psql -d edb -p 9999 -U enterprisedb -h /tmp + ``` On EDB Postgres Advanced Server for Debian: - ```./psql -d edb -p 9999 -U enterprisedb``` + ```text + ./psql -d edb -p 9999 -U enterprisedb + ``` On PostgreSQL Server for CentOS 7: - ```./psql -d postgres -p 9999 -U postgres -h /tmp``` + ```text + ./psql -d postgres -p 9999 -U postgres -h /tmp + ``` On PostgreSQL Server for Debian: - ```./psql -d postgres -p 9999 -U postgres``` + ```text + ./psql -d postgres -p 9999 -U postgres + ``` ## Configuring Load Balancing -EDB supports replication scenarios that use Pgpool-II load balancing with PostgreSQL streaming replication or Slony replication. The supported replication methods ensure that database updates made by client applications apply to multiple backend servers. For detailed information about the benefits of each replication method and detailed configuration instructions, please review project documentation for each utility. +EDB supports replication scenarios that use Pgpool-II load balancing with PostgreSQL streaming replication or Slony replication. The supported replication methods ensure that database updates made by client applications apply to multiple backend servers. For detailed information about the benefits of each replication method and configuration instructions, please review [project documentation](https://www.pgpool.net/docs/latest/en/html/runtime-config-running-mode.html) for each utility. When load balancing is enabled, Pgpool-II distributes some types of `SELECT` statements to backend servers, allowing multiple database servers and hosts to share the processing load of `SELECT` statements issued by client applications. @@ -123,77 +135,106 @@ When configuring Pgpool-II load balancing, the initial database environments in - Tables must have the same name, definition, and row content. - Schemas must exist in each backend application database. -- Configure roles and privileges on each backend server to ensure the result set of SQL statements are identical on all servers. +- Roles and privileges on each backend server must be configured to ensure the result set of SQL statements are identical on all servers. If you use `password` authentication, assign the same password to an associated user name on each database server. Use the same user name/password pair to connect Pgpool-II to each backend connection. Within a replication scenario, each backend is uniquely identified by the hostname (or IP address) and the port number on which the database server instance is listening for connections. You must ensure that the `pool_hba.conf` and `pg_hba.conf` files allow a connection between that server and the host on which Pgpool-II will be running. -The following example demonstrates how to implement Pgpool-II load balancing with two servers (the primary and replica nodes) in a Streaming Replication scenario. Configuring Pgpool-II load balancing for a Slony replication scenario is similar; please see the Slony documentation for information about configuring Slony replication. +The following example demonstrates how to implement Pgpool-II load balancing with two servers (the primary and replica nodes) in a Streaming Replication scenario. 
Configuring Pgpool-II load balancing for a Slony replication scenario is similar; please see the [Slony documentation](https://www.slony.info/adminguide/2.2/doc/adminguide/slony.pdf) for information about configuring Slony replication. **Configuring the Primary Node of the Replication Scenario** Open an SSH session with the primary node of the replication scenario, and modify the `pg_hba.conf` file (located in the `/var/lib/edb/as13/data` directory), adding connection information for the replication user (in the example that follows, `edbrepuser` resides on a standby node with an IP address of `107.178.217.178`): - `host replication edbrepuser 107.178.217.178/32 md5` +```text + host replication edbrepuser 107.178.217.178/32 md5 +``` The connection information should specify the address of the replication scenario's standby node and your preferred authentication method. Modify the `postgresql.conf` file (located in `/var/lib/edb/as13/data`), adding the following replication parameter and values to the end of the file: - `wal_level = replica` - `max_wal_senders = 10` - `checkpoint_segments = 8` - `wal_keep_segments = 0` +```text + wal_level = replica + max_wal_senders = 10 + checkpoint_segments = 8 + wal_keep_segments = 0 +``` Save the configuration file, and restart the server: - `systemctl restart edb-as-13` + To restart on RHEL/CentOS 7 and 8 platforms: -!!! Note - The above command is applicable for RHEL/CentOS 7 and 8 platforms. + ```text + systemctl restart edb-as-13 + ``` + + To restart on Debian 9.x | 10.x or Ubuntu 18.04 | 20.04 platforms: + + ```text + /usr/edb/as13/bin/epas_ctlcluster 13 main restart + ``` Use the `sudo su -` command to assume the identity of the `enterprisedb` database superuser: - `sudo su - enterprisedb` +```text + sudo su - enterprisedb + ``` Then, start a `psql` session, connecting to the `edb` database: - `psql -d edb` + ```text + psql -d edb + ``` At the `psql` command line, create a user with the `replication` attribute: - `CREATE ROLE edbrepuser WITH REPLICATION LOGIN PASSWORD 'password';` +```text + CREATE ROLE edbrepuser WITH REPLICATION LOGIN PASSWORD 'password'; + ``` **Configuring the Standby Node of the Replication Scenario** Open an SSH session with the standby server, and assume the identity of the database superuser (`enterprisedb`): - `sudo su - enterprisedb` +```text +sudo su - enterprisedb +``` With your choice of editor, create a `.pgpass` file in the home directory of the `enterprisedb` user. The `.pgpass` file holds the password of the replication user in plain-text form; if you are using a `.pgpass` file, you should ensure that only trusted users have access to the `.pgpass` file: Add an entry that specifies connection information for the replication user: - `*:5444:*:edbrepuser:password` +```text +*:5444:*:edbrepuser:password +``` The server will enforce restrictive permissions on the `.pgpass` file; use the following command to set the file permissions: - `chmod 600 .pgpass` +```text +chmod 600 .pgpass +``` Relinquish the identity of the database superuser: - `exit` +```text +exit +``` Then, assume superuser privileges: - `sudo su -` +```text +sudo su - +``` Use your platform-specific command to stop the database server before replacing the data directory on the standby node with the `data` directory of the primary node. 
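For example, mirroring the restart commands shown earlier in this section, the stop commands would be similar to the following (adjust the service name or cluster version to match your installation):

```text
# On RHEL/CentOS 7 and 8 platforms:
systemctl stop edb-as-13

# On Debian 9.x | 10.x or Ubuntu 18.04 | 20.04 platforms:
/usr/edb/as13/bin/epas_ctlcluster 13 main stop
```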
Then, delete the `data` directory on the standby node: - `rm -rf /var/lib/edb/as13/data` +```text +rm -rf /var/lib/edb/as13/data +``` After deleting the existing `data` directory, use the `pg_basebackup` utility to copy the `data` directory of the primary node to the standby: @@ -205,20 +246,22 @@ The call to `pg_basebackup` specifies the IP address of the primary node and the Including the `-R` option creates the `standby.signal` file and appends connection settings to `postgresql.auto.conf` in the output directory (or into the base archive file when using tar format) to ease setting up a standby server. -For more information about the options available with the `pg_basebackup` utility, see the PostgreSQL core documentation at: - - +For more information about the options available with the `pg_basebackup` utility, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/app-pgbasebackup.html). When prompted by `pg_basebackup`, provide the password associated with the replication user. After copying the `data` directory, change ownership of the directory to the database superuser (`enterprisedb`): - `chown -R enterprisedb /var/lib/edb/as13/data` +```text +chown -R enterprisedb /var/lib/edb/as13/data +``` Modify the `postgresql.conf` file (located in `/var/lib/edb/as13/data`), specifying the following values at the end of the file: - `wal_level = replica` - `hot_standby = on` +```text +wal_level = replica +hot_standby = on +``` The `data` file has been copied from the primary node, and contains the replication parameters specified previously. @@ -228,21 +271,27 @@ Then, restart the server. At this point, the primary node will be replicating da Use your choice of editor to modify the `pgpool.conf` file. Within the `pgpool.conf` file, modify the parameter settings to specify that load balancing is enabled: - `load_balance_mode = on` +```text +load_balance_mode = on +``` Then, specify the connections settings for the primary database node in the parameter set that ends with a `0`. For example: - `backend_hostname0 = '146.148.46.44'` - `backend_port0 = 5444` - `backend_weight0 = 1` - `backend_data_directory0 = '/var/lib/edb/as13/data'` +```text +backend_hostname0 = '146.148.46.44' +backend_port0 = 5444 +backend_weight0 = 1 +backend_data_directory0 = '/var/lib/edb/as13/data' +``` Then, specify the connections settings for each node to which queries will be distributed. Increment the number that follows the parameter name for each node, and provide connection details: - `backend_hostname1 = '107.178.217.178'` - `backend_port1 = 5444` - `backend_weight1 = 1` - `backend_data_directory1 = '/var/lib/edb/as13/data'` +```text +backend_hostname1 = '107.178.217.178' +backend_port1 = 5444 +backend_weight1 = 1 +backend_data_directory1 = '/var/lib/edb/as13/data' +``` Use the `backend_weight` parameter to specify how queries will be distributed amongst the nodes. Specify a value of `1` to indicate that you wish (qualified) queries to be equally distributed across the nodes of the replication scenario. @@ -258,9 +307,9 @@ where <x.y> is the Pgpool release version. ## Configuring Client Authentication -When Pgpool-II is enabled, client applications connect to Pgpool-II, which acts as a middleman for a Postgres server. A connecting client application is first authenticated with the Pgpool-II server and then authenticated with the Postgres server. +When Pgpool-II is enabled, client applications connect to Pgpool-II, which acts as a middleman for a Postgres server. 
A connecting client application is first authenticated with the Pgpool-II server and then with the Postgres server. -Parameter settings in the `pool_hba.conf` configuration file determine the Pgpool-II authentication properties. The `pool_hba.conf` file is similar in format and function to the Postgres `pg_hba.conf` configuration file. Please refer to the Pgpool-II documentation for detailed information about `pool_hba.conf` entries. +Parameter settings in the `pool_hba.conf` configuration file determine the Pgpool-II authentication properties. The `pool_hba.conf` file is similar in format and function to the Postgres `pg_hba.conf` configuration file. Please refer to the [Pgpool-II documentation](https://www.pgpool.net/docs/latest/en/html/auth-pool-hba-conf.html) for detailed information about `pool_hba.conf` entries. To enable Pgpool-II authentication: @@ -270,7 +319,7 @@ To enable Pgpool-II authentication: 4. Restart Pgpool-II to reload the Pgpool-II configuration files. !!! Note - When authenticating with the database server, use the user names and passwords specified in the `pool_hba.conf` file; you must also specify those user names and passwords in the database server's `pg_hba.conf` file. + When authenticating with the database server, use the user names and passwords specified in the `pool_hba.conf` file; you must also specify these user names and passwords in the database server's `pg_hba.conf` file. @@ -280,13 +329,15 @@ PCP is an administrative interface for Pgpool-II that allows you to retrieve inf `pcp.conf` is the password configuration file for the PCP client. Before using PCP commands, you must modify the `pcp.conf` file, providing the user names and passwords you provide when invoking a PCP command. The user names in the `pcp.conf` file are entirely independent of the database server user names and passwords. -Use the following steps to enable PCP: +Use the following steps to configure PCP: 1. Copy the `pcp.conf.sample` file to `pcp.conf`. 2. Add an entry to the `pcp.conf` file in the following form: - `username:md5_password` + ```text + username:md5_password + ``` where: @@ -296,23 +347,27 @@ Use the following steps to enable PCP: You can use the `pg_md5` program to generate the encrypted password from the clear-text form as shown below: - `$ pg_md5 mypassword` - - `34819d7beeabb9260a5c854bc85b3e44` + ```text + $ pg_md5 mypassword + + 34819d7beeabb9260a5c854bc85b3e44 + ``` For example, the entry in the `pcp.conf` file for a PCP user named `pcpuser` with the password of `mypassword` is: - `# USERID:MD5PASSWD` - `pcpuser:34819d7beeabb9260a5c854bc85b3e44` + ```text + # USERID:MD5PASSWD + pcpuser:34819d7beeabb9260a5c854bc85b3e44 + ``` 3. Restart the Pgpool service. 4. When issuing a PCP command, specify the PCP user name and the unencrypted form of the password: -```text -$ pcp_node_info 5 localhost 9898 pcpuser mypassword 0 -localhost 5444 1 1.000000 -``` + ```text + $ pcp_node_info 5 localhost 9898 pcpuser mypassword 0 + localhost 5444 1 1.000000 + ``` After configuring PCP, you can use the following PCP commands to control Pgpool-II and retrieve information. @@ -334,7 +389,6 @@ After configuring PCP, you can use the following PCP commands to control Pgpool- | `pcp_recovery_node` | Attaches the given backend node with recovery | !!! Note - `pcp_health_check_stats` and `pcp_reload_config` commands are available from Pgpool version 4.2 onwards. 
To view more information about PCP command options, visit the [Pgpool project site](https://www.pgpool.net/docs/latest/en/html/pcp-commands.html). @@ -352,7 +406,7 @@ the cloud architecture. **max_pool**: Generally, advised to set `max_pool` to 1. Alternatively, for applications with many reconnects, `max_pool` can be set to the number of distinct combinations of users, databases, and connection options for the application connections. All but one connection in the pool would be stale connections, which consume a connection slot from Postgres without adding to the performance. -It is, therefore, advised not to configure `max_pool` beyond four to preserve a healthy ratio +It is, therefore, advised not to configure `max_pool` beyond 4 to preserve a healthy ratio between active and stale connections. As an example, for an application that constantly reconnects and uses two distinct users, both connecting to their own database, set it to 2. If both users would be able to connect to both databases, set it to 4. Note that increasing `max_pool` requires @@ -397,7 +451,9 @@ After modifying the parameter settings that implement Pgpool-II functionality fo When Pgpool-II starts, it records its process ID in a `pgpool.conf` file whose name is determined by the `pid_file_name` configuration parameter. The initial value of the `pid_file_name` parameter in the sample file is: - `pid_file_name = /var/run/edb/pgpool/edb-pgpool-.pid` +```text +pid_file_name = /var/run/edb/pgpool/edb-pgpool-.pid +``` Where <x.y> is the Pgpool release version. diff --git a/product_docs/docs/pgpool/1.0/04_connecting_a_client_to_pgpool-II.mdx b/product_docs/docs/pgpool/1.0/04_connecting_a_client_to_pgpool-II.mdx index a4d9bcaa4fe..d288c747112 100644 --- a/product_docs/docs/pgpool/1.0/04_connecting_a_client_to_pgpool-II.mdx +++ b/product_docs/docs/pgpool/1.0/04_connecting_a_client_to_pgpool-II.mdx @@ -10,7 +10,9 @@ legacyRedirectsGenerated: Client applications should connect directly to the Pgpool-II listener port on the Pgpool-II host. For example, to connect to the `edb` database (while using Pgpool-II functionality), enter: - `psql -d edb -U enterprisedb -h localhost -p 9999` +```txt +psql -d edb -U enterprisedb -h localhost -p 9999 +``` When invoked at the `psql` prompt, the following `SHOW` command keywords display Pgpool-II information: From e7e294ba57201ccaba139958dcaf0949cd073c3a Mon Sep 17 00:00:00 2001 From: Manjusha Vaidya Date: Wed, 9 Jun 2021 11:23:59 +0530 Subject: [PATCH 15/50] Minor_edits_June9 Former-commit-id: fb9095c5558be41388975129c68ebef6639e7daf --- .../1.0/01_installing_and_configuring_the_pgpool-II.mdx | 2 +- product_docs/docs/pgpool/1.0/02_extensions.mdx | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx b/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx index 6eb35b5045b..0afec0f086b 100644 --- a/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx +++ b/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx @@ -9,7 +9,7 @@ legacyRedirectsGenerated: !!! Note - Pgpool-II runs as a service on Linux systems. Windows systems does not support Pgpool. + Pgpool-II runs as a service on Linux systems. Pgpool-II is not supported on Windows. The following table lists the Pgpool version and their corresponding EDB Postgres Advanced Server and PostgreSQL versions. 
diff --git a/product_docs/docs/pgpool/1.0/02_extensions.mdx b/product_docs/docs/pgpool/1.0/02_extensions.mdx index 6af24286b2f..a704e1f4422 100644 --- a/product_docs/docs/pgpool/1.0/02_extensions.mdx +++ b/product_docs/docs/pgpool/1.0/02_extensions.mdx @@ -351,9 +351,9 @@ You can use the Zypper package manager to install the Pgpool-II extension on a S -### Installing Pgpool-II Extension Using Linux Installer +### Installing Pgpool-II Extension Using the Linux Graphical Installer - Graphical installers for Pgpool-II extensions are available via StackBuilder Plus (for EDB Postgres Advanced Server hosts) or Stack Builder (on PostgreSQL hosts). You can access StackBuilder Plus through your Linux start menu. Windows systems does not support it. + Graphical installers for Pgpool-II extensions are available via StackBuilder Plus (for EDB Postgres Advanced Server hosts) or Stack Builder (on PostgreSQL hosts). You can access StackBuilder Plus through your Linux start menu. It is not supported on Windows. Perform the following steps to install Pgpool-II extensions: From 1316fa7e5803a96818567bf9eb873f6e862ae8ce Mon Sep 17 00:00:00 2001 From: Manjusha Vaidya Date: Fri, 11 Jun 2021 14:34:25 +0530 Subject: [PATCH 16/50] June11 Former-commit-id: 577727ae97b72d481a72c8b67e9e87619d408e6f --- product_docs/docs/pgpool/1.0/index.mdx | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/product_docs/docs/pgpool/1.0/index.mdx b/product_docs/docs/pgpool/1.0/index.mdx index e365dea4148..a820cf3d9c7 100644 --- a/product_docs/docs/pgpool/1.0/index.mdx +++ b/product_docs/docs/pgpool/1.0/index.mdx @@ -37,10 +37,8 @@ The term Postgres refers to either PostgreSQL or EDB Postgres Advanced Server. **What's New** -Upstream merge with Pgpool-II 4.2.3 - -1. Upstream merge with Pgpool-II [4.2.3](https://www.pgpool.net/docs/42/en/html/release-4-2-3.html). -2. 4.2 is a major release. For more details on migrating to 4.2, see the [Migration Section](https://www.pgpool.net/docs/42/en/html/release-4-2-0.html#AEN10359). +- Upstream merge with Pgpool-II [4.2.3](https://www.pgpool.net/docs/42/en/html/release-4-2-3.html). +- 4.2 is a major release. For more details on migrating from earlier versions to version 4.2, see the [Migration Section](https://www.pgpool.net/docs/42/en/html/release-4-2-0.html#AEN10359).
From 64f934df152aae0fad51494a91f5dc6b276061bf Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Wed, 16 Jun 2021 11:49:08 +0530 Subject: [PATCH 17/50] Copying the previous release content Former-commit-id: 4eaec5bd8b7fece755fe4582548a18e51912e201 --- .../mongo_data_adapter/5.2.9/01_whats_new.mdx | 10 + .../5.2.9/02_requirements_overview.mdx | 23 + .../5.2.9/03_architecture_overview.mdx | 9 + .../04_installing_the_mongo_data_adapter.mdx | 338 ++++++++++++++ .../05_updating_the_mongo_data_adapter.mdx | 37 ++ .../5.2.9/06_features_of_mongo_fdw.mdx | 71 +++ .../07_configuring_the_mongo_data_adapter.mdx | 437 ++++++++++++++++++ ...8_example_using_the_mongo_data_adapter.mdx | 113 +++++ .../09_identifying_data_adapter_version.mdx | 19 + .../5.2.9/10_limitations.mdx | 11 + ...11_uninstalling_the_mongo_data_adapter.mdx | 27 ++ .../5.2.9/images/EDB_logo.png | 3 + .../ambari_administrative_interface.png | 3 + .../5.2.9/images/edb_logo.svg | 19 + .../5.2.9/images/installation_complete.png | 3 + .../installation_wizard_welcome_screen.png | 3 + .../images/mongo_server_with_postgres.png | 3 + .../progress_as_the_servers_restart.png | 3 + .../5.2.9/images/restart_the_server.png | 3 + .../5.2.9/images/setup_wizard_ready.png | 3 + .../specify_an_installation_directory.png | 3 + ...the_installation_wizard_welcome_screen.png | 3 + .../docs/mongo_data_adapter/5.2.9/index.mdx | 15 + 23 files changed, 1159 insertions(+) create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/02_requirements_overview.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/03_architecture_overview.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/06_features_of_mongo_fdw.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/07_configuring_the_mongo_data_adapter.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/08_example_using_the_mongo_data_adapter.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/09_identifying_data_adapter_version.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/10_limitations.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/11_uninstalling_the_mongo_data_adapter.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/images/EDB_logo.png create mode 100755 product_docs/docs/mongo_data_adapter/5.2.9/images/ambari_administrative_interface.png create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/images/edb_logo.svg create mode 100755 product_docs/docs/mongo_data_adapter/5.2.9/images/installation_complete.png create mode 100755 product_docs/docs/mongo_data_adapter/5.2.9/images/installation_wizard_welcome_screen.png create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/images/mongo_server_with_postgres.png create mode 100755 product_docs/docs/mongo_data_adapter/5.2.9/images/progress_as_the_servers_restart.png create mode 100755 product_docs/docs/mongo_data_adapter/5.2.9/images/restart_the_server.png create mode 100755 product_docs/docs/mongo_data_adapter/5.2.9/images/setup_wizard_ready.png create mode 100755 product_docs/docs/mongo_data_adapter/5.2.9/images/specify_an_installation_directory.png create mode 100755 
product_docs/docs/mongo_data_adapter/5.2.9/images/the_installation_wizard_welcome_screen.png create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/index.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx new file mode 100644 index 00000000000..864a831e6ff --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx @@ -0,0 +1,10 @@ +--- +title: "What’s New" +--- + + + +The following features are added to create MongoDB Foreign Data Wrapper `5.2.8`: + +- Support for EDB Postgres Advanced Server 13. +- Support for Ubuntu 20.04 LTS platform. diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/02_requirements_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/02_requirements_overview.mdx new file mode 100644 index 00000000000..6078203da1c --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/02_requirements_overview.mdx @@ -0,0 +1,23 @@ +--- +title: "Requirements Overview" +--- + +## Supported Versions + +The MongoDB Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9.6 and above. + +## Supported Platforms + +The MongoDB Foreign Data Wrapper is supported on the following platforms: + +**Linux x86-64** + +> - RHEL 8.x/7.x +> - CentOS 8.x/7.x +> - OL 8.x/7.x +> - Ubuntu 20.04/18.04 LTS +> - Debian 10.x/9.x + +**Linux on IBM Power8/9 (LE)** + +> - RHEL 7.x diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/03_architecture_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/03_architecture_overview.mdx new file mode 100644 index 00000000000..3e48035f7a0 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/03_architecture_overview.mdx @@ -0,0 +1,9 @@ +--- +title: "Architecture Overview" +--- + + + +The MongoDB data wrapper provides an interface between a MongoDB server and a Postgres database. It transforms a Postgres statement (`SELECT`/`INSERT`/`DELETE`/`UPDATE`) into a query that is understood by the MongoDB database. + +![Using MongoDB FDW with Postgres](images/mongo_server_with_postgres.png) diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx new file mode 100644 index 00000000000..b1450a6c26c --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx @@ -0,0 +1,338 @@ +--- +title: "Installing the MongoDB Foreign Data Wrapper" +--- + + + +The MongoDB Foreign Data Wrapper can be installed with an RPM package. During the installation process, the installer will satisfy software prerequisites. + + + +## Installing the MongoDB Foreign Data Wrapper using an RPM Package + +You can install the MongoDB Foreign Data Wrapper using an RPM package on the following platforms: + +- [RHEL 7](#rhel7) +- [RHEL 8](#rhel8) +- [CentOS 7](#centos7) +- [CentOS 8](#centos8) + + + +### On RHEL 7 + +Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` + +Enable the optional, extras, and HA repositories: + + ```text + subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" + ``` + +You must also have credentials that allow access to the EDB repository. 
For information about requesting credentials, visit: + + + +After receiving your repository credentials: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install `edb-as-mongo_fdw`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file to provide your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. + + ```text + [edb] + name=EnterpriseDB RPMs $releasever - $basearch + baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY + ``` + +**Installing the MongoDB Foreign Data Wrapper** + +After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: + + ``` + yum install edb-as-mongo_fdw + ``` + +where `xx` is the server version number. + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + + + +### On RHEL 8 + +Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + + ```text + dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + ``` + +Enable the `codeready-builder-for-rhel-8-\*-rpms` repository: + + ```text + ARCH=$( /bin/arch ) + subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" + ``` + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + +After receiving your repository credentials: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install `edb-as-mongo_fdw`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + dnf -y https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file to provide your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. 
+ + ```text + [edb] + name=EnterpriseDB RPMs $releasever - $basearch + baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY + ``` + +**Installing the MongoDB Foreign Data Wrapper** + +After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: + + ```text + dnf install edb-as-mongo_fdw + ``` + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + + + +### On CentOS 7 + +Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` + +!!! Note + You may need to enable the `[extras]` repository definition in the `CentOS-Base.repo` file (located in `/etc/yum.repos.d`). + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + +After receiving your repository credentials you can: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install `edb-as-mongo_fdw`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file to provide your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. + + ```text + [edb] + name=EnterpriseDB RPMs $releasever - $basearch + baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY + ``` + +**Installing the MongoDB Foreign Data Wrapper** + +After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: + + ```text + yum install edb-as-mongo_fdw + ``` + +where `xx` is the server version number. + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. 
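As a concrete illustration, assuming EDB Postgres Advanced Server 13 (that is, `xx` is `13`), the installation and a quick follow-up check might look like the following; the package name and extension path shown here are illustrative and depend on the server version installed on your host:

```text
# Install the MongoDB Foreign Data Wrapper package for EDB Postgres Advanced Server 13:
yum install edb-as13-mongo_fdw

# Confirm that the package is installed and that the extension files are available to the server:
rpm -q edb-as13-mongo_fdw
ls /usr/edb/as13/share/extension/mongo_fdw*
```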
+ + + +### On CentOS 8 + +Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + + ```text + dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + ``` + +Enable the `PowerTools` repository: + + ```text + dnf config-manager --set-enabled PowerTools + ``` + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + +After receiving your repository credentials: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install `edb-as-mongo_fdw`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file to provide your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. + + ```text + [edb] + name=EnterpriseDB RPMs $releasever - $basearch + baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY + ``` + +**Installing the MongoDB Foreign Data Wrapper** + +After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: + + ```text + dnf install edb-as-mongo_fdw + ``` + +where `xx` is the server version number. + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + +## Installing the MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host + +To install the MongoDB Foreign Data Wrapper on a Debian or Ubuntu host, you must have credentials that allow access to the EDB repository. To request credentials for the repository, visit the [EDB website](https://www.enterprisedb.com/repository-access-request/). + +The following steps will walk you through using the EDB apt repository to install a Debian package. When using the commands, replace the `username` and `password` with the credentials provided by EDB. + +1. Assume superuser privileges: + + ```text + sudo su – + ``` + +2. Configure the EDB repository: + + On Debian 9 and Ubuntu: + + ```text + sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + ``` + + On Debian 10: + + 1. Set up the EDB repository: + + ```text + sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + ``` + + 1. 
Substitute your EDB credentials for the `username` and `password` in the following command: + + ```text + sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' + ``` + +3. Add support to your system for secure APT repositories: + + ```text + apt-get install apt-transport-https + ``` + +4. Add the EDB signing key: + + ```text + wget -q -O - https://:@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - + ``` + +5. Update the repository metadata: + + ```text + apt-get update + ``` + +6. Install the Debian package: + + ```text + apt-get install edb-as-mongo-fdw + ``` + +where `xx` is the server version number. diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx new file mode 100644 index 00000000000..aa29e7403f9 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx @@ -0,0 +1,37 @@ +--- +title: "Updating the MongoDB Foreign Data Wrapper" +--- + + + +**Updating an RPM Installation** + +If you have an existing RPM installation of MongoDB Foreign Data Wrapper, you can use yum or dnf to upgrade your repository configuration file and update to a more recent product version. To update the `edb.repo` file, assume superuser privileges and enter: + +- On RHEL or CentOS 7: + + > `yum upgrade edb-repo` + +- On RHEL or CentOS 8: + + > `dnf upgrade edb-repo` + +yum or dnf will update the `edb.repo` file to enable access to the current EDB repository, configured to connect with the credentials specified in your `edb.repo` file. Then, you can use yum or dnf to upgrade any installed packages: + +- On RHEL or CentOS 7: + + > `yum upgrade edb-as-mongo_fdw` + +- On RHEL or CentOS 8: + + > `dnf upgrade edb-as-mongo_fdw` + + where `xx` is the server version number. + +**Updating MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host** + +To update MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host, use the following command: + +> `apt-get --only-upgrade install edb-as-mongo-fdw edb-libmongoc` +> +> where `xx` is the server version number. diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/06_features_of_mongo_fdw.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/06_features_of_mongo_fdw.mdx new file mode 100644 index 00000000000..aec81845037 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/06_features_of_mongo_fdw.mdx @@ -0,0 +1,71 @@ +--- +title: "Features of the MongoDB Foreign Data Wrapper" +--- + + + +The key features of the MongoDB Foreign Data Wrapper are listed below: + +## Writable FDW + +The MongoDB Foreign Data Wrapper allows you to modify data on a MongoDB server. Users can `INSERT`, `UPDATE` and `DELETE` data in the remote MongoDB collections by inserting, updating and deleting data locally in foreign tables. See also: + +[Example: Using the MongoDB Foreign Data Wrapper](08_example_using_the_mongo_data_adapter/#example_using_the_mongo_data_adapter) + +[Data Type Mappings](07_configuring_the_mongo_data_adapter/#data-type-mappings) + +## Where Clause Push-down + +MongoDB Foreign Data Wrapper allows the push-down of `WHERE` clause only when clauses include comparison expressions that have a column and a constant as arguments. WHERE clause push-down is not supported where constant is an array. + +## Connection Pooling + +Mongo_FDW establishes a connection to a foreign server during the first query that uses a foreign table associated with the foreign server. 
This connection is kept and reused for subsequent queries in the same session. + +## Automated Cleanup + +The MongoDB Foreign Data Wrapper allows the cleanup of foreign tables in a single operation using the `DROP EXTENSION` command. This feature is especially useful when a foreign table has been created for a temporary purpose. The syntax of a `DROP EXTENSION` command is: + +> `DROP EXTENSION mongo_fdw CASCADE;` + +For more information, see [DROP EXTENSION](https://www.postgresql.org/docs/current/sql-dropextension.html). + +## Full Document Retrieval + +This feature allows to retrieve documents along with all their fields from collection without any knowledge of the fields in BSON document available in MongoDB's collection. Those retrieved documents are in the JSON format. + +You can retrieve all available fields in a collection residing in MongoDB Foreign Data Wrapper as explained in the following example: + +**Example**: + +The collection in MongoDB Foreign Data Wrapper: + +```text +> db.warehouse.find(); +{ "_id" : ObjectId("58a1ebbaf543ec0b90545859"), "warehouse_id" : 1, "warehouse_name" : "UPS", "warehouse_created" : ISODate("2014-12-12T07:12:10Z") } +{ "_id" : ObjectId("58a1ebbaf543ec0b9054585a"), "warehouse_id" : 2, "warehouse_name" : "Laptop", "warehouse_created" : ISODate("2015-11-11T08:13:10Z") } +``` + +Steps for retrieving the document: + +1. Create foreign table with a column name `__doc`. The type of the column could be json, jsonb, text or varchar. + +```text +CREATE FOREIGN TABLE test_json(__doc json) SERVER mongo_server OPTIONS (database 'testdb', collection 'warehouse'); +``` + +1. Retrieve the document. + +```text +SELECT * FROM test_json ORDER BY __doc::text COLLATE "C"; +``` + +The output: + +```text +edb=#SELECT * FROM test_json ORDER BY __doc::text COLLATE "C"; + __doc --------------------------------------------------------------------------------------------------------------------------------------------------------- +{ "_id" : { "$oid" : "58a1ebbaf543ec0b90545859" }, "warehouse_id" : 1, "warehouse_name" : "UPS", "warehouse_created" : { "$date" : 1418368330000 } } +{ "_id" : { "$oid" : "58a1ebbaf543ec0b9054585a" }, "warehouse_id" : 2, "warehouse_name" : "Laptop", "warehouse_created" : { "$date" : 1447229590000 } } +(2 rows) +``` diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/07_configuring_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/07_configuring_the_mongo_data_adapter.mdx new file mode 100644 index 00000000000..b04ccf9e345 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/07_configuring_the_mongo_data_adapter.mdx @@ -0,0 +1,437 @@ +--- +title: "Configuring the MongoDB Foreign Data Wrapper" +--- + + + +Before using the MongoDB Foreign Data Wrapper, you must: + +> 1. Use the [CREATE EXTENSION](#create-extension) command to create the MongoDB Foreign Data Wrapper extension on the Postgres host. +> 2. Use the [CREATE SERVER](#create-server) command to define a connection to the MongoDB server. +> 3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. +> 4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Postgres database that corresponds to a database that resides on the MongoDB cluster. + + + +## CREATE EXTENSION + +Use the `CREATE EXTENSION` command to create the `mongo_fdw` extension. 
To invoke the command, use your client of choice (for example, psql) to connect to the Postgres database from which you will be querying the MongoDB server, and invoke the command: + +```text +CREATE EXTENSION [IF NOT EXISTS] mongo_fdw [WITH] [SCHEMA schema_name]; +``` + +**Parameters** + +`IF NOT EXISTS` + +> Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. + +`schema_name` + +> Optionally specify the name of the schema in which to install the extension's objects. + +**Example** + +The following command installs the MongoDB foreign data wrapper: + +> `CREATE EXTENSION mongo_fdw;` + +For more information about using the foreign data wrapper `CREATE EXTENSION` command, see: + +> . + + + +## CREATE SERVER + +Use the `CREATE SERVER` command to define a connection to a foreign server. The syntax is: + +```text +CREATE SERVER server_name FOREIGN DATA WRAPPER mongo_fdw + [OPTIONS (option 'value' [, ...])] +``` + +The role that defines the server is the owner of the server; use the `ALTER SERVER` command to reassign ownership of a foreign server. To create a foreign server, you must have `USAGE` privilege on the foreign-data wrapper specified in the `CREATE SERVER` command. + +**Parameters** + +`server_name` + +> Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. + +`FOREIGN_DATA_WRAPPER` + +> Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `mongo_fdw` foreign data wrapper when connecting to the cluster. + +`OPTIONS` + +> Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server object. You can include: + +| **Option** | **Description** | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| address | The address or hostname of the Mongo server. The default value is `127.0.0.1`. | +| port | The port number of the Mongo Server. Valid range is 0 to 65535. The default value is `27017`. | +| authentication_database | The database against which user will be authenticated. This option is only valid with password based authentication. | +| ssl | Requests an authenticated, encrypted SSL connection. By default, the value is set to `false`. Set the value to `true` to enable ssl. See to understand the options. | +| pem_file | SSL option | +| pem_pwd | SSL option. | +| ca_file | SSL option | +| ca_dir | SSL option | +| crl_file | SSL option | +| weak_cert_validation | SSL option | + +**Example** + +The following command creates a foreign server named `mongo_server` that uses the `mongo_fdw` foreign data wrapper to connect to a host with an IP address of `127.0.0.1`: + +```text +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw OPTIONS (host '127.0.0.1', port '27017'); +``` + +The foreign server uses the default port (`27017`) for the connection to the client on the MongoDB cluster. 
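If the MongoDB server requires password authentication against a specific database, or an encrypted connection, the additional options listed in the table above can be supplied in the same `OPTIONS` clause. A sketch follows; the address, authentication database, and certificate path are illustrative values:

```text
CREATE SERVER mongo_server_ssl FOREIGN DATA WRAPPER mongo_fdw
    OPTIONS (address '10.0.0.5', port '27017', authentication_database 'admin', ssl 'true', ca_file '/etc/ssl/certs/mongo-ca.pem');
```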
+ +For more information about using the `CREATE SERVER` command, see: + +> + + + +## CREATE USER MAPPING + +Use the `CREATE USER MAPPING` command to define a mapping that associates a Postgres role with a foreign server: + +```text +CREATE USER MAPPING FOR role_name SERVER server_name + [OPTIONS (option 'value' [, ...])]; +``` + +You must be the owner of the foreign server to create a user mapping for that server. + +**Parameters** + +`role_name` + +> Use `role_name` to specify the role that will be associated with the foreign server. + +`server_name` + +> Use `server_name` to specify the name of the server that defines a connection to the MongoDB cluster. + +`OPTIONS` + +> Use the `OPTIONS` clause to specify connection information for the foreign server. +> +> `username`: the name of the user on the MongoDB server. +> +> `password`: the password associated with the username. + +**Example** + +The following command creates a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mongo_server`: + +> `CREATE USER MAPPING FOR enterprisedb SERVER mongo_server;` + +If the database host uses secure authentication, provide connection credentials when creating the user mapping: + +```text +CREATE USER MAPPING FOR enterprisedb SERVER mongo_server OPTIONS (username 'mongo_user', password 'mongo_pass'); +``` + +The command creates a user mapping for a role named `enterprisedb` that is associated with a server named `mongo_server`. When connecting to the MongoDB server, the server will authenticate as `mongo_user`, and provide a password of `mongo_pass`. + +For detailed information about the `CREATE USER MAPPING` command, see: + +> + + + +## CREATE FOREIGN TABLE + +A foreign table is a pointer to a table that resides on the MongoDB host. Before creating a foreign table definition on the Postgres server, connect to the MongoDB server and create a collection; the columns in the table will map to columns in a table on the Postgres server. Then, use the `CREATE FOREIGN TABLE` command to define a table on the Postgres server with columns that correspond to the collection that resides on the MongoDB host. The syntax is: + +```text +CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name ( [ + { column_name data_type [ OPTIONS ( option 'value' [, ... ] ) ] [ COLLATE collation ] [ column_constraint [ ... ] ] + | table_constraint } + [, ... ] +] ) +[ INHERITS ( parent_table [, ... ] ) ] + SERVER server_name [ OPTIONS ( option 'value' [, ... ] ) ] +``` + +where `column_constraint` is: + +```text +[ CONSTRAINT constraint_name ] +{ NOT NULL | NULL | CHECK (expr) [ NO INHERIT ] | DEFAULT default_expr } +``` + +and `table_constraint` is: + +```text +[ CONSTRAINT constraint_name ] CHECK (expr) [ NO INHERIT ] +``` + +**Parameters** + +`table_name` + +> Specifies the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. + +`IF NOT EXISTS` + +> Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. + +`column_name` + +> Specifies the name of a column in the new table; each column should correspond to a column described on the MongoDB server. + +`data_type` + +> Specifies the data type of the column; when possible, specify the same data type for each column on the Postgres server and the MongoDB server. 
If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the MongoDB server. If the server cannot identify a compatible data type, it will return an error. + +`COLLATE collation` + +> Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. + +`INHERITS (parent_table [, ... ])` + +> Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. + +`CONSTRAINT constraint_name` + +> Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. + +`NOT NULL` + +> Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. + +`NULL` + +> Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. + +`CHECK (expr) [NO INHERIT]` + +> Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. +> +> A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. +> +> Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. + +`DEFAULT default_expr` + +> Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. + +`SERVER server_name [OPTIONS (option 'value' [, ... ] ) ]` + +> To create a foreign table that will allow you to query a table that resides on a MongoDB file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the MongoDB data adapter. +> +> Use the `OPTIONS` clause to specify the following `options` and their corresponding values: + +| option | value | +| ---------- | --------------------------------------------------------------------------------- | +| database | The name of the database to query. The default value is `test`. | +| collection | The name of the collection to query. The default value is the foreign table name. | + +**Example** + +To use data that is stored on MongoDB server, you must create a table on the Postgres host that maps the columns of a MongoDB collection to the columns of a Postgres table. For example, for a MongoDB collection with the following definition: + +```text +db.warehouse.find +( + { + "warehouse_id" : 1 + } +).pretty() +{ + "_id" : ObjectId("53720b1904864dc1f5a571a0"), + "warehouse_id" : 1, + "warehouse_name" : "UPS", + "warehouse_created" : ISODate("2014-12-12T07:12:10Z") +} +``` + +You should execute a command on the Postgres server that creates a comparable table on the Postgres server: + +```text +CREATE FOREIGN TABLE warehouse +( + _id NAME, + warehouse_id INT, + warehouse_name TEXT, + warehouse_created TIMESTAMPZ +) +SERVER mongo_server +OPTIONS (database 'db', collection 'warehouse'); +``` + +The first column of the table must be `_id` of the type `name`. 
+
+Include the `SERVER` clause to specify the foreign server, and the `OPTIONS` clause to specify the name of the MongoDB database (`db`) and the collection (`warehouse`) that corresponds to the table on the Postgres server.
+
+For more information about using the `CREATE FOREIGN TABLE` command, see:
+
+>
+
+!!! Note
+    The MongoDB Foreign Data Wrapper supports write operations (`INSERT`, `UPDATE`, and `DELETE`) on foreign tables.
+
+
+
+### Data Type Mappings
+
+When using the foreign data wrapper, you must create a table on the Postgres server that mirrors the collection that resides on the MongoDB server. The MongoDB data wrapper will automatically convert the following MongoDB data types to the target Postgres type:
+
+| **MongoDB (BSON Type)** | **Postgres**                                 |
+| ----------------------- | -------------------------------------------- |
+| ARRAY                   | JSON                                         |
+| BOOL                    | BOOL                                         |
+| BINARY                  | BYTEA                                        |
+| DATE_TIME               | DATE/TIMESTAMP/TIMESTAMPTZ                   |
+| DOCUMENT                | JSON                                         |
+| DOUBLE                  | FLOAT/FLOAT4/FLOAT8/DOUBLE PRECISION/NUMERIC |
+| INT32                   | SMALLINT/INT2/INT/INTEGER/INT4               |
+| INT64                   | BIGINT/INT8                                  |
+| OID                     | NAME                                         |
+| UTF8                    | BPCHAR/VARCHAR/CHARACTER VARYING/TEXT        |
+
+## DROP EXTENSION
+
+Use the `DROP EXTENSION` command to remove an extension. To invoke the command, use your client of choice (for example, psql) to connect to the Postgres database from which you will be dropping the MongoDB server, and run the command:
+
+```text
+DROP EXTENSION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ];
+```
+
+**Parameters**
+
+`IF EXISTS`
+
+> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the specified name doesn't exist.
+
+`name`
+
+> Specify the name of the installed extension.
+>
+> `CASCADE`
+>
+> Automatically drop objects that depend on the extension, and in turn all objects that depend on those objects.
+>
+> `RESTRICT`
+>
+> Refuse to drop the extension if any objects depend on it, other than its own member objects and other extensions listed in the same `DROP` command.
+
+**Example**
+
+The following command removes the extension from the existing database:
+
+> `DROP EXTENSION mongo_fdw;`
+
+For more information about using the foreign data wrapper `DROP EXTENSION` command, see:
+
+> .
+
+## DROP SERVER
+
+Use the `DROP SERVER` command to remove a connection to a foreign server. The syntax is:
+
+```text
+DROP SERVER [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]
+```
+
+Only the owner of a foreign server can drop it; use the `ALTER SERVER` command to reassign ownership of a foreign server. To drop a foreign server, you must have `USAGE` privilege on the foreign-data wrapper specified in the `DROP SERVER` command.
+
+**Parameters**
+
+`IF EXISTS`
+
+> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a server with the specified name doesn't exist.
+
+`name`
+
+> Specify the name of the installed server.
+>
+> `CASCADE`
+>
+> Automatically drop objects that depend on the server, and in turn all objects that depend on those objects.
+>
+> `RESTRICT`
+>
+> Refuse to drop the server if any objects depend on it.
+
+**Example**
+
+The following command removes a foreign server named `mongo_server`:
+
+> `DROP SERVER mongo_server;`
+
+For more information about using the `DROP SERVER` command, see:
+
+>
+
+## DROP USER MAPPING
+
+Use the `DROP USER MAPPING` command to remove a mapping that associates a Postgres role with a foreign server. You must be the owner of the foreign server to remove a user mapping for that server.
+ +```text +DROP USER MAPPING [ IF EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } SERVER server_name; +``` + +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the user mapping doesn't exist. + +`user_name` + +> Specify the user name of the mapping. + +`server_name` + +> Specify the name of the server that defines a connection to the MongoDB cluster. + +**Example** + +The following command drops a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mongo_server`: + +> `DROP USER MAPPING FOR enterprisedb SERVER mongo_server;` + +For detailed information about the `DROP USER MAPPING` command, see: + +> + +## DROP FOREIGN TABLE + +A foreign table is a pointer to a table that resides on the MongoDB host. Use the `DROP FOREIGN TABLE` command to remove a foreign table. Only the owner of the foreign table can drop it. + +```text +DROP FOREIGN TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +``` + +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the foreign table with the specified name doesn't exists. + +`name` + +> Specify the name of the foreign table. + +`CASCADE` + +> Automatically drop objects that depend on the foreign table. It should drop all the other dependent objects too. + +`RESTRICT` + +> Do not allow to drop foreign table if any objects are dependent on it. + +**Example** + +```text +DROP FOREIGN TABLE warehouse; +``` + +For more information about using the `DROP FOREIGN TABLE` command, see: + +> diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/08_example_using_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/08_example_using_the_mongo_data_adapter.mdx new file mode 100644 index 00000000000..38f2f35b122 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/08_example_using_the_mongo_data_adapter.mdx @@ -0,0 +1,113 @@ +--- +title: "Example: Using the MongoDB Foreign Data Wrapper" +--- + + + +Before using the MongoDB foreign data wrapper, you must connect to your database with a client application. The following examples demonstrate using the wrapper with the psql client. After connecting to psql, you can follow the steps in the example below: + +```text +-- load extension first time after install +CREATE EXTENSION mongo_fdw; + +-- create server object +CREATE SERVER mongo_server + FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address '127.0.0.1', port '27017'); + +-- create user mapping +CREATE USER MAPPING FOR enterprisedb + SERVER mongo_server + OPTIONS (username 'mongo_user', password 'mongo_pass'); + +-- create foreign table +CREATE FOREIGN TABLE warehouse + ( + _id name, + warehouse_id int, + warehouse_name text, + warehouse_created timestamptz + ) + SERVER mongo_server + OPTIONS (database 'db', collection 'warehouse'); + +-- Note: first column of the table must be "_id" of type "name". 
+ +-- select from table +SELECT * FROM warehouse WHERE warehouse_id = 1; + _id | warehouse_id | warehouse_name | warehouse_created +--------------------------+--------------+----------------+--------------------------- + 53720b1904864dc1f5a571a0 | 1 | UPS | 2014-12-12 12:42:10+05:30 +(1 row) + +db.warehouse.find +( + { + "warehouse_id" : 1 + } +).pretty() +{ + "_id" : ObjectId("53720b1904864dc1f5a571a0"), + "warehouse_id" : 1, + "warehouse_name" : "UPS", + "warehouse_created" : ISODate("2014-12-12T07:12:10Z") +} + +-- insert row in table +INSERT INTO warehouse VALUES (0, 2, 'Laptop', '2015-11-11T08:13:10Z'); + +db.warehouse.insert +( + { + "warehouse_id" : NumberInt(2), + "warehouse_name" : "Laptop", + "warehouse_created" : ISODate("2015-11-11T08:13:10Z") + } +) + +-- delete row from table +DELETE FROM warehouse WHERE warehouse_id = 2; + +db.warehouse.remove +( + { + "warehouse_id" : 2 + } +) + +-- update a row of table +UPDATE warehouse SET warehouse_name = 'UPS_NEW' WHERE warehouse_id = 1; + +db.warehouse.update +( + { + "warehouse_id" : 1 + }, + { + "warehouse_id" : 1, + "warehouse_name" : "UPS_NEW", + "warehouse_created" : ISODate("2014-12-12T07:12:10Z") + } +) + +-- explain a table +EXPLAIN SELECT * FROM warehouse WHERE warehouse_id = 1; + QUERY PLAN +----------------------------------------------------------------- + Foreign Scan on warehouse (cost=0.00..0.00 rows=1000 width=84) + Filter: (warehouse_id = 1) + Foreign Namespace: db.warehouse +(3 rows) + +-- collect data distribution statistics +ANALYZE warehouse; + +-- drop foreign table +DROP FOREIGN TABLE warehouse; + +-- drop user mapping +DROP USER MAPPING FOR enterprisedb SERVER mongo_server; + +-- drop server +DROP SERVER mongo_server; +``` diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/09_identifying_data_adapter_version.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/09_identifying_data_adapter_version.mdx new file mode 100644 index 00000000000..b1d0564acc4 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/09_identifying_data_adapter_version.mdx @@ -0,0 +1,19 @@ +--- +title: "Identifying the MongoDB Foreign Data Wrapper Version" +--- + + + +The MongoDB Foreign Data Wrapper includes a function that you can use to identify the currently installed version of the `.so` file for the data wrapper. To use the function, connect to the Postgres server, and enter: + +```text +SELECT mongo_fdw_version(); +``` + +The function returns the version number: + +```text +mongo_fdw_version +----------------- + +``` diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/10_limitations.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/10_limitations.mdx new file mode 100644 index 00000000000..acdd2f2383c --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/10_limitations.mdx @@ -0,0 +1,11 @@ +--- +title: "Limitations" +--- + + + +The following limitations apply to MongoDB Foreign Data Wrapper: + +- If the BSON document key contains uppercase letters or occurs within a nested document, MongoDB Foreign Data Wrapper requires the corresponding column names to be declared in double quotes. +- PostgreSQL limits column names to 63 characters by default. You can increase the `NAMEDATALEN` constant in `src/include/pg_config_manual.h`, compile, and re-install when column names extend beyond 63 characters. +- MongoDB Foreign Data Wrapper errors out on BSON field which is not listed in the known types (For example: byte, arrays). It throws an error: `Cannot convert BSON type to column type`. 
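+
+As an illustration of the first limitation above, the following sketch maps a hypothetical collection whose BSON key contains uppercase letters; the corresponding column name is declared in double quotes so that Postgres preserves its case:
+
+```text
+-- "warehouseName" is a hypothetical mixed-case BSON key; quoting the column
+-- name preserves the uppercase letters so it matches the key exactly.
+CREATE FOREIGN TABLE warehouse_example
+(
+    _id NAME,
+    "warehouseName" TEXT
+)
+SERVER mongo_server
+OPTIONS (database 'db', collection 'warehouse_example');
+```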
diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/11_uninstalling_the_mongo_data_adapter.mdx new file mode 100644 index 00000000000..8313284a962 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/11_uninstalling_the_mongo_data_adapter.mdx @@ -0,0 +1,27 @@ +--- +title: "Uninstalling the MongoDB Foreign Data Wrapper" +--- + + + +**Uninstalling an RPM Package** + +You can use the `yum remove` or `dnf remove` command to remove a package installed by `yum` or `dnf`. To remove a package, open a terminal window, assume superuser privileges, and enter the command: + +- On RHEL or CentOS 7: + + `yum remove edb-as-mongo_fdw` + +- On RHEL or CentOS 8: + + `dnf remove edb-as-mongo_fdw` + +Where `xx` is the server version number. + +**Uninstalling MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host** + +- To uninstall MongoDB Foreign Data Wrapper on a Debian or Ubuntu host, invoke the following command. + + `apt-get remove edb-as-mongo-fdw` + +Where `xx` is the server version number. diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/EDB_logo.png b/product_docs/docs/mongo_data_adapter/5.2.9/images/EDB_logo.png new file mode 100644 index 00000000000..f4a93cf57f5 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/images/EDB_logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07423b012a855204780fe5a2a5a1e33607304a5c3020ae4acbf3d575691dedd6 +size 12136 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/ambari_administrative_interface.png b/product_docs/docs/mongo_data_adapter/5.2.9/images/ambari_administrative_interface.png new file mode 100755 index 00000000000..d44e42a740e --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/images/ambari_administrative_interface.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4acb08665b6a1df9494f91f9ab64a8f4d0979f61947e19162f419d134e351ea +size 150222 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/edb_logo.svg b/product_docs/docs/mongo_data_adapter/5.2.9/images/edb_logo.svg new file mode 100644 index 00000000000..f24d1dfefee --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/images/edb_logo.svg @@ -0,0 +1,19 @@ + + + edb-logo-disc-dark + + + + \ No newline at end of file diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/installation_complete.png b/product_docs/docs/mongo_data_adapter/5.2.9/images/installation_complete.png new file mode 100755 index 00000000000..311d632a71e --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/images/installation_complete.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e52a4437577b7a64d7f36c4f837b9a0fab90b163b201055bd817f0e3cbaf112a +size 39463 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5.2.9/images/installation_wizard_welcome_screen.png new file mode 100755 index 00000000000..aaf582bc781 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/images/installation_wizard_welcome_screen.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85ea24919ac97d6f8ebb882da665c22e4d5c0942b8491faa5e07be8b93007b60 +size 38341 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/mongo_server_with_postgres.png b/product_docs/docs/mongo_data_adapter/5.2.9/images/mongo_server_with_postgres.png new file mode 100644 index 
00000000000..76915580c4c --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/images/mongo_server_with_postgres.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:480cdbe86e1f31a6fd03d26a86425a25d681e515e747217c0c3961cb0a36027c +size 49128 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/progress_as_the_servers_restart.png b/product_docs/docs/mongo_data_adapter/5.2.9/images/progress_as_the_servers_restart.png new file mode 100755 index 00000000000..43523c7d1ad --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/images/progress_as_the_servers_restart.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46a0feaf37642c3aa87fe8267259687dfa9c9571f1c2663297159ef98356e2fd +size 85080 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/restart_the_server.png b/product_docs/docs/mongo_data_adapter/5.2.9/images/restart_the_server.png new file mode 100755 index 00000000000..2518b46d46d --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/images/restart_the_server.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e612201379d56b4dffcfb4222ceb765532ca5d097504c1dbabdc6a812afaba9 +size 33996 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/setup_wizard_ready.png b/product_docs/docs/mongo_data_adapter/5.2.9/images/setup_wizard_ready.png new file mode 100755 index 00000000000..922e318868d --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/images/setup_wizard_ready.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ba6a1a88fe8a91b94571b57a36077fce7b3346e850a38f9bf015166ace93e36 +size 16833 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/specify_an_installation_directory.png b/product_docs/docs/mongo_data_adapter/5.2.9/images/specify_an_installation_directory.png new file mode 100755 index 00000000000..208c85c46af --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/images/specify_an_installation_directory.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dae28ab7f567617da49816514a3fa5eb6161e611c416295cfe2f829cd941f98e +size 20596 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/the_installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5.2.9/images/the_installation_wizard_welcome_screen.png new file mode 100755 index 00000000000..2da19033b0e --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/images/the_installation_wizard_welcome_screen.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fd52b490dd37c86dca15975a7dbc9bdd47c7ae4ab0912d1bf570d785c521f79 +size 33097 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx new file mode 100644 index 00000000000..5117f306aad --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx @@ -0,0 +1,15 @@ +--- +title: "MongoDB Foreign Data Wrapper Guide" +--- + +The MongoDB Foreign Data Wrapper (`mongo_fdw`) is a Postgres extension that allows you to access data that resides on a MongoDB database from EDB Postgres Advanced Server. It is a writable foreign data wrapper that you can use with Postgres functions and utilities, or in conjunction with other data that resides on a Postgres host. + +The MongoDB Foreign Data Wrapper can be installed with an RPM package. You can download an installer from the [EDB website](https://www.enterprisedb.com/software-downloads-postgres/). 
+ +This guide uses the term `Postgres` to refer to an instance of EDB Postgres Advanced Server. + +
+ +whats_new requirements_overview architecture_overview installing_the_mongo_data_adapter updating_the_mongo_data_adapter features_of_mongo_fdw configuring_the_mongo_data_adapter example_using_the_mongo_data_adapter identifying_data_adapter_version limitations uninstalling_the_mongo_data_adapter conclusion + +
From c50563684edff27f4a815ecb673186e1ead04b6f Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Wed, 16 Jun 2021 12:06:36 +0530 Subject: [PATCH 18/50] Content for upcoming Hadoop FDW release Former-commit-id: 66a723382588da70568bae45fdf11ca65aac1205 --- .../2.0.8/01_whats_new.mdx | 10 + .../2.0.8/02_requirements_overview.mdx | 25 + .../2.0.8/03_architecture_overview.mdx | 13 + .../04_supported_authentication_methods.mdx | 59 +++ .../05_installing_the_hadoop_data_adapter.mdx | 339 +++++++++++++ .../06_updating_the_hadoop_data_adapter.mdx | 39 ++ .../2.0.8/07_features_of_hdfs_fdw.mdx | 23 + ...08_configuring_the_hadoop_data_adapter.mdx | 480 ++++++++++++++++++ .../09_using_the_hadoop_data_adapter.mdx | 302 +++++++++++ .../10_identifying_data_adapter_version.mdx | 19 + ...1_uninstalling_the_hadoop_data_adapter.mdx | 29 ++ .../2.0.8/images/EDB_logo.png | 3 + .../ambari_administrative_interface.png | 3 + .../2.0.8/images/edb_logo.svg | 56 ++ ..._distributed_file_system_with_postgres.png | 3 + .../2.0.8/images/installation_complete.png | 3 + .../installation_wizard_welcome_screen.png | 3 + .../progress_as_the_servers_restart.png | 3 + .../2.0.8/images/restart_the_server.png | 3 + .../2.0.8/images/setup_wizard_ready.png | 3 + .../specify_an_installation_directory.png | 3 + ...the_installation_wizard_welcome_screen.png | 3 + .../docs/hadoop_data_adapter/2.0.8/index.mdx | 15 + 23 files changed, 1439 insertions(+) create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/02_requirements_overview.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/03_architecture_overview.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/04_supported_authentication_methods.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/06_updating_the_hadoop_data_adapter.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/07_features_of_hdfs_fdw.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/08_configuring_the_hadoop_data_adapter.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/09_using_the_hadoop_data_adapter.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/10_identifying_data_adapter_version.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/11_uninstalling_the_hadoop_data_adapter.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/images/EDB_logo.png create mode 100755 product_docs/docs/hadoop_data_adapter/2.0.8/images/ambari_administrative_interface.png create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/images/edb_logo.svg create mode 100755 product_docs/docs/hadoop_data_adapter/2.0.8/images/hadoop_distributed_file_system_with_postgres.png create mode 100755 product_docs/docs/hadoop_data_adapter/2.0.8/images/installation_complete.png create mode 100755 product_docs/docs/hadoop_data_adapter/2.0.8/images/installation_wizard_welcome_screen.png create mode 100755 product_docs/docs/hadoop_data_adapter/2.0.8/images/progress_as_the_servers_restart.png create mode 100755 product_docs/docs/hadoop_data_adapter/2.0.8/images/restart_the_server.png create mode 100755 product_docs/docs/hadoop_data_adapter/2.0.8/images/setup_wizard_ready.png create mode 100755 product_docs/docs/hadoop_data_adapter/2.0.8/images/specify_an_installation_directory.png create mode 100755 
product_docs/docs/hadoop_data_adapter/2.0.8/images/the_installation_wizard_welcome_screen.png create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx new file mode 100644 index 00000000000..ca79f3e6a32 --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx @@ -0,0 +1,10 @@ +--- +title: "What’s New" +--- + + + +The following features are added to create Hadoop Foreign Data Wrapper `2.0.7`: + +- Support for EDB Postgres Advanced Server 13. +- Support for Ubuntu 20.04 LTS platform. diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/02_requirements_overview.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/02_requirements_overview.mdx new file mode 100644 index 00000000000..afaf5d7b204 --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/02_requirements_overview.mdx @@ -0,0 +1,25 @@ +--- +title: "Requirements Overview" +--- + +## Supported Versions + +The Hadoop Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9.6 and above. + +## Supported Platforms + +The Hadoop Foreign Data Wrapper is supported on the following platforms: + +**Linux x86-64** + +> - RHEL 8.x and 7.x +> - CentOS 8.x and 7.x +> - OL 8.x and 7.x +> - Ubuntu 20.04 and 18.04 LTS +> - Debian 10.x and 9.x + +**Linux on IBM Power8/9 (LE)** + +> - RHEL 7.x + +The Hadoop Foreign Data Wrapper supports use of the Hadoop file system using a HiveServer2 interface or Apache Spark using the Spark Thrift Server. diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/03_architecture_overview.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/03_architecture_overview.mdx new file mode 100644 index 00000000000..87c8fb6d024 --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/03_architecture_overview.mdx @@ -0,0 +1,13 @@ +--- +title: "Architecture Overview" +--- + + + +Hadoop is a framework that allows you to store a large data set in a distributed file system. + +The Hadoop data wrapper provides an interface between a Hadoop file system and a Postgres database. The Hadoop data wrapper transforms a Postgres `SELECT` statement into a query that is understood by the HiveQL or Spark SQL interface. + +![Using a Hadoop distributed file system with Postgres](images/hadoop_distributed_file_system_with_postgres.png) + +When possible, the Foreign Data Wrapper asks the Hive or Spark server to perform the actions associated with the `WHERE` clause of a `SELECT` statement. Pushing down the `WHERE` clause improves performance by decreasing the amount of data moving across the network. diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/04_supported_authentication_methods.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/04_supported_authentication_methods.mdx new file mode 100644 index 00000000000..24377cbadda --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/04_supported_authentication_methods.mdx @@ -0,0 +1,59 @@ +--- +title: "Supported Authentication Methods" +--- + + + +The Hadoop Foreign Data Wrapper supports `NOSASL` and `LDAP` authentication modes. To use `NOSASL`, do not specify any `OPTIONS` while creating user mapping. For `LDAP` authentication mode, specify `username` and `password` in `OPTIONS` while creating user mapping. 
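+
+For example, a minimal sketch of both forms, using the `hdfs_server` foreign server and the `enterprisedb` role that appear later in this guide (the LDAP credentials shown are placeholders):
+
+```text
+-- NOSASL mode: create the user mapping without an OPTIONS clause.
+CREATE USER MAPPING FOR enterprisedb SERVER hdfs_server;
+
+-- LDAP mode: supply the LDAP user name and password as options.
+CREATE USER MAPPING FOR enterprisedb SERVER hdfs_server
+    OPTIONS (username 'alice', password '1safepwd');
+```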
+ +## Using LDAP Authentication + +When using the Hadoop Foreign Data Wrapper with `LDAP` authentication, you must first configure the `Hive Server` or `Spark Server` to use LDAP authentication. The configured server must provide a `hive-site.xml` file that includes the connection details for the LDAP server. For example: + +```text + + hive.server2.authentication + LDAP + + Expects one of [nosasl, none, ldap, kerberos, pam, custom]. + Client authentication types. + NONE: no authentication check + LDAP: LDAP/AD based authentication + KERBEROS: Kerberos/GSSAPI authentication + CUSTOM: Custom authentication provider + (Use with property hive.server2.custom.authentication.class) + PAM: Pluggable authentication module + NOSASL: Raw transport + + + + hive.server2.authentication.ldap.url + ldap://localhost + LDAP connection URL + + + hive.server2.authentication.ldap.baseDN + ou=People,dc=itzgeek,dc=local + LDAP base DN + +``` + +Then, when starting the hive server, include the path to the `hive-site.xml` file in the command. For example: + +```text +./hive --config path_to_hive-site.xml_file --service hiveServer2 +``` + +Where *path_to_hive-site.xml_file* specifies the complete path to the `hive‑site.xml` file. + +When creating the user mapping, you must provide the name of a registered LDAP user and the corresponding password as options. For details, see [Create User Mapping](08_configuring_the_hadoop_data_adapter/#create-user-mapping). + + + +## Using NOSASL Authentication + +When using `NOSASL` authentication with the Hadoop Foreign Data Wrapper, set the authorization to `None`, and the authentication method to `NOSASL` on the `Hive Server` or `Spark Server`. For example, if you start the `Hive Server` at the command line, include the `hive.server2.authentication` configuration parameter in the command: + +```text +hive --service hiveserver2 --hiveconf hive.server2.authentication=NOSASL +``` diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx new file mode 100644 index 00000000000..854ef80da63 --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx @@ -0,0 +1,339 @@ +--- +title: "Installing the Hadoop Foreign Data Wrapper" +--- + + + +The Hadoop Foreign Data Wrapper can be installed with an RPM package. During the installation process, the installer will satisfy software prerequisites. + + + +## Installing the Hadoop Foreign Data Wrapper using an RPM Package + +You can install the Hadoop Foreign Data Wrapper using an RPM package on the following platforms: + +- [RHEL 7](#rhel7) +- [RHEL 8](#rhel8) +- [CentOS 7](#centos7) +- [CentOS 8](#centos8) + + + +### On RHEL 7 + +Before installing the Hadoop Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` + +Enable the optional, extras, and HA repositories: + + ```text + subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" + ``` + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + +After receiving your repository credentials you can: + +1. Create the repository configuration file. +2. 
Modify the file, providing your user name and password. +3. Install `edb-as-hdfs_fdw`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file, providing your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. + + ```text + [edb] + name=EnterpriseDB RPMs $releasever - $basearch + baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY + ``` + +**Installing Hadoop Foreign Data Wrapper** + +After saving your changes to the configuration file, use the following commands to install the Hadoop Foreign Data Wrapper: + + ``` + yum install edb-as-hdfs_fdw + ``` + +where `xx` is the server version number. + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + + + +### On RHEL 8 + +Before installing the Hadoop Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + + ```text + dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + ``` + +Enable the `codeready-builder-for-rhel-8-\*-rpms` repository: + + ```text + ARCH=$( /bin/arch ) + subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" + ``` + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + +After receiving your repository credentials you can: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install `edb-as-hdfs_fdw`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + dnf -y https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file, providing your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. 
+ + ```text + [edb] + name=EnterpriseDB RPMs $releasever - $basearch + baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY + ``` + +**Installing Hadoop Foreign Data Wrapper** + +After saving your changes to the configuration file, use the below command to install the Hadoop Foreign Data Wrapper: + + ```text + dnf install edb-as-hdfs_fdw + ``` + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + + + +### On CentOS 7 + +Before installing the Hadoop Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` + +!!! Note + You may need to enable the `[extras]` repository definition in the `CentOS-Base.repo` file (located in `/etc/yum.repos.d`). + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + +After receiving your repository credentials you can: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install `edb-as-hdfs_fdw`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file, providing your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. + + ```text + [edb] + name=EnterpriseDB RPMs $releasever - $basearch + baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY + ``` + +**Installing Hadoop Foreign Data Wrapper** + +After saving your changes to the configuration file, use the following command to install the Hadoop Foreign Data Wrapper: + + ```text + yum install edb-as-hdfs_fdw + ``` + +where `xx` is the server version number. + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. 
+ + + +### On CentOS 8 + +Before installing the Hadoop Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + + ```text + dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + ``` + +Enable the `PowerTools` repository: + + ```text + dnf config-manager --set-enabled PowerTools + ``` + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + +After receiving your repository credentials you can: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install `edb-as-hdfs_fdw`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file, providing your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. + + ```text + [edb] + name=EnterpriseDB RPMs $releasever - $basearch + baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY + ``` + +**Installing Hadoop Foreign Data Wrapper** + +After saving your changes to the configuration file, use the following command to install the Hadoop Foreign Data Wrapper: + + ```text + dnf install edb-as-hdfs_fdw + ``` + +where `xx` is the server version number. + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + +## Installing the Hadoop Foreign Data Wrapper on a Debian or Ubuntu Host + +To install the Hadoop Foreign Data Wrapper on a Debian or Ubuntu host, you must have credentials that allow access to the EDB repository. To request credentials for the repository, visit the [EDB website](https://www.enterprisedb.com/repository-access-request/). + +The following steps will walk you through on using the EDB apt repository to install a Debian package. When using the commands, replace the `username` and `password` with the credentials provided by EDB. + +1. Assume superuser privileges: + + ```text + sudo su – + ``` + +2. Configure the EnterpriseDB repository: + + On Debian 9 and Ubuntu: + + > ```text + > sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + > ``` + + On Debian 10: + + 1. Set up the EDB repository: + + > ```text + > sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + > ``` + + 1. 
Substitute your EDB credentials for the `username` and `password` in the following command:
+
+> ```text
+> sh -c 'echo "machine apt.enterprisedb.com login <username> password <password>" > /etc/apt/auth.conf.d/edb.conf'
+> ```
+
+3. Add support to your system for secure APT repositories:
+
+   ```text
+   apt-get install apt-transport-https
+   ```
+
+4. Add the EDB signing key:
+
+   ```text
+   wget -q -O - https://username:password@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add -
+   ```
+
+5. Update the repository metadata:
+
+   ```text
+   apt-get update
+   ```
+
+6. Install the package:
+
+   ```text
+   apt-get install edb-as<xx>-hdfs-fdw
+   ```
+
+where `xx` is the server version number.
diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/06_updating_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/06_updating_the_hadoop_data_adapter.mdx
new file mode 100644
index 00000000000..17040838963
--- /dev/null
+++ b/product_docs/docs/hadoop_data_adapter/2.0.8/06_updating_the_hadoop_data_adapter.mdx
@@ -0,0 +1,39 @@
+---
+title: "Updating the Hadoop Foreign Data Wrapper"
+---
+
+
+
+**Updating an RPM Installation**
+
+If you have an existing RPM installation of the Hadoop Foreign Data Wrapper, you can use yum or dnf to upgrade your repository configuration file and update to a more recent product version. To update the `edb.repo` file, assume superuser privileges and enter:
+
+- On RHEL or CentOS 7:
+
+  > `yum upgrade edb-repo`
+
+- On RHEL or CentOS 8:
+
+  > `dnf upgrade edb-repo`
+
+yum or dnf will update the `edb.repo` file to enable access to the current EDB repository, configured to connect with the credentials specified in your `edb.repo` file. Then, you can use yum or dnf to upgrade any installed packages:
+
+- On RHEL or CentOS 7:
+
+  > `yum upgrade edb-as<xx>-hdfs_fdw`
+
+  where `xx` is the server version number.
+
+- On RHEL or CentOS 8:
+
+  > `dnf upgrade edb-as<xx>-hdfs_fdw`
+
+  where `xx` is the server version number.
+
+**Updating the Hadoop Foreign Data Wrapper on a Debian or Ubuntu Host**
+
+To update the Hadoop Foreign Data Wrapper on a Debian or Ubuntu host, use the following command:
+
+> `apt-get --only-upgrade install edb-as<xx>-hdfs-fdw`
+>
+> where `xx` is the server version number.
diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/07_features_of_hdfs_fdw.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/07_features_of_hdfs_fdw.mdx
new file mode 100644
index 00000000000..66cacb62851
--- /dev/null
+++ b/product_docs/docs/hadoop_data_adapter/2.0.8/07_features_of_hdfs_fdw.mdx
@@ -0,0 +1,23 @@
+---
+title: "Features of the Hadoop Foreign Data Wrapper"
+---
+
+
+
+The key features of the Hadoop Foreign Data Wrapper are listed below:
+
+## Where Clause Push-down
+
+The Hadoop Foreign Data Wrapper allows the push-down of a `WHERE` clause to the foreign server for execution. This feature optimizes remote queries to reduce the number of rows transferred from foreign servers.
+
+## Column Push-down
+
+The Hadoop Foreign Data Wrapper supports column push-down. As a result, the query brings back only those columns that are a part of the select target list.
+
+## Automated Cleanup
+
+The Hadoop Foreign Data Wrapper allows the cleanup of foreign tables in a single operation using the `DROP EXTENSION` command. This feature is specifically useful when a foreign table is created for a temporary purpose. The syntax is:
+
+> `DROP EXTENSION hdfs_fdw CASCADE;`
+
+For more information, see [DROP EXTENSION](https://www.postgresql.org/docs/current/sql-dropextension.html).
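+
+To illustrate the two push-down features, the following sketch queries the `weblogs` foreign table that is defined later in this guide; only the two listed columns are retrieved from the remote server, and, when possible, the `WHERE` condition is evaluated on the Hive or Spark server rather than in Postgres:
+
+```text
+-- Column push-down: only client_ip and http_status_code are fetched.
+-- Where clause push-down: the year filter is sent to the remote server.
+SELECT client_ip, http_status_code
+  FROM weblogs
+ WHERE year = '2016';
+```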
diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/08_configuring_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/08_configuring_the_hadoop_data_adapter.mdx new file mode 100644 index 00000000000..d2462cf1e8a --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/08_configuring_the_hadoop_data_adapter.mdx @@ -0,0 +1,480 @@ +--- +title: "Configuring the Hadoop Foreign Data Wrapper" +--- + + + +Before creating the extension and the database objects that use the extension, you must modify the Postgres host, providing the location of the supporting libraries. + +After installing Postgres, modify the `postgresql.conf` located in: + +> `/var/lib/edb/as_version/data` + +Modify the configuration file with your editor of choice, adding the `hdfs_fdw.jvmpath` parameter to the end of the configuration file, and setting the value to specify the location of the Java virtual machine (`libjvm.so`). Set the value of `hdfs_fdw.classpath` to indicate the location of the java class files used by the adapter; use a colon (:) as a delimiter between each path. For example: + +> ```text +> hdfs_fdw.classpath= +> '/usr/edb/as12/lib/HiveJdbcClient-1.0.jar:/home/edb/Projects/hadoop_fdw/hadoop/share/hadoop/common/hadoop-common-2.6.4.jar:/home/edb/Projects/hadoop_fdw/apache-hive-1.0.1-bin/lib/hive-jdbc-1.0.1-standalone.jar' +> ``` +> +> !!! Note +> The jar files (hive-jdbc-1.0.1-standalone.jar and hadoop-common-2.6.4.jar) mentioned in the above example should be copied from respective Hive and Hadoop sources or website to PostgreSQL instance where Hadoop Foreign Data Wrapper is installed. +> +> If you are using EDB Advanced Server and have a `DATE` column in your database, you must set `edb_redwood_date = OFF` in the `postgresql.conf` file. + +After setting the parameter values, restart the Postgres server. For detailed information about controlling the service on an Advanced Server host, see the EDB Postgres Advanced Server Installation Guide, available at: + +> + +Before using the Hadoop Foreign Data Wrapper, you must: + +> 1. Use the [CREATE EXTENSION](#create-extension) command to create the extension on the Postgres host. +> 2. Use the [CREATE SERVER](#create-server) command to define a connection to the Hadoop file system. +> 3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. +> 4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Advanced Server database that corresponds to a database that resides on the Hadoop cluster. + + + +## CREATE EXTENSION + +Use the `CREATE EXTENSION` command to create the `hdfs_fdw` extension. To invoke the command, use your client of choice (for example, psql) to connect to the Postgres database from which you will be querying the Hive or Spark server, and invoke the command: + +```text +CREATE EXTENSION [IF NOT EXISTS] hdfs_fdw [WITH] [SCHEMA schema_name]; +``` + +**Parameters** + +`IF NOT EXISTS` + +> Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. + +`schema_name` + +> Optionally specify the name of the schema in which to install the extension's objects. + +**Example** + +The following command installs the `hdfs_fdw` hadoop foreign data wrapper: + +> `CREATE EXTENSION hdfs_fdw;` + +For more information about using the foreign data wrapper `CREATE EXTENSION` command, see: + +> . 
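+
+For reference, a minimal sketch of the two `postgresql.conf` entries described at the beginning of this chapter; the library and jar file paths are hypothetical and depend on where the JVM and the Hive/Hadoop client jars are installed on your host:
+
+```text
+# Hypothetical paths; point jvmpath at the directory containing libjvm.so
+# and classpath at the Hive JDBC and Hadoop common jar files.
+hdfs_fdw.jvmpath='/usr/lib/jvm/jre/lib/server/'
+hdfs_fdw.classpath='/usr/edb/as12/lib/HiveJdbcClient-1.0.jar:/home/edb/Projects/hadoop_fdw/hadoop/share/hadoop/common/hadoop-common-2.6.4.jar:/home/edb/Projects/hadoop_fdw/apache-hive-1.0.1-bin/lib/hive-jdbc-1.0.1-standalone.jar'
+```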
+ + + +## CREATE SERVER + +Use the `CREATE SERVER` command to define a connection to a foreign server. The syntax is: + +```text +CREATE SERVER server_name FOREIGN DATA WRAPPER hdfs_fdw + [OPTIONS (option 'value' [, ...])] +``` + +The role that defines the server is the owner of the server; use the `ALTER SERVER` command to reassign ownership of a foreign server. To create a foreign server, you must have `USAGE` privilege on the foreign-data wrapper specified in the `CREATE SERVER` command. + +**Parameters** + +`server_name` + +> Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. + +`FOREIGN_DATA_WRAPPER` + +> Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `hdfs_fdw` foreign data wrapper when connecting to the cluster. + +`OPTIONS` + +> Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server. You can include: + +| Option | Description | +| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| host | The address or hostname of the Hadoop cluster. The default value is \`localhost\`. | +| port | The port number of the Hive Thrift Server or Spark Thrift Server. The default is \`10000\`. | +| client_type | Specify hiveserver2 or spark as the client type. To use the ANALYZE statement on Spark, you must specify a value of spark; if you do not specify a value for client_type, the default value is hiveserver2. | +| auth_type
| The authentication type of the client; specify LDAP or NOSASL. If you do not specify an auth_type, the data wrapper decides the auth_type value on the basis of the user mapping: if the user mapping includes a user name and password, the data wrapper uses LDAP authentication; if the user mapping does not include a user name and password, the data wrapper uses NOSASL authentication.
| +| connect_timeout | The length of time before a connection attempt times out. The default value is \`300\` seconds. | +| fetch_size | A user-specified value that is provided as a parameter to the JDBC API setFetchSize. The default value is \`10,000\`. | +| log_remote_sql | If true, logging will include SQL commands executed on the remote hive server and the number of times that a scan is repeated. The default is \`false\`. | +| query_timeout | Use query_timeout to provide the number of seconds after which a request will timeout if it is not satisfied by the Hive server. Query timeout is not supported by the Hive JDBC driver. | +| use_remote_estimate | Include the use_remote_estimate to instruct the server to use EXPLAIN commands on the remote server when estimating processing costs. By default, use_remote_estimate is false, and remote tables are assumed to have \`1000\` rows. | + +**Example** + +The following command creates a foreign server named `hdfs_server` that uses the `hdfs_fdw` foreign data wrapper to connect to a host with an IP address of `170.11.2.148`: + +```text +CREATE SERVER hdfs_server FOREIGN DATA WRAPPER hdfs_fdw OPTIONS (host '170.11.2.148', port '10000', client_type 'hiveserver2', auth_type 'LDAP', connect_timeout '10000', query_timeout '10000'); +``` + +The foreign server uses the default port (`10000`) for the connection to the client on the Hadoop cluster; the connection uses an LDAP server. + +For more information about using the `CREATE SERVER` command, see: + +> + + + +## CREATE USER MAPPING + +Use the `CREATE USER MAPPING` command to define a mapping that associates a Postgres role with a foreign server: + +```text +CREATE USER MAPPING FOR role_name SERVER server_name + [OPTIONS (option 'value' [, ...])]; +``` + +You must be the owner of the foreign server to create a user mapping for that server. + +Please note: the Hadoop Foreign Data Wrapper supports NOSASL and LDAP authentication. If you are creating a user mapping for a server that uses LDAP authentication, use the `OPTIONS` clause to provide the connection credentials (the username and password) for an existing LDAP user. If the server uses NOSASL authentication, omit the OPTIONS clause when creating the user mapping. + +**Parameters** + +`role_name` + +> Use `role_name` to specify the role that will be associated with the foreign server. + +`server_name` + +> Use `server_name` to specify the name of the server that defines a connection to the Hadoop cluster. + +`OPTIONS` + +> Use the `OPTIONS` clause to specify connection information for the foreign server. If you are using LDAP authentication, provide a: +> +> `username`: the name of the user on the LDAP server. +> +> `password`: the password associated with the username. +> +> If you do not provide a user name and password, the data wrapper will use NOSASL authentication. + +**Example** + +The following command creates a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `hdfs_server`: + +> `CREATE USER MAPPING FOR enterprisedb SERVER hdfs_server;` + +If the database host uses LDAP authentication, provide connection credentials when creating the user mapping: + +```text +CREATE USER MAPPING FOR enterprisedb SERVER hdfs_server OPTIONS (username 'alice', password '1safepwd'); +``` + +The command creates a user mapping for a role named `enterprisedb` that is associated with a server named `hdfs_server`. 
When connecting to the LDAP server, the Hive or Spark server will authenticate as `alice`, and provide a password of `1safepwd`. + +For detailed information about the `CREATE USER MAPPING` command, see: + +> + + + +## CREATE FOREIGN TABLE + +A foreign table is a pointer to a table that resides on the Hadoop host. Before creating a foreign table definition on the Postgres server, connect to the Hive or Spark server and create a table; the columns in the table will map to to columns in a table on the Postgres server. Then, use the `CREATE FOREIGN TABLE` command to define a table on the Postgres server with columns that correspond to the table that resides on the Hadoop host. The syntax is: + +```text +CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name ( [ + { column_name data_type [ OPTIONS ( option 'value' [, ... ] ) ] [ COLLATE collation ] [ column_constraint [ ... ] ] + | table_constraint } + [, ... ] +] ) +[ INHERITS ( parent_table [, ... ] ) ] + SERVER server_name [ OPTIONS ( option 'value' [, ... ] ) ] +``` + +where `column_constraint` is: + +```text +[ CONSTRAINT constraint_name ] +{ NOT NULL | NULL | CHECK (expr) [ NO INHERIT ] | DEFAULT default_expr } +``` + +and `table_constraint` is: + +```text +[ CONSTRAINT constraint_name ] CHECK (expr) [ NO INHERIT ] +``` + +**Parameters** + +`table_name` + +> Specifies the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. + +`IF NOT EXISTS` + +> Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. + +`column_name` + +> Specifies the name of a column in the new table; each column should correspond to a column described on the Hive or Spark server. + +`data_type` + +> Specifies the data type of the column; when possible, specify the same data type for each column on the Postgres server and the Hive or Spark server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the Hive or Spark server. If the server cannot identify a compatible data type, it will return an error. + +`COLLATE collation` + +> Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. + +`INHERITS (parent_table [, ... ])` + +> Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. + +`CONSTRAINT constraint_name` + +> Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. + +`NOT NULL` + +> Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. + +`NULL` + +> Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. + +`CHECK (expr) [NO INHERIT]` + +> Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. +> +> A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. 
+> +> Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. + +`DEFAULT default_expr` + +> Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. + +`SERVER server_name [OPTIONS (option 'value' [, ... ] ) ]` + +> To create a foreign table that will allow you to query a table that resides on a Hadoop file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the Hadoop data adapter. +> +> Use the `OPTIONS` clause to specify the following `options` and their corresponding values: + +| option | value | +| ---------- | --------------------------------------------------------------------------------------- | +| dbname | The name of the database on the Hive server; the database name is required. | +| table_name | The name of the table on the Hive server; the default is the name of the foreign table. | + +**Example** + +To use data that is stored on a distributed file system, you must create a table on the Postgres host that maps the columns of a Hadoop table to the columns of a Postgres table. For example, for a Hadoop table with the following definition: + +```text +CREATE TABLE weblogs ( + client_ip STRING, + full_request_date STRING, + day STRING, + month STRING, + month_num INT, + year STRING, + hour STRING, + minute STRING, + second STRING, + timezone STRING, + http_verb STRING, + uri STRING, + http_status_code STRING, + bytes_returned STRING, + referrer STRING, + user_agent STRING) +row format delimited +fields terminated by '\t'; +``` + +You should execute a command on the Postgres server that creates a comparable table on the Postgres server: + +```text +CREATE FOREIGN TABLE weblogs +( + client_ip TEXT, + full_request_date TEXT, + day TEXT, + Month TEXT, + month_num INTEGER, + year TEXT, + hour TEXT, + minute TEXT, + second TEXT, + timezone TEXT, + http_verb TEXT, + uri TEXT, + http_status_code TEXT, + bytes_returned TEXT, + referrer TEXT, + user_agent TEXT +) +SERVER hdfs_server + OPTIONS (dbname 'webdata', table_name 'weblogs'); +``` + +Include the `SERVER` clause to specify the name of the database stored on the Hadoop file system (`webdata`) and the name of the table (`weblogs`) that corresponds to the table on the Postgres server. + +For more information about using the `CREATE FOREIGN TABLE` command, see: + +> + +### Data Type Mappings + +When using the foreign data wrapper, you must create a table on the Postgres server that mirrors the table that resides on the Hive server. The Hadoop data wrapper will automatically convert the following Hive data types to the target Postgres type: + +| **Hive** | **Postgres** | +| ----------- | ---------------- | +| BIGINT | BIGINT/INT8 | +| BOOLEAN | BOOL/BOOLEAN | +| BINARY | BYTEA | +| CHAR | CHAR | +| DATE | DATE | +| DOUBLE | FLOAT8 | +| FLOAT | FLOAT/FLOAT4 | +| INT/INTEGER | INT/INTEGER/INT4 | +| SMALLINT | SMALLINT/INT2 | +| STRING | TEXT | +| TIMESTAMP | TIMESTAMP | +| TINYINT | INT2 | +| VARCHAR | VARCHAR | + +## DROP EXTENSION + +Use the `DROP EXTENSION` command to remove an extension. To invoke the command, use your client of choice (for example, psql) to connect to the Postgres database from which you will be dropping the Hadoop server, and run the command: + +```text +DROP EXTENSION [ IF EXISTS ] name [, ...] 
[ CASCADE | RESTRICT ];
+```
+
+**Parameters**
+
+`IF EXISTS`
+
+> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the specified name doesn't exist.
+
+`name`
+
+> Specify the name of the installed extension.
+
+`CASCADE`
+
+> Automatically drop objects that depend on the extension, and in turn all objects that depend on those objects.
+
+`RESTRICT`
+
+> Refuse to drop the extension if any objects depend on it, other than its own member objects and other extensions listed in the same `DROP` command.
+
+**Example**
+
+The following command removes the extension from the current database:
+
+> `DROP EXTENSION hdfs_fdw;`
+
+For more information about using the foreign data wrapper `DROP EXTENSION` command, see:
+
+> .
+
+## DROP SERVER
+
+Use the `DROP SERVER` command to remove a connection to a foreign server. The syntax is:
+
+```text
+DROP SERVER [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]
+```
+
+To drop a foreign server, you must be its owner; use the `ALTER SERVER` command to reassign ownership of a foreign server if necessary.
+
+**Parameters**
+
+`IF EXISTS`
+
+> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a server with the specified name doesn't exist.
+
+`name`
+
+> Specify the name of an installed server.
+
+`CASCADE`
+
+> Automatically drop objects that depend on the server, and in turn all objects that depend on those objects.
+
+`RESTRICT`
+
+> Refuse to drop the server if any objects depend on it.
+
+**Example**
+
+The following command removes a foreign server named `hdfs_server`:
+
+> `DROP SERVER hdfs_server;`
+
+For more information about using the `DROP SERVER` command, see:
+
+>
+
+## DROP USER MAPPING
+
+Use the `DROP USER MAPPING` command to remove a mapping that associates a Postgres role with a foreign server. You must be the owner of the foreign server to remove a user mapping for that server.
+
+```text
+DROP USER MAPPING [ IF EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } SERVER server_name;
+```
+
+**Parameters**
+
+`IF EXISTS`
+
+> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the user mapping doesn't exist.
+
+`user_name`
+
+> Specify the user name of the mapping.
+
+`server_name`
+
+> Specify the name of the server that defines a connection to the Hadoop cluster.
+
+**Example**
+
+The following command drops a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `hdfs_server`:
+
+> `DROP USER MAPPING FOR enterprisedb SERVER hdfs_server;`
+
+For detailed information about the `DROP USER MAPPING` command, see:
+
+>
+
+## DROP FOREIGN TABLE
+
+A foreign table is a pointer to a table that resides on the Hadoop host. Use the `DROP FOREIGN TABLE` command to remove a foreign table. Only the owner of the foreign table can drop it.
+
+```text
+DROP FOREIGN TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]
+```
+
+**Parameters**
+
+`IF EXISTS`
+
+> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a foreign table with the specified name doesn't exist.
+
+`name`
+
+> Specify the name of the foreign table.
+
+`CASCADE`
+
+> Automatically drop objects that depend on the foreign table, and in turn all objects that depend on those objects.
+
+`RESTRICT`
+
+> Refuse to drop the foreign table if any objects depend on it.
+
+**Example**
+
+```text
+DROP FOREIGN TABLE warehouse;
+```
+
+For more information about using the `DROP FOREIGN TABLE` command, see:
+
+>
diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/09_using_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/09_using_the_hadoop_data_adapter.mdx
new file mode 100644
index 00000000000..c67ba31ead7
--- /dev/null
+++ b/product_docs/docs/hadoop_data_adapter/2.0.8/09_using_the_hadoop_data_adapter.mdx
@@ -0,0 +1,302 @@
+---
+title: "Using the Hadoop Foreign Data Wrapper"
+---
+
+
+
+You can use the Hadoop Foreign Data Wrapper with either Apache Hive or Apache Spark. Both Hive and Spark store metadata in the configured metastore, where databases and tables are created using HiveQL.
+
+## Using HDFS FDW with Apache Hive on Top of Hadoop
+
+`Apache Hive` data warehouse software facilitates querying and managing large datasets residing in distributed storage. Hive provides a mechanism to project structure onto this data and query the data using a SQL-like language called `HiveQL`. At the same time, this language allows traditional map/reduce programmers to plug in their custom mappers and reducers when it is inconvenient or inefficient to express this logic in `HiveQL`.
+
+There are two versions of Hive, `HiveServer1` and `HiveServer2`, which can be downloaded from the [Apache Hive website](https://hive.apache.org/downloads.html).
+
+!!! Note
+    The Hadoop Foreign Data Wrapper supports only `HiveServer2`.
+
+To use HDFS FDW with Apache Hive on top of Hadoop:
+
+Step 1: Download [weblogs_parse](http://wiki.pentaho.com/download/attachments/23531451/weblogs_parse.zip?version=1&modificationDate=1327096242000/) and follow the instructions at the [Wiki Pentaho website](https://wiki.pentaho.com/display/BAD/Transforming+Data+within+Hive/).
+
+Step 2: Upload the `weblogs_parse.txt` file using these commands:
+
+```text
+hadoop fs -mkdir /weblogs
+hadoop fs -mkdir /weblogs/parse
+hadoop fs -put weblogs_parse.txt /weblogs/parse/part-00000
+```
+
+Step 3: Start `HiveServer2`, if it is not already running, using the following command:
+
+```text
+$HIVE_HOME/bin/hiveserver2
+```
+
+or
+
+```text
+$HIVE_HOME/bin/hive --service hiveserver2
+```
+
+Step 4: Connect to `HiveServer2` using the Hive `beeline` client. For example:
+
+```text
+$ beeline
+Beeline version 1.0.1 by Apache Hive
+beeline> !connect jdbc:hive2://localhost:10000/default;auth=noSasl
+```
+
+Step 5: Create a table in Hive. The example creates a table named `weblogs`:
+
+```text
+CREATE TABLE weblogs (
+    client_ip STRING,
+    full_request_date STRING,
+    day STRING,
+    month STRING,
+    month_num INT,
+    year STRING,
+    hour STRING,
+    minute STRING,
+    second STRING,
+    timezone STRING,
+    http_verb STRING,
+    uri STRING,
+    http_status_code STRING,
+    bytes_returned STRING,
+    referrer STRING,
+    user_agent STRING)
+row format delimited
+fields terminated by '\t';
+```
+
+Step 6: Load data into the table.
+
+```text
+hadoop fs -cp /weblogs/parse/part-00000 /user/hive/warehouse/weblogs/
+```
+
+Step 7: Access your data from Postgres; you can now use the `weblogs` table. Once you are connected using psql, follow these steps:
+
+```text
+-- set the GUC variables appropriately, e.g.
: +hdfs_fdw.jvmpath='/home/edb/Projects/hadoop_fdw/jdk1.8.0_111/jre/lib/amd64/server/' +hdfs_fdw.classpath='/usr/local/edbas/lib/postgresql/HiveJdbcClient-1.0.jar:/home/edb/Projects/hadoop_fdw/hadoop/share/hadoop/common/hadoop-common-2.6.4.jar:/home/edb/Projects/hadoop_fdw/apache-hive-1.0.1-bin/lib/hive-jdbc-1.0.1-standalone.jar' + +-- load extension first time after install +CREATE EXTENSION hdfs_fdw; + +-- create server object +CREATE SERVER hdfs_server + FOREIGN DATA WRAPPER hdfs_fdw + OPTIONS (host '127.0.0.1'); + +-- create user mapping +CREATE USER MAPPING FOR postgres + SERVER hdfs_server OPTIONS (username 'hive_username', password 'hive_password'); + +-- create foreign table +CREATE FOREIGN TABLE weblogs +( + client_ip TEXT, + full_request_date TEXT, + day TEXT, + Month TEXT, + month_num INTEGER, + year TEXT, + hour TEXT, + minute TEXT, + second TEXT, + timezone TEXT, + http_verb TEXT, + uri TEXT, + http_status_code TEXT, + bytes_returned TEXT, + referrer TEXT, + user_agent TEXT +) +SERVER hdfs_server + OPTIONS (dbname 'default', table_name 'weblogs'); + + +-- select from table +postgres=# SELECT DISTINCT client_ip IP, count(*) + FROM weblogs GROUP BY IP HAVING count(*) > 5000 ORDER BY 1; + ip | count +-----------------+------- + 13.53.52.13 | 5494 + 14.323.74.653 | 16194 + 322.6.648.325 | 13242 + 325.87.75.336 | 6500 + 325.87.75.36 | 6498 + 361.631.17.30 | 64979 + 363.652.18.65 | 10561 + 683.615.622.618 | 13505 +(8 rows) + +-- EXPLAIN output showing WHERE clause being pushed down to remote server. +EXPLAIN (VERBOSE, COSTS OFF) SELECT client_ip, full_request_date, uri FROM weblogs WHERE http_status_code = 200; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Foreign Scan on public.weblogs + Output: client_ip, full_request_date, uri + Remote SQL: SELECT client_ip, full_request_date, uri FROM default.weblogs WHERE ((http_status_code = '200')) +(3 rows) +``` + +## Using HDFS FDW with Apache Spark on Top of Hadoop + +Apache Spark is a general purpose distributed computing framework which supports a wide variety of use cases. It provides real time streaming as well as batch processing with speed, ease of use, and sophisticated analytics. Spark does not provide a storage layer as it relies on third party storage providers like Hadoop, HBASE, Cassandra, S3 etc. Spark integrates seamlessly with Hadoop and can process existing data. Spark SQL is 100% compatible with `HiveQL` and can be used as a replacement of `Hiveserver2`, using `Spark Thrift Server`. + +To use HDFS FDW with Apache Spark on top of Hadoop: + +Step 1: Download and install Apache Spark in local mode. + +Step 2: In the folder `$SPARK_HOME/conf` create a file `spark-defaults.conf` containing the following line: + +```text +spark.sql.warehouse.dir hdfs://localhost:9000/user/hive/warehouse +``` + +By default, Spark uses `derby` for both the meta data and the data itself (called a warehouse in Spark). To have Spark use Hadoop as a warehouse, you should add this property. + +Step 3: Start the Spark Thrift Server. + +```text +./start-thriftserver.sh +``` + +Step 4: Make sure the Spark Thrift server is running and writing to a log file. + +Step 5: Create a local file (`names.txt`) that contains the following entries: + +```text +$ cat /tmp/names.txt +1,abcd +2,pqrs +3,wxyz +4,a_b_c +5,p_q_r +, +``` + +Step 6: Connect to Spark Thrift Server2 using the Spark `beeline` client. 
For example:
+
+```text
+$ beeline
+Beeline version 1.2.1.spark2 by Apache Hive
+beeline> !connect jdbc:hive2://localhost:10000/default;auth=noSasl org.apache.hive.jdbc.HiveDriver
+```
+
+Step 7: Prepare the sample data on Spark. Run the following commands in the `beeline` command line tool:
+
+```text
+./beeline
+Beeline version 1.2.1.spark2 by Apache Hive
+beeline> !connect jdbc:hive2://localhost:10000/default;auth=noSasl org.apache.hive.jdbc.HiveDriver
+Connecting to jdbc:hive2://localhost:10000/default;auth=noSasl
+Enter password for jdbc:hive2://localhost:10000/default;auth=noSasl:
+Connected to: Spark SQL (version 2.1.1)
+Driver: Hive JDBC (version 1.2.1.spark2)
+Transaction isolation: TRANSACTION_REPEATABLE_READ
+0: jdbc:hive2://localhost:10000> create database my_test_db;
++---------+--+
+| Result  |
++---------+--+
++---------+--+
+No rows selected (0.379 seconds)
+0: jdbc:hive2://localhost:10000> use my_test_db;
++---------+--+
+| Result  |
++---------+--+
++---------+--+
+No rows selected (0.03 seconds)
+0: jdbc:hive2://localhost:10000> create table my_names_tab(a int, name string)
+    row format delimited fields terminated by ' ';
++---------+--+
+| Result  |
++---------+--+
++---------+--+
+No rows selected (0.11 seconds)
+0: jdbc:hive2://localhost:10000>
+
+0: jdbc:hive2://localhost:10000> load data local inpath '/tmp/names.txt'
+    into table my_names_tab;
++---------+--+
+| Result  |
++---------+--+
++---------+--+
+No rows selected (0.33 seconds)
+0: jdbc:hive2://localhost:10000> select * from my_names_tab;
++-------+---------+--+
+| a     | name    |
++-------+---------+--+
+| 1     | abcd    |
+| 2     | pqrs    |
+| 3     | wxyz    |
+| 4     | a_b_c   |
+| 5     | p_q_r   |
+| NULL  | NULL    |
++-------+---------+--+
+```
+
+The following commands list the corresponding files in Hadoop:
+
+```text
+$ hadoop fs -ls /user/hive/warehouse/
+Found 1 items
+drwxrwxrwx - org.apache.hive.jdbc.HiveDriver supergroup 0 2020-06-12 17:03 /user/hive/warehouse/my_test_db.db
+
+$ hadoop fs -ls /user/hive/warehouse/my_test_db.db/
+Found 1 items
+drwxrwxrwx - org.apache.hive.jdbc.HiveDriver supergroup 0 2020-06-12 17:03 /user/hive/warehouse/my_test_db.db/my_names_tab
+```
+
+Step 8: Access your data from Postgres using psql:
+
+```text
+-- set the GUC variables appropriately, e.g. :
+hdfs_fdw.jvmpath='/home/edb/Projects/hadoop_fdw/jdk1.8.0_111/jre/lib/amd64/server/'
+hdfs_fdw.classpath='/usr/local/edbas/lib/postgresql/HiveJdbcClient-1.0.jar:/home/edb/Projects/hadoop_fdw/hadoop/share/hadoop/common/hadoop-common-2.6.4.jar:/home/edb/Projects/hadoop_fdw/apache-hive-1.0.1-bin/lib/hive-jdbc-1.0.1-standalone.jar'
+
+-- load extension first time after install
+CREATE EXTENSION hdfs_fdw;
+
+-- create server object
+CREATE SERVER hdfs_server
+  FOREIGN DATA WRAPPER hdfs_fdw
+  OPTIONS (host '127.0.0.1', port '10000', client_type 'spark', auth_type 'NOSASL');
+
+-- create user mapping
+CREATE USER MAPPING FOR postgres
+  SERVER hdfs_server OPTIONS (username 'spark_username', password 'spark_password');
+
+-- create foreign table
+CREATE FOREIGN TABLE f_names_tab( a int, name varchar(255)) SERVER hdfs_server
+  OPTIONS (dbname 'my_test_db', table_name 'my_names_tab');
+
+-- select the data from foreign server
+select * from f_names_tab;
+ a | name
+---+--------
+ 1 | abcd
+ 2 | pqrs
+ 3 | wxyz
+ 4 | a_b_c
+ 5 | p_q_r
+ 0 |
+(6 rows)
+
+-- EXPLAIN output showing WHERE clause being pushed down to remote server.
+EXPLAIN (verbose, costs off) SELECT name FROM f_names_tab WHERE a > 3;
+                         QUERY PLAN
+--------------------------------------------------------------------------
+ Foreign Scan on public.f_names_tab
+   Output: name
+   Remote SQL: SELECT name FROM my_test_db.my_names_tab WHERE ((a > '3'))
+(3 rows)
+```
+
+!!! Note
+    The same port is used when creating the foreign server because the Spark Thrift Server is compatible with the Hive Thrift Server. Applications that work with HiveServer2 also work with Spark, except for the behavior of the `ANALYZE` command and the connection string in the `NOSASL` case. If you replace Hive with Spark, we recommend using `ALTER SERVER` to change the `client_type` option.
diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/10_identifying_data_adapter_version.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/10_identifying_data_adapter_version.mdx
new file mode 100644
index 00000000000..fa6e51f1d5c
--- /dev/null
+++ b/product_docs/docs/hadoop_data_adapter/2.0.8/10_identifying_data_adapter_version.mdx
@@ -0,0 +1,19 @@
+---
+title: "Identifying the Hadoop Foreign Data Wrapper Version"
+---
+
+
+
+The Hadoop Foreign Data Wrapper includes a function that you can use to identify the currently installed version of the `.so` file for the data wrapper. To use the function, connect to the Postgres server, and enter:
+
+```text
+SELECT hdfs_fdw_version();
+```
+
+The function returns the version number:
+
+```text
+hdfs_fdw_version
+-----------------
+
+```
diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/11_uninstalling_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/11_uninstalling_the_hadoop_data_adapter.mdx
new file mode 100644
index 00000000000..2ce7ab1ca46
--- /dev/null
+++ b/product_docs/docs/hadoop_data_adapter/2.0.8/11_uninstalling_the_hadoop_data_adapter.mdx
@@ -0,0 +1,29 @@
+---
+title: "Uninstalling the Hadoop Foreign Data Wrapper"
+---
+
+
+
+**Uninstalling an RPM Package**
+
+You can use the `yum remove` or `dnf remove` command to remove a package installed by `yum` or `dnf`. To remove a package, open a terminal window, assume superuser privileges, and enter the command:
+
+- On RHEL or CentOS 7:
+
+  `yum remove edb-as<xx>-hdfs_fdw`
+
+> where `xx` is the server version number.
+
+- On RHEL or CentOS 8:
+
+  `dnf remove edb-as<xx>-hdfs_fdw`
+
+> where `xx` is the server version number.
+
+**Uninstalling Hadoop Foreign Data Wrapper on a Debian or Ubuntu Host**
+
+- To uninstall Hadoop Foreign Data Wrapper on a Debian or Ubuntu host, invoke the following command:
+
+  `apt-get remove edb-as<xx>-hdfs-fdw`
+
+> where `xx` is the server version number.
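+
+If you are not sure exactly which package is installed (the package name embeds the server version), you can query the package manager first and then remove the package it reports. The following is a minimal sketch only; the `edb-as13` package names are examples and should be replaced with whatever the query returns on your host.
+
+```text
+# RHEL/CentOS 7 (use dnf instead of yum on version 8): list installed Hadoop FDW packages, then remove the one reported
+rpm -qa | grep hdfs_fdw
+yum remove edb-as13-hdfs_fdw
+
+# Debian/Ubuntu: the package name uses hyphens instead of underscores
+apt list --installed 2>/dev/null | grep hdfs-fdw
+apt-get remove edb-as13-hdfs-fdw
+```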
diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/images/EDB_logo.png b/product_docs/docs/hadoop_data_adapter/2.0.8/images/EDB_logo.png new file mode 100644 index 00000000000..f4a93cf57f5 --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/images/EDB_logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07423b012a855204780fe5a2a5a1e33607304a5c3020ae4acbf3d575691dedd6 +size 12136 diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/images/ambari_administrative_interface.png b/product_docs/docs/hadoop_data_adapter/2.0.8/images/ambari_administrative_interface.png new file mode 100755 index 00000000000..d44e42a740e --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/images/ambari_administrative_interface.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4acb08665b6a1df9494f91f9ab64a8f4d0979f61947e19162f419d134e351ea +size 150222 diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/images/edb_logo.svg b/product_docs/docs/hadoop_data_adapter/2.0.8/images/edb_logo.svg new file mode 100644 index 00000000000..74babf2f8da --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/images/edb_logo.svg @@ -0,0 +1,56 @@ + + + + +logo + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/images/hadoop_distributed_file_system_with_postgres.png b/product_docs/docs/hadoop_data_adapter/2.0.8/images/hadoop_distributed_file_system_with_postgres.png new file mode 100755 index 00000000000..ff6e32d8e94 --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/images/hadoop_distributed_file_system_with_postgres.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fda731e9f3b5018bda72c52b85737198530d8864d7ed5d57e02bcd2a58b537bc +size 70002 diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/images/installation_complete.png b/product_docs/docs/hadoop_data_adapter/2.0.8/images/installation_complete.png new file mode 100755 index 00000000000..311d632a71e --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/images/installation_complete.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e52a4437577b7a64d7f36c4f837b9a0fab90b163b201055bd817f0e3cbaf112a +size 39463 diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/images/installation_wizard_welcome_screen.png b/product_docs/docs/hadoop_data_adapter/2.0.8/images/installation_wizard_welcome_screen.png new file mode 100755 index 00000000000..aaf582bc781 --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/images/installation_wizard_welcome_screen.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85ea24919ac97d6f8ebb882da665c22e4d5c0942b8491faa5e07be8b93007b60 +size 38341 diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/images/progress_as_the_servers_restart.png b/product_docs/docs/hadoop_data_adapter/2.0.8/images/progress_as_the_servers_restart.png new file mode 100755 index 00000000000..43523c7d1ad --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/images/progress_as_the_servers_restart.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46a0feaf37642c3aa87fe8267259687dfa9c9571f1c2663297159ef98356e2fd +size 85080 diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/images/restart_the_server.png b/product_docs/docs/hadoop_data_adapter/2.0.8/images/restart_the_server.png new file mode 100755 index 00000000000..2518b46d46d --- /dev/null +++ 
b/product_docs/docs/hadoop_data_adapter/2.0.8/images/restart_the_server.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e612201379d56b4dffcfb4222ceb765532ca5d097504c1dbabdc6a812afaba9 +size 33996 diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/images/setup_wizard_ready.png b/product_docs/docs/hadoop_data_adapter/2.0.8/images/setup_wizard_ready.png new file mode 100755 index 00000000000..922e318868d --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/images/setup_wizard_ready.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ba6a1a88fe8a91b94571b57a36077fce7b3346e850a38f9bf015166ace93e36 +size 16833 diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/images/specify_an_installation_directory.png b/product_docs/docs/hadoop_data_adapter/2.0.8/images/specify_an_installation_directory.png new file mode 100755 index 00000000000..208c85c46af --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/images/specify_an_installation_directory.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dae28ab7f567617da49816514a3fa5eb6161e611c416295cfe2f829cd941f98e +size 20596 diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/images/the_installation_wizard_welcome_screen.png b/product_docs/docs/hadoop_data_adapter/2.0.8/images/the_installation_wizard_welcome_screen.png new file mode 100755 index 00000000000..2da19033b0e --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/images/the_installation_wizard_welcome_screen.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fd52b490dd37c86dca15975a7dbc9bdd47c7ae4ab0912d1bf570d785c521f79 +size 33097 diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx new file mode 100644 index 00000000000..bb911fb9abe --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx @@ -0,0 +1,15 @@ +--- +title: "Hadoop Foreign Data Wrapper Guide" +--- + +The Hadoop Foreign Data Wrapper (`hdfs_fdw`) is a Postgres extension that allows you to access data that resides on a Hadoop file system from EDB Postgres Advanced Server. The foreign data wrapper makes the Hadoop file system a read-only data source that you can use with Postgres functions and utilities, or in conjunction with other data that resides on a Postgres host. + +The Hadoop Foreign Data Wrapper can be installed with an RPM package. You can download an installer from the [EDB website](https://www.enterprisedb.com/software-downloads-postgres/). + +This guide uses the term `Postgres` to refer to an instance of EDB Postgres Advanced Server. + +
+ +whats_new requirements_overview architecture_overview supported_authentication_methods installing_the_hadoop_data_adapter updating_the_hadoop_data_adapter features_of_hdfs_fdw configuring_the_hadoop_data_adapter using_the_hadoop_data_adapter identifying_data_adapter_version uninstalling_the_hadoop_data_adapter conclusion + +
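+
+In practice, the pieces described in the chapters listed above fit together as shown in the following sketch. It is illustrative only: the host address, role, credentials, and the `webdata.weblogs` table are the sample names used elsewhere in this guide, and only a few of the sample table's columns are declared for brevity (the `CREATE FOREIGN TABLE` chapter shows the full definition).
+
+```text
+-- load the extension in the database (once per database)
+CREATE EXTENSION hdfs_fdw;
+
+-- describe the connection to the Hive (or Spark) Thrift server
+CREATE SERVER hdfs_server
+  FOREIGN DATA WRAPPER hdfs_fdw
+  OPTIONS (host '170.11.2.148', port '10000', client_type 'hiveserver2', auth_type 'LDAP');
+
+-- the credentials used when Postgres connects on behalf of this role
+CREATE USER MAPPING FOR enterprisedb
+  SERVER hdfs_server OPTIONS (username 'alice', password '1safepwd');
+
+-- a read-only foreign table whose columns correspond to columns of the remote table
+CREATE FOREIGN TABLE weblogs (
+  client_ip        TEXT,
+  uri              TEXT,
+  http_status_code TEXT
+)
+SERVER hdfs_server OPTIONS (dbname 'webdata', table_name 'weblogs');
+
+-- query the Hadoop data as if it were a local table
+SELECT count(*) FROM weblogs WHERE http_status_code = '200';
+```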
From 1b6d06fb9b5278b4ead2eddc0d0b491437c7e910 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Wed, 16 Jun 2021 21:49:48 +0530 Subject: [PATCH 19/50] fixing alignments Former-commit-id: d28b9fe1f5966f263bb9d1214da9c388d35e2881 --- .../2.0.7/02_requirements_overview.mdx | 12 ++++++------ .../05_installing_the_hadoop_data_adapter.mdx | 18 +++++++++--------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx index afaf5d7b204..13360808b72 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx @@ -12,14 +12,14 @@ The Hadoop Foreign Data Wrapper is supported on the following platforms: **Linux x86-64** -> - RHEL 8.x and 7.x -> - CentOS 8.x and 7.x -> - OL 8.x and 7.x -> - Ubuntu 20.04 and 18.04 LTS -> - Debian 10.x and 9.x + - RHEL 8.x and 7.x + - CentOS 8.x and 7.x + - OL 8.x and 7.x + - Ubuntu 20.04 and 18.04 LTS + - Debian 10.x and 9.x **Linux on IBM Power8/9 (LE)** -> - RHEL 7.x + - RHEL 7.x The Hadoop Foreign Data Wrapper supports use of the Hadoop file system using a HiveServer2 interface or Apache Spark using the Spark Thrift Server. diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/05_installing_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/05_installing_the_hadoop_data_adapter.mdx index 854ef80da63..b2146e0b1c4 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/05_installing_the_hadoop_data_adapter.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/05_installing_the_hadoop_data_adapter.mdx @@ -293,23 +293,23 @@ The following steps will walk you through on using the EDB apt repository to ins On Debian 9 and Ubuntu: - > ```text - > sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - > ``` + ```text + sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + ``` On Debian 10: 1. Set up the EDB repository: - > ```text - > sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - > ``` + ```text + sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + ``` 1. Substitute your EDB credentials for the `username` and `password` in the following command: - > ```text - > sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' - > ``` + ```text + sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' + ``` 3. 
Add support to your system for secure APT repositories: From 1ec7b3af3d527000e7109ed48dfdb830608a8b7f Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Mon, 21 Jun 2021 16:54:24 +0000 Subject: [PATCH 20/50] Deploy to test site Former-commit-id: 428a69d897faf7f906a083a15261d6d8a4fac1cd --- .../deploy-hadoop-da-test-branch.yml | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 .github/workflows/deploy-hadoop-da-test-branch.yml diff --git a/.github/workflows/deploy-hadoop-da-test-branch.yml b/.github/workflows/deploy-hadoop-da-test-branch.yml new file mode 100644 index 00000000000..fe9f390520e --- /dev/null +++ b/.github/workflows/deploy-hadoop-da-test-branch.yml @@ -0,0 +1,60 @@ +name: Deploy Hadoop Data Adapter upcoming release branch to Netlify +on: + push: + branches: + - content/hadoop_data_adapter/2.0.8/upcoming_release +jobs: + build-deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + ref: content/hadoop_data_adapter/2.0.8/upcoming_release + fetch-depth: 0 # fetch whole repo so git-restore-mtime can work + - name: Update submodules + run: git submodule update --init --remote + - name: Adjust file watchers limit + run: echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p + + - uses: actions/setup-node@v1 + with: + node-version: '14.x' + - name: Install yarn + run: sudo npm -g install yarn + - name: Yarn install + run: yarn install --immutable + env: + NODE_ENV: ${{ secrets.NODE_ENV }} + + - name: Checking Gatsby cache + id: gatsby-cache-build + uses: actions/cache@v2 + with: + path: | + public + .cache + key: ${{ runner.os }}-gatsby-build-develop-${{ github.run_id }} + restore-keys: | + ${{ runner.os }}-gatsby-build-develop- + + - name: Fix mtimes + run: yarn fix-mtimes --force + - name: Gatsby build + run: yarn build + env: + APP_ENV: staging + NODE_ENV: ${{ secrets.NODE_ENV }} + NODE_OPTIONS: --max-old-space-size=4096 + ALGOLIA_API_KEY: ${{ secrets.ALGOLIA_API_KEY }} + ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} + ALGOLIA_INDEX_NAME: edb-docs-staging + INDEX_ON_BUILD: false + + - name: Netlify deploy + run: | + sudo yarn global add netlify-cli + netlify deploy --dir=public --prod + env: + NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} + NETLIFY_SITE_ID: ${{ secrets.NETLIFY_WIP1_SITE_ID }} + From 2135350fc33d59cb891523cee491dca81462be55 Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Mon, 21 Jun 2021 16:58:32 +0000 Subject: [PATCH 21/50] Deploy to test site Former-commit-id: 78ce3aced68a4eef183a9d57ee87a788a3c88d64 --- .../workflows/deploy-mongo-da-test-branch.yml | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 .github/workflows/deploy-mongo-da-test-branch.yml diff --git a/.github/workflows/deploy-mongo-da-test-branch.yml b/.github/workflows/deploy-mongo-da-test-branch.yml new file mode 100644 index 00000000000..f9d51b47693 --- /dev/null +++ b/.github/workflows/deploy-mongo-da-test-branch.yml @@ -0,0 +1,60 @@ +name: Deploy MongoDB Data Adapter upcoming release branch to Netlify +on: + push: + branches: + - content/mongo-data-adapter/5-2-9/upcoming-release +jobs: + build-deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + ref: content/mongo-data-adapter/5-2-9/upcoming-release + fetch-depth: 0 # fetch whole repo so git-restore-mtime can work + - name: Update submodules + run: git submodule update --init --remote + - name: Adjust file 
watchers limit + run: echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p + + - uses: actions/setup-node@v1 + with: + node-version: '14.x' + - name: Install yarn + run: sudo npm -g install yarn + - name: Yarn install + run: yarn install --immutable + env: + NODE_ENV: ${{ secrets.NODE_ENV }} + + - name: Checking Gatsby cache + id: gatsby-cache-build + uses: actions/cache@v2 + with: + path: | + public + .cache + key: ${{ runner.os }}-gatsby-build-develop-${{ github.run_id }} + restore-keys: | + ${{ runner.os }}-gatsby-build-develop- + + - name: Fix mtimes + run: yarn fix-mtimes --force + - name: Gatsby build + run: yarn build + env: + APP_ENV: staging + NODE_ENV: ${{ secrets.NODE_ENV }} + NODE_OPTIONS: --max-old-space-size=4096 + ALGOLIA_API_KEY: ${{ secrets.ALGOLIA_API_KEY }} + ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} + ALGOLIA_INDEX_NAME: edb-docs-staging + INDEX_ON_BUILD: false + + - name: Netlify deploy + run: | + sudo yarn global add netlify-cli + netlify deploy --dir=public --prod + env: + NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} + NETLIFY_SITE_ID: ${{ secrets.NETLIFY_WIP2_SITE_ID }} + From 3d270ed6a0fbf6726a32b00aefe41f343f00fd8e Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Mon, 21 Jun 2021 17:31:48 +0000 Subject: [PATCH 22/50] trigger run Former-commit-id: 1bc411c8714cce8fa9c72c0faacd3ff3d0d4d663 --- .github/workflows/deploy-mongo-da-test-branch.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/deploy-mongo-da-test-branch.yml b/.github/workflows/deploy-mongo-da-test-branch.yml index f9d51b47693..bdd5b477603 100644 --- a/.github/workflows/deploy-mongo-da-test-branch.yml +++ b/.github/workflows/deploy-mongo-da-test-branch.yml @@ -57,4 +57,3 @@ jobs: env: NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} NETLIFY_SITE_ID: ${{ secrets.NETLIFY_WIP2_SITE_ID }} - From 8e3b3d4211d1bf29c8cc3bc1f5e23921a0f8c655 Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Mon, 21 Jun 2021 18:48:05 +0000 Subject: [PATCH 23/50] poke it again Former-commit-id: 4c1e66ef74a8f6ee7b58a6606cef0ef3d54495d7 --- .github/workflows/deploy-mongo-da-test-branch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-mongo-da-test-branch.yml b/.github/workflows/deploy-mongo-da-test-branch.yml index bdd5b477603..14fa73dc6e7 100644 --- a/.github/workflows/deploy-mongo-da-test-branch.yml +++ b/.github/workflows/deploy-mongo-da-test-branch.yml @@ -1,4 +1,4 @@ -name: Deploy MongoDB Data Adapter upcoming release branch to Netlify +name: Deploy MongoDB FDW upcoming release branch to Netlify on: push: branches: From 16f82ff0767d772c6fd6ec4041eb7fb71fd83d89 Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Mon, 21 Jun 2021 18:51:22 +0000 Subject: [PATCH 24/50] The sad bit is, this is at least the 3rd time I've made this exact error Former-commit-id: 2bd1fc03e8e496e31598f172e86bc5e1ca9981e5 --- .github/workflows/deploy-mongo-da-test-branch.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/deploy-mongo-da-test-branch.yml b/.github/workflows/deploy-mongo-da-test-branch.yml index 14fa73dc6e7..1e3d6648d0f 100644 --- a/.github/workflows/deploy-mongo-da-test-branch.yml +++ b/.github/workflows/deploy-mongo-da-test-branch.yml @@ -2,14 +2,14 @@ name: Deploy MongoDB FDW upcoming release branch to Netlify on: push: branches: - - 
content/mongo-data-adapter/5-2-9/upcoming-release + - content/mongo-data-adapter/5.2.9/upcoming-release jobs: build-deploy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 with: - ref: content/mongo-data-adapter/5-2-9/upcoming-release + ref: content/mongo-data-adapter/5.2.9/upcoming-release fetch-depth: 0 # fetch whole repo so git-restore-mtime can work - name: Update submodules run: git submodule update --init --remote From fc73e4928e90c6af95d0bb3eb7ab9153f1919ff9 Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Mon, 21 Jun 2021 18:53:05 +0000 Subject: [PATCH 25/50] just copy-paste the name Former-commit-id: 1bf94979ed93bb4aff3b37e85b8b22844fdee893 --- .github/workflows/deploy-mongo-da-test-branch.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/deploy-mongo-da-test-branch.yml b/.github/workflows/deploy-mongo-da-test-branch.yml index 1e3d6648d0f..f4581292586 100644 --- a/.github/workflows/deploy-mongo-da-test-branch.yml +++ b/.github/workflows/deploy-mongo-da-test-branch.yml @@ -2,14 +2,14 @@ name: Deploy MongoDB FDW upcoming release branch to Netlify on: push: branches: - - content/mongo-data-adapter/5.2.9/upcoming-release + - content/mongo_data_adapter/5.2.9/upcoming_release jobs: build-deploy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 with: - ref: content/mongo-data-adapter/5.2.9/upcoming-release + ref: content/mongo_data_adapter/5.2.9/upcoming_release fetch-depth: 0 # fetch whole repo so git-restore-mtime can work - name: Update submodules run: git submodule update --init --remote From d65e3b71eb61e74a6ef550481477963b930f96bd Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Tue, 22 Jun 2021 09:33:58 +0530 Subject: [PATCH 26/50] Delete 01_whats_new.mdx Former-commit-id: 0fc8d40bc37ad4b35e4bedc566a05bed6e27a289 --- .../docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx deleted file mode 100644 index ca79f3e6a32..00000000000 --- a/product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: "What’s New" ---- - - - -The following features are added to create Hadoop Foreign Data Wrapper `2.0.7`: - -- Support for EDB Postgres Advanced Server 13. -- Support for Ubuntu 20.04 LTS platform. From 6ecfe80adef28ea9cb509f5f4c51388afcbf1b17 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Tue, 22 Jun 2021 09:35:35 +0530 Subject: [PATCH 27/50] Update index.mdx Former-commit-id: 8b632f5574abda5579a06ddda9cb25ad74330a5d --- product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx index bb911fb9abe..0467201e53d 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx @@ -10,6 +10,6 @@ This guide uses the term `Postgres` to refer to an instance of EDB Postgres Adva
-whats_new requirements_overview architecture_overview supported_authentication_methods installing_the_hadoop_data_adapter updating_the_hadoop_data_adapter features_of_hdfs_fdw configuring_the_hadoop_data_adapter using_the_hadoop_data_adapter identifying_data_adapter_version uninstalling_the_hadoop_data_adapter conclusion +requirements_overview architecture_overview supported_authentication_methods installing_the_hadoop_data_adapter updating_the_hadoop_data_adapter features_of_hdfs_fdw configuring_the_hadoop_data_adapter using_the_hadoop_data_adapter identifying_data_adapter_version uninstalling_the_hadoop_data_adapter conclusion
From b715750e2e179357022dfe69c6c4db2cb3f7b82f Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Tue, 22 Jun 2021 10:02:50 +0530 Subject: [PATCH 28/50] Deleted What's New section Former-commit-id: fb99a0dedfa8abe67ddfd790728bf3775cb62636 --- .../docs/mongo_data_adapter/5.2.9/01_whats_new.mdx | 10 ---------- product_docs/docs/mongo_data_adapter/5.2.9/index.mdx | 2 +- 2 files changed, 1 insertion(+), 11 deletions(-) delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx deleted file mode 100644 index 864a831e6ff..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: "What’s New" ---- - - - -The following features are added to create MongoDB Foreign Data Wrapper `5.2.8`: - -- Support for EDB Postgres Advanced Server 13. -- Support for Ubuntu 20.04 LTS platform. diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx index 5117f306aad..ab14a10bd26 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx @@ -10,6 +10,6 @@ This guide uses the term `Postgres` to refer to an instance of EDB Postgres Adva
-whats_new requirements_overview architecture_overview installing_the_mongo_data_adapter updating_the_mongo_data_adapter features_of_mongo_fdw configuring_the_mongo_data_adapter example_using_the_mongo_data_adapter identifying_data_adapter_version limitations uninstalling_the_mongo_data_adapter conclusion +requirements_overview architecture_overview installing_the_mongo_data_adapter updating_the_mongo_data_adapter features_of_mongo_fdw configuring_the_mongo_data_adapter example_using_the_mongo_data_adapter identifying_data_adapter_version limitations uninstalling_the_mongo_data_adapter conclusion
From 9362feb8645d57e6c7bf5edbf417ea2fea1bdc8c Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Wed, 23 Jun 2021 11:17:10 +0530 Subject: [PATCH 29/50] Update for FDW-348 Former-commit-id: fc2e5fb4d6c9d4e8ed2bfd97c4bb64329ed008f9 --- .../05_updating_the_mongo_data_adapter.mdx | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx index aa29e7403f9..a32e883b0bb 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx @@ -10,21 +10,29 @@ If you have an existing RPM installation of MongoDB Foreign Data Wrapper, you ca - On RHEL or CentOS 7: - > `yum upgrade edb-repo` + `yum upgrade edb-repo` + +- On RHEL or CentOS 7 on PPCLE: + + `yum upgrade edb-repo` - On RHEL or CentOS 8: - > `dnf upgrade edb-repo` + `dnf upgrade edb-repo` yum or dnf will update the `edb.repo` file to enable access to the current EDB repository, configured to connect with the credentials specified in your `edb.repo` file. Then, you can use yum or dnf to upgrade any installed packages: - On RHEL or CentOS 7: - > `yum upgrade edb-as-mongo_fdw` + `yum upgrade edb-as-mongo_fdw edb-libmongoc-libs` + +- On RHEL or CentOS 7 on PPCLE: + + `yum upgrade edb-as-mongo_fdw edb-libmongoc-libs` - On RHEL or CentOS 8: - > `dnf upgrade edb-as-mongo_fdw` + `dnf upgrade edb-as-mongo_fdw` where `xx` is the server version number. @@ -32,6 +40,6 @@ yum or dnf will update the `edb.repo` file to enable access to the current EDB r To update MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host, use the following command: -> `apt-get --only-upgrade install edb-as-mongo-fdw edb-libmongoc` -> -> where `xx` is the server version number. + `apt-get --only-upgrade install edb-as-mongo-fdw edb-libmongoc` + + where `xx` is the server version number. From 490ec90a4491e285441b37ad5bfcffdef41d9557 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Wed, 23 Jun 2021 14:39:57 +0530 Subject: [PATCH 30/50] Including section for PPCLE Former-commit-id: 4940cdfc99dbb9f5fcb166020e4aea79a8abf328 --- .../04_installing_the_mongo_data_adapter.mdx | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx index b1450a6c26c..a0287d8e886 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx @@ -13,6 +13,7 @@ The MongoDB Foreign Data Wrapper can be installed with an RPM package. During th You can install the MongoDB Foreign Data Wrapper using an RPM package on the following platforms: - [RHEL 7](#rhel7) +- [RHEL 7 PPCLE](#rhel7_PPCLE) - [RHEL 8](#rhel8) - [CentOS 7](#centos7) - [CentOS 8](#centos8) @@ -83,6 +84,87 @@ When you install an RPM package that is signed by a source that is not recognize During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + + +### On RHEL 7 PPCLE + +Before installing the MySQL Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: +1. 
Use the following commands to install Advance Toolchain:
+```text
+rpm --import https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b
+
+cat > /etc/yum.repos.d/advance-toolchain.repo <
+
+After receiving your repository credentials you can:
+
+1. Create the repository configuration file.
+2. Modify the file, providing your user name and password.
+3. Install the MongoDB Foreign Data Wrapper.
+
+**Creating a Repository Configuration File**
+
+To create the repository configuration file, assume superuser privileges, and invoke the following command:
+
+```text
+yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm
+```
+
+The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`.
+
+**Modifying the file, providing your user name and password**
+
+After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user.
+
+```text
+[edb]
+name=EnterpriseDB RPMs $releasever - $basearch
+baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY
+```
+
+**Installing MongoDB Foreign Data Wrapper**
+
+After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper:
+
+ ```
+ yum install edb-as<xx>-mongo_fdw
+ ```
+
+where `xx` is the server version number.
+
+When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue.
+
+During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve.
+
+
+
+### On RHEL 8
From b0dd91c9b1fe05305fb0fdba6c48fb098bc71e86 Mon Sep 17 00:00:00 2001
From: Abhilasha Narendra
Date: Wed, 23 Jun 2021 15:51:52 +0530
Subject: [PATCH 31/50] Adding PPCLE section and restructuring the sections

Former-commit-id: c518643e69f72fcb17d022f9ec47116a7f5e9b15
---
 .../05_installing_the_hadoop_data_adapter.mdx | 319 ++++++++----------
 1 file changed, 137 insertions(+), 182 deletions(-)

diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx
index 854ef80da63..d6eee389a88 100644
--- a/product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx
+++ b/product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx
@@ -4,7 +4,7 @@ title: "Installing the Hadoop Foreign Data Wrapper"
-The Hadoop Foreign Data Wrapper can be installed with an RPM package. During the installation process, the installer will satisfy software prerequisites.
+The Hadoop Foreign Data Wrapper can be installed with an RPM package.
During the You can install the Hadoop Foreign Data Wrapper using an RPM package on the following platforms: +- [RHEL or CentOS 7 PPCLE](#rhel_centos7_PPCLE) - [RHEL 7](#rhel7) - [RHEL 8](#rhel8) - [CentOS 7](#centos7) - [CentOS 8](#centos8) - - -### On RHEL 7 + -Before installing the Hadoop Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: +### On RHEL or CentOS 7 PPCLE -Install the `epel-release` package: +1. Use the following command to create a configuration file and install Advance Toolchain: - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` + ```text + rpm --import https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b + + cat > /etc/yum.repos.d/advance-toolchain.repo < + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` -After receiving your repository credentials you can: +4. Install the EPEL repository: -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install `edb-as-hdfs_fdw`. + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` -**Creating a Repository Configuration File** +5. On RHEL 7 PPCLE, enable the additional repositories to resolve EPEL dependencies: -To create the repository configuration file, assume superuser privileges, and invoke the following command: + ```text + subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" + ``` - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` +6. Install the selected package: -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + ```text + dnf install edb-as-hdfs_fdw + ``` -**Modifying the file, providing your user name and password** + where `xx` is the server version number. -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` + -**Installing Hadoop Foreign Data Wrapper** +### On RHEL 7 -After saving your changes to the configuration file, use the following commands to install the Hadoop Foreign Data Wrapper: +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: - ``` - yum install edb-as-hdfs_fdw - ``` + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` -where `xx` is the server version number. +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. 
+ ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. +3. Install the EPEL repository: - + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` -### On RHEL 8 +4. Enable the additional repositories to resolve dependencies: -Before installing the Hadoop Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: + ```text + subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" + ``` -Install the `epel-release` package: +5. Install the selected package: - ```text - dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - ``` + ```text + dnf install edb-as-hdfs_fdw + ``` -Enable the `codeready-builder-for-rhel-8-\*-rpms` repository: + where `xx` is the server version number. - ```text - ARCH=$( /bin/arch ) - subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" - ``` -You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - -After receiving your repository credentials you can: + -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install `edb-as-hdfs_fdw`. +### On RHEL 8 -**Creating a Repository Configuration File** +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` -To create the repository configuration file, assume superuser privileges, and invoke the following command: +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: - ```text - dnf -y https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. +3. Install the EPEL repository: -**Modifying the file, providing your user name and password** + ```text + dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + ``` -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. +4. Enable the additional repositories to resolve dependencies: - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` + ```text + ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" + ``` -**Installing Hadoop Foreign Data Wrapper** +5. Disable the built-in PostgreSQL module: -After saving your changes to the configuration file, use the below command to install the Hadoop Foreign Data Wrapper: + ```text + dnf -qy module disable postgresql + ``` +6. 
Install the selected package: + + ```text + dnf install edb-as-hdfs_fdw + ``` - ```text - dnf install edb-as-hdfs_fdw - ``` + where `xx` is the server version number. -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. ### On CentOS 7 -Before installing the Hadoop Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: - -Install the `epel-release` package: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` - -!!! Note - You may need to enable the `[extras]` repository definition in the `CentOS-Base.repo` file (located in `/etc/yum.repos.d`). - -You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - - - -After receiving your repository credentials you can: - -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install `edb-as-hdfs_fdw`. - -**Creating a Repository Configuration File** - -To create the repository configuration file, assume superuser privileges, and invoke the following command: +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: -**Modifying the file, providing your user name and password** + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. +3. Install the EPEL repository: - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` -**Installing Hadoop Foreign Data Wrapper** +4. Install the selected package: -After saving your changes to the configuration file, use the following command to install the Hadoop Foreign Data Wrapper: + ```text + dnf install edb-as-hdfs_fdw + ``` - ```text - yum install edb-as-hdfs_fdw - ``` + where `xx` is the server version number. -where `xx` is the server version number. -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. 
-During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. ### On CentOS 8 -Before installing the Hadoop Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: - -Install the `epel-release` package: - ```text - dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - ``` - -Enable the `PowerTools` repository: - - ```text - dnf config-manager --set-enabled PowerTools - ``` - -You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - - - -After receiving your repository credentials you can: - -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install `edb-as-hdfs_fdw`. - -**Creating a Repository Configuration File** - -To create the repository configuration file, assume superuser privileges, and invoke the following command: +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` - ```text - dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` -**Modifying the file, providing your user name and password** +3. Install the EPEL repository: -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. + ```text + dnf -y install epel-release + ``` - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` +4. Enable the additional repositories to resolve dependencies: -**Installing Hadoop Foreign Data Wrapper** + ```text + dnf config-manager --set-enabled PowerTools + ``` -After saving your changes to the configuration file, use the following command to install the Hadoop Foreign Data Wrapper: +5. Disable the built-in PostgreSQL module: - ```text - dnf install edb-as-hdfs_fdw - ``` + ```text + dnf -qy module disable postgresql + ``` +6. Install the selected package: + + ```text + dnf install edb-as-hdfs_fdw + ``` -where `xx` is the server version number. + where `xx` is the server version number. -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. 
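The `xx` in the package name above resolves to your EDB Postgres Advanced Server major version. As a purely illustrative sketch (assuming, for example, version 13; substitute your own version), the final install step would resolve to:

```text
# hypothetical example: replace 13 with your Advanced Server version
dnf install edb-as13-hdfs_fdw
```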
## Installing the Hadoop Foreign Data Wrapper on a Debian or Ubuntu Host From 6ab82d282476d541b4691a768038c91579270891 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Wed, 23 Jun 2021 15:59:06 +0530 Subject: [PATCH 32/50] Restructured the installation sections Former-commit-id: 8f3642339cf8b6de4368b78c28a68c17b97e8c46 --- .../04_installing_the_mongo_data_adapter.mdx | 399 ++++++------------ 1 file changed, 136 insertions(+), 263 deletions(-) diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx index a0287d8e886..9fe2ab8e364 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx @@ -2,9 +2,10 @@ title: "Installing the MongoDB Foreign Data Wrapper" --- - -The MongoDB Foreign Data Wrapper can be installed with an RPM package. During the installation process, the installer will satisfy software prerequisites. + + +The MongoDB Foreign Data Wrapper can be installed with an RPM package. During the installation process, the installer will satisfy software prerequisites. If yum encounters a dependency that it cannot resolve, it will provide a list of the required dependencies that you must manually resolve. @@ -12,156 +13,103 @@ The MongoDB Foreign Data Wrapper can be installed with an RPM package. During th You can install the MongoDB Foreign Data Wrapper using an RPM package on the following platforms: +- [RHEL or CentOS 7 PPCLE](#rhel_centos7_PPCLE) - [RHEL 7](#rhel7) -- [RHEL 7 PPCLE](#rhel7_PPCLE) - [RHEL 8](#rhel8) - [CentOS 7](#centos7) - [CentOS 8](#centos8) - - -### On RHEL 7 - -Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: - -Install the `epel-release` package: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` + -Enable the optional, extras, and HA repositories: +### On RHEL or CentOS 7 PPCLE - ```text - subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" - ``` +1. Use the following command to create a configuration file and install Advance Toolchain: -You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + ```text + rpm --import https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b - + cat > /etc/yum.repos.d/advance-toolchain.repo <-mongo_fdw`. + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` -**Creating a Repository Configuration File** +3. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: -To create the repository configuration file, assume superuser privileges, and invoke the following command: + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` +4. Install the EPEL repository: -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. 
+ ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` -**Modifying the file to provide your user name and password** - -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. - - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` - -**Installing the MongoDB Foreign Data Wrapper** - -After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: - - ``` - yum install edb-as-mongo_fdw - ``` - -where `xx` is the server version number. +5. On RHEL 7 PPCLE, enable the additional repositories to resolve EPEL dependencies: -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + ```text + subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" + ``` -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. +6. Install the selected package: - + ```text + dnf install edb-as-mongo_fdw + ``` -### On RHEL 7 PPCLE + where `xx` is the server version number. -Before installing the MySQL Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: -1. Use the following commands to install Advance Toolchain: -```text -rpm --import https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b -cat > /etc/yum.repos.d/advance-toolchain.repo < - -After receiving your repository credentials you can: - -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install the MySQL Foreign Data Wrapper. + -**Creating a Repository Configuration File** +### On RHEL 7 -To create the repository configuration file, assume superuser privileges, and invoke the following command: +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: -**Modifying the file, providing your user name and password** + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. +3. 
Install the EPEL repository: -```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -repo_gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` -**Installing MySQL Foreign Data Wrapper** +4. Enable the additional repositories to resolve dependencies: -After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: + ```text + subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" + ``` - ``` - yum install edb-as-mongo_fdw - ``` +5. Install the selected package: -where `xx` is the server version number. + ```text + dnf install edb-as-mongo_fdw + ``` -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + where `xx` is the server version number. -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. @@ -169,195 +117,120 @@ During the installation, yum may encounter a dependency that it cannot resolve. ### On RHEL 8 -Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: - -Install the `epel-release` package: - - ```text - dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - ``` - -Enable the `codeready-builder-for-rhel-8-\*-rpms` repository: +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` - ```text - ARCH=$( /bin/arch ) - subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" - ``` +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: -You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` - +3. Install the EPEL repository: -After receiving your repository credentials: + ```text + dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + ``` -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install `edb-as-mongo_fdw`. +4. Enable the additional repositories to resolve dependencies: -**Creating a Repository Configuration File** + ```text + ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" + ``` -To create the repository configuration file, assume superuser privileges, and invoke the following command: +5. Disable the built-in PostgreSQL module: - ```text - dnf -y https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` + ```text + dnf -qy module disable postgresql + ``` +6. Install the selected package: + + ```text + dnf install edb-as-mongo_fdw + ``` -The repository configuration file is named `edb.repo`. 
The file resides in `/etc/yum.repos.d`. + where `xx` is the server version number. -**Modifying the file to provide your user name and password** -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. - - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` - -**Installing the MongoDB Foreign Data Wrapper** - -After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: - - ```text - dnf install edb-as-mongo_fdw - ``` - -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. - -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. ### On CentOS 7 -Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: - -Install the `epel-release` package: +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` -!!! Note - You may need to enable the `[extras]` repository definition in the `CentOS-Base.repo` file (located in `/etc/yum.repos.d`). +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: -You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` - +3. Install the EPEL repository: -After receiving your repository credentials you can: + ```text + yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + ``` -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install `edb-as-mongo_fdw`. +4. Install the selected package: -**Creating a Repository Configuration File** + ```text + dnf install edb-as-mongo_fdw + ``` -To create the repository configuration file, assume superuser privileges, and invoke the following command: + where `xx` is the server version number. - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. -**Modifying the file to provide your user name and password** - -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. 
- - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` - -**Installing the MongoDB Foreign Data Wrapper** - -After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: - - ```text - yum install edb-as-mongo_fdw - ``` - -where `xx` is the server version number. - -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. - -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. ### On CentOS 8 -Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: - -Install the `epel-release` package: - - ```text - dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - ``` - -Enable the `PowerTools` repository: - - ```text - dnf config-manager --set-enabled PowerTools - ``` - -You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - +1. To create the repository configuration file, assume superuser privileges, and invoke the following command: + + ```text + dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` -After receiving your repository credentials: +2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install `edb-as-mongo_fdw`. + ```text + sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo + ``` -**Creating a Repository Configuration File** +3. Install the EPEL repository: -To create the repository configuration file, assume superuser privileges, and invoke the following command: + ```text + dnf -y install epel-release + ``` - ```text - dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` +4. Enable the additional repositories to resolve dependencies: -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + ```text + dnf config-manager --set-enabled PowerTools + ``` -**Modifying the file to provide your user name and password** +5. Disable the built-in PostgreSQL module: -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. - - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` - -**Installing the MongoDB Foreign Data Wrapper** - -After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: - - ```text - dnf install edb-as-mongo_fdw - ``` - -where `xx` is the server version number. 
+ ```text + dnf -qy module disable postgresql + ``` +6. Install the selected package: + + ```text + dnf install edb-as-mongo_fdw + ``` -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + where `xx` is the server version number. -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. ## Installing the MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host From 22414f9c3dfeb61a3a401c0cd95a7b115c7141e0 Mon Sep 17 00:00:00 2001 From: Manjusha Vaidya Date: Thu, 24 Jun 2021 16:20:43 +0530 Subject: [PATCH 33/50] What's New Former-commit-id: 8d98ecce0df29e83e07025439269b6e9ea6c5d46 --- product_docs/docs/pgpool/1.0/index.mdx | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/product_docs/docs/pgpool/1.0/index.mdx b/product_docs/docs/pgpool/1.0/index.mdx index a820cf3d9c7..6213d274955 100644 --- a/product_docs/docs/pgpool/1.0/index.mdx +++ b/product_docs/docs/pgpool/1.0/index.mdx @@ -37,8 +37,20 @@ The term Postgres refers to either PostgreSQL or EDB Postgres Advanced Server. **What's New** -- Upstream merge with Pgpool-II [4.2.3](https://www.pgpool.net/docs/42/en/html/release-4-2-3.html). -- 4.2 is a major release. For more details on migrating from earlier versions to version 4.2, see the [Migration Section](https://www.pgpool.net/docs/42/en/html/release-4-2-0.html#AEN10359). +This release contains a merge with upstream, which includes the following bug fixes: + +- Fix race condition between detach_false_primary and follow_primary_command. +- Fix broken database/app redirect preference in statement level load balancing mode. +- Fix pgpool crash when query cache enabled for non-streaming and logical replication mode. +- Fix query cache not being created in other than streaming and logical replication mode. +- Fix scenario where no primary node is found when detach_false_primary and follow_primary_command operation are running concurrently +- Fix hang when using asyncpg (Python frontend driver with asynchronous I/O) +- Enhance debug message upon receiving startup packet. Now it will print all the GUC variables in the log instead of just username, database name and application name. It will help clients to see why cached connections are not used. + +For more information, please refer to the [Upstream release notes](https://www.pgpool.net/docs/42/en/html/release-4-2-3.html). + +!!! Note + 4.2 is a major release. For more details on migrating from earlier versions to version 4.2, see the [Migration Section](https://www.pgpool.net/docs/42/en/html/release-4-2-0.html#AEN10359).
From b5c247e62a2cb759441c3dff0c7ca0d92d327c99 Mon Sep 17 00:00:00 2001 From: josh-heyer Date: Thu, 24 Jun 2021 11:40:28 +0000 Subject: [PATCH 34/50] New PDFs generated by Github Actions Former-commit-id: 4ac69df447f044253caf183187308b77391149c5 From 1125400c4fbe0b999f2b3605bfa001ad3ec5b022 Mon Sep 17 00:00:00 2001 From: sheetal Date: Thu, 24 Jun 2021 19:02:28 +0530 Subject: [PATCH 35/50] Changed reference of OEL to OL Former-commit-id: eb773f81860ba5c2bf7c9e474190832215918088 --- .../docs/epas/12/epas_inst_linux/02_supported_platforms.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/product_docs/docs/epas/12/epas_inst_linux/02_supported_platforms.mdx b/product_docs/docs/epas/12/epas_inst_linux/02_supported_platforms.mdx index 920cc3e423d..f7d142b2cc9 100644 --- a/product_docs/docs/epas/12/epas_inst_linux/02_supported_platforms.mdx +++ b/product_docs/docs/epas/12/epas_inst_linux/02_supported_platforms.mdx @@ -13,11 +13,11 @@ For information about the platforms and versions supported by Advanced Server, v !!! Note - Advanced Server is no longer supported on RHEL/CentOS/OEL 6.x platforms. It is strongly recommended that EDB products running on these platforms be migrated to a supported platform. + Advanced Server is no longer supported on RHEL/CentOS/OL 6.x platforms. It is strongly recommended that EDB products running on these platforms be migrated to a supported platform. **Limitations** The following limitations apply to EDB Postgres Advanced Server: - The `data` directory of a production database should not be stored on an NFS file system. -- The LLVM JIT package is supported on RHEL or CentOS 7.x and 8.x only. LLVM JIT is not supported on PPC-LE 64 running RHEL or CentOS 7.x or 8.x. +- The LLVM JIT package is supported on RHEL or CentOS 7.x, 8.x, and SLES. LLVM JIT is not supported on PPC-LE 64 running RHEL or CentOS 7.x or 8.x. 
From 57e0bc983742f5659d5257f4465003c05cdc6860 Mon Sep 17 00:00:00 2001 From: josh-heyer Date: Thu, 24 Jun 2021 13:57:19 +0000 Subject: [PATCH 36/50] New PDFs generated by Github Actions Former-commit-id: 68a8261f6fa77d6090cb44c1ce19f4cba0dbad7c From 436aebcfe21f0fcbee1444971afc9027dd59179f Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Thu, 24 Jun 2021 15:17:53 -0600 Subject: [PATCH 37/50] Fix two out-of-date URLs for BART 2.5.2 Former-commit-id: 477db584655856bfa2e985c3413a0308c0d70d70 --- src/constants/product-stubs.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/constants/product-stubs.js b/src/constants/product-stubs.js index a37ed3103a8..4a88add3313 100644 --- a/src/constants/product-stubs.js +++ b/src/constants/product-stubs.js @@ -351,7 +351,7 @@ export const productStubs = { href: 'https://www.enterprisedb.com/edb-docs/d/edb-backup-and-recovery-tool/reference/reference-guide/2.5.2/index.html', pdf: - 'https://www.enterprisedb.com/edb-docs/static/docs/bart/2.5.2/edb_bart_ref.pdf', + 'https://www.enterprisedb.com/edb-docs/static/docs/bart/2.5.2/EDB_Postgres_Backup_and_Recovery_Reference_Guide.pdf', }, ], }, @@ -364,7 +364,7 @@ export const productStubs = { href: 'https://www.enterprisedb.com/edb-docs/d/edb-backup-and-recovery-tool/user-guides/backup-recovery-guide/2.5.2/index.html', pdf: - 'https://www.enterprisedb.com/edb-docs/static/docs/bart/2.5.2/edb_bart_user.pdf', + 'https://www.enterprisedb.com/edb-docs/static/docs/bart/2.5.2/EDB_Postgres_Backup_and_Recovery_User_Guide.pdf', }, ], }, From fd4ab32340659dc0b85710e404588302c2fd4654 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 25 Jun 2021 12:08:14 +0530 Subject: [PATCH 38/50] Update 05_installing_the_hadoop_data_adapter.mdx Former-commit-id: b29504f176c7cff9782aa6fc04befc8d21957729 --- .../2.0.8/05_installing_the_hadoop_data_adapter.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx index d6eee389a88..9ec328fdbf5 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/05_installing_the_hadoop_data_adapter.mdx @@ -70,7 +70,7 @@ You can install the Hadoop Foreign Data Wrapper using an RPM package on the foll dnf install edb-as-hdfs_fdw ``` - where `xx` is the server version number. + where `xx` is the server version number. @@ -107,7 +107,7 @@ You can install the Hadoop Foreign Data Wrapper using an RPM package on the foll dnf install edb-as-hdfs_fdw ``` - where `xx` is the server version number. + where `xx` is the server version number. @@ -151,7 +151,7 @@ You can install the Hadoop Foreign Data Wrapper using an RPM package on the foll dnf install edb-as-hdfs_fdw ``` - where `xx` is the server version number. + where `xx` is the server version number. @@ -183,7 +183,7 @@ You can install the Hadoop Foreign Data Wrapper using an RPM package on the foll dnf install edb-as-hdfs_fdw ``` - where `xx` is the server version number. + where `xx` is the server version number. @@ -228,7 +228,7 @@ You can install the Hadoop Foreign Data Wrapper using an RPM package on the foll dnf install edb-as-hdfs_fdw ``` - where `xx` is the server version number. + where `xx` is the server version number. 
@@ -291,4 +291,4 @@ The following steps will walk you through on using the EDB apt repository to ins apt-get install edb-as-hdfs-fdw ``` -where `xx` is the server version number. + where `xx` is the server version number. From bbc804bfc9beb179356236f7f2ebd1c7df7b7667 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 25 Jun 2021 12:54:12 +0530 Subject: [PATCH 39/50] Adding What's new Former-commit-id: ce5f1e592a09184c0540fd89b833b3740fe7f617 --- .../docs/hadoop_data_adapter/2.0.8/01_what's_new.mdx | 11 +++++++++++ product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/01_what's_new.mdx diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/01_what's_new.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/01_what's_new.mdx new file mode 100644 index 00000000000..17bebc9e2f4 --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/01_what's_new.mdx @@ -0,0 +1,11 @@ +--- +title: "What’s New" +--- + + + +The following features are added to create Hadoop Foreign Data Wrapper `2.0.8`: + +- Support for Hadoop version 3.2.x +- Support for Hive version 3.1.x +- Support for Spark version 3.0.x. diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx index 0467201e53d..bb911fb9abe 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/index.mdx @@ -10,6 +10,6 @@ This guide uses the term `Postgres` to refer to an instance of EDB Postgres Adva
-requirements_overview architecture_overview supported_authentication_methods installing_the_hadoop_data_adapter updating_the_hadoop_data_adapter features_of_hdfs_fdw configuring_the_hadoop_data_adapter using_the_hadoop_data_adapter identifying_data_adapter_version uninstalling_the_hadoop_data_adapter conclusion +whats_new requirements_overview architecture_overview supported_authentication_methods installing_the_hadoop_data_adapter updating_the_hadoop_data_adapter features_of_hdfs_fdw configuring_the_hadoop_data_adapter using_the_hadoop_data_adapter identifying_data_adapter_version uninstalling_the_hadoop_data_adapter conclusion
From 2313850b371561037328354abcc29a8702ca47c5 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 25 Jun 2021 14:02:37 +0530 Subject: [PATCH 40/50] Fixing review comments Former-commit-id: 08e0c14ca493e1e039c527fa3f05e29b828ff6ae --- .../06_updating_the_hadoop_data_adapter.mdx | 18 +- ...08_configuring_the_hadoop_data_adapter.mdx | 174 +++++++++--------- .../09_using_the_hadoop_data_adapter.mdx | 4 +- ...1_uninstalling_the_hadoop_data_adapter.mdx | 4 +- 4 files changed, 100 insertions(+), 100 deletions(-) diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/06_updating_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/06_updating_the_hadoop_data_adapter.mdx index 17040838963..400a0b649a0 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.8/06_updating_the_hadoop_data_adapter.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/06_updating_the_hadoop_data_adapter.mdx @@ -4,36 +4,36 @@ title: "Updating the Hadoop Foreign Data Wrapper" -**Updating an RPM Installation** +## Updating an RPM Installation If you have an existing RPM installation of Hadoop Foreign Data Wrapper, you can use yum or dnf to upgrade your repository configuration file and update to a more recent product version. To update the `edb.repo` file, assume superuser privileges and enter: - On RHEL or CentOS 7: - > `yum upgrade edb-repo` + `yum upgrade edb-repo` - On RHEL or CentOS 8: - > `dnf upgrade edb-repo` + `dnf upgrade edb-repo` yum or dnf will update the `edb.repo` file to enable access to the current EDB repository, configured to connect with the credentials specified in your `edb.repo` file. Then, you can use yum or dnf to upgrade any installed packages: - On RHEL or CentOS 7: - > `yum upgrade edb-as-hdfs_fdw` + `yum upgrade edb-as-hdfs_fdw` where `xx` is the server version number. - On RHEL or CentOS 8: - > `dnf upgrade edb-as-hdfs_fdw` + `dnf upgrade edb-as-hdfs_fdw` where `xx` is the server version number. -**Updating MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host** +## Updating MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host To update MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host, use the following command: -> `apt-get --only-upgrade install edb-as-hdfs-fdw` -> -> where `xx` is the server version number. + `apt-get --only-upgrade install edb-as-hdfs-fdw` + + where `xx` is the server version number. diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/08_configuring_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/08_configuring_the_hadoop_data_adapter.mdx index d2462cf1e8a..4a560503398 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.8/08_configuring_the_hadoop_data_adapter.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/08_configuring_the_hadoop_data_adapter.mdx @@ -8,30 +8,30 @@ Before creating the extension and the database objects that use the extension, y After installing Postgres, modify the `postgresql.conf` located in: -> `/var/lib/edb/as_version/data` + `/var/lib/edb/as_version/data` Modify the configuration file with your editor of choice, adding the `hdfs_fdw.jvmpath` parameter to the end of the configuration file, and setting the value to specify the location of the Java virtual machine (`libjvm.so`). Set the value of `hdfs_fdw.classpath` to indicate the location of the java class files used by the adapter; use a colon (:) as a delimiter between each path. 
For example: -> ```text -> hdfs_fdw.classpath= -> '/usr/edb/as12/lib/HiveJdbcClient-1.0.jar:/home/edb/Projects/hadoop_fdw/hadoop/share/hadoop/common/hadoop-common-2.6.4.jar:/home/edb/Projects/hadoop_fdw/apache-hive-1.0.1-bin/lib/hive-jdbc-1.0.1-standalone.jar' -> ``` -> -> !!! Note -> The jar files (hive-jdbc-1.0.1-standalone.jar and hadoop-common-2.6.4.jar) mentioned in the above example should be copied from respective Hive and Hadoop sources or website to PostgreSQL instance where Hadoop Foreign Data Wrapper is installed. -> -> If you are using EDB Advanced Server and have a `DATE` column in your database, you must set `edb_redwood_date = OFF` in the `postgresql.conf` file. + ```text + hdfs_fdw.classpath= + '/usr/edb/as12/lib/HiveJdbcClient-1.0.jar:/home/edb/Projects/hadoop_fdw/hadoop/share/hadoop/common/hadoop-common-2.6.4.jar:/home/edb/Projects/hadoop_fdw/apache-hive-1.0.1-bin/lib/hive-jdbc-1.0.1-standalone.jar' + ``` + + !!! Note + The jar files (hive-jdbc-1.0.1-standalone.jar and hadoop-common-2.6.4.jar) mentioned in the above example should be copied from respective Hive and Hadoop sources or website to PostgreSQL instance where Hadoop Foreign Data Wrapper is installed. + + If you are using EDB Advanced Server and have a `DATE` column in your database, you must set `edb_redwood_date = OFF` in the `postgresql.conf` file. After setting the parameter values, restart the Postgres server. For detailed information about controlling the service on an Advanced Server host, see the EDB Postgres Advanced Server Installation Guide, available at: -> + Before using the Hadoop Foreign Data Wrapper, you must: -> 1. Use the [CREATE EXTENSION](#create-extension) command to create the extension on the Postgres host. -> 2. Use the [CREATE SERVER](#create-server) command to define a connection to the Hadoop file system. -> 3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. -> 4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Advanced Server database that corresponds to a database that resides on the Hadoop cluster. + 1. Use the [CREATE EXTENSION](#create-extension) command to create the extension on the Postgres host. + 2. Use the [CREATE SERVER](#create-server) command to define a connection to the Hadoop file system. + 3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. + 4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Advanced Server database that corresponds to a database that resides on the Hadoop cluster. @@ -47,21 +47,21 @@ CREATE EXTENSION [IF NOT EXISTS] hdfs_fdw [WITH] [SCHEMA schema_name]; `IF NOT EXISTS` -> Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. + Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. `schema_name` -> Optionally specify the name of the schema in which to install the extension's objects. + Optionally specify the name of the schema in which to install the extension's objects. **Example** The following command installs the `hdfs_fdw` hadoop foreign data wrapper: -> `CREATE EXTENSION hdfs_fdw;` + `CREATE EXTENSION hdfs_fdw;` For more information about using the foreign data wrapper `CREATE EXTENSION` command, see: -> . + . 
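As a small, hedged illustration of the optional `IF NOT EXISTS` clause described above, a setup script that may be re-run safely could instead use:

```text
-- issues a notice instead of an error if the extension is already installed
CREATE EXTENSION IF NOT EXISTS hdfs_fdw;
```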
@@ -80,15 +80,15 @@ The role that defines the server is the owner of the server; use the `ALTER SERV `server_name` -> Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. + Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. `FOREIGN_DATA_WRAPPER` -> Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `hdfs_fdw` foreign data wrapper when connecting to the cluster. + Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `hdfs_fdw` foreign data wrapper when connecting to the cluster. `OPTIONS` -> Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server. You can include: + Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server. You can include: | Option | Description | | ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -114,7 +114,7 @@ The foreign server uses the default port (`10000`) for the connection to the cli For more information about using the `CREATE SERVER` command, see: -> + @@ -135,27 +135,27 @@ Please note: the Hadoop Foreign Data Wrapper supports NOSASL and LDAP authentica `role_name` -> Use `role_name` to specify the role that will be associated with the foreign server. + Use `role_name` to specify the role that will be associated with the foreign server. `server_name` -> Use `server_name` to specify the name of the server that defines a connection to the Hadoop cluster. + Use `server_name` to specify the name of the server that defines a connection to the Hadoop cluster. `OPTIONS` -> Use the `OPTIONS` clause to specify connection information for the foreign server. If you are using LDAP authentication, provide a: -> -> `username`: the name of the user on the LDAP server. -> -> `password`: the password associated with the username. -> -> If you do not provide a user name and password, the data wrapper will use NOSASL authentication. + Use the `OPTIONS` clause to specify connection information for the foreign server. If you are using LDAP authentication, provide a: + + `username`: the name of the user on the LDAP server. + + `password`: the password associated with the username. + + If you do not provide a user name and password, the data wrapper will use NOSASL authentication. 
**Example** The following command creates a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `hdfs_server`: -> `CREATE USER MAPPING FOR enterprisedb SERVER hdfs_server;` + `CREATE USER MAPPING FOR enterprisedb SERVER hdfs_server;` If the database host uses LDAP authentication, provide connection credentials when creating the user mapping: @@ -167,7 +167,7 @@ The command creates a user mapping for a role named `enterprisedb` that is assoc For detailed information about the `CREATE USER MAPPING` command, see: -> + @@ -202,57 +202,57 @@ and `table_constraint` is: `table_name` -> Specifies the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. + Specify the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. `IF NOT EXISTS` -> Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. + Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. `column_name` -> Specifies the name of a column in the new table; each column should correspond to a column described on the Hive or Spark server. + Specifies the name of a column in the new table; each column should correspond to a column described on the Hive or Spark server. `data_type` -> Specifies the data type of the column; when possible, specify the same data type for each column on the Postgres server and the Hive or Spark server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the Hive or Spark server. If the server cannot identify a compatible data type, it will return an error. + Specify the data type of the column; when possible, specify the same data type for each column on the Postgres server and the Hive or Spark server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the Hive or Spark server. If the server cannot identify a compatible data type, it will return an error. `COLLATE collation` -> Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. + Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. `INHERITS (parent_table [, ... ])` -> Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. + Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. `CONSTRAINT constraint_name` -> Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. + Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. `NOT NULL` -> Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. + Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. 
`NULL` -> Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. + Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. `CHECK (expr) [NO INHERIT]` -> Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. -> -> A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. -> -> Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. + Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. + + A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. + + Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. `DEFAULT default_expr` -> Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. + Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. `SERVER server_name [OPTIONS (option 'value' [, ... ] ) ]` -> To create a foreign table that will allow you to query a table that resides on a Hadoop file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the Hadoop data adapter. -> -> Use the `OPTIONS` clause to specify the following `options` and their corresponding values: + To create a foreign table that will allow you to query a table that resides on a Hadoop file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the Hadoop data adapter. + + Use the `OPTIONS` clause to specify the following `options` and their corresponding values: | option | value | | ---------- | --------------------------------------------------------------------------------------- | @@ -315,7 +315,7 @@ Include the `SERVER` clause to specify the name of the database stored on the Ha For more information about using the `CREATE FOREIGN TABLE` command, see: -> + ### Data Type Mappings @@ -349,29 +349,29 @@ DROP EXTENSION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]; `IF EXISTS` -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the specified name doesn't exists. + Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the specified name doesn't exists. `name` -> Specify the name of the installed extension. It is optional. -> -> `CASCADE` -> -> Automatically drop objects that depend on the extension. It drops all the other dependent objects too. -> -> `RESTRICT` -> -> Do not allow to drop extension if any objects, other than its member objects and extensions listed in the same DROP command are dependent on it. 
+ Specify the name of the installed extension. It is optional. + + `CASCADE` + + Automatically drop objects that depend on the extension. It drops all the other dependent objects too. + + `RESTRICT` + + Do not allow to drop extension if any objects, other than its member objects and extensions listed in the same DROP command are dependent on it. **Example** The following command removes the extension from the existing database: -> `DROP EXTENSION hdfs_fdw;` + `DROP EXTENSION hdfs_fdw;` For more information about using the foreign data wrapper `DROP EXTENSION` command, see: -> . + . ## DROP SERVER @@ -387,29 +387,29 @@ The role that drops the server is the owner of the server; use the `ALTER SERVER `IF EXISTS` -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a server with the specified name doesn't exists. + Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a server with the specified name doesn't exists. `name` -> Specify the name of the installed server. It is optional. -> -> `CASCADE` -> -> Automatically drop objects that depend on the server. It should drop all the other dependent objects too. -> -> `RESTRICT` -> -> Do not allow to drop the server if any objects are dependent on it. + Specify the name of the installed server. It is optional. + + `CASCADE` + + Automatically drop objects that depend on the server. It should drop all the other dependent objects too. + + `RESTRICT` + + Do not allow to drop the server if any objects are dependent on it. **Example** The following command removes a foreign server named `hdfs_server`: -> `DROP SERVER hdfs_server;` + `DROP SERVER hdfs_server;` For more information about using the `DROP SERVER` command, see: -> + ## DROP USER MAPPING @@ -423,25 +423,25 @@ DROP USER MAPPING [ IF EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } `IF EXISTS` -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the user mapping doesn't exist. + Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the user mapping doesn't exist. `user_name` -> Specify the user name of the mapping. + Specify the user name of the mapping. `server_name` -> Specify the name of the server that defines a connection to the Hadoop cluster. + Specify the name of the server that defines a connection to the Hadoop cluster. **Example** The following command drops a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `hdfs_server`: -> `DROP USER MAPPING FOR enterprisedb SERVER hdfs_server;` + `DROP USER MAPPING FOR enterprisedb SERVER hdfs_server;` For detailed information about the `DROP USER MAPPING` command, see: -> + ## DROP FOREIGN TABLE @@ -455,19 +455,19 @@ DROP FOREIGN TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] `IF EXISTS` -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the foreign table with the specified name doesn't exists. + Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the foreign table with the specified name doesn't exists. `name` -> Specify the name of the foreign table. + Specify the name of the foreign table. `CASCADE` -> Automatically drop objects that depend on the foreign table. It should drop all the other dependent objects too. 
+ Automatically drop objects that depend on the foreign table. It should drop all the other dependent objects too. `RESTRICT` -> Do not allow to drop foreign table if any objects are dependent on it. + Do not allow to drop foreign table if any objects are dependent on it. **Example** @@ -477,4 +477,4 @@ DROP FOREIGN TABLE warehouse; For more information about using the `DROP FOREIGN TABLE` command, see: -> + diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/09_using_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/09_using_the_hadoop_data_adapter.mdx index c67ba31ead7..3a97f15e7be 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.8/09_using_the_hadoop_data_adapter.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/09_using_the_hadoop_data_adapter.mdx @@ -147,7 +147,7 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT client_ip, full_request_date, uri FROM weblo ## Using HDFS FDW with Apache Spark on Top of Hadoop -Apache Spark is a general purpose distributed computing framework which supports a wide variety of use cases. It provides real time streaming as well as batch processing with speed, ease of use, and sophisticated analytics. Spark does not provide a storage layer as it relies on third party storage providers like Hadoop, HBASE, Cassandra, S3 etc. Spark integrates seamlessly with Hadoop and can process existing data. Spark SQL is 100% compatible with `HiveQL` and can be used as a replacement of `Hiveserver2`, using `Spark Thrift Server`. +Apache Spark is a general purpose distributed computing framework which supports a wide variety of use cases. It provides real time streaming as well as batch processing with speed, ease of use, and sophisticated analytics. Spark does not provide a storage layer as it relies on third party storage providers like Hadoop, HBASE, Cassandra, S3, and so on. Spark integrates seamlessly with Hadoop and can process existing data. Spark SQL is 100% compatible with `HiveQL` and can be used as a replacement of `Hiveserver2`, using `Spark Thrift Server`. To use HDFS FDW with Apache Spark on top of Hadoop: @@ -299,4 +299,4 @@ EXPLAIN (verbose, costs off) SELECT name FROM f_names_tab WHERE a > 3; ``` !!! Note - The same port was being used while creating foreign server because the Spark Thrift Server is compatible with the Hive Thrift Server. Applications using Hiveserver2 would work with Spark except for the behaviour of the `ANALYZE` command and the connection string in the case of `NOSASL`. We recommend using `ALTER SERVER` and changing the `client_type` option if Hive is to be replaced with Spark. + This example uses the same port while creating foreign server because the Spark Thrift Server is compatible with the Hive Thrift Server. Applications using Hiveserver2 would work with Spark except for the behaviour of the `ANALYZE` command and the connection string in the case of `NOSASL`. We recommend using `ALTER SERVER` and changing the `client_type` option if Hive is to be replaced with Spark. 
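A minimal sketch of that change follows; the server name `hdfs_server` is carried over from the earlier examples, and `'spark'` is assumed to be the accepted `client_type` value for a Spark Thrift Server connection:

```text
-- switch an existing foreign server from Hive to Spark (server name and option value are assumptions)
ALTER SERVER hdfs_server OPTIONS (SET client_type 'spark');
```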
diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/11_uninstalling_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/11_uninstalling_the_hadoop_data_adapter.mdx index 2ce7ab1ca46..5264a389c50 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.8/11_uninstalling_the_hadoop_data_adapter.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/11_uninstalling_the_hadoop_data_adapter.mdx @@ -4,7 +4,7 @@ title: "Uninstalling the Hadoop Foreign Data Wrapper" -**Uninstalling an RPM Package** +## Uninstalling an RPM Package You can use the `yum remove` or `dnf remove` command to remove a package installed by `yum` or `dnf`. To remove a package, open a terminal window, assume superuser privileges, and enter the command: @@ -20,7 +20,7 @@ You can use the `yum remove` or `dnf remove` command to remove a package install > where `xx` is the server version number. -**Uninstalling Hadoop Foreign Data Wrapper on a Debian or Ubuntu Host** +## Uninstalling Hadoop Foreign Data Wrapper on a Debian or Ubuntu Host - To uninstall Hadoop Foreign Data Wrapper on a Debian or Ubuntu host, invoke the following command. From bf15ec2e532a5fcdff0d735a238d9aa99b5e7b7f Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 25 Jun 2021 15:39:20 +0530 Subject: [PATCH 41/50] Fixing review comments Former-commit-id: 4e9bc30ea69632955685151a5efd73dde0f8d707 --- .../mongo_data_adapter/5.2.9/01_whats_new.mdx | 10 ++ .../04_installing_the_mongo_data_adapter.mdx | 10 +- .../05_updating_the_mongo_data_adapter.mdx | 4 +- .../5.2.9/06_features_of_mongo_fdw.mdx | 24 +-- .../07_configuring_the_mongo_data_adapter.mdx | 158 +++++++++--------- ...11_uninstalling_the_mongo_data_adapter.mdx | 4 +- 6 files changed, 109 insertions(+), 101 deletions(-) create mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx new file mode 100644 index 00000000000..3ba10216617 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx @@ -0,0 +1,10 @@ +--- +title: "What’s New" +--- + + + +The following features are added to create MongoDB Foreign Data Wrapper `5.2.9`: + +- Updated mongo-c-driver to 1.17.3 +- Updated json-c to 0.15 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx index 9fe2ab8e364..5f94de4e76f 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx @@ -71,7 +71,7 @@ You can install the MongoDB Foreign Data Wrapper using an RPM package on the fol dnf install edb-as-mongo_fdw ``` - where `xx` is the server version number. + where `xx` is the server version number. @@ -108,7 +108,7 @@ You can install the MongoDB Foreign Data Wrapper using an RPM package on the fol dnf install edb-as-mongo_fdw ``` - where `xx` is the server version number. + where `xx` is the server version number. @@ -184,9 +184,7 @@ You can install the MongoDB Foreign Data Wrapper using an RPM package on the fol dnf install edb-as-mongo_fdw ``` - where `xx` is the server version number. - - + where `xx` is the server version number. @@ -290,4 +288,4 @@ The following steps will walk you through using the EDB apt repository to instal apt-get install edb-as-mongo-fdw ``` -where `xx` is the server version number. 
+ where `xx` is the server version number. diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx index a32e883b0bb..63a59b8c5ef 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx @@ -4,7 +4,7 @@ title: "Updating the MongoDB Foreign Data Wrapper" -**Updating an RPM Installation** +## Updating an RPM Installation If you have an existing RPM installation of MongoDB Foreign Data Wrapper, you can use yum or dnf to upgrade your repository configuration file and update to a more recent product version. To update the `edb.repo` file, assume superuser privileges and enter: @@ -36,7 +36,7 @@ yum or dnf will update the `edb.repo` file to enable access to the current EDB r where `xx` is the server version number. -**Updating MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host** +## Updating MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host To update MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host, use the following command: diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/06_features_of_mongo_fdw.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/06_features_of_mongo_fdw.mdx index aec81845037..972cdcb480d 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.9/06_features_of_mongo_fdw.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.9/06_features_of_mongo_fdw.mdx @@ -8,38 +8,38 @@ The key features of the MongoDB Foreign Data Wrapper are listed below: ## Writable FDW -The MongoDB Foreign Data Wrapper allows you to modify data on a MongoDB server. Users can `INSERT`, `UPDATE` and `DELETE` data in the remote MongoDB collections by inserting, updating and deleting data locally in foreign tables. See also: +The MongoDB Foreign Data Wrapper allows you to modify data on a MongoDB server. Users can `INSERT`, `UPDATE` and `DELETE` data in the remote MongoDB collections by inserting, updating and deleting data locally in foreign tables. -[Example: Using the MongoDB Foreign Data Wrapper](08_example_using_the_mongo_data_adapter/#example_using_the_mongo_data_adapter) +See also: -[Data Type Mappings](07_configuring_the_mongo_data_adapter/#data-type-mappings) +- [Example: Using the MongoDB Foreign Data Wrapper](08_example_using_the_mongo_data_adapter/#example_using_the_mongo_data_adapter) -## Where Clause Push-down +- [Data Type Mappings](07_configuring_the_mongo_data_adapter/#data-type-mappings) -MongoDB Foreign Data Wrapper allows the push-down of `WHERE` clause only when clauses include comparison expressions that have a column and a constant as arguments. WHERE clause push-down is not supported where constant is an array. +## WHERE Clause Push-down + +MongoDB Foreign Data Wrapper allows the push-down of the `WHERE` clause only when clauses include the comparison expressions that have a column and a constant as arguments. `WHERE` clause push-down is not supported where the constant is an array. ## Connection Pooling -Mongo_FDW establishes a connection to a foreign server during the first query that uses a foreign table associated with the foreign server. This connection is kept and reused for subsequent queries in the same session. +The MongoDB Foreign Data Wrapper establishes a connection to a foreign server during the first query that uses a foreign table associated with the foreign server. 
This connection is kept and reused for subsequent queries in the same session. ## Automated Cleanup The MongoDB Foreign Data Wrapper allows the cleanup of foreign tables in a single operation using the `DROP EXTENSION` command. This feature is especially useful when a foreign table has been created for a temporary purpose. The syntax of a `DROP EXTENSION` command is: -> `DROP EXTENSION mongo_fdw CASCADE;` + `DROP EXTENSION mongo_fdw CASCADE;` For more information, see [DROP EXTENSION](https://www.postgresql.org/docs/current/sql-dropextension.html). ## Full Document Retrieval -This feature allows to retrieve documents along with all their fields from collection without any knowledge of the fields in BSON document available in MongoDB's collection. Those retrieved documents are in the JSON format. +This feature allows you to retrieve documents along with all their fields from collection without any knowledge of the fields in the BSON document available in MongoDB's collection. Those retrieved documents are in JSON format. You can retrieve all available fields in a collection residing in MongoDB Foreign Data Wrapper as explained in the following example: **Example**: -The collection in MongoDB Foreign Data Wrapper: - ```text > db.warehouse.find(); { "_id" : ObjectId("58a1ebbaf543ec0b90545859"), "warehouse_id" : 1, "warehouse_name" : "UPS", "warehouse_created" : ISODate("2014-12-12T07:12:10Z") } @@ -48,13 +48,13 @@ The collection in MongoDB Foreign Data Wrapper: Steps for retrieving the document: -1. Create foreign table with a column name `__doc`. The type of the column could be json, jsonb, text or varchar. +1. Create foreign table with a column name `__doc`. The type of the column could be json, jsonb, text, or varchar. ```text CREATE FOREIGN TABLE test_json(__doc json) SERVER mongo_server OPTIONS (database 'testdb', collection 'warehouse'); ``` -1. Retrieve the document. +2. Retrieve the document. ```text SELECT * FROM test_json ORDER BY __doc::text COLLATE "C"; diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/07_configuring_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/07_configuring_the_mongo_data_adapter.mdx index b04ccf9e345..2a549faa7d5 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.9/07_configuring_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.9/07_configuring_the_mongo_data_adapter.mdx @@ -6,10 +6,10 @@ title: "Configuring the MongoDB Foreign Data Wrapper" Before using the MongoDB Foreign Data Wrapper, you must: -> 1. Use the [CREATE EXTENSION](#create-extension) command to create the MongoDB Foreign Data Wrapper extension on the Postgres host. -> 2. Use the [CREATE SERVER](#create-server) command to define a connection to the MongoDB server. -> 3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. -> 4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Postgres database that corresponds to a database that resides on the MongoDB cluster. + 1. Use the [CREATE EXTENSION](#create-extension) command to create the MongoDB Foreign Data Wrapper extension on the Postgres host. + 2. Use the [CREATE SERVER](#create-server) command to define a connection to the MongoDB server. + 3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. + 4. 
Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Postgres database that corresponds to a database that resides on the MongoDB cluster. @@ -25,21 +25,21 @@ CREATE EXTENSION [IF NOT EXISTS] mongo_fdw [WITH] [SCHEMA schema_name]; `IF NOT EXISTS` -> Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. + Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. `schema_name` -> Optionally specify the name of the schema in which to install the extension's objects. + Optionally specify the name of the schema in which to install the extension's objects. **Example** The following command installs the MongoDB foreign data wrapper: -> `CREATE EXTENSION mongo_fdw;` + `CREATE EXTENSION mongo_fdw;` For more information about using the foreign data wrapper `CREATE EXTENSION` command, see: -> . + . @@ -58,15 +58,15 @@ The role that defines the server is the owner of the server; use the `ALTER SERV `server_name` -> Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. + Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. `FOREIGN_DATA_WRAPPER` -> Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `mongo_fdw` foreign data wrapper when connecting to the cluster. + Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `mongo_fdw` foreign data wrapper when connecting to the cluster. `OPTIONS` -> Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server object. You can include: + Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server object. You can include: | **Option** | **Description** | | ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -74,12 +74,12 @@ The role that defines the server is the owner of the server; use the `ALTER SERV | port | The port number of the Mongo Server. Valid range is 0 to 65535. The default value is `27017`. | | authentication_database | The database against which user will be authenticated. This option is only valid with password based authentication. | | ssl | Requests an authenticated, encrypted SSL connection. By default, the value is set to `false`. Set the value to `true` to enable ssl. See to understand the options. | -| pem_file | SSL option | +| pem_file | SSL option. | | pem_pwd | SSL option. | -| ca_file | SSL option | -| ca_dir | SSL option | -| crl_file | SSL option | -| weak_cert_validation | SSL option | +| ca_file | SSL option. | +| ca_dir | SSL option. | +| crl_file | SSL option. | +| weak_cert_validation | SSL option. | **Example** @@ -93,7 +93,7 @@ The foreign server uses the default port (`27017`) for the connection to the cli For more information about using the `CREATE SERVER` command, see: -> + @@ -112,25 +112,25 @@ You must be the owner of the foreign server to create a user mapping for that se `role_name` -> Use `role_name` to specify the role that will be associated with the foreign server. 
+ Use `role_name` to specify the role that will be associated with the foreign server. `server_name` -> Use `server_name` to specify the name of the server that defines a connection to the MongoDB cluster. + Use `server_name` to specify the name of the server that defines a connection to the MongoDB cluster. `OPTIONS` -> Use the `OPTIONS` clause to specify connection information for the foreign server. -> -> `username`: the name of the user on the MongoDB server. -> -> `password`: the password associated with the username. + Use the `OPTIONS` clause to specify connection information for the foreign server. + + `username`: the name of the user on the MongoDB server. + + `password`: the password associated with the username. **Example** The following command creates a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mongo_server`: -> `CREATE USER MAPPING FOR enterprisedb SERVER mongo_server;` + `CREATE USER MAPPING FOR enterprisedb SERVER mongo_server;` If the database host uses secure authentication, provide connection credentials when creating the user mapping: @@ -142,7 +142,7 @@ The command creates a user mapping for a role named `enterprisedb` that is assoc For detailed information about the `CREATE USER MAPPING` command, see: -> + @@ -177,57 +177,57 @@ and `table_constraint` is: `table_name` -> Specifies the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. + Specify the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. `IF NOT EXISTS` -> Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. + Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. `column_name` -> Specifies the name of a column in the new table; each column should correspond to a column described on the MongoDB server. + Specify the name of a column in the new table; each column should correspond to a column described on the MongoDB server. `data_type` -> Specifies the data type of the column; when possible, specify the same data type for each column on the Postgres server and the MongoDB server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the MongoDB server. If the server cannot identify a compatible data type, it will return an error. + Specify the data type of the column; when possible, specify the same data type for each column on the Postgres server and the MongoDB server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the MongoDB server. If the server cannot identify a compatible data type, it will return an error. `COLLATE collation` -> Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. + Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. `INHERITS (parent_table [, ... ])` -> Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. 
Parent tables can be plain tables or foreign tables. + Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. `CONSTRAINT constraint_name` -> Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. + Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. `NOT NULL` -> Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. + Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. `NULL` -> Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. + Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. `CHECK (expr) [NO INHERIT]` -> Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. -> -> A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. -> -> Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. + Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. + + A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. + + Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. `DEFAULT default_expr` -> Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. + Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. `SERVER server_name [OPTIONS (option 'value' [, ... ] ) ]` -> To create a foreign table that will allow you to query a table that resides on a MongoDB file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the MongoDB data adapter. -> -> Use the `OPTIONS` clause to specify the following `options` and their corresponding values: + To create a foreign table that will allow you to query a table that resides on a MongoDB file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the MongoDB data adapter. + + Use the `OPTIONS` clause to specify the following `options` and their corresponding values: | option | value | | ---------- | --------------------------------------------------------------------------------- | @@ -273,7 +273,7 @@ Include the `SERVER` clause to specify the name of the database stored on the Mo For more information about using the `CREATE FOREIGN TABLE` command, see: -> + !!! Note MongoDB foreign data wrapper supports the write capability feature. 
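Because the hunk above closes by noting that the MongoDB Foreign Data Wrapper is writable, a hedged sketch of a writable foreign table may be useful here. The `mongo_server` name, the `warehouse` collection, and the `testdb` database appear elsewhere in this patch; the column list and the leading `_id NAME` column follow the usual mongo_fdw convention and are assumptions, not part of the patch:

```text
-- Sketch only: adjust columns to match the actual documents in the collection.
CREATE FOREIGN TABLE warehouse
(
  _id                NAME,
  warehouse_id       INT,
  warehouse_name     TEXT,
  warehouse_created  TIMESTAMPTZ
)
SERVER mongo_server
OPTIONS (database 'testdb', collection 'warehouse');

-- Writable-FDW operations are applied to the remote MongoDB collection.
-- The leading 0 is only a placeholder value for the _id column.
INSERT INTO warehouse VALUES (0, 3, 'FedEx', '2021-06-25 10:00:00');
UPDATE warehouse SET warehouse_name = 'FedEx Ground' WHERE warehouse_id = 3;
DELETE FROM warehouse WHERE warehouse_id = 3;
```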
@@ -306,29 +306,29 @@ DROP EXTENSION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]; `IF EXISTS` -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the specified name doesn't exists. + Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the specified name doesn't exists. `name` -> Specify the name of the installed extension. It is optional. -> -> `CASCADE` -> -> Automatically drop objects that depend on the extension. It drops all the other dependent objects too. -> -> `RESTRICT` -> -> Do not allow to drop extension if any objects, other than its member objects and extensions listed in the same DROP command are dependent on it. + Specify the name of the installed extension. It is optional. + + `CASCADE` + + Automatically drop objects that depend on the extension. It drops all the other dependent objects too. + + `RESTRICT` + + Do not allow to drop extension if any objects, other than its member objects and extensions listed in the same DROP command are dependent on it. **Example** The following command removes the extension from the existing database: -> `DROP EXTENSION mongo_fdw;` + `DROP EXTENSION mongo_fdw;` For more information about using the foreign data wrapper `DROP EXTENSION` command, see: -> . + . ## DROP SERVER @@ -344,29 +344,29 @@ The role that drops the server is the owner of the server; use the `ALTER SERVER `IF EXISTS` -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a server with the specified name doesn't exists. + Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a server with the specified name doesn't exists. `name` -> Specify the name of the installed server. It is optional. -> -> `CASCADE` -> -> Automatically drop objects that depend on the server. It should drop all the other dependent objects too. -> -> `RESTRICT` -> -> Do not allow to drop the server if any objects are dependent on it. + Specify the name of the installed server. It is optional. + + `CASCADE` + + Automatically drop objects that depend on the server. It should drop all the other dependent objects too. + + `RESTRICT` + + Do not allow to drop the server if any objects are dependent on it. **Example** The following command removes a foreign server named `mongo_server`: -> `DROP SERVER mongo_server;` + `DROP SERVER mongo_server;` For more information about using the `DROP SERVER` command, see: -> + ## DROP USER MAPPING @@ -380,25 +380,25 @@ DROP USER MAPPING [ IF EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } `IF EXISTS` -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the user mapping doesn't exist. + Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the user mapping doesn't exist. `user_name` -> Specify the user name of the mapping. + Specify the user name of the mapping. `server_name` -> Specify the name of the server that defines a connection to the MongoDB cluster. + Specify the name of the server that defines a connection to the MongoDB cluster. 
**Example** The following command drops a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mongo_server`: -> `DROP USER MAPPING FOR enterprisedb SERVER mongo_server;` + `DROP USER MAPPING FOR enterprisedb SERVER mongo_server;` For detailed information about the `DROP USER MAPPING` command, see: -> + ## DROP FOREIGN TABLE @@ -412,19 +412,19 @@ DROP FOREIGN TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] `IF EXISTS` -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the foreign table with the specified name doesn't exists. + Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the foreign table with the specified name doesn't exists. `name` -> Specify the name of the foreign table. + Specify the name of the foreign table. `CASCADE` -> Automatically drop objects that depend on the foreign table. It should drop all the other dependent objects too. + Automatically drop objects that depend on the foreign table. It should drop all the other dependent objects too. `RESTRICT` -> Do not allow to drop foreign table if any objects are dependent on it. + Do not allow to drop foreign table if any objects are dependent on it. **Example** @@ -434,4 +434,4 @@ DROP FOREIGN TABLE warehouse; For more information about using the `DROP FOREIGN TABLE` command, see: -> + diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/11_uninstalling_the_mongo_data_adapter.mdx index 8313284a962..d0d4f05040a 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.9/11_uninstalling_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.9/11_uninstalling_the_mongo_data_adapter.mdx @@ -4,7 +4,7 @@ title: "Uninstalling the MongoDB Foreign Data Wrapper" -**Uninstalling an RPM Package** +## Uninstalling an RPM Package You can use the `yum remove` or `dnf remove` command to remove a package installed by `yum` or `dnf`. To remove a package, open a terminal window, assume superuser privileges, and enter the command: @@ -18,7 +18,7 @@ You can use the `yum remove` or `dnf remove` command to remove a package install Where `xx` is the server version number. -**Uninstalling MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host** +## Uninstalling MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host - To uninstall MongoDB Foreign Data Wrapper on a Debian or Ubuntu host, invoke the following command. From 5e40f0f16527251cbc4b1a88a43977fc56338aa9 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 25 Jun 2021 15:51:51 +0530 Subject: [PATCH 42/50] Delete 01_what's_new.mdx Former-commit-id: 8aeb05cb9984703c83245b33fd9861bf6c9b4cbc --- .../docs/hadoop_data_adapter/2.0.8/01_what's_new.mdx | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/01_what's_new.mdx diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/01_what's_new.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/01_what's_new.mdx deleted file mode 100644 index 17bebc9e2f4..00000000000 --- a/product_docs/docs/hadoop_data_adapter/2.0.8/01_what's_new.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "What’s New" ---- - - - -The following features are added to create Hadoop Foreign Data Wrapper `2.0.8`: - -- Support for Hadoop version 3.2.x -- Support for Hive version 3.1.x -- Support for Spark version 3.0.x. 
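The configuration page revised in the preceding hunks documents `DROP FOREIGN TABLE`, `DROP USER MAPPING`, `DROP SERVER`, and `DROP EXTENSION` individually. A hedged sketch of the complete teardown in dependency order, reusing the object names from that page (only the `IF EXISTS` guards are added here):

```text
-- Sketch only: run in this order so no statement fails on a dependent object.
DROP FOREIGN TABLE IF EXISTS warehouse;
DROP USER MAPPING IF EXISTS FOR enterprisedb SERVER mongo_server;
DROP SERVER IF EXISTS mongo_server;
DROP EXTENSION IF EXISTS mongo_fdw;

-- As the page itself notes, a single DROP EXTENSION mongo_fdw CASCADE
-- removes the dependent server, mapping, and foreign tables in one step.
```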
From 38fd866f355142b8bd1c67d9a4e1dec591ea8c99 Mon Sep 17 00:00:00 2001 From: Manjusha Vaidya Date: Fri, 25 Jun 2021 16:14:08 +0530 Subject: [PATCH 43/50] fixed_comments Former-commit-id: 99f6d2f2854958921ed48a80f88c10dcc94b99d8 --- .../1.0/01_installing_and_configuring_the_pgpool-II.mdx | 6 +++--- product_docs/docs/pgpool/1.0/02_extensions.mdx | 6 +++--- product_docs/docs/pgpool/1.0/index.mdx | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx b/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx index 0afec0f086b..cef997a5ce2 100644 --- a/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx +++ b/product_docs/docs/pgpool/1.0/01_installing_and_configuring_the_pgpool-II.mdx @@ -41,7 +41,7 @@ Before installing the repository configuration, you must have credentials that a Perform the following steps to install Pgpool-II on a CentOS host: -1. To install the repository configuration, assume superuser privileges and invoke the platform-specific command: +1. To install the repository configuration, assume superuser privileges, and invoke the platform-specific command: On CentOS 7: @@ -131,7 +131,7 @@ Before creating the repository configuration file, you must have credentials tha Perform the following steps to install Pgpool-II: -1. To create the repository configuration file, assume superuser privileges and invoke the platform-specific command: +1. To create the repository configuration file, assume superuser privileges, and invoke the platform-specific command: On RHEL 7: @@ -285,7 +285,7 @@ Perform the following steps to install Pgpool-II on an RHEL/CentOS 7 PPC64LE Hos To install Pgpool-II on a Debian or Ubuntu host, you must have credentials that allow access to the EDB repository. To request credentials for the repository, visit the [EDB website](https://www.enterprisedb.com/user). -Perform the following steps to to install a Debian package using the EDB apt repository. +Perform the following steps to install a Debian package using the EDB apt repository. 1. Assume superuser privileges: diff --git a/product_docs/docs/pgpool/1.0/02_extensions.mdx b/product_docs/docs/pgpool/1.0/02_extensions.mdx index a704e1f4422..fd26720b824 100644 --- a/product_docs/docs/pgpool/1.0/02_extensions.mdx +++ b/product_docs/docs/pgpool/1.0/02_extensions.mdx @@ -23,7 +23,7 @@ The following section walks you through the steps of installing Pgpool-II extens Assume superuser privileges and perform the following steps to install Pgpool-II extensions on a CentOS host: -1. To install the repository configuration, assume superuser privileges and invoke the platform-specific command: +1. To install the repository configuration, assume superuser privileges, and invoke the platform-specific command: On CentOS 7: @@ -93,7 +93,7 @@ Before installing the repository configuration, you must have credentials that a Perform the following steps to install Pgpool-II extensions on an RHEL host: -1. To install the repository configuration, assume superuser privileges and invoke the platform-specific command: +1. To install the repository configuration, assume superuser privileges, and invoke the platform-specific command: On RHEL 7: @@ -191,7 +191,7 @@ Perform the following steps to install Pgpool-II extensions on an RHEL/CentOS 7 # End of the configuration file ``` -2. To install the repository configuration, assume superuser privileges and invoke the following command: +2. 
To install the repository configuration, assume superuser privileges, and invoke the following command: ```text yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm diff --git a/product_docs/docs/pgpool/1.0/index.mdx b/product_docs/docs/pgpool/1.0/index.mdx index 6213d274955..3c5fad6588b 100644 --- a/product_docs/docs/pgpool/1.0/index.mdx +++ b/product_docs/docs/pgpool/1.0/index.mdx @@ -11,7 +11,7 @@ legacyRedirectsGenerated: - "/edb-docs/p/pgpool-ii/1.0" --- -Pgpool-II acts as a middleware between client applications and a PostgreSQL database server. +Pgpool-II acts as middleware between client applications and a PostgreSQL database server. Using Pgpool-II adds the following benefits to your application connection infrastructure: From 2c9a821a58ce3b221011cb76c4c56319d4f08599 Mon Sep 17 00:00:00 2001 From: josh-heyer Date: Fri, 25 Jun 2021 11:03:59 +0000 Subject: [PATCH 44/50] New PDFs generated by Github Actions Former-commit-id: baee2e1c62124655afeb8fae8ce7a9f2ddf0fc72 From 9a93e2ba8324b9bf6dd32b07a11b0e95ddaa09b4 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 25 Jun 2021 17:06:47 +0530 Subject: [PATCH 45/50] Adding What's New section Former-commit-id: 1535e5c311a8c4c46e8b0758c4eb7d6b15c1b01a --- product_docs/docs/mongo_data_adapter/5.2.9/index.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx index ab14a10bd26..640c3591f3f 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx @@ -10,6 +10,6 @@ This guide uses the term `Postgres` to refer to an instance of EDB Postgres Adva
-requirements_overview architecture_overview installing_the_mongo_data_adapter updating_the_mongo_data_adapter features_of_mongo_fdw configuring_the_mongo_data_adapter example_using_the_mongo_data_adapter identifying_data_adapter_version limitations uninstalling_the_mongo_data_adapter conclusion +whats_new requirements_overview architecture_overview installing_the_mongo_data_adapter updating_the_mongo_data_adapter features_of_mongo_fdw configuring_the_mongo_data_adapter example_using_the_mongo_data_adapter identifying_data_adapter_version limitations uninstalling_the_mongo_data_adapter conclusion
From e0d476cf5eaf09d9fac9156780ebe89a8c8a5f5c Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra <53602601+abhilasha-narendra@users.noreply.github.com> Date: Fri, 25 Jun 2021 18:03:30 +0530 Subject: [PATCH 46/50] Delete deploy-hadoop-da-test-branch.yml Former-commit-id: 273b479e3deb03ab0b485c2d11ded0b4b42fe1bf --- .../deploy-hadoop-da-test-branch.yml | 60 ------------------- 1 file changed, 60 deletions(-) delete mode 100644 .github/workflows/deploy-hadoop-da-test-branch.yml diff --git a/.github/workflows/deploy-hadoop-da-test-branch.yml b/.github/workflows/deploy-hadoop-da-test-branch.yml deleted file mode 100644 index fe9f390520e..00000000000 --- a/.github/workflows/deploy-hadoop-da-test-branch.yml +++ /dev/null @@ -1,60 +0,0 @@ -name: Deploy Hadoop Data Adapter upcoming release branch to Netlify -on: - push: - branches: - - content/hadoop_data_adapter/2.0.8/upcoming_release -jobs: - build-deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - ref: content/hadoop_data_adapter/2.0.8/upcoming_release - fetch-depth: 0 # fetch whole repo so git-restore-mtime can work - - name: Update submodules - run: git submodule update --init --remote - - name: Adjust file watchers limit - run: echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p - - - uses: actions/setup-node@v1 - with: - node-version: '14.x' - - name: Install yarn - run: sudo npm -g install yarn - - name: Yarn install - run: yarn install --immutable - env: - NODE_ENV: ${{ secrets.NODE_ENV }} - - - name: Checking Gatsby cache - id: gatsby-cache-build - uses: actions/cache@v2 - with: - path: | - public - .cache - key: ${{ runner.os }}-gatsby-build-develop-${{ github.run_id }} - restore-keys: | - ${{ runner.os }}-gatsby-build-develop- - - - name: Fix mtimes - run: yarn fix-mtimes --force - - name: Gatsby build - run: yarn build - env: - APP_ENV: staging - NODE_ENV: ${{ secrets.NODE_ENV }} - NODE_OPTIONS: --max-old-space-size=4096 - ALGOLIA_API_KEY: ${{ secrets.ALGOLIA_API_KEY }} - ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} - ALGOLIA_INDEX_NAME: edb-docs-staging - INDEX_ON_BUILD: false - - - name: Netlify deploy - run: | - sudo yarn global add netlify-cli - netlify deploy --dir=public --prod - env: - NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} - NETLIFY_SITE_ID: ${{ secrets.NETLIFY_WIP1_SITE_ID }} - From 4df411f9b80b93c2dfe63c34bd4711832ab5ef43 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra <53602601+abhilasha-narendra@users.noreply.github.com> Date: Fri, 25 Jun 2021 18:04:22 +0530 Subject: [PATCH 47/50] Delete deploy-mongo-da-test-branch.yml Former-commit-id: 415940749c354c8bf541c74fa777e8de5eede172 --- .../workflows/deploy-mongo-da-test-branch.yml | 59 ------------------- 1 file changed, 59 deletions(-) delete mode 100644 .github/workflows/deploy-mongo-da-test-branch.yml diff --git a/.github/workflows/deploy-mongo-da-test-branch.yml b/.github/workflows/deploy-mongo-da-test-branch.yml deleted file mode 100644 index f4581292586..00000000000 --- a/.github/workflows/deploy-mongo-da-test-branch.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: Deploy MongoDB FDW upcoming release branch to Netlify -on: - push: - branches: - - content/mongo_data_adapter/5.2.9/upcoming_release -jobs: - build-deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - ref: content/mongo_data_adapter/5.2.9/upcoming_release - fetch-depth: 0 # fetch whole repo so git-restore-mtime can work - - name: Update submodules - run: git submodule update --init --remote - - name: Adjust 
file watchers limit - run: echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p - - - uses: actions/setup-node@v1 - with: - node-version: '14.x' - - name: Install yarn - run: sudo npm -g install yarn - - name: Yarn install - run: yarn install --immutable - env: - NODE_ENV: ${{ secrets.NODE_ENV }} - - - name: Checking Gatsby cache - id: gatsby-cache-build - uses: actions/cache@v2 - with: - path: | - public - .cache - key: ${{ runner.os }}-gatsby-build-develop-${{ github.run_id }} - restore-keys: | - ${{ runner.os }}-gatsby-build-develop- - - - name: Fix mtimes - run: yarn fix-mtimes --force - - name: Gatsby build - run: yarn build - env: - APP_ENV: staging - NODE_ENV: ${{ secrets.NODE_ENV }} - NODE_OPTIONS: --max-old-space-size=4096 - ALGOLIA_API_KEY: ${{ secrets.ALGOLIA_API_KEY }} - ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} - ALGOLIA_INDEX_NAME: edb-docs-staging - INDEX_ON_BUILD: false - - - name: Netlify deploy - run: | - sudo yarn global add netlify-cli - netlify deploy --dir=public --prod - env: - NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} - NETLIFY_SITE_ID: ${{ secrets.NETLIFY_WIP2_SITE_ID }} From cb6badd9751b1937e5b156908b10ec79861bc923 Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 25 Jun 2021 18:12:57 +0530 Subject: [PATCH 48/50] Create 01_whats_new.mdx Former-commit-id: 87659093124cfc7bebfe5963dc03ba222a20e4f0 --- .../docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx new file mode 100644 index 00000000000..30138f9d121 --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/01_whats_new.mdx @@ -0,0 +1,11 @@ +--- +title: "What’s New" +--- + + + +The following features are added to create Hadoop Foreign Data Wrapper `2.0.8`: + +- Support for Hadoop version 3.2.x +- Support for Hive version 3.1.x +- Support for Spark version 3.0.x From 37c186730e67165c9d791e17a2dd1926bc653cbe Mon Sep 17 00:00:00 2001 From: Abhilasha Narendra Date: Fri, 25 Jun 2021 18:15:39 +0530 Subject: [PATCH 49/50] Update 02_requirements_overview.mdx Former-commit-id: e021bcfb2a7e4d1eb3bbc4b86b707e6a29da3939 --- .../2.0.8/02_requirements_overview.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/product_docs/docs/hadoop_data_adapter/2.0.8/02_requirements_overview.mdx b/product_docs/docs/hadoop_data_adapter/2.0.8/02_requirements_overview.mdx index afaf5d7b204..13360808b72 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.8/02_requirements_overview.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.8/02_requirements_overview.mdx @@ -12,14 +12,14 @@ The Hadoop Foreign Data Wrapper is supported on the following platforms: **Linux x86-64** -> - RHEL 8.x and 7.x -> - CentOS 8.x and 7.x -> - OL 8.x and 7.x -> - Ubuntu 20.04 and 18.04 LTS -> - Debian 10.x and 9.x + - RHEL 8.x and 7.x + - CentOS 8.x and 7.x + - OL 8.x and 7.x + - Ubuntu 20.04 and 18.04 LTS + - Debian 10.x and 9.x **Linux on IBM Power8/9 (LE)** -> - RHEL 7.x + - RHEL 7.x The Hadoop Foreign Data Wrapper supports use of the Hadoop file system using a HiveServer2 interface or Apache Spark using the Spark Thrift Server. 
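The requirements overview above ends by noting that the Hadoop Foreign Data Wrapper reaches the Hadoop file system either through a HiveServer2 interface or through the Spark Thrift Server. A minimal connection sketch under stated assumptions: the server name, host, port, and the `username`/`password` mapping options are illustrative placeholders, and only `client_type` is an option named in this patch:

```text
-- Sketch only: values below are placeholders for a local HiveServer2 endpoint.
CREATE EXTENSION IF NOT EXISTS hdfs_fdw;

-- For the Spark Thrift Server, which speaks the Hive-compatible protocol,
-- the assumption is that client_type 'spark' would be used instead.
CREATE SERVER hdfs_server
  FOREIGN DATA WRAPPER hdfs_fdw
  OPTIONS (host '127.0.0.1', port '10000', client_type 'hiveserver2');

CREATE USER MAPPING FOR enterprisedb
  SERVER hdfs_server
  OPTIONS (username 'hive_user', password 'hive_password');
```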
From 13ba36a50ffe91ec4b1e9915dc2f8e0406b63ca4 Mon Sep 17 00:00:00 2001 From: josh-heyer Date: Fri, 25 Jun 2021 13:46:02 +0000 Subject: [PATCH 50/50] New PDFs generated by Github Actions Former-commit-id: 6f4af2dcf4b103f7a3db2ffd8baf5271b9cbe1df