
All in with PG

This commit is contained in:
Petri Autero
2019-02-14 22:03:51 +02:00
parent c4c4710b52
commit e81d57b9da
26 changed files with 1209 additions and 47 deletions

View File

@@ -0,0 +1,18 @@
# PostgreSQL Cloud SQL Private IP Example
This folder contains an example of how to use the [MySQL module](/modules/mysql) to create a [Google Cloud SQL](https://cloud.google.com/sql/)
[PostgreSQL](https://cloud.google.com/sql/docs/postgres/) database instance with a private IP.
## How do you run this example?
To run this example, you need to:
1. Install [Terraform](https://www.terraform.io/).
1. Open up `vars.tf`, set the secrets at the top of the file as environment variables, and fill in any other
   variables in the file that don't have defaults.
1. `terraform init`.
1. `terraform plan`.
1. If the plan looks good, run `terraform apply`.
When the templates are applied, Terraform will output the IP address of the instance
and the instance path for [connecting using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy).

View File

@@ -0,0 +1,99 @@
# ------------------------------------------------------------------------------
# LAUNCH A POSTGRESQL CLOUD SQL PRIVATE IP INSTANCE
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# CONFIGURE OUR GCP CONNECTION
# ------------------------------------------------------------------------------
provider "google-beta" {
region = "${var.region}"
project = "${var.project}"
}
# Use Terraform 0.10.x so that we can take advantage of Terraform GCP functionality as a separate provider via
# https://github.com/terraform-providers/terraform-provider-google
terraform {
required_version = ">= 0.10.3"
}
# ------------------------------------------------------------------------------
# CREATE A RANDOM SUFFIX AND PREPARE RESOURCE NAMES
# ------------------------------------------------------------------------------
resource "random_id" "name" {
byte_length = 2
}
locals {
# If name_override is specified, use that - otherwise use the name_prefix with a random string
instance_name = "${length(var.name_override) == 0 ? format("%s-%s", var.name_prefix, random_id.name.hex) : var.name_override}"
private_network_name = "private-network-${random_id.name.hex}"
private_ip_name = "private-ip-${random_id.name.hex}"
}
# ------------------------------------------------------------------------------
# CREATE COMPUTE NETWORKS
# ------------------------------------------------------------------------------
# Simple network, auto-creates subnetworks
resource "google_compute_network" "private_network" {
provider = "google-beta"
name = "${local.private_network_name}"
}
# Reserve global internal address range for the peering
resource "google_compute_global_address" "private_ip_address" {
provider = "google-beta"
name = "${local.private_ip_name}"
purpose = "VPC_PEERING"
address_type = "INTERNAL"
prefix_length = 16
network = "${google_compute_network.private_network.self_link}"
}
# Establish VPC network peering connection using the reserved address range
resource "google_service_networking_connection" "private_vpc_connection" {
provider = "google-beta"
network = "${google_compute_network.private_network.self_link}"
service = "servicenetworking.googleapis.com"
reserved_peering_ranges = ["${google_compute_global_address.private_ip_address.name}"]
}
# ------------------------------------------------------------------------------
# CREATE DATABASE INSTANCE WITH PRIVATE IP
# ------------------------------------------------------------------------------
module "mysql" {
# When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
# to a specific version of the modules, such as the following example:
# source = "git::git@github.com:gruntwork-io/terraform-google-sql.git//modules/mysql?ref=v0.1.0"
source = "../../modules/mysql"
project = "${var.project}"
region = "${var.region}"
name = "${local.instance_name}"
db_name = "${var.db_name}"
engine = "${var.postgres_version}"
machine_type = "${var.machine_type}"
# These together will construct the master_user privileges, i.e.
# 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'.
# These should typically be set as the environment variable TF_VAR_master_user_password, etc.
# so you don't check these into source control.
master_user_password = "${var.master_user_password}"
master_user_name = "${var.master_user_name}"
master_user_host = "%"
# Pass the private network link to the module
private_network = "${google_compute_network.private_network.self_link}"
# Wait for the vpc connection to complete
wait_for = "${google_service_networking_connection.private_vpc_connection.network}"
custom_labels = {
test-id = "postgres-private-ip-example"
}
}
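The `wait_for` input passed above is just a string used to sequence creation: by handing the module an attribute of the service networking connection, the Cloud SQL instance is not created until the VPC peering is in place. A minimal sketch of that pattern, assuming the module wires the value into a `null_resource` trigger (the exact wiring is an assumption, not the module's verbatim code):

variable "wait_for" {
  description = "Opaque value used only to create an explicit dependency on another resource"
  default     = ""
}

# Hypothetical illustration: the master instance depends on this null_resource,
# so it is not created until whatever produced var.wait_for exists.
resource "null_resource" "wait_for" {
  triggers = {
    wait_for = "${var.wait_for}"
  }
}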

View File

@@ -0,0 +1,34 @@
output "master_instance_name" {
description = "The name of the database instance"
value = "${module.mysql.master_instance_name}"
}
output "master_ip_addresses" {
description = "All IP addresses of the instance as list of maps, see https://www.terraform.io/docs/providers/google/r/sql_database_instance.html#ip_address-0-ip_address"
value = "${module.mysql.master_ip_addresses}"
}
output "master_private_ip" {
description = "The first IPv4 address of the addresses assigned to the instance. As this instance has only private IP, it is the private IP address."
value = "${module.mysql.master_first_ip_address}"
}
output "master_instance" {
description = "Self link to the master instance"
value = "${module.mysql.master_instance}"
}
output "master_proxy_connection" {
description = "Instance path for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy"
value = "${module.mysql.master_proxy_connection}"
}
output "db_name" {
description = "Name of the default database"
value = "${module.mysql.db_name}"
}
output "db" {
description = "Self link to the default database"
value = "${module.mysql.db}"
}

View File

@@ -0,0 +1,49 @@
# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# These variables are expected to be passed in by the operator
# ---------------------------------------------------------------------------------------------------------------------
variable "project" {
description = "The project ID to host the database in."
}
variable "region" {
description = "The region to host the database in."
}
# Note: after a database instance name is used, it cannot be reused for up to one week.
variable "name_prefix" {
description = "The name prefix for the database instance. Will be appended with a random string. Use lowercase letters, numbers, and hyphens. Start with a letter."
}
variable "master_user_name" {
description = "The username part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_name so you don't check it into source control."
}
variable "master_user_password" {
description = "The password part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_password so you don't check it into source control."
}
# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# Generally, these values won't need to be changed.
# ---------------------------------------------------------------------------------------------------------------------
variable "postgres_version" {
description = "The engine version of the database, e.g. `POSTGRES_9_6`. See https://cloud.google.com/sql/docs/features for supported versions."
default = "POSTGRES_9_6"
}
variable "machine_type" {
description = "The machine type to use, see https://cloud.google.com/sql/pricing for more details"
default = "db-f1-micro"
}
variable "db_name" {
description = "Name for the db"
default = "default"
}
variable "name_override" {
description = "You may optionally override the name_prefix + random string by specifying an override"
default = ""
}

View File

@@ -0,0 +1,17 @@
# PostgreSQL Cloud SQL Public IP Example
This folder contains an example of how to use the [MySQL module](/modules/mysql) to create a [Google Cloud SQL](https://cloud.google.com/sql/)
[PostgreSQL](https://cloud.google.com/sql/docs/postgres/) database instance with a public IP.
## How do you run this example?
To run this example, you need to:
1. Install [Terraform](https://www.terraform.io/).
1. Open up `vars.tf`, set the secrets at the top of the file as environment variables, and fill in any other
   variables in the file that don't have defaults.
1. `terraform init`.
1. `terraform plan`.
1. If the plan looks good, run `terraform apply`.
When the templates are applied, Terraform will output the IP address of the instance and the instance path for [connecting using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy).

View File

@@ -0,0 +1,88 @@
# ------------------------------------------------------------------------------
# LAUNCH A POSTGRESQL CLOUD SQL PUBLIC IP INSTANCE
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# CONFIGURE OUR GCP CONNECTION
# ------------------------------------------------------------------------------
provider "google-beta" {
region = "${var.region}"
project = "${var.project}"
}
# Use Terraform 0.10.x so that we can take advantage of Terraform GCP functionality as a separate provider via
# https://github.com/terraform-providers/terraform-provider-google
terraform {
required_version = ">= 0.10.3"
}
# ------------------------------------------------------------------------------
# CREATE A RANDOM SUFFIX AND PREPARE RESOURCE NAMES
# ------------------------------------------------------------------------------
resource "random_id" "name" {
byte_length = 2
}
locals {
# If name_override is specified, use that - otherwise use the name_prefix with a random string
instance_name = "${length(var.name_override) == 0 ? format("%s-%s", var.name_prefix, random_id.name.hex) : var.name_override}"
}
# ------------------------------------------------------------------------------
# CREATE DATABASE INSTANCE WITH PUBLIC IP
# ------------------------------------------------------------------------------
module "mysql" {
# When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
# to a specific version of the modules, such as the following example:
# source = "git::git@github.com:gruntwork-io/terraform-google-sql.git//modules/mysql?ref=v0.1.0"
source = "../../modules/mysql"
project = "${var.project}"
region = "${var.region}"
name = "${local.instance_name}"
db_name = "${var.db_name}"
engine = "${var.postgres_version}"
machine_type = "${var.machine_type}"
# These together will construct the master_user privileges, i.e.
# 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'.
# These should typically be set as the environment variable TF_VAR_master_user_password, etc.
# so you don't check these into source control.
master_user_password = "${var.master_user_password}"
master_user_name = "${var.master_user_name}"
master_user_host = "%"
# To make it easier to test this example, we are giving the servers public IP addresses and allowing inbound
# connections from anywhere. In real-world usage, your servers should live in private subnets, only have private IP
# addresses, and only allow access from specific trusted networks, servers or applications in your VPC.
enable_public_internet_access = true
# Default setting for this is 'false' in 'variables.tf'
# In the test cases, we're setting this to true, to test forced SSL.
require_ssl = "${var.require_ssl}"
authorized_networks = [
{
name = "allow-all-inbound"
value = "0.0.0.0/0"
},
]
# Set a database flag to exercise the
# database_flags feature during automated testing
database_flags = [
{
name = "autovacuum_naptime"
value = "2"
},
]
custom_labels = {
test-id = "postgres-public-ip-example"
}
}

View File

@@ -0,0 +1,39 @@
output "master_instance_name" {
description = "The name of the database instance"
value = "${module.mysql.master_instance_name}"
}
output "master_ip_addresses" {
description = "All IP addresses of the instance as list of maps, see https://www.terraform.io/docs/providers/google/r/sql_database_instance.html#ip_address-0-ip_address"
value = "${module.mysql.master_ip_addresses}"
}
output "master_public_ip" {
description = "The first IPv4 address of the addresses assigned to the instance. As this instance has only public IP, it is the public IP address."
value = "${module.mysql.master_first_ip_address}"
}
output "master_ca_cert" {
value = "${module.mysql.master_ca_cert}"
description = "The CA Certificate used to connect to the SQL Instance via SSL"
}
output "master_instance" {
description = "Self link to the master instance"
value = "${module.mysql.master_instance}"
}
output "master_proxy_connection" {
description = "Instance path for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy"
value = "${module.mysql.master_proxy_connection}"
}
output "db_name" {
description = "Name of the default database"
value = "${module.mysql.db_name}"
}
output "db" {
description = "Self link to the default database"
value = "${module.mysql.db}"
}

View File

@@ -0,0 +1,56 @@
# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# These variables are expected to be passed in by the operator
# ---------------------------------------------------------------------------------------------------------------------
variable "project" {
description = "The project ID to host the database in."
}
variable "region" {
description = "The region to host the database in."
}
# Note: after a database instance name is used, it cannot be reused for up to one week.
variable "name_prefix" {
description = "The name prefix for the database instance. Will be appended with a random string. Use lowercase letters, numbers, and hyphens. Start with a letter."
}
variable "master_user_name" {
description = "The username part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_name so you don't check it into source control."
}
variable "master_user_password" {
description = "The password part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_password so you don't check it into source control."
}
# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# Generally, these values won't need to be changed.
# ---------------------------------------------------------------------------------------------------------------------
variable "postgres_version" {
description = "The engine version of the database, e.g. `POSTGRES_9_6`. See https://cloud.google.com/sql/docs/features for supported versions."
default = "POSTGRES_9_6"
}
variable "machine_type" {
description = "The machine type to use, see https://cloud.google.com/sql/pricing for more details"
default = "db-f1-micro"
}
variable "db_name" {
description = "Name for the db"
default = "default"
}
variable "name_override" {
description = "You may optionally override the name_prefix + random string by specifying an override"
default = ""
}
# When configuring a public IP instance, you should only allow secure connections
# For testing purposes, we're initially allowing unsecured connections.
variable "require_ssl" {
description = "True if the instance should require SSL/TLS for users connecting over IP. Note: SSL/TLS is needed to provide security when you connect to Cloud SQL using IP addresses. If you are connecting to your instance only by using the Cloud SQL Proxy or the Java Socket Library, you do not need to configure your instance to use SSL/TLS."
default = false
}

View File

@@ -0,0 +1,18 @@
# PostgreSQL Cloud SQL Replica Example
This folder contains an example of how to use the [MySQL module](/modules/mysql) to create a [Google Cloud SQL](https://cloud.google.com/sql/)
[PostgreSQL](https://cloud.google.com/sql/docs/postgres/) database cluster with a public IP, high availability and read replicas.
## How do you run this example?
To run this example, you need to:
1. Install [Terraform](https://www.terraform.io/).
1. Open up `vars.tf`, set the secrets at the top of the file as environment variables, and fill in any other
   variables in the file that don't have defaults.
1. `terraform init`.
1. `terraform plan`.
1. If the plan looks good, run `terraform apply`.
When the templates are applied, Terraform will output the IP address of the instance
and the instance path for [connecting using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy).

View File

@@ -0,0 +1,86 @@
# ------------------------------------------------------------------------------
# LAUNCH A POSTGRES CLUSTER WITH HA AND READ REPLICAS
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# CONFIGURE OUR GCP CONNECTION
# ------------------------------------------------------------------------------
provider "google-beta" {
region = "${var.region}"
project = "${var.project}"
}
# Use Terraform 0.10.x so that we can take advantage of Terraform GCP functionality as a separate provider via
# https://github.com/terraform-providers/terraform-provider-google
terraform {
required_version = ">= 0.10.3"
}
# ------------------------------------------------------------------------------
# CREATE A RANDOM SUFFIX AND PREPARE RESOURCE NAMES
# ------------------------------------------------------------------------------
resource "random_id" "name" {
byte_length = 2
}
locals {
# If name_override is specified, use that - otherwise use the name_prefix with a random string
instance_name = "${length(var.name_override) == 0 ? format("%s-%s", var.name_prefix, random_id.name.hex) : var.name_override}"
private_network_name = "private-network-${random_id.name.hex}"
private_ip_name = "private-ip-${random_id.name.hex}"
}
# ------------------------------------------------------------------------------
# CREATE DATABASE CLUSTER WITH PUBLIC IP
# ------------------------------------------------------------------------------
module "mysql" {
# When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
# to a specific version of the modules, such as the following example:
# source = "git::git@github.com:gruntwork-io/terraform-google-sql.git//modules/mysql?ref=v0.1.0"
source = "../../modules/mysql"
project = "${var.project}"
region = "${var.region}"
name = "${local.instance_name}"
db_name = "${var.db_name}"
engine = "${var.postgres_version}"
machine_type = "${var.machine_type}"
master_zone = "${var.master_zone}"
# To make it easier to test this example, we are giving the servers public IP addresses and allowing inbound
# connections from anywhere. In real-world usage, your servers should live in private subnets, only have private IP
# addresses, and only allow access from specific trusted networks, servers or applications in your VPC.
enable_public_internet_access = true
authorized_networks = [
{
name = "allow-all-inbound"
value = "0.0.0.0/0"
},
]
# Indicate that we want high availability. For PostgreSQL the module sets REGIONAL
# availability instead of creating a separate failover replica instance.
enable_failover_replica = true
# Indicate we want read replicas to be created
num_read_replicas = "${var.num_read_replicas}"
read_replica_zones = ["${var.read_replica_zones}"]
# These together will construct the master_user privileges, i.e.
# 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'.
# These should typically be set as the environment variable TF_VAR_master_user_password, etc.
# so you don't check these into source control.
master_user_password = "${var.master_user_password}"
master_user_name = "${var.master_user_name}"
master_user_host = "%"
custom_labels = {
test-id = "postgres-replicas-example"
}
}
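The `num_read_replicas` and `read_replica_zones` inputs above create one read replica per listed zone. A rough, hypothetical illustration of how that can map onto the google provider in the Terraform 0.11 era (resource wiring and names here are assumptions, not the module's actual implementation):

resource "google_sql_database_instance" "read_replica" {
  count                = "${var.num_read_replicas}"
  name                 = "${var.name}-replica-${count.index}"
  region               = "${var.region}"
  database_version     = "${var.engine}"
  master_instance_name = "${google_sql_database_instance.master.name}"

  settings {
    tier = "${var.machine_type}"

    # Pin each replica to the matching zone from the list
    location_preference {
      zone = "${element(var.read_replica_zones, count.index)}"
    }
  }
}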

View File

@@ -0,0 +1,74 @@
# ------------------------------------------------------------------------------
# MASTER OUTPUTS
# ------------------------------------------------------------------------------
output "master_instance_name" {
description = "The name of the database instance"
value = "${module.mysql.master_instance_name}"
}
output "master_ip_addresses" {
description = "All IP addresses of the instance as list of maps, see https://www.terraform.io/docs/providers/google/r/sql_database_instance.html#ip_address-0-ip_address"
value = "${module.mysql.master_ip_addresses}"
}
output "master_public_ip" {
description = "The first IPv4 address of the addresses assigned to the master instance. As this instance has only public IP, it is the public IP address."
value = "${module.mysql.master_first_ip_address}"
}
output "master_instance" {
description = "Self link to the master instance"
value = "${module.mysql.master_instance}"
}
output "master_proxy_connection" {
description = "Instance path for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy"
value = "${module.mysql.master_proxy_connection}"
}
# ------------------------------------------------------------------------------
# DB OUTPUTS
# ------------------------------------------------------------------------------
output "db_name" {
description = "Name of the default database"
value = "${module.mysql.db_name}"
}
output "db" {
description = "Self link to the default database"
value = "${module.mysql.db}"
}
# ------------------------------------------------------------------------------
# READ REPLICA OUTPUTS
# ------------------------------------------------------------------------------
output "read_replica_instance_names" {
description = "List of names for the read replica instances"
value = ["${module.mysql.read_replica_instance_names}"]
}
output "read_replica_public_ips" {
description = "List of first IPv4 addresses of the addresses assigned to the read replica instances. As the instances have only public IP in the example, the are the public IP addresses."
value = ["${module.mysql.read_replica_first_ip_addresses}"]
}
output "read_replica_instances" {
description = "List of self links to the read replica instances"
value = ["${module.mysql.read_replica_instances}"]
}
output "read_replica_proxy_connections" {
description = "List of read replica instance paths for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy"
value = ["${module.mysql.read_replica_proxy_connections}"]
}
# Although we don't use the values, this output highlights the JSON encoded output we use in certain
# cases where the resource output cannot properly be computed.
# See https://github.com/hashicorp/terraform/issues/17048
output "read_replica_server_ca_certs" {
description = "JSON encoded list of CA Certificates used to connect to the read replica instances via SSL"
value = "${module.mysql.read_replica_server_ca_certs}"
}
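As a simplified, hypothetical sketch of that JSON-encoding workaround, reduced to a list of strings (the resource reference is illustrative only):

output "read_replica_names_json" {
  # Encoding the computed list to a single JSON string sidesteps the
  # computed-output limitation described in hashicorp/terraform#17048.
  value = "${jsonencode(google_sql_database_instance.read_replica.*.name)}"
}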

View File

@@ -0,0 +1,69 @@
# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# These variables are expected to be passed in by the operator
# ---------------------------------------------------------------------------------------------------------------------
variable "project" {
description = "The project ID to host the database in."
}
variable "region" {
description = "The region to host the database in (e.g. 'us-central1')."
}
variable "master_zone" {
description = "The preferred zone for the master instance (e.g. 'us-central1-a'). Must be different than 'failover_replica_zone'."
}
variable "failover_replica_zone" {
description = "The preferred zone for the failover instance (e.g. 'us-central1-b'). Must be different than 'master_zone'."
}
variable "num_read_replicas" {
description = "The number of read replicas to create. Cloud SQL will replicate all data from the master to these replicas, which you can use to horizontally scale read traffic."
}
variable "read_replica_zones" {
description = "A list of compute zones where read replicas should be created. List size should match 'num_read_replicas'"
type = "list"
# Example:
# default = ["us-central1-b", "us-central1-c"]
}
# Note: after a database instance name is used, it cannot be reused for up to one week.
variable "name_prefix" {
description = "The name prefix for the database instance. Will be appended with a random string. Use lowercase letters, numbers, and hyphens. Start with a letter."
}
variable "master_user_name" {
description = "The username part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_name so you don't check it into source control."
}
variable "master_user_password" {
description = "The password part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_password so you don't check it into source control."
}
# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# Generally, these values won't need to be changed.
# ---------------------------------------------------------------------------------------------------------------------
variable "postgres_version" {
description = "The engine version of the database, e.g. `POSTGRES_9_6`. See https://cloud.google.com/sql/docs/features for supported versions."
default = "POSTGRES_9_6"
}
variable "machine_type" {
description = "The machine type to use, see https://cloud.google.com/sql/pricing for more details"
default = "db-f1-micro"
}
variable "db_name" {
description = "Name for the db"
default = "default"
}
variable "name_override" {
description = "You may optionally override the name_prefix + random string by specifying an override"
default = ""
}

View File

@@ -23,7 +23,7 @@ locals {
# ------------------------------------------------------------------------------
data "template_file" "failover_proxy_connection" {
count = "${var.enable_failover_replica}"
count = "${local.actual_failover_replica_count}"
template = "${var.project}:${var.region}:${google_sql_database_instance.failover_replica.0.name}"
}
@@ -36,27 +36,27 @@ data "template_file" "failover_proxy_connection" {
# ------------------------------------------------------------------------------
data "template_file" "failover_certificate" {
count = "${var.enable_failover_replica}"
count = "${local.actual_failover_replica_count}"
template = "${google_sql_database_instance.failover_replica.0.server_ca_cert.0.cert}"
}
data "template_file" "failover_certificate_common_name" {
count = "${var.enable_failover_replica}"
count = "${local.actual_failover_replica_count}"
template = "${google_sql_database_instance.failover_replica.0.server_ca_cert.0.common_name}"
}
data "template_file" "failover_certificate_create_time" {
count = "${var.enable_failover_replica}"
count = "${local.actual_failover_replica_count}"
template = "${google_sql_database_instance.failover_replica.0.server_ca_cert.0.create_time}"
}
data "template_file" "failover_certificate_expiration_time" {
count = "${var.enable_failover_replica}"
count = "${local.actual_failover_replica_count}"
template = "${google_sql_database_instance.failover_replica.0.server_ca_cert.0.expiration_time}"
}
data "template_file" "failover_certificate_sha1_fingerprint" {
count = "${var.enable_failover_replica}"
count = "${local.actual_failover_replica_count}"
template = "${google_sql_database_instance.failover_replica.0.server_ca_cert.0.sha1_fingerprint}"
}

View File

@@ -12,9 +12,15 @@
# ------------------------------------------------------------------------------
locals {
# Determine the engine type
is_postgres = "${replace(var.engine, "POSTGRES", "") != var.engine}"
is_mysql = "${replace(var.engine, "MYSQL", "") != var.engine}"
# Calculate actuals, so we get expected behavior for each engine
actual_binary_log = "${local.is_postgres ? false : var.mysql_binary_log_enabled}"
actual_availability_type = "${local.is_postgres && var.enable_failover_replica ? "REGIONAL" : "ZONAL"}"
actual_failover_replica_count = "${local.is_postgres ? 0 : var.enable_failover_replica ? 1 : 0}"
# Terraform does not allow using lists of maps with conditionals, so we have to
# trick Terraform by creating a string conditional first.
# See https://github.com/hashicorp/terraform/issues/12453
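A minimal, standalone illustration of that string-conditional workaround, simplified to a plain list of strings (the names are hypothetical, not taken from this module):

locals {
  # Terraform 0.11 conditionals can only return strings, so join the list into
  # a string inside the conditional and split it back into a list afterwards.
  replica_zones_csv = "${var.num_read_replicas > 0 ? join(",", var.read_replica_zones) : ""}"
  replica_zones     = ["${compact(split(",", local.replica_zones_csv))}"]
}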
@@ -71,7 +77,7 @@ resource "google_sql_database_instance" "master" {
}
backup_configuration {
binary_log_enabled = "${var.binary_log_enabled}"
binary_log_enabled = "${local.actual_binary_log}"
enabled = "${var.backup_enabled}"
start_time = "${var.backup_start_time}"
}
@@ -85,7 +91,7 @@ resource "google_sql_database_instance" "master" {
disk_size = "${var.disk_size}"
disk_type = "${var.disk_type}"
database_flags = ["${var.database_flags}"]
availability_type = "${var.availability_type}"
availability_type = "${local.actual_availability_type}"
user_labels = "${var.custom_labels}"
}
@@ -138,7 +144,7 @@ resource "null_resource" "wait_for" {
# ------------------------------------------------------------------------------
resource "google_sql_database_instance" "failover_replica" {
count = "${var.enable_failover_replica}"
count = "${local.actual_failover_replica_count}"
depends_on = [
"google_sql_database_instance.master",

View File

@@ -71,7 +71,7 @@ output "db_name" {
}
# ------------------------------------------------------------------------------
# FAILOVER REPLICA OUTPUTS
# FAILOVER REPLICA OUTPUTS - ONLY APPLICABLE TO MYSQL
# ------------------------------------------------------------------------------
output "failover_instance_name" {
@@ -101,7 +101,7 @@ output "failover_proxy_connection" {
}
# ------------------------------------------------------------------------------
# FAILOVER CERT OUTPUTS
# FAILOVER CERT OUTPUTS - ONLY APPLICABLE TO MYSQL
# ------------------------------------------------------------------------------
output "failover_replica_ca_cert" {

View File

@@ -66,11 +66,6 @@ variable "authorized_gae_applications" {
default = []
}
variable "availability_type" {
description = "This specifies whether a PostgreSQL instance should be set up for high availability (REGIONAL) or single zone (ZONAL)."
default = "ZONAL"
}
variable "backup_enabled" {
description = "Set to false if you want to disable backup."
default = true
@@ -81,8 +76,8 @@ variable "backup_start_time" {
default = "04:00"
}
variable "binary_log_enabled" {
description = "Set to false if you want to disable binary logs. Note, when using failover or read replicas, master and existing backups need to have binary_log_enabled=true set."
variable "mysql_binary_log_enabled" {
description = "Set to false if you want to disable binary logs - only applicable to MySQL. Note, when using failover or read replicas, master and existing backups need to have binary_log_enabled=true set."
default = true
}
@@ -172,7 +167,7 @@ variable "enable_failover_replica" {
}
variable "failover_replica_zone" {
description = "The preferred zone for the failover instance (e.g. 'us-central1-b'). Must be different than 'master_zone'."
description = "The preferred zone for the failover instance (e.g. 'us-central1-b'). Must be different than 'master_zone'. Only applicable to MySQL, Postgres will determine this automatically."
default = ""
}

test/Gopkg.lock generated
View File

@@ -24,6 +24,7 @@
"logging",
"proxy/certs",
"proxy/dialers/mysql",
"proxy/dialers/postgres",
"proxy/proxy",
"proxy/util",
]
@@ -182,6 +183,17 @@
pruneopts = ""
revision = "c2b33e84"
[[projects]]
digest = "1:29145d7af4adafd72a79df5e41456ac9e232d5a28c1cd4dacf3ff008a217fc10"
name = "github.com/lib/pq"
packages = [
".",
"oid",
]
pruneopts = ""
revision = "4ded0e9383f75c197b3a2aaa6d590ac52df6fd79"
version = "v1.0.0"
[[projects]]
digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
name = "github.com/pmezard/go-difflib"
@@ -413,11 +425,13 @@
analyzer-version = 1
input-imports = [
"github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/mysql",
"github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/postgres",
"github.com/go-sql-driver/mysql",
"github.com/gruntwork-io/terratest/modules/gcp",
"github.com/gruntwork-io/terratest/modules/logger",
"github.com/gruntwork-io/terratest/modules/terraform",
"github.com/gruntwork-io/terratest/modules/test-structure",
"github.com/lib/pq",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require",
]

View File

@@ -1,13 +0,0 @@
package test
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestCloudSQLPostgres(t *testing.T) {
t.Parallel()
assert.Equal(t, "5432", "5432")
}

View File

@@ -42,7 +42,7 @@ func TestMySqlPrivateIP(t *testing.T) {
test_structure.RunTestStage(t, "deploy", func() {
region := test_structure.LoadString(t, exampleDir, KEY_REGION)
projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT)
terraformOptions := createTerratestOptionsForMySql(projectId, region, exampleDir, NAME_PREFIX_PRIVATE, "", "", 0, "")
terraformOptions := createTerratestOptionsForCloudSql(projectId, region, exampleDir, NAME_PREFIX_PRIVATE, "", "", 0, "")
test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions)
terraform.InitAndApply(t, terraformOptions)

View File

@@ -65,7 +65,7 @@ func TestMySqlPublicIP(t *testing.T) {
test_structure.RunTestStage(t, "deploy", func() {
region := test_structure.LoadString(t, exampleDir, KEY_REGION)
projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT)
terraformOptions := createTerratestOptionsForMySql(projectId, region, exampleDir, NAME_PREFIX_PUBLIC, "", "", 0, "")
terraformOptions := createTerratestOptionsForCloudSql(projectId, region, exampleDir, NAME_PREFIX_PUBLIC, "", "", 0, "")
test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions)
terraform.InitAndApply(t, terraformOptions)
@@ -119,8 +119,8 @@ func TestMySqlPublicIP(t *testing.T) {
}
// Clean up
logger.Logf(t, "Empty table: %s", MYSQL_EMPTY_TEST_TABLE_STATEMENT)
if _, err = db.Exec(MYSQL_EMPTY_TEST_TABLE_STATEMENT); err != nil {
logger.Logf(t, "Empty table: %s", SQL_EMPTY_TEST_TABLE_STATEMENT)
if _, err = db.Exec(SQL_EMPTY_TEST_TABLE_STATEMENT); err != nil {
t.Fatalf("Failed to clean up table: %v", err)
}

View File

@@ -58,7 +58,7 @@ func TestMySqlReplicas(t *testing.T) {
masterZone := test_structure.LoadString(t, exampleDir, KEY_MASTER_ZONE)
failoverReplicaZone := test_structure.LoadString(t, exampleDir, KEY_FAILOVER_REPLICA_ZONE)
readReplicaZone := test_structure.LoadString(t, exampleDir, KEY_READ_REPLICA_ZONE)
terraformOptions := createTerratestOptionsForMySql(projectId, region, exampleDir, NAME_PREFIX_REPLICAS, masterZone, failoverReplicaZone, 1, readReplicaZone)
terraformOptions := createTerratestOptionsForCloudSql(projectId, region, exampleDir, NAME_PREFIX_REPLICAS, masterZone, failoverReplicaZone, 1, readReplicaZone)
test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions)
terraform.InitAndApply(t, terraformOptions)
@@ -132,8 +132,8 @@ func TestMySqlReplicas(t *testing.T) {
}
// Clean up
logger.Logf(t, "Empty table: %s", MYSQL_EMPTY_TEST_TABLE_STATEMENT)
if _, err = db.Exec(MYSQL_EMPTY_TEST_TABLE_STATEMENT); err != nil {
logger.Logf(t, "Empty table: %s", SQL_EMPTY_TEST_TABLE_STATEMENT)
if _, err = db.Exec(SQL_EMPTY_TEST_TABLE_STATEMENT); err != nil {
t.Fatalf("Failed to clean up table: %v", err)
}
@@ -190,11 +190,11 @@ func TestMySqlReplicas(t *testing.T) {
logger.Logf(t, "Failed to insert data to read replica as expected: %v", err)
// Prepare statement for reading data
stmtOut, err := db.Prepare(MYSQL_QUERY_ROW_COUNT)
stmtOut, err := db.Prepare(SQL_QUERY_ROW_COUNT)
require.NoError(t, err, "Failed to prepare readonly count statement")
// Query data, results don't matter...
logger.Logf(t, "Query r/o data: %s", MYSQL_QUERY_ROW_COUNT)
logger.Logf(t, "Query r/o data: %s", SQL_QUERY_ROW_COUNT)
var numResults int

View File

@@ -0,0 +1,73 @@
package test
import (
"fmt"
"github.com/gruntwork-io/terratest/modules/gcp"
"github.com/gruntwork-io/terratest/modules/terraform"
"github.com/gruntwork-io/terratest/modules/test-structure"
"github.com/stretchr/testify/assert"
"path/filepath"
"strings"
"testing"
)
const NAME_PREFIX_POSTGRES_PRIVATE = "postgres-private"
const EXAMPLE_NAME_POSTGRES_PRIVATE = "postgres-private-ip"
func TestPostgresPrivateIP(t *testing.T) {
t.Parallel()
//os.Setenv("SKIP_bootstrap", "true")
//os.Setenv("SKIP_deploy", "true")
//os.Setenv("SKIP_validate_outputs", "true")
//os.Setenv("SKIP_teardown", "true")
_examplesDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples")
exampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_POSTGRES_PRIVATE)
test_structure.RunTestStage(t, "bootstrap", func() {
projectId := gcp.GetGoogleProjectIDFromEnvVar(t)
region := getRandomRegion(t, projectId)
test_structure.SaveString(t, exampleDir, KEY_REGION, region)
test_structure.SaveString(t, exampleDir, KEY_PROJECT, projectId)
})
// At the end of the test, run `terraform destroy` to clean up any resources that were created
defer test_structure.RunTestStage(t, "teardown", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
terraform.Destroy(t, terraformOptions)
})
test_structure.RunTestStage(t, "deploy", func() {
region := test_structure.LoadString(t, exampleDir, KEY_REGION)
projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT)
terraformOptions := createTerratestOptionsForCloudSql(projectId, region, exampleDir, NAME_PREFIX_POSTGRES_PRIVATE, "", "", 0, "")
test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions)
terraform.InitAndApply(t, terraformOptions)
})
test_structure.RunTestStage(t, "validate_outputs", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
region := test_structure.LoadString(t, exampleDir, KEY_REGION)
projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT)
instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME)
ipAddressesFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_IP_ADDRESSES)
privateIPFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PRIVATE_IP)
assert.Contains(t, ipAddressesFromOutput, "PRIVATE", "IP Addresses output has to contain 'PRIVATE'")
assert.Contains(t, ipAddressesFromOutput, privateIPFromOutput, "IP Addresses output has to contain 'private_ip' from output")
dbNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_DB_NAME)
proxyConnectionFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PROXY_CONNECTION)
expectedDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, instanceNameFromOutput)
assert.True(t, strings.HasPrefix(instanceNameFromOutput, NAME_PREFIX_POSTGRES_PRIVATE))
assert.Equal(t, DB_NAME, dbNameFromOutput)
assert.Equal(t, expectedDBConn, proxyConnectionFromOutput)
})
}

View File

@@ -0,0 +1,249 @@
package test
import (
"database/sql"
"fmt"
_ "github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/postgres"
"github.com/gruntwork-io/terratest/modules/gcp"
"github.com/gruntwork-io/terratest/modules/logger"
"github.com/gruntwork-io/terratest/modules/terraform"
"github.com/gruntwork-io/terratest/modules/test-structure"
_ "github.com/lib/pq"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"os"
"path/filepath"
"strings"
"testing"
)
const NAME_PREFIX_POSTGRES_PUBLIC = "postgres-public"
const EXAMPLE_NAME_POSTGRES_PUBLIC = "postgres-public-ip"
func TestPostgresPublicIP(t *testing.T) {
t.Parallel()
//os.Setenv("SKIP_bootstrap", "true")
//os.Setenv("SKIP_deploy", "true")
//os.Setenv("SKIP_validate_outputs", "true")
//os.Setenv("SKIP_sql_tests", "true")
//os.Setenv("SKIP_proxy_tests", "true")
//os.Setenv("SKIP_deploy_cert", "true")
//os.Setenv("SKIP_redeploy", "true")
//os.Setenv("SKIP_ssl_sql_tests", "true")
//os.Setenv("SKIP_teardown_cert", "true")
//os.Setenv("SKIP_teardown", "true")
_examplesDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples")
exampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_POSTGRES_PUBLIC)
certExampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_CERT)
// BOOTSTRAP VARIABLES FOR THE TESTS
test_structure.RunTestStage(t, "bootstrap", func() {
projectId := gcp.GetGoogleProjectIDFromEnvVar(t)
region := getRandomRegion(t, projectId)
test_structure.SaveString(t, exampleDir, KEY_REGION, region)
test_structure.SaveString(t, exampleDir, KEY_PROJECT, projectId)
})
// AT THE END OF THE TESTS, RUN `terraform destroy`
// TO CLEAN UP ANY RESOURCES THAT WERE CREATED
defer test_structure.RunTestStage(t, "teardown", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
terraform.Destroy(t, terraformOptions)
})
defer test_structure.RunTestStage(t, "teardown_cert", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, certExampleDir)
terraform.Destroy(t, terraformOptions)
})
test_structure.RunTestStage(t, "deploy", func() {
region := test_structure.LoadString(t, exampleDir, KEY_REGION)
projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT)
terraformOptions := createTerratestOptionsForCloudSql(projectId, region, exampleDir, NAME_PREFIX_POSTGRES_PUBLIC, "", "", 0, "")
test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions)
terraform.InitAndApply(t, terraformOptions)
})
// VALIDATE MODULE OUTPUTS
test_structure.RunTestStage(t, "validate_outputs", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
region := test_structure.LoadString(t, exampleDir, KEY_REGION)
projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT)
instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME)
dbNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_DB_NAME)
proxyConnectionFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PROXY_CONNECTION)
expectedDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, instanceNameFromOutput)
assert.True(t, strings.HasPrefix(instanceNameFromOutput, NAME_PREFIX_POSTGRES_PUBLIC))
assert.Equal(t, DB_NAME, dbNameFromOutput)
assert.Equal(t, expectedDBConn, proxyConnectionFromOutput)
})
// TEST REGULAR SQL CLIENT
test_structure.RunTestStage(t, "sql_tests", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
publicIp := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PUBLIC_IP)
connectionString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", DB_USER, DB_PASS, publicIp, DB_NAME)
// Does not actually open up the connection - just returns a DB ref
logger.Logf(t, "Connecting to: %s", publicIp)
db, err := sql.Open("postgres", connectionString)
require.NoError(t, err, "Failed to open DB connection")
// Make sure we clean up properly
defer db.Close()
// Run ping to actually test the connection
logger.Log(t, "Ping the DB")
if err = db.Ping(); err != nil {
t.Fatalf("Failed to ping DB: %v", err)
}
// Create table if not exists
logger.Logf(t, "Create table: %s", POSTGRES_CREATE_TEST_TABLE_WITH_SERIAL)
if _, err = db.Exec(POSTGRES_CREATE_TEST_TABLE_WITH_SERIAL); err != nil {
t.Fatalf("Failed to create table: %v", err)
}
// Clean up
logger.Logf(t, "Empty table: %s", SQL_EMPTY_TEST_TABLE_STATEMENT)
if _, err = db.Exec(SQL_EMPTY_TEST_TABLE_STATEMENT); err != nil {
t.Fatalf("Failed to clean up table: %v", err)
}
logger.Logf(t, "Insert data: %s", POSTGRES_INSERT_TEST_ROW)
var testid int
err = db.QueryRow(POSTGRES_INSERT_TEST_ROW).Scan(&testid)
require.NoError(t, err, "Failed to insert data")
assert.True(t, testid > 0, "Data was inserted")
})
// TEST CLOUD SQL PROXY
test_structure.RunTestStage(t, "proxy_tests", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
proxyConn := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PROXY_CONNECTION)
logger.Logf(t, "Connecting to: %s via Cloud SQL Proxy", proxyConn)
// Use the Cloud SQL Proxy for queries
// See https://cloud.google.com/sql/docs/mysql/sql-proxy
// Note that sslmode=disable is required, but it does not mean that the connection
// is unencrypted. All connections via the proxy are completely encrypted.
datasourceName := fmt.Sprintf("host=%s user=%s dbname=%s password=%s sslmode=disable", proxyConn, DB_USER, DB_NAME, DB_PASS)
db, err := sql.Open("cloudsqlpostgres", datasourceName)
require.NoError(t, err, "Failed to open Proxy DB connection")
// Make sure we clean up properly
defer db.Close()
// Run ping to actually test the connection
logger.Log(t, "Ping the DB via Proxy")
if err = db.Ping(); err != nil {
t.Fatalf("Failed to ping DB via Proxy: %v", err)
}
logger.Logf(t, "Insert data via Proxy: %s", POSTGRES_INSERT_TEST_ROW)
var testid int
err = db.QueryRow(POSTGRES_INSERT_TEST_ROW).Scan(&testid)
require.NoError(t, err, "Failed to insert data via Proxy")
assert.True(t, testid > 0, "Assert data was inserted")
})
// CREATE CLIENT CERT
test_structure.RunTestStage(t, "deploy_cert", func() {
region := test_structure.LoadString(t, exampleDir, KEY_REGION)
projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT)
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME)
commonName := fmt.Sprintf("%s-client", instanceNameFromOutput)
terraformOptionsForCert := createTerratestOptionsForClientCert(projectId, region, certExampleDir, commonName, instanceNameFromOutput)
test_structure.SaveTerraformOptions(t, certExampleDir, terraformOptionsForCert)
terraform.InitAndApply(t, terraformOptionsForCert)
})
// REDEPLOY WITH FORCED SSL SETTINGS
test_structure.RunTestStage(t, "redeploy", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
// Force secure connections
terraformOptions.Vars["require_ssl"] = true
terraform.InitAndApply(t, terraformOptions)
})
// RUN TESTS WITH SECURED CONNECTION
test_structure.RunTestStage(t, "ssl_sql_tests", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
terraformOptionsForCert := test_structure.LoadTerraformOptions(t, certExampleDir)
//********************************************************
// First test that we're not allowed to connect over insecure connection
//********************************************************
publicIp := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PUBLIC_IP)
connectionString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", DB_USER, DB_PASS, publicIp, DB_NAME)
// Does not actually open up the connection - just returns a DB ref
logger.Logf(t, "Connecting to: %s", publicIp)
db, err := sql.Open("postgres",
connectionString)
require.NoError(t, err, "Failed to open DB connection")
// Make sure we clean up properly
defer db.Close()
// Run ping to actually test the connection
logger.Log(t, "Ping the DB with forced SSL")
if err = db.Ping(); err != nil {
logger.Logf(t, "Not allowed to ping %s as expected.", publicIp)
} else {
t.Fatalf("Ping %v succeeded against the odds.", publicIp)
}
//********************************************************
// Test connection over secure connection
//********************************************************
// Prepare certificates
serverCertB := []byte(terraform.Output(t, terraformOptions, OUTPUT_MASTER_CA_CERT))
clientCertB := []byte(terraform.Output(t, terraformOptionsForCert, OUTPUT_CLIENT_CA_CERT))
clientPKB := []byte(terraform.Output(t, terraformOptionsForCert, OUTPUT_CLIENT_PRIVATE_KEY))
serverCertFile := createTempFile(t, serverCertB)
defer os.Remove(serverCertFile.Name())
clientCertFile := createTempFile(t, clientCertB)
defer os.Remove(clientCertFile.Name())
clientPKFile := createTempFile(t, clientPKB)
defer os.Remove(clientPKFile.Name())
// Prepare the secure connection string and ping the DB
sslConnectionString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=require&sslrootcert=%s&sslcert=%s&sslkey=%s", DB_USER, DB_PASS, publicIp, DB_NAME, serverCertFile.Name(), clientCertFile.Name(), clientPKFile.Name())
db, err = sql.Open("postgres", sslConnectionString)
require.NoError(t, err, "Failed to open DB connection with forced SSL")
// Run ping to actually test the connection with the SSL config
logger.Log(t, "Ping the DB with forced SSL")
if err = db.Ping(); err != nil {
t.Fatalf("Failed to ping DB with forced SSL: %v", err)
}
})
}

View File

@@ -0,0 +1,177 @@
package test
import (
"database/sql"
"fmt"
"github.com/gruntwork-io/terratest/modules/gcp"
"github.com/gruntwork-io/terratest/modules/logger"
"github.com/gruntwork-io/terratest/modules/terraform"
"github.com/gruntwork-io/terratest/modules/test-structure"
_ "github.com/lib/pq"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"path/filepath"
"strings"
"testing"
)
const NAME_PREFIX_POSTGRES_REPLICAS = "postgres-replicas"
const EXAMPLE_NAME_POSTGRES_REPLICAS = "postgres-replicas"
func TestPostgresReplicas(t *testing.T) {
t.Parallel()
//os.Setenv("SKIP_bootstrap", "true")
//os.Setenv("SKIP_deploy", "true")
//os.Setenv("SKIP_validate_outputs", "true")
//os.Setenv("SKIP_sql_tests", "true")
//os.Setenv("SKIP_read_replica_tests", "true")
//os.Setenv("SKIP_teardown", "true")
_examplesDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples")
exampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_POSTGRES_REPLICAS)
// BOOTSTRAP VARIABLES FOR THE TESTS
test_structure.RunTestStage(t, "bootstrap", func() {
projectId := gcp.GetGoogleProjectIDFromEnvVar(t)
region := getRandomRegion(t, projectId)
masterZone, readReplicaZone := getTwoDistinctRandomZonesForRegion(t, projectId, region)
test_structure.SaveString(t, exampleDir, KEY_REGION, region)
test_structure.SaveString(t, exampleDir, KEY_MASTER_ZONE, masterZone)
test_structure.SaveString(t, exampleDir, KEY_READ_REPLICA_ZONE, readReplicaZone)
test_structure.SaveString(t, exampleDir, KEY_PROJECT, projectId)
})
// AT THE END OF THE TESTS, RUN `terraform destroy`
// TO CLEAN UP ANY RESOURCES THAT WERE CREATED
defer test_structure.RunTestStage(t, "teardown", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
terraform.Destroy(t, terraformOptions)
})
test_structure.RunTestStage(t, "deploy", func() {
region := test_structure.LoadString(t, exampleDir, KEY_REGION)
projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT)
masterZone := test_structure.LoadString(t, exampleDir, KEY_MASTER_ZONE)
readReplicaZone := test_structure.LoadString(t, exampleDir, KEY_READ_REPLICA_ZONE)
terraformOptions := createTerratestOptionsForCloudSql(projectId, region, exampleDir, NAME_PREFIX_POSTGRES_REPLICAS, masterZone, "", 1, readReplicaZone)
test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions)
terraform.InitAndApply(t, terraformOptions)
})
// VALIDATE MODULE OUTPUTS
test_structure.RunTestStage(t, "validate_outputs", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
region := test_structure.LoadString(t, exampleDir, KEY_REGION)
projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT)
instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME)
dbNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_DB_NAME)
proxyConnectionFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PROXY_CONNECTION)
expectedDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, instanceNameFromOutput)
assert.True(t, strings.HasPrefix(instanceNameFromOutput, NAME_PREFIX_POSTGRES_REPLICAS))
assert.Equal(t, DB_NAME, dbNameFromOutput)
assert.Equal(t, expectedDBConn, proxyConnectionFromOutput)
// Read replica outputs
readReplicaInstanceNameFromOutputList := terraform.OutputList(t, terraformOptions, OUTPUT_READ_REPLICA_INSTANCE_NAMES)
readReplicaProxyConnectionFromOutputList := terraform.OutputList(t, terraformOptions, OUTPUT_READ_REPLICA_PROXY_CONNECTIONS)
readReplicaInstanceNameFromOutput := readReplicaInstanceNameFromOutputList[0]
readReplicaProxyConnectionFromOutput := readReplicaProxyConnectionFromOutputList[0]
expectedReadReplicaDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, readReplicaInstanceNameFromOutput)
assert.True(t, strings.HasPrefix(readReplicaInstanceNameFromOutput, NAME_PREFIX_POSTGRES_REPLICAS))
assert.Equal(t, expectedReadReplicaDBConn, readReplicaProxyConnectionFromOutput)
})
// TEST REGULAR SQL CLIENT
test_structure.RunTestStage(t, "sql_tests", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
publicIp := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PUBLIC_IP)
connectionString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", DB_USER, DB_PASS, publicIp, DB_NAME)
// Does not actually open up the connection - just returns a DB ref
logger.Logf(t, "Connecting to: %s", publicIp)
db, err := sql.Open("postgres", connectionString)
require.NoError(t, err, "Failed to open DB connection")
// Make sure we clean up properly
defer db.Close()
// Run ping to actually test the connection
logger.Log(t, "Ping the DB")
if err = db.Ping(); err != nil {
t.Fatalf("Failed to ping DB: %v", err)
}
// Create table if not exists
logger.Logf(t, "Create table: %s", POSTGRES_CREATE_TEST_TABLE_WITH_SERIAL)
if _, err = db.Exec(POSTGRES_CREATE_TEST_TABLE_WITH_SERIAL); err != nil {
t.Fatalf("Failed to create table: %v", err)
}
// Clean up
logger.Logf(t, "Empty table: %s", SQL_EMPTY_TEST_TABLE_STATEMENT)
if _, err = db.Exec(SQL_EMPTY_TEST_TABLE_STATEMENT); err != nil {
t.Fatalf("Failed to clean up table: %v", err)
}
logger.Logf(t, "Insert data: %s", POSTGRES_INSERT_TEST_ROW)
var testid int
err = db.QueryRow(POSTGRES_INSERT_TEST_ROW).Scan(&testid)
require.NoError(t, err, "Failed to insert data")
assert.True(t, testid > 0, "Data was inserted")
})
// TEST READ REPLICA WITH REGULAR SQL CLIENT
test_structure.RunTestStage(t, "read_replica_tests", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir)
readReplicaPublicIpList := terraform.OutputList(t, terraformOptions, OUTPUT_READ_REPLICA_PUBLIC_IPS)
readReplicaPublicIp := readReplicaPublicIpList[0]
connectionString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", DB_USER, DB_PASS, readReplicaPublicIp, DB_NAME)
// Does not actually open up the connection - just returns a DB ref
logger.Logf(t, "Connecting to: %s", readReplicaPublicIp)
db, err := sql.Open("postgres", connectionString)
require.NoError(t, err, "Failed to open DB connection")
// Make sure we clean up properly
defer db.Close()
// Run ping to actually test the connection
logger.Log(t, "Ping the DB")
if err = db.Ping(); err != nil {
t.Fatalf("Failed to ping DB: %v", err)
}
// Try to insert data to verify we cannot write
logger.Logf(t, "Insert data: %s", POSTGRES_INSERT_TEST_ROW)
var testid int
err = db.QueryRow(POSTGRES_INSERT_TEST_ROW).Scan(&testid)
// This time we actually expect an error:
// 'cannot execute INSERT in a read-only transaction'
require.Error(t, err, "Should not be able to write to read replica")
logger.Logf(t, "Failed to insert data to read replica as expected: %v", err)
// Query data, results don't matter...
logger.Logf(t, "Query r/o data: %s", SQL_QUERY_ROW_COUNT)
rows, err := db.Query(SQL_QUERY_ROW_COUNT)
require.NoError(t, err, "Failed to execute query statement on read replica")
assert.True(t, rows.Next(), "We have a result")
})
}

View File

@@ -3,6 +3,9 @@ package test
import (
"github.com/gruntwork-io/terratest/modules/gcp"
"github.com/gruntwork-io/terratest/modules/terraform"
"github.com/stretchr/testify/require"
"io/ioutil"
"os"
"testing"
)
@@ -16,8 +19,6 @@ const KEY_MASTER_ZONE = "masterZone"
const KEY_FAILOVER_REPLICA_ZONE = "failoverReplicaZone"
const KEY_READ_REPLICA_ZONE = "readReplicaZone"
const MYSQL_VERSION = "MYSQL_5_7"
const OUTPUT_MASTER_IP_ADDRESSES = "master_ip_addresses"
const OUTPUT_MASTER_INSTANCE_NAME = "master_instance_name"
const OUTPUT_FAILOVER_INSTANCE_NAME = "failover_instance_name"
@@ -35,9 +36,13 @@ const OUTPUT_CLIENT_PRIVATE_KEY = "client_private_key"
const OUTPUT_DB_NAME = "db_name"
const MYSQL_CREATE_TEST_TABLE_WITH_AUTO_INCREMENT_STATEMENT = "CREATE TABLE IF NOT EXISTS test (id int NOT NULL AUTO_INCREMENT, name varchar(10) NOT NULL, PRIMARY KEY (ID))"
const MYSQL_EMPTY_TEST_TABLE_STATEMENT = "DELETE FROM test"
const MYSQL_INSERT_TEST_ROW = "INSERT INTO test(name) VALUES(?)"
const MYSQL_QUERY_ROW_COUNT = "SELECT count(*) FROM test"
const SQL_EMPTY_TEST_TABLE_STATEMENT = "DELETE FROM test"
const SQL_QUERY_ROW_COUNT = "SELECT count(*) FROM test"
const POSTGRES_CREATE_TEST_TABLE_WITH_SERIAL = "CREATE TABLE IF NOT EXISTS test (id SERIAL, name varchar(10) NOT NULL, PRIMARY KEY (ID))"
const POSTGRES_INSERT_TEST_ROW = "INSERT INTO test(name) VALUES('Grunty') RETURNING id"
func getRandomRegion(t *testing.T, projectID string) string {
approvedRegions := []string{"europe-north1", "europe-west1", "europe-west2", "europe-west3", "us-central1", "us-east1", "us-west1"}
@@ -58,7 +63,7 @@ func getTwoDistinctRandomZonesForRegion(t *testing.T, projectID string, region s
return firstZone, secondZone
}
func createTerratestOptionsForMySql(projectId string, region string, exampleDir string, namePrefix string, masterZone string, failoverReplicaZone string, numReadReplicas int, readReplicaZone string) *terraform.Options {
func createTerratestOptionsForCloudSql(projectId string, region string, exampleDir string, namePrefix string, masterZone string, failoverReplicaZone string, numReadReplicas int, readReplicaZone string) *terraform.Options {
terratestOptions := &terraform.Options{
// The path to where your Terraform code is located
@@ -71,7 +76,6 @@ func createTerratestOptionsForMySql(projectId string, region string, exampleDir
"failover_replica_zone": failoverReplicaZone,
"project": projectId,
"name_prefix": namePrefix,
"mysql_version": MYSQL_VERSION,
"db_name": DB_NAME,
"master_user_name": DB_USER,
"master_user_password": DB_PASS,
@@ -96,3 +100,13 @@ func createTerratestOptionsForClientCert(projectId string, region string, exampl
return terratestOptions
}
func createTempFile(t *testing.T, content []byte) *os.File {
tmpFile, err := ioutil.TempFile(os.TempDir(), "temp-")
require.NoError(t, err, "Failed to create temp file")
_, err = tmpFile.Write(content)
require.NoError(t, err, "Failed to write to temp file")
err = tmpFile.Close()
require.NoError(t, err, "Failed to close temp file")
return tmpFile
}