main.tf
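// This configuration assumes provider blocks for both the google and
// google-beta providers, plus three input variables, none of which are
// declared in this file. A minimal sketch of those declarations (normally
// kept in separate provider.tf / variables.tf files). The kubernetes_dashboard
// addon and use_ip_aliases argument used below predate the 3.x providers,
// so the 2.x version constraint here is an assumption:
provider "google" {
  version = "~> 2.20"
  project = var.project
  region  = var.region
}

provider "google-beta" {
  version = "~> 2.20"
  project = var.project
  region  = var.region
}

variable "cluster_name" {
  description = "Name of the GKE cluster"
  type        = string
}

variable "project" {
  description = "GCP project ID to deploy into"
  type        = string
}

variable "region" {
  description = "GCP region for the regional cluster and node pool"
  type        = string
}
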
resource "google_container_cluster" "cluster" {
provider = google
name = var.cluster_name
project = var.project
location = var.region
//network = google_compute_network.network.self_link
//subnetwork = google_compute_subnetwork.subnetwork.self_link
logging_service = "logging.googleapis.com/kubernetes"
monitoring_service = "monitoring.googleapis.com/kubernetes"
// Decouple the default node pool lifecycle from the cluster object lifecycle
// by removing the node pool and specifying a dedicated node pool in a
// separate resource below.
remove_default_node_pool = "true"
initial_node_count = 1
// Configure various addons
addons_config {
// Disable the Kubernetes dashboard, which is often an attack vector. The
// cluster can still be managed via the GKE UI.
kubernetes_dashboard {
disabled = true
}
// Enable network policy (Calico)
network_policy_config {
disabled = false
}
}
// Enable workload identity
// workload_identity_config {
// identity_namespace = format("%s.svc.id.goog", var.project)
// }
// Disable basic authentication and cert-based authentication.
// Empty fields for username and password are how to "disable" the
// credentials from being generated.
master_auth {
username = ""
password = ""
client_certificate_config {
issue_client_certificate = "false"
}
}
// Enable network policy configurations (like Calico) - for some reason this
// has to be in here twice.
network_policy {
enabled = "true"
}
// Allocate IPs in our subnetwork
ip_allocation_policy {
use_ip_aliases = true
// cluster_secondary_range_name = google_compute_subnetwork.subnetwork.secondary_ip_range.0.range_name
// services_secondary_range_name = google_compute_subnetwork.subnetwork.secondary_ip_range.1.range_name
}
// Specify the list of CIDRs which can access the master's API
master_authorized_networks_config {
cidr_blocks {
display_name = "homeoffice"
cidr_block = "169.111.111.111/32"
}
}
// Configure the cluster to have private nodes and private control plane access only
private_cluster_config {
enable_private_endpoint = "true"
enable_private_nodes = "true"
master_ipv4_cidr_block = "172.16.0.16/28"
}
// Allow plenty of time for each operation to finish (default was 10m)
timeouts {
create = "30m"
update = "30m"
delete = "30m"
}
}
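
// The commented-out network, subnetwork, and secondary range references in
// the cluster above assume a dedicated VPC. A minimal sketch of those
// resources (names and CIDR ranges here are illustrative assumptions, not
// part of the original configuration):
resource "google_compute_network" "network" {
  name                    = "gke-network"
  project                 = var.project
  auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "subnetwork" {
  name          = "gke-subnetwork"
  project       = var.project
  region        = var.region
  network       = google_compute_network.network.self_link
  ip_cidr_range = "10.0.0.0/16"

  // Let private nodes reach Google APIs without external IPs
  private_ip_google_access = true

  // Secondary ranges for pods (index 0) and services (index 1), in the
  // order the cluster's ip_allocation_policy references them.
  secondary_ip_range {
    range_name    = "pods"
    ip_cidr_range = "10.1.0.0/16"
  }

  secondary_ip_range {
    range_name    = "services"
    ip_cidr_range = "10.2.0.0/20"
  }
}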
// A dedicated/separate node pool where workloads will run. A regional node
// pool has "node_count" nodes per zone and uses 3 zones, so this node pool
// will be 3 nodes in size. It uses a non-default service account with
// minimal OAuth scope permissions.
resource "google_container_node_pool" "private-np-1" {
  provider   = google-beta
  name       = "private-np-1"
  location   = var.region
  cluster    = google_container_cluster.cluster.name
  node_count = 1

  // Repair any issues, but don't auto-upgrade node versions
  management {
    auto_repair  = true
    auto_upgrade = false
  }

  node_config {
    machine_type = "n1-standard-2"
    disk_type    = "pd-ssd"
    disk_size_gb = 30
    image_type   = "COS"

    // Use the dedicated service account for this node pool
    // (see the service account sketch after this resource).
    //service_account = google_service_account.gke-sa.email

    // Use the minimal OAuth scopes needed
    oauth_scopes = [
      "https://www.googleapis.com/auth/devstorage.read_only",
      "https://www.googleapis.com/auth/logging.write",
      "https://www.googleapis.com/auth/monitoring",
      "https://www.googleapis.com/auth/servicecontrol",
      "https://www.googleapis.com/auth/service.management.readonly",
      "https://www.googleapis.com/auth/trace.append",
    ]

    labels = {
      cluster = var.cluster_name
    }

    // Enable workload identity on this node pool
    workload_metadata_config {
      node_metadata = "GKE_METADATA_SERVER"
    }

    // GCE metadata values are strings, so "true" is correct here
    metadata = {
      // Set metadata on the VM to supply more entropy
      google-compute-enable-virtio-rng = "true"
      // Explicitly remove the GCE legacy metadata API endpoint
      disable-legacy-endpoints = "true"
    }
  }

  depends_on = [
    google_container_cluster.cluster,
  ]
}
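
// The commented-out service_account reference in the node pool above
// assumes a dedicated, minimally-privileged service account. A minimal
// sketch of that resource and the logging/monitoring role bindings it
// would typically need (the account_id and roles here are illustrative
// assumptions):
resource "google_service_account" "gke-sa" {
  project      = var.project
  account_id   = "gke-sa"
  display_name = "Minimal service account for GKE nodes"
}

resource "google_project_iam_member" "gke-sa-log-writer" {
  project = var.project
  role    = "roles/logging.logWriter"
  member  = "serviceAccount:${google_service_account.gke-sa.email}"
}

resource "google_project_iam_member" "gke-sa-metric-writer" {
  project = var.project
  role    = "roles/monitoring.metricWriter"
  member  = "serviceAccount:${google_service_account.gke-sa.email}"
}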