Reputation: 11
The Google documentation doesn't make it clear how to do this. My goal is to expose an app deployed in GKE to the VPC only, while keeping it reachable over HTTPS by a user connecting through a VPN in another project. Context:
Current config (Terraform):
resource "kubernetes_service" "this" {
metadata {
name = var.service_name
namespace = kubernetes_namespace_v1.this.metadata[0].name
}
spec {
type = "NodePort"
selector = {
app = var.service_name
}
port {
name = "port-http"
protocol = "TCP"
port = var.service_port
target_port = var.service_target_port
app_protocol = "HTTP"
}
port {
name = "port-https"
protocol = "TCP"
port = 443
target_port = var.service_target_port
app_protocol = "HTTPS"
}
}
provider = kubernetes.gke
}
resource "kubernetes_manifest" "gateway" {
manifest = yamldecode(
<<-YAML
kind: Gateway
apiVersion: gateway.networking.k8s.io/v1beta1
metadata:
name: ${var.service_name}-internal-http
namespace: ${kubernetes_namespace_v1.this.metadata[0].name}
spec:
gatewayClassName: gke-l7-rilb
listeners:
- name: http-listener
protocol: HTTP
port: 80
- name: https-listener
protocol: HTTPS
port: 443
hostnames:
- "${var.service_name}.myapp.com"
tls:
mode: Terminate
certificateRefs:
- name: ${kubernetes_secret.tls_certs.metadata[0].name}
kind: Secret
group: ""
addresses:
- type: NamedAddress
value: ${google_compute_address.static_ip_address.name}
YAML
)
provider = kubernetes.gke
}
resource "kubernetes_manifest" "health_check_policy_https" {
manifest = yamldecode(
<<-YAML
apiVersion: networking.gke.io/v1
kind: HealthCheckPolicy
metadata:
name: ${var.service_name}-healthcheck-https
namespace: ${kubernetes_namespace_v1.this.metadata[0].name}
spec:
default:
checkIntervalSec: 10
timeoutSec: 5
healthyThreshold: 3
unhealthyThreshold: 3
logConfig:
enabled: true
config:
type: HTTPS
httpsHealthCheck:
port: ${var.service_target_port}
requestPath: /
targetRef:
group: ""
kind: Service
name: ${var.service_name}
YAML
)
provider = kubernetes.gke
}
resource "kubernetes_manifest" "httproute" {
manifest = yamldecode(
<<-YAML
kind: HTTPRoute
apiVersion: gateway.networking.k8s.io/v1beta1
metadata:
name: ${var.service_name}-httproute
namespace: ${kubernetes_namespace_v1.this.metadata[0].name}
labels:
gateway: ${kubernetes_manifest.gateway.object.metadata.name}
spec:
parentRefs:
- kind: Gateway
name: ${kubernetes_manifest.gateway.object.metadata.name}
hostnames:
- "${var.service_name}.myapp.com"
rules:
- backendRefs:
- name: ${kubernetes_service.this.metadata[0].name}
port: ${var.service_port}
YAML
)
provider = kubernetes.gke
}
resource "kubernetes_manifest" "gateway_policy" {
manifest = yamldecode(
<<-YAML
apiVersion: networking.gke.io/v1
kind: GCPGatewayPolicy
metadata:
name: ${var.service_name}-gateway-policy
namespace: ${kubernetes_namespace_v1.this.metadata[0].name}
spec:
default:
allowGlobalAccess: true
targetRef:
group: gateway.networking.k8s.io
kind: Gateway
name: ${kubernetes_manifest.gateway.object.metadata.name}
YAML
)
provider = kubernetes.gke
}
I've looked at a lot of blogs, forums, and sites without finding a clear answer. Thanks in advance.
allow {
  protocol = "tcp"
  ports    = ["8080", "80", "443", "5000", "8501"]
}
source_ranges = ["0.0.0.0/0"]
CertificateMap or Google-managed SSL certificates are not supported with regional Gateways. Use self-managed regional SSL certificates or secrets to secure traffic between your clients and your regional Gateway.
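In line with that note, the kubernetes_secret.tls_certs referenced by the Gateway's certificateRefs would hold a self-managed certificate. A minimal sketch of such a secret, assuming the certificate and key exist as local PEM files (the paths and secret name are assumptions):

resource "kubernetes_secret" "tls_certs" {
  metadata {
    name      = "${var.service_name}-tls"
    namespace = kubernetes_namespace_v1.this.metadata[0].name
  }

  type = "kubernetes.io/tls"

  data = {
    # Assumed local paths to the self-managed certificate and private key.
    "tls.crt" = file("${path.module}/certs/tls.crt")
    "tls.key" = file("${path.module}/certs/tls.key")
  }

  provider = kubernetes.gke
}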
Upvotes: 1
Views: 69
Reputation: 93
Make sure that the requestPath in your HealthCheckPolicy configuration points to a valid endpoint that returns a 200 OK response, or else your backend service will keep showing an unhealthy status. You can refer to this article for setting up your liveness check. Also, validate that your health check is pointing to the correct port, i.e. the port your service pod is actually listening on and exposing.
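One way to check both points is to confirm that the Deployment behind the Service listens on var.service_target_port and serves the probed path. A rough sketch of the relevant container settings, since the Deployment isn't shown in the question (the image and probe path are assumptions):

resource "kubernetes_deployment" "this" {
  metadata {
    name      = var.service_name
    namespace = kubernetes_namespace_v1.this.metadata[0].name
  }

  spec {
    selector {
      match_labels = {
        app = var.service_name
      }
    }

    template {
      metadata {
        labels = {
          app = var.service_name
        }
      }

      spec {
        container {
          name  = var.service_name
          image = "europe-docker.pkg.dev/my-project/my-repo/my-app:latest" # assumed image

          port {
            container_port = var.service_target_port # must match the Service's target_port
          }

          # The probe path should be the same endpoint the HealthCheckPolicy probes,
          # and it must return 200 OK.
          readiness_probe {
            http_get {
              path = "/"
              port = var.service_target_port
            }
          }
        }
      }
    }
  }

  provider = kubernetes.gke
}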
You can also try switching your health check type to HTTP instead of HTTPS and see if it helps:
config:
  type: HTTP # Switch to HTTP instead of HTTPS
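Applied to the HealthCheckPolicy from the question, the config block would then look roughly like this (the /healthz path is an assumption; use whichever endpoint your app actually serves with a 200 OK):

config:
  type: HTTP                            # plaintext probe instead of HTTPS
  httpHealthCheck:
    port: ${var.service_target_port}    # the port the pod is actually listening on
    requestPath: /healthz               # assumed health endpoint returning 200 OK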
Upvotes: 1