Matthew The Terrible

Reputation: 1643

terraform azurerm - cannot destroy public ip

New to Terraform, so I'm hoping this is an easy issue. I'm creating some resources in Azure and deploying a simple Flask application to AKS. Creating works fine using terraform apply: I can see that Azure is provisioned correctly and I can hit the Flask app.

When I try to run terraform destroy, I get the error: "StatusCode=400...In order to delete the public IP, disassociate/detach the Public IP address from the resource."

Main.tf

variable "subscription_id" {}
variable "client_id" {}
variable "client_secret" {}
variable "tenant_id" {}

provider "azurerm" {
    version         = "=1.28.0"
    tenant_id       = "${var.tenant_id}"
    subscription_id = "${var.subscription_id}"
}

resource "azurerm_resource_group" "aks" {
    name        = "${var.name_prefix}"
    location    = "${var.location}"
}

resource "azurerm_kubernetes_cluster" "k8s" {
    name                    = "${var.name_prefix}-aks"
    kubernetes_version      = "${var.kubernetes_version}"
    location                = "${azurerm_resource_group.aks.location}"
    resource_group_name     = "${azurerm_resource_group.aks.name}"
    dns_prefix              = "AKS-${var.dns_prefix}"

    agent_pool_profile {
        name                = "${var.node_pool_name}"
        count               = "${var.node_pool_size}"
        vm_size             = "${var.node_pool_vmsize}"
        os_type             = "${var.node_pool_os}"
        os_disk_size_gb     = 30
    }

    service_principal {
        client_id           = "${var.client_id}"
        client_secret       = "${var.client_secret}"
    }

    tags = {
        environment = "${var.env_tag}"
    }
}

provider "helm" {
  install_tiller = true

  kubernetes {
    host                   = "${azurerm_kubernetes_cluster.k8s.kube_config.0.host}"
    client_certificate     = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_certificate)}"
    client_key             = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_key)}"
    cluster_ca_certificate = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.cluster_ca_certificate)}"
  }
}

# Create Static Public IP Address to be used by Nginx Ingress
resource "azurerm_public_ip" "nginx_ingress" {
  name                         = "nginx-ingress-public-ip"
  location                     = "${azurerm_kubernetes_cluster.k8s.location}"
  resource_group_name          = "${azurerm_kubernetes_cluster.k8s.node_resource_group}"
  allocation_method            = "Static"
  domain_name_label            = "${var.name_prefix}"
}

# Add Kubernetes Stable Helm charts repo
data "helm_repository" "stable" {
  name = "stable"
  url  = "https://kubernetes-charts.storage.googleapis.com"
}

# Install Nginx Ingress using Helm Chart
resource "helm_release" "nginx_ingress" {
  name       = "nginx-ingress"
  repository = "${data.helm_repository.stable.metadata.0.name}"
  chart      = "nginx-ingress"

  set {
    name  = "rbac.create"
    value = "false"
  }

  set {
    name  = "controller.service.externalTrafficPolicy"
    value = "Local"
  }

  set {
    name  = "controller.service.loadBalancerIP"
    value = "${azurerm_public_ip.nginx_ingress.ip_address}"
  }
}

I'm also deploying my Kubernetes resources in this file, k8s.tf:

provider "kubernetes" {
    host                    = "${azurerm_kubernetes_cluster.k8s.kube_config.0.host}"
    username                = "${azurerm_kubernetes_cluster.k8s.kube_config.0.username}"
    password                = "${azurerm_kubernetes_cluster.k8s.kube_config.0.password}"
    client_certificate      = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_certificate)}"
    client_key              = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_key)}"
    cluster_ca_certificate  = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.cluster_ca_certificate)}"
}

resource "kubernetes_deployment" "flask-api-deployment" {
    metadata {
        name = "flask-api-deployment"
    }

    spec {
        replicas = 2

        selector {
            match_labels {
                component = "api"
            }
        }

        template {
            metadata {
                labels = {
                    component = "api"
                }
            }

            spec {
                container {
                    image = "xxx.azurecr.io/sampleflask:0.1.0"
                    name = "flask-api"
                    port {
                        container_port = 5000
                    }
                }
            }
        }
    }
}

resource "kubernetes_service" "api-cluster-ip-service" {
    metadata {
        name = "flask-api-cluster-ip-service"
    }

    spec {
        selector {
            component = "api"
        }

        port {
            port = 5000
            target_port = 5000
        }
    }
}

resource "kubernetes_ingress" "flask-ingress-service" {
    metadata {
        name = "flask-ingress-service"
    }

    spec {
        backend {
            service_name = "flask-api-cluster-ip-service"
            service_port = 5000
        }
    }
}

Upvotes: 0

Views: 2261

Answers (2)

110100100

Reputation: 199

The user @4c74356b41 is right, but to give more information, assume a config like this:

resource "azurerm_kubernetes_cluster" "k8s" {
  name                = "aks-e2x-nuffield-uat"
  resource_group_name = azurerm_resource_group.core_rg.name
  location            = azurerm_resource_group.core_rg.location
  dns_prefix          = "aks-e2x-nuffield-uat-dns"
  kubernetes_version  = var.k8s_version
  # NOTE currently only a single node pool, default, is configured
  private_cluster_enabled = true
...
  network_profile {
    network_plugin     = "kubenet"
    load_balancer_sku  = "standard"
    service_cidr       = var.k8s_service_subnet
    pod_cidr           = var.k8s_pod_subnet
    docker_bridge_cidr = "172.17.0.1/16"
    dns_service_ip     = "40.0.8.10" # within the service subnet
  }
}

With load_balancer_sku set to standard, you can access the public IP for use elsewhere like this:

data "azurerm_public_ip" "k8s_load_balancer_ip" {
  name = reverse(split("/", tolist(azurerm_kubernetes_cluster.k8s.network_profile.0.load_balancer_profile.0.effective_outbound_ips)[0]))[0]
  resource_group_name = azurerm_kubernetes_cluster.k8s.node_resource_group
}

output "ingress_public_ip" {
  # value = azurerm_public_ip.ingress.ip_address
  value = data.azurerm_public_ip.k8s_load_balancer_ip.ip_address
}

Upvotes: 0

Charles Xu

Reputation: 31404

For your issue, the problem is the ordering of the resources. When you create the nginx ingress with the public IP, the public IP must be created first. But when you delete the public IP, it's still in use by the nginx ingress, so the deletion fails with that error.

The solution is to detach the public IP from the resource that uses it, and then destroy the resources with Terraform. You can take a look at the explanation in the issue.
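
For example (a sketch, using the resource names from the question's config), you can destroy the Helm release first so it releases the IP before Terraform tries to delete it:

terraform destroy -target=helm_release.nginx_ingress
terraform destroy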

Upvotes: 4
