Reputation: 317
$ terraform -v
Terraform v0.12.6
provider.aws v2.23.0
provider.null v2.1.2
Terraform template to reproduce the error message:
//VARIABLES
variable "aws_access_key" {
  default = "AK"
}
variable "aws_secret_key" {
  default = "SAK"
}
variable "instance_count" {
  default = "3"
}
variable "username" {
  default = "Administrator"
}
variable "admin_password" {
  default = "Password"
}
variable "instance_name" {
  default = "Testing"
}
variable "vpc_id" {
  default = "vpc-id"
}

//PROVIDERS
provider "aws" {
  access_key = "${var.aws_access_key}"
  secret_key = "${var.aws_secret_key}"
  region     = "ap-southeast-2"
}

//RESOURCES
resource "aws_instance" "ec2instance" {
  count                  = "${var.instance_count}"
  ami                    = "Windows AMI"
  instance_type          = "t2.xlarge"
  key_name               = "ec2_key"
  subnet_id              = "subnet-id"
  vpc_security_group_ids = ["${aws_security_group.ec2instance-sg.id}"]
  tags = {
    Name = "${var.instance_name}-${count.index}"
  }
}

resource "null_resource" "nullresource" {
  count = "${var.instance_count}"
  connection {
    type     = "winrm"
    host     = "${element(aws_instance.ec2instance.*.private_ip, count.index)}"
    user     = "${var.username}"
    password = "${var.admin_password}"
    timeout  = "10m"
  }
  provisioner "remote-exec" {
    inline = [
      "powershell.exe Write-Host Instance_No=${count.index}"
    ]
  }
  // provisioner "local-exec" {
  //   command = "powershell.exe Write-Host Instance_No=${count.index}"
  // }
  // provisioner "file" {
  //   source      = "testscript"
  //   destination = "D:/testscript"
  // }
}

resource "aws_security_group" "ec2instance-sg" {
  name   = "${var.instance_name}-sg"
  vpc_id = "${var.vpc_id}"
  // RDP
  ingress {
    from_port   = 3389
    to_port     = 3389
    protocol    = "tcp"
    cidr_blocks = ["CIDR"]
  }
  // WinRM access from the machine running TF to the instance
  ingress {
    from_port   = 5985
    to_port     = 5985
    protocol    = "tcp"
    cidr_blocks = ["CIDR"]
  }
  tags = {
    Name = "${var.instance_name}-sg"
  }
}

//OUTPUTS
output "private_ip" {
  value = "${aws_instance.ec2instance.*.private_ip}"
}
Observations:
Upvotes: 4
Views: 2167
Reputation: 70
Terraform 0.12.26 resolved a similar issue for me (using multiple file provisioners when deploying multiple VMs).
Hope this helps you: https://github.com/hashicorp/terraform/issues/22006
Upvotes: 0
Reputation: 147
I used this trigger in my null_resource and it works perfectly for me. It also works when the number of instances is increased, and it runs the configuration on all instances. I am using Terraform with OpenStack.
triggers = { instance_ids = join(",", openstack_compute_instance_v2.swarm-cluster-hosts[*].id) }
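A minimal sketch of the same idea adapted to the AWS setup from the question (the resource and variable names come from the question's template; untested):
resource "null_resource" "nullresource" {
  count = var.instance_count

  # Recreate this null_resource (and re-run its provisioners) whenever the
  # set of EC2 instance IDs changes, e.g. after instance_count is increased.
  triggers = {
    instance_ids = join(",", aws_instance.ec2instance[*].id)
  }

  # ... connection and provisioner blocks as in the question ...
}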
Upvotes: 1
Reputation: 5016
Update: what eventually did the trick was downgrading Terraform to v0.11.14, as per this issue comment.
A few things you can try:
1. Inline the remote-exec provisioner in the aws_instance resource:
resource "aws_instance" "ec2instance" {
  count = "${var.instance_count}"
  # ...
  provisioner "remote-exec" {
    connection {
      # ...
    }
    inline = [
      # ...
    ]
  }
}
Now you can refer to self inside the connection block to get the instance's private IP.
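For example, a sketch of the question's own connection settings moved inside the instance's remote-exec provisioner, using self (assembled from the values in the question; untested):
resource "aws_instance" "ec2instance" {
  count = "${var.instance_count}"
  # ... ami, instance_type, subnet_id, etc. as in the question ...

  provisioner "remote-exec" {
    connection {
      type     = "winrm"
      host     = "${self.private_ip}" # self is this aws_instance
      user     = "${var.username}"
      password = "${var.admin_password}"
      timeout  = "10m"
    }

    inline = [
      "powershell.exe Write-Host Instance_No=${count.index}"
    ]
  }
}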
2. Add triggers to null_resource:
resource "null_resource" "nullresource" {
  triggers = {
    host    = "${element(aws_instance.ec2instance.*.private_ip, count.index)}" # Rerun when IP changes
    version = "${timestamp()}" # ...or rerun every time
  }
  # ...
}
You can use the triggers attribute to recreate the null_resource and thus re-execute the remote-exec provisioner.
Upvotes: 4