Here is what I have, taken from this repository:
provider "aws" {
region = "${var.aws_region}"
profile = "${var.aws_profile}"
}
##----------------------------
# Get VPC Variables
##----------------------------

#-- Get VPC ID
data "aws_vpc" "selected" {
  tags = {
    Name = "${var.name_tag}"
  }
}

#-- Get Public Subnet List
data "aws_subnet_ids" "selected" {
  vpc_id = "${data.aws_vpc.selected.id}"

  tags = {
    Tier = "public"
  }
}

#--- Gets Security group with tag specified by var.name_tag
data "aws_security_group" "selected" {
  tags = {
    Name = "${var.name_tag}*"
  }
}
#--- Creates SSH key to provision server
module "ssh_key_pair" {
  source                = "git::https://github.com/cloudposse/terraform-aws-key-pair.git?ref=tags/0.3.2"
  namespace             = "example"
  stage                 = "dev"
  name                  = "${var.key_name}"
  ssh_public_key_path   = "${path.module}/secret"
  generate_ssh_key      = "true"
  private_key_extension = ".pem"
  public_key_extension  = ".pub"
}
#-- Grab the latest AMI built with packer - windows2016.json
data "aws_ami" "Windows_2016" {
  owners = ["amazon", "microsoft"]

  filter {
    name   = "is-public"
    values = ["false"]
  }

  filter {
    name   = "name"
    values = ["windows2016Server*"]
  }

  most_recent = true
}
#-- Sets the user data script
data "template_file" "user_data" {
  template = "/scripts/user_data.ps1"
}
#---- Test Development Server
resource "aws_instance" "this" {
  ami                  = "${data.aws_ami.Windows_2016.image_id}"
  instance_type        = "${var.instance}"
  key_name             = "${module.ssh_key_pair.key_name}"
  subnet_id            = "${data.aws_subnet_ids.selected.ids[01]}"
  security_groups      = ["${data.aws_security_group.selected.id}"]
  user_data            = "${data.template_file.user_data.rendered}"
  iam_instance_profile = "${var.iam_role}"
  get_password_data    = "true"

  root_block_device {
    volume_type           = "${var.volume_type}"
    volume_size           = "${var.volume_size}"
    delete_on_termination = "true"
  }

  tags {
    "Name" = "NEW_windows2016"
    "Role" = "Dev"
  }
  #--- Copy ssh keys to S3 Bucket
  provisioner "local-exec" {
    command = "aws s3 cp ${path.module}/secret s3://PATHTOKEYPAIR/ --recursive"
  }

  #--- Deletes keys on destroy
  provisioner "local-exec" {
    when    = "destroy"
    command = "aws s3 rm s3://PATHTOKEYPAIR/${module.ssh_key_pair.key_name}.pem"
  }

  provisioner "local-exec" {
    when    = "destroy"
    command = "aws s3 rm s3://PATHTOKEYPAIR/${module.ssh_key_pair.key_name}.pub"
  }
}
When I run terraform plan, I get the following error message:
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
data.template_file.user_data: Refreshing state...
Error: Error refreshing state: 1 error(s) occurred:
* provider.aws: error validating provider credentials: error calling sts:GetCallerIdentity: NoCredentialProviders: no valid providers in chain. Deprecated.
For verbose messaging see aws.Config.CredentialsChainVerboseErrors
Answer 1
I think you are missing the access and secret keys. Try the following if you are not passing them in as variables.
provider "aws" {
region = "${var.region}"
profile = "${var.profile}"
access_key=********
secret_key=********
}
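If you would rather not hard-code the keys, a minimal sketch of passing them in as variables instead: the variable names aws_access_key and aws_secret_key are placeholders (not part of the original configuration), and their values can be supplied via TF_VAR_aws_access_key / TF_VAR_aws_secret_key or a .tfvars file kept out of version control.
# Hypothetical variables holding the credentials
variable "aws_access_key" {}
variable "aws_secret_key" {}

provider "aws" {
  region     = "${var.region}"
  access_key = "${var.aws_access_key}"
  secret_key = "${var.aws_secret_key}"
}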
Answer 2
Double-check the format of your ~/.aws/credentials file.
In my case, the credentials were in the following format:
[profile]
AWS_ACCESS_KEY_ID=xxxx
AWS_SECRET_ACCESS_KEY=yyyy
Changing it to the following fixed the issue:
[profile]
aws_access_key_id = xxxx
aws_secret_access_key = yyyy
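For reference, a minimal provider block that points at that credentials file explicitly might look like the sketch below; the profile name and region are placeholders, and shared_credentials_file is optional since ~/.aws/credentials is already the default location.
provider "aws" {
  region                  = "us-east-1"           # placeholder region
  profile                 = "profile"             # must match the [profile] section header
  shared_credentials_file = "~/.aws/credentials"  # optional; this is the default path
}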
Answer 3
In my case, when assuming a role for the Terraform backend, I had forgotten to set the session-name attribute.
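For illustration, here is a sketch of an S3 backend block that assumes a role and sets the session name; the bucket, key, and role ARN are placeholders, not values from the original question.
terraform {
  backend "s3" {
    bucket       = "my-terraform-state"                                 # placeholder bucket
    key          = "dev/terraform.tfstate"                              # placeholder state key
    region       = "us-east-1"
    role_arn     = "arn:aws:iam::123456789012:role/terraform-backend"   # placeholder role ARN
    session_name = "terraform"                                          # the attribute that was missing
  }
}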
Answer 4
With the announced update to IAM role trust policy behavior, there is another possibility: you may have already assumed your target role (e.g. via export AWS_PROFILE=...). From the announcement (emphasis mine):
Therefore, beginning today, for any role that has not used the identity-based behavior since June 30, 2022, the role trust policy must explicitly grant permission to all principals, including the role itself, that need to assume it under the specified conditions.
If you have the AWS_PROFILE environment variable set and can run aws sts get-caller-identity and see the role that your backend configuration requires, you have two options:
- You can unset AWS_PROFILE before running Terraform (assuming your default IAM role can assume the role_arn in your backend configuration)
- You can update the target IAM role to trust itself (an example of how to do this is included in the article announcing the change); see the sketch after this list
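For illustration, a sketch of a trust policy that lets the role assume itself, expressed with the aws_iam_policy_document data source; the account ID and role name are placeholders.
data "aws_iam_policy_document" "self_trust" {
  statement {
    actions = ["sts:AssumeRole"]

    principals {
      type = "AWS"

      identifiers = [
        "arn:aws:iam::123456789012:root",                    # existing trusted principal (placeholder)
        "arn:aws:iam::123456789012:role/terraform-backend",  # the role itself (placeholder ARN)
      ]
    }
  }
}

resource "aws_iam_role" "terraform_backend" {
  name               = "terraform-backend"  # placeholder role name
  assume_role_policy = "${data.aws_iam_policy_document.self_trust.json}"
}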