Is it possible to deploy the Datadog agent on an AWS EKS cluster using Terraform?

I would like to know if anyone can guide me on how to install the Datadog agent as pods on my AWS EKS cluster. I am able to accomplish this with kubectl commands.

However, here I am looking for a possible solution to do the same job through a Terraform script, or for suggestions of any other automated way to deploy the Datadog agent on my EKS cluster.

Answer 1

The Helm provider for Terraform can be used to deploy packages to Kubernetes. This GitHub issue contains an example of how to use it to deploy the Datadog agent:

resource "helm_release" "datadog" {
  name          = "datadog"
  version       = "1.38.2"
  chart         = "stable/datadog"
  namespace     = kubernetes_namespace.datadog.metadata.0.name
  recreate_pods = true
  force_update  = true

  values = [<<YAML
image:
  repository: datadog/agent
  tag: 6.14.1-jmx
  pullPolicy: IfNotPresent
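# Datadog Cluster Agent: runs cluster-level checks and exposes the
# external metrics provider (for HPA on Datadog metrics)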
clusterAgent:
  containerName: cluster-agent
  image:
    repository: datadog/cluster-agent
    tag: 1.3.1
    pullPolicy: IfNotPresent
  enabled: true
  metricsProvider:
    enabled: true
  replicas: 1
  resources:
    requests:
      cpu: 200m
      memory: 256Mi
    limits:
      cpu: 400m
      memory: 512Mi
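# Node agent configuration; the API and app keys are read from
# pre-existing Kubernetes secrets rather than embedded in the values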
datadog:
  apiKeyExistingSecret: datadog-api-key
  apmEnabled: true
  appKeyExistingSecret: datadog-app-key
  collectEvents: true
  env:
    - name: DD_APM_IGNORE_RESOURCES
      value: "GET /webjars/.*, GET /v2/api-docs, GET /swagger-resources, GET /actuator/health, GET /_health, GET /manifest"
    - name: DD_KUBELET_TLS_VERIFY
      value: "false"
    - name: DD_COLLECT_EC2_TAGS
      value: "true"
    - name: DD_CUSTOM_SENSITIVE_WORDS
      value: "authorization"
    - name: DD_LOGS_CONFIG_K8S_CONTAINER_USE_FILE
      value: "true"
  leaderElection: true
  logsConfigContainerCollectAll: true
  logsEnabled: true
  logLevel: INFO
  name: datadog
  nonLocalTraffic: true
  processAgentEnabled: true
  resources:
    requests:
      cpu: 500m
      memory: 512Mi
    limits:
      cpu: 2000m
      memory: 2Gi
  tags:
    - env:${var.environment}
    - cluster:<my_cluster>
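  # Static check configurations (disk, vault, istio) mounted into the agents' conf.d directory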
  confd:
    disk.yaml: |-
        init_config:
        instances:
          - use_mount: true
            mount_point_whitelist:
              - /$
    vault.yaml: |-
        init_config:
        instances:
          - api_url: https://<some_vault_url>/v1
    istio.yaml: |-
        init_config:
        instances:
          - istio_mesh_endpoint: http://istio-telemetry.istio-system:42422/metrics
            mixer_endpoint: http://istio-telemetry.istio-system:15014/metrics
            galley_endpoint: http://istio-galley.istio-system:15014/metrics
            pilot_endpoint: http://istio-pilot.istio-system:15014/metrics
            citadel_endpoint: http://istio-citadel.istio-system:15014/metrics
            send_histograms_buckets: true
            send_monotonic_counter: true
  useCriSocketVolume: true
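# Node agent DaemonSet; the tolerations let agent pods schedule onto tainted
# control-plane/etcd nodes as well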
daemonset:
  enabled: true
  tolerations:
    - key: "node-role.kubernetes.io/controlplane"
      operator: "Exists"
      effect: "NoSchedule"
    - key: "node-role.kubernetes.io/controlplane"
      operator: "Exists"
      effect: "NoExecute"
    - key: "node-role.kubernetes.io/etcd"
      operator: "Exists"
      effect: "NoExecute"
    - key: "node-role.kubernetes.io/<node_taint>"
      operator: "Exists"
      effect: "NoSchedule"
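  # Ship customAgentConfig below to the agents as a ConfigMap (overrides datadog.yaml)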
  useConfigMap: true
  customAgentConfig:
    listeners:
      - name: kubelet
    config_providers:
      - name: kubelet
        polling: true
    apm_config:
      enabled: false
      apm_non_local_traffic: true
    jmx_use_cgroup_memory_limit: true
    logs_config:
      open_files_limit: 500
  updateStrategy:
    type: RollingUpdate
  useHostPort: true
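# Deploy the kube-state-metrics subchart, reusing a Terraform-managed
# service account instead of chart-created RBAC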
kubeStateMetrics:
  enabled: true
kube-state-metrics:
  rbac:
    create: false
  serviceAccount:
    create: false
    name: "${kubernetes_service_account.kube-state-metrics.metadata.0.name}"
rbac:
  create: false
  serviceAccountName: "${kubernetes_service_account.datadog-cluster-agent.metadata.0.name}"
YAML
  ]

  lifecycle {
    ignore_changes = [
      keyring,
    ]
  }
}
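
For this release to apply cleanly, the Helm provider has to be pointed at the EKS cluster, and the namespace, service accounts, and API/app key secrets referenced in the values above must already exist in state. Below is a minimal sketch of that supporting configuration, assuming hypothetical var.cluster_name and var.datadog_api_key variables that are not part of the original example (the kubernetes_service_account resources and their RBAC are omitted for brevity):

data "aws_eks_cluster" "this" {
  name = var.cluster_name
}

data "aws_eks_cluster_auth" "this" {
  name = var.cluster_name
}

# Authenticate the Helm provider against the EKS control plane.
provider "helm" {
  kubernetes {
    host                   = data.aws_eks_cluster.this.endpoint
    cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
    token                  = data.aws_eks_cluster_auth.this.token
  }
}

# Namespace referenced by the helm_release above.
resource "kubernetes_namespace" "datadog" {
  metadata {
    name = "datadog"
  }
}

# Secret referenced by datadog.apiKeyExistingSecret; the chart expects the
# value under the "api-key" key (datadog-app-key follows the same pattern
# with an "app-key" key). The provider base64-encodes "data" values itself.
resource "kubernetes_secret" "datadog_api_key" {
  metadata {
    name      = "datadog-api-key"
    namespace = kubernetes_namespace.datadog.metadata.0.name
  }

  data = {
    "api-key" = var.datadog_api_key
  }
}

Note that the stable charts repository used above has since been deprecated; with current versions of the provider you would instead set repository = "https://helm.datadoghq.com" and chart = "datadog". The chart's values layout has also changed across major versions, so the values shown here match the 1.x chart only.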
