- html - 出于某种原因,IE8 对我的 Sass 文件中继承的 html5 CSS 不友好?
- JMeter 在响应断言中使用 span 标签的问题
- html - 在 :hover and :active? 上具有不同效果的 CSS 动画
- html - 相对于居中的 html 内容固定的 CSS 重复背景?
我使用以下 Terraform 在 Azure 中创建了一个 Kubernetes 集群
# Locals block for hardcoded names
# Fixed names for the Application Gateway sub-resources. They are
# referenced by name inside azurerm_application_gateway.network below,
# so keep them stable.
locals {
backend_address_pool_name = "appgateway-beap"
frontend_port_name = "appgateway-feport"
frontend_ip_configuration_name = "appgateway-feip"
http_setting_name = "appgateway-be-htst"
listener_name = "appgateway-httplstn"
request_routing_rule_name = "appgateway-rqrt"
app_gateway_subnet_name = "appgateway-subnet"
}
# Existing subnet the AKS node pool is placed into (looked up, not
# created here; the VNet/RG names are environment-specific literals).
data "azurerm_subnet" "aks-subnet" {
name = "aks-subnet"
virtual_network_name = "np-dat-spoke-vnet"
resource_group_name = "ipz12-dat-np-connect-rg"
}
# Existing dedicated subnet for the Application Gateway (App Gateway
# requires a subnet of its own, separate from the AKS nodes).
data "azurerm_subnet" "appgateway-subnet" {
name = "appgateway-subnet"
virtual_network_name = "np-dat-spoke-vnet"
resource_group_name = "ipz12-dat-np-connect-rg"
}
# Create Resource Group for Kubernetes Cluster
# Wraps a local module; the final RG name is assembled inside the module
# from the app/subscription/environment/location/instance parts.
module "resource_group_kubernetes_cluster" {
source = "./modules/resource_group"
# Only create the RG when the cluster itself is enabled.
count = var.enable_kubernetes == true ? 1 : 0
#name_override = "rg-aks-spoke-dev-westus3-001"
app_or_service_name = "aks" # var.app_or_service_name
subscription_type = var.subscription_type # "spoke"
environment = var.environment # "dev"
location = var.location # "westus3"
instance_number = var.instance_number # "001"
tags = var.tags
}
# User-assigned managed identity for the Application Gateway; it is
# attached to the gateway below and later granted Contributor on it.
resource "azurerm_user_assigned_identity" "identity_uami" {
location = var.location
name = "appgw-uami"
# NOTE(review): indexes [0] unconditionally, but the module uses
# count = var.enable_kubernetes ? 1 : 0 — planning fails when
# enable_kubernetes is false. Consider propagating the count here.
resource_group_name = module.resource_group_kubernetes_cluster[0].name
}
# Application Gateway Public Ip
# Standard SKU + Static allocation are required for v2 App Gateway SKUs.
# NOTE(review): "test"/"publicIp1" are placeholder names — renaming
# would require updating references and a state move, so left as-is.
resource "azurerm_public_ip" "test" {
name = "publicIp1"
location = var.location
resource_group_name = module.resource_group_kubernetes_cluster[0].name
allocation_method = "Static"
sku = "Standard"
}
# Application Gateway fronting the AKS cluster. The pool/listener/rule
# sub-blocks are only an initial placeholder configuration: once the
# AGIC add-on (enabled on the cluster below) takes over, it rewrites
# them — hence the lifecycle ignore_changes list at the bottom.
resource "azurerm_application_gateway" "network" {
name = var.app_gateway_name
resource_group_name = module.resource_group_kubernetes_cluster[0].name
location = var.location
sku {
# NOTE(review): tier is pinned to Standard_v2 while name comes from a
# variable — the two must agree (e.g. name must be "Standard_v2").
name = var.app_gateway_sku
tier = "Standard_v2"
capacity = 2
}
# The gateway runs as the user-assigned identity created above.
identity {
type = "UserAssigned"
identity_ids = [
azurerm_user_assigned_identity.identity_uami.id
]
}
gateway_ip_configuration {
name = "appGatewayIpConfig"
subnet_id = data.azurerm_subnet.appgateway-subnet.id
}
frontend_port {
name = local.frontend_port_name
port = 80
}
frontend_port {
name = "httpsPort"
# NOTE(review): no HTTPS listener or ssl_certificate is defined, so
# this 443 port is currently unused.
port = 443
}
frontend_ip_configuration {
name = local.frontend_ip_configuration_name
public_ip_address_id = azurerm_public_ip.test.id
}
# Placeholder backend pool — AGIC populates members at runtime.
backend_address_pool {
name = local.backend_address_pool_name
}
backend_http_settings {
name = local.http_setting_name
cookie_based_affinity = "Disabled"
port = 80
protocol = "Http"
# NOTE(review): a 1-second backend timeout is very aggressive and
# will surface 502s for slow backends — TODO confirm intended (AGIC
# overwrites it anyway via ignore_changes below).
request_timeout = 1
}
http_listener {
name = local.listener_name
frontend_ip_configuration_name = local.frontend_ip_configuration_name
frontend_port_name = local.frontend_port_name
protocol = "Http"
}
request_routing_rule {
name = local.request_routing_rule_name
rule_type = "Basic"
http_listener_name = local.listener_name
backend_address_pool_name = local.backend_address_pool_name
backend_http_settings_name = local.http_setting_name
# priority is mandatory for v2 SKUs.
priority = 100
}
tags = var.tags
# Redundant: the public_ip_address_id reference above already orders
# these resources, but harmless.
depends_on = [azurerm_public_ip.test]
lifecycle {
# AGIC owns these sub-resources after cluster deployment; ignoring
# drift prevents Terraform from fighting the add-on on every plan.
ignore_changes = [
backend_address_pool,
backend_http_settings,
request_routing_rule,
http_listener,
probe,
tags,
frontend_port
]
}
}
# Create the Azure Kubernetes Service (AKS) Cluster
resource "azurerm_kubernetes_cluster" "kubernetes_cluster" {
count = var.enable_kubernetes == true ? 1 : 0
name = "aks-prjx-${var.subscription_type}-${var.environment}-${var.location}-${var.instance_number}"
location = var.location
resource_group_name = module.resource_group_kubernetes_cluster[0].name # "rg-aks-spoke-dev-westus3-001"
dns_prefix = "dns-aks-prjx-${var.subscription_type}-${var.environment}-${var.location}-${var.instance_number}" #"dns-prjxcluster"
private_cluster_enabled = false
# Local admin kubeconfig is disabled; all access goes through Azure AD.
local_account_disabled = true
default_node_pool {
name = "npprjx${var.subscription_type}" #"prjxsyspool" # NOTE: "name must start with a lowercase letter, have max length of 12, and only have characters a-z0-9."
vm_size = "Standard_B8ms"
# Nodes land in the pre-existing spoke subnet looked up above.
vnet_subnet_id = data.azurerm_subnet.aks-subnet.id
# zones = ["1", "2", "3"]
enable_auto_scaling = true
max_count = 3
min_count = 1
# node_count = 3
os_disk_size_gb = 50
type = "VirtualMachineScaleSets"
enable_node_public_ip = false
enable_host_encryption = false
node_labels = {
"node_pool_type" = "npprjx${var.subscription_type}"
"node_pool_os" = "linux"
"environment" = "${var.environment}"
"app" = "prjx_${var.subscription_type}_app"
}
tags = var.tags
}
# Enables the AGIC add-on against the Application Gateway created above.
ingress_application_gateway {
gateway_id = azurerm_application_gateway.network.id
}
# Enabled the cluster configuration to the Azure kubernets with RBAC
azure_active_directory_role_based_access_control {
managed = true
admin_group_object_ids = var.active_directory_role_based_access_control_admin_group_object_ids
azure_rbac_enabled = true #false
}
network_profile {
network_plugin = "azure"
network_policy = "azure"
# NOTE(review): userDefinedRouting requires a route table with a
# default route attached to the AKS subnet; without that UDR, egress
# (and the LoadBalancer data path) will not work — verify the route
# table exists. This is a likely cause of an unreachable external IP.
outbound_type = "userDefinedRouting"
}
identity {
type = "SystemAssigned"
}
oms_agent {
# References a log_analytics_workspace module defined elsewhere in
# the project (not visible in this file).
log_analytics_workspace_id = module.log_analytics_workspace[0].id
}
timeouts {
create = "20m"
delete = "20m"
}
# Redundant: already implied by the gateway_id reference above.
depends_on = [
azurerm_application_gateway.network
]
}
并提供必要的权限
# Get the AKS Agent Pool SystemAssigned Identity
# Looks up the "<cluster>-agentpool" kubelet identity that AKS creates
# automatically inside the cluster's node resource group.
data "azurerm_user_assigned_identity" "aks-identity" {
  name = "${azurerm_kubernetes_cluster.kubernetes_cluster[0].name}-agentpool"
  # Use the node_resource_group attribute exported by the cluster instead
  # of a hand-built "MC_..." string: the original literal hard-coded the
  # region as "eastus" ("..._aks-prjx-spoke-dev-eastus-001_eastus") while
  # the rest of this file derives names from var.location, so the lookup
  # broke as soon as the variables diverged from that one environment.
  resource_group_name = azurerm_kubernetes_cluster.kubernetes_cluster[0].node_resource_group
}
# Get the AKS SystemAssigned Identity
# NOTE(review): this looks up an AAD service principal whose display
# name equals the cluster name. For a cluster using a SystemAssigned
# managed identity (as configured above) there may be no such classic
# service principal — TODO confirm this data source actually resolves.
data "azuread_service_principal" "aks-sp" {
display_name = azurerm_kubernetes_cluster.kubernetes_cluster[0].name
}
# Provide ACR Pull permission to AKS SystemAssigned Identity
# (granted to the kubelet/agent-pool identity, which is what actually
# pulls images onto the nodes; the registry module is defined elsewhere).
resource "azurerm_role_assignment" "acrpull_role" {
scope = module.container_registry[0].id
role_definition_name = "AcrPull"
principal_id = data.azurerm_user_assigned_identity.aks-identity.principal_id
# New managed identities may not be replicated in AAD yet; skip the check.
skip_service_principal_aad_check = true
# Redundant: implied by the principal_id reference above.
depends_on = [
data.azurerm_user_assigned_identity.aks-identity
]
}
# Network Contributor on the AKS node subnet and the App Gateway subnet
# for both the kubelet (agent-pool) identity and the looked-up service
# principal, so the cluster can manage IP configurations and internal
# load balancers in those subnets. The depends_on entries below are all
# redundant (implied by the principal_id references) but harmless.
resource "azurerm_role_assignment" "aks_id_network_contributor_subnet" {
scope = data.azurerm_subnet.aks-subnet.id
role_definition_name = "Network Contributor"
principal_id = data.azurerm_user_assigned_identity.aks-identity.principal_id
depends_on = [data.azurerm_user_assigned_identity.aks-identity]
}
resource "azurerm_role_assignment" "akssp_network_contributor_subnet" {
scope = data.azurerm_subnet.aks-subnet.id
role_definition_name = "Network Contributor"
principal_id = data.azuread_service_principal.aks-sp.object_id
depends_on = [data.azuread_service_principal.aks-sp]
}
# Same pair of grants, but on the Application Gateway's subnet.
resource "azurerm_role_assignment" "aks_id_contributor_agw" {
scope = data.azurerm_subnet.appgateway-subnet.id
role_definition_name = "Network Contributor"
principal_id = data.azurerm_user_assigned_identity.aks-identity.principal_id
depends_on = [data.azurerm_user_assigned_identity.aks-identity]
}
resource "azurerm_role_assignment" "akssp_contributor_agw" {
scope = data.azurerm_subnet.appgateway-subnet.id
role_definition_name = "Network Contributor"
principal_id = data.azuread_service_principal.aks-sp.object_id
depends_on = [data.azuread_service_principal.aks-sp]
}
# Grants for the AGIC add-on identity (exported by the cluster resource)
# and for the gateway's user-assigned identity, so AGIC can reconfigure
# the Application Gateway at runtime.
# NOTE(review): Contributor is broader than AGIC's documented minimum
# (Contributor on the gateway plus Reader on its resource group) —
# consider narrowing the scopes/roles.
resource "azurerm_role_assignment" "aks_ingressid_contributor_on_agw" {
scope = azurerm_application_gateway.network.id
role_definition_name = "Contributor"
principal_id = azurerm_kubernetes_cluster.kubernetes_cluster[0].ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
depends_on = [azurerm_application_gateway.network,azurerm_kubernetes_cluster.kubernetes_cluster]
skip_service_principal_aad_check = true
}
# AGIC identity also needs rights over the UAMI attached to the gateway.
resource "azurerm_role_assignment" "aks_ingressid_contributor_on_uami" {
scope = azurerm_user_assigned_identity.identity_uami.id
role_definition_name = "Contributor"
principal_id = azurerm_kubernetes_cluster.kubernetes_cluster[0].ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
depends_on = [azurerm_application_gateway.network,azurerm_kubernetes_cluster.kubernetes_cluster]
skip_service_principal_aad_check = true
}
# The gateway's own UAMI gets Contributor on the gateway as well.
resource "azurerm_role_assignment" "uami_contributor_on_agw" {
scope = azurerm_application_gateway.network.id
role_definition_name = "Contributor"
principal_id = azurerm_user_assigned_identity.identity_uami.principal_id
depends_on = [azurerm_application_gateway.network,azurerm_user_assigned_identity.identity_uami]
skip_service_principal_aad_check = true
}
并部署了下面提到的应用程序
# Sample "hello world" Deployment plus a LoadBalancer Service.
# Fix: the pasted manifest had lost all indentation, which makes it
# invalid YAML; the canonical structure is restored here with the same
# keys and values.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: aks-helloworld
spec:
  replicas: 1
  selector:
    matchLabels:
      app: aks-helloworld-two
  template:
    metadata:
      labels:
        app: aks-helloworld-two
    spec:
      containers:
      - name: aks-helloworld-two
        image: mcr.microsoft.com/azuredocs/aks-helloworld:v1
        ports:
        - containerPort: 80
        env:
        - name: TITLE
          value: "AKS Ingress Demo"
---
apiVersion: v1
kind: Service
metadata:
  # NOTE(review): the Service name differs from the pod label
  # ("aks-helloworld" vs "aks-helloworld-two"); that is legal — the
  # selector below is what matters, and it matches the pod labels.
  name: aks-helloworld
spec:
  # Publicly exposed via an Azure load balancer; with outbound_type
  # userDefinedRouting on the cluster, reachability also depends on the
  # subnet's route table and NSG rules.
  type: LoadBalancer
  ports:
  - port: 80
  selector:
    app: aks-helloworld-two
外部IP已分配
但是我无法访问外部IP
注意:我没有像 Microsoft 文章中提到的那样单独部署任何 Ingress Controller,因为我不确定这是否是必需的。
最佳答案
我尝试在我的环境中重现相同的内容,以使用应用程序网关创建 Kubernetes 服务集群:
参照该 Stack Overflow 链接中的步骤,使用 Ingress 应用程序网关创建 Kubernetes 服务集群。
如果在 Azure Kubernetes 服务 (AKS) 中部署后无法使用外部负载均衡器 IP 访问应用程序,请验证 AKS 群集中的以下设置。
1.使用以下命令检查负载均衡器的状态。
kubectl get service <your service name>
确保 EXTERNAL-IP 字段未处于 pending(待处理)状态。
请按照以下步骤检查AKS集群中的NSG安全规则。
转到 Azure 门户 > Kubernetes 服务 > 选择您的 Kubernetes 服务 > 属性 > 在基础结构资源组 > 概述下选择您的资源组 > 选择您的 NSG 组。
我在网络安全组中禁用了入站http规则进行测试,得到了同样的错误。
在 NSG 中重新启用 80 端口后,应用程序状态恢复正常。
关于azure - AKS 服务部署已完成,但无法访问外部 IP,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/75165929/
我使用“创建 Kubernetes 集群”功能在 Azure 门户中创建了我的 AKS 集群,并允许它创建新的服务主体。 我开始怀疑这个主体使用的凭据是否过期。为了避免 K8s 在凭证到期时与 Azu
文档描述了如何将 ACR 附加到现有的 AKS 群集,https://learn.microsoft.com/en-us/azure/aks/cluster-container-registry-in
我创建了一个 AKS 集群,并启用了 AKS 管理的 Azure Active Directory 和基于角色的访问控制 (RBAC)。如果我尝试使用 Admin Azure AD 组中包含的其中一个
因此,我可以在 AKS 的见解选项卡中清楚地看到每个容器的统计信息。这些必须来自某个地方,但我只能在查询日志/指标时找到每个节点的统计信息。我如何查询这个(为了构建一个工作簿)。 最佳答案 该数据位于
我遇到的情况是,我的 AKS 集群已经就位,有两个 AKS 集群,并且它们仅在其安全区域内部可用。我不想通过互联网从另一个集群访问集群内的内部资源。 我正在探索专用链接服务和端点,有什么建议吗? 两个
好吧,在过去的两天里,我一直在与这个文档作斗争: https://learn.microsoft.com/en-au/azure/aks/static-ip和 https://learn.micros
我正在尝试找出使用 AKS 和 VSTS 为 Asp.Net Core Web 应用程序设置 CI/CD 的步骤。 https://learn.microsoft.com/en-us/vsts/bui
我遇到的情况是,我的 AKS 集群已经就位,有两个 AKS 集群,并且它们仅在其安全区域内部可用。我不想通过互联网从另一个集群访问集群内的内部资源。 我正在探索专用链接服务和端点,有什么建议吗? 两个
只是一个问题,在微软页面上 https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#bring-your-own-subnet-an
kubectl 任务无法将 list 文件部署到 AKS。管道失败并出现以下错误 ##[错误]未找到与/home/vsts/work/1/s/manifests 匹配的配置文件。 管道在运行两个阶段(
我一直在尝试设置 Kubernetes 1.13 AKS 部署以使用 HPA,但我一直遇到问题: NAME REFERENCE
我们公司封锁了 ssh 端口。如何使用 cloud shell ssh 进入 AKS 集群,以便我们可以从那里 curl 到外部 URL 来测试连接?谢了。 最佳答案 这实际上没有多大意义,但您只需要
我正在通过 azure 安装 istio az aks mesh enable --resource-group 'myrg' --name 'myk8s' 然后启用外部 istio 入口网关 az
查看地形 documentation我无法确定如何将 UAMI 分配为 kubelet_identity对于aks集群。 identity { ... }按照描述设置 controlPlane UAM
我正在通过 azure 安装 istio az aks mesh enable --resource-group 'myrg' --name 'myk8s' 然后启用外部 istio 入口网关 az
查看地形 documentation我无法确定如何将 UAMI 分配为 kubelet_identity对于aks集群。 identity { ... }按照描述设置 controlPlane UAM
我们可以从 Log Analytics Workspace 访问 pod 相关日志,但没有应用程序日志(类似于我们在 kubectl get events 中看到的)。 我指的是 Azure 文档,但
az aks create -n MyServices -g MyKubernetes --generate-ssh-keys 不工作。错误消息:az aks create -n Adestis-Se
我想知道如何通过 ssh 连接到 GKE 和 AKS 中的节点,以及如何在 Kubernetes 集群中安装 ELK 堆栈。 任何一步一步的链接都会对我有帮助。 无法使用此命令连接:gcloud co
使用azure aks get-credentials --admin可以获取kubernetes管理配置文件,azure aks get-credentials只能获取azure上的用户配置文件。
我是一名优秀的程序员,十分优秀!