我正在更新这个帖子,因为原来的版本没有得到太多回复。另外,自从用全新安装重新部署后,我不再看到之前那个 Packstack 报错。为了方便大家“先了解情况再帮我”,我把安装中所有相关的配置都贴了出来,希望您能发现我的配置错误。这是在 CentOS 7.6 上通过 Packstack 全新安装的 Rocky 版本,外加来自 http://cloudbase.it 的 HyperVNovaCompute_Beta.msi。我原本计划初始基础安装只使用 Hyper-V,不使用 OVS;等它运行起来之后,再用同一个控制器搭建第二台配置了 OVS 的 Hyper-V 服务器。我现在很难让 hyperv 代理(非 OVS)正常工作,也无法把“Servers”vSwitch 分配给新虚拟机。我参考了 v-magine 脚本安装时所用的设置,并查看其源代码来查找原因。(V-Magine 本身无法完成安装,我认为是因为 Packstack 有了新的更新,以及仓库中有让它卡住的软件包。)
如果您需要查看更多配置/设置,请告诉我。
设置如下
Hyper01- HyperV
NIC1(Broadcom NetXtreme 千兆以太网 #2):172.16.1.91,带 vSwitch“Servers”+ 允许管理(未选中/启用任何扩展)
OpenStack01- Hyper01 上的控制器虚拟机
eth0:172.16.1.90,已分配给“Servers”vSwitch,VMQ 已启用,MAC 欺骗已启用 # 用于管理访问
eth1(br-data):没有 IP,已分配给“Servers”vSwitch,VMQ 已启用,MAC 欺骗已启用 # 为便于将来与 Open vSwitch 一起使用;设置 Open vSwitch 时将移至新的 vSwitch
eth2(br-ext):没有 IP,已分配给“Servers”vSwitch,VMQ 已启用,MAC 欺骗已启用 # 为便于将来与 Open vSwitch 一起使用;设置 Open vSwitch 时将移至新的 vSwitch
控制器-openstack01(172.16.1.90):
# ip address
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:15:5d:01:5b:05 brd ff:ff:ff:ff:ff:ff
inet 172.16.1.90/24 brd 172.16.1.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::215:5dff:fe01:5b05/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ovs-system state UP group default qlen 1000
link/ether 00:15:5d:01:5b:08 brd ff:ff:ff:ff:ff:ff
inet6 fe80::215:5dff:fe01:5b08/64 scope link
valid_lft forever preferred_lft forever
4: eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ovs-system state UP group default qlen 1000
link/ether 00:15:5d:01:5b:09 brd ff:ff:ff:ff:ff:ff
inet6 fe80::215:5dff:fe01:5b09/64 scope link
valid_lft forever preferred_lft forever
5: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether f6:17:06:d6:d2:dd brd ff:ff:ff:ff:ff:ff
6: br-tun: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 12:81:ed:9b:a4:47 brd ff:ff:ff:ff:ff:ff
7: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether ca:45:3c:bd:83:43 brd ff:ff:ff:ff:ff:ff
8: br-data: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
link/ether 00:15:5d:01:5b:08 brd ff:ff:ff:ff:ff:ff
inet6 fe80::f812:f0ff:fe21:6f43/64 scope link
valid_lft forever preferred_lft forever
9: br-ext: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
link/ether 00:15:5d:01:5b:09 brd ff:ff:ff:ff:ff:ff
inet6 fe80::483:e6ff:fe1a:aa4e/64 scope link
valid_lft forever preferred_lft forever
# ovs-vsctl show
d80df210-f9bc-4506-9674-18ebb803b301
Manager "ptcp:6640:127.0.0.1"
is_connected: true
Bridge br-ext
Controller "tcp:127.0.0.1:6633"
is_connected: true
fail_mode: secure
Port phy-br-ext
Interface phy-br-ext
type: patch
options: {peer=int-br-ext}
Port "eth2"
Interface "eth2"
Port br-ext
Interface br-ext
type: internal
Bridge br-data
Controller "tcp:127.0.0.1:6633"
is_connected: true
fail_mode: secure
Port "eth1"
Interface "eth1"
Port phy-br-data
Interface phy-br-data
type: patch
options: {peer=int-br-data}
Port br-data
Interface br-data
type: internal
Bridge br-tun
Controller "tcp:127.0.0.1:6633"
is_connected: true
fail_mode: secure
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-int
Controller "tcp:127.0.0.1:6633"
is_connected: true
fail_mode: secure
Port int-br-data
Interface int-br-data
type: patch
options: {peer=phy-br-data}
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port br-int
Interface br-int
type: internal
Port int-br-ext
Interface int-br-ext
type: patch
options: {peer=phy-br-ext}
Port "qr-fd8d84fe-71"
tag: 1
Interface "qr-fd8d84fe-71"
type: internal
Port "tapd2b841f2-41"
tag: 1
Interface "tapd2b841f2-41"
type: internal
Port "qg-75a54182-7e"
tag: 2
Interface "qg-75a54182-7e"
type: internal
ovs_version: "2.10.1"
/etc/nova/nova.conf
[DEFAULT]
debug=False
instance_usage_audit_period=hour
rootwrap_config=/etc/nova/rootwrap.conf
compute_driver=libvirt.LibvirtDriver
allow_resize_to_same_host=True
vif_plugging_is_fatal=True
vif_plugging_timeout=300
force_raw_images=True
reserved_host_memory_mb=512
cpu_allocation_ratio=16.0
ram_allocation_ratio=1.5
instance_usage_audit=True
heal_instance_info_cache_interval=60
force_snat_range=0.0.0.0/0
metadata_host=172.16.1.90
dhcp_domain=novalocal
firewall_driver=nova.virt.firewall.NoopFirewallDriver
ssl_only=False
state_path=/var/lib/nova
report_interval=10
service_down_time=60
enabled_apis=osapi_compute,metadata
osapi_compute_listen=0.0.0.0
osapi_compute_listen_port=8774
osapi_compute_workers=2
metadata_listen=0.0.0.0
metadata_listen_port=8775
metadata_workers=2
log_dir=/var/log/nova
transport_url=rabbit://openstack:***@172.16.1.90:5672/
volume_api_class=nova.volume.cinder.API
[api]
auth_strategy=keystone
use_forwarded_for=False
fping_path=/usr/sbin/fping
[api_database]
connection=mysql+pymysql://nova_api:***@172.16.1.90/nova_api
[conductor]
workers=2
[database]
connection=mysql+pymysql://nova:***@172.16.1.90/nova
[filter_scheduler]
host_subset_size=1
max_io_ops_per_host=8
max_instances_per_host=50
available_filters=nova.scheduler.filters.all_filters
enabled_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,CoreFilter
weight_classes=nova.scheduler.weights.all_weighers
[glance]
api_servers=http://172.16.1.90:9292
[key_manager]
backend=nova.keymgr.conf_key_mgr.ConfKeyManager
[keystone_authtoken]
www_authenticate_uri=http://172.16.1.90:5000/
auth_uri=http://172.16.1.90:5000/
auth_type=password
auth_url=http://172.16.1.90:5000/v3
username=nova
password=***
user_domain_name=Default
project_name=services
project_domain_name=Default
[libvirt]
virt_type=qemu
inject_password=False
inject_key=False
inject_partition=-2
live_migration_uri=qemu+tcp://%s/system
cpu_mode=none
vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
[neutron]
url=http://172.16.1.90:9696
ovs_bridge=br-int
default_floating_pool=public
extension_sync_interval=600
service_metadata_proxy=True
metadata_proxy_shared_secret=***
timeout=60
auth_type=v3password
auth_url=http://172.16.1.90:5000/v3
project_name=services
project_domain_name=Default
username=neutron
user_domain_name=Default
password=***
region_name=SouthCentral
[notifications]
notify_on_state_change=vm_and_task_state
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_notifications]
driver=messagingv2
[oslo_messaging_rabbit]
ssl=False
[oslo_policy]
policy_file=/etc/nova/policy.json
[placement]
auth_type=password
auth_url=http://172.16.1.90:5000/v3
project_name=services
project_domain_name=Default
username=placement
user_domain_name=Default
password=***
region_name=SouthCentral
[placement_database]
connection=mysql+pymysql://nova_placement:***@172.16.1.90/nova_placement
[scheduler]
driver=filter_scheduler
max_attempts=3
workers=2
[spice]
enabled=False
[vendordata_dynamic_auth]
project_domain_name=Default
user_domain_name=Default
[vnc]
enabled=True
keymap=en-us
novncproxy_base_url=http://172.16.1.90:6080/vnc_auto.html
novncproxy_host=0.0.0.0
novncproxy_port=6080
auth_schemes=none
vncserver_proxyclient_address=openstack01
vncserver_listen=0.0.0.0
[wsgi]
api_paste_config=api-paste.ini
/etc/neutron/neutron.conf
[DEFAULT]
debug=False
bind_host=0.0.0.0
auth_strategy=keystone
core_plugin=neutron.plugins.ml2.plugin.Ml2Plugin
service_plugins=qos,trunk,router,metering
allow_overlapping_ips=True
notify_nova_on_port_status_changes=True
notify_nova_on_port_data_changes=True
api_workers=2
rpc_workers=2
router_scheduler_driver=neutron.scheduler.l3_agent_scheduler.ChanceScheduler
l3_ha=False
max_l3_agents_per_router=3
log_dir=/var/log/neutron
transport_url=rabbit://***:***@172.16.1.90:5672/
control_exchange=neutron
[agent]
root_helper=sudo neutron-rootwrap /etc/neutron/rootwrap.conf
[database]
connection=mysql+pymysql://neutron:***@172.16.1.90/neutron
[keystone_authtoken]
www_authenticate_uri=http://172.16.1.90:5000/
auth_uri=http://172.16.1.90:5000/
auth_type=password
auth_url=http://172.16.1.90:5000/v3
username=neutron
password=***
user_domain_name=Default
project_name=services
project_domain_name=Default
[nova]
region_name=SouthCentral
auth_url=http://172.16.1.90:5000/v3
auth_type=password
password=***
project_domain_id=default
project_domain_name=Default
project_name=services
tenant_name=services
user_domain_id=default
user_domain_name=Default
username=nova
[oslo_concurrency]
lock_path=$state_path/lock
[oslo_messaging_rabbit]
ssl=False
[oslo_policy]
policy_file=/etc/neutron/policy.json
/etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
debug=False
[ml2]
type_drivers=vxlan,flat
tenant_network_types=vxlan
mechanism_drivers=openvswitch,hyperv
extension_drivers=port_security,qos
path_mtu=0
[ml2_type_flat]
flat_networks=*
[ml2_type_vxlan]
vni_ranges=10:100
vxlan_group=224.0.0.1
[securitygroup]
firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group=True
/etc/neutron/plugins/ml2/openvswitch_agent.ini
[DEFAULT]
debug=False
[agent]
tunnel_types=vxlan
vxlan_udp_port=4789
l2_population=False
drop_flows_on_start=False
[ovs]
integration_bridge=br-int
tunnel_bridge=br-tun
local_ip=172.16.1.90
bridge_mappings=physnet1:br-data,extnet:br-ext
[securitygroup]
firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
计算 - Hyper01(172.16.1.91):
PS> Get-NetAdapter
Name InterfaceDescription ifIndex Status MacAddress LinkSpeed
---- -------------------- ------- ------ ---------- ---------
vEthernet (Servers) Hyper-V Virtual Ethernet Adapter 2 Up 00-10-18-14-2C-3A 1 Gbps
Ethernet Broadcom NetXtreme Gigabit Ethernet #2 3 Up 00-10-18-14-2C-3A 1 Gbps
Storage SAN QLogic Dual Port 10 Gigabit Ethernet... 5 Up 00-0E-1E-04-D0-14 10 Gbps
Unused SAN QLogic Dual Port 10 Gigabit Ethern...#2 7 Disconnected 00-0E-1E-04-D0-10 0 bps
使用的计算安装命令(HyperVNovaCompute_Beta.msi 于 2019 年 1 月 14 日晚上 11:03 下载)
msiexec /I HyperVNovaCompute_Beta.msi /l*v nova_install.log ADDLOCAL=HyperVNovaCompute,NeutronHyperVAgent,iSCSISWInitiator,FreeRDP PLACEMENTAUTHURL="http://172.16.1.90:5000/v3" PLACEMENTPASSWORD=*** PLACEMENTPROJECTNAME=services PLACEMENTUSERNAME=placement PLACEMENTDOMAINNAME=Default PLACEMENTUSERDOMAINNAME=Default RPCTRANSPORTURL="rabbit://openstack:***@172.16.1.90:5672/" GLANCEURL="http://172.16.1.90:9292" INSTANCESPATH="\\STORAGE\VMs\" LOGDIR="C:\OpenStack\Log\" RDPCONSOLEURL="http://Hyper01:8000" ADDVSWITCH=0 VSWITCHNAME="Servers" USECOWIMAGES=0 FORCECONFIGDRIVE=1 CONFIGDRIVEINJECTPASSWORD=1 DYNAMICMEMORYRATIO=1 ENABLELOGGING=1 VERBOSELOGGING=1 NEUTRONURL="http://172.16.1.90:9696" NEUTRONADMINTENANTNAME=services NEUTRONADMINUSERNAME=neutron NEUTRONADMINPASSWORD=*** NEUTRONADMINAUTHURL="http://172.16.1.90:5000/v3" NEUTRONDOMAINNAME=Default NEUTRONUSERDOMAINNAME=Default NOVACOMPUTESERVICEUSER="svc-nova-compute" NOVACOMPUTESERVICEPASSWORD=***
C:\Program Files\Cloudbase Solutions\OpenStack\Nova\etc\nova.conf
[DEFAULT]
debug=False
compute_driver=compute_hyperv.driver.HyperVDriver
instances_path=\\STORAGE\VMs\
use_cow_images=false
force_config_drive=true
flat_injected=true
mkisofs_cmd=C:\Program Files\Cloudbase Solutions\OpenStack\Nova\bin\mkisofs.exe
allow_resize_to_same_host=true
running_deleted_instance_poll_interval=120
resize_confirm_window=5
resume_guests_state_on_host_boot=true
transport_url=rabbit://openstack:***@172.16.1.90:5672/
rpc_response_timeout=1800
lock_path=C:\OpenStack\Log\
vif_plugging_is_fatal=false
vif_plugging_timeout=60
block_device_allocate_retries=600
log_dir=C:\OpenStack\Log\
log_file=nova-compute.log
instance_usage_audit=true
instance_usage_audit_period=hour
[placement]
auth_strategy=keystone
auth_type=password
auth_url=http://172.16.1.90:5000/v3
project_name=services
username=placement
password=***
project_domain_name=Default
user_domain_name=Default
os_region_name=SouthCentral
[notifications]
notify_on_state_change=vm_and_task_state
[glance]
api_servers=http://172.16.1.90:9292
[hyperv]
limit_cpu_features=false
config_drive_inject_password=true
qemu_img_cmd=C:\Program Files\Cloudbase Solutions\OpenStack\Nova\bin\qemu-img.exe
config_drive_cdrom=true
dynamic_memory_ratio=1
enable_instance_metrics_collection=true
[os_win]
cache_temporary_wmi_objects=false
[rdp]
enabled=true
html5_proxy_base_url=http://Hyper01:8000
[neutron]
url=http://172.16.1.90:9696
auth_strategy=keystone
project_name=services
username=neutron
password=
auth_url=http://172.16.1.90:5000/v3
project_domain_name=Default
user_domain_name=Default
os_region_name=SouthCentral
auth_type=password
service_metadata_proxy = true
metadata_proxy_shared_secret = ***
C:\Program Files\Cloudbase Solutions\OpenStack\Nova\etc\neutron_hyperv_agent.conf
[DEFAULT]
control_exchange=neutron
transport_url=rabbit://openstack:***@172.16.1.90:5672
log_dir=C:\OpenStack\Log\
log_file=neutron-hyperv-agent.log
[AGENT]
polling_interval=2
physical_network_vswitch_mappings=*:Servers
enable_metrics_collection=true
enable_qos_extension=false
[SECURITYGROUP]
firewall_driver=hyperv
enable_security_group=true