场景:当我使用 Docker Desktop for Windows(这里是 Windows 10 Pro)时,我的 docker-compose.yml 工作正常。由于我必须在同一台笔记本上使用 VirtualBox + Minishift,我在其他地方读到建议卸载 Docker Desktop for Windows 并安装 Docker Toolbox(主要是因为 Docker Desktop 依赖 Hyper-V,如果我不禁用 Hyper-V,就无法启动 VirtualBox 和 Minishift)。到目前为止一切顺利:我成功启动了 minishift(OpenShift 简化版),也成功启动了 docker。
问题是,在我卸载 Docker For Windows 并安装 Docker ToolBox 后,我收到下面的错误。
似乎是在“挂载”(mount)卷时或某些路径上出了问题,但我想不出该从哪里着手修复。
以防万一,我在这里还附上了完整的 docker-compose 文件及其依赖的所有 yml 和 conf 文件。如果你想重现该问题,下面粘贴的所有文件也可以通过克隆 GitHub 仓库获取。
docker-compose.yml
version: '3.2'
services:
kibana:
image: docker.elastic.co/kibana/kibana:7.5.2
volumes:
- "./kibana.yml:/usr/share/kibana/config/kibana.yml"
restart: always
environment:
- SERVER_NAME=kibana.localhost
- ELASTICSEARCH_HOSTS=http://elasticsearch:9200
ports:
- "5601:5601"
links:
- elasticsearch
depends_on:
- elasticsearch
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:7.5.2
environment:
- cluster.name=docker-cluster
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- xpack.security.enabled=false
- xpack.watcher.enabled=false
- discovery.type=single-node
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- "./esdata:/usr/share/elasticsearch/data"
ports:
- "9200:9200"
logstash:
image: docker.elastic.co/logstash/logstash:7.5.2
volumes:
- "./logstash.conf:/config-dir/logstash.conf"
restart: always
command: logstash -f /config-dir/logstash.conf
ports:
- "9600:9600"
- "7777:7777"
links:
- elasticsearch
- kafka1
- kafka2
- kafka3
kafka1:
image: wurstmeister/kafka
command: [start-kafka.sh]
depends_on:
- zoo1
- zoo2
- zoo3
links:
- zoo1
- zoo2
- zoo3
ports:
- "9092:9092"
environment:
KAFKA_LISTENERS: PLAINTEXT://:9092
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092
KAFKA_BROKER_ID: 1
KAFKA_ADVERTISED_PORT: 9092
KAFKA_LOG_RETENTION_HOURS: "168"
KAFKA_LOG_RETENTION_BYTES: "100000000"
KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
KAFKA_CREATE_TOPICS: "log:3:3"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
kafka2:
image: wurstmeister/kafka
depends_on:
- zoo1
- zoo2
- zoo3
links:
- zoo1
- zoo2
- zoo3
ports:
- "9093:9092"
environment:
KAFKA_LISTENERS: PLAINTEXT://:9092
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092
KAFKA_BROKER_ID: 2
KAFKA_ADVERTISED_PORT: 9092
KAFKA_LOG_RETENTION_HOURS: "168"
KAFKA_LOG_RETENTION_BYTES: "100000000"
KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
KAFKA_CREATE_TOPICS: "log:3:3"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
kafka3:
image: wurstmeister/kafka
depends_on:
- zoo1
- zoo2
- zoo3
links:
- zoo1
- zoo2
- zoo3
ports:
- "9094:9092"
environment:
KAFKA_LISTENERS: PLAINTEXT://:9092
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092
KAFKA_BROKER_ID: 3
KAFKA_ADVERTISED_PORT: 9092
KAFKA_LOG_RETENTION_HOURS: "168"
KAFKA_LOG_RETENTION_BYTES: "100000000"
KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
KAFKA_CREATE_TOPICS: "log:3:3"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
zoo1:
image: elevy/zookeeper:latest
environment:
MYID: 1
SERVERS: zoo1,zoo2,zoo3
ports:
- "2181:2181"
zoo2:
image: elevy/zookeeper:latest
environment:
MYID: 2
SERVERS: zoo1,zoo2,zoo3
ports:
- "2182:2181"
zoo3:
image: elevy/zookeeper:latest
environment:
MYID: 3
SERVERS: zoo1,zoo2,zoo3
ports:
- "2183:2181"
filebeat:
image: docker.elastic.co/beats/filebeat:7.5.2
volumes:
- "./filebeat.yml:/usr/share/filebeat/filebeat.yml:ro"
- "./sample-logs:/sample-logs"
links:
- kafka1
- kafka2
- kafka3
depends_on:
- kafka1
- kafka2
- kafka3
filebeat.yml
# Filebeat: tail the mounted sample logs and publish each line to the
# three-broker Kafka cluster on topic "log".
filebeat.inputs:
  - paths:
      - /sample-logs/*.log
    tags:
      - request-sample
    # NOTE(review): input_type/document_type are pre-7.x option names; in
    # Filebeat 7.5 the input type is declared as "type: log" — confirm
    # against the Filebeat 7.5 input reference.
    input_type: log
    document_type: request-sample
    # Place custom fields at the event root instead of under "fields".
    fields_under_root: true

output.kafka:
  hosts: ["kafka1:9092", "kafka2:9092", "kafka3:9092"]
  topic: 'log'
  partition.round_robin:
    # Also publish to partitions whose leader is currently unreachable.
    reachable_only: false
  # Require leader-only acknowledgement (acks=1).
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000
kibana.yml
# Kibana settings, bind-mounted into the container at
# /usr/share/kibana/config/kibana.yml by docker-compose.
server.name: kibana
# "0" is Kibana shorthand for binding all interfaces (0.0.0.0), so the
# published port 5601 is reachable from the host.
server.host: "0"
xpack.monitoring.ui.container.elasticsearch.enabled: false
#opendistro_security.multitenancy.enabled: false
# Skip TLS certificate verification toward Elasticsearch (local dev only).
elasticsearch.ssl.verificationMode: none
logstash.conf(Logstash 配置文件)
# Logstash pipeline: consume JSON events from Kafka topic "log", grok-parse
# Apache common-log lines for "request-sample" events, index into Elasticsearch.
input {
  kafka {
    bootstrap_servers => "kafka1:9092,kafka2:9092,kafka3:9092"
    client_id => "logstash"
    group_id => "logstash"
    # One consumer thread per topic partition (topic is created with 3).
    consumer_threads => 3
    topics => ["log"]
    codec => "json"
    tags => ["log", "kafka_source"]
    # NOTE(review): this sets type "log" on incoming events; the filter below
    # matches type "request-sample", so verify events actually arrive with
    # that type (e.g. set by Filebeat) or the filter never fires.
    type => "log"
  }
}

filter {
  # Only parse events marked as Apache-style request samples.
  if [type] == "request-sample" {
    grok {
      match => { "message" => "%{COMMONAPACHELOG}" }
    }
    date {
      # Promote the parsed Apache timestamp to @timestamp, then drop the field.
      match => ["timestamp", "dd/MMM/yyyy:HH:mm:ss Z"]
      remove_field => ["timestamp"]
    }
  }
}

output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    # Daily index per event type, e.g. logstash-log-2020.01.31.
    index => "logstash-%{[type]}-%{+YYYY.MM.dd}"
  }
  # Echo each event to stdout for debugging.
  stdout { codec => rubydebug }
}
运行 docker-compose 时的日志
SPANOT149+Cast@SPANOT149 MINGW64 /c/Dockers/megalog-try-1 (master)
$ docker-compose down Stopping megalog-try-1_logstash_1 ... done Stopping megalog-try-1_kafka2_1 ... done Stopping megalog-try-1_kafka3_1 ... done Stopping megalog-try-1_kafka1_1 ... done Stopping megalog-try-1_zoo3_1 ... done Stopping megalog-try-1_zoo2_1 ... done Stopping megalog-try-1_zoo1_1 ... done Removing megalog-try-1_logstash_1 ... done Removing megalog-try-1_filebeat_1 ... done Removing megalog-try-1_kibana_1 ... done Removing megalog-try-1_kafka2_1 ... done Removing megalog-try-1_kafka3_1 ... done Removing megalog-try-1_kafka1_1 ... done Removing megalog-try-1_zoo3_1 ... done Removing megalog-try-1_elasticsearch_1 ... done Removing megalog-try-1_zoo2_1 ... done Removing megalog-try-1_zoo1_1 ... done Removing network megalog-try-1_default
SPANOT149+Cast@SPANOT149 MINGW64 /c/Dockers/megalog-try-1 (master)
$ docker volume prune
WARNING! This will remove all local volumes not used by at least one container.
Are you sure you want to continue? [y/N] y
Deleted Volumes:
10a8f1ae070802866a7072ded8a65fb8cee80fbbcf527126869a69778089d514
f71c1e6bd951a256cf746d538528baa9f34b0da57ca628f03fbabbb1eb9c1133
0d8ed8cff8f2cdd1870f4f3757d15203432b9a549d13d9094c090a41f11e66c5
Total reclaimed space: 62.92MB
SPANOT149+Cast@SPANOT149 MINGW64 /c/Dockers/megalog-try-1 (master)
$ docker container prune
WARNING! This will remove all stopped containers.
Are you sure you want to continue? [y/N] y
Total reclaimed space: 0B
SPANOT149+Cast@SPANOT149 MINGW64 /c/Dockers/megalog-try-1 (master)
$ docker-compose up -d --remove-orphans
Creating network "megalog-try-1_default" with the default driver
Creating megalog-try-1_zoo3_1 ... done Creating megalog-try-1_zoo2_1 ... done Creating megalog-try-1_elasticsearch_1 ... done Creating megalog-try-1_zoo1_1 ... done Creating megalog-try-1_kafka3_1 ...
Creating megalog-try-1_kafka1_1 ...
Creating megalog-try-1_kafka2_1 ...
Creating megalog-try-1_kibana_1 ... error
ERROR: for megalog-try-1_kibana_1 Cannot start service kibana: OCI runtime create failed: container_linux.go:346: starting container process caused "process_linux.go:449: container init caused \"rootfs_linux.go:58: mounting \\\"/c/Dockers/Creating megalog-try-1_kafka3_1 ... done Creating megalog-try-1_kafka1_1 ... done Creating megalog-try-1_kafka2_1 ... done ing to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type
Creating megalog-try-1_logstash_1 ...
Creating megalog-try-1_filebeat_1 ... error
ERROR: for megalog-try-1_filebeat_1 Cannot start service filebeat: OCI runtime create failed: container_linux.go:346: starting container process caused "process_linux.go:449: container init caused \"rootfs_linux.go:58: mounting \\\"/c/Dockers/megalog-try-1/filebeat.yml\\\" to rootfs \\\"/mnt/sda1/var/lib/docker/overlay2/445ddec2e92149c077681b1daf3f0723dbc3c3f821541fb28252501ae122a4bf/merged\\\" at \\\"/mnt/sda1/var/lib/docker/overlay2/445ddec2e92149c077681b1daf3f0723dbc3c3f821541fb28252501ae122a4bf/merged/usr/share/filebeat/filebeat.yml\\\" caused \\\"not a directory\\\"\"": unknown: Are you Creating megalog-try-1_logstash_1 ... done e
ERROR: for kibana Cannot start service kibana: OCI runtime create failed: container_linux.go:346: starting container process caused "process_linux.go:449: container init caused \"rootfs_linux.go:58: mounting \\\"/c/Dockers/megalog-try-1/kibana.yml\\\" to rootfs \\\"/mnt/sda1/var/lib/docker/overlay2/36dbfb01925aef8cc05376a57e88d2f7e3b85bd47af9549e13a9c4f5ad516ae3/merged\\\" at \\\"/mnt/sda1/var/lib/docker/overlay2/36dbfb01925aef8cc05376a57e88d2f7e3b85bd47af9549e13a9c4f5ad516ae3/merged/usr/share/kibana/config/kibana.yml\\\" caused \\\"not a directory\\\"\"": unknown: Are you trying to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type
ERROR: for filebeat Cannot start service filebeat: OCI runtime create failed: container_linux.go:346: starting container process caused "process_linux.go:449: container init caused \"rootfs_linux.go:58: mounting \\\"/c/Dockers/megalog-try-1/filebeat.yml\\\" to rootfs \\\"/mnt/sda1/var/lib/docker/overlay2/445ddec2e92149c077681b1daf3f0723dbc3c3f821541fb28252501ae122a4bf/merged\\\" at \\\"/mnt/sda1/var/lib/docker/overlay2/445ddec2e92149c077681b1daf3f0723dbc3c3f821541fb28252501ae122a4bf/merged/usr/share/filebeat/filebeat.yml\\\" caused \\\"not a directory\\\"\"": unknown: Are you trying to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type
ERROR: Encountered errors while bringing up the project.
SPANOT149+Cast@SPANOT149 MINGW64 /c/Dockers/megalog-try-1 (master)
答案1
默认情况下,Docker Toolbox 只把 c:\Users(区分大小写)目录共享进虚拟机,供容器作为卷访问。你需要在 VirtualBox 中为该虚拟机添加额外的共享文件夹,才能把其他路径挂载为卷:
打开 VirtualBox UI。
单击“设置”齿轮,然后转到“共享文件夹”。
选择机器文件夹下的任何现有列表,然后单击 + 图标。
选择主机上的文件夹路径,输入虚拟机内的文件夹名称(或采用默认值,与主机上的名称相同),并配置您需要的任何其他选项。
如果您希望文件夹自动挂载到虚拟机中,请选择“自动挂载”,并选择“设为永久”以将其视为永久共享文件夹。
单击“确定”将新文件夹添加到共享文件夹列表。
再次单击“确定”保存更改并退出“设置”对话框。
参考: https://docs.docker.com/toolbox/toolbox_install_windows/#optional-add-shared-directories