Hi SharpDove45,
You mean you'd like to set up a ClearML Server that will use a remotely-managed MongoDB?
Well, the server's hosts.conf file contains the connection strings for MongoDB (see https://github.com/allegroai/clearml-server/blob/6434f1028e6e7fd2479b22fe553f7bca3f8a716f/apiserver/config/default/hosts.conf#L25 )
You can override these using environment variables of the form:
CLEARML__HOSTS__MONGO__BACKEND__HOST=mongodb://<user>:<pass>@<hostname>:27017/backend
CLEARML__HOSTS__MONGO__AUTH__HOST=mongodb://<user>:<pass>@<hostname>:27017/auth
for the two MongoDB databases used by the server.
Use double quotes:
CLEARML__HOSTS__MONGO__BACKEND__HOST="mongodb://<user>:<pass>@<hostname>:27017/backend"
CLEARML__HOSTS__MONGO__AUTH__HOST="mongodb://<user>:<pass>@<hostname>:27017/auth"
I tried running the string through config_parser.ConfigFactory() (parse_string) and got the same error.
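For reference, here is a minimal sketch of that parse attempt, assuming the parser in question is pyhocon (whose ConfigFactory.parse_string is backed by pyparsing); the URI below is a placeholder, not the real connection string:

```
from pyhocon import ConfigFactory
from pyparsing import ParseException

# Unquoted HOCON value: per the HOCON spec, '@' is not allowed in an
# unquoted string, which matches the ParseException reported later in
# this thread.
try:
    ConfigFactory.parse_string("host = mongodb://user:pass@mongodb0.example.com:27017/auth")
except ParseException as e:
    print("parse failed:", e)

# Quoting the value keeps the whole URI as a single string literal.
conf = ConfigFactory.parse_string('host = "mongodb://user:pass@mongodb0.example.com:27017/auth"')
print(conf["host"])
```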
SharpDove45, can you try this instead:
TRAINS__HOSTS__MONGO__BACKEND__HOST: "\"mongodb://myDBReader:D1fficultP%40ssw0rd@mongodb0.example.com:27017/?authSource=admin\""
TRAINS__HOSTS__MONGO__AUTH__HOST: "\"mongodb://myDBReader:D1fficultP%40ssw0rd@mongodb0.example.com:27017/?authSource=admin\""
You might need to revert your escaping code (i.e. the use of %40 etc.)
[{{"[" ["^"]} Group:({{Group:({{{{{W:(\, \[]-...) | Re:('\\\\0?[xX][0-9a-fA-F]+')} | Re:('\\\\0[0-7]+')} | !W:(\])} Suppress:("-")} {{{W:(\, \[]-...) | Re:('\\\\0?[xX][0-9a-fA-F]+')} | Re:('\\\\0[0-7]+')} | !W:(\])}}) | {{{W:(\, \[]-...) | Re:('\\\\0?[xX][0-9a-fA-F]+')} | Re:('\\\\0[0-7]+')} | !W:(\])}}}...)}, "]"]
is the “grammar” for the parser, and it doesn’t seem to have a literal @ in it… unless I’m missing something
SuccessfulKoala55 So I tried what you suggested, but I'm getting:
pyparsing.ParseException: Expected end of text, found '@' (at char 56), (line:1, col:57)
Any idea what the issue is? I supplied the env variable via the docker-compose file in the same format as the one you posted.
OK, it seems the string you pass in the env variables has to be a URI-safe string; after running the string through https://www.urlencoder.io/ I managed to not get that exception…
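For reference, the same URL-encoding can be done with Python's standard library; this sketch encodes just the username and password (the credentials below are the placeholder ones from this thread, not real ones):

```
from urllib.parse import quote_plus

# '@' in the password becomes '%40', so the URI itself stays parseable.
user = quote_plus("myDBReader")
password = quote_plus("D1fficultP@ssw0rd")

uri = f"mongodb://{user}:{password}@mongodb0.example.com:27017/?authSource=admin"
print(uri)
# mongodb://myDBReader:D1fficultP%40ssw0rd@mongodb0.example.com:27017/?authSource=admin
```

Note this only encodes the credentials, whereas running the full string through urlencoder.io will also encode the '://' and '@' separators.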
` version: "3.6"
services:
  apiserver:
    command:
    - apiserver
    container_name: clearml-apiserver
    image: allegroai/clearml:latest
    restart: unless-stopped
    volumes:
    - /opt/clearml/logs:/var/log/trains
    - /opt/clearml/config:/opt/trains/config
    - /opt/clearml/data/fileserver:/mnt/fileserver
    depends_on:
    - redis
    - elasticsearch
    - fileserver
    environment:
      TRAINS_ELASTIC_SERVICE_HOST: elasticsearch
      TRAINS_ELASTIC_SERVICE_PORT: 9200
      TRAINS_REDIS_SERVICE_HOST: redis
      TRAINS_REDIS_SERVICE_PORT: 6379
      TRAINS_SERVER_DEPLOYMENT_TYPE: ${TRAINS_SERVER_DEPLOYMENT_TYPE:-linux}
      TRAINS__HOSTS__MONGO__BACKEND__HOST: "mongodb://myDBReader:D1fficultP%40ssw0rd@mongodb0.example.com:27017/?authSource=admin"
      TRAINS__HOSTS__MONGO__AUTH__HOST: "mongodb://myDBReader:D1fficultP%40ssw0rd@mongodb0.example.com:27017/?authSource=admin"
      TRAINS__apiserver__pre_populate__enabled: "true"
      TRAINS__apiserver__pre_populate__zip_files: "/opt/trains/db-pre-populate"
      TRAINS__apiserver__pre_populate__artifacts_path: "/mnt/fileserver"
    ports:
    - "8008:8008"
    expose:
    - "8008"
    networks:
    - backend
    - frontend
  elasticsearch:
    networks:
    - backend
    container_name: clearml-elastic
    environment:
      ES_JAVA_OPTS: -Xms2g -Xmx2g
      bootstrap.memory_lock: "true"
      cluster.name: clearml
      cluster.routing.allocation.node_initial_primaries_recoveries: "500"
      cluster.routing.allocation.disk.watermark.low: 500mb
      cluster.routing.allocation.disk.watermark.high: 500mb
      cluster.routing.allocation.disk.watermark.flood_stage: 500mb
      discovery.zen.minimum_master_nodes: "1"
      discovery.type: "single-node"
      http.compression_level: "7"
      node.ingest: "true"
      node.name: clearml
      reindex.remote.whitelist: '*.*'
      xpack.monitoring.enabled: "false"
      xpack.security.enabled: "false"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
    restart: unless-stopped
    volumes:
    - /opt/clearml/data/elastic_7:/usr/share/elasticsearch/data
    - /usr/share/elasticsearch/logs
  fileserver:
    networks:
    - backend
    - frontend
    command:
    - fileserver
    container_name: clearml-fileserver
    image: allegroai/clearml:latest
    restart: unless-stopped
    volumes:
    - /opt/clearml/logs:/var/log/trains
    - /opt/clearml/data/fileserver:/mnt/fileserver
    - /opt/clearml/config:/opt/trains/config
    ports:
    - "8081:8081"
  redis:
    networks:
    - backend
    container_name: clearml-redis
    image: redis:5.0
    restart: unless-stopped
    volumes:
    - /opt/clearml/data/redis:/data
  webserver:
    command:
    - webserver
    container_name: clearml-webserver
    image: allegroai/clearml:latest
    restart: unless-stopped
    depends_on:
    - apiserver
    ports:
    - "8080:80"
    expose:
    - "8080"
    networks:
    - backend
    - frontend
  agent-services:
    networks:
    - backend
    container_name: clearml-agent-services
    image: allegroai/clearml-agent-services:latest
    restart: unless-stopped
    privileged: true
    environment:
      CLEARML_HOST_IP: ${CLEARML_HOST_IP}
      CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-}
      CLEARML_API_HOST:
      CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-}
      CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-}
      CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-}
      CLEARML_AGENT_GIT_USER: ${CLEARML_AGENT_GIT_USER}
      CLEARML_AGENT_GIT_PASS: ${CLEARML_AGENT_GIT_PASS}
      CLEARML_AGENT_UPDATE_VERSION: ${CLEARML_AGENT_UPDATE_VERSION:->=0.17.0}
      CLEARML_AGENT_DEFAULT_BASE_DOCKER: "ubuntu:18.04"
      AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-}
      AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-}
      AWS_DEFAULT_REGION: ${AWS_DEFAULT_REGION:-}
      AZURE_STORAGE_ACCOUNT: ${AZURE_STORAGE_ACCOUNT:-}
      AZURE_STORAGE_KEY: ${AZURE_STORAGE_KEY:-}
      GOOGLE_APPLICATION_CREDENTIALS: ${GOOGLE_APPLICATION_CREDENTIALS:-}
      CLEARML_WORKER_ID: "clearml-services"
      CLEARML_AGENT_DOCKER_HOST_MOUNT: "/opt/clearml/agent:/root/.clearml"
    volumes:
    - /var/run/docker.sock:/var/run/docker.sock
    - /opt/clearml/agent:/root/.clearml
    depends_on:
    - apiserver
networks:
  backend:
    driver: bridge
  frontend:
    driver: bridge `
Can I see the complete docker-compose.yml you're using?
Well, no… the URI format the parser accepts doesn't seem to match what the Mongo engine expects when passing a URI with a username and password, so unless I'm missing something, there's no way to connect to a remote Mongo that requires user/pass authentication.