Hi, I am currently using ClearML Agent with the Kubernetes glue (k8s glue) container, installed via Helm in a Kubernetes cluster.
I would like to understand the process of caching pip requirements so that I don't have to install them in each clearml-id-xx pod.
Even though I have configured caching in my agent, the requirements are still being installed from scratch in each pod, which adds approximately 4 minutes per task.
# ClearML agent configuration embedded as a YAML block scalar (HOCON syntax inside).
# NOTE: the block-scalar content MUST be indented deeper than the key, otherwise
# the YAML parser treats "clearmlConfig" as empty and the config is never applied.
clearmlConfig: |-
  sdk {
    storage {
      cache {
        # Defaults to <system_temp_folder>/clearml_cache
        default_base_dir: "~/.clearml/cache"
      }
      direct_access: [
        # Objects matching are considered to be available for direct access, i.e. they will not be downloaded
        # or cached, and any download request will return a direct reference.
        # Objects are specified in glob format, available for url and content_type.
        { url: "file://*" }  # file-urls are always directly referenced
      ]
    }
  }
  agent {
    venvs_cache: {
      # maximum number of cached venvs
      max_entries: 100
      # minimum required free space to allow for cache entry, disable by passing 0 or negative value
      free_space_threshold_gb: 32.0
      # setting "path" is what enables virtual-environment caching.
      # NOTE(review): with the k8s glue every task runs in a fresh pod, so this
      # directory must live on a persistent volume mounted into each task pod —
      # otherwise the cache is empty on every run and pip reinstalls everything.
      path: ~/.clearml/venvs-cache
    },
    vcs_cache: {
      enabled: true,
      path: ~/.clearml/vcs-cache
    },
    pip_download_cache {
      enabled: true,
      path: ~/.clearml/pip-download-cache
    },
    # use ":" consistently with the rest of the file (HOCON accepts both ":" and "=")
    docker_pip_cache: ~/.clearml/pip-cache
    docker_apt_cache: ~/.clearml/apt-cache
    # allow to set internal mount points inside the docker,
    # especially useful for non-root docker container images.
    docker_internal_mounts {
      sdk_cache: "/clearml_agent_cache"
      apt_cache: "/var/cache/apt/archives"
      ssh_folder: "/root/.ssh"
      pip_cache: "/root/.cache/pip"
      poetry_cache: "/root/.cache/pypoetry"
      vcs_cache: "/root/.clearml/vcs-cache"
      venv_build: "~/.clearml/venvs-builds"
      # added: without this mount the venvs cache configured above is never
      # visible inside the task container, so cached virtual environments are
      # not reused and requirements are re-installed in every pod
      venvs_cache: "/root/.clearml/venvs-cache"
      pip_download: "/root/.clearml/pip-download-cache"
    }
    package_manager: {
      # virtual environment inherits packages from the system interpreter
      system_site_packages: true,
    }
  }