You signed in with another tab or window. Reload to refresh your session. You signed out in another tab or window. Reload to refresh your session. You switched accounts on another tab or window. Reload to refresh your session. Dismiss alert
By default, the Cortex CLI is installed at `/usr/local/bin/cortex`. To install the executable elsewhere, export the `CORTEX_INSTALL_PATH` environment variable to your desired location before running the command above.
Copy file name to clipboardExpand all lines: docs/workloads/async/configuration.md
+1-1Lines changed: 1 addition & 1 deletion
Display the source diff
Display the rich diff
Original file line number
Diff line number
Diff line change
@@ -26,7 +26,7 @@ predictor:
26
26
shell: <string> # relative path to a shell script for system package installation (default: dependencies.sh)
27
27
config: <string: value> # arbitrary dictionary passed to the constructor of the Predictor (optional)
28
28
python_path: <string> # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
29
-
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:0.31.0, quay.io/cortexlabs/python-predictor-gpu:0.31.0-cuda10.2-cudnn8, or quay.io/cortexlabs/python-predictor-inf:0.31.0 based on compute)
29
+
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:0.31.1, quay.io/cortexlabs/python-predictor-gpu:0.31.1-cuda10.2-cudnn8, or quay.io/cortexlabs/python-predictor-inf:0.31.1 based on compute)
30
30
env: <string: string> # dictionary of environment variables
31
31
log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
32
32
shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
Copy file name to clipboardExpand all lines: docs/workloads/batch/configuration.md
+4-4Lines changed: 4 additions & 4 deletions
Display the source diff
Display the rich diff
Original file line number
Diff line number
Diff line change
@@ -19,7 +19,7 @@ predictor:
19
19
path: <string> # path to a python file with a PythonPredictor class definition, relative to the Cortex root (required)
20
20
config: <string: value> # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
21
21
python_path: <string> # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
22
-
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:0.31.0 or quay.io/cortexlabs/python-predictor-gpu:0.31.0-cuda10.2-cudnn8 based on compute)
22
+
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:0.31.1 or quay.io/cortexlabs/python-predictor-gpu:0.31.1-cuda10.2-cudnn8 based on compute)
23
23
env: <string: string> # dictionary of environment variables
24
24
log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
25
25
shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -49,8 +49,8 @@ predictor:
49
49
batch_interval: <duration> # the maximum amount of time to spend waiting for additional requests before running inference on the batch of requests
50
50
config: <string: value> # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
51
51
python_path: <string> # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
52
-
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:0.31.0)
53
-
tensorflow_serving_image: <string> # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-cpu:0.31.0 or quay.io/cortexlabs/tensorflow-serving-gpu:0.31.0 based on compute)
52
+
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:0.31.1)
53
+
tensorflow_serving_image: <string> # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-cpu:0.31.1 or quay.io/cortexlabs/tensorflow-serving-gpu:0.31.1 based on compute)
54
54
env: <string: string> # dictionary of environment variables
55
55
log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
56
56
shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -75,7 +75,7 @@ predictor:
75
75
...
76
76
config: <string: value> # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
77
77
python_path: <string> # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
78
-
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-cpu:0.31.0 or quay.io/cortexlabs/onnx-predictor-gpu:0.31.0 based on compute)
78
+
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-cpu:0.31.1 or quay.io/cortexlabs/onnx-predictor-gpu:0.31.1 based on compute)
79
79
env: <string: string> # dictionary of environment variables
80
80
log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
81
81
shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
Cortex's base Docker images are listed below. Depending on the Cortex Predictor and compute type specified in your API configuration, choose one of these images to use as the base for your Docker image:
The sample `Dockerfile` below inherits from Cortex's Python CPU serving image, and installs 3 packages. `tree` is a system package and `pandas` and `rdkit` are Python packages.
29
29
30
30
<!-- CORTEX_VERSION_BRANCH_STABLE -->
31
31
```dockerfile
32
32
# Dockerfile
33
33
34
-
FROM quay.io/cortexlabs/python-predictor-cpu:0.31.0
34
+
FROM quay.io/cortexlabs/python-predictor-cpu:0.31.1
35
35
36
36
RUN apt-get update \
37
37
&& apt-get install -y tree \
@@ -49,7 +49,7 @@ If you need to upgrade the Python Runtime version on your image, you can follow
49
49
```Dockerfile
50
50
# Dockerfile
51
51
52
-
FROM quay.io/cortexlabs/python-predictor-cpu:0.31.0
52
+
FROM quay.io/cortexlabs/python-predictor-cpu:0.31.1
Copy file name to clipboardExpand all lines: docs/workloads/realtime/configuration.md
+4-4Lines changed: 4 additions & 4 deletions
Display the source diff
Display the rich diff
Original file line number
Diff line number
Diff line change
@@ -39,7 +39,7 @@ predictor:
39
39
threads_per_process: <int> # the number of threads per process (default: 1)
40
40
config: <string: value> # arbitrary dictionary passed to the constructor of the Predictor (optional)
41
41
python_path: <string> # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
42
-
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:0.31.0, quay.io/cortexlabs/python-predictor-gpu:0.31.0-cuda10.2-cudnn8, or quay.io/cortexlabs/python-predictor-inf:0.31.0 based on compute)
42
+
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:0.31.1, quay.io/cortexlabs/python-predictor-gpu:0.31.1-cuda10.2-cudnn8, or quay.io/cortexlabs/python-predictor-inf:0.31.1 based on compute)
43
43
env: <string: string> # dictionary of environment variables
44
44
log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
45
45
shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -74,8 +74,8 @@ predictor:
74
74
threads_per_process: <int> # the number of threads per process (default: 1)
75
75
config: <string: value> # arbitrary dictionary passed to the constructor of the Predictor (optional)
76
76
python_path: <string> # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
77
-
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:0.31.0)
78
-
tensorflow_serving_image: <string> # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-cpu:0.31.0, quay.io/cortexlabs/tensorflow-serving-gpu:0.31.0, or quay.io/cortexlabs/tensorflow-serving-inf:0.31.0 based on compute)
77
+
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:0.31.1)
78
+
tensorflow_serving_image: <string> # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-cpu:0.31.1, quay.io/cortexlabs/tensorflow-serving-gpu:0.31.1, or quay.io/cortexlabs/tensorflow-serving-inf:0.31.1 based on compute)
79
79
env: <string: string> # dictionary of environment variables
80
80
log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
81
81
shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -105,7 +105,7 @@ predictor:
105
105
threads_per_process: <int> # the number of threads per process (default: 1)
106
106
config: <string: value> # arbitrary dictionary passed to the constructor of the Predictor (optional)
107
107
python_path: <string> # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
108
-
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-cpu:0.31.0 or quay.io/cortexlabs/onnx-predictor-gpu:0.31.0 based on compute)
108
+
image: <string> # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-cpu:0.31.1 or quay.io/cortexlabs/onnx-predictor-gpu:0.31.1 based on compute)
109
109
env: <string: string> # dictionary of environment variables
110
110
log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
111
111
shm_size: <string> # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
Copy file name to clipboardExpand all lines: docs/workloads/task/configuration.md
+1-1Lines changed: 1 addition & 1 deletion
Display the source diff
Display the rich diff
Original file line number
Diff line number
Diff line change
@@ -12,7 +12,7 @@
12
12
conda: <string> # relative path to conda-packages.txt (default: conda-packages.txt)
13
13
shell: <string> # relative path to a shell script for system package installation (default: dependencies.sh)
14
14
python_path: <string> # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
15
-
image: <string> # docker image to use for the Task (default: quay.io/cortexlabs/python-predictor-cpu:0.31.0, quay.io/cortexlabs/python-predictor-gpu:0.31.0-cuda10.2-cudnn8, or quay.io/cortexlabs/python-predictor-inf:0.31.0 based on compute)
15
+
image: <string> # docker image to use for the Task (default: quay.io/cortexlabs/python-predictor-cpu:0.31.1, quay.io/cortexlabs/python-predictor-gpu:0.31.1-cuda10.2-cudnn8, or quay.io/cortexlabs/python-predictor-inf:0.31.1 based on compute)
16
16
env: <string: string> # dictionary of environment variables
17
17
log_level: <string> # log level that can be "debug", "info", "warning" or "error" (default: "info")
0 commit comments