Skip to content

Commit

Permalink
Merge branch 'main' of https://github.com/unifyai/ivy into fix_key_arg_1
Browse files Browse the repository at this point in the history
  • Loading branch information
Sai-Suraj-27 committed Oct 26, 2023
2 parents ce5f275 + 3792dd2 commit 9fdbad8
Show file tree
Hide file tree
Showing 20 changed files with 152 additions and 188 deletions.
6 changes: 3 additions & 3 deletions .devcontainer/build_gpu/devcontainer.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@
"name": "Ivy GPU Development Environment (build)",

"build": {
"dockerfile": "../../docker/DockerfileGPUMultiCuda",
"dockerfile": "../../docker/DockerfileGPU",
"context": "../..",
"args": {
"IMAGE_NAME": "unifyai/multicuda",
"IMAGE_TAG": "base_and_requirements"
"IMAGE_NAME": "unifyai/ivy",
"IMAGE_TAG": "latest-gpu"
}
},

Expand Down
2 changes: 1 addition & 1 deletion .devcontainer/image_gpu/devcontainer.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"name": "Ivy GPU Development Environment (image)",

"image": "unifyai/multicuda:base_and_requirements",
"image": "unifyai/ivy:latest-gpu",
"customizations": {
"vscode": {
"extensions": [
Expand Down
26 changes: 26 additions & 0 deletions .github/workflows/dockerfile-gpu-push.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
name: GPU Dockerfile Push

on:
schedule:
- cron: '0 0 * * *'
workflow_dispatch:

jobs:

build:
runs-on: ubuntu-latest-4-cores

steps:
- name: Checkout 🛎 Ivy
uses: actions/checkout@v3

- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Build and push GPU image
run: |
docker build --progress=plain --no-cache -t unifyai/ivy:latest-gpu -f docker/DockerfileGPU .
docker push unifyai/ivy:latest-gpu
31 changes: 0 additions & 31 deletions .github/workflows/dockerfile-multicuda-push.yml

This file was deleted.

59 changes: 21 additions & 38 deletions .github/workflows/intelligent-tests-pr.yml
Original file line number Diff line number Diff line change
@@ -1,12 +1,10 @@
name: intelligent-tests-pr
on:
workflow_dispatch:
pull_request_target:
types: [labeled, opened, synchronize, reopened, review_requested]
pull_request:

permissions:
actions: read
pull-requests: write

jobs:
display_test_results:
Expand All @@ -26,47 +24,18 @@ jobs:
cat combined_test_results.txt
- name: New Failures Introduced
id: ci_output
run: |
find . -name "new_failures_*.txt" -exec cat {} + > combined_failures.txt
if [ -s combined_failures.txt ]
then
echo "This PR introduces the following new failing tests:"
cat combined_failures.txt
{
echo 'MESSAGE<<EOF'
echo "# Failed tests:"
echo "This PR introduces the following new failing tests:"
cat combined_failures.txt
echo EOF
} >> "$GITHUB_OUTPUT"
else
echo "MESSAGE=This pull request does not result in any additional test failures. Congratulations!" >> "$GITHUB_OUTPUT"
echo "This PR does not introduce any new test failures! Yippee!"
fi
- name: Find Comment
uses: peter-evans/find-comment@v2
id: fc
with:
issue-number: ${{ github.event.pull_request.number }}
comment-author: 'github-actions[bot]'
body-includes: <!--- CI Output -->

- name: Create or update comment
uses: peter-evans/create-or-update-comment@v3
with:
comment-id: ${{ steps.fc.outputs.comment-id }}
issue-number: ${{ github.event.pull_request.number }}
body: |
Thank you for this PR, here are the CI results:
-------------
${{ steps.ci_output.outputs.MESSAGE}}
<!--- CI Output -->
edit-mode: replace
run_tests:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
Expand All @@ -93,20 +62,34 @@ jobs:
submodules: "recursive"
fetch-depth: 100

- name: Install ivy and fetch binaries
run: |
cd ivy
pip3 install -e .
mkdir .ivy
touch .ivy/key.pem
echo -n ${{ secrets.USER_API_KEY }} > .ivy/key.pem
cd ..
- name: Get Job URL
uses: Tiryoh/gha-jobid-action@v0
id: jobs
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
job_name: ${{ github.job }}

- name: Determine and Run Tests
id: tests
run: |
git clone -b master${{ matrix.branch }} https://github.com/unifyai/Mapping.git --depth 200
git clone -b master${{ matrix.branch }} https://github.com/unifyai/Mapping.git --depth 1
pip install pydriller GitPython
python ivy/scripts/setup_tests/clone-mapping.py
cp Mapping/tests.pbz2 ivy/
cd ivy
mkdir .ivy
touch .ivy/key.pem
echo -n ${{ secrets.USER_API_KEY }} > .ivy/key.pem
python scripts/determine_tests/determine_tests.py ${{ matrix.branch }} pr
set -o pipefail
python scripts/run_tests/run_tests_pr.py new_failures_${{ matrix.branch }}.txt | tee test_results_${{ matrix.branch }}.txt
cd ..
continue-on-error: true

- name: Upload test results
Expand Down
50 changes: 33 additions & 17 deletions docker/DockerfileGPUMultiCuda → docker/DockerfileGPU
Original file line number Diff line number Diff line change
@@ -1,16 +1,32 @@
# uses the base image which has cuda and cudnn installed(multiple versions) and then installs the
# installs multiple versions of cuda and cudnn and then installs the
# latest frameworks and the requirements
FROM unifyai/multicuda:base
FROM debian:buster
WORKDIR /ivy
ARG fw

# arguments
ARG fw
ARG pycon=3.10

# environment variables
ENV DEBIAN_FRONTEND=noninteractive

# Install miniconda
ENV TZ=Europe/Moscow
ENV CONDA_DIR /opt/miniconda/


# install base libraries
RUN grep security /etc/apt/sources.list | tee /etc/apt/security.sources.list && \
apt-get update && \
apt-get upgrade -o Dir::Etc::SourceList=/etc/apt/security.sources.list -y &&\
apt-get -y update && \
apt-get install -y gnupg \
curl \
wget \
software-properties-common \
gcc \
nano


# install miniconda
RUN apt clean && \
rm -rf /var/lib/apt/lists/* && \
apt-get update && \
Expand All @@ -21,10 +37,12 @@ RUN apt clean && \
/bin/bash ~/miniconda.sh -b -p /opt/miniconda


# create conda environment
ENV PATH=$CONDA_DIR/bin:$PATH
RUN conda create --name multienv python==$pycon -y

# to fix protobuf conflicts

# fix protobuf conflicts
ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION python
ENV PATH=/opt/miniconda/envs/multienv/bin:$PATH
RUN apt-get update && \
Expand All @@ -38,44 +56,42 @@ RUN apt-get update && \
pip3 install setuptools==58.5.3


# Install Ivy Upstream
# install Ivy Upstream
RUN git clone --progress --recurse-submodules https://github.com/unifyai/ivy --depth 1 && \
cd ivy && \
cd ivy_tests/array_api_testing/test_array_api && \
pip3 install --no-cache-dir -r requirements.txt

# Install local optional
COPY /docker/multicuda_framework_directory.py .

# copy library files to workdir
COPY docker/gpu_framework_directory.py .
COPY requirements/optional_gpu.txt .
COPY requirements/requirements.txt .


#setting torch path early on because torch-scatter needs it
# setting torch path early on because torch-scatter needs it
ENV PYTHONPATH "/opt/fw/torch:/opt/miniconda/envs/multienv/bin"


# requirement mappings directs which dependency to be installed and where
COPY /docker/requirement_mappings_gpu.json .
SHELL ["/bin/bash", "-c"]



RUN python3 multicuda_framework_directory.py $fw &&\
# install all libraries based on the mappings
RUN python3 gpu_framework_directory.py $fw &&\
jq -r 'to_entries[] | select(.value != [""]) | .key as $dir | .value[] | @sh "/opt/fw/\($dir) \(.)"' requirement_mappings_gpu.json | xargs -I {} sh -c 'printf "Installing %s\n" $2 && pip install --ignore-installed --target $1 $2 --extra-index-url https://download.pytorch.org/whl/cu118' sh {}



RUN sed -i '/numpy/d' requirements.txt &&\
pip install -r requirements.txt &&\
cp ./optional_gpu.txt tmp.txt &&\
jq -r 'to_entries[] | [.key] + .value | select(length > 0 or (. == "")) | .[]' requirement_mappings_gpu.json | sort -u | xargs -I {} sed -i '/{}/d;/jaxlib/d' tmp.txt && pip install -r tmp.txt




# add all the directories to environment path so that python knows where to find them
ENV PYTHONPATH "/opt/fw/mxnet:/opt/fw/numpy:/opt/fw/tensorflow:/opt/fw/jax:/opt/fw/torch:/opt/fw/paddle:/opt/miniconda/envs/multienv/bin"


# test dependencies
COPY scripts/test_dependencies.py .
RUN python3 test_dependencies.py -fp requirements.txt,optional_gpu.txt && \
rm -rf requirements.txt && \
Expand Down
20 changes: 0 additions & 20 deletions docker/DockerfileGPUMultiCuda-base

This file was deleted.

1 change: 0 additions & 1 deletion docker/build_DockerfileGPUMultiCuda.sh

This file was deleted.

1 change: 1 addition & 0 deletions docker/build_gpu_dockerfile.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
docker build --progress=plain --no-cache -t unifyai/ivy:latest-gpu -f DockerfileGPU ..
File renamed without changes.
2 changes: 1 addition & 1 deletion docker/rebuild_all_dockerfiles.sh
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
#!/bin/bash

docker build -t unifyai/ivy:latest --no-cache -f Dockerfile ..
docker build -t unifyai/multicuda:base_and_requirements --no-cache -f DockerfileGPUMultiCuda ..
docker build -t unifyai/ivy:latest-gpu --no-cache -f DockerfileGPU ..
2 changes: 1 addition & 1 deletion docs/overview/contributing/setting_up.rst
Original file line number Diff line number Diff line change
Expand Up @@ -715,7 +715,7 @@ Just follow the steps outlined below:

- :code:`Default project configuration` - This is the default option, it will set up with the default codespaces environment.
- :code:`Ivy Development Environment (build)` - This will set up the development environment of ivy for CPU and build image from :code:`ivy/docker/Dockerfile`.
- :code:`Ivy GPU Development Environment (build)` - This will set up the development environment of ivy for GPU and build image from :code:`ivy/docker/DockerfileGPUMultiCuda`.
- :code:`Ivy GPU Development Environment (build)` - This will set up the development environment of ivy for GPU and build image from :code:`ivy/docker/DockerfileGPU`.
- :code:`Ivy Development Environment for Multiver...` - This will set up the development environment of multiversion support with ivy and build image from :code:`ivy/docker/DockerfileMultiversion`.
- :code:`Ivy Development Environment (image)` - This will set up the development environment of ivy for CPU and build image from the latest image from dockerhub.
- :code:`Ivy GPU Development Environment (image)` - This will set up the development environment of ivy for GPU and build image from the latest image from dockerhub.
Expand Down
4 changes: 2 additions & 2 deletions ivy/functional/backends/jax/experimental/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ def general_pool(
# if dtype is not set here, jax casts it to float64
inputs = jnp.array(inputs, dtype=jnp.float32)
if not ivy.is_array(init):
init = jnp.array(init, dtype=jnp.float32)
init = jnp.array(init, dtype=inputs.dtype)
promoted_type = jnp.promote_types(inputs.dtype, init.dtype)
inputs = inputs.astype(promoted_type)
init = init.astype(promoted_type)
Expand Down Expand Up @@ -703,7 +703,7 @@ def interpolate(
out: Optional[JaxArray] = None,
):
dims = len(x.shape) - 2
size = _get_size(scale_factor, size, dims, x.shape)
size, _ = _get_size(scale_factor, size, dims, x.shape)
mode = (
"nearest"
if mode == "nearest-exact"
Expand Down
2 changes: 1 addition & 1 deletion ivy/functional/backends/tensorflow/experimental/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -893,7 +893,7 @@ def interpolate(
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
):
dims = len(x.shape) - 2
size = _get_size(scale_factor, size, dims, x.shape)
size, _ = _get_size(scale_factor, size, dims, x.shape)
remove_dim = False
if mode in ["linear", "tf_area", "lanczos3", "lanczos5", "nearest-exact"]:
if dims == 1:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,10 @@ def avg_pool1d(
)


@with_unsupported_dtypes(
{"2.1.0 and below": ("float16",)},
"torch",
)
@to_ivy_arrays_and_back
def avg_pool2d(
input,
Expand Down
4 changes: 4 additions & 0 deletions ivy/functional/frontends/torch/reduction_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,10 @@ def median(input, dim=None, keepdim=False, *, out=None):

@numpy_to_torch_style_args
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{"2.1.0 and below": ("complex64", "complex128")},
"torch",
)
def min(*input, dim=None, keepdim=False, out=None):
if len(input) == 1:
input = input[0]
Expand Down
Loading

0 comments on commit 9fdbad8

Please sign in to comment.