version: 2.1

# Examples:
# https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
# https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
# https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml

# TODO: decide whether to drive tests with nox, tox, or pytest.

# -------------------------------------------------------------------------------------
# environments where we run our jobs
# -------------------------------------------------------------------------------------
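# `setupcuda` below installs the CUDA 11.3 toolkit on the GPU machine executor
# and is spliced into the `main` job via `<<: *setupcuda`; `binary_common`
# holds the parameters and environment shared by the binary build jobs, which
# pull it in via `<<: *binary_common`.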


setupcuda: &setupcuda
  run:
    name: Setup CUDA
    working_directory: ~/
    command: |
      # Download and install the NVIDIA driver, CUDA toolkit, etc.
      wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.19.01_linux.run
      sudo sh ~/nvidia-downloads/cuda_11.3.1_465.19.01_linux.run --silent
      echo "Done installing CUDA."
      pyenv versions
      nvidia-smi
      pyenv global 3.9.1

binary_common: &binary_common
  parameters:
    # Edit these defaults to do a release
    build_version:
      description: "version number of release binary; by default, build a nightly"
      type: string
      default: ""
    pytorch_version:
      description: "PyTorch version to build against; by default, use a nightly"
      type: string
      default: ""
    # Don't edit these
    python_version:
      description: "Python version to build against (e.g., 3.7)"
      type: string
    cu_version:
      description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
      type: string
    wheel_docker_image:
      description: "Wheel only: what docker image to use"
      type: string
      default: "pytorch/manylinux-cuda101"
    conda_docker_image:
      description: "what docker image to use for conda builds and the CUDA test run"
      type: string
      default: "pytorch/conda-cuda"
  environment:
    PYTHON_VERSION: << parameters.python_version >>
    BUILD_VERSION: << parameters.build_version >>
    PYTORCH_VERSION: << parameters.pytorch_version >>
    CU_VERSION: << parameters.cu_version >>
    TESTRUN_DOCKER_IMAGE: << parameters.conda_docker_image >>

jobs:
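  # `main`: GPU smoke test - installs CUDA and a CUDA-enabled PyTorch, builds
  # the extension in place, runs the unit tests, and produces a wheel.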
  main:
    environment:
      CUDA_VERSION: "11.3"
    resource_class: gpu.nvidia.small.multi
    machine:
      image: ubuntu-2004:202101-01
    steps:
      - checkout
      - <<: *setupcuda
      - run: pip3 install --progress-bar off imageio wheel matplotlib 'pillow<7'
      - run: pip3 install --progress-bar off torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
      # - run: conda create -p ~/conda_env python=3.7 numpy
      # - run: conda activate ~/conda_env
      # - run: conda install -c pytorch pytorch torchvision

      - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
      - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/iopath'
      - run:
          name: build
          command: |
            export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.3/lib64
            python3 setup.py build_ext --inplace
      - run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.3/lib64 python3 -m unittest discover -v -s tests
      - run: python3 setup.py bdist_wheel

  binary_linux_wheel:
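    # Builds a Linux wheel inside the manylinux docker image and saves it both
    # as a CircleCI artifact and to the workspace.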
    <<: *binary_common
    docker:
      - image: << parameters.wheel_docker_image >>
        auth:
          username: $DOCKERHUB_USERNAME
          password: $DOCKERHUB_TOKEN
    resource_class: 2xlarge+
    steps:
      - checkout
      - run: MAX_JOBS=15 packaging/build_wheel.sh
      - store_artifacts:
          path: dist
      - persist_to_workspace:
          root: dist
          paths:
            - "*"

  binary_linux_conda:
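    # Builds a conda package inside the conda docker image (no GPU is
    # available here, so tests are skipped - see the note below).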
    <<: *binary_common
    docker:
      - image: "<< parameters.conda_docker_image >>"
        auth:
          username: $DOCKERHUB_USERNAME
          password: $DOCKERHUB_TOKEN
    resource_class: 2xlarge+
    steps:
      - checkout
      # This builds with CUDA but no GPU is present,
      # so we don't run the tests here.
      - run:
          name: build
          no_output_timeout: 20m
          command: MAX_JOBS=15 TEST_FLAG=--no-test packaging/build_conda.sh
      - store_artifacts:
          path: /opt/conda/conda-bld/linux-64
      - persist_to_workspace:
          root: /opt/conda/conda-bld/linux-64
          paths:
            - "*"

  binary_linux_conda_cuda:
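    # Builds the conda package inside a docker container on a GPU machine so
    # the tests can actually run against CUDA.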
    <<: *binary_common
    machine:
      image: ubuntu-1604:201903-01
    resource_class: gpu.nvidia.small.multi
    steps:
    - checkout
    - run:
        name: Setup environment
        command: |
          set -e
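          # Install a pinned docker-ce plus the NVIDIA container toolkit so
          # containers can see the GPU, then install the NVIDIA driver itself.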

          curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
          curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -

          sudo apt-get update

          sudo apt-get install \
              apt-transport-https \
              ca-certificates \
              curl \
              gnupg-agent \
              software-properties-common

          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -

          sudo add-apt-repository \
             "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
             $(lsb_release -cs) \
             stable"

          sudo apt-get update
          export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
          sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3

          # Add the package repositories
          distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
          curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
          curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list

          export NVIDIA_CONTAINER_VERSION="1.0.3-1"
          sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
          sudo systemctl restart docker

          DRIVER_FN="NVIDIA-Linux-x86_64-460.84.run"
          wget "https://us.download.nvidia.com/XFree86/Linux-x86_64/460.84/$DRIVER_FN"
          sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
          nvidia-smi

    - run:
        name: Pull docker image
        command: |
          set -e

          { docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null

          echo Pulling docker image $TESTRUN_DOCKER_IMAGE
          docker pull $TESTRUN_DOCKER_IMAGE
    - run:
        name: Build and run tests
        no_output_timeout: 20m
        command: |
          set -e
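          # Run packaging/build_conda.sh inside the test image with the GPU
          # exposed, forwarding the build parameters as environment variables.
          # (JUST_TESTRUN is presumably consumed by build_conda.sh; it is not
          # defined in this file.)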

          cd ${HOME}/project/

          export JUST_TESTRUN=1
          VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"

          docker run --gpus all  --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${TESTRUN_DOCKER_IMAGE} ./packaging/build_conda.sh

  binary_macos_wheel:
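    # Builds a CPU-only macOS wheel inside a freshly installed Miniconda
    # environment.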
    <<: *binary_common
    macos:
      xcode: "12.0"
    steps:
      - checkout
      - run:
          # Cannot easily deduplicate this: sourcing `activate` sets
          # environment variables that we need to propagate to
          # build_wheel.sh.
          command: |
            curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
            sh conda.sh -b
            source $HOME/miniconda3/bin/activate
            packaging/build_wheel.sh
      - store_artifacts:
          path: dist

workflows:
  version: 2
  build_and_test:
    jobs:
      # - main:
      #     context: DOCKERHUB_TOKEN
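      # {{workflows()}} is a template placeholder, presumably expanded into
      # the full build-job matrix by a config-generation step outside this
      # file.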
      {{workflows()}}
      - binary_linux_conda_cuda:
          name: testrun_conda_cuda_py37_cu102_pyt170
          context: DOCKERHUB_TOKEN
          python_version: "3.7"
          pytorch_version: '1.7.0'
          cu_version: "cu102"
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py36_cpu
          python_version: '3.6'
          pytorch_version: '1.9.0'
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py37_cpu
          python_version: '3.7'
          pytorch_version: '1.9.0'
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py38_cpu
          python_version: '3.8'
          pytorch_version: '1.9.0'
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py39_cpu
          python_version: '3.9'
          pytorch_version: '1.9.0'