name: Lint & Test

on:
  push:
    branches:
      - main
  pull_request:

jobs:
  lint-test:
    runs-on: ubuntu-latest
    env:
      # Configure pip to cache dependencies and do a user install
      PIP_NO_CACHE_DIR: false
      PIP_USER: 1

      # Make sure package manager does not use virtualenv
      POETRY_VIRTUALENVS_CREATE: false

      # Specify explicit paths for python dependencies and the pre-commit
      # environment so we know which directories to cache
      POETRY_CACHE_DIR: ${{ github.workspace }}/.cache/py-user-base
      PYTHONUSERBASE: ${{ github.workspace }}/.cache/py-user-base
      PRE_COMMIT_HOME: ${{ github.workspace }}/.cache/pre-commit-cache

    steps:
      - name: Add custom PYTHONUSERBASE to PATH
        run: echo '${{ env.PYTHONUSERBASE }}/bin/' >> $GITHUB_PATH

      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Setup python
        id: python
        uses: actions/setup-python@v2
        with:
          python-version: '3.9'

      # Start the database early to give it a chance to get ready before
      # we start running tests.
      - name: Run database using docker-compose
        run: docker-compose run -d -p 7777:5432 --name pydis_web postgres
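
      # A minimal sketch of the `postgres` service this step assumes in the
      # repository's docker-compose.yml (illustrative only; the image tag and
      # environment values are assumptions, not taken from this workflow):
      #
      #   postgres:
      #     image: postgres:13-alpine
      #     environment:
      #       POSTGRES_USER: pysite
      #       POSTGRES_PASSWORD: pysite
      #       POSTGRES_DB: pysite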

      # This step caches our Python dependencies. To make sure we only
      # restore a cache when the dependencies, the python version, the
      # runner operating system, and the dependency location haven't
      # changed, we create a cache key that is a composite of those states.
      #
      # Only when the context is exactly the same will we restore the cache.
      - name: Python Dependency Caching
        uses: actions/cache@v2
        id: python_cache
        with:
          path: ${{ env.PYTHONUSERBASE }}
          key: "python-0-${{ runner.os }}-${{ env.PYTHONUSERBASE }}-\
            ${{ steps.python.outputs.python-version }}-\
            ${{ hashFiles('./pyproject.toml', './poetry.lock') }}"
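
      # Illustrative only: with the values above the composite key expands to
      # something like (placeholder repo path, hash shortened, exact values
      # will differ per run):
      #   python-0-Linux-/home/runner/work/<repo>/<repo>/.cache/py-user-base-3.9.13-a1b2c3...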

      # Install our dependencies if we did not restore a dependency cache
      - name: Install dependencies using poetry
        if: steps.python_cache.outputs.cache-hit != 'true'
        run: |
          pip install poetry
          poetry install
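
      # With PIP_USER=1 and POETRY_VIRTUALENVS_CREATE=false set above, the
      # installed packages land under PYTHONUSERBASE, which is exactly the
      # directory the caching step restores and saves.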

      # This step caches our pre-commit environment. To make sure we
      # do create a new environment when our pre-commit setup changes,
      # we create a cache key based on relevant factors.
      - name: Pre-commit Environment Caching
        uses: actions/cache@v2
        with:
          path: ${{ env.PRE_COMMIT_HOME }}
          key: "precommit-0-${{ runner.os }}-${{ env.PRE_COMMIT_HOME }}-\
            ${{ steps.python.outputs.python-version }}-\
            ${{ hashFiles('./.pre-commit-config.yaml') }}"

      # We will not run `flake8` here, as we will use a separate flake8
      # step. As pre-commit does not support user installs, we set
      # PIP_USER=0 to not do a user install.
      - name: Run pre-commit hooks
        run: export PIP_USER=0; SKIP=flake8 pre-commit run --all-files

      # Run flake8 and have it format the linting errors as GitHub Workflow
      # commands that register error annotations. This means that our flake8
      # output is automatically added as error annotations to both the run
      # result and the "Files" tab of a pull request.
      #
      # Format used:
      # ::error file={filename},line={line},col={col}::{message}
      - name: Run flake8
        run: "flake8 \
          --format='::error file=%(path)s,line=%(row)d,col=%(col)d::\
          [flake8] %(code)s: %(text)s'"
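
      # For illustration (hypothetical file and violation), a single flake8
      # error would be emitted as:
      #   ::error file=pydis_site/apps/home/views.py,line=12,col=1::[flake8] E302: expected 2 blank lines, found 1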

      - name: Migrations and run tests with coverage.py
        run: |
          python manage.py makemigrations --check
          python manage.py migrate
          coverage run manage.py test --no-input
          coverage report -m
        env:
          CI: True
          DATABASE_URL: postgres://pysite:pysite@localhost:7777/pysite
          METRICITY_DB_URL: postgres://pysite:pysite@localhost:7777/metricity
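
      # Note: `makemigrations --check` exits non-zero when model changes have
      # no corresponding migration file, so missing migrations fail this step
      # before the test suite even runs.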

      # This step will publish the coverage reports to coveralls.io and
      # print a "job" link in the output of the GitHub Action
      - name: Publish coverage report to coveralls.io
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: coveralls

      - name: Tear down docker-compose containers
        run: docker-compose stop
        if: ${{ always() }}

      # Prepare the Pull Request Payload artifact. If this fails, we fail
      # silently using the `continue-on-error` option. It's nice if this
      # succeeds, but if it fails for any reason, it does not mean that our
      # lint-test checks failed.
      - name: Prepare Pull Request Payload artifact
        id: prepare-artifact
        if: always() && github.event_name == 'pull_request'
        continue-on-error: true
        run: cat $GITHUB_EVENT_PATH | jq '.pull_request' > pull_request_payload.json
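
      # Illustrative only: the saved JSON is the webhook's `pull_request`
      # object, so a downstream workflow could read fields such as:
      #   jq '.number, .head.sha' pull_request_payload.json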

      # This only makes sense if the previous step succeeded. To get the
      # original outcome of the previous step before the `continue-on-error`
      # conclusion is applied, we use the `.outcome` value. This step also
      # fails silently.
      - name: Upload a Build Artifact
        if: always() && steps.prepare-artifact.outcome == 'success'
        continue-on-error: true
        uses: actions/upload-artifact@v2
        with:
          name: pull-request-payload
          path: pull_request_payload.json