diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 00000000..cfb364d2 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,40 @@ +name: Formatting + +on: + push: + branches: [main] + pull_request: + branches: [main] + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + merge_group: + +permissions: + contents: read + +jobs: + black: + runs-on: ubuntu-latest + concurrency: + group: lint-${{ github.ref }} + cancel-in-progress: true + steps: + - uses: actions/checkout@v3 + - name: Install Poetry + run: | + curl -sSL https://install.python-poetry.org | python - + echo "$HOME/.poetry/bin" >> $GITHUB_PATH + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: "3.9" + cache: poetry + - name: Install dependencies + run: | + poetry install --with dev + - name: Run black + uses: psf/black@stable + with: + options: "--check" + src: "." + version: "23.3.0" # Black version to use for formatting, since yearly they change the style, this keeps it stable diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index adc48140..7a94b284 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -1,4 +1,3 @@ - name: Tests on: @@ -68,12 +67,8 @@ jobs: tests: runs-on: ubuntu-latest - strategy: - matrix: - # Minimum supported; Maybe add in some newer version to test things won't break if used on newer versions of python? 
- python_version: ["3.9"] #, "3.x"] concurrency: - group: tests-${{ github.ref }}-${{ matrix.python_version }} + group: tests-${{ github.ref }} cancel-in-progress: true steps: - uses: actions/checkout@v3 @@ -81,22 +76,30 @@ jobs: run: | curl -sSL https://install.python-poetry.org | python - echo "$HOME/.poetry/bin" >> $GITHUB_PATH - - name: Set up Python ${{ matrix.python_version }} + - name: Set up Python uses: actions/setup-python@v3 with: - python-version: ${{ matrix.python_version }} + python-version: "3.9" cache: poetry - name: Install dependencies run: | poetry install --with dev - name: Run tests run: | - poetry run pytest tests + poetry run pytest --cov-report=xml:coverage.xml -rFxX tests/ - name: Upload generated files if: failure() uses: actions/upload-artifact@v3 with: - name: test-generated-files-${{ github.event_name }}-${{ matrix.python_version }} + name: test-generated-files-${{ github.event_name }} path: tests/test_question_templates/question_generated_outputs/ if-no-files-found: ignore retention-days: 5 + # - name: Upload coverage report to codecov + # if: always() + # uses: codecov/codecov-action@v3 + # with: + # # Needed for private repos, public repos don't, but it is a good + # # fallback if codecov fails to identify it properly as public + # # token: ${{ secrets.CODECOV_TOKEN }} + # files: ./coverage.xml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..c8e12dc2 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,10 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v2.3.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace +- repo: https://github.com/psf/black + rev: 23.3.0 + hooks: + - id: black diff --git a/README.md b/README.md index ff32349b..013aaa6e 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![Python](https://img.shields.io/badge/python-3.9-blue)]() 
[![codecov](https://codecov.io/gh/open-resources/problem_bank_scripts/branch/main/graph/badge.svg)](https://codecov.io/gh/open-resources/problem_bank_scripts) [![Documentation Status](https://readthedocs.org/projects/problem_bank_scripts/badge/?version=latest)](https://problem_bank_scripts.readthedocs.io/en/latest/?badge=latest) +[![Tests](https://github.com/open-resources/problem_bank_scripts/actions/workflows/tests.yml/badge.svg)](https://github.com/open-resources/problem_bank_scripts/actions/workflows/tests.yml) ## Installation diff --git a/docs/source/conf.py b/docs/source/conf.py index c3515fff..92f1cff0 100755 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -6,9 +6,9 @@ # -- Project information ----------------------------------------------------- -project = u"Problem Bank Scripts" -copyright = u"2021, Open Problem Bank Team" -author = u"Open Problem Bank Team" +project = "Problem Bank Scripts" +copyright = "2021, Open Problem Bank Team" +author = "Open Problem Bank Team" # -- General configuration --------------------------------------------------- diff --git a/poetry.lock b/poetry.lock index acaecaca..3e79cc90 100644 --- a/poetry.lock +++ b/poetry.lock @@ -126,6 +126,57 @@ soupsieve = ">1.2" html5lib = ["html5lib"] lxml = ["lxml"] +[[package]] +name = "black" +version = "23.3.0" +description = "The uncompromising code formatter." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "black-23.3.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2"}, + {file = "black-23.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c"}, + {file = "black-23.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d"}, + {file = "black-23.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70"}, + {file = "black-23.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326"}, + {file = "black-23.3.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b"}, + {file = "black-23.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2"}, + {file = "black-23.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925"}, + {file = 
"black-23.3.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5"}, + {file = "black-23.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961"}, + {file = "black-23.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266"}, + {file = "black-23.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab"}, + {file = "black-23.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb"}, + {file = "black-23.3.0-py3-none-any.whl", hash = "sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4"}, + {file = "black-23.3.0.tar.gz", hash = "sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940"}, +] + +[package.dependencies] +click = ">=8.0.0" +ipython = {version = ">=7.8.0", optional = true, markers = "extra == \"jupyter\""} +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tokenize-rt = {version = ">=3.2.0", optional = true, markers = "extra == 
\"jupyter\""} +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + [[package]] name = "bleach" version = "6.0.0" @@ -340,6 +391,81 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "coverage" +version = "7.2.7" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "coverage-7.2.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d39b5b4f2a66ccae8b7263ac3c8170994b65266797fb96cbbfd3fb5b23921db8"}, + {file = "coverage-7.2.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d040ef7c9859bb11dfeb056ff5b3872436e3b5e401817d87a31e1750b9ae2fb"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba90a9563ba44a72fda2e85302c3abc71c5589cea608ca16c22b9804262aaeb6"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7d9405291c6928619403db1d10bd07888888ec1abcbd9748fdaa971d7d661b2"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31563e97dae5598556600466ad9beea39fb04e0229e61c12eaa206e0aa202063"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ebba1cd308ef115925421d3e6a586e655ca5a77b5bf41e02eb0e4562a111f2d1"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cb017fd1b2603ef59e374ba2063f593abe0fc45f2ad9abdde5b4d83bd922a353"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:d62a5c7dad11015c66fbb9d881bc4caa5b12f16292f857842d9d1871595f4495"}, + {file = "coverage-7.2.7-cp310-cp310-win32.whl", hash = "sha256:ee57190f24fba796e36bb6d3aa8a8783c643d8fa9760c89f7a98ab5455fbf818"}, + {file = "coverage-7.2.7-cp310-cp310-win_amd64.whl", hash = "sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850"}, + {file = "coverage-7.2.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:06a9a2be0b5b576c3f18f1a241f0473575c4a26021b52b2a85263a00f034d51f"}, + {file = "coverage-7.2.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5baa06420f837184130752b7c5ea0808762083bf3487b5038d68b012e5937dbe"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52edc1a60c0d34afa421c9c37078817b2e67a392cab17d97283b64c5833f427f"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63426706118b7f5cf6bb6c895dc215d8a418d5952544042c8a2d9fe87fcf09cb"}, + {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:afb17f84d56068a7c29f5fa37bfd38d5aba69e3304af08ee94da8ed5b0865833"}, + {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:48c19d2159d433ccc99e729ceae7d5293fbffa0bdb94952d3579983d1c8c9d97"}, + {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0e1f928eaf5469c11e886fe0885ad2bf1ec606434e79842a879277895a50942a"}, + {file = "coverage-7.2.7-cp311-cp311-win32.whl", hash = "sha256:33d6d3ea29d5b3a1a632b3c4e4f4ecae24ef170b0b9ee493883f2df10039959a"}, + {file = "coverage-7.2.7-cp311-cp311-win_amd64.whl", hash = "sha256:5b7540161790b2f28143191f5f8ec02fb132660ff175b7747b95dcb77ac26562"}, + {file = "coverage-7.2.7-cp312-cp312-macosx_10_9_x86_64.whl", hash 
= "sha256:f2f67fe12b22cd130d34d0ef79206061bfb5eda52feb6ce0dba0644e20a03cf4"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a342242fe22407f3c17f4b499276a02b01e80f861f1682ad1d95b04018e0c0d4"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:171717c7cb6b453aebac9a2ef603699da237f341b38eebfee9be75d27dc38e01"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49969a9f7ffa086d973d91cec8d2e31080436ef0fb4a359cae927e742abfaaa6"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b46517c02ccd08092f4fa99f24c3b83d8f92f739b4657b0f146246a0ca6a831d"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a3d33a6b3eae87ceaefa91ffdc130b5e8536182cd6dfdbfc1aa56b46ff8c86de"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:976b9c42fb2a43ebf304fa7d4a310e5f16cc99992f33eced91ef6f908bd8f33d"}, + {file = "coverage-7.2.7-cp312-cp312-win32.whl", hash = "sha256:8de8bb0e5ad103888d65abef8bca41ab93721647590a3f740100cd65c3b00511"}, + {file = "coverage-7.2.7-cp312-cp312-win_amd64.whl", hash = "sha256:9e31cb64d7de6b6f09702bb27c02d1904b3aebfca610c12772452c4e6c21a0d3"}, + {file = "coverage-7.2.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58c2ccc2f00ecb51253cbe5d8d7122a34590fac9646a960d1430d5b15321d95f"}, + {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d22656368f0e6189e24722214ed8d66b8022db19d182927b9a248a2a8a2f67eb"}, + {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a895fcc7b15c3fc72beb43cdcbdf0ddb7d2ebc959edac9cef390b0d14f39f8a9"}, + {file = 
"coverage-7.2.7-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84606b74eb7de6ff581a7915e2dab7a28a0517fbe1c9239eb227e1354064dcd"}, + {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0a5f9e1dbd7fbe30196578ca36f3fba75376fb99888c395c5880b355e2875f8a"}, + {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:419bfd2caae268623dd469eff96d510a920c90928b60f2073d79f8fe2bbc5959"}, + {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2aee274c46590717f38ae5e4650988d1af340fe06167546cc32fe2f58ed05b02"}, + {file = "coverage-7.2.7-cp37-cp37m-win32.whl", hash = "sha256:61b9a528fb348373c433e8966535074b802c7a5d7f23c4f421e6c6e2f1697a6f"}, + {file = "coverage-7.2.7-cp37-cp37m-win_amd64.whl", hash = "sha256:b1c546aca0ca4d028901d825015dc8e4d56aac4b541877690eb76490f1dc8ed0"}, + {file = "coverage-7.2.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:54b896376ab563bd38453cecb813c295cf347cf5906e8b41d340b0321a5433e5"}, + {file = "coverage-7.2.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3d376df58cc111dc8e21e3b6e24606b5bb5dee6024f46a5abca99124b2229ef5"}, + {file = "coverage-7.2.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e330fc79bd7207e46c7d7fd2bb4af2963f5f635703925543a70b99574b0fea9"}, + {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e9d683426464e4a252bf70c3498756055016f99ddaec3774bf368e76bbe02b6"}, + {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d13c64ee2d33eccf7437961b6ea7ad8673e2be040b4f7fd4fd4d4d28d9ccb1e"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b7aa5f8a41217360e600da646004f878250a0d6738bcdc11a0a39928d7dc2050"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:8fa03bce9bfbeeef9f3b160a8bed39a221d82308b4152b27d82d8daa7041fee5"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:245167dd26180ab4c91d5e1496a30be4cd721a5cf2abf52974f965f10f11419f"}, + {file = "coverage-7.2.7-cp38-cp38-win32.whl", hash = "sha256:d2c2db7fd82e9b72937969bceac4d6ca89660db0a0967614ce2481e81a0b771e"}, + {file = "coverage-7.2.7-cp38-cp38-win_amd64.whl", hash = "sha256:2e07b54284e381531c87f785f613b833569c14ecacdcb85d56b25c4622c16c3c"}, + {file = "coverage-7.2.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:537891ae8ce59ef63d0123f7ac9e2ae0fc8b72c7ccbe5296fec45fd68967b6c9"}, + {file = "coverage-7.2.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06fb182e69f33f6cd1d39a6c597294cff3143554b64b9825d1dc69d18cc2fff2"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:201e7389591af40950a6480bd9edfa8ed04346ff80002cec1a66cac4549c1ad7"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f48351d66575f535669306aa7d6d6f71bc43372473b54a832222803eb956fd1"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b29019c76039dc3c0fd815c41392a044ce555d9bcdd38b0fb60fb4cd8e475ba9"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:81c13a1fc7468c40f13420732805a4c38a105d89848b7c10af65a90beff25250"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:975d70ab7e3c80a3fe86001d8751f6778905ec723f5b110aed1e450da9d4b7f2"}, + {file = "coverage-7.2.7-cp39-cp39-win32.whl", hash = "sha256:7ee7d9d4822c8acc74a5e26c50604dff824710bc8de424904c0982e25c39c6cb"}, + {file = "coverage-7.2.7-cp39-cp39-win_amd64.whl", hash = 
"sha256:eb393e5ebc85245347950143969b241d08b52b88a3dc39479822e073a1a8eb27"}, + {file = "coverage-7.2.7-pp37.pp38.pp39-none-any.whl", hash = "sha256:b7b4c971f05e6ae490fef852c218b0e79d4e52f79ef0c8475566584a8fb3e01d"}, + {file = "coverage-7.2.7.tar.gz", hash = "sha256:924d94291ca674905fe9481f12294eb11f2d3d3fd1adb20314ba89e94f44ed59"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + [[package]] name = "decorator" version = "5.1.1" @@ -960,6 +1086,17 @@ docs = ["sphinx"] gmpy = ["gmpy2 (>=2.1.0a4)"] tests = ["pytest (>=4.6)"] +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + [[package]] name = "myst-nb" version = "0.15.0" @@ -1264,6 +1401,17 @@ files = [ qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] testing = ["docopt", "pytest (<6.0.0)"] +[[package]] +name = "pathspec" +version = "0.11.1" +description = "Utility library for gitignore style pattern matching of file paths." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, + {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, +] + [[package]] name = "pexpect" version = "4.8.0" @@ -1484,6 +1632,24 @@ toml = "*" [package.extras] testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] +[[package]] +name = "pytest-cov" +version = "4.1.0" +description = "Pytest plugin for measuring coverage." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, + {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, +] + +[package.dependencies] +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] + [[package]] name = "python-dateutil" version = "2.8.2" @@ -2104,6 +2270,17 @@ webencodings = ">=0.4" doc = ["sphinx", "sphinx_rtd_theme"] test = ["flake8", "isort", "pytest"] +[[package]] +name = "tokenize-rt" +version = "5.1.0" +description = "A wrapper around the stdlib `tokenize` which roundtrips." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "tokenize_rt-5.1.0-py2.py3-none-any.whl", hash = "sha256:9b7bb843e77dd6ed0be5564bfaaba200083911e0497841cd3e9235a6a9794d74"}, + {file = "tokenize_rt-5.1.0.tar.gz", hash = "sha256:08f0c2daa94c4052e53c2fcaa8e32585e6ae9bdfc800974092d031401694e002"}, +] + [[package]] name = "toml" version = "0.10.2" @@ -2163,13 +2340,13 @@ test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] [[package]] name = "typing-extensions" -version = "4.7.0" +version = "4.7.1" description = "Backported and Experimental Type Hints for Python 3.7+" optional = false python-versions = ">=3.7" files = [ - {file = "typing_extensions-4.7.0-py3-none-any.whl", hash = "sha256:5d8c9dac95c27d20df12fb1d97b9793ab8b2af8a3a525e68c80e21060c161771"}, - {file = "typing_extensions-4.7.0.tar.gz", hash = "sha256:935ccf31549830cda708b42289d44b6f74084d616a00be651601a4f968e77c82"}, + {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, + {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, ] [[package]] @@ -2349,4 +2526,4 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "6f895f07c59811bf3d7f2b785b22fa2e8724001e0a10c323dad7a81e9b950196" +content-hash = "5bec59a49a19a201447af13e9fffeb063de76238a2968f1ac6f94c4a5094a48c" diff --git a/pyproject.toml b/pyproject.toml index b147ffe1..7d25685d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,7 @@ mdformat = "^0.7.14" sympy = "^1.8" problem-bank-helpers = "^0.1.14" -[tool.poetry.dev-dependencies] +[tool.poetry.group.dev.dependencies] Sphinx = "^4.4" nbsphinx = "^0.8.5" ipykernel = "^5.5.5" @@ -30,6 +30,8 @@ sphinxcontrib-napoleon = "^0.7" pytest = "^6.2.4" myst-nb = "^0.15" myst-parser = "^0.17" +pytest-cov = "^4.1.0" +black = 
{extras = ["jupyter"], version = "^23.3.0"} [build-system] requires = ["poetry-core>=1.0.0"] @@ -37,3 +39,9 @@ build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] filterwarnings = ["ignore::DeprecationWarning"] +addopts = "--cov=src/problem_bank_scripts --cov-report=term --cov-branch" + +[tool.black] +required_version = "23" # Black version to use for formatting, since yearly they change the style +force-exclude = "/tests/test_question_templates" +line-length = 100 diff --git a/src/problem_bank_scripts/prairielearn.py b/src/problem_bank_scripts/prairielearn.py index ab71e5b9..be7041c5 100644 --- a/src/problem_bank_scripts/prairielearn.py +++ b/src/problem_bank_scripts/prairielearn.py @@ -1,6 +1,7 @@ # This file has been copied directly from the PL repo: https://github.com/PrairieLearn/PrairieLearn/blob/master/lib/python_helper_sympy.py import sympy + # import ast # import sys @@ -8,9 +9,11 @@ # import html # import to_precision import numpy as np + # import uuid # import sympy import pandas + # from python_helper_sympy import convert_string_to_sympy # from python_helper_sympy import sympy_to_json # from python_helper_sympy import json_to_sympy @@ -22,45 +25,47 @@ # import os import collections + # Create a new instance of this class to access the member dictionaries. This # is to avoid accidentally modifying these dictionaries. 
class _Constants: def __init__(self): self.helpers = { - '_Integer': sympy.Integer, + "_Integer": sympy.Integer, } self.variables = { - 'pi': sympy.pi, - 'e': sympy.E, + "pi": sympy.pi, + "e": sympy.E, } self.hidden_variables = { - '_Exp1': sympy.E, + "_Exp1": sympy.E, } self.complex_variables = { - 'i': sympy.I, - 'j': sympy.I, + "i": sympy.I, + "j": sympy.I, } self.hidden_complex_variables = { - '_ImaginaryUnit': sympy.I, + "_ImaginaryUnit": sympy.I, } self.functions = { # These are shown to the student - 'cos': sympy.cos, - 'sin': sympy.sin, - 'tan': sympy.tan, - 'arccos': sympy.acos, - 'arcsin': sympy.asin, - 'arctan': sympy.atan, - 'acos': sympy.acos, - 'asin': sympy.asin, - 'atan': sympy.atan, - 'arctan2': sympy.atan2, - 'atan2': sympy.atan2, - 'exp': sympy.exp, - 'log': sympy.log, - 'sqrt': sympy.sqrt, + "cos": sympy.cos, + "sin": sympy.sin, + "tan": sympy.tan, + "arccos": sympy.acos, + "arcsin": sympy.asin, + "arctan": sympy.atan, + "acos": sympy.acos, + "asin": sympy.asin, + "atan": sympy.atan, + "arctan2": sympy.atan2, + "atan2": sympy.atan2, + "exp": sympy.exp, + "log": sympy.log, + "sqrt": sympy.sqrt, } + # # Safe evaluation of user input to convert from string to sympy expression. 
# # # # Adapted from: @@ -276,6 +281,7 @@ def __init__(self): # w_right = min(ind+w, len(s)) - ind # return s[ind-w_left:ind+w_right] + '\n' + ' '*w_left + '^' + ' '*w_right + def sympy_to_json(a, allow_complex=True): const = _Constants() @@ -289,14 +295,22 @@ def sympy_to_json(a, allow_complex=True): for k in reserved.keys(): for v in variables: if k == v: - raise ValueError('sympy expression has a variable with a reserved name: {:s}'.format(k)) + raise ValueError( + "sympy expression has a variable with a reserved name: {:s}".format(k) + ) # Apply substitutions for hidden variables a = a.subs([(const.hidden_variables[key], key) for key in const.hidden_variables.keys()]) if allow_complex: - a = a.subs([(const.hidden_complex_variables[key], key) for key in const.hidden_complex_variables.keys()]) + a = a.subs( + [ + (const.hidden_complex_variables[key], key) + for key in const.hidden_complex_variables.keys() + ] + ) + + return {"_type": "sympy", "_value": str(a), "_variables": variables} - return {'_type': 'sympy', '_value': str(a), '_variables': variables} # def json_to_sympy(a, allow_complex=True): # if not '_type' in a: @@ -310,6 +324,7 @@ def sympy_to_json(a, allow_complex=True): # return convert_string_to_sympy(a['_value'], a['_variables'], allow_hidden=True, allow_complex=allow_complex) + # Added to_json() from this file: https://github.com/PrairieLearn/PrairieLearn/blob/master/question-servers/freeformPythonLib/prairielearn.py def to_json(v): """to_json(v) @@ -328,12 +343,16 @@ def to_json(v): returned without change. 
""" if np.isscalar(v) and np.iscomplexobj(v): - return {'_type': 'complex', '_value': {'real': v.real, 'imag': v.imag}} + return {"_type": "complex", "_value": {"real": v.real, "imag": v.imag}} elif isinstance(v, np.ndarray): if np.isrealobj(v): - return {'_type': 'ndarray', '_value': v.tolist(), '_dtype': str(v.dtype)} + return {"_type": "ndarray", "_value": v.tolist(), "_dtype": str(v.dtype)} elif np.iscomplexobj(v): - return {'_type': 'complex_ndarray', '_value': {'real': v.real.tolist(), 'imag': v.imag.tolist()}, '_dtype': str(v.dtype)} + return { + "_type": "complex_ndarray", + "_value": {"real": v.real.tolist(), "imag": v.imag.tolist()}, + "_dtype": str(v.dtype), + } elif isinstance(v, sympy.Expr): return sympy_to_json(v) elif isinstance(v, sympy.Matrix) or isinstance(v, sympy.ImmutableMatrix): @@ -345,8 +364,20 @@ def to_json(v): for j in range(0, num_cols): row.append(str(v[i, j])) M.append(row) - return {'_type': 'sympy_matrix', '_value': M, '_variables': s, '_shape': [num_rows, num_cols]} + return { + "_type": "sympy_matrix", + "_value": M, + "_variables": s, + "_shape": [num_rows, num_cols], + } elif isinstance(v, pandas.DataFrame): - return {'_type': 'dataframe', '_value': {'index': list(v.index), 'columns': list(v.columns), 'data': v.values.tolist()}} + return { + "_type": "dataframe", + "_value": { + "index": list(v.index), + "columns": list(v.columns), + "data": v.values.tolist(), + }, + } else: - return v \ No newline at end of file + return v diff --git a/src/problem_bank_scripts/problem_bank_scripts.py b/src/problem_bank_scripts/problem_bank_scripts.py index 37a3f0a0..127ba02e 100644 --- a/src/problem_bank_scripts/problem_bank_scripts.py +++ b/src/problem_bank_scripts/problem_bank_scripts.py @@ -101,7 +101,6 @@ def parse_body_part(pnum, md_text): level3_headers = [i for i, j in enumerate(tokens) if j.tag == "h3" if j.nesting == 1] for hd in level3_headers: - header = tokens[hd + 1].content assert ( len(header) < 20 @@ -141,7 +140,6 @@ def 
get_next_headerloc(start, tokens, header_level): close = len(tokens) for i, j in enumerate(tokens[start:]): - if j.tag == f"h{header_level}" and j.nesting == 1: # next header found close = i + start @@ -291,7 +289,6 @@ def read_md_problem(filepath): ### for x, t in enumerate(tokens): - if t.tag == "h1" and t.nesting == 1: # title # oh boy. this is going to break and it will be your fault firas. blocks["title"] = [x, x + 3] @@ -314,8 +311,12 @@ def read_md_problem(filepath): blocks[f"block{block_count}"].append(len(tokens)) # Assert statements (turn into tests!) - assert num_titles == 1, "I see {0} Level 1 Headers (#) in this file, there should only be one!".format(num_titles) - assert block_count >= 1, "I see {0} Level 2 Headers (##) in this file, there should be at least 1".format( + assert ( + num_titles == 1 + ), "I see {0} Level 1 Headers (#) in this file, there should only be one!".format(num_titles) + assert ( + block_count >= 1 + ), "I see {0} Level 2 Headers (##) in this file, there should be at least 1".format( block_count - 1 ) @@ -333,7 +334,6 @@ def read_md_problem(filepath): part_counter = 0 for k, v in blocks.items(): - rendered_part = codecs.unicode_escape_decode( mdformat.renderer.MDRenderer().render(tokens[v[0] : v[1]], mdit.options, env) )[0] @@ -394,7 +394,7 @@ def dict_to_md( md_string += md_dict.pop("title", None) md_string += md_dict.pop("preamble", None) - #TODO: Refactor this to use the elegant solution provided here: https://stackoverflow.com/a/49723101/2217577 + # TODO: Refactor this to use the elegant solution provided here: https://stackoverflow.com/a/49723101/2217577 for k, v in md_dict.items(): if k in remove_keys: @@ -505,7 +505,9 @@ def write_server_py(output_path, parsed_question): server_file = assemble_server_py(parsed_question, "prairielearn") # Deal with path differences when using PL - server_file = server_file.replace('read_csv("', 'read_csv(data["options"]["client_files_course_path"]+"/') + server_file = server_file.replace( 
+ 'read_csv("', 'read_csv(data["options"]["client_files_course_path"]+"/' + ) # Write server.py (output_path / "server.py").write_text(server_file, encoding="utf8") @@ -525,7 +527,10 @@ def process_multiple_choice(part_name, parsed_question, data_dict): html = f"""\n{parsed_question['body_parts_split'][part_name]['content']}\n\n\n""" pl_customizations = " ".join( - [f'{k} = "{v}"' for k, v in parsed_question["header"][part_name]["pl-customizations"].items()] + [ + f'{k} = "{v}"' + for k, v in parsed_question["header"][part_name]["pl-customizations"].items() + ] ) # PL-customizations html += f"""\n""" @@ -539,7 +544,6 @@ def process_multiple_choice(part_name, parsed_question, data_dict): ## Note: `|@`` gets converted into `{{` and `@|`` gets converted to `}}` by `replace_tags()` for a in data_dict["params"][f"{part_name}"].keys(): - if "ans" in a: if data_dict["params"][f"{part_name}"][f"{a}"]["feedback"]: feedback = f"|@ params.{part_name}.{a}.feedback @|" @@ -550,7 +554,7 @@ def process_multiple_choice(part_name, parsed_question, data_dict): value = f"|@ params.{part_name}.{a}.value @|" ## Hack to remove feedback for Dropdown questions - if parsed_question["header"][part_name]['type'] == 'dropdown': + if parsed_question["header"][part_name]["type"] == "dropdown": html += f"\t {value} {units} \n" else: html += f"\t {value} {units} \n" @@ -571,7 +575,9 @@ def process_dropdown(part_name, parsed_question, data_dict): Returns: html: A string of HTML that is part of the final PL question.html file. 
""" - html = process_multiple_choice(part_name, parsed_question, data_dict).replace("-multiple-choice", "-dropdown") + html = process_multiple_choice(part_name, parsed_question, data_dict).replace( + "-multiple-choice", "-dropdown" + ) return html @@ -590,7 +596,10 @@ def process_number_input(part_name, parsed_question, data_dict): html = f"""\n\t{parsed_question['body_parts_split'][part_name]['content']}\t\n\n\n""" pl_customizations = " ".join( - [f'{k} = "{v}"' for k, v in parsed_question["header"][part_name]["pl-customizations"].items()] + [ + f'{k} = "{v}"' + for k, v in parsed_question["header"][part_name]["pl-customizations"].items() + ] ) # PL-customizations html += f"""\n""" @@ -609,7 +618,9 @@ def process_checkbox(part_name, parsed_question, data_dict): html: A string of HTML that is part of the final PL question.html file. """ # start with the MCQ version and then...change things for checkbox questions - html = process_multiple_choice(part_name, parsed_question, data_dict).replace("-multiple-choice", "-checkbox") + html = process_multiple_choice(part_name, parsed_question, data_dict).replace( + "-multiple-choice", "-checkbox" + ) return html @@ -628,7 +639,10 @@ def process_symbolic_input(part_name, parsed_question, data_dict): html = f"""\n\t{parsed_question['body_parts_split'][part_name]['content']}\t\n\n\n""" pl_customizations = " ".join( - [f'{k} = "{v}"' for k, v in parsed_question["header"][part_name]["pl-customizations"].items()] + [ + f'{k} = "{v}"' + for k, v in parsed_question["header"][part_name]["pl-customizations"].items() + ] ) # PL-customizations html += f"""\n""" @@ -646,7 +660,10 @@ def process_longtext(part_name, parsed_question, data_dict): html: A string of HTML that is part of the final PL question.html file. 
""" pl_customizations = " ".join( - [f'{k} = "{v}"' for k, v in parsed_question["header"][part_name]["pl-customizations"].items()] + [ + f'{k} = "{v}"' + for k, v in parsed_question["header"][part_name]["pl-customizations"].items() + ] ) # PL-customizations html = f"""\n{parsed_question['body_parts_split'][part_name]['content']}\n\n\n""" @@ -667,7 +684,10 @@ def process_file_upload(part_name, parsed_question, data_dict): html: A string of HTML that is part of the final PL question.html file. """ pl_customizations = " ".join( - [f'{k} = "{v}"' for k, v in parsed_question["header"][part_name]["pl-customizations"].items()] + [ + f'{k} = "{v}"' + for k, v in parsed_question["header"][part_name]["pl-customizations"].items() + ] ) # PL-customizations html = f"""\n{parsed_question['body_parts_split'][part_name]['content']}\n\n\n""" @@ -697,7 +717,10 @@ def process_file_editor(part_name, parsed_question, data_dict): html: A string of HTML that is part of the final PL question.html file. """ pl_customizations = " ".join( - [f'{k} = "{v}"' for k, v in parsed_question["header"][part_name]["pl-customizations"].items()] + [ + f'{k} = "{v}"' + for k, v in parsed_question["header"][part_name]["pl-customizations"].items() + ] ) # PL-customizations html = f"""\n{parsed_question['body_parts_split'][part_name]['content']}\n\n\n""" @@ -706,6 +729,7 @@ def process_file_editor(part_name, parsed_question, data_dict): return replace_tags(html) + def process_string_input(part_name, parsed_question, data_dict): """Processes markdown format of string-input questions and returns PL HTML Args: @@ -717,7 +741,10 @@ def process_string_input(part_name, parsed_question, data_dict): html: A string of HTML that is part of the final PL question.html file. 
""" pl_customizations = " ".join( - [f'{k} = "{v}"' for k, v in parsed_question["header"][part_name]["pl-customizations"].items()] + [ + f'{k} = "{v}"' + for k, v in parsed_question["header"][part_name]["pl-customizations"].items() + ] ) # PL-customizations html = f"""\n{parsed_question['body_parts_split'][part_name]['content']}\n\n\n""" @@ -726,6 +753,7 @@ def process_string_input(part_name, parsed_question, data_dict): return replace_tags(html) + def replace_tags(string): """Takes in a string with tags: |@ and @| and returns {{ and }} respectively. This is because Python strings can't have double curly braces. @@ -735,7 +763,9 @@ def replace_tags(string): Returns: string (str): returns string with tags replaced with curly braces. """ - return string.replace("|@|@", "{{{").replace("@|@|", "}}}").replace("|@", "{{").replace("@|", "}}") + return ( + string.replace("|@|@", "{{{").replace("@|@|", "}}}").replace("|@", "{{").replace("@|", "}}") + ) def remove_correct_answers(data2_dict): @@ -778,7 +808,7 @@ def process_attribution(attribution): """ with importlib.resources.open_text("problem_bank_scripts", "attributions.json") as file: - possible_attributions = json.load(file) + possible_attributions = json.load(file) try: attribution_text = possible_attributions[attribution] @@ -793,7 +823,6 @@ def process_attribution(attribution): def process_question_md(source_filepath, output_path=None, instructor=False): - try: pathlib.Path(source_filepath) except: @@ -809,7 +838,9 @@ def process_question_md(source_filepath, output_path=None, instructor=False): if "source" in source_filepath: output_path = pathlib.Path(source_filepath.replace("source", path_replace)) else: - raise NotImplementedError("Check the source filepath; it does not have 'source' in it!! ") + raise NotImplementedError( + "Check the source filepath; it does not have 'source' in it!! " + ) else: ## TODO: Make this a bit more robust, perhaps by switching encodings!? 
output_path = pathlib.Path(output_path) @@ -863,21 +894,21 @@ def str_presenter(dumper, data2): repl_keys = {k.replace("_", "."): k for k in list(data2_sanitized_flattened.keys())} text = dict_to_md( - body_parts, - remove_keys=[ - "Rubric", - "Solution", - "Comments", - "pl-submission-panel", #FIXME: This will not remove level 3 headings because it's all a string! - "pl-answer-panel", #FIXME: This will not remove level 3 headings because it's all a string! - ], - ) + body_parts, + remove_keys=[ + "Rubric", + "Solution", + "Comments", + "pl-submission-panel", # FIXME: This will not remove level 3 headings because it's all a string! + "pl-answer-panel", # FIXME: This will not remove level 3 headings because it's all a string! + ], + ) for k, v in repl_keys.items(): text = text.replace(k, v) # Update the YAML header to add substitutions - header.update({"myst": {"substitutions": data2_sanitized_flattened} }) + header.update({"myst": {"substitutions": data2_sanitized_flattened}}) # Update the YAML header to add substitutions, unsort it, and process for file header_yml = yaml.dump(header, sort_keys=False, allow_unicode=True) @@ -953,18 +984,24 @@ def str_presenter(dumper, data2): # Move image assets files_to_copy = header.get("assets") if files_to_copy: - [copy2(pathlib.Path(source_filepath).parent / fl, output_path.parent) for fl in files_to_copy] + [ + copy2(pathlib.Path(source_filepath).parent / fl, output_path.parent) + for fl in files_to_copy + ] # Move autograde py test files files_to_copy = header.get("autogradeTestFiles") if files_to_copy: pl_path = output_path.parent / "tests" pl_path.mkdir(parents=True, exist_ok=True) - [copy2(pathlib.Path(source_filepath).parent / "tests" / fl, pl_path / fl) for fl in files_to_copy if (instructor or fl=="starter_code.py")] + [ + copy2(pathlib.Path(source_filepath).parent / "tests" / fl, pl_path / fl) + for fl in files_to_copy + if (instructor or fl == "starter_code.py") + ] def process_question_pl(source_filepath, 
output_path=None): - try: pathlib.Path(source_filepath) except: @@ -977,7 +1014,9 @@ def process_question_pl(source_filepath, output_path=None): if "source" in source_filepath: output_path = pathlib.Path(source_filepath.replace("source", path_replace)).parent else: - raise NotImplementedError("Check the source filepath; it does not have 'source' in it!! ") + raise NotImplementedError( + "Check the source filepath; it does not have 'source' in it!! " + ) else: ## TODO: It's annoying that here output_path.parent is used, but for md problems, it's just output_path output_path = pathlib.Path(output_path).parent @@ -1007,18 +1046,20 @@ def process_question_pl(source_filepath, output_path=None): # Question Preamble preamble = parsed_q["body_parts"].get("preamble", None) - #TODO: Remove Debugging print statement - #print(f"premable: {preamble}") - + # TODO: Remove Debugging print statement + # print(f"premable: {preamble}") + if preamble: - question_html = f"\n\n{ preamble }\n\n\n\n" + question_html = ( + f"\n\n{ preamble }\n\n\n\n" + ) else: question_html = f"" # Useful info panel useful_info = parsed_q["body_parts"].get("Useful_info", None) - #TODO: When PrairieLearn updates to BootStrap5, update this box as described here: https://github.com/open-resources/problem_bank_scripts/issues/30#issuecomment-1177101211 + # TODO: When PrairieLearn updates to BootStrap5, update this box as described here: https://github.com/open-resources/problem_bank_scripts/issues/30#issuecomment-1177101211 if useful_info: question_html += f"""