diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000..351a829 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,11 @@ +FROM mcr.microsoft.com/devcontainers/anaconda:0-3 + +# Copy environment.yml (if found) to a temp location so we update the environment. Also +# copy "noop.txt" so the COPY instruction does not fail if no environment.yml exists. +COPY environment.yml* .devcontainer/noop.txt /tmp/conda-tmp/ +RUN if [ -f "/tmp/conda-tmp/environment.yml" ]; then umask 0002 && /opt/conda/bin/conda env update -n base -f /tmp/conda-tmp/environment.yml; fi \ + && rm -rf /tmp/conda-tmp + +# [Optional] Uncomment this section to install additional OS packages. +# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ +# && apt-get -y install --no-install-recommends diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..b2da107 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,33 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/anaconda +{ + "name": "Anaconda (Python 3)", + "build": { + "context": "..", + "dockerfile": "Dockerfile" + }, + "customizations": { + "vscode": { + "extensions": [ + "ExecutableBookProject.myst-highlight", + "vsls-contrib.codetour", + "searKing.preview-vscode" + ] + } + }, + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + "postCreateCommand": "pip install -r requirements.txt; pip install .", + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
+ // "remoteUser": "root" +} diff --git a/.devcontainer/noop.txt b/.devcontainer/noop.txt new file mode 100644 index 0000000..dde8dc3 --- /dev/null +++ b/.devcontainer/noop.txt @@ -0,0 +1,3 @@ +This file copied into the container along with environment.yml* from the parent +folder. This file is included to prevents the Dockerfile COPY instruction from +failing if no environment.yml is found. \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..f33a02c --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,12 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for more information: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates +# https://containers.dev/guide/dependabot + +version: 2 +updates: + - package-ecosystem: "devcontainers" + directory: "/" + schedule: + interval: weekly diff --git a/.tours/overview.tour b/.tours/overview.tour new file mode 100644 index 0000000..cc9e4c0 --- /dev/null +++ b/.tours/overview.tour @@ -0,0 +1,114 @@ +{ + "$schema": "https://aka.ms/codetour-schema", + "title": "overview", + "steps": [ + { + "title": "Where to start", + "file": "setup.py", + "description": "setuptools is a Python package for installing programs. This file `setup.py` is special that `pip` will look for. ", + "line": 1 + }, + { + "title": "how it finds modules", + "file": "setup.py", + "description": "here it defines what to be installed, each of these refers to a folder with an `__init__.py` or a `.py` file. 
", + "line": 6 + }, + { + "title": "dependencies", + "file": "setup.py", + "description": "and here it defines the dependencies to install first", + "line": 10 + }, + { + "title": "Console entry points, aka terminal commands", + "file": "setup.py", + "description": "this is what makes it have CLI programs available this says that the `cspt` command will be found in the `cspt/cli.py` file in the `cspt_cli` function. let's go look at that function next.", + "line": 13 + }, + { + "title": "top group function", + "file": "cspt/cli.py", + "description": "This funciton is not very interesting, but it defines the entrypoint that uses the [click package](https://click.palletsprojects.com/en/8.1.x/commands/)", + "line": 15 + }, + { + "title": "first subcommand", + "file": "cspt/cli.py", + "description": "these lines make the following function a sub command of the `cspt_cli` function's command (`cspt`)", + "line": 23 + }, + { + "title": "another subcommand", + "file": "cspt/cli.py", + "description": "here in this function, it calls other functions provided in the library with a little bit of logic to parse the options. \n\nthen it uses `click`'s `echo` method to print the info to the terminal. ", + "line": 59 + }, + { + "title": "tracing a function", + "file": "cspt/cli.py", + "description": "here it is not clear where this function comes from, but if we go to the top of the file, we can learn more. We will go there next. ", + "line": 63 + }, + { + "title": "imported functions", + "file": "cspt/cli.py", + "description": "From here we see that the `fetch_to_checklist` is defined in the `tasktracking.py` let's go there next", + "line": 10 + }, + { + "title": "a core function", + "file": "cspt/tasktracking.py", + "description": "here it is, this function takes in the date and assignment type", + "line": 105 + }, + { + "title": "docstring", + "file": "cspt/tasktracking.py", + "description": "here you can see that this line is incomplete. 
If no one has yet, you could make a suggestion on the PR to fix it. ", + "line": 113 + }, + { + "title": "requests", + "file": "cspt/tasktracking.py", + "description": "here is that line we talked about in class that fetches the course website. ", + "line": 119 + }, + { + "title": "regex", + "file": "cspt/tasktracking.py", + "description": "the majority of the work this function does is use regular expressions do clean up the text", + "line": 123 + }, + { + "title": "return", + "file": "cspt/tasktracking.py", + "description": "then it returns the actual text", + "line": 126 + }, + { + "title": "shared variable", + "file": "cspt/tasktracking.py", + "description": "this main URL is used in a lot of places, so I made it a variable", + "line": 117 + }, + { + "title": "imported variable", + "file": "cspt/tasktracking.py", + "description": "this variable is stored in the config.py file so that I only have to change it in one place, despite using it in many. ", + "line": 7 + }, + { + "title": "variables to update each semester", + "file": "cspt/config.py", + "description": "This is what I have to update each semester", + "line": 4 + }, + { + "title": "example options", + "file": "cspt/cli.py", + "description": "for this function, I wanted the options to be flags. ", + "line": 26 + } + ] +} \ No newline at end of file diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..bd82d8d --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +recursive-include cspt/assets * \ No newline at end of file diff --git a/README.md b/README.md index bd4923f..0241020 100644 --- a/README.md +++ b/README.md @@ -3,10 +3,10 @@ helper code for class If you are trying to use this while finishing an incomplete, use the correct release. -## To install: +## To install from clone ``` -git clone https://github.com/introcompsys/courseutils +git clone https://github.com/compsys-progtools/courseutils cd courseutils pip install . ``` @@ -14,6 +14,26 @@ pip install . 
You may need to use pip3 instead of pip. On Windows, use GitBash for the clone and then Anaconda Prompt for install +## To install direct + +``` +pip install git+ https://github.com/compsys-progtools/courseutils + +``` + +You may need to use pip3 instead of pip. +On Windows, use GitBash for the clone and then Anaconda Prompt for install + + +## See the docs + +``` +pip install -r requirements.txt +cd docs +make html +``` + + ## To upgrade: ``` cd courseutils diff --git a/badges.py b/badges.py deleted file mode 100644 index 8130255..0000000 --- a/badges.py +++ /dev/null @@ -1,197 +0,0 @@ -import click -from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PublicKey -from cryptography.exceptions import InvalidSignature -import json - -gh_approvers = ['brownsarahm','ascott20','marcinpawlukiewicz'] - -badge_types = ['review', 'practice', 'explore', 'experience', - 'build', 'lab','community'] - - -@click.command() -@click.option('-b','--badge-name',help="name of the badge formated like type.YYYY-MM-DD") -@click.option('-a','--approver',help='github username of the badge issuer') -@click.option('-s','--signature',help = "the signature hash to be checked") - - -def verify_badge(badge_name,approver,signature): - ''' - check that file path/name matches included key - - .. 
deprecated:: 23.09 - receipts are no longer in use - ''' - # create expected message - badge_bytes = badge_name.encode('utf-8') # bytes type - - # convert signature - signature_bytes = bytes.fromhex(signature) - - # read public key for the approver - # TODO: fix this to read from installed package data - # TODO: make install save the file - with open(approver,'rb') as f: - public_bytes = f.read() # read file - - signer_key = Ed25519PublicKey.from_public_bytes(public_bytes) - # Raises InvalidSignature if verification fails - signer_key.verify(signature_bytes, badge_bytes) - - click.echo(badge_name + ' Verified') - - - -@click.command() -@click.option('-j','--json-output',default='badges.json',type=click.File('r'), - help='json file to parse') -@click.option('-f','--file-out',default=None,type=click.File('w'), - help='to write to a file, otherwise will echo') - -def process_badges(json_output,file_out = None): - ''' - process gh cli json - - .. deprecated:: 23.09 - receipts are no longer in use - this may be a valid starting point or useful reference for different processing though - ''' - - with open(json_output, 'r') as f: - PR_list = json.load(f) - - #filter for ones with reviews - reviewed = [(pr['title'], pr['latestReviews'][0]) - for pr in PR_list if pr['latestReviews']] - # filter to only process approved ones latestReviews.state - # extract title, latestReviews.author.login, latestReviews.body, - logged_badges = [(title,review['author']['login'], review['body']) - for title,review in reviewed if review['state'] == 'APPROVED'] - - # iterate approved - verified_badges = [] - questioned_badges = [] - for title, reviewer, body in logged_badges: - signature = [s for s in body.split(' ') if len(s)==128] - # verify - try: - verify_badge(title, reviewer, signature) - verified_badges.append(title) - except InvalidSignature: - questioned_badges.append(title) - - - # add to log - report = "verified badges:\n" - report += '\n -' + '\n -'.join(verified_badges) - if 
questioned_badges: - report += '\n\nquestioned badges: \n' - report += '\n -' + '\n -'.join(questioned_badges) - - if file_out: - with open(file_out,'w') as f: - f.write() - else: - click.echo(report) - # TODO: pair this with a commit to a "badges" branch, so it doesn't create conflicts - - - # review_df = badges_df['latestReviews'].apply(pd.Series)[0].apply(pd.Series) - # author = review_df['author'].apply(pd.Series) - - - -@click.command() -@click.argument('json-output', type =click.Path(exists=True)) -@click.option('-f','--file-out',default=None, - help='to write to a file, otherwise will use stdout') -def cli_get_approved_titles(json_output,file_out = None): - ''' - list PR titles from json or - to use std in that have been approved by an official approver - - gh pr list -s all --json title,latestReviews - - - ''' - - get_approved_titles(json_output,file_out) - - -def get_approved_titles(json_output, file_out = None): - ''' - process gh cli json - - Parameters - ---------- - json_output : filename - file generated from `gh pr list -s all --json title,latestReviews ` - file_out : file name - file to be generated and store the output - ''' - - with open(json_output, 'r') as f: - PR_list = json.load(f) - - #filter for ones with reviews - reviewed = [(pr['title'], pr['latestReviews'][0]) - for pr in PR_list if pr['latestReviews']] - # filter to only process approved ones latestReviews.state - # extract title, latestReviews.author.login, latestReviews.body, - logged_badges = [title for title,review in reviewed - if review['state'] == 'APPROVED' and - review['author']['login']in gh_approvers] - - - - if file_out: - titles_by_type = {bt:[t for t in logged_badges if bt in t.lower()] - for bt in badge_types} - verified_by_type = '\n' + '\n'.join(['\n## '+bt + ' ('+str(len(bl)) +')' +'\n- '+'\n- '. 
join(bl) - for bt,bl in titles_by_type.items() if len(bl)>0 ]) - valid_badges = [vi for v in titles_by_type.values() for vi in v] - not_typed = [p for p in logged_badges if not(p in valid_badges) ] - with open(file_out,'w') as f: - f.write('## all approved \n\n') - f.write('- ' + '\n- '.join(logged_badges)) - f.write(verified_by_type) - - if len(not_typed) >0 : - f.write('\n\n## Approved, not badges') - f.write('\n- ' + '\n - '.join(not_typed)) - f.write('\n') - else: - click.echo('\n'.join(logged_badges)) - -block_template = ''' -# {type} -''' - - -# -# Put this in student repos -# on: -# pull_request_review: -# types: [submitted] - -# jobs: -# approved: -# if: github.event.review.state == 'approved' -# runs-on: ubuntu-latest -# steps: -# - run: | -# # figure out how to write to file? - - -# on: -# pull_request_target: -# types: -# - closed - -# jobs: -# if_merged: -# if: github.event.pull_request.merged == true -# runs-on: ubuntu-latest -# steps: -# - run: | -# gh pr list --state all --json title,latestReviews >>tmp - # process json to extract badges diff --git a/config.py b/config.py deleted file mode 100644 index 6bc5270..0000000 --- a/config.py +++ /dev/null @@ -1,5 +0,0 @@ - -repo = 'http://introcompsys.github.io/fall2023/' -base_url = 'https://raw.githubusercontent.com/introcompsys/fall2023/main/_' - - diff --git a/cspt/__init__.py b/cspt/__init__.py new file mode 100644 index 0000000..7981ea5 --- /dev/null +++ b/cspt/__init__.py @@ -0,0 +1,10 @@ + +from .badges import badges_by_type, process_pr_json, generate_report +from .activities import files_from_dict +from .notes import process_export,init_activity_files +from .sitetools import generate_csv_from_index +from .tasktracking import calculate_badge_date, fetch_to_checklist + +from .badges import field_parser + +from .lesson import Lesson, Block \ No newline at end of file diff --git a/cspt/activities.py b/cspt/activities.py new file mode 100644 index 0000000..9fcc1eb --- /dev/null +++ b/cspt/activities.py 
@@ -0,0 +1,25 @@ +import click +import yaml + + + + +def files_from_dict(files_to_create, add_newline = True): + ''' + given a dictionary, create toy files named as keys with content as values + by default adds a new line + + Parameters + ---------- + files_to_create : dictionary + keys are names of files to create, values are contents + add_newline : bool + add a new line at the end of each file + + + ''' + for file_name, contents in files_to_create.items(): + if add_newline: + contents += '\n' + with open(file_name,'w') as f: + f.write(contents) \ No newline at end of file diff --git a/cspt/assets/bonus_unstuck.md b/cspt/assets/bonus_unstuck.md new file mode 100644 index 0000000..554c62b --- /dev/null +++ b/cspt/assets/bonus_unstuck.md @@ -0,0 +1,7 @@ +# Git-ing unstuck Bonus + +for each item in the checklist below, when you are about to complete it, hover to create a new issue +then use a closing keyword in the commit that does the task to close that new issue. + +- [ ] fix a commit added to the main branch pre-emptively +- [ ] rebase a PR \ No newline at end of file diff --git a/.tours/plclass.tour b/cspt/assets/plclass.tour similarity index 100% rename from .tours/plclass.tour rename to cspt/assets/plclass.tour diff --git a/templates/plworksheet.md b/cspt/assets/plworksheet.md similarity index 100% rename from templates/plworksheet.md rename to cspt/assets/plworksheet.md diff --git a/cspt/badges.py b/cspt/badges.py new file mode 100644 index 0000000..b1119fe --- /dev/null +++ b/cspt/badges.py @@ -0,0 +1,206 @@ + +from datetime import datetime +import re +from numpy import prod as npprod + +from .config import GH_APPROVERS, EARLY_BIRD_DEADLINE +badge_types = ['experience','review','practice','explore','build','lab','community','prepare'] +dated_types = ['experience','review','practice'] +supported_dates_re = [ + re.compile('20[2-9][0-9]-[0-1][0-9]-[0-3][0-9]'), # YYYY-DD-MM + re.compile('(\d{1,2})-(\d{1,2})-(\d{2})'), # MM-DD-YY and M-DD-YY + 
re.compile('(\d{4})\s(\d{2})\s(\d{2})'), # YYYY MM DD (Space separated) + re.compile('(\d{1,2})/(\d{1,2})/(\d{2})') # M/DD/YY + # Add other regex expressions as needed +] + +def is_title_gradeable(pr_title,errortype=False): + ''' + this defines if a pr title is good, it contains exactly one badge type word and + a well date if experience, review or practice, in ISO (YYYY-MM-DD, YYYY MM DD) or + M/DD/YY, MM-DD-YY or M-DD-YY + + Parameters + ---------- + title : string + title of pr + errortype : bool + if true, return bool + message + + Return + ------ + good : bool + true if good + + + ''' + badge_type_included = sum([bt in pr_title.lower() for bt in badge_types]) ==1 + dated_type = sum([bt in pr_title.lower() for bt in dated_types])>0 + + if dated_type: + # Search for supported dates in PR title + date_included = any(date_regex.search(pr_title) for date_regex in supported_dates_re) + else: + date_included = True + + good = badge_type_included and date_included + + if errortype: + if good: + return good,'' + else: + msg = '' + if not badge_type_included: + msg = 'missing a badge type keyword. 
' + + if not(date_included) and dated_type: + msg += 'missing or poorly formatted date' + return good,msg + else: + return good + + +def badges_by_type(approved_prs): + ''' + parse list of approved PRs to filter for badges + + Parameters + ---------- + approved_prs : list + list of titles or list of dicts with 'title' as a key + + Returns + ------- + badges_by_type: dict + dict with keys as badge types, keys as input type + ''' + if type(approved_prs[0])==str: + badges_by_type = {btype:[title for title in approved_prs if btype in title.lower()] + for btype in badge_types} + else: + badges_by_type = {btype:[pr for pr in approved_prs if btype in pr['title'].lower()] + for btype in badge_types} + + badges_by_type.pop('prepare') + return badges_by_type + +def generate_report(approved_prs,): + titles_by_type = badges_by_type(approved_prs) + verified_by_type = '\n' + '\n'.join(['\n## '+bt + ' ('+str(len(bl)) +')' +'\n- '+'\n- '. join(bl) + for bt,bl in titles_by_type.items() if len(bl)>0 ]) + valid_badges = [vi for v in titles_by_type.values() for vi in v] + not_typed = [p for p in approved_prs if not(p in valid_badges) ] + + report_parts =[ '## all approved \n\n', + '- ' + '\n- '.join(approved_prs), + verified_by_type, + '\n\n## Approved, not badges', + '\n- ' + '\n - '.join(not_typed)] + + return '\n'.join(report_parts) + +field_parser = {'title':lambda t:t, + 'latestReviews': lambda lr: lr[-1], + 'createdAt':lambda d: d[:-1] + } + + + +def check_approved(pr, any=False): + ''' + check a pr dictionary for approval by default only approvers count + + Parameters + ---------- + pr : dict + single pr info from `gh pr list --json` + any: bool + if True consider any approavl + + Returns + ------- + approved : bool + True if approved by an eligible approver + ''' + if any: + return pr['latestReviews']['state'] == 'APPROVED' + else: + return (pr['latestReviews']['state'] == 'APPROVED' and + pr['latestReviews']['author']['login']in GH_APPROVERS) + + +filter_fx = {'approved': 
check_approved, + 'early':lambda pr: datetime.fromisoformat(pr['createdAt'])< EARLY_BIRD_DEADLINE, + 'good_title': lambda pr: is_title_gradeable(pr['title']), + 'bad_title': lambda pr: not(is_title_gradeable(pr['title'])) + } + +def process_pr_json(json_output,numbered=False, + titles_only=False, + filter_list = ['approved','good_title'], + custom_field_parser = {}, + custom_filters = {}, + filter_mode = 'filter'): + ''' + process gh cli json + + Parameters + ---------- + json_output : filename + dictionary generated from `gh pr list -s all --json title,latestReviews` and optionally + additional fields + numbered : bool + return with numbers as keys in dict + titles_only : bool + return titles as list + custom_field_parser : dict + dictionary with keys matching fields in the json result and values as functions + to apply to each feild before use, overwrites defaults in field_parser + custom_filters : dict + dictionary with keys as names, values that are boolean functions + filter_mode : string {'filter'} + filter to keep only ones that pass each filer or 'group' to group prs by which filter they pass + + + Returns + ------- + filtered_prs : iterable + list or dict as per value above if a flag is used or dict that is like the input json + ''' + # apply user updates + field_parser.update(custom_field_parser) + filter_fx.update(custom_filters) + filter_list.extend(list(custom_filters.keys())) + # with open(json_output, 'r') as f: + # PR_list = json.load(f) + + #filter for ones with nonempty reviews; + # process each feild according to spec or pass value if spec not defined + reviewed = [{k: field_parser.get(k,lambda mv: mv)(v) for k,v in pr.items()} + for pr in json_output if pr['latestReviews']] + # apply all filters and keep only the ones that pass all filters + if filter_mode =='filter': + # list of prs where all filters are true + filtered_prs = [pr for pr in reviewed + if npprod([filter_fx[f](pr) for f in filter_list])] + + if filter_mode == 'grouped': + # dict 
with keys per filter, value is list of prs that match that filter + filtered_prs = {f:[pr for pr in reviewed if filter_fx[f](pr)] + for f in filter_list} + + + # return version requested + if numbered: + return {pr['number']:pr['title'] for pr in filtered_prs} + + if titles_only: + return [pr['title'] for pr in filtered_prs] + + return filtered_prs + +block_template = ''' +# {type} +''' + + diff --git a/cspt/cli.py b/cspt/cli.py new file mode 100644 index 0000000..74d5d4f --- /dev/null +++ b/cspt/cli.py @@ -0,0 +1,366 @@ +import click +import json +import yaml +import os +from datetime import date +from .badges import badges_by_type, process_pr_json, generate_report,is_title_gradeable +from .activities import files_from_dict +from .notes import process_export,init_activity_files +from .sitetools import generate_csv_from_index +from .tasktracking import calculate_badge_date, fetch_to_checklist +from .config import EARLY_BIRD_DEADLINE + +from .lesson import Lesson + +@click.group() +def cspt_cli(): + pass +# -------------------------------------------------------------- +# Manage badges +# -------------------------------------------------------------- + + +@cspt_cli.command() +@click.option('--type', 'assignment_type', default=None, + help='type can be prepare, review, or practice') +@click.option('--prepare',is_flag=True) +@click.option('--review',is_flag=True) +@click.option('--practice',is_flag=True) +def getbadgedate(assignment_type=None,prepare=False,review=False,practice=False): + ''' + cli for calculate badge date + ''' + # set assignment date from flags if not passed + if not(assignment_type): + if prepare: + assignment_type='prepare' + + if review: + assignment_type ='review' + + if practice: + assignment_type='practice' + + click.echo(calculate_badge_date(assignment_type)) + + +@cspt_cli.command() +@click.option('--type', 'assignment_type', default='prepare', + help='type can be {prepare, review, or practice}; default prepare') +@click.option('--date', 
default=None, + help='date should be YYYY-MM-DD of the tasks you want; default most recently posted') + +def getassignment(date, assignment_type = 'prepare'): + ''' + get the assignment text formatted + + ''' + + if not(date): + date = calculate_badge_date(assignment_type) + + + md_activity = fetch_to_checklist(date, assignment_type) + click.echo( md_activity) + + +@cspt_cli.command() +@click.argument('passed_date') + +def parsedate(passed_date): + ''' + process select non dates + ''' + passed_date_clean = passed_date.strip().lower() + + if passed_date_clean == "today": + click.echo(date.today().isoformat()) + else: + click.echo(passed_date_clean) + + +# -------------------------------------------------------------- +# Site tools +# -------------------------------------------------------------- + + +@cspt_cli.command() +@click.argument('tldpath') + +def kwlcsv(tldpath = '.'): + ''' + generate the activity file csv file for the site building from site + located at the TLDPATH (directory of the top level of the course site) + ''' + activity_types = ['review','prepare','practice'] + ac_dir_list = [os.path.join(tldpath,'_'+ac_type) for ac_type in activity_types] + generate_csv_from_index(path_list = ac_dir_list,sub_re='.*\.md',file_out='kwl.csv') + + +# -------------------------------------------------------------- +# Course standing +# -------------------------------------------------------------- + + + +@cspt_cli.command() +@click.argument('json-output', type =click.File('r')) +@click.option('-f','--file-out',default=None, + help='to write to a file, otherwise will use stdout') +@click.option('-r','--report',is_flag=True, + help ='process approved badges by type to a more descriptive report') +@click.option('-s','--soft',is_flag= True, + help = 'soft check, skip title check') +def progressreport(json_output,file_out, report,soft): + ''' + list PR titles from json or - to use std in that have been approved by an official approver + + `gh pr list -s all --json 
title,latestReviews` + + + ''' + json_output = json.load(json_output) + + selected_filters = ['approved'] + if not(soft): + selected_filters.append('good_title') + + approved_prs = process_pr_json(json_output,titles_only=True,filter_list=selected_filters) + + if approved_prs: # not empty + if report: + report = generate_report(approved_prs) + else: + report = '\n'.join(approved_prs) + + + if file_out: + with open(file_out,'w') as f: + f.write(report) + else: + click.echo(report) + else: + click.echo('There are no approved badges') + + +@cspt_cli.command() +@click.option('-t','--pr-title', default = None, + help = 'title to check as string') +@click.option('-g','--ghpr',type =click.File('r'), + help = 'pass title as file, or gh pr view output through pipe') +def titlecheck(pr_title,ghpr): + ''' + check a single title + ''' + if not(ghpr) and not(pr_title): + click.echo('a title to test is required, see --help') + return + + if ghpr: + text_in = ghpr.read_lines() + # drop from the last # to end of the first line(there is probably a better way, to do this) + pr_title = ''.join(text_in[0].split('#')[:-1]) + + good, error_text = is_title_gradeable(pr_title,errortype=True) + + if good: + click.echo('good') + else: + click.echo(error_text) + + + + +@cspt_cli.command() +@click.argument('json-output', type =click.File('r'),) + +def prfixlist(json_output,): + ''' + check json output for titles that will not be counted as a badge + this will include `gh pr list -s all --json title,latestReviews` + + ''' + gh_dict = json.load(json_output) + + nums_avail = 'number' in gh_dict[0].keys() + + bad_pr_titles = process_pr_json(gh_dict,numbered=nums_avail, + titles_only=True, + filter_list = ['bad_title'], + custom_field_parser = {}, + custom_filters = {}, + filter_mode = 'filter') + + # do nothing if there are none to fix + if bad_pr_titles: + if nums_avail: + # make list of strings + bad_pr_titles = [str(k) + ' ' + v for k,v in bad_pr_titles.items()] + + 
click.echo('\n'.join(bad_pr_titles)) + + + + + + +@cspt_cli.command() +@click.argument('gh-cli-output', type =click.File('r')) +@click.option('-m','--message', + help ='messsage to prepend to output') +def mkchecklist(gh_cli_output,message): + ''' + transform input file to a gh markdown checklist, if the first + characters of eaech line are numbers, make them links + ''' + gh_cli_list = gh_cli_output.read().strip().split('\n') + + gh_cli_list = [ghline.strip() for ghline in gh_cli_list] + + if gh_cli_list[0][0] in '0123456789': + joiner = '\n- [ ] #' + else: + joiner = '\n- [ ] ' + + checklist = joiner.join(['']+gh_cli_list) + if message: + out_message = message + '\n\n' + checklist + else: + out_message = checklist + click.echo(out_message) + + +@cspt_cli.command() +@click.argument('json-output', type =click.File('r')) +def earlybonus(json_output): + ''' + check if early bonus is met from output of + `gh pr list -s all --json title,latestReviews,createdAt` and return + a message. input from either a file or -for stdin + + ''' + json_output = json.load(json_output) + approved_submitted_early = process_pr_json(json_output,titles_only=True, + filter_list= ['approved','early']) + + if approved_submitted_early: + eligble_by_type = badges_by_type(approved_submitted_early) + + earned = len(eligble_by_type['review']) + len(eligble_by_type['practice']) >=6 + + earned_text = {True:'was',False:'was not'} + message = 'early bird bonus ' + earned_text[earned] + ' earned' + click.echo(message) + else: + + click.echo('there were no approved early badges') + +# -------------------------------------------------------------- +# Instructor commands +# -------------------------------------------------------------- + + +@cspt_cli.command() +@click.argument('lesson-file',type=click.File('r')) + +def exportprismia(lesson_file): + ''' + export prismia version of the content + ''' + lesson = Lesson(lesson_file.read()) + prismia_text = lesson.get_prismia() + click.echo(prismia_text) + + + 
+@cspt_cli.command +@click.argument('source-yaml',) +@click.option('-n','--add-newline',is_flag=True,default=False, + help='add new line as last character') + +def createtoyfiles(source_yaml,add_newline): + ''' + from a yaml source file create a set of toy files with file names as the keys + and the values as the content of each file + + ''' + # TODO: check file type and use different readers to accepts files other than yaml + # read file + files_to_create = yaml.safe_load(source_yaml) + + # call creator + files_from_dict(files_to_create,add_newline) + + +@cspt_cli.command() +@click.argument('lesson-file',type=click.File('r')) + +def exporthandout(lesson_file): + ''' + export prismia version of the content + ''' + lesson = Lesson(lesson_file.read()) + handout_text = lesson.get_handout() + click.echo(handout_text) + + +@cspt_cli.command() +@click.argument('lesson-file',type=click.File('r')) +@click.option('-d','--ac-date',default=None, + help = 'date to use for writing file out') +@click.option('-p','--path',default='.', + help= 'base path of where to save ac date files into subfolders') +@click.option('--prepare', is_flag=True, + help= 'do prepare (otherwise do review & practice)') + +def exportac(lesson_file,ac_date,path,prepare): + ''' + export ac files for site from lesson + ''' + # read in content + lesson = Lesson(lesson_file.read()) + + # process date + if not(ac_date): + if prepare: + ac_date = calculate_badge_date(assignment_type='prepare',today=date.today()) + else: + ac_date = calculate_badge_date(assignment_type='pracice',today=date.today()) + + # cannot do all 3 in one because of source; if about to post badges, + # reveiw & practice are from current date and prepare is from next + if prepare: + lesson.create_ac_file('prepare', + ac_date,base_site_path=path) + else: + lesson.create_ac_file('review', + ac_date,base_site_path=path) + lesson.create_ac_file('practice', + ac_date,base_site_path=path) + +@cspt_cli.command() +@click.option('-d','--date-in', + 
help='date part of filename to read in') +@click.option('-p','--base-path', default = '.', + help='path that contains the notes folder') + +def processexport(date_in = None,base_path = '.'): + ''' + transform output from mac terminal export to myst notebook + (relies on regex specifically to brownsarahm) + ''' + if not(date_in): + date_in =date.today().isoformat() + + notes_file = os.path.join(base_path,'notes',date_in+'.md') + with open(notes_file,'r') as f: + export = f.read() + + notes_template = process_export(export,date_in) + + with open(notes_file,'w') as f: + f.write(notes_template) + + init_activity_files(base_path,date_in) + + \ No newline at end of file diff --git a/cspt/config.py b/cspt/config.py new file mode 100644 index 0000000..41b7d5a --- /dev/null +++ b/cspt/config.py @@ -0,0 +1,7 @@ +from datetime import datetime +# UPDATE: update this each semester +REPO = 'http://compsys-progtools.github.io/spring2024/' +BASE_URL = 'https://raw.githubusercontent.com/compsys-progtools/spring2024/main/_' + +GH_APPROVERS = ['brownsarahm','trevmoy','yussif-issah','marcinpawlukiewicz'] +EARLY_BIRD_DEADLINE= datetime.fromisoformat('2024-02-19') diff --git a/cspt/inclass.py b/cspt/inclass.py new file mode 100644 index 0000000..7d33149 --- /dev/null +++ b/cspt/inclass.py @@ -0,0 +1,7 @@ + +# TODO: assuming the Lesson.codeblocks output is hosted in the site repo +# (url from config) and each block has an identifier, write a function that +# gets a block from id as a hint +# world use case: to help get caught up/ ease typing +# code use case: a CLI that uses click.echo to be able to pipe or redirect +# hints to a script \ No newline at end of file diff --git a/cspt/lesson.py b/cspt/lesson.py new file mode 100644 index 0000000..5e15b30 --- /dev/null +++ b/cspt/lesson.py @@ -0,0 +1,271 @@ +import yaml +import json +import re +import os + +DEFAULT_BLOCK_META = {"lesson_part": "main"} +# TODO: validate that lesson has pace questoin +# TODO: add assignment creation flags + 
def wrap(s, marker, newline=False):
    '''
    wrap s with marker (put marker before and after), join with newline optionally

    Parameters
    ----------
    s : string
        content to be wrapped
    marker : string
        what to wrap with
    newline : bool
        if true join with newline character, otherwise join with empty string
    '''
    joiner = '\n' if newline else ''
    return joiner.join([marker, s, marker])


class Lesson():
    '''
    a myst lesson document: a yaml metadata header wrapped in --- followed by
    a body of +++-separated blocks
    '''
    # TODO add processing

    def __init__(self, text):
        '''
        Parameters
        ----------
        text : string
            full text of a lesson file
        '''
        # pull out header marked by ---; maxsplit=2 keeps any later --- in the
        # body (eg a markdown horizontal rule) from breaking the unpacking
        _, header, body = text.split('---', 2)
        self.metadata = yaml.safe_load(header)

        # split at +++ and make a collection of blocks
        self.blocks = [Block(b) for b in body.split('+++')]
        # activities; .get guards blocks whose metadata omits lesson_part
        self.activities = [b for b in self.blocks
                           if b.labels.get('lesson_part') == 'activity']

    def save(self, file_out):
        '''
        write out lesson to file_out

        Parameters
        ----------
        file_out : string or file buffer
            where to write file
        '''
        text_out = '---\n'
        # yamlify metadata
        text_out += yaml.dump(self.metadata)
        text_out += '\n---\n'
        # get blocks in output version
        text_out += '\n\n+++'.join([''] + [b.export() for b in self.blocks])

        with open(file_out, 'w') as f:
            f.write(text_out)

    def get_prismia(self):
        '''
        get prismia version: prismia and main blocks, joined by ---
        '''
        prismia_subset = self.filter_blocks('lesson_part', ['prismia', 'main'])
        # format each block
        prismia_blocks = [b.get_prismia() for b in prismia_subset]
        return '\n\n---\n\n'.join(prismia_blocks)

    def get_site(self):
        '''
        get the site version of the lesson (site/main blocks, empties dropped)
        '''
        # process each block, then drop empty results
        site_blocks = [b.get_site() for b in self.blocks]
        site_blocks = [b for b in site_blocks if len(b) > 0]
        return '+++'.join(site_blocks)

    def get_handout(self):
        '''
        produce a handout that serves as an outline for students to follow along
        '''
        # TODO: add key points?
        # collect heading lines from every block, then drop empties
        headings = [h for b in self.blocks for h in b.get_handout()]
        headings = [h for h in headings if len(h) > 0]
        return '\n\n'.join(headings)

    def get_codeblocks(self):
        '''
        produce hint sheet that is only the code blocks with an identifier

        (not implemented yet)
        '''
        # TODO: filter blocks for identified code blocks

    def create_ac_file(self, ac_type, file_out, base_site_path):
        '''
        extract activity instructions from lesson plan and write to file

        Parameters
        ----------
        ac_type : str
            prepare, practice, or review
        file_out : str
            file name (without extension) to save the file as
        base_site_path : str or path
            site path
        '''
        # concatenate the bodies of the activity blocks of the requested type;
        # .get guards activity blocks that omit ac_type
        ac_text = ''.join([b.body for b in self.activities
                           if b.labels.get('ac_type') == ac_type])

        path_out = os.path.join(base_site_path, '_' + ac_type, file_out + '.md')
        with open(path_out, 'w') as f:
            f.write(ac_text)

    def filter_blocks(self, label, values):
        '''
        filter blocks for ones that have value for the label

        Parameters
        ----------
        label : string
            label that is one of the keys in block metadata
        values : string or list
            value(s) to filter for
        '''
        return [b for b in self.blocks if b.is_labeled(label, values)]


class Block():
    '''
    one +++-delimited chunk of a lesson; an optional json object on the first
    line provides the block labels
    '''

    def __init__(self, text):
        '''
        Parameters
        ----------
        text : string
            block text, optionally starting with a json metadata line
        '''
        # first line may be a json metadata object
        meta_candidate, _, rest = text.partition('\n')
        # empty string will eval to false
        if meta_candidate and meta_candidate[0] == '{':
            try:
                self.labels = json.loads(meta_candidate)
                self.body = rest.strip()
            except json.JSONDecodeError:
                # BUGFIX: previously re-ran json.loads and re-raised; report
                # the bad metadata and fall back to treating it all as body
                print('could not parse block metadata: ' + meta_candidate)
                self.labels = dict(DEFAULT_BLOCK_META)
                self.body = text.strip()
        else:
            # BUGFIX: copy so add_label cannot mutate the shared default dict
            self.labels = dict(DEFAULT_BLOCK_META)
            self.body = text.strip()

    def export(self):
        '''
        export back to string (json label line, blank line, body)
        '''
        return json.dumps(self.labels) + '\n\n' + self.body

    def add_label(self, label, value):
        '''
        add an additional key value pair, label:value, to the block labels

        Parameters
        ----------
        label : string
            key to add to label attribute of block
        value : string or string-like
            value to store
        '''
        self.labels.update({label: value})

    def add_labels(self, label_dict):
        '''
        add several key value pairs to the labels for the block

        Parameters
        ----------
        label_dict : dict
            key, value pairs to add
        '''
        self.labels.update(label_dict)

    def is_labeled(self, label, values):
        '''
        does this block have any of values in the label field

        Parameters
        ----------
        label : string
            field to search for
        values : string or list
            value(s) to check for

        Returns
        -------
        bool; False (not KeyError) when the block lacks the label
        '''
        if isinstance(values, str):
            return self.labels.get(label) == values
        return self.labels.get(label) in values

    def get_prismia(self):
        '''
        return this block formatted for prismia: code blocks keep their ```
        fences (markup dropped); other literal blocks become the type in bold
        followed by their content
        '''
        body_prismia = ''
        code_blocks = ['code-cell', 'code-block']
        last_end = 0

        for m_lit in re.finditer('```{(?P<blocktype>.*)}', self.body):
            block_start, content_start = m_lit.span()
            # closing fence; BUGFIX: offset by content_start so the index is
            # absolute (it was relative, so the wrong spans were sliced and
            # the scan resumed at the wrong place)
            rel_close = self.body[content_start:].find('```')
            if rel_close == -1:
                # unterminated literal block: stop transforming here
                break
            content_end = content_start + rel_close

            # text before the literal block passes through unchanged
            body_prismia += self.body[last_end:block_start]
            literal_content = self.body[content_start:content_end]
            block_type = m_lit.group('blocktype').replace('`', '').strip('{}')
            if block_type in code_blocks:
                # keep ```, drop markup
                body_prismia += wrap(literal_content, '```', newline=True)
            else:
                # add only content, with the type in bold
                body_prismia += wrap(block_type, '**')
                body_prismia += literal_content

            # skip past the closing ```
            last_end = content_end + 3

        # add any remaining content after the last match
        body_prismia += self.body[last_end:]
        return body_prismia

    def get_site(self):
        '''
        return the version of this block for the site,
        or empty if not the right lesson part
        '''
        if self.labels.get('lesson_part') in ['site', 'main']:
            # strip ``` for anything not code
            # BUGFIX: was self.body.relace(...) -> AttributeError
            body_notes = self.body.replace('```', '')
            # TODO: strip code cell meta data; return null if meta is notes only
        else:
            body_notes = ''
        # TODO: drop mcq
        return body_notes

    def get_handout(self):
        '''
        return handout version (only heading lines)
        '''
        lines = self.body.split('\n')
        # startswith also guards blank lines (l[0] raised IndexError)
        return [l for l in lines if l.startswith('#')]
# ---- cspt/notes.py ----
# (Block.get_site / get_handout are defined with the Block class above)

import re
import os
from datetime import date
import click
from .tasktracking import calculate_badge_date


# entry appended to an activity summary page for one date; {include} is a
# format field filled with the literal text '{include}' at format time
activitypage_entry = '''
## {date}

[related notes](../notes/{date})

Activities:
```{include} ../{ac}/{date}.md
```'''

# myst-nb front matter for a notes page
header = '''
---
file_format: mystnb
kernelspec:
 name: python3
---
'''

# badge section appended to notes; double braces stay literal through .format
badge_string = ("\n\n## Prepare for Next Class \n\n" +
                "```{{include}} ../_prepare/{prepdate}.md\n```\n\n" +
                "## Badges\n\n" +
                '`````{{tab-set}}\n' +
                '````{{tab-item}} Review\n' +
                "```{{include}} ../_review/{date}.md\n```\n\n````\n\n" +
                '````{{tab-item}} Practice\n' +
                "```{{include}} ../_practice/{date}.md\n```\n\n````\n`````")

footer_string = ('\n\n## Experience Report Evidence' +
                 "\n\n## Questions After Today's Class ")

activity_dirs = ['_prepare', '_review', '_practice']


def process_export(export, date_in):
    '''
    transform a mac terminal export into a myst notebook notes page

    Parameters
    ----------
    export : string
        raw terminal export text
    date_in : string
        iso date (YYYY-MM-DD) of the class session

    Returns
    -------
    notes : string
        myst-formatted notes document (header, transformed notes, badge
        includes, footer)
    '''
    # wrap each command typed at the brownsarahm prompt in a bash code-cell
    # and open a console output block after it
    # NOTE(review): the named group was garbled in transit; reconstructed as
    # (?P<command>...) to match the \g<command> back-reference -- confirm
    md_notes_starter = re.sub(
        r'\(base\) brownsarahm.*\$ (?P<command>.*\n)',
        '```\n\n+++{"lesson_part": "main"}\n\n```{code-cell} bash\n'
        ':tags: ["skip-execution"]\n\\g<command>```\n\n'
        '+++{"lesson_part": "main","type":"output"}\n\n```{code-block} console\n',
        export)

    # TODO: rm extra parts at top and bottom
    prepdate = calculate_badge_date('prepare', date_in)
    date_activity = badge_string.format(date=date_in, prepdate=prepdate)

    note_parts = [header, md_notes_starter, date_activity, footer_string]
    return '\n\n'.join(note_parts)


def init_activity_files(base_path, date_in):
    '''
    create blank activity files for a date and link them on the summary pages

    Parameters
    ----------
    base_path : string or path
        root of the site repo
    date_in : string
        iso date of the class session
    '''
    for ac_dir in activity_dirs:
        ac = ac_dir[1:]
        badge_date = calculate_badge_date(ac, date_in)
        # create blank ac file
        with open(os.path.join(base_path, ac_dir, badge_date + '.md'), 'w') as f:
            f.write('```{index} \n```')

        # add to the activity type's summary page
        with open(os.path.join(base_path, 'activities', ac + '.md'), 'a') as f:
            f.write(activitypage_entry.format(date=badge_date, ac=ac_dir,
                                              include='{include}'))

# scratch for regenerating summary pages in bulk (kept for reference):
# ac_fix = '\n\n'.join([activitypage_entry.format(date=date_in, ac=ac,
#                            include='{include}') for ac in activities
#                            for date_in in date_list])
# print(ac_fix)


def link_activities():
    '''
    Append activities via include to a file for all lessons

    FIXME... incomplete/possibly broken
    '''
    lesson_files = os.listdir('lessons')

    # NOTE(review): the {include} fences are escaped ({{include}}) so .format
    # only fills {filename}; unescaped they raised KeyError. The '(unknown)'
    # placeholders in the original look like extraction artifacts for
    # {filename} -- confirm against the site layout.
    activity_string = ("\n\n## Prepare for this class \n\n" +
                       "```{{include}} ../_soln_prepare/{filename}\n```\n\n" +
                       "## Badges\n\n" +
                       '`````{{tab-set}}\n' +
                       '````{{tab-item}} Review\n' +
                       "```{{include}} ../_soln_review/{filename}\n```\n\n````\n\n" +
                       '````{{tab-item}} Practice\n' +
                       "```{{include}} ../_soln_practice/{filename}\n```\n\n")

    for lesson in lesson_files:
        date_activity = activity_string.format(filename=lesson)

        with open(os.path.join('lessons', lesson), 'a') as f:
            f.write(date_activity)


# ---- cspt/prep.py ----

import click

# Prep/ manage requirements

# super-set copy for lesson design
# strip to prepare for prismia
#   boxes -> bold?/skip
#   do x. do y.
# questions (esp mcq) in
# append AC in single file
# strip to prepare for notes
#   interpretation; context, sentences vs do x
#   myst supported
#   no MCQ/
#   maybe some try it yourself checks
# separate ac to separate pieces
# prepare terminal export, (exists)
# (stretch) combine terminal export with prepared notes
# handle ac/ prepare dates


# superset should be in an nb format
# ----------------------------------------------------------------
# support functions
# ----------------------------------------------------------------


def load_lesson(file_path, sep='---'):
    '''
    load a lesson file and split it on sep

    Parameters
    ----------
    file_path : string or path
        lesson file to read
    sep : string
        separator to split the text on (default '---')

    Returns
    -------
    blocks : list of strings
        the separated sections of the file
    '''
    with open(file_path, 'r') as f:
        text_in = f.read()

    # BUGFIX: the split result was computed but never returned
    return text_in.split(sep)


def swap_separators(file_string):
    '''
    (not implemented) swap block separators in a lesson string

    Parameters
    ----------
    file_string : string
        lesson text to transform
    '''
    # TODO: implement


# ----------------------------------------------------------------
# CLI
# ----------------------------------------------------------------

@click.command()
@click.argument('filename')
@click.option('-all', is_flag=True, default=False)
def strip_solutions(filename, all=False):
    '''
    strip solutions from activity file and echo result

    Parameters
    ----------
    filename : path
        activity file to strip
    '''
    # BUGFIX: click passes the -all flag as `all`; the signature previously
    # did not accept it, so every invocation raised TypeError
    # NOTE(review): process_ac_file is not defined in this module -- confirm
    # where it comes from (parse_soln_file below looks like the implementation)
    stripped = process_ac_file(filename, process_like='strip')
    click.echo(stripped)


@click.command()
@click.option('-f', '--file_in')
def post_ac(file_in, date_out=None, site_base='.', gradebook_base='.'):
    '''
    (work in progress) post an activity file into the site and gradebook

    Parameters
    ----------
    file_in : path
        solution activity file to post
    '''
    # BUGFIX: click only supplies file_in, so the extra parameters need
    # defaults to avoid a TypeError on invocation; os was also unimported
    import os

    out_file_name = date_out + '.md'
    # load the file

    # strip and write to ac file in site
    with open(os.path.join(site_base, out_file_name), 'r') as f:
        soln_ac_text = f.readlines()
    # TODO: write to solution folder


def parse_soln_file(filename, strip=True):
    '''
    strip solutions from an activity solution file

    Parameters
    ----------
    filename : path
        file to read; '.md' is appended if there is no extension
    strip : bool
        if True remove +++ separators before dropping quoted lines

    Returns
    -------
    new_version : string
        the file contents with quoted (solution) lines removed
    '''
    if '.' not in filename:
        filename += '.md'

    with open(filename, 'r') as f:
        soln_lines = f.readlines()

    if strip:
        # BUGFIX: .replace was called on the *list* (AttributeError) and its
        # result was discarded; apply it per line and keep the result
        soln_lines = [line.replace('+++', '') for line in soln_lines]

    # drop quoted lines (solutions are markdown block quotes)
    new_version = ''.join([line for line in soln_lines
                           if not line.startswith('>')])
    return new_version


def parse_lesson_file(file_in, format_out='prismia'):
    '''
    (incomplete) load a lesson file for conversion to format_out
    '''
    # load lesson
    with open(file_in, 'r') as f:
        lesson_text = f.read()

    # TODO: read in to useful data structure
    # TODO: for prismia convert output with ---


# ---- cspt/sitetools.py ----

import os
import pandas as pd
import re


def generate_csv_from_index(path_list, sub_re, file_out=None,
                            path_meaning={'dir': 'type', 'file': 'date', 'result': 'file'},
                            dir_cleaner=lambda s: s.strip('_.'),
                            file_cleaner=lambda f: f.split('.')[0]):
    '''
    parse a set of files for `{index}` elements

    Parameters
    ----------
    path_list : list of strings or buffers
        paths to search
    sub_re : string
        regex excerpt to be searched for literal matches of after searching
        for {index}
    file_out : string or buffer
        path to write file, default none, returns the DataFrame
    path_meaning : dict
        dict with keys [dir, file, result] and values used as column names in
        the output (read-only here, so the mutable default is safe)
    dir_cleaner : function
        how to clean dir names for use in final result (default strips '._')
    file_cleaner : function
        how to clean file names (default drops . to end)

    Returns
    -------
    True when written to file_out, otherwise the combined DataFrame
    '''
    all_file_df_list = []
    # iterate (dir, file) pairs across all requested paths
    file_list = [(p, f) for p in path_list for f in os.listdir(p)]

    for dir, file in file_list:
        file_clean = file_cleaner(file)
        dir_clean = dir_cleaner(dir)

        cur_file_path = os.path.join(dir, file)
        with open(cur_file_path, 'r') as f:
            filetext = f.read()

        complete_re = '{index}`' + sub_re + '` '
        # the "first" result will be the only one.
        # TODO check that this is true
        # first 8 characters & last 2 are not the file name
        # iterate the regex results, make list of list for df
        result_list = [[file_clean, a[0][8:-2], dir_clean]
                       for a in re.finditer(complete_re, filetext)]

        # BUGFIX: the column order now matches the row order
        # [file, result, dir]; the original referenced path_meaning['type'],
        # which is not a key of path_meaning (KeyError)
        all_file_df_list.append(pd.DataFrame(result_list,
                                             columns=[path_meaning['file'],
                                                      path_meaning['result'],
                                                      path_meaning['dir']]))
    # combine
    all_file_df = pd.concat(all_file_df_list)

    if file_out:
        all_file_df.to_csv(file_out, index=False)
        return True
    return all_file_df
+ # TODO check that this is true + # first 8 characters & last 2 are not the file name + # iterate the regex results, make list of list for df + result_list = [[file_clean, a[0][8:-2], dir_clean ] + for a in re.finditer(complete_re, filetext)] + + all_file_df_list.append(pd.DataFrame(result_list, + columns = [path_meaning['dir'], + path_meaning['result'], + path_meaning['type']])) + # combine + all_file_df = pd.concat(all_file_df_list) + + if file_out: + all_file_df.to_csv(file_out,index=False) + return True + else: + return all_file_df + diff --git a/tasktracking.py b/cspt/tasktracking.py similarity index 62% rename from tasktracking.py rename to cspt/tasktracking.py index b767fad..5746860 100644 --- a/tasktracking.py +++ b/cspt/tasktracking.py @@ -4,8 +4,7 @@ from datetime import datetime as dtt from datetime import timedelta import re -# UPDATE: update this each semester -base_url = 'https://raw.githubusercontent.com/compsys-progtools/spring2024/main/_' +from .config import BASE_URL cur_days_off = [(dt(2024,3,10),dt(2024,3,16)), (dt(2024,2,19))] @@ -74,6 +73,10 @@ def calculate_badge_date(assignment_type,today=None): # if auto in the morning use past if dtt.today().hour < 12: today -= timedelta(days=1) + + # make date object from string + if type(today)==str: + today = dt.fromisoformat(today) last_class = today- day_adj[today.weekday()] # @@ -90,64 +93,6 @@ def calculate_badge_date(assignment_type,today=None): # return badge_date -@click.command() -@click.option('--type', 'assignment_type', default=None, - help='type can be prepare, review, or practice') -@click.option('--prepare',is_flag=True) -@click.option('--review',is_flag=True) -@click.option('--practice',is_flag=True) -def get_badge_date(assignment_type=None,prepare=False,review=False,practice=False): - ''' - cli for calculate badge date - ''' - # set assignment date from flags if not passed - if not(assignment_type): - if prepare: - assignment_type='prepare' - - if review: - assignment_type ='review' - - 
if practice: - assignment_type='practice' - - click.echo(calculate_badge_date(assignment_type)) - - -@click.command() -@click.argument('passed_date') - -def parse_date(passed_date): - ''' - process select non dates - ''' - passed_date_clean = passed_date.strip().lower() - - if passed_date_clean == "today": - click.echo(dt.today().isoformat()) - else: - click.echo(passed_date_clean) - - - -@click.command() -@click.option('--type', 'assignment_type', default='prepare', - help='type can be prepare, review, or practice') -@click.option('--date', default=None, - help='date should be YYYY-MM-DD of the tasks you want') - -def get_assignment(date, assignment_type = 'prepare'): - ''' - get the assignment text formatted - (CLI entrypoint) - ''' - - if not(date): - date = calculate_badge_date(assignment_type) - - - md_activity = fetch_to_checklist(date, assignment_type) - click.echo( md_activity) @@ -169,7 +114,7 @@ def fetch_to_checklist(date, assignment_type = 'prepare'): ''' - path = base_url +assignment_type + '/' + date +'.md' + path = BASE_URL +assignment_type + '/' + date +'.md' # get and convert to checklist from enumerated fetched_instructions = requests.get(path).text check_list = re.sub('[0-9]\. 
', '- [ ] ', fetched_instructions) @@ -181,17 +126,4 @@ def fetch_to_checklist(date, assignment_type = 'prepare'): return cleaned_lists -@click.command() -@click.option('--type', 'assignment_type', default='prepare', - help='type can be prepare, review, or practice') -@click.option('--date', default=None) -def get_all(date): - ''' - ''' - type_list = ['prepare','review','practice'] - activities = [] - for assignment_type in type_list: - try: - activities.append(get_assignment(date,assignment_type)) - except: - print('no ' + assignment_type + ' currently posted for this date') + diff --git a/docs/api.md b/docs/api.md new file mode 100644 index 0000000..d1326a3 --- /dev/null +++ b/docs/api.md @@ -0,0 +1,53 @@ +# API + +The primary way to use `courseutils` is through the CLI as outlined through, but it can also be used as a Python library. + +This API is also useful for extending the CLI. Many of these functions offer flexible processing of basic information that could be used to create additional CLI functionality beyond what is already implemented. + +```{warning} +some functions are incomplete +``` + +## Badges + +```{eval-rst} +.. automodule:: cspt.badges + :members: +``` + +## Task Tracking + +```{eval-rst} +.. automodule:: cspt.tasktracking + :members: +``` + + +## Notes + +```{eval-rst} +.. automodule:: cspt.notes + :members: +``` + + +## Prep + +```{eval-rst} +.. automodule:: cspt.prep + :members: +``` + + +## Lesson Objects + +```{eval-rst} +.. autoclass:: cspt.lesson.Lesson + :members: +``` + + +```{eval-rst} +.. autoclass:: cspt.lesson.Block + :members: +``` \ No newline at end of file diff --git a/docs/cli.md b/docs/cli.md new file mode 100644 index 0000000..f71974d --- /dev/null +++ b/docs/cli.md @@ -0,0 +1,18 @@ +# CLI + +For each command the gray box shows the usage. + +These are documented according to bash/unix convention. 
+- the `[]` do not get used, they indicate that there might be more than one item in that position +- `[OPTIONS]` refers to any optional inputs or options +- `[ARGS]` refers to required inputs, or arguments + + + +```{eval-rst} +.. click:: cspt.cli:cspt_cli + :prog: cspt + :nested: full + :commands: + +``` \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index ab1dd64..08c3801 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,7 +1,7 @@ # -- Project information ----------------------------------------------------- -project = 'Computer Systems and Programming Tools courseutils' -copyright = '2023, Sarah M Brown' +project = 'courseutils' +copyright = '2024, Sarah M Brown' author = 'Sarah M Brown ' @@ -18,7 +18,8 @@ extensions = [ "myst_nb", 'sphinx.ext.intersphinx', - "sphinx_panels", + "sphinx_design", + # "sphinx_panels", 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx_click' @@ -47,8 +48,17 @@ html_theme_options = { - "search_bar_text": "Search this site...", - "navbar_end": ["search-field.html"], + "show_nav_level": 2, + "header_links_before_dropdown": 6, + "icon_links": [ + { + "name": "GitHub", + "url": "https://github.com/compsys-progtools/courseutils", + "icon": "fa-brands fa-github", + }], + "secondary_sidebar_items": { + "**/*": ["page-toc", "edit-this-page", "sourcelink"], + } } # html_favicon = "_static/favicon.ico" @@ -62,8 +72,9 @@ # html_extra_path = ["feed.xml"] # map pages to which sidebar they should have # "page_file_name": ["list.html", "of.html", "sidebar.html", "files.html"] -# html_sidebars = { -# "index": ["hello.html"], +html_sidebars = { + "*": [], + "**/*": ["sidebar-nav-bs",] # "about": ["hello.html"], # "publications": ["hello.html"], # "projects": ["hello.html"], @@ -72,7 +83,7 @@ # "news/**": ['postcard.html', 'recentposts.html', 'archives.html'], # "blog": ['tagcloud.html', 'archives.html'], # "blog/**": ['postcard.html', 'recentposts.html', 'archives.html'] -# } +} blog_title = "Blog " blog_path = "news" @@ 
-100,6 +111,7 @@ "smartquotes", "strikethrough", "substitution", + "design" # "tasklist", ] diff --git a/docs/examples/index.md b/docs/examples/index.md new file mode 100644 index 0000000..d610d40 --- /dev/null +++ b/docs/examples/index.md @@ -0,0 +1,13 @@ +# Examples + + +Examples show how to use the commands organized by different goals. This includes short "recipes" for common tasks. + + +```{toctree} +:caption: Example Sections +:glob: +:maxdepth: 2 + +* +``` \ No newline at end of file diff --git a/docs/examples/standing.md b/docs/examples/standing.md new file mode 100644 index 0000000..fb4a810 --- /dev/null +++ b/docs/examples/standing.md @@ -0,0 +1,86 @@ +# Standing + + +(progressreport)= +## Get a Progress Report + +First, get your PR info: +``` +gh pr list --state all --json title,latestReviews >> badges.json +``` + +Notes: +- `--state all` is important to get both open and closed PRs +- `--json` is requires and `title, latestReviews` are the two requied attributes to that parameter, you can add additional ones though and for some features, additional ones are requires. +- if you have more than 30 total PRs(PRs only, not issues) use the `--limit`/`-L` option with a number >= your total number of PRs. + +Then use `cspt progressreport` to check which have been approved by a valid approver and have a badge keyword in them. + +``` +cspt progressreport badges.json +``` + + Options allow you to control the format of the report. + +```{eval-rst} +.. click:: cspt.cli:progressreport + :prog: cspt progressreport + :nested: full + :commands: + +``` + +## Check PR Titles + +### Check a single PR + +``` +cspt titlecheck -t 'title I am thinking about' +``` + + +```{eval-rst} +.. click:: cspt.cli:titlecheck + :prog: cspt titlecheck + :nested: full + :commands: + +``` + + +### Make a list of PRs to fix +This can check which titles will work with the grading calculation functions. + +```{eval-rst} +.. 
click:: cspt.cli:prfixlist + :prog: cspt prfixlist + :nested: full + :commands: + +``` + +### What counts? +Under the hood, the majority of the checking is done by this function: + +```{eval-rst} +.. automodule:: cspt.badges + :members: is_title_gradeable + :no-index: +``` + +## Check if Early bonus is met + + +``` +gh pr list -s all --json title,latestReviews,createdAt | cspt earlybonus - +``` + + +```{eval-rst} +.. click:: cspt.cli:earlybonus + :prog: cspt earlybonus + :nested: full + :commands: + +``` + diff --git a/docs/examples/tasktracking.md b/docs/examples/tasktracking.md new file mode 100644 index 0000000..bbfd4ba --- /dev/null +++ b/docs/examples/tasktracking.md @@ -0,0 +1,62 @@ +# Get assignment instructions + +There are a set of commands that can be used to create the badge issues. + + +## Get Assignment + +Example use case: making a badge issue + +``` +cspt getassignment | gh issue create -t 'next prepare work' -F - +``` + +The `-F` option of [`gh issue create`](https://cli.github.com/manual/gh_issue_create) +allows specifying a file for the body of the issue, and `-` reads from stdin, or in this case the pipe. + +(makeupbadgeissue)= +### Create a badge issue for a specific date +Put badge instructions into the PR comment, while creating a PR from the current branch + +``` +cspt getassignment --type practice --date 2024-02-15 | gh pr create -t 'practice 2024-02-15' -F - +``` + + +### Use instructions in PR issue +Put badge instructions into the PR comment, while creating a PR from the current branch + +``` +cspt getassignment --type practice --date 2024-02-15 | gh pr create -t 'practice 2024-02-15' -F - +``` + +### Details + +```{eval-rst} +.. 
click:: cspt.cli:getassignment + :prog: cspt getassignment + :nested: full + :commands: + +``` + + + + +## Get the most recent badge date + +To get the date of the most recently posted badge of a given type, for +example when creating issues for the title use `cspt getbadgedate` + +``` +pretitle="prepare-"$(cspt getbadgedate --prepare) +cspt getassignment --type prepare | gh issue create --title $pretitle --label prepare --body-file - +``` + +```{eval-rst} +.. click:: cspt.cli:getbadgedate + :prog: cspt getbadgedate + :nested: full + :commands: + +``` \ No newline at end of file diff --git a/docs/index.md b/docs/index.md index 1a191f3..4a059e9 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,27 +1,88 @@ - -# Welcome to Computer Systems and Programming Tools courseutils's documentation! +# Computer Systems and Programming Tools courseutils +These are a set of tools for managing tasks and managing the markdown lesson plans. -## Setup +````{grid} 3 +```{grid-item-card} Progress Reports +:link: progressreport +:link-type: ref +commands to generate progress reports +``` + +```{grid-item-card} Badge Instructions +:link: makeupbadgeissue +:link-type: ref + +how to make a badge issue for a specific date +``` + +```` + + + +## Install + +You can install after cloning to work locally or directly from github. + +### By clone + +You can clone first +``` +git clone https://github.com/compsys-progtools/courseutils.git +``` + +and then install ``` -git clone https://github.com/introcompsys/courseutils.git pip install courseutils ``` +(possibly `pip3`) + +if you clone in order to develop, you may want to install with pip's `-e` option + +``` +pip install -e courseutils +``` + +Then to update, pull and instll again. + + +### Direct install + +you can also install without cloning first with + +``` +pip install git+https://github.com/compsys-progtools/courseutils.git +``` + +Optionally, you can specify a branch to install, by default it installs main. 
+
+To update in this case, use the same command
+
 
 ## Usage
 
+The main use is as a CLI; for a list of all commands see the
+[CLI](cli.md) page.
+
+To use the component functions as a Python library, see
+[the python library](api.md) page.
+
+
 ```{toctree}
 :caption: Contents
 :maxdepth: 2
 
-tasktracking.md
-standing.md
+cli.md
+examples/index.md
+instructor/index.md
+api.md
 ```