add grading
brownsarahm committed Apr 10, 2024
1 parent 6cda94f commit 386cdb8
Showing 4 changed files with 235 additions and 13 deletions.
14 changes: 12 additions & 2 deletions cspt/badges.py
@@ -84,19 +84,27 @@ def badges_by_type(approved_prs):
badges_by_type.pop('prepare')
return badges_by_type

def generate_report(approved_prs,):
def generate_report(approved_prs,short=False):
'''
format approved prs into a report
'''
titles_by_type = badges_by_type(approved_prs)
verified_by_type = '\n' + '\n'.join(['\n## '+bt + ' ('+str(len(bl)) +')' +'\n- '+'\n- '. join(bl)
for bt,bl in titles_by_type.items() if len(bl)>0 ])
valid_badges = [vi for v in titles_by_type.values() for vi in v]
not_typed = [p for p in approved_prs if not(p in valid_badges) ]

report_parts =[ '## all approved \n\n',
if short:
report_parts = [verified_by_type]
else:
report_parts =[ '## all approved \n\n',
'- ' + '\n- '.join(approved_prs),
verified_by_type,
'\n\n## Approved, not badges',
'\n- ' + '\n - '.join(not_typed)]



return '\n'.join(report_parts)

field_parser = {'title':lambda t:t,
@@ -178,6 +186,8 @@ def process_pr_json(json_output,numbered=False,
# process each field according to spec or pass value if spec not defined
reviewed = [{k: field_parser.get(k,lambda mv: mv)(v) for k,v in pr.items()}
for pr in json_output if pr['latestReviews']]


# apply all filters and keep only the ones that pass all filters
if filter_mode =='filter':
# list of prs where all filters are true
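A minimal sketch of what the new short flag changes in the report, assuming the cspt package is importable and that PR titles follow the type-prefixed convention badges_by_type expects (the titles below are invented for illustration):

# sketch: full report vs. the new brief report (hypothetical titles)
from cspt.badges import generate_report

approved = ['review: iterable types',
            'practice: pathlib exercises',
            'experience: 2024-04-08']

print(generate_report(approved))              # all approved + per-type sections + untyped leftovers
print(generate_report(approved, short=True))  # only the per-type sections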
89 changes: 78 additions & 11 deletions cspt/cli.py
@@ -8,6 +8,7 @@
from .notes import process_export,init_activity_files
from .sitetools import generate_csv_from_index
from .tasktracking import calculate_badge_date, fetch_to_checklist
from .grade_calculation import calculate_grade, community_apply
from .config import EARLY_BIRD_DEADLINE

from .lesson import Lesson
@@ -108,10 +109,12 @@ def kwlcsv(tldpath = '.'):
@click.option('-f','--file-out',default=None,
help='to write to a file, otherwise will use stdout')
@click.option('-r','--report',is_flag=True,
help ='process approved badges by type to a more descriptive report')
help ='process approved badges by type to a more descriptive report')
@click.option('-s','--soft',is_flag= True,
help = 'soft check, skip title check')
def progressreport(json_output,file_out, report,soft):
@click.option('-b','--brief',is_flag= True,
help = 'short version of report')
def progressreport(json_output,file_out, report,soft,brief):
'''
list PR titles that have been approved by an official approver, from a json file or - for stdin
@@ -129,7 +132,7 @@ def progressreport(json_output,file_out, report,soft):

if approved_prs: # not empty
if report:
report = generate_report(approved_prs)
report = generate_report(approved_prs,brief)
else:
report = '\n'.join(approved_prs)

@@ -143,6 +146,35 @@ def progressreport(json_output,file_out, report,soft):
click.echo('There are no approved badges')


@cspt_cli.command()
@click.argument('json-output', type =click.File('r'))
@click.option('-s','--soft',is_flag= True,
help = 'soft check, skip title check')
def badgecounts(json_output,soft):
'''
count approved badges by type from the output of
`gh pr list -s all --json title,latestReviews,createdAt` and print
the counts as yaml. input from either a file or - for stdin
'''
json_output = json.load(json_output)

selected_filters = ['approved']
if not(soft):
selected_filters.append('good_title')

approved_prs = process_pr_json(json_output,titles_only=True,filter_list=selected_filters)


if approved_prs:
eligble_by_type = badges_by_type(approved_prs)
count_by_type = {btype:len(badges) for btype,badges in eligble_by_type.items()}

        # the last character from yaml.dump is a newline; drop it so more output can be appended
click.echo(yaml.dump(count_by_type)[:-1])



@cspt_cli.command()
@click.option('-t','--pr-title', default = None,
help = 'title to check as string')
@@ -232,28 +264,63 @@ def mkchecklist(gh_cli_output,message):

@cspt_cli.command()
@click.argument('json-output', type =click.File('r'))
def earlybonus(json_output):
@click.option('-y','--output-yaml',is_flag = True,
help = 'output as yaml compatible with grading')
def earlybonus(json_output,output_yaml):
'''
check if early bonus is met from output of
`gh pr list -s all --json title,latestReviews,createdAt` and return
    a message. input from either a file or - for stdin
'''
json_output = json.load(json_output)
approved_submitted_early = process_pr_json(json_output,titles_only=True,
filter_list= ['approved','early'])
filter_list = ['approved','early']
try:
approved_submitted_early = process_pr_json(json_output,titles_only=True,
filter_list= filter_list)
except KeyError as e:
msg = (str(e) +' is required to be in the JSON_OUTPUT from gh'+
               ' for one of the selected filters: '+ ' '.join(filter_list))
# click.echo(msg)
raise click.UsageError(msg)


if approved_submitted_early:
eligble_by_type = badges_by_type(approved_submitted_early)

earned = len(eligble_by_type['review']) + len(eligble_by_type['practice']) >=6

earned_text = {True:'was',False:'was not'}
message = 'early bird bonus ' + earned_text[earned] + ' earned'
click.echo(message)
if output_yaml:
message = 'early: ' + str(int(earned))
else:
earned_text = {True:'was',False:'was not'}

message = 'early bird bonus ' + earned_text[earned] + ' earned'
else:

click.echo('there were no approved early badges')
if output_yaml:
message = 'early: 0'
else:
message = 'there were no approved early badges'

click.echo(message)


@cspt_cli.command()
@click.argument('badge_file', type =click.File('r'))
@click.option('-i','--influence',is_flag = True,
              help = 'return numerical influence score instead of letter grade')
def grade(badge_file, influence):
'''
    calculate a grade from a yaml file with badges/bonuses as keys and counts as values
'''
badges = yaml.safe_load(badge_file)

badges_comm_applied = community_apply(badges)

grade = calculate_grade(badges_comm_applied,influence)

click.echo(grade)

# --------------------------------------------------------------
# Instructor commands
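A sketch of exercising the new grade command end to end with click's test runner; the badge counts in the YAML are invented, and in normal use they would come from badgecounts / earlybonus -y output:

# sketch: drive the new grade command via click's testing utilities
from click.testing import CliRunner
from cspt.cli import cspt_cli

badge_yaml = '\n'.join(['experience: 22',
                        'lab: 13',
                        'review: 6',
                        'practice: 0',
                        'community: 0'])

runner = CliRunner()
with runner.isolated_filesystem():
    with open('badges.yml', 'w') as f:
        f.write(badge_yaml)

    # letter grade for these (made up) counts
    print(runner.invoke(cspt_cli, ['grade', 'badges.yml']).output)
    # numeric influence score instead of a letter
    print(runner.invoke(cspt_cli, ['grade', '--influence', 'badges.yml']).output)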
69 changes: 69 additions & 0 deletions cspt/grade_calculation.py
@@ -0,0 +1,69 @@
from .grade_constants import exp_thresh, community_cost
from .grade_constants import letter_df
from .grade_constants import bonus_criteria, weights, default_badges


def community_apply(student_dict):
'''
take a dictionary of badge counts and apply community badges, converting them
to other badge types where applicable, then return the updated dictionary
Parameters
----------
student_dict : dict
    dictionary with badges/bonuses as keys and counts as values
'''
# apply community badges if needed
if student_dict['experience'] < exp_thresh and student_dict['community']>0:
experience_needed = exp_thresh - student_dict['experience']
community_needed = experience_needed*community_cost['experience']
if student_dict['community'] >=community_needed:
student_dict['community'] -= community_needed
student_dict['experience'] += experience_needed

rp_sum = student_dict['review'] + student_dict['practice']
if rp_sum <18:
        # unrolled by hand instead of written as a loop
if student_dict['community'] >= community_cost['practice']:
student_dict['community'] -= community_cost['practice']
student_dict['practice'] += 1
if student_dict['community'] >= community_cost['practice']:
student_dict['community'] -= community_cost['practice']
student_dict['practice'] += 1
if student_dict['community'] >= community_cost['review']:
student_dict['community'] -= community_cost['review']
student_dict['review'] += 1
if student_dict['community'] >= community_cost['review']:
student_dict['community'] -= community_cost['review']
student_dict['review'] += 1
if student_dict['community'] >= community_cost['review']:
student_dict['community'] -= community_cost['review']
student_dict['review'] += 1

return student_dict


def calculate_grade(badges_in,return_influence=False):
'''
compute grade from dictionary
Parameters
----------
badges_in : dict
    dictionary with badges/bonuses as keys and counts as values
return_influence : bool {False}
    if True, return the numeric influence score instead of the letter grade
'''
current_badges = default_badges.copy()
current_badges.update(badges_in)
# apply bonuses
current_badges.update({bname:bfunc(current_badges) for bname,bfunc in bonus_criteria.items()})
# compute final
influence = sum([current_badges[k]*weights[k] for k in weights.keys()])

letter_grade = letter_df[letter_df['threshold']<=influence].iloc[-1].name.strip()

if return_influence:
return influence
else:
return letter_grade
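A worked sketch of the conversion in community_apply followed by the grade calculation, with invented counts: six community badges top experience up from 20 to the threshold of 22 (cost 3 each), and the remaining four buy one extra review badge (cost 4):

# sketch: community badges converted into experience and review (invented counts)
from cspt.grade_calculation import community_apply, calculate_grade

student = {'experience': 20,   # 2 short of exp_thresh (22)
           'lab': 13,
           'review': 5,
           'practice': 3,
           'community': 10}

converted = community_apply(dict(student))
print(converted)   # expected: experience 22, review 6, practice 3, community 0

print(calculate_grade(converted))                          # letter grade
print(calculate_grade(converted, return_influence=True))   # numeric influence score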
76 changes: 76 additions & 0 deletions cspt/grade_constants.py
@@ -0,0 +1,76 @@
import pandas as pd

exp_thresh = 22
rp_thresh =18
learning_weights = {'experience' :2, 'lab': 2, 'review': 3,'practice': 6,'explore': 9,'build' :36}
community_weights = {'experience_replace' :3, 'review_replace': 4,'practice_replace': 7, 'review_upgrade': 3,}
bonus_participation = 18
bonus_lab = 18
bonus_breadth = 32
bonus_early = 9

default_badges = {'experience' :0,
'lab': 0,
'review': 0,
'practice': 0,
'explore': 0,
'build' :0,
'community': 0,
'hack':0,
'unstuck': 0,
'descriptive': 0,
'early': 0,
'question':10 }

bonus_criteria = {'participation_bonus': lambda r: int(r['experience'] >=exp_thresh),
'lab_bonus': lambda r: int(r['lab'] >=13),
'breadth_bonus': lambda r: int(r['review'] + r['practice']>=rp_thresh),
'community_bonus': lambda r: int(r['community']>=10),
'unstuck_bonus': lambda r: r['unstuck'],
'descriptive_bonus': lambda r: r['descriptive'],
'early_bonus': lambda r: r['early'] ,
'hack_bonus': lambda r: r['hack'] ,
'curiosity_bonus': lambda r: r['question']>10}
bonus_values = {'participation_bonus': bonus_participation,
'lab_bonus': bonus_lab,
'breadth_bonus': bonus_breadth,
'community_bonus': 18,
'hack_bonus':18,
'unstuck_bonus': 9,
'descriptive_bonus': 9,
'early_bonus': 9 ,
'curiosity_bonus': 9 }
weights = learning_weights.copy()
weights.update(bonus_values)
community_cost = {'experience':3,
'review':4,
'practice':7,
'review_upgrade':3}

learning_df = pd.Series(learning_weights,name ='complexity').reset_index()
learning_df['badge_type'] = 'learning'

# nans are for learning badges, which all have weight 1
influence_df = pd.concat([learning_df]).fillna(1).rename(columns={'index':'badge'})
# final df


# base grade influence cutoffs
thresh_mrw = {'D ':22*learning_weights['experience']+13*learning_weights['lab']+bonus_participation + bonus_lab,
'D+':22*learning_weights['experience']+13*learning_weights['lab']+bonus_participation + bonus_lab + 6*learning_weights['review'],
'C-':22*learning_weights['experience']+13*learning_weights['lab']+bonus_participation + bonus_lab + 12*learning_weights['review'],
'C ':22*learning_weights['experience']+13*learning_weights['lab']+18*learning_weights['review']+\
bonus_participation + bonus_lab + bonus_breadth,
'C+':22*learning_weights['experience']+13*learning_weights['lab']+bonus_participation + bonus_lab + bonus_breadth + 6*learning_weights['practice'] + 12*learning_weights['review'],
'B-':22*learning_weights['experience']+13*learning_weights['lab']+bonus_participation + bonus_lab + bonus_breadth + 6*learning_weights['review'] + 12*learning_weights['practice'],
'B ':22*learning_weights['experience']+13*learning_weights['lab']+18*learning_weights['practice']+\
bonus_participation + bonus_lab + bonus_breadth,
'B+': 22*learning_weights['experience']+13*learning_weights['lab'] +18*learning_weights['practice'] +\
2*learning_weights['explore'] +bonus_participation + bonus_lab + bonus_breadth,
'A-': 22*learning_weights['experience']+13*learning_weights['lab'] +18*learning_weights['practice'] +\
4*learning_weights['explore'] +bonus_participation + bonus_lab + bonus_breadth,
'A ': 22*learning_weights['experience']+13*learning_weights['lab'] +18*learning_weights['practice'] +\
6*learning_weights['explore'] +bonus_participation + bonus_lab + bonus_breadth}

th_list = [[k,v] for k,v in thresh_mrw.items()]
letter_df = pd.DataFrame(th_list, columns = ['letter','threshold']).sort_values(by='threshold').set_index('letter')
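The base cutoffs are easiest to sanity-check by printing letter_df; for example, the 'D ' threshold works out to 22*2 (experience) + 13*2 (lab) + 18 + 18 (participation and lab bonuses) = 106. A small sketch of the same lookup calculate_grade performs, with a hypothetical influence score:

# sketch: inspect the cutoffs and look up the letter for a hypothetical influence score
from cspt.grade_constants import letter_df, learning_weights, bonus_participation, bonus_lab

d_cutoff = (22 * learning_weights['experience'] + 13 * learning_weights['lab']
            + bonus_participation + bonus_lab)
print(d_cutoff)     # 106, matching the 'D ' entry

print(letter_df)    # letter -> influence threshold, ascending

influence = 150     # hypothetical score
print(letter_df[letter_df['threshold'] <= influence].iloc[-1].name.strip())  # highest letter whose cutoff is met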
