From 8e546e8a77c0990d087179f8e5caf319c76b10fb Mon Sep 17 00:00:00 2001 From: Harsh Vardhan Rai Date: Fri, 4 Feb 2022 16:23:17 +0000 Subject: [PATCH 01/25] Add files via upload --- examples/eng.py | 115 ++++++++++++++++++++++++++++++++++++++++++++ examples/french.py | 106 ++++++++++++++++++++++++++++++++++++++++ examples/german.py | 105 ++++++++++++++++++++++++++++++++++++++++ examples/ital.py | 106 ++++++++++++++++++++++++++++++++++++++++ examples/nl.py | 106 ++++++++++++++++++++++++++++++++++++++++ examples/spanish.py | 105 ++++++++++++++++++++++++++++++++++++++++ examples/swedish.py | 105 ++++++++++++++++++++++++++++++++++++++++ 7 files changed, 748 insertions(+) create mode 100644 examples/eng.py create mode 100644 examples/french.py create mode 100644 examples/german.py create mode 100644 examples/ital.py create mode 100644 examples/nl.py create mode 100644 examples/spanish.py create mode 100644 examples/swedish.py diff --git a/examples/eng.py b/examples/eng.py new file mode 100644 index 0000000..2193016 --- /dev/null +++ b/examples/eng.py @@ -0,0 +1,115 @@ + +import gensim.downloader as api +import numpy as np +from functools import reduce +import gensim.downloader as api +import os + +from wefe.datasets import load_weat +from wefe.metrics import RNSB, WEAT, RND +from wefe.query import Query +from wefe.utils import run_queries +from wefe.word_embedding_model import WordEmbeddingModel +from flair.embeddings import WordEmbeddings +from wefe.utils import plot_queries_results +from wefe.utils import create_ranking +from wefe.utils import calculate_ranking_correlations, plot_ranking_correlations +from wefe.utils import plot_ranking + +glove_embedding = WordEmbeddings('en') +glove_keyed_vectors = glove_embedding.precomputed_word_embeddings +model2 = WordEmbeddingModel(glove_keyed_vectors, 'en') + +models = [model2] + +# Load the WEAT word sets +word_sets = load_weat() + +# Create gender queries +gender_query_1 = Query( + [word_sets["male_terms"], word_sets["female_terms"]], + [word_sets["career"], word_sets["family"]], + ["Male terms", "Female terms"], + ["Career", "Family"], +) + +gender_query_2 = Query( + [word_sets["male_terms"], word_sets["female_terms"]], + [word_sets["science"], word_sets["arts"]], + ["Male terms", "Female terms"], + ["Science", "Arts"], +) + +gender_query_3 = Query( + [word_sets["male_terms"], word_sets["female_terms"]], + [word_sets["math"], word_sets["arts_2"]], + ["Male terms", "Female terms"], + ["Math", "Arts2"], +) + + +gender_queries = [gender_query_1, gender_query_2, gender_query_3] + +weat = WEAT() + +WEAT_gender_results = run_queries( + WEAT, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) + +print(WEAT_gender_results) +# Plot the results +#plot_queries_results(WEAT_gender_results).show() +#WEAT_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +# run the queries using WEAT effect size +WEAT_EZ_gender_results = run_queries( + WEAT, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}], "return_effect_size": True,}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(WEAT_EZ_gender_results) +#plot_queries_results(WEAT_EZ_gender_results).show() +#WEAT_EZ_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +RNSB_gender_results = run_queries( + RNSB, + gender_queries, + models, + 
lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(RNSB_gender_results) +#plot_queries_results(RNSB_gender_results).show() +#RNSB_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + +RND_gender_results = run_queries( + RND, + gender_queries, + models, + metric_params={"preprocessors": [{}, {"lowercase": True, }], }, + queries_set_name="Gender Queries", + aggregate_results=True, + aggregation_function="abs_avg", + generate_subqueries=True, + warn_not_found_words=False, +) +print(RND_gender_results) +#plot_queries_results(RND_gender_results).show() +#RND_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + + diff --git a/examples/french.py b/examples/french.py new file mode 100644 index 0000000..ab55b74 --- /dev/null +++ b/examples/french.py @@ -0,0 +1,106 @@ +import gensim.downloader as api + +from wefe.datasets import load_weat +from wefe.metrics import RNSB, WEAT, RND +from wefe.query import Query +from wefe.utils import run_queries +from wefe.word_embedding_model import WordEmbeddingModel +from flair.embeddings import WordEmbeddings +from wefe.utils import plot_queries_results +from wefe.utils import create_ranking +from wefe.utils import calculate_ranking_correlations, plot_ranking_correlations +from wefe.utils import plot_ranking + +glove_embedding_fr = WordEmbeddings('fr') +glove_keyed_vectors_fr = glove_embedding_fr.precomputed_word_embeddings +modelfr = WordEmbeddingModel(glove_keyed_vectors_fr, 'fr') + +models_fr = [modelfr] + +# create the word sets +target_setsfr = [['Masculin', 'homme', 'garçon', 'frère', 'il', 'lui', 'le sien', 'fils'], ['femelle', 'femme', 'fille', 'sœur', 'elle', 'sa', 'la sienne', 'la fille']] +target_sets_namesfr = ['Male Terms', 'Female Terms'] +attribute_setsfr = [['exécutif', 'la gestion', 'professionnel', 'société', 'un salaire', 'Bureau', 'Entreprise', 'carrière'], ['domicile', 'parents', 'enfants', 'famille', 'les cousins', 'mariage', 'mariage', 'es proches']] +attribute_sets_namesfr = ['career', 'family'] +# create the query +gender_query_fr1 = Query(target_setsfr, attribute_setsfr, target_sets_namesfr, attribute_sets_namesfr) + +# create the word sets +target_setsfr2 = [['Masculin', 'homme', 'garçon', 'frère', 'il', 'lui', 'le sien', 'fils'], ['femelle', 'femme', 'fille', 'sœur', 'elle', 'sa', 'la sienne', 'la fille']] +target_sets_namesfr2 = ['Male Terms', 'Female Terms'] +attribute_setsfr2 = [['science', 'La technologie', 'la physique', 'chimie', 'Einstein', 'NASA', 'expérience', 'astronomie'], ['poésie', 'de l\'art', 'Danse', 'Littérature', 'roman', 'symphonie', 'drame', 'sculpture']] +attribute_sets_namesfr2 = ['Science', 'Arts'] +# create the query +gender_query_fr2 = Query(target_setsfr2, attribute_setsfr2, target_sets_namesfr2, attribute_sets_namesfr2) + +# create the word sets +target_setsfr3 = [['Masculin', 'homme', 'garçon', 'frère', 'il', 'lui', 'le sien', 'fils'], ['femelle', 'femme', 'fille', 'sœur', 'elle', 'sa', 'la sienne', 'la fille']] +target_sets_namesfr3 = ['Male Terms', 'Female Terms'] +attribute_setsfr3 = [['math', 'algèbre', 'géométrie', 'calcul', 'équations', 'calcul', 'Nombres', 'une addition'], ['poésie', 'de l\'art', 'Shakespeare', 'Danse', 'Littérature', 'roman', 'symphonie', 'drame']] +attribute_sets_namesfr3 = ['Maths', 'Arts2'] +# create the query +gender_query_fr3 = Query(target_setsfr3, attribute_setsfr3, target_sets_namesfr3, attribute_sets_namesfr3) + 
+gender_queriesfr = [gender_query_fr1, gender_query_fr2, gender_query_fr3] + + +WEAT_gender_results_fr = run_queries( + WEAT, + gender_queriesfr, + models_fr, + lost_vocabulary_threshold=0.4, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) + +print(WEAT_gender_results_fr) +# Plot the results +#plot_queries_results(WEAT_gender_results).show() +#WEAT_gender_results_fr.to_csv('test.csv', mode='a', header=True, index=False) + + +# run the queries using WEAT effect size +WEAT_EZ_gender_results_fr = run_queries( + WEAT, + gender_queriesfr, + models_fr, + lost_vocabulary_threshold=0.4, + metric_params={"preprocessors": [{"lowercase": True}], "return_effect_size": True,}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(WEAT_EZ_gender_results_fr) +#plot_queries_results(WEAT_EZ_gender_results).show() +#WEAT_EZ_gender_results_fr.to_csv('test.csv', mode='a', header=True, index=False) + + +RNSB_gender_results_fr = run_queries( + RNSB, + gender_queriesfr, + models_fr, + lost_vocabulary_threshold=0.4, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(RNSB_gender_results_fr) +#plot_queries_results(RNSB_gender_results).show() +#RNSB_gender_results_fr.to_csv('test.csv', mode='a', header=True, index=False) + +RND_gender_results_fr = run_queries( + RND, + gender_queriesfr, + models_fr, + lost_vocabulary_threshold=0.4, + metric_params={"preprocessors": [{}, {"lowercase": True, }], }, + queries_set_name="Gender Queries", + aggregate_results=True, + aggregation_function="abs_avg", + generate_subqueries=True, + warn_not_found_words=False, +) +print(RND_gender_results_fr) +#plot_queries_results(RND_gender_results_fr).show() +#RND_gender_results_fr.to_csv('test.csv', mode='a', header=True, index=False) + diff --git a/examples/german.py b/examples/german.py new file mode 100644 index 0000000..019c992 --- /dev/null +++ b/examples/german.py @@ -0,0 +1,105 @@ +import gensim.downloader as api + +from wefe.datasets import load_weat +from wefe.metrics import RNSB, WEAT, RND +from wefe.query import Query +from wefe.utils import run_queries +from wefe.word_embedding_model import WordEmbeddingModel +from flair.embeddings import WordEmbeddings +from wefe.utils import plot_queries_results +from wefe.utils import create_ranking +from wefe.utils import calculate_ranking_correlations, plot_ranking_correlations +from wefe.utils import plot_ranking + +glove_embedding = WordEmbeddings('de') +glove_keyed_vectors = glove_embedding.precomputed_word_embeddings +model1 = WordEmbeddingModel(glove_keyed_vectors, 'de') + +models = [model1] + +# create the word sets +target_sets1 = [['männlich', 'Mann', 'Junge', 'Bruder', 'er', 'ihm', 'seine', 'Sohn'], ['weiblich', 'Frau', 'Mädchen', 'Schwester', 'Sie', 'Sie', 'ihres', 'Tochter']] +target_sets_names1 = ['Male Terms', 'Female Terms'] +attribute_sets1 = [['Führungskraft', 'Verwaltung', 'Fachmann', 'Konzern', 'Gehalt', 'Büro', 'Unternehmen', 'Werdegang'], ['Heimat', 'Eltern', 'Kinder', 'Familie', 'Cousinen', 'Hochzeit', 'Hochzeit', 'Verwandten']] +attribute_sets_names1 = ['career', 'family'] +# create the query +gender_query_1 = Query(target_sets1, attribute_sets1, target_sets_names1, attribute_sets_names1) + +# create the word sets +target_sets2 = [['männlich', 'Mann', 'Junge', 'Bruder', 'er', 'ihm', 'seine', 'Sohn'], ['weiblich', 'Frau', 'Mädchen', 'Schwester', 'Sie', 'Sie', 'ihres', 'Tochter']] 
+target_sets_names2 = ['Male Terms', 'Female Terms'] +attribute_sets2 = [['Wissenschaft', 'Technologie', 'Physik', 'Chemie', 'Einstein', 'NASA', 'Experiment', 'Astronomie'], ['Poesie', 'Kunst', 'tanzen', 'Literatur', 'Roman', 'Symphonie', 'Theater', 'Skulptur']] +attribute_sets_names2 = ['Science', 'Arts'] +# create the query +gender_query_2 = Query(target_sets2, attribute_sets2, target_sets_names2, attribute_sets_names2) + +# create the word sets +target_sets3 = [['männlich', 'Mann', 'Junge', 'Bruder', 'er', 'ihm', 'seine', 'Sohn'], ['weiblich', 'Frau', 'Mädchen', 'Schwester', 'Sie', 'Sie', 'ihres', 'Tochter']] +target_sets_names3 = ['Male Terms', 'Female Terms'] +attribute_sets3 = [['Mathematik', 'Algebra', 'Geometrie', 'Infinitesimalrechnung', 'Gleichungen', 'Berechnung', 'Zahlen', 'Zusatz'], ['Poesie', 'Kunst', 'Shakespeare', 'tanzen', 'Literatur', 'Roman', 'Symphonie', 'Theater']] +attribute_sets_names3 = ['Maths', 'Arts2'] +# create the query +gender_query_3 = Query(target_sets3, attribute_sets3, target_sets_names3, attribute_sets_names3) + +gender_queries = [gender_query_1, gender_query_2, gender_query_3] + + +WEAT_gender_results = run_queries( + WEAT, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) + +print(WEAT_gender_results) +# Plot the results +#plot_queries_results(WEAT_gender_results).show() +#WEAT_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +# run the queries using WEAT effect size +WEAT_EZ_gender_results = run_queries( + WEAT, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}], "return_effect_size": True,}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(WEAT_EZ_gender_results) +#plot_queries_results(WEAT_EZ_gender_results).show() +#WEAT_EZ_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +RNSB_gender_results = run_queries( + RNSB, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(RNSB_gender_results) +#plot_queries_results(RNSB_gender_results).show() +#RNSB_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + +RND_gender_results = run_queries( + RND, + gender_queries, + models, + metric_params={"preprocessors": [{}, {"lowercase": True, }], }, + queries_set_name="Gender Queries", + aggregate_results=True, + aggregation_function="abs_avg", + generate_subqueries=True, + warn_not_found_words=False, +) +print(RND_gender_results) +#plot_queries_results(RND_gender_results).show() +#RND_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + diff --git a/examples/ital.py b/examples/ital.py new file mode 100644 index 0000000..efb119d --- /dev/null +++ b/examples/ital.py @@ -0,0 +1,106 @@ +import gensim.downloader as api + +from wefe.datasets import load_weat +from wefe.metrics import RNSB, WEAT, RND +from wefe.query import Query +from wefe.utils import run_queries +from wefe.word_embedding_model import WordEmbeddingModel +from flair.embeddings import WordEmbeddings +from wefe.utils import plot_queries_results +from wefe.utils import create_ranking +from wefe.utils import calculate_ranking_correlations, plot_ranking_correlations +from wefe.utils import plot_ranking + +glove_embedding = WordEmbeddings('it') +glove_keyed_vectors = 
glove_embedding.precomputed_word_embeddings +model1 = WordEmbeddingModel(glove_keyed_vectors, 'it') + +models = [model1] + +# create the word sets +target_sets1 = [['maschio', 'uomo', 'ragazzo', 'fratello', 'lui', 'lui', 'il suo', 'figlio'], ['femmina', 'donna', 'ragazza', 'sorella', 'lei', 'sua', 'la sua', 'figlia']] +target_sets_names1 = ['Male Terms', 'Female Terms'] +attribute_sets1 = [['esecutivo', 'gestione', 'professionale', 'società', 'stipendio', 'ufficio', 'attività commerciale', 'carriera'], ['casa', 'genitori', 'figli', 'famiglia', 'cugini', 'matrimonio', 'nozze', 'parenti']] +attribute_sets_names1 = ['career', 'family'] +# create the query +gender_query_1 = Query(target_sets1, attribute_sets1, target_sets_names1, attribute_sets_names1) + +# create the word sets +target_sets2 = [['maschio', 'uomo', 'ragazzo', 'fratello', 'lui', 'lui', 'il suo', 'figlio'], ['femmina', 'donna', 'ragazza', 'sorella', 'lei', 'sua', 'la sua', 'figlia']] +target_sets_names2 = ['Male Terms', 'Female Terms'] +attribute_sets2 = [['scienza', 'tecnologia', 'fisica', 'chimica', 'Einstein', 'NASA', 'sperimentare', 'astronomia'], ['poesia', 'arte', 'danza', 'letteratura', 'romanzo', 'sinfonia', 'Dramma', 'scultura']] +attribute_sets_names2 = ['Science', 'Arts'] +# create the query +gender_query_2 = Query(target_sets2, attribute_sets2, target_sets_names2, attribute_sets_names2) + +# create the word sets +target_sets3 = [['maschio', 'uomo', 'ragazzo', 'fratello', 'lui', 'lui', 'il suo', 'figlio'], ['femmina', 'donna', 'ragazza', 'sorella', 'lei', 'sua', 'la sua', 'figlia']] +target_sets_names3 = ['Male Terms', 'Female Terms'] +attribute_sets3 = [['matematica', 'algebra', 'geometria', 'calcolo', 'equazioni', 'calcolo', 'numeri', 'aggiunta'], ['poesia', 'arte', 'Shakespeare', 'danza', 'letteratura', 'romanzo', 'sinfonia', 'Dramma']] +attribute_sets_names3 = ['Maths', 'Arts2'] +# create the query +gender_query_3 = Query(target_sets3, attribute_sets3, target_sets_names3, attribute_sets_names3) + +gender_queries = [gender_query_1, gender_query_2, gender_query_3] + + +WEAT_gender_results = run_queries( + WEAT, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) + +print(WEAT_gender_results) +# Plot the results +#plot_queries_results(WEAT_gender_results).show() +#WEAT_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +# run the queries using WEAT effect size +WEAT_EZ_gender_results = run_queries( + WEAT, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}], "return_effect_size": True,}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(WEAT_EZ_gender_results) +#plot_queries_results(WEAT_EZ_gender_results).show() +#WEAT_EZ_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +RNSB_gender_results = run_queries( + RNSB, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(RNSB_gender_results) +#plot_queries_results(RNSB_gender_results).show() +#RNSB_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + +RND_gender_results = run_queries( + RND, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{}, {"lowercase": True, }], }, + queries_set_name="Gender Queries", + 
aggregate_results=True, + aggregation_function="abs_avg", + generate_subqueries=True, + warn_not_found_words=False, +) +print(RND_gender_results) +#plot_queries_results(RND_gender_results).show() +#RND_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + diff --git a/examples/nl.py b/examples/nl.py new file mode 100644 index 0000000..4d5d7f5 --- /dev/null +++ b/examples/nl.py @@ -0,0 +1,106 @@ +import gensim.downloader as api + +from wefe.datasets import load_weat +from wefe.metrics import RNSB, WEAT, RND +from wefe.query import Query +from wefe.utils import run_queries +from wefe.word_embedding_model import WordEmbeddingModel +from flair.embeddings import WordEmbeddings +from wefe.utils import plot_queries_results +from wefe.utils import create_ranking +from wefe.utils import calculate_ranking_correlations, plot_ranking_correlations +from wefe.utils import plot_ranking + +glove_embedding = WordEmbeddings('nl') +glove_keyed_vectors = glove_embedding.precomputed_word_embeddings +model1 = WordEmbeddingModel(glove_keyed_vectors, 'nl') + +models = [model1] + +# create the word sets +target_sets1 = [['mannelijk', 'Mens', 'jongen', 'broer', 'hij', 'hem', 'zijn', 'zoon'], ['vrouwelijk', 'vrouw', 'meisje', 'zus', 'ze', 'haar', 'haar', 'dochter']] +target_sets_names1 = ['Male Terms', 'Female Terms'] +attribute_sets1 = [['leidinggevend', 'beheer', 'professioneel', 'bedrijf', 'salaris', 'kantoor', 'bedrijf', 'carrière'], ['huis', 'ouders', 'kinderen', 'familie', 'nichten en neven', 'huwelijk', 'bruiloft', 'familieleden']] +attribute_sets_names1 = ['career', 'family'] +# create the query +gender_query_1 = Query(target_sets1, attribute_sets1, target_sets_names1, attribute_sets_names1) + +# create the word sets +target_sets2 = [['mannelijk', 'Mens', 'jongen', 'broer', 'hij', 'hem', 'zijn', 'zoon'], ['vrouwelijk', 'vrouw', 'meisje', 'zus', 'ze', 'haar', 'haar', 'dochter']] +target_sets_names2 = ['Male Terms', 'Female Terms'] +attribute_sets2 = [['wetenschap', 'technologie', 'natuurkunde', 'scheikunde', 'Einstein', 'NASA', 'experiment', 'astronomie'], ['poëzie', 'kunst', 'dans', 'literatuur', 'roman', 'symfonie', 'drama', 'beeldhouwwerk']] +attribute_sets_names2 = ['Science', 'Arts'] +# create the query +gender_query_2 = Query(target_sets2, attribute_sets2, target_sets_names2, attribute_sets_names2) + +# create the word sets +target_sets3 = [['mannelijk', 'Mens', 'jongen', 'broer', 'hij', 'hem', 'zijn', 'zoon'], ['vrouwelijk', 'vrouw', 'meisje', 'zus', 'ze', 'haar', 'haar', 'dochter']] +target_sets_names3 = ['Male Terms', 'Female Terms'] +attribute_sets3 = [['wiskunde', 'algebra', 'geometrie', 'rekening', 'vergelijkingen', 'berekening', 'cijfers', 'toevoeging'], ['poëzie', 'kunst', 'Shakespeare', 'dans', 'literatuur', 'roman', 'symfonie', 'drama']] +attribute_sets_names3 = ['Maths', 'Arts2'] +# create the query +gender_query_3 = Query(target_sets3, attribute_sets3, target_sets_names3, attribute_sets_names3) + +gender_queries = [gender_query_1, gender_query_2, gender_query_3] + + +WEAT_gender_results = run_queries( + WEAT, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) + +print(WEAT_gender_results) +# Plot the results +#plot_queries_results(WEAT_gender_results).show() +#WEAT_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +# run the queries using WEAT effect size +WEAT_EZ_gender_results = run_queries( + WEAT, + gender_queries, 
+ models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}], "return_effect_size": True,}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(WEAT_EZ_gender_results) +#plot_queries_results(WEAT_EZ_gender_results).show() +#WEAT_EZ_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +RNSB_gender_results = run_queries( + RNSB, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(RNSB_gender_results) +#plot_queries_results(RNSB_gender_results).show() +#RNSB_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + +RND_gender_results = run_queries( + RND, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{}, {"lowercase": True, }], }, + queries_set_name="Gender Queries", + aggregate_results=True, + aggregation_function="abs_avg", + generate_subqueries=True, + warn_not_found_words=False, +) +print(RND_gender_results) +#plot_queries_results(RND_gender_results).show() +#RND_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + diff --git a/examples/spanish.py b/examples/spanish.py new file mode 100644 index 0000000..1c33c92 --- /dev/null +++ b/examples/spanish.py @@ -0,0 +1,105 @@ +import gensim.downloader as api + +from wefe.datasets import load_weat +from wefe.metrics import RNSB, WEAT, RND +from wefe.query import Query +from wefe.utils import run_queries +from wefe.word_embedding_model import WordEmbeddingModel +from flair.embeddings import WordEmbeddings +from wefe.utils import plot_queries_results +from wefe.utils import create_ranking +from wefe.utils import calculate_ranking_correlations, plot_ranking_correlations +from wefe.utils import plot_ranking + +glove_embedding = WordEmbeddings('es') +glove_keyed_vectors = glove_embedding.precomputed_word_embeddings +model1 = WordEmbeddingModel(glove_keyed_vectors, 'es') + +models = [model1] + +# create the word sets +target_sets1 = [['masculino', 'hombre', 'chico', 'hermano', 'él', 'él', 'su', 'hijo'], ['mujer', 'mujer', 'muchacha', 'hermana', 'ella', 'ella', 'suyo', 'hija']] +target_sets_names1 = ['Male Terms', 'Female Terms'] +attribute_sets1 = [['ejecutivo', 'administración', 'profesional', 'corporación', 'salario', 'oficina', 'negocio', 'carrera profesional'], ['hogar', 'padres', 'niños', 'familia', 'primos', 'matrimonio', 'boda', 'parientes']] +attribute_sets_names1 = ['career', 'family'] +# create the query +gender_query_1 = Query(target_sets1, attribute_sets1, target_sets_names1, attribute_sets_names1) + +# create the word sets +target_sets2 = [['masculino', 'hombre', 'chico', 'hermano', 'él', 'él', 'su', 'hijo'], ['mujer', 'mujer', 'muchacha', 'hermana', 'ella', 'ella', 'suyo', 'hija']] +target_sets_names2 = ['Male Terms', 'Female Terms'] +attribute_sets2 = [['Ciencias', 'tecnología', 'física', 'química', 'Einstein', 'NASA', 'experimentar', 'astronomía'], ['poesía', 'Arte', 'danza', 'literatura', 'novela', 'sinfonía', 'drama','escultura']] +attribute_sets_names2 = ['Science', 'Arts'] +# create the query +gender_query_2 = Query(target_sets2, attribute_sets2, target_sets_names2, attribute_sets_names2) + +# create the word sets +target_sets3 = [['masculino', 'hombre', 'chico', 'hermano', 'él', 'él', 'su', 'hijo'], ['mujer', 'mujer', 'muchacha', 'hermana', 'ella', 'ella', 'suyo', 'hija']] +target_sets_names3 = ['Male Terms', 'Female Terms'] 
+attribute_sets3 = [['Matemáticas', 'álgebra', 'geometría', 'cálculo', 'ecuaciones', 'cálculo', 'números', 'adición'], ['poesía', 'Arte', 'Shakespeare', 'danza', 'literatura', 'novela', 'sinfonía', 'drama']] +attribute_sets_names3 = ['Maths', 'Arts2'] +# create the query +gender_query_3 = Query(target_sets3, attribute_sets3, target_sets_names3, attribute_sets_names3) + +gender_queries = [gender_query_1, gender_query_2, gender_query_3] + + +WEAT_gender_results = run_queries( + WEAT, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) + +print(WEAT_gender_results) +# Plot the results +#plot_queries_results(WEAT_gender_results).show() +#WEAT_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +# run the queries using WEAT effect size +WEAT_EZ_gender_results = run_queries( + WEAT, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}], "return_effect_size": True,}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(WEAT_EZ_gender_results) +#plot_queries_results(WEAT_EZ_gender_results).show() +#WEAT_EZ_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +RNSB_gender_results = run_queries( + RNSB, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(RNSB_gender_results) +#plot_queries_results(RNSB_gender_results).show() +#RNSB_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + +RND_gender_results = run_queries( + RND, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{}, {"lowercase": True, }], }, + queries_set_name="Gender Queries", + aggregate_results=True, + aggregation_function="abs_avg", + generate_subqueries=True, + warn_not_found_words=False, +) +print(RND_gender_results) +#plot_queries_results(RND_gender_results).show() +#RND_gender_results.to_csv('test.csv', mode='a', header=True, index=False) diff --git a/examples/swedish.py b/examples/swedish.py new file mode 100644 index 0000000..3fdceb3 --- /dev/null +++ b/examples/swedish.py @@ -0,0 +1,105 @@ +import gensim.downloader as api + +from wefe.datasets import load_weat +from wefe.metrics import RNSB, WEAT, RND +from wefe.query import Query +from wefe.utils import run_queries +from wefe.word_embedding_model import WordEmbeddingModel +from flair.embeddings import WordEmbeddings +from wefe.utils import plot_queries_results +from wefe.utils import create_ranking +from wefe.utils import calculate_ranking_correlations, plot_ranking_correlations +from wefe.utils import plot_ranking + +glove_embedding = WordEmbeddings('sv') +glove_keyed_vectors = glove_embedding.precomputed_word_embeddings +model1 = WordEmbeddingModel(glove_keyed_vectors, 'sv') + +models = [model1] + +# create the word sets +target_sets1 = [['manlig', 'man', 'pojke', 'bror', 'han', 'honom', 'hans', 'son'], ['kvinna', 'kvinna', 'flicka', 'syster', 'hon', 'henne', 'hennes', 'dotter']] +target_sets_names1 = ['Male Terms', 'Female Terms'] +attribute_sets1 = [['verkställande', 'förvaltning', 'professionell', 'företag', 'lön', 'kontor', 'företag', 'karriär'], ['Hem', 'föräldrar', 'barn', 'familj', 'kusiner', 'äktenskap', 'bröllop', 'släktingar']] +attribute_sets_names1 = ['career', 'family'] +# create the query +gender_query_1 
= Query(target_sets1, attribute_sets1, target_sets_names1, attribute_sets_names1) + +# create the word sets +target_sets2 = [['manlig', 'man', 'pojke', 'bror', 'han', 'honom', 'hans', 'son'], ['kvinna', 'kvinna', 'flicka', 'syster', 'hon', 'henne', 'hennes', 'dotter']] +target_sets_names2 = ['Male Terms', 'Female Terms'] +attribute_sets2 = [['vetenskap', 'teknologi', 'fysik', 'kemi', 'Einstein', 'NASA', 'experimentera', 'astronomi'], ['poesi', 'konst', 'dansa', 'litteratur', 'roman', 'symfoni', 'drama', 'skulptur']] +attribute_sets_names2 = ['Science', 'Arts'] +# create the query +gender_query_2 = Query(target_sets2, attribute_sets2, target_sets_names2, attribute_sets_names2) + +# create the word sets +target_sets3 = [['manlig', 'man', 'pojke', 'bror', 'han', 'honom', 'hans', 'son'], ['kvinna', 'kvinna', 'flicka', 'syster', 'hon', 'henne', 'hennes', 'dotter']] +target_sets_names3 = ['Male Terms', 'Female Terms'] +attribute_sets3 = [['matematik', 'algebra', 'geometri', 'kalkyl', 'ekvationer', 'beräkning', 'tal', 'tillägg'], ['poesi', 'konst', 'Shakespeare', 'dansa', 'litteratur', 'roman', 'symfoni', 'drama']] +attribute_sets_names3 = ['Maths', 'Arts2'] +# create the query +gender_query_3 = Query(target_sets3, attribute_sets3, target_sets_names3, attribute_sets_names3) + +gender_queries = [gender_query_1, gender_query_2, gender_query_3] + + +WEAT_gender_results = run_queries( + WEAT, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) + +print(WEAT_gender_results) +# Plot the results +#plot_queries_results(WEAT_gender_results).show() +#WEAT_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +# run the queries using WEAT effect size +WEAT_EZ_gender_results = run_queries( + WEAT, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}], "return_effect_size": True,}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(WEAT_EZ_gender_results) +#plot_queries_results(WEAT_EZ_gender_results).show() +#WEAT_EZ_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + + +RNSB_gender_results = run_queries( + RNSB, + gender_queries, + models, + lost_vocabulary_threshold=0.3, + metric_params={"preprocessors": [{"lowercase": True}]}, + aggregate_results=True, + queries_set_name="Gender Queries", +) +print(RNSB_gender_results) +#plot_queries_results(RNSB_gender_results).show() +#RNSB_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + +RND_gender_results = run_queries( + RND, + gender_queries, + models, + metric_params={"preprocessors": [{}, {"lowercase": True, }], }, + queries_set_name="Gender Queries", + aggregate_results=True, + aggregation_function="abs_avg", + generate_subqueries=True, + warn_not_found_words=False, +) +print(RND_gender_results) +#plot_queries_results(RND_gender_results).show() +#RND_gender_results.to_csv('test.csv', mode='a', header=True, index=False) + From e36bc983dcc8a753e750d31d5853b43667d0db57 Mon Sep 17 00:00:00 2001 From: Harsh Vardhan Rai Date: Fri, 4 Feb 2022 16:36:58 +0000 Subject: [PATCH 02/25] Create Harsh_Readme --- examples/Harsh_Readme | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 examples/Harsh_Readme diff --git a/examples/Harsh_Readme b/examples/Harsh_Readme new file mode 100644 index 0000000..4bb7fe5 --- /dev/null +++ b/examples/Harsh_Readme @@ 
-0,0 +1,42 @@
+FastText is an open-source framework that enables users to learn text representations and classifiers. We use fastText to get our pre-trained word embeddings
+for seven languages: English, Dutch, Swedish, Italian, French, Spanish and German. All of these were learned from Common Crawl and Wikipedia data. The models
+were created with Continuous Bag Of Words (CBOW) with position-weights, a dimensionality of 300, character n-grams of length 5, a window of size 5, and 10
+negatives. Here, CBOW is a neural-network-based method that learns a vector representation for every word.
+
+We use word sets defined in previous work (Badilla et al., 2020) that have been validated for examining bias in word embeddings. These word sets were
+originally used in English only. In this research, word embeddings in the seven languages in focus must be examined to identify bias. We must also keep in
+mind that a query is only legitimate if the language of its word sets is the same as the embedding model's language. Hence, Google Translate is used, similar
+to Kurpicz-Briki and Leoni (2021), to translate the word sets into the language under investigation. Once this is complete, we have seven different word sets
+for the seven embedding models (one for each language).
+
+Let us understand this with the help of an example. If we consider a query of Male and Female terms with respect to Career and Family, the attribute set for
+'career' is initially defined as:
+
+Career: [ "executive", "management", "professional", "corporation", "salary", "office", "business", "career" ]
+
+If the bias is to be measured in Swedish, this attribute set must be translated to Swedish. This is done with Google Translate, which yields:
+
+Career: ['verkställande', 'förvaltning', 'professionell', 'företag', 'lön', 'kontor', 'företag', 'karriär'].
+
+Queries, fairness metrics and embedding models are the essential components given as input to generate a bias score. As discussed earlier, we will be
+working with 4 different fairness metrics (WEAT, WEAT-ES, RND and RNSB), 3 different sets of queries and 7 different languages (word embeddings). Each time a
+model in a given language is selected, we evaluate it with all 4 fairness metrics on each query to generate a score. This is the bias score which identifies
+an embedding model as biased or not.
+
+The following example demonstrates how to use a sample gender query on a word embedding model with a given fairness metric. The workflow may be broken down
+into three steps (the first two are sketched in code below):
+
+• Download and install the word embedding model in the desired language.
+• Structure the query based on the target set and the attribute set for that particular language, using Google Translate.
+• Execute the query with the fairness metric through the Word Embedding Model.
+
+Any score greater than 0 suggests that there is indeed a bias for the query in consideration in the particular language.
+
+In the majority of cases a score will be obtained. Nevertheless, on a few occasions a query will be missing about 20% of its words; the results will reflect
+that, the query declares itself invalid, and it returns NaN. In such cases, a "lost vocabulary threshold" parameter is introduced which can limit the
+fraction of words that may be lost or missing.
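+The first two steps of this workflow, as a minimal sketch (the language code and the shortened word lists below are illustrative; any of the translated sets
+above can be substituted):
+
+from flair.embeddings import WordEmbeddings
+from wefe.query import Query
+from wefe.word_embedding_model import WordEmbeddingModel
+
+# Step 1: download/load the pre-trained fastText embeddings for the desired
+# language (here Swedish) and wrap them as a WEFE model.
+swedish_embedding = WordEmbeddings('sv')
+model = WordEmbeddingModel(swedish_embedding.precomputed_word_embeddings, 'sv')
+
+# Step 2: structure the query from the translated target and attribute sets.
+targets = [['man', 'pojke', 'han'], ['kvinna', 'flicka', 'hon']]
+attributes = [['verkställande', 'lön', 'kontor'], ['föräldrar', 'barn', 'familj']]
+query = Query(targets, attributes,
+              ['Male Terms', 'Female Terms'], ['Career', 'Family'])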
+A 30% loss is permitted in the following scenario (the third step):
+
+BiasScore = WEAT().run_query(query, model,
+                             lost_vocabulary_threshold = 0.3,
+                             warn_not_found_words = True)

From 6afca8ef990a5173c0d48994a61eae624671d81d Mon Sep 17 00:00:00 2001
From: Pablo Badilla
Date: Sun, 29 May 2022 21:42:54 -0400
Subject: [PATCH 03/25] fix bing liu lexicon read

---
 wefe/datasets/datasets.py | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/wefe/datasets/datasets.py b/wefe/datasets/datasets.py
index 3303bf5..3b6d42d 100644
--- a/wefe/datasets/datasets.py
+++ b/wefe/datasets/datasets.py
@@ -240,18 +240,16 @@ def load_bingliu() -> Dict[str, List[str]]:
         resource_package, resource_pos_path
     )
 
-    negative = pd.read_csv(
-        bingliu_neg_bytes, sep="\n", header=None, names=["word"], encoding="latin-1"
-    )
-    negative_cleaned = negative.loc[30:].values.flatten().tolist()
-    positive = pd.read_csv(
-        bingliu_pos_bytes, sep="\n", header=None, names=["word"], encoding="latin-1"
-    )
-    positive_cleaned = positive.loc[29:].values.flatten().tolist()
+    negative_words = [
+        word.decode("latin-1").strip() for word in bingliu_neg_bytes.readlines()
+    ][31:]
+    positive_words = [
+        word.decode("latin-1").strip() for word in bingliu_pos_bytes.readlines()
+    ][30:]
 
     bingliu_lexicon = {
-        "positive_words": positive_cleaned,
-        "negative_words": negative_cleaned,
+        "positive_words": positive_words,
+        "negative_words": negative_words,
     }
 
     return bingliu_lexicon

From 3fdcc40fad434a1650922187f88aea399352e97b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mar=C3=ADa=20Jos=C3=A9=20Zambrano?= <53621345+mzambrano1@users.noreply.github.com>
Date: Tue, 14 Jun 2022 21:52:46 -0400
Subject: [PATCH 04/25] Feature/half sibling regression (#30)

* half sibling regression added
* half sibling regression tests added
* method description added
* code formatted
* _ for non public methods
* _ to non public methods
* target and ignores sets included
* minor errors fixed
* tests target and ignore params
* documentation updated
* examples added
* change duplicated lines
* target words changed, progress bar added
* method description changed
* long lines shorten
* error fixed in test
* typos fixed
* gender generalized to bias
* gender generalized to bias
* half sibling regression added to init
* method added to api
* list comprehension changed
* memory warning added
* latex added to documentation
* fix documentation and some types
* minor order fix in docs and test
* target and ignore sets error fixed
* examples fixed
* typo fixed
* error in examples fixed

Co-authored-by: Pablo Badilla
---
 doc/api.rst                            |   6 +
 wefe/__init__.py                       |   3 +-
 wefe/debias/__init__.py                |   1 +
 wefe/debias/half_sibling_regression.py | 358 +++++++++++++++++++++++++
 wefe/tests/test_debias.py              | 132 ++++++++-
 5 files changed, 493 insertions(+), 7 deletions(-)
 create mode 100644 wefe/debias/half_sibling_regression.py

diff --git a/doc/api.rst b/doc/api.rst
index e1b98f0..47d5961 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -89,6 +89,12 @@ This list contains the debiasing methods implemented so far in WEFE.
 
    MulticlassHardDebias
 
+.. autosummary::
+   :toctree: generated/
+   :template: class.rst
+
+   HalfSiblingRegression
+
 
 Dataloaders
 ===========
diff --git a/wefe/__init__.py b/wefe/__init__.py
index 3c70c0f..5e1c4c6 100644
--- a/wefe/__init__.py
+++ b/wefe/__init__.py
@@ -6,7 +6,7 @@
     load_bingliu,
     load_weat,
 )
-from wefe.debias import HardDebias, MulticlassHardDebias
+from wefe.debias import HardDebias, MulticlassHardDebias, HalfSiblingRegression
 from wefe.metrics import WEAT
 from wefe.metrics.base_metric import BaseMetric
 from wefe.metrics.ECT import ECT
@@ -36,6 +36,7 @@
     "RIPA",
     "HardDebias",
     "MulticlassHardDebias",
+    "HalfSiblingRegression",
     "load_bingliu",
     "fetch_debias_multiclass",
     "fetch_debiaswe",
diff --git a/wefe/debias/__init__.py b/wefe/debias/__init__.py
index 5d67682..29d2188 100644
--- a/wefe/debias/__init__.py
+++ b/wefe/debias/__init__.py
@@ -1,2 +1,3 @@
 from wefe.debias.hard_debias import HardDebias
 from wefe.debias.multiclass_hard_debias import MulticlassHardDebias
+from wefe.debias.half_sibling_regression import HalfSiblingRegression
\ No newline at end of file
diff --git a/wefe/debias/half_sibling_regression.py b/wefe/debias/half_sibling_regression.py
new file mode 100644
index 0000000..5afcabc
--- /dev/null
+++ b/wefe/debias/half_sibling_regression.py
@@ -0,0 +1,358 @@
+"""Half Sibling Regression WEFE implementation."""
+
+from copy import deepcopy
+from typing import Dict, List, Optional
+
+import numpy as np
+from tqdm import tqdm
+from wefe.debias.base_debias import BaseDebias
+from wefe.preprocessing import get_embeddings_from_sets
+from wefe.utils import check_is_fitted
+from wefe.word_embedding_model import WordEmbeddingModel
+
+
+class HalfSiblingRegression(BaseDebias):
+    """Half Sibling Debias method.
+
+    This method proposes to learn spurious gender information via causal
+    inference by utilizing the statistical dependency between gender-biased
+    word vectors and gender definition word vectors. The learned spurious
+    gender information is then subtracted from the gender-biased word
+    vectors to achieve gender debiasing, as follows, where :math:`V_n'` are
+    the debiased word vectors, :math:`V_n` are the non-gender-definition
+    word vectors and :math:`G` is the approximated gender information:
+
+    .. math::
+
+        V_n' := V_n - G
+
+    :math:`G` is obtained by predicting the non-gender-definition word
+    vectors (:math:`V_n`) from the gender-definition word vectors
+    (:math:`V_d`):
+
+    .. math::
+
+        G := E[V_n|V_d]
+
+    The prediction is done by a ridge regression in the following steps:
+
+    1. Compute the weight matrix of a ridge regression using the two sets of words:
+
+    .. math::
+
+        W = ((V_d)^T V_d + \\alpha I)^{-1} (V_d)^TV_n
+
+    2. Compute the gender information:
+
+    .. math::
+
+        G = V_d W
+
+    3. Subtract the gender information from the non-gender-definition words:
+
+    .. math::
+
+        V_n' = V_n - G
+
+    This method is binary because it only allows 2 classes of the same bias
+    criterion, such as male or female.
+    For a multiclass debias (such as for Latinos, Asians and Whites), it
+    is recommended to visit the MulticlassHardDebias class.
+
+    .. warning::
+
+        This method requires three times the memory of the model when a copy of
+        the model is made and two times the memory of the model if not. Make sure this
+        much memory is available.
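+
+    The three regression steps above, as a rough NumPy sketch (illustrative
+    only; here ``Vd`` and ``Vn`` are assumed to be matrices whose columns are
+    the gender-definition and non-gender-definition word vectors)::
+
+        import numpy as np
+
+        # W = ((Vd)^T Vd + alpha I)^(-1) (Vd)^T Vn
+        W = np.linalg.inv(Vd.T @ Vd + alpha * np.eye(Vd.shape[1])) @ (Vd.T @ Vn)
+        G = Vd @ W          # approximated gender information
+        Vn_debiased = Vn - G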
+
+    Examples
+    --------
+    The following example shows how to execute the Half Sibling Regression
+    debias method, which reduces bias in a word embedding model:
+
+    >>> from wefe.debias.half_sibling_regression import HalfSiblingRegression
+    >>> from wefe.utils import load_test_model
+    >>> from wefe.datasets import fetch_debiaswe
+    >>>
+    >>> # load the model (in this case, the test model included in wefe)
+    >>> model = load_test_model()
+    >>> # load gender specific words, in this case the ones included in wefe
+    >>> debiaswe_wordsets = fetch_debiaswe()
+    >>> gender_specific = debiaswe_wordsets["gender_specific"]
+    >>>
+    >>> # instantiate and fit the method
+    >>> hsr = HalfSiblingRegression().fit(
+    ...     model=model, bias_definitional_words=gender_specific
+    ... )
+    >>> # execute the debias on the words not included in the gender definition set
+    >>> debiased_model = hsr.transform(model = model)
+    Copy argument is True. Transform will attempt to create a copy of the original model. This may fail due to lack of memory.
+    Model copy created successfully.
+    >>>
+    >>>
+    >>> # if you want to debias only a specific set of words, you can
+    >>> # include them in the target parameter
+    >>> debiased_model = hsr.transform(
+    ...     model=model, target=["doctor", "nurse", "programmer"]
+    ... )
+    Copy argument is True. Transform will attempt to create a copy of the original model. This may fail due to lack of memory.
+    Model copy created successfully.
+    >>>
+    >>> # if you want to exclude a set of words from the debias process,
+    >>> # you can include them in the ignore parameter
+    >>> debiased_model = hsr.transform(
+    ...     model=model, ignore=["dress", "beard", "niece", "nephew"]
+    ... )
+    Copy argument is True. Transform will attempt to create a copy of the original model. This may fail due to lack of memory.
+    Model copy created successfully.
+
+    References
+    ----------
+    | [1]: Yang, Zekun and Juan Feng: A causal inference method for reducing
+    | gender bias in word embedding relations.
+    | In Proceedings of the AAAI Conference on Artificial Intelligence,
+      volume 34, pages 9434–9441, 2020.
+    | [2]: https://github.com/KunkunYang/GenderBiasHSR
+    | [3]: Bernhard Schölkopf, David W. Hogg, Dun Wang,
+    | Daniel Foreman-Mackey, Dominik Janzing, Carl-Johann Simon-Gabriel,
+      and Jonas Peters.
+    | Modeling confounding by half-sibling regression.
+    | Proceedings of the National Academy of Sciences, 113(27):7391–7398, 2016.
+    """
+
+    name = "Half Sibling Regression"
+    short_name = "HSR"
+
+    def __init__(
+        self, verbose: bool = False, criterion_name: Optional[str] = None,
+    ) -> None:
+        """Initialize a Half Sibling Regression Debias instance.
+
+        Parameters
+        ----------
+        verbose : bool, optional
+            True will print informative messages about the debiasing process,
+            by default False.
+        criterion_name : Optional[str], optional
+            The name of the criterion for which the debias is being executed,
+            e.g., 'Gender'.
This name will be included in the name of
+            the model returned by transform, by default None.
+        """
+        # check verbose
+        if not isinstance(verbose, bool):
+            raise TypeError(f"verbose should be a bool, got {verbose}.")
+
+        self.verbose = verbose
+
+        if criterion_name is None or isinstance(criterion_name, str):
+            self.criterion_name_ = criterion_name
+        else:
+            raise ValueError(f"criterion_name should be str, got: {criterion_name}")
+
+    def _get_bias_vectors(
+        self, model: WordEmbeddingModel, bias_definitional_words: List[str]
+    ) -> np.ndarray:
+
+        vectors = [model[word] for word in bias_definitional_words if word in model]
+        return np.asarray(vectors)
+
+    def _get_non_bias_dict(
+        self, model: WordEmbeddingModel, non_bias: List[str]
+    ) -> Dict[str, np.ndarray]:
+
+        dictionary = get_embeddings_from_sets(
+            model=model, sets=[non_bias], sets_name="non_bias", normalize=False
+        )
+        return dictionary[0]
+
+    def _compute_weight_matrix(
+        self, bias_vectors: np.ndarray, non_bias_vectors: np.ndarray, alpha: float
+    ) -> np.ndarray:
+
+        a = bias_vectors.T @ bias_vectors + alpha * np.eye(bias_vectors.shape[1])
+        b = bias_vectors.T @ non_bias_vectors
+        weight_matrix = np.linalg.inv(a) @ b
+        return weight_matrix
+
+    def _compute_bias_information(
+        self, bias_vectors: np.ndarray, weight_matrix: np.ndarray
+    ) -> np.ndarray:
+        bias_information = bias_vectors @ weight_matrix
+        return bias_information
+
+    def _subtract_bias_information(
+        self, non_bias_vectors: np.ndarray, bias_information: np.ndarray
+    ) -> np.ndarray:
+        debiased_vectors = non_bias_vectors - bias_information
+        return debiased_vectors
+
+    def _get_indexes(self, model, target: List[str], non_bias: List[str]) -> List[int]:
+        return [non_bias.index(word) for word in target if word in model]
+
+    def fit(
+        self,
+        model: WordEmbeddingModel,
+        bias_definitional_words: List[str],
+        alpha: float = 60,
+    ) -> BaseDebias:
+        """Compute the weight matrix and the bias information.
+
+        Parameters
+        ----------
+        model: WordEmbeddingModel
+            The word embedding model to debias.
+        bias_definitional_words: List[str]
+            List of strings. This list contains words that embody bias
+            information by definition.
+        alpha: float
+            Ridge Regression constant. By default 60.
+
+        Returns
+        -------
+        BaseDebias
+            The fitted debias method.
+        """
+        self.bias_definitional_words = bias_definitional_words
+        self.non_bias = list(
+            set(model.vocab.keys()) - set(self.bias_definitional_words)
+        )
+        self.alpha = alpha
+
+        bias_definitional_words_vectors = self._get_bias_vectors(
+            model, self.bias_definitional_words
+        ).T
+
+        self.non_bias_dict = self._get_non_bias_dict(model, self.non_bias)
+
+        # ------------------------------------------------------------------------------
+        # Compute the weight matrix.
+        if self.verbose:
+            print("Computing the weight matrix.")
+        weight_matrix = self._compute_weight_matrix(
+            bias_definitional_words_vectors,
+            np.asarray(list(self.non_bias_dict.values())).T,
+            alpha=self.alpha,
+        )
+
+        # ------------------------------------------------------------------------------
+        # Compute the approximated bias information
+        if self.verbose:
+            print("Computing bias information")
+        self.bias_information = self._compute_bias_information(
+            bias_definitional_words_vectors, weight_matrix
+        )
+
+        return self
+
+    def transform(
+        self,
+        model: WordEmbeddingModel,
+        target: Optional[List[str]] = None,
+        ignore: Optional[List[str]] = None,
+        copy: bool = True,
+    ) -> WordEmbeddingModel:
+        """Subtracts the gender information from vectors.
+
+        Parameters
+        ----------
+        model : WordEmbeddingModel
+            The word embedding model to mitigate.
+        target : Optional[List[str]], optional
+            If a set of words is specified in target, the debias method
+            will be performed only on the word embeddings of this set.
+            If `None` is provided, the debias will be performed on all
+            non gender specific words (except those specified in ignore).
+            Target words must not be included in the gender specific set,
+            by default `None`.
+        ignore : Optional[List[str]], optional
+            If target is `None` and a set of words is specified in ignore,
+            the debias method will be performed on all non gender specific
+            words except those specified in this set, by default `[]`.
+        copy : bool, optional
+            If `True`, the debias will be performed on a copy of the
+            model.
+            If `False`, the debias will be applied on the same model
+            delivered, causing its vectors to mutate.
+            **WARNING:** Setting copy to `True` requires RAM of at least
+            2x the size of the model, otherwise the execution of the
+            debias may raise a `MemoryError`, by default True.
+
+        Returns
+        -------
+        WordEmbeddingModel
+            The debiased embedding model.
+        """
+        # check if the following attributes exist in the object.
+        check_is_fitted(
+            self, ["bias_definitional_words", "non_bias", "alpha", "non_bias_dict"],
+        )
+
+        if self.verbose:
+            print(f"Executing Half Sibling Debias on {model.name}")
+
+        # -------------------------------------------------------------------
+        # Copy
+        if copy:
+            print(
+                "Copy argument is True. Transform will attempt to create a copy "
+                "of the original model. This may fail due to lack of memory."
+            )
+            model = deepcopy(model)
+            print("Model copy created successfully.")
+
+        else:
+            print(
+                "copy argument is False. The execution of this method will mutate "
+                "the original model."
+            )
+
+        # -------------------------------------------------------------------
+        # Subtract bias information from vectors:
+
+        if self.verbose:
+            print("Subtracting bias information.")
+        # if target or ignore are specified, the debias is applied only to the
+        # columns corresponding to those words' embeddings
+        if target or ignore:
+            if not target:
+                target = list(set(self.non_bias_dict.keys()) - set(ignore))
+
+            indexes = self._get_indexes(model, target, list(self.non_bias_dict.keys()))
+
+            bias_info = self.bias_information[:, indexes]
+            vectors = np.asarray(list(self.non_bias_dict.values())).T[:, indexes]
+            debiased_vectors = self._subtract_bias_information(vectors, bias_info).T
+            self.non_bias_dict = dict(zip(target, debiased_vectors))
+
+        # if target and ignore are not provided, the debias is applied to
+        # all non-bias vectors
+        else:
+            vectors = np.asarray(list(self.non_bias_dict.values())).T
+            debiased_vectors = self._subtract_bias_information(
+                vectors, self.bias_information
+            ).T
+            self.non_bias_dict = dict(zip(self.non_bias_dict.keys(), debiased_vectors))
+
+        if self.verbose:
+            print("Updating debiased vectors")
+
+        # -------------------------------------------------------------------
+        # update the model with the new vectors
+        for word in tqdm(self.non_bias_dict.keys()):
+            model.update(word, self.non_bias_dict[word].astype(model.wv.vectors.dtype))
+
+        # -------------------------------------------------------------------
+        # Generate the new KeyedVectors
+        if self.criterion_name_ is None:
+            new_model_name = f"{model.name}_debiased"
+        else:
+            new_model_name = f"{model.name}_{self.criterion_name_}_debiased"
+        model.name = new_model_name
+
+        if self.verbose:
+            print("Done!")
+
+        return model
diff --git a/wefe/tests/test_debias.py b/wefe/tests/test_debias.py
index 010f43c..8aca79b 100644
--- a/wefe/tests/test_debias.py
+++ b/wefe/tests/test_debias.py
@@ -1,15 +1,15 @@
-"""Tests of Hard Debias debiasing method."""
-import pytest
+"""Set of Tests for mitigation methods."""
 import numpy as np
+import pytest
 from gensim.models.keyedvectors import KeyedVectors
-
-from wefe.datasets import fetch_debiaswe, load_weat, fetch_debias_multiclass
+from wefe.datasets import fetch_debias_multiclass, fetch_debiaswe, load_weat
 from wefe.debias.base_debias import BaseDebias
+from wefe.debias.half_sibling_regression import HalfSiblingRegression
 from wefe.debias.hard_debias import HardDebias
 from wefe.debias.multiclass_hard_debias import MulticlassHardDebias
-from wefe.word_embedding_model import WordEmbeddingModel
-from wefe.metrics import WEAT, MAC
+from wefe.metrics import MAC, WEAT
 from wefe.query import Query
+from wefe.word_embedding_model import WordEmbeddingModel
 
 
 @pytest.fixture
@@ -393,3 +393,123 @@ def test_multiclass_hard_debias_class(model):
     assert model == gender_debiased_w2v
     assert model.wv == gender_debiased_w2v.wv
     assert model.name == gender_debiased_w2v.name
+
+
+def test_half_sibling_checks(model):
+    debiaswe_wordsets = fetch_debiaswe()
+
+    definitional_pairs = debiaswe_wordsets["definitional_pairs"]
+
+    with pytest.raises(
+        TypeError, match=r"verbose should be a bool, got .*",
+    ):
+        HalfSiblingRegression(verbose=1)
+
+
+def test_half_sibling_regression_class(model, capsys):
+
+    # -----------------------------------------------------------------
+    # Queries
+    weat_wordset = load_weat()
+    weat = WEAT()
+    query_1 = Query(
+        [weat_wordset["male_names"], weat_wordset["female_names"]],
+        [weat_wordset["pleasant_5"], weat_wordset["unpleasant_5"]],
+        ["Male Names", "Female Names"],
        ["Pleasant", "Unpleasant"],
+    )
+    query_2 = Query(
+        [weat_wordset["male_names"], weat_wordset["female_names"]],
+        [weat_wordset["career"], weat_wordset["family"]],
+        ["Male Names", "Female Names"],
+        ["Career", "Family"],
+    )
+
+    debiaswe_wordsets = fetch_debiaswe()
+
+    definitional_pairs = debiaswe_wordsets["definitional_pairs"]
+    equalize_pairs = debiaswe_wordsets["equalize_pairs"]
+    gender_specific = debiaswe_wordsets["gender_specific"]
+
+    # -----------------------------------------------------------------
+    # Gender Debias
+    hsr = HalfSiblingRegression(criterion_name="gender",)
+    hsr.fit(model, bias_definitional_words=gender_specific)
+
+    gender_debiased_w2v = hsr.transform(model, copy=True)
+
+    assert model.name == "word2vec"
+    assert gender_debiased_w2v.name == "word2vec_gender_debiased"
+
+    biased_results = weat.run_query(query_1, model, normalize=True)
+    debiased_results = weat.run_query(query_1, gender_debiased_w2v, normalize=True)
+    assert debiased_results["weat"] < biased_results["weat"]
+
+    biased_results = weat.run_query(query_2, model, normalize=True)
+    debiased_results = weat.run_query(query_2, gender_debiased_w2v, normalize=True)
+    assert debiased_results["weat"] < biased_results["weat"]
+
+    # -----------------------------------------------------------------
+    # Test target param
+    hsr = HalfSiblingRegression(verbose=True, criterion_name="gender",)
+
+    attributes = weat_wordset["pleasant_5"] + weat_wordset["unpleasant_5"]
+
+    gender_debiased_w2v = hsr.fit(
+        model, bias_definitional_words=gender_specific
+    ).transform(model, target=attributes, copy=True)
+
+    biased_results = weat.run_query(query_1, model, normalize=True)
+    debiased_results = weat.run_query(query_1, gender_debiased_w2v, normalize=True)
+    assert debiased_results["weat"] < biased_results["weat"]
+
+    biased_results = weat.run_query(query_2, model, normalize=True)
+    debiased_results = weat.run_query(query_2, gender_debiased_w2v, normalize=True)
+    assert debiased_results["weat"] - biased_results["weat"] < 0.000000
+
+    # -----------------------------------------------------------------
+    # Test ignore param
+    hsr = HalfSiblingRegression(verbose=True, criterion_name="gender",)
+
+    # in this test, the targets and attributes are included in the ignore list.
+    # this implies that neither of these words should be subjected to debias and
+    # therefore, both queries when executed with weat should return the same score.
+    targets = weat_wordset["male_names"] + weat_wordset["female_names"]
+    attributes = weat_wordset["pleasant_5"] + weat_wordset["unpleasant_5"]
+    gender_debiased_w2v = hsr.fit(
+        model, bias_definitional_words=gender_specific
+    ).transform(model, ignore=gender_specific + targets + attributes, copy=True)
+
+    biased_results = weat.run_query(query_1, model, normalize=True)
+    debiased_results = weat.run_query(query_1, gender_debiased_w2v, normalize=True)
+
+    assert debiased_results["weat"] - biased_results["weat"] < 0.0000001
+
+    # -----------------------------------------------------------------
+    # Test verbose
+    hsr = HalfSiblingRegression(verbose=True)
+    gender_debiased_w2v = hsr.fit(
+        model, bias_definitional_words=gender_specific
+    ).transform(model, copy=True)
+
+    out = capsys.readouterr().out
+    assert "Computing the weight matrix." in out
+    assert "Computing bias information" in out
+    assert f"Executing Half Sibling Debias on {model.name}" in out
+    assert "Copy argument is True. Transform will attempt to create a copy" in out
+    assert "Subtracting bias information."
+    assert "Updating debiased vectors" in out
+    assert "Done!" in out
+
+    assert model.name == "word2vec"
+    assert gender_debiased_w2v.name == "word2vec_debiased"
+
+    # -----------------------------------------------------------------
+    # Test inplace (copy = False)
+    hsr = HalfSiblingRegression(criterion_name="gender",)
+    hsr.fit(model, bias_definitional_words=gender_specific)
+
+    gender_debiased_w2v = hsr.transform(model, copy=False)
+    assert model == gender_debiased_w2v
+    assert model.wv == gender_debiased_w2v.wv
+    assert model.name == gender_debiased_w2v.name

From bfa58f8103c5c2dff4dd2beb77bc78f2afaec20f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mar=C3=ADa=20Jos=C3=A9=20Zambrano?= <53621345+mzambrano1@users.noreply.github.com>
Date: Wed, 15 Jun 2022 00:07:59 -0400
Subject: [PATCH 05/25] Feature/double hard debias (#28)

* double hard debias
* documentation added, bug fixed
* typo fixed
* minor bug fixed
* Double hard debias test added
* check if model is fitted
* typos fixed
* check size of definitional pairs
* errors fixed
* fix styles, added new method to api
* code formatted
* _ added to non public methods
* _ to non public methods
* examples added
* comments incorporated
* bug fixed
* progress bar added
* embeddings mean error fixed
* bias by projection error fixed
* long lines shorten
* test fixed
* typos fixed
* double hard debias added to init
* definitional pairs embeddings not normalized
* typos fixed
* check argument types
* fix docs and format
* return type fixed
* parameters moved from transform
* target parameter added
* tests added
* examples fixed

Co-authored-by: Pablo Badilla
---
 doc/about.rst                     |   1 +
 doc/api.rst                       |   6 +
 doc/conf.py                       |   1 -
 wefe/__init__.py                  |   8 +-
 wefe/debias/__init__.py           |   3 +-
 wefe/debias/double_hard_debias.py | 526 ++++++++++++++++++++++++++++++
 wefe/tests/test_debias.py         | 156 ++++++++-
 7 files changed, 693 insertions(+), 8 deletions(-)
 create mode 100644 wefe/debias/double_hard_debias.py

diff --git a/doc/about.rst b/doc/about.rst
index 737070f..a418f1c 100644
--- a/doc/about.rst
+++ b/doc/about.rst
@@ -449,6 +449,7 @@ Team
 - `Pablo Badilla `_.
 - `Felipe Bravo-Marquez `_.
 - `Jorge Pérez `_.
+- `María José Zambrano `_.
 
 Contributors
 ------------
diff --git a/doc/api.rst b/doc/api.rst
index 47d5961..c7f8500 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -89,6 +89,12 @@ This list contains the debiasing methods implemented so far in WEFE.
 
    MulticlassHardDebias
 
+
+.. autosummary::
+   :toctree: generated/
+   :template: class.rst
+
+   DoubleHardDebias
+
 .. autosummary::
    :toctree: generated/
    :template: class.rst
diff --git a/doc/conf.py b/doc/conf.py
index 414f8ec..9f41c5a 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -15,7 +15,6 @@
 import os
 import sys
 
-import sphinx_gallery
 import sphinx_rtd_theme
 
 # If extensions (or modules to document with autodoc) are in another directory,
diff --git a/wefe/__init__.py b/wefe/__init__.py
index 5e1c4c6..4590887 100644
--- a/wefe/__init__.py
+++ b/wefe/__init__.py
@@ -6,7 +6,12 @@
     load_bingliu,
     load_weat,
 )
-from wefe.debias import HardDebias, MulticlassHardDebias, HalfSiblingRegression
+from wefe.debias import (
+    DoubleHardDebias,
+    HalfSiblingRegression,
+    HardDebias,
+    MulticlassHardDebias,
+)
 from wefe.metrics import WEAT
 from wefe.metrics.base_metric import BaseMetric
 from wefe.metrics.ECT import ECT
@@ -36,6 +41,7 @@
     "RIPA",
     "HardDebias",
     "MulticlassHardDebias",
+    "DoubleHardDebias",
     "HalfSiblingRegression",
     "load_bingliu",
     "fetch_debias_multiclass",
diff --git a/wefe/debias/__init__.py b/wefe/debias/__init__.py
index 29d2188..bd96138 100644
--- a/wefe/debias/__init__.py
+++ b/wefe/debias/__init__.py
@@ -1,3 +1,4 @@
+from wefe.debias.double_hard_debias import DoubleHardDebias
+from wefe.debias.half_sibling_regression import HalfSiblingRegression
 from wefe.debias.hard_debias import HardDebias
 from wefe.debias.multiclass_hard_debias import MulticlassHardDebias
-from wefe.debias.half_sibling_regression import HalfSiblingRegression
\ No newline at end of file
diff --git a/wefe/debias/double_hard_debias.py b/wefe/debias/double_hard_debias.py
new file mode 100644
index 0000000..76c7e12
--- /dev/null
+++ b/wefe/debias/double_hard_debias.py
@@ -0,0 +1,526 @@
+"""Double Hard Debias WEFE implementation."""
+import operator
+from copy import deepcopy
+from typing import Any, Dict, List, Optional, Sequence
+
+import numpy as np
+from sklearn.cluster import KMeans
+from sklearn.decomposition import PCA, IncrementalPCA
+from sklearn.metrics import pairwise_distances
+from tqdm import tqdm
+from wefe.debias.base_debias import BaseDebias
+from wefe.preprocessing import get_embeddings_from_sets
+from wefe.utils import check_is_fitted
+from wefe.word_embedding_model import WordEmbeddingModel
+
+
+class DoubleHardDebias(BaseDebias):
+    """Double Hard Debias Method.
+
+    This method allows reducing the bias of an embedding model through geometric
+    operations between embeddings.
+    This method is binary because it only allows 2 classes of the same bias
+    criterion, such as male or female.
+    For a multiclass debias (such as for Latinos, Asians and Whites),
+    it is recommended to visit the MulticlassHardDebias class.
+
+    The main idea of this method is:
+
+    1. **Identify a bias subspace through the defining sets.** In the case
+    of gender, these could be e.g. `{'woman', 'man'}, {'she', 'he'}, ...`
+
+    2. Find the dominant directions of the entire set of vectors by doing a
+    principal components analysis over it.
+
+    3. Get the target words by finding the most biased words, i.e., the
+    words that are closest to the representation of each bias group. In
+    the case of gender: 'he' and 'she'.
+
+    4. Try removing each dominant direction resulting from the PCA, together
+    with the bias direction, from every vector in the target set, and find
+    which component reduces bias the most.
+
+    5. Remove the dominant direction that most reduces bias, and also remove
+    the bias direction from the vectors in the target set.
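+
+    Both removals are plain vector rejections. A minimal numeric sketch of
+    the two steps (the vectors below are illustrative, not taken from a real
+    model):
+
+    >>> import numpy as np
+    >>> e = np.array([0.4, 0.2, 0.1])  # an embedding, already mean-centered
+    >>> u = np.array([1.0, 0.0, 0.0])  # a dominant (frequency) direction
+    >>> v = np.array([0.0, 1.0, 0.0])  # the bias direction
+    >>> e = e - np.dot(np.dot(u.T, e), u)  # drop the frequency component
+    >>> e = e - v * e.dot(v) / v.dot(v)  # drop the bias direction
+    >>> e
+    array([0. , 0. , 0.1])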
+
+    Examples
+    --------
+    The following example shows how to execute the Double Hard Debias method,
+    which reduces bias in a word embedding model:
+
+    >>> from wefe.debias.double_hard_debias import DoubleHardDebias
+    >>> from wefe.utils import load_test_model
+    >>> from wefe.datasets import fetch_debiaswe
+    >>>
+    >>> # load the model (in this case, the test model included in wefe)
+    >>> model = load_test_model()
+    >>> # load definitional pairs; in this case, the definitional pairs
+    >>> # included in wefe
+    >>> debiaswe_wordsets = fetch_debiaswe()
+    >>> definitional_pairs = debiaswe_wordsets["definitional_pairs"]
+    >>>
+    >>> # instantiate and fit the method, including the words that represent
+    >>> # each bias group; in the case of gender: 'he' and 'she'
+    >>> dhd = DoubleHardDebias(verbose=False).fit(
+    ...     model=model,
+    ...     definitional_pairs=definitional_pairs,
+    ...     bias_representation=['he', 'she'],
+    ... )
+    >>> # execute the debias; if you don't want a set of words to be debiased,
+    >>> # include them in the ignore set
+    >>> gender_specific = debiaswe_wordsets["gender_specific"]
+    >>>
+    >>> debiased_model = dhd.transform(
+    ...     model=model, ignore=gender_specific
+    ... )
+    Copy argument is True. Transform will attempt to create a copy of the original model. This may fail due to lack of memory.
+    Model copy created successfully.
+    >>>
+    >>>
+    >>> # if you want the debias to be performed over a specific set of words,
+    >>> # you can add them in the target parameter
+    >>> debiased_model = dhd.transform(
+    ...     model=model, target=['doctor', 'nurse', 'programmer', 'teacher']
+    ... )
+    Copy argument is True. Transform will attempt to create a copy of the original model. This may fail due to lack of memory.
+    Model copy created successfully.
+
+    References
+    ----------
+    | [1]: Wang, Tianlu, Xi Victoria Lin, Nazneen Fatema Rajani, Bryan McCann,
+    |      Vicente Ordonez and Caiming Xiong.
+    | Double-Hard Debias: Tailoring Word Embeddings for Gender Bias Mitigation.
+    | CoRR, abs/2005.00965, 2020. https://arxiv.org/abs/2005.00965.
+    | [2]: https://github.com/uvavision/Double-Hard-Debias
+    """
+
+    name = "Double Hard Debias"
+    short_name = "DHD"
+
+    def __init__(
+        self,
+        pca_args: Dict[str, Any] = {"n_components": 10},
+        verbose: bool = False,
+        criterion_name: Optional[str] = None,
+        incremental_pca: bool = True,
+        n_words: int = 1000,
+        n_components: int = 4,
+    ) -> None:
+        """Initialize a Double Hard Debias instance.
+
+        Parameters
+        ----------
+        pca_args : Dict[str, Any], optional
+            Arguments for the PCA that is calculated internally in the
+            identification of the bias subspace,
+            by default {"n_components": 10}.
+        verbose : bool, optional
+            True will print informative messages about the debiasing process,
+            by default False.
+        criterion_name : Optional[str], optional
+            The name of the criterion for which the debias is being executed,
+            e.g., 'Gender'. This will indicate the name of the model that
+            transform returns, by default None.
+        incremental_pca : bool, optional
+            If `True`, incremental PCA will be used over the entire set of
+            vectors.
+            If `False`, regular PCA will be used over the entire set of
+            vectors.
+            **WARNING:** Running regular PCA over the entire set of vectors
+            may raise a `MemoryError`, by default True.
+        n_words : int, optional
+            Number of target words to be used for each bias group.
+            By default 1000.
+        n_components : int, optional
+            Number of PCA components to explore in the search for the one
+            that reduces bias the most. Usually the best one is close to
+            embedding dimension / 100. By default 4.
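+
+        Notes
+        -----
+        A rough sizing heuristic, following the description above: for the
+        common 300-dimensional embeddings, the best component is usually
+        among the first 300 / 100 = 3 principal components, so the default
+        of 4 leaves a small search margin.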
+        """
+        # check verbose
+        if not isinstance(verbose, bool):
+            raise TypeError(f"verbose should be a bool, got {verbose}.")
+
+        # check incremental pca
+        if not isinstance(incremental_pca, bool):
+            raise TypeError(
+                f"incremental_pca should be a bool, got {incremental_pca}."
+            )
+
+        if incremental_pca:
+            self.pca_type = IncrementalPCA()
+        else:
+            self.pca_type = PCA(svd_solver="randomized")
+
+        if not isinstance(pca_args, dict):
+            raise TypeError(f"pca_args should be a dict, got {pca_args}.")
+
+        self.pca_args = pca_args
+        self.verbose = verbose
+
+        if criterion_name is None or isinstance(criterion_name, str):
+            self.criterion_name_ = criterion_name
+        else:
+            raise ValueError(f"criterion_name should be str, got: {criterion_name}")
+
+        if not isinstance(n_words, int):
+            raise TypeError(f"n_words should be int, got: {n_words}")
+        self.n_words = n_words
+
+        if not isinstance(n_components, int):
+            raise TypeError(f"n_components should be int, got: {n_components}")
+        self.n_components = n_components
+
+    def _check_sets_size(
+        self, sets: Sequence[Sequence[str]], set_name: str,
+    ):
+        # each definitional set must be a pair of words
+        for idx, set_ in enumerate(sets):
+            if len(set_) != 2:
+                adverb = "less" if len(set_) < 2 else "more"
+
+                raise ValueError(
+                    f"The {set_name} pair at position {idx} ({set_}) has {adverb} "
+                    f"words than allowed by {self.name}: "
+                    f"got {len(set_)} words, expected 2."
+                )
+
+    def _similarity(self, u: List[np.ndarray], v: List[np.ndarray]) -> np.ndarray:
+        return 1 - pairwise_distances(u, v, metric="cosine")
+
+    def _bias_by_projection(
+        self,
+        model: WordEmbeddingModel,
+        target: List[str],
+        ignore: List[str],
+        bias_representation: List[str],
+    ) -> Dict[str, float]:
+        word1 = model[bias_representation[0]]
+        word2 = model[bias_representation[1]]
+        if target:
+            vectors = [model[word] for word in target if word in model]
+            words = [word for word in target if word in model]
+        else:
+            vectors = model.wv.vectors
+            words = list(model.vocab.keys())
+        # each word's bias is the difference between its similarity to one
+        # bias-representation word and its similarity to the other
+        similarities_vectors = (
+            self._similarity([word1], vectors)[0]
+            - self._similarity([word2], vectors)[0]
+        )
+
+        similarities = dict(zip(words, similarities_vectors))
+        for word in ignore:
+            if word in similarities:
+                similarities.pop(word)
+        return similarities
+
+    def get_target_words(
+        self,
+        model: WordEmbeddingModel,
+        target: List[str],
+        ignore: List[str],
+        n_words: int,
+        bias_representation: List[str],
+    ) -> List[str]:
+        """Obtain the target words to be debiased.
+
+        This is done by searching the "n_words" most biased words, i.e., the
+        words closest to each word in the bias_representation set (in the
+        case of gender, "he" and "she").
+
+        Parameters
+        ----------
+        model : WordEmbeddingModel
+            The word embedding model to debias.
+        target : List[str]
+            Set of words from which the most biased ones are selected. If
+            empty, the whole vocabulary is considered.
+        ignore : List[str]
+            Set of words to be ignored from the debias process.
+        n_words : int
+            Number of target words to use for each bias group.
+        bias_representation : List[str]
+            Two words that represent the bias groups.
+
+        Returns
+        -------
+        List[str]
+            List of target words for each bias group.
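+
+        Examples
+        --------
+        A usage sketch (the argument values are illustrative and ``model``
+        is assumed to be an already loaded WordEmbeddingModel):
+
+        >>> dhd = DoubleHardDebias()
+        >>> most_biased = dhd.get_target_words(
+        ...     model, target=[], ignore=[], n_words=1000,
+        ...     bias_representation=["he", "she"],
+        ... )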
+        """
+        similarities = self._bias_by_projection(
+            model, target, ignore, bias_representation
+        )
+        sorted_words = sorted(similarities.items(), key=operator.itemgetter(1))
+        female_words = [pair[0] for pair in sorted_words[:n_words]]
+        male_words = [pair[0] for pair in sorted_words[-n_words:]]
+        return female_words + male_words
+
+    def _principal_components(self, model: WordEmbeddingModel) -> np.ndarray:
+        self.pca_type.fit(model.wv.vectors - self.embeddings_mean)
+        return self.pca_type.components_
+
+    def _calculate_embeddings_mean(self, model: WordEmbeddingModel) -> np.ndarray:
+        return np.mean(model.wv.vectors, axis=0)
+
+    def _drop_frequency_features(
+        self, component: int, model: WordEmbeddingModel
+    ) -> Dict[str, np.ndarray]:
+        """Remove a frequency feature from the target words' embeddings.
+
+        This is done by removing one of the components obtained by the PCA
+        over the set of embeddings. The component to remove is indicated by
+        the parameter "component", and it is removed from the target words'
+        embeddings.
+
+        Parameters
+        ----------
+        component : int
+            The index of the principal component to be removed.
+        model : WordEmbeddingModel
+            A word embedding model.
+
+        Returns
+        -------
+        Dict[str, np.ndarray]
+            The new embeddings for the target words.
+        """
+        dropped_frequencies = {}
+
+        for word in self.target_words:
+            embedding = model[word]
+            decentralized_embedding = embedding - self.embeddings_mean
+
+            # project the embedding onto the selected principal component and
+            # remove that projection from the decentralized embedding
+            u = self.pca[component]
+            frequency = np.dot(np.dot(np.transpose(u), embedding), u)
+            new_embedding = decentralized_embedding - frequency
+
+            dropped_frequencies[word] = new_embedding
+        return dropped_frequencies
+
+    def _identify_bias_subspace(
+        self, defining_pairs_embeddings, verbose: bool = False,
+    ) -> PCA:
+
+        matrix = []
+        for embedding_dict_pair in defining_pairs_embeddings:
+
+            # Get the center of the current defining pair.
+            pair_embeddings = np.array(list(embedding_dict_pair.values()))
+            center = np.mean(pair_embeddings, axis=0)
+            # For each word embedding in the defining pair:
+            for embedding in embedding_dict_pair.values():
+                # Subtract the center of the pair from the embedding
+                matrix.append(embedding - center)
+        matrix = np.array(matrix)  # type: ignore
+
+        pca = PCA(**self.pca_args)
+        pca.fit(matrix)
+
+        if verbose:
+            explained_variance = pca.explained_variance_ratio_
+            print(f"PCA variance explained: {explained_variance[0:pca.n_components_]}")
+
+        return pca
+
+    def _drop(self, u: np.ndarray, v: np.ndarray) -> np.ndarray:
+        # standard vector rejection: remove from u its projection onto v
+        return u - v * u.dot(v) / v.dot(v)
+
+    def _debias(self, words_dict: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
+        # remove the bias direction from every embedding in the dictionary
+        for word in tqdm(words_dict):
+            embedding = words_dict[word]
+            debias_embedding = self._drop(embedding, self.bias_direction)
+            words_dict.update({word: debias_embedding})
+        return words_dict
+
+    def _get_optimal_dimension(
+        self, model: WordEmbeddingModel, n_words: int, n_components: int
+    ) -> int:
+        # try dropping each of the first n_components principal components
+        # and keep the one for which the debiased embeddings are least
+        # separable by bias group (lowest KMeans alignment score)
+        scores = []
+        for d in range(n_components):
+            result_embeddings = self._drop_frequency_features(d, model)
+            result_embeddings = self._debias(result_embeddings)
+            y_true = [0] * n_words + [1] * n_words
+            scores.append(self._kmeans_eval(result_embeddings, y_true, n_words))
+        min_alignment = min(scores)
+
+        return scores.index(min_alignment)
+
+    def _kmeans_eval(
+        self,
+        embeddings_dict: Dict[str, np.ndarray],
+        y_true: List[int],
+        n_words: int,
+        n_cluster: int = 2,
+    ) -> float:
+        # cluster the candidate embeddings into two groups and measure how
+        # well the clusters align with the bias-group labels: 0.5 means the
+        # clusters carry no bias information, 1.0 means fully separable
+        embeddings = [
+            embeddings_dict[word] for word in self.target_words[0 : 2 * n_words]
+        ]
+        kmeans = KMeans(n_cluster).fit(embeddings)
+        y_pred = kmeans.predict(embeddings)
+        correct = [1 if item1 == item2 else 0 for (item1, item2) in zip(y_true, y_pred)]
+        alignment_score = sum(correct) / float(len(correct))
+        alignment_score = max(alignment_score, 1 - alignment_score)
+        return alignment_score
+
+    def fit(
+        self,
+        model: WordEmbeddingModel,
+        definitional_pairs: Sequence[Sequence[str]],
+        bias_representation: List[str],
+    ) -> BaseDebias:
+        """Compute the bias direction and the principal components of the vectors.
+
+        Parameters
+        ----------
+        model : WordEmbeddingModel
+            The word embedding model to debias.
+        definitional_pairs : Sequence[Sequence[str]]
+            A sequence of string pairs that will be used to define the bias
+            direction. For example, for the case of gender debias, this list
+            could be [['woman', 'man'], ['girl', 'boy'], ['she', 'he'],
+            ['mother', 'father'], ...].
+        bias_representation : List[str]
+            Two words that represent each bias group. In the case of gender,
+            "he" and "she".
+
+        Returns
+        -------
+        BaseDebias
+            The debias method fitted.
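+
+        Examples
+        --------
+        A minimal fitting sketch (the definitional pairs shown are
+        illustrative; any gender-defining pairs present in the model's
+        vocabulary work):
+
+        >>> dhd = DoubleHardDebias().fit(
+        ...     model,
+        ...     definitional_pairs=[["she", "he"], ["woman", "man"]],
+        ...     bias_representation=["he", "she"],
+        ... )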
+        """
+        self.definitional_pairs = definitional_pairs
+
+        self._check_sets_size(self.definitional_pairs, "definitional")
+
+        # -------------------------------------------------------------------
+        # Obtain the embedding of each definitional pair.
+        if self.verbose:
+            print("Obtaining definitional pairs.")
+
+        self.definitional_pairs_embeddings = get_embeddings_from_sets(
+            model=model,
+            sets=definitional_pairs,
+            sets_name="definitional",
+            warn_lost_sets=self.verbose,
+            normalize=False,
+            verbose=self.verbose,
+        )
+        # -------------------------------------------------------------------
+        # Check that the bias representation words are in the model
+        if bias_representation[0] not in model or bias_representation[1] not in model:
+            raise Exception("bias_representation words not in model")
+        self.bias_representation = bias_representation
+
+        # -------------------------------------------------------------------
+        # Identify the bias subspace using the defining pairs.
+        if self.verbose:
+            print("Identifying the bias subspace.")
+        self.bias_direction = self._identify_bias_subspace(
+            self.definitional_pairs_embeddings, verbose=self.verbose
+        ).components_[0]
+
+        # -------------------------------------------------------------------
+        # Obtain the embeddings' mean
+        self.embeddings_mean = self._calculate_embeddings_mean(model)
+
+        # -------------------------------------------------------------------
+        # Obtain the principal components of all vectors in the model.
+        if self.verbose:
+            print("Obtaining principal components")
+        self.pca = self._principal_components(model)
+
+        return self
+
+    def transform(
+        self,
+        model: WordEmbeddingModel,
+        target: Optional[List[str]] = None,
+        ignore: List[str] = [],
+        copy: bool = True,
+    ) -> WordEmbeddingModel:
+        """Execute Double Hard Debias over the provided model.
+
+        Parameters
+        ----------
+        model : WordEmbeddingModel
+            The word embedding model to debias.
+        target : Optional[List[str]], optional
+            If a set of words is specified in target, the debias method is
+            performed only on the most biased words of that set; otherwise,
+            the most biased words of the whole vocabulary are used,
+            by default None.
+        ignore : List[str], optional
+            If a set of words is specified in ignore, the debias method will
+            perform the debias on all target words except those specified in
+            this set, by default [].
+        copy : bool, optional
+            If `True`, the debias will be performed on a copy of the model.
+            If `False`, the debias will be applied to the same model
+            delivered, causing its vectors to mutate.
+            **WARNING:** Setting copy to `True` requires RAM of at least 2x
+            the size of the model; otherwise the execution of the debias may
+            raise a `MemoryError`, by default True.
+
+        Returns
+        -------
+        WordEmbeddingModel
+            The debiased embedding model.
+        """
+        # check that the transform arguments are valid
+        self._check_transform_args(
+            model=model, ignore=ignore, copy=copy,
+        )
+        # check that the following attributes exist in the object, i.e.,
+        # that fit was executed before transform
+        check_is_fitted(
+            self,
+            [
+                "definitional_pairs",
+                "definitional_pairs_embeddings",
+                "bias_direction",
+                "embeddings_mean",
+                "pca",
+                "bias_representation",
+            ],
+        )
+        if self.verbose:
+            print(f"Executing Double Hard Debias on {model.name}")
+        # -------------------------------------------------------------------
+        # Copy
+        if copy:
+            print(
+                "Copy argument is True. Transform will attempt to create a copy "
+                "of the original model. This may fail due to lack of memory."
+            )
+            model = deepcopy(model)
+            print("Model copy created successfully.")
+
+        else:
+            print(
+                "copy argument is False. The execution of this method will mutate "
+                "the original model."
+            )
+        # -------------------------------------------------------------------
+        # Obtain words to apply debias
+        if self.verbose:
+            print("Obtaining words to apply debias")
+
+        if target:
+            self.n_words = len(target) // 2
+
+        self.target_words = self.get_target_words(
+            model, target, ignore, self.n_words, self.bias_representation
+        )
+
+        # -------------------------------------------------------------------
+        # Search for the best PCA component to debias
+        if self.verbose:
+            print("Searching component to debias")
+        optimal_dimension = self._get_optimal_dimension(
+            model, self.n_words, self.n_components
+        )
+
+        # -------------------------------------------------------------------
+        # Execute debias
+        if self.verbose:
+            print("Executing debias")
+        debiased_embeddings = self._drop_frequency_features(optimal_dimension, model)
+        debiased_embeddings = self._debias(debiased_embeddings)
+
+        # -------------------------------------------------------------------
+        # Update vectors
+        if self.verbose:
+            print("Updating debiased vectors")
+        for word in tqdm(debiased_embeddings):
+            model.update(word, debiased_embeddings[word].astype(model.wv.vectors.dtype))
+        # -------------------------------------------------------------------
+        # Rename the (possibly copied) model
+        if self.criterion_name_ is None:
+            new_model_name = f"{model.name}_debiased"
+        else:
+            new_model_name = f"{model.name}_{self.criterion_name_}_debiased"
+        model.name = new_model_name
+        if self.verbose:
+            print("Done!")
+        return model
diff --git a/wefe/tests/test_debias.py b/wefe/tests/test_debias.py
index 8aca79b..5eb31cf 100644
--- a/wefe/tests/test_debias.py
+++ b/wefe/tests/test_debias.py
@@ -4,6 +4,7 @@
 from gensim.models.keyedvectors import KeyedVectors
 from wefe.datasets import fetch_debias_multiclass, fetch_debiaswe, load_weat
 from wefe.debias.base_debias import BaseDebias
+from wefe.debias.double_hard_debias import DoubleHardDebias
 from wefe.debias.half_sibling_regression import HalfSiblingRegression
 from wefe.debias.hard_debias import HardDebias
 from wefe.debias.multiclass_hard_debias import MulticlassHardDebias
@@ -87,14 +88,20 @@ def test_hard_debias_checks(model):
 
     with pytest.raises(
         ValueError,
-        match=r"The definitional pair at position 10 \(\['word1', 'word2', 'word3'\]\) has more words than allowed by Hard Debias: got 3 words, expected 2\.",
+        match=(
+            r"The definitional pair at position 10 \(\['word1', 'word2', 'word3'\]\) "
+            r"has more words than allowed by Hard Debias: got 3 words, expected 2\."
+        ),
     ):
         HardDebias().fit(
             model, definitional_pairs + [["word1", "word2", "word3"]],
         )
     with pytest.raises(
         ValueError,
-        match=r"The definitional pair at position 10 \(\['word1'\]\) has less words than allowed by Hard Debias: got 1 words, expected 2\.",
+        match=(
+            r"The definitional pair at position 10 \(\['word1'\]\) has less words "
+            r"than allowed by Hard Debias: got 1 words, expected 2\."
+        ),
     ):
         HardDebias().fit(
             model, definitional_pairs + [["word1"]],
         )
@@ -395,11 +402,152 @@ def test_multiclass_hard_debias_class(model):
     assert model.name == gender_debiased_w2v.name
 
 
-def test_half_sibling_checks(model):
+def test_double_hard_debias_checks(model):
+
+    debiaswe_wordsets = fetch_debiaswe()
+    definitional_pairs = debiaswe_wordsets["definitional_pairs"]
+
+    with pytest.raises(
+        TypeError, match=r"verbose should be a bool, got .*",
+    ):
+        DoubleHardDebias(verbose=1)
+
+    with pytest.raises(
+        TypeError, match=r"n_words should be int, got: .*",
+    ):
+        DoubleHardDebias(n_words=2.3)
+
+    with pytest.raises(
+        TypeError, match=r"n_components should be int, got: .*",
+    ):
+        DoubleHardDebias(n_components=2.3)
+    with pytest.raises(
+        TypeError, match=r"incremental_pca should be a bool, got .*",
+    ):
+        DoubleHardDebias(incremental_pca=1)
+
+    with pytest.raises(
+        ValueError,
+        match=(
+            r"The definitional pair at position 10 \(\['word1', 'word2', 'word3'\]\) "
+            r"has more words than allowed by Double Hard Debias: got 3 words, "
+            r"expected 2\."
+        ),
+    ):
+        DoubleHardDebias().fit(
+            model,
+            definitional_pairs=definitional_pairs + [["word1", "word2", "word3"]],
+            bias_representation=["he", "she"],
+        )
+    with pytest.raises(
+        ValueError,
+        match=(
+            r"The definitional pair at position 10 \(\['word1'\]\) has less words "
+            r"than allowed by Double Hard Debias: got 1 words, expected 2\."
+        ),
+    ):
+        DoubleHardDebias().fit(
+            model, definitional_pairs + [["word1"]], bias_representation=["he", "she"]
+        )
+    with pytest.raises(
+        Exception, match=r"bias_representation words not in model",
+    ):
+        DoubleHardDebias().fit(
+            model,
+            definitional_pairs,
+            bias_representation=["abcde123efg", "gfe321edcba"],
+        )
+
+
+def test_double_hard_debias_class(model, capsys):
+
+    # -----------------------------------------------------------------
+    # Queries
+
+    weat_wordset = load_weat()
+    weat = WEAT()
+    query_1 = Query(
+        [weat_wordset["male_names"], weat_wordset["female_names"]],
+        [weat_wordset["pleasant_5"], weat_wordset["unpleasant_5"]],
+        ["Male Names", "Female Names"],
+        ["Pleasant", "Unpleasant"],
+    )
+    query_2 = Query(
+        [weat_wordset["male_names"], weat_wordset["female_names"]],
+        [weat_wordset["career"], weat_wordset["family"]],
+        ["Male Names", "Female Names"],
+        ["Career", "Family"],
+    )
+
+    debiaswe_wordsets = fetch_debiaswe()
+
+    definitional_pairs = debiaswe_wordsets["definitional_pairs"]
+    gender_specific = debiaswe_wordsets["gender_specific"]
+
+    # -----------------------------------------------------------------
+    # Gender Debias
+    dhd = DoubleHardDebias(criterion_name="gender",)
+    dhd.fit(
+        model, definitional_pairs=definitional_pairs, bias_representation=["he", "she"]
+    )
+
+    gender_debiased_w2v = dhd.transform(model, ignore=gender_specific)
+
+    dhd = DoubleHardDebias(verbose=True, criterion_name="gender",)
+
+    targets = weat_wordset["male_names"] + weat_wordset["female_names"]
+    attributes = weat_wordset["pleasant_5"] + weat_wordset["unpleasant_5"]
+    gender_debiased_w2v = dhd.fit(
+        model, definitional_pairs=definitional_pairs, bias_representation=["he", "she"]
+    ).transform(model, target=attributes, copy=True)
+
+    biased_results = weat.run_query(query_1, model, normalize=True)
+    debiased_results = weat.run_query(query_1, gender_debiased_w2v, normalize=True)
+    assert debiased_results["weat"] < biased_results["weat"]
+
+    biased_results = weat.run_query(query_2, model, normalize=True)
+    debiased_results = weat.run_query(query_2, gender_debiased_w2v, normalize=True)
+    assert
debiased_results["weat"] - biased_results["weat"] < 0.0000001 + + # ----------------------------------------------------------------- + # Test ignore param + dhd = DoubleHardDebias(verbose=True, criterion_name="gender",) + + gender_debiased_w2v = dhd.fit( + model, definitional_pairs=definitional_pairs, bias_representation=["he", "she"] + ).transform(model, ignore=gender_specific + targets + attributes, copy=True) + # ----------------------------------------------------------------- + # Test verbose + dhd = DoubleHardDebias(verbose=True) + gender_debiased_w2v = dhd.fit( + model, definitional_pairs, bias_representation=["he", "she"] + ).transform(model, ignore=gender_specific, copy=True) + out = capsys.readouterr().out + assert "Obtaining definitional pairs." in out + assert "PCA variance explained:" in out + assert "Identifying the bias subspace" in out + assert "Obtaining definitional pairs." in out + assert f"Executing Double Hard Debias on {model.name}" in out + assert "Identifying the bias subspace." in out + assert "Obtaining principal components" in out + assert "Obtaining words to apply debias" in out + assert "Searching component to debias" in out + assert "Copy argument is True. Transform will attempt to create a copy" in out + assert "Executing debias" in out + + dhd = DoubleHardDebias(criterion_name="gender",) + dhd.fit( + model, definitional_pairs=definitional_pairs, bias_representation=["he", "she"] + ) + + gender_debiased_w2v = dhd.transform(model, ignore=gender_specific, copy=False) + assert model == gender_debiased_w2v + assert model.wv == gender_debiased_w2v.wv + assert model.name == gender_debiased_w2v.name + + +def test_half_sibling_checks(model): with pytest.raises( TypeError, match=r"verbose should be a bool, got .*", ): @@ -427,8 +575,6 @@ def test_half_sibling_regression_class(model, capsys): debiaswe_wordsets = fetch_debiaswe() - definitional_pairs = debiaswe_wordsets["definitional_pairs"] - equalize_pairs = debiaswe_wordsets["equalize_pairs"] gender_specific = debiaswe_wordsets["gender_specific"] # ----------------------------------------------------------------- From 2bea1d720a69f32ab8ae6986be4e4cf3e5f39a5b Mon Sep 17 00:00:00 2001 From: Pablo Badilla Date: Sat, 30 Jul 2022 23:01:26 -0400 Subject: [PATCH 06/25] improve metric testing structure --- {wefe/tests => tests}/__init__.py | 0 tests/conftest.py | 126 +++++++ tests/metrics/test_ECT.py | 47 +++ tests/metrics/test_MAC.py | 70 ++++ tests/metrics/test_RIPA.py | 23 ++ tests/metrics/test_RND.py | 81 +++++ tests/metrics/test_RNSB.py | 132 +++++++ tests/metrics/test_WEAT.py | 103 ++++++ .../metrics}/test_base_metric.py | 0 {wefe/tests => tests}/test_datasets.py | 0 {wefe/tests => tests}/test_debias.py | 0 {wefe/tests => tests}/test_preprocessing.py | 0 {wefe/tests => tests}/test_query.py | 0 {wefe/tests => tests}/test_utils.py | 0 .../test_word_embedding_model.py | 0 {wefe/tests => tests}/w2v_test.kv | Bin wefe/metrics/MAC.py | 2 +- wefe/metrics/RNSB.py | 1 - wefe/tests/test_metrics.py | 327 ------------------ 19 files changed, 583 insertions(+), 329 deletions(-) rename {wefe/tests => tests}/__init__.py (100%) create mode 100644 tests/conftest.py create mode 100644 tests/metrics/test_ECT.py create mode 100644 tests/metrics/test_MAC.py create mode 100644 tests/metrics/test_RIPA.py create mode 100644 tests/metrics/test_RND.py create mode 100644 tests/metrics/test_RNSB.py create mode 100644 tests/metrics/test_WEAT.py rename {wefe/tests => tests/metrics}/test_base_metric.py (100%) rename {wefe/tests => 
tests}/test_datasets.py (100%) rename {wefe/tests => tests}/test_debias.py (100%) rename {wefe/tests => tests}/test_preprocessing.py (100%) rename {wefe/tests => tests}/test_query.py (100%) rename {wefe/tests => tests}/test_utils.py (100%) rename {wefe/tests => tests}/test_word_embedding_model.py (100%) rename {wefe/tests => tests}/w2v_test.kv (100%) delete mode 100644 wefe/tests/test_metrics.py diff --git a/wefe/tests/__init__.py b/tests/__init__.py similarity index 100% rename from wefe/tests/__init__.py rename to tests/__init__.py diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..e069de1 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,126 @@ +from typing import Dict, List + +import pytest +from gensim.models.keyedvectors import KeyedVectors +from wefe.datasets.datasets import load_weat +from wefe.query import Query +from wefe.word_embedding_model import WordEmbeddingModel + + +@pytest.fixture +def model() -> WordEmbeddingModel: + """Load a subset of Word2vec as a testing model. + + Returns + ------- + WordEmbeddingModel + The loaded testing model. + """ + w2v = KeyedVectors.load("./tests/w2v_test.kv") + return WordEmbeddingModel(w2v, "word2vec") + + +@pytest.fixture +def weat_wordsets() -> Dict[str, List[str]]: + """Load the word sets used in WEAT original work. + + Returns + ------- + Dict[str, List[str]] + A dictionary that map a word set name to a set of words. + """ + weat_wordsets = load_weat() + return weat_wordsets + + +@pytest.fixture +def query_2t1a_1(weat_wordsets: Dict[str, List[str]]) -> Query: + weat_wordsets = load_weat() + + query = Query( + [weat_wordsets["flowers"], weat_wordsets["insects"]], + [weat_wordsets["pleasant_5"]], + ["Flowers", "Insects"], + ["Pleasant"], + ) + return query + + +@pytest.fixture +def query_2t2a_1(weat_wordsets: Dict[str, List[str]]) -> Query: + """Generate a Flower and Insects wrt Pleasant vs Unpleasant test query. + + Parameters + ---------- + weat_wordsets : Dict[str, List[str]] + The word sets used in WEAT original work. + + Returns + ------- + Query + The generated query. 
+ """ + query = Query( + [weat_wordsets["flowers"], weat_wordsets["insects"]], + [weat_wordsets["pleasant_5"], weat_wordsets["unpleasant_5"]], + ["Flowers", "Insects"], + ["Pleasant", "Unpleasant"], + ) + return query + + +@pytest.fixture +def query_4t2a_1(weat_wordsets: Dict[str, List[str]]) -> Query: + query = Query( + [ + weat_wordsets["flowers"], + weat_wordsets["insects"], + weat_wordsets["instruments"], + weat_wordsets["weapons"], + ], + [weat_wordsets["pleasant_5"], weat_wordsets["unpleasant_5"]], + ["Flowers", "Insects", "Instruments", "Weapons"], + ["Pleasant", "Unpleasant"], + ) + + return query + + +@pytest.fixture +def query_1t4_1(weat_wordsets: Dict[str, List[str]]) -> Query: + query = Query( + [weat_wordsets["flowers"]], + [ + weat_wordsets["pleasant_5"], + weat_wordsets["pleasant_9"], + weat_wordsets["unpleasant_5"], + weat_wordsets["unpleasant_9"], + ], + ["Flowers"], + ["Pleasant 5 ", "Pleasant 9", "Unpleasant 5", "Unpleasant 9"], + ) + return query + + +@pytest.fixture +def query_2t1a_lost_vocab_1(weat_wordsets: Dict[str, List[str]]) -> Query: + query = Query( + [["bla", "asd"], weat_wordsets["insects"]], + [weat_wordsets["pleasant_5"]], + ["Flowers", "Insects"], + ["Pleasant"], + ) + + return query + + +@pytest.fixture +def query_2t2a_lost_vocab_1(weat_wordsets: Dict[str, List[str]]) -> Query: + query = Query( + [["bla", "asd"], weat_wordsets["insects"]], + [weat_wordsets["pleasant_5"], weat_wordsets["unpleasant_5"]], + ["Flowers", "Insects"], + ["Pleasant", "Unpleasant"], + ) + + return query diff --git a/tests/metrics/test_ECT.py b/tests/metrics/test_ECT.py new file mode 100644 index 0000000..201d457 --- /dev/null +++ b/tests/metrics/test_ECT.py @@ -0,0 +1,47 @@ +"""ECT metric testing.""" +from typing import Any, Dict + +import numpy as np +from wefe.metrics import ECT +from wefe.query import Query +from wefe.word_embedding_model import WordEmbeddingModel + + +def check_ECT_result_keys(results: Dict[str, Any]): + assert list(results.keys()) == [ + "query_name", + "result", + "ect", + ] + + +def check_ECT_result_values(results: Dict[str, Any]): + # note: this checking only applies when the result is not np.nan. + assert isinstance(results["query_name"], str) + + # check result type + assert isinstance(results["result"], np.number) + assert isinstance(results["ect"], np.number) + assert -1 <= results["ect"] <= 1 + + +def test_ECT(model: WordEmbeddingModel, query_2t1a_1: Query): + + ect = ECT() + results = ect.run_query(query_2t1a_1, model) + + check_ECT_result_keys(results) + check_ECT_result_values(results) + assert results["query_name"] == "Flowers and Insects wrt Pleasant" + + +def test_ECT_lost_vocabulary_threshold( + model: WordEmbeddingModel, query_2t1a_lost_vocab_1: Query +): + # test metric with a target set that loses more words than allowed. 
+ ect = ECT() + results = ect.run_query(query_2t1a_lost_vocab_1, model) + + assert results["query_name"] == "Flowers and Insects wrt Pleasant" + assert np.isnan(results["ect"]) + assert np.isnan(results["result"]) diff --git a/tests/metrics/test_MAC.py b/tests/metrics/test_MAC.py new file mode 100644 index 0000000..c8091b7 --- /dev/null +++ b/tests/metrics/test_MAC.py @@ -0,0 +1,70 @@ +"""MAC metric testing.""" +from typing import Any, Dict + +import numpy as np +from wefe.metrics import MAC +from wefe.query import Query +from wefe.word_embedding_model import WordEmbeddingModel + + +def check_MAC_result_keys(results: Dict[str, Any]): + assert list(results.keys()) == ["query_name", "result", "mac", "targets_eval"] + + +def check_MAC_result_values(results: Dict[str, Any]): + # note: this checking only applies when the result is not np.nan. + assert isinstance(results["query_name"], str) + + # check result type + assert isinstance(results["result"], np.number) + + # check metrics type + assert isinstance(results["mac"], np.number) + + targets_eval = results["targets_eval"] + assert isinstance(targets_eval, dict) + + for target_name, target_eval in targets_eval.items(): + assert isinstance(target_name, str) + assert isinstance(target_eval, dict) + for target_word, attribute_scores in target_eval.items(): + assert isinstance(target_word, str) + assert isinstance(attribute_scores, dict) + for attribute_name, attribute_score in attribute_scores.items(): + assert isinstance(attribute_name, str) + assert isinstance(attribute_score, (np.number, float)) + + +def test_MAC(model, query_1t4_1): + + mac = MAC() + results = mac.run_query(query_1t4_1, model) + + assert ( + results["query_name"] + == "Flowers wrt Pleasant 5 , Pleasant 9, Unpleasant 5 and Unpleasant 9" + ) + + check_MAC_result_keys(results) + check_MAC_result_values(results) + + assert len(results["targets_eval"]["Flowers"]) == len(query_1t4_1.target_sets[0]) + # 4 = number of attribute sets + for word in query_1t4_1.target_sets[0]: + assert len(results["targets_eval"]["Flowers"][word]) == 4 + + +def test_MAC_lost_vocabulary_threshold( + model: WordEmbeddingModel, query_2t2a_lost_vocab_1: Query +): + mac = MAC() + + # test metric with a target set that loses more words than allowed. 
+ results = mac.run_query(query_2t2a_lost_vocab_1, model) + + check_MAC_result_keys(results) + assert np.isnan(results["mac"]) + assert np.isnan(results["result"]) + assert isinstance(results["targets_eval"], dict) + assert results["query_name"] == "Flowers and Insects wrt Pleasant and Unpleasant" + diff --git a/tests/metrics/test_RIPA.py b/tests/metrics/test_RIPA.py new file mode 100644 index 0000000..86ae315 --- /dev/null +++ b/tests/metrics/test_RIPA.py @@ -0,0 +1,23 @@ +"""RIPA metric testing.""" +import numpy as np +from wefe.metrics import RIPA +from wefe.query import Query +from wefe.word_embedding_model import WordEmbeddingModel + + +def test_RIPA(model: WordEmbeddingModel, query_2t1a_1: Query): + + ripa = RIPA() + + results = ripa.run_query(query_2t1a_1, model) + + assert results["query_name"] == "Flowers and Insects wrt Pleasant" + assert isinstance(results["result"], (np.number, float)) + assert isinstance(results["ripa"], (np.number, float)) + assert isinstance(results["word_values"], dict) + + for word, word_value in results["word_values"].items(): + assert isinstance(word, str) + assert isinstance(word_value, dict) + assert isinstance(word_value["mean"], (np.number, float)) + assert isinstance(word_value["std"], (np.number, float)) diff --git a/tests/metrics/test_RND.py b/tests/metrics/test_RND.py new file mode 100644 index 0000000..8a38ddb --- /dev/null +++ b/tests/metrics/test_RND.py @@ -0,0 +1,81 @@ +"""RND metric testing""" +from typing import Any, Dict + +import numpy as np +import pytest +from wefe.metrics import RND +from wefe.query import Query +from wefe.word_embedding_model import WordEmbeddingModel + + +def check_RND_result_keys(results: Dict[str, Any]): + assert list(results.keys()) == [ + "query_name", + "result", + "rnd", + "distances_by_word", + ] + + +def check_RND_result_values(results: Dict[str, Any]): + # note: this checking only applies when the result is not np.nan. + assert isinstance(results["query_name"], str) + + # check result type + assert isinstance(results["result"], np.number) + assert isinstance(results["rnd"], np.number) + + distances_by_word = results["distances_by_word"] + assert isinstance(distances_by_word, dict) + assert len(distances_by_word) > 0 + for word, distance in distances_by_word.items(): + assert isinstance(word, str) + assert isinstance(distance, (float, np.number)) + assert len(word) > 0 + + +def test_RND_with_euclidean_distance(model: WordEmbeddingModel, query_2t1a_1: Query): + # note: the euclidean distance is the default distance. 
+ rnd = RND() + result = rnd.run_query(query_2t1a_1, model) + + check_RND_result_keys(result) + check_RND_result_values(result) + assert result["query_name"] == "Flowers and Insects wrt Pleasant" + + +def test_RND_with_cosine_distance(model: WordEmbeddingModel, query_2t1a_1: Query): + rnd = RND() + result = rnd.run_query(query_2t1a_1, model, distance="cos") + + check_RND_result_keys(result) + check_RND_result_values(result) + assert result["query_name"] == "Flowers and Insects wrt Pleasant" + + +def test_RND_wrong_distance_type_parameter( + model: WordEmbeddingModel, query_2t1a_1: Query +): + rnd = RND() + + with pytest.raises( + ValueError, match=r'distance_type can be either "norm" or "cos", .*' + ): + rnd.run_query(query_2t1a_1, model, distance="other_distance") + + +def test_RND_lost_vocabulary_threshold( + model: WordEmbeddingModel, query_2t1a_lost_vocab_1: Query +): + rnd = RND() + + result = rnd.run_query(query_2t1a_lost_vocab_1, model,) + check_RND_result_keys(result) + + assert result["query_name"] == "Flowers and Insects wrt Pleasant" + + assert np.isnan(result["result"]) + assert np.isnan(result["rnd"]) + + assert isinstance(result["distances_by_word"], dict) + assert len(result["distances_by_word"]) == 0 diff --git a/tests/metrics/test_RNSB.py b/tests/metrics/test_RNSB.py new file mode 100644 index 0000000..cceb16e --- /dev/null +++ b/tests/metrics/test_RNSB.py @@ -0,0 +1,132 @@ +"""RNSB metric testing.""" +from typing import Any, Dict + +import numpy as np +from wefe.metrics import RNSB +from wefe.query import Query +from wefe.word_embedding_model import WordEmbeddingModel + + +def check_RNSB_result_keys(results: Dict[str, Any]): + assert list(results.keys()) == [ + "query_name", + "result", + "rnsb", + "negative_sentiment_probabilities", + "negative_sentiment_distribution", + ] + + +def check_RNSB_result_values(results: Dict[str, Any]): + # note: this checking only applies when the result is not np.nan. 
+ assert isinstance(results["query_name"], str) + + # check result type and probability interval + assert isinstance(results["result"], np.number) + assert 0 <= results["result"] <= 1 + + # check negative_sentiment_probabilities + negative_sentiment_probabilities = results["negative_sentiment_probabilities"] + assert isinstance(negative_sentiment_probabilities, dict) + assert len(negative_sentiment_probabilities) > 0 + for word, proba in negative_sentiment_probabilities.items(): + assert isinstance(word, str) + assert isinstance(proba, float) + assert 0 <= proba <= 1 + + # check negative_sentiment_distribution + negative_sentiment_distribution = results["negative_sentiment_distribution"] + assert isinstance(negative_sentiment_distribution, dict) + assert len(negative_sentiment_distribution) > 0 + + for word, proba in negative_sentiment_distribution.items(): + assert isinstance(word, str) + assert isinstance(proba, float) + assert 0 <= proba <= 1 + + assert len(negative_sentiment_probabilities) == len(negative_sentiment_distribution) + + +def test_RNSB_base(model: WordEmbeddingModel, query_2t2a_1: Query): + + rnsb = RNSB() + results = rnsb.run_query(query_2t2a_1, model) + check_RNSB_result_keys(results) + check_RNSB_result_values(results) + assert results["query_name"] == "Flowers and Insects wrt Pleasant and Unpleasant" + + +def test_RNSB_more_targets(model: WordEmbeddingModel, query_4t2a_1: Query): + + rnsb = RNSB() + results = rnsb.run_query(query_4t2a_1, model) + check_RNSB_result_keys(results) + check_RNSB_result_values(results) + assert ( + results["query_name"] + == "Flowers, Insects, Instruments and Weapons wrt Pleasant and Unpleasant" + ) + + +def test_RNSB_print_model_evaluation( + capsys, model: WordEmbeddingModel, query_2t2a_1: Query +): + + rnsb = RNSB() + results = rnsb.run_query(query_2t2a_1, model, print_model_evaluation=True) + check_RNSB_result_keys(results) + check_RNSB_result_values(results) + + print(capsys.readouterr()) + captured = capsys.readouterr() + assert "Classification Report" in captured.out + + assert results["query_name"] == "Flowers and Insects wrt Pleasant and Unpleasant" + + +def test_RNSB_no_holdout(capsys, model: WordEmbeddingModel, query_2t2a_1: Query): + + rnsb = RNSB() + results = rnsb.run_query( + query_2t2a_1, model, holdout=False, print_model_evaluation=True + ) + check_RNSB_result_keys(results) + check_RNSB_result_values(results) + + print(capsys.readouterr()) + captured = capsys.readouterr() + assert "Holdout is disabled. 
No evaluation was performed" in captured.out + + assert results["query_name"] == "Flowers and Insects wrt Pleasant and Unpleasant" + + +def test_RNSB_lost_vocabulary_threshold( + model: WordEmbeddingModel, query_2t2a_lost_vocab_1: Query +): + rnsb = RNSB() + results = rnsb.run_query(query_2t2a_lost_vocab_1, model) + + check_RNSB_result_keys(results) + + assert np.isnan(results["rnsb"]) + assert np.isnan(results["result"]) + assert isinstance(results["negative_sentiment_probabilities"], dict) + assert isinstance(results["negative_sentiment_distribution"], dict) + assert len(results["negative_sentiment_probabilities"].keys()) == 0 + assert len(results["negative_sentiment_distribution"].keys()) == 0 + + assert results["query_name"] == "Flowers and Insects wrt Pleasant and Unpleasant" + + +def test_RNSB_with_random_state(model: WordEmbeddingModel, query_2t2a_1: Query): + + rnsb = RNSB() + results_1 = rnsb.run_query(query_2t2a_1, model, random_state=42) + check_RNSB_result_keys(results_1) + check_RNSB_result_values(results_1) + + results_2 = rnsb.run_query(query_2t2a_1, model, random_state=42) + check_RNSB_result_keys(results_2) + check_RNSB_result_values(results_2) + + assert results_1 == results_2 diff --git a/tests/metrics/test_WEAT.py b/tests/metrics/test_WEAT.py new file mode 100644 index 0000000..ab7a2bf --- /dev/null +++ b/tests/metrics/test_WEAT.py @@ -0,0 +1,103 @@ +"""WEAT metric testing.""" +from typing import Any, Dict + +import numpy as np +from wefe.metrics import WEAT +from wefe.query import Query +from wefe.word_embedding_model import WordEmbeddingModel + + +def check_WEAT_result_keys(results: Dict[str, Any]): + assert list(results.keys()) == [ + "query_name", + "result", + "weat", + "effect_size", + "p_value", + ] + + +def check_WEAT_result_values(results: Dict[str, Any]): + # note: this checking only applies when the result is not np.nan. 
+ assert isinstance(results["query_name"], str) + + # check result type + assert isinstance(results["result"], np.number) + + # check metrics type + assert isinstance(results["weat"], np.number) + assert isinstance(results["effect_size"], np.number) + + # check p_value options + assert isinstance(results["p_value"], (float, np.number)) or np.isnan( + results["p_value"] + ) + + +def test_WEAT(model: WordEmbeddingModel, query_2t2a_1: Query): + weat = WEAT() + + results = weat.run_query(query_2t2a_1, model) + + check_WEAT_result_keys(results) + check_WEAT_result_values(results) + assert results["query_name"] == "Flowers and Insects wrt Pleasant and Unpleasant" + assert results["result"] == results["weat"] + + +def test_WEAT_effect_size(model: WordEmbeddingModel, query_2t2a_1: Query): + + weat = WEAT() + + results = weat.run_query(query_2t2a_1, model, return_effect_size=True) + check_WEAT_result_keys(results) + check_WEAT_result_values(results) + + assert results["result"] == results["effect_size"] + + +def test_WEAT_left_sided_p_value(model: WordEmbeddingModel, query_2t2a_1: Query): + weat = WEAT() + + results = weat.run_query( + query_2t2a_1, + model, + calculate_p_value=True, + p_value_iterations=100, + p_value_test_type="left-sided", + ) + check_WEAT_result_keys(results) + check_WEAT_result_values(results) + assert isinstance(results["p_value"], float) + + +def test_WEAT_right_sided_p_value(model: WordEmbeddingModel, query_2t2a_1: Query): + weat = WEAT() + + results = weat.run_query( + query_2t2a_1, + model, + calculate_p_value=True, + p_value_iterations=100, + p_value_test_type="right-sided", + ) + + check_WEAT_result_keys(results) + check_WEAT_result_values(results) + assert isinstance(results["p_value"], float) + + +def test_WEAT_two_sided_p_value(model: WordEmbeddingModel, query_2t2a_1: Query): + weat = WEAT() + + results = weat.run_query( + query_2t2a_1, + model, + calculate_p_value=True, + p_value_iterations=100, + p_value_test_type="two-sided", + ) + + check_WEAT_result_keys(results) + check_WEAT_result_values(results) + assert isinstance(results["p_value"], float) diff --git a/wefe/tests/test_base_metric.py b/tests/metrics/test_base_metric.py similarity index 100% rename from wefe/tests/test_base_metric.py rename to tests/metrics/test_base_metric.py diff --git a/wefe/tests/test_datasets.py b/tests/test_datasets.py similarity index 100% rename from wefe/tests/test_datasets.py rename to tests/test_datasets.py diff --git a/wefe/tests/test_debias.py b/tests/test_debias.py similarity index 100% rename from wefe/tests/test_debias.py rename to tests/test_debias.py diff --git a/wefe/tests/test_preprocessing.py b/tests/test_preprocessing.py similarity index 100% rename from wefe/tests/test_preprocessing.py rename to tests/test_preprocessing.py diff --git a/wefe/tests/test_query.py b/tests/test_query.py similarity index 100% rename from wefe/tests/test_query.py rename to tests/test_query.py diff --git a/wefe/tests/test_utils.py b/tests/test_utils.py similarity index 100% rename from wefe/tests/test_utils.py rename to tests/test_utils.py diff --git a/wefe/tests/test_word_embedding_model.py b/tests/test_word_embedding_model.py similarity index 100% rename from wefe/tests/test_word_embedding_model.py rename to tests/test_word_embedding_model.py diff --git a/wefe/tests/w2v_test.kv b/tests/w2v_test.kv similarity index 100% rename from wefe/tests/w2v_test.kv rename to tests/w2v_test.kv diff --git a/wefe/metrics/MAC.py b/wefe/metrics/MAC.py index 259532e..b4faea6 100644 --- a/wefe/metrics/MAC.py 
+++ b/wefe/metrics/MAC.py @@ -243,7 +243,7 @@ def run_query( "query_name": query.query_name, "result": np.nan, "mac": np.nan, - "targets_eval": None, + "targets_eval": {}, } # get the targets and attribute sets transformed into embeddings. diff --git a/wefe/metrics/RNSB.py b/wefe/metrics/RNSB.py index 6a0af56..0da3f1a 100644 --- a/wefe/metrics/RNSB.py +++ b/wefe/metrics/RNSB.py @@ -542,7 +542,6 @@ def run_query( "query_name": query.query_name, "result": np.nan, "rnsb": np.nan, - "score": np.nan, "negative_sentiment_probabilities": {}, "negative_sentiment_distribution": {}, } diff --git a/wefe/tests/test_metrics.py b/wefe/tests/test_metrics.py deleted file mode 100644 index 757558f..0000000 --- a/wefe/tests/test_metrics.py +++ /dev/null @@ -1,327 +0,0 @@ -"""Metrics Testing""" -import numpy as np -import pytest -from gensim.models.keyedvectors import KeyedVectors -from wefe.datasets.datasets import load_weat -from wefe.metrics import ECT, MAC, RIPA, RND, RNSB, WEAT -from wefe.query import Query -from wefe.word_embedding_model import WordEmbeddingModel - - -@pytest.fixture -def model() -> WordEmbeddingModel: - """Load a subset of Word2vec as a testing model. - - Returns - ------- - WordEmbeddingModel - The loaded testing model. - """ - w2v = KeyedVectors.load("./wefe/tests/w2v_test.kv") - return WordEmbeddingModel(w2v, "word2vec") - - -@pytest.fixture -def weat_wordsets(): - return load_weat() - - -def test_WEAT(model, weat_wordsets): - - weat = WEAT() - query = Query( - [weat_wordsets["flowers"], weat_wordsets["insects"]], - [weat_wordsets["pleasant_5"], weat_wordsets["unpleasant_5"]], - ["Flowers", "Insects"], - ["Pleasant", "Unpleasant"], - ) - results = weat.run_query(query, model) - - assert results["query_name"] == "Flowers and Insects wrt Pleasant and Unpleasant" - assert isinstance(results["result"], np.number) - assert isinstance(results["weat"], np.number) - assert isinstance(results["effect_size"], np.number) - assert results["result"] == results["weat"] - assert np.isnan(results["p_value"]) - - results = weat.run_query(query, model, return_effect_size=True) - assert isinstance(results["result"], np.number) - assert isinstance(results["weat"], np.number) - assert isinstance(results["effect_size"], np.number) - assert results["result"] == results["effect_size"] - assert np.isnan(results["p_value"]) - - results = weat.run_query( - query, - model, - calculate_p_value=True, - p_value_iterations=100, - p_value_test_type="left-sided", - ) - - assert isinstance(results["result"], np.number) - assert isinstance(results["weat"], np.number) - assert isinstance(results["effect_size"], np.number) - assert isinstance(results["p_value"], (float, np.number)) - - results = weat.run_query( - query, - model, - calculate_p_value=True, - p_value_iterations=100, - p_value_test_type="right-sided", - ) - - assert isinstance(results["result"], np.number) - assert isinstance(results["weat"], np.number) - assert isinstance(results["effect_size"], np.number) - assert isinstance(results["p_value"], (float, np.number)) - - results = weat.run_query( - query, - model, - calculate_p_value=True, - p_value_iterations=100, - p_value_test_type="two-sided", - ) - - assert isinstance(results["result"], np.number) - assert isinstance(results["weat"], np.number) - assert isinstance(results["effect_size"], np.number) - assert isinstance(results["p_value"], (float, np.number)) - - -def test_RND(model, weat_wordsets): - - rnd = RND() - query = Query( - [weat_wordsets["flowers"], weat_wordsets["insects"]], - 
[weat_wordsets["pleasant_5"]], - ["Flowers", "Insects"], - ["Pleasant"], - ) - # test with euclidean distance - results = rnd.run_query(query, model) - - assert results["query_name"] == "Flowers and Insects wrt Pleasant" - assert isinstance(results["result"], np.number) - assert isinstance(results["rnd"], np.number) - assert isinstance(results["distances_by_word"], dict) - assert len(results["distances_by_word"]) > 0 - - # test with cosine distance - results = rnd.run_query(query, model, distance="cos") - - assert results["query_name"] == "Flowers and Insects wrt Pleasant" - assert isinstance(results["result"], np.number) - assert isinstance(results["rnd"], np.number) - assert isinstance(results["distances_by_word"], dict) - assert len(results["distances_by_word"]) > 0 - - with pytest.raises( - ValueError, match=r'distance_type can be either "norm" or "cos", .*' - ): - rnd.run_query(query, model, distance="other_distance") - - # lost word threshold test - results = rnd.run_query( - Query( - [["bla", "asd"], weat_wordsets["insects"]], - [weat_wordsets["pleasant_5"]], - ["Flowers", "Insects"], - ["Pleasant"], - ), - model, - ) - assert results["query_name"] == "Flowers and Insects wrt Pleasant" - assert np.isnan(results["result"]) - assert np.isnan(results["rnd"]) - assert isinstance(results["distances_by_word"], dict) - assert len(results["distances_by_word"]) == 0 - - -def test_RNSB(capsys, model, weat_wordsets): - - rnsb = RNSB() - query = Query( - [weat_wordsets["flowers"], weat_wordsets["insects"]], - [weat_wordsets["pleasant_5"], weat_wordsets["unpleasant_5"]], - ["Flowers", "Insects"], - ["Pleasant", "Unpleasant"], - ) - results = rnsb.run_query(query, model) - - assert results["query_name"] == "Flowers and Insects wrt Pleasant and Unpleasant" - assert list(results.keys()) == [ - "query_name", - "result", - "rnsb", - "negative_sentiment_probabilities", - "negative_sentiment_distribution", - ] - assert isinstance(results["result"], (np.float32, np.float64, float, np.float_)) - assert isinstance(results["negative_sentiment_probabilities"], dict) - assert isinstance(results["negative_sentiment_distribution"], dict) - - query = Query( - [ - weat_wordsets["flowers"], - weat_wordsets["instruments"], - weat_wordsets["male_terms"], - weat_wordsets["female_terms"], - ], - [weat_wordsets["pleasant_5"], weat_wordsets["unpleasant_5"]], - ["Flowers", "Insects", "Male terms", "Female terms"], - ["Pleasant", "Unpleasant"], - ) - results = rnsb.run_query(query, model) - - assert ( - results["query_name"] - == "Flowers, Insects, Male terms and Female terms wrt Pleasant and Unpleasant" - ) - assert isinstance(results["result"], np.number) - - # custom classifier, print model eval - results = rnsb.run_query(query, model, print_model_evaluation=True) - - print(capsys.readouterr()) - captured = capsys.readouterr() - assert "Classification Report" in captured.out - - assert ( - results["query_name"] - == "Flowers, Insects, Male terms and Female terms wrt Pleasant and Unpleasant" - ) - assert isinstance(results["result"], np.number) - assert isinstance(results["rnsb"], np.number) - - # lost word threshold test - results = rnsb.run_query( - Query( - [["bla", "asd"], weat_wordsets["insects"]], - [weat_wordsets["pleasant_5"], weat_wordsets["unpleasant_5"]], - ["Flowers", "Insects"], - ["Pleasant", "Unpleasant"], - ), - model, - ) - assert np.isnan(results["rnsb"]) - assert np.isnan(results["result"]) - assert isinstance(results["negative_sentiment_probabilities"], dict) - assert 
isinstance(results["negative_sentiment_distribution"], dict) - assert len(results["negative_sentiment_probabilities"]) == 0 - assert len(results["negative_sentiment_distribution"]) == 0 - - # test random state - query = Query( - [ - weat_wordsets["flowers"], - weat_wordsets["instruments"], - weat_wordsets["male_terms"], - weat_wordsets["female_terms"], - ], - [weat_wordsets["pleasant_5"], weat_wordsets["unpleasant_5"]], - ["Flowers", "Insects", "Male terms", "Female terms"], - ["Pleasant", "Unpleasant"], - ) - results = rnsb.run_query(query, model, random_state=42) - - assert ( - results["query_name"] - == "Flowers, Insects, Male terms and Female terms wrt Pleasant and Unpleasant" - ) - assert isinstance(results["result"], np.number) - - -def test_MAC(model, weat_wordsets): - - mac = MAC() - query = Query( - [weat_wordsets["flowers"]], - [ - weat_wordsets["pleasant_5"], - weat_wordsets["pleasant_9"], - weat_wordsets["unpleasant_5"], - weat_wordsets["unpleasant_9"], - ], - ["Flowers"], - ["Pleasant 5 ", "Pleasant 9", "Unpleasant 5", "Unpleasant 9"], - ) - results = mac.run_query(query, model) - - assert ( - results["query_name"] - == "Flowers wrt Pleasant 5 , Pleasant 9, Unpleasant 5 and Unpleasant 9" - ) - assert isinstance(results["result"], np.number) - assert isinstance(results["mac"], np.number) - assert isinstance(results["targets_eval"], dict) - assert len(results["targets_eval"]["Flowers"]) == len(weat_wordsets["flowers"]) - # 4 = number of attribute sets - assert len(results["targets_eval"]["Flowers"][weat_wordsets["flowers"][0]]) == 4 - - # test metric with a target set that loses more words than allowed. - query = Query( - [weat_wordsets["flowers"], ["blabla", "asdf"]], - [weat_wordsets["pleasant_5"]], - ["Flowers", "Insects"], - ["Pleasant"], - ) - results = mac.run_query(query, model) - - assert results["query_name"] == "Flowers and Insects wrt Pleasant" - assert np.isnan(results["mac"]) - assert np.isnan(results["result"]) - - -def test_ECT(model, weat_wordsets): - - ect = ECT() - query = Query( - [weat_wordsets["flowers"], weat_wordsets["insects"]], - [weat_wordsets["pleasant_5"]], - ["Flowers", "Insects"], - ["Pleasant"], - ) - results = ect.run_query(query, model) - - assert results["query_name"] == "Flowers and Insects wrt Pleasant" - assert isinstance(results["result"], np.number) - assert isinstance(results["ect"], np.number) - - # test metric with a target set that loses more words than allowed. 
- query = Query( - [weat_wordsets["flowers"], ["blabla", "asdf"]], - [weat_wordsets["pleasant_5"]], - ["Flowers", "Insects"], - ["Pleasant"], - ) - results = ect.run_query(query, model) - - assert results["query_name"] == "Flowers and Insects wrt Pleasant" - assert np.isnan(results["ect"]) - assert np.isnan(results["result"]) - - -def test_RIPA(model, weat_wordsets): - - ripa = RIPA() - query = Query( - [weat_wordsets["flowers"], weat_wordsets["insects"]], - [weat_wordsets["pleasant_5"]], - ["Flowers", "Insects"], - ["Pleasant"], - ) - results = ripa.run_query(query, model) - - assert results["query_name"] == "Flowers and Insects wrt Pleasant" - assert isinstance(results["result"], (np.float32, np.float64, float)) - assert isinstance(results["ripa"], (np.float32, np.float64, float)) - assert isinstance(results["word_values"], dict) - - for word, word_value in results["word_values"].items(): - assert isinstance(word, str) - assert isinstance(word_value, dict) - assert isinstance(word_value["mean"], (np.float32, np.float64, float)) - assert isinstance(word_value["std"], (np.float32, np.float64, float)) From 70c1807460caf5787c17b8d3b47fb66c5f4595aa Mon Sep 17 00:00:00 2001 From: Pablo Badilla Date: Sun, 31 Jul 2022 13:40:10 -0400 Subject: [PATCH 07/25] End of metric testing improvement --- tests/conftest.py | 22 +++++- tests/metrics/test_MAC.py | 1 - tests/metrics/test_RIPA.py | 22 +++++- tests/metrics/test_base_metric.py | 125 ++++++++++-------------------- tests/test_datasets.py | 1 - tests/test_preprocessing.py | 1 - tests/test_query.py | 1 - tests/test_utils.py | 1 - tests/w2v_test.kv | Bin 16843925 -> 0 bytes 9 files changed, 78 insertions(+), 96 deletions(-) delete mode 100644 tests/w2v_test.kv diff --git a/tests/conftest.py b/tests/conftest.py index e069de1..4a47416 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,9 +1,10 @@ +"""Test configurations and fixtures.""" from typing import Dict, List import pytest -from gensim.models.keyedvectors import KeyedVectors from wefe.datasets.datasets import load_weat from wefe.query import Query +from wefe.utils import load_test_model from wefe.word_embedding_model import WordEmbeddingModel @@ -16,8 +17,7 @@ def model() -> WordEmbeddingModel: WordEmbeddingModel The loaded testing model. 
""" - w2v = KeyedVectors.load("./tests/w2v_test.kv") - return WordEmbeddingModel(w2v, "word2vec") + return load_test_model() @pytest.fixture @@ -69,6 +69,22 @@ def query_2t2a_1(weat_wordsets: Dict[str, List[str]]) -> Query: return query +@pytest.fixture +def query_3t2a_1(weat_wordsets: Dict[str, List[str]]) -> Query: + query = Query( + [ + weat_wordsets["flowers"], + weat_wordsets["insects"], + weat_wordsets["instruments"], + ], + [weat_wordsets["pleasant_5"], weat_wordsets["unpleasant_5"]], + ["Flowers", "Weapons", "Instruments"], + ["Pleasant", "Unpleasant"], + ) + + return query + + @pytest.fixture def query_4t2a_1(weat_wordsets: Dict[str, List[str]]) -> Query: query = Query( diff --git a/tests/metrics/test_MAC.py b/tests/metrics/test_MAC.py index c8091b7..5850fc3 100644 --- a/tests/metrics/test_MAC.py +++ b/tests/metrics/test_MAC.py @@ -67,4 +67,3 @@ def test_MAC_lost_vocabulary_threshold( assert np.isnan(results["result"]) assert isinstance(results["targets_eval"], dict) assert results["query_name"] == "Flowers and Insects wrt Pleasant and Unpleasant" - diff --git a/tests/metrics/test_RIPA.py b/tests/metrics/test_RIPA.py index 86ae315..749d476 100644 --- a/tests/metrics/test_RIPA.py +++ b/tests/metrics/test_RIPA.py @@ -1,17 +1,20 @@ """RIPA metric testing.""" +from typing import Any, Dict + import numpy as np from wefe.metrics import RIPA from wefe.query import Query from wefe.word_embedding_model import WordEmbeddingModel -def test_RIPA(model: WordEmbeddingModel, query_2t1a_1: Query): +def check_RIPA_result_keys(results: Dict[str, Any]): + assert list(results.keys()) == ["query_name", "result", "ripa", "word_values"] - ripa = RIPA() - results = ripa.run_query(query_2t1a_1, model) +def check_RIPA_result_values(results: Dict[str, Any]): + # note: this checking only applies when the result is not np.nan. 
+ assert isinstance(results["query_name"], str) - assert results["query_name"] == "Flowers and Insects wrt Pleasant" assert isinstance(results["result"], (np.number, float)) assert isinstance(results["ripa"], (np.number, float)) assert isinstance(results["word_values"], dict) @@ -21,3 +24,14 @@ def test_RIPA(model: WordEmbeddingModel, query_2t1a_1: Query): assert isinstance(word_value, dict) assert isinstance(word_value["mean"], (np.number, float)) assert isinstance(word_value["std"], (np.number, float)) + + +def test_RIPA(model: WordEmbeddingModel, query_2t1a_1: Query): + + ripa = RIPA() + + results = ripa.run_query(query_2t1a_1, model) + + check_RIPA_result_keys(results) + check_RIPA_result_values(results) + assert results["query_name"] == "Flowers and Insects wrt Pleasant" diff --git a/tests/metrics/test_base_metric.py b/tests/metrics/test_base_metric.py index a3ca7bd..007c7e4 100644 --- a/tests/metrics/test_base_metric.py +++ b/tests/metrics/test_base_metric.py @@ -1,99 +1,59 @@ import pytest -from wefe.datasets.datasets import load_weat from wefe.metrics.base_metric import BaseMetric from wefe.query import Query -from wefe.utils import load_test_model from wefe.word_embedding_model import WordEmbeddingModel -@pytest.fixture -def simple_model_and_query(): - test_model = load_test_model() - weat_wordsets = load_weat() - - flowers = weat_wordsets["flowers"] - insects = weat_wordsets["insects"] - pleasant = weat_wordsets["pleasant_5"] - unpleasant = weat_wordsets["unpleasant_5"] - query = Query( - [flowers, insects], - [pleasant, unpleasant], - ["Flowers", "Insects"], - ["Pleasant", "Unpleasant"], - ) - return test_model, query, flowers, insects, pleasant, unpleasant - - -def test_validate_metric_input(simple_model_and_query): - - # only for testing, disable abstract methods. - BaseMetric.__abstractmethods__ = set() +def test_validate_wrong_metric_inputs( + model: WordEmbeddingModel, query_2t2a_1: Query, query_3t2a_1: Query, +): + # Create and configure base metric testing. + # disable abstract methods. + # instance test metric + BaseMetric.__abstractmethods__ = frozenset() base_metric = BaseMetric() - base_metric.metric_template = (2, 3) - base_metric.metric_name = "Example Metric" - base_metric.metric_short_name = "EM" - - ( - test_model, - bad_template_query, - flowers, - insects, - pleasant, - unpleasant, - ) = simple_model_and_query - - bad_template_query = Query( - [flowers, insects], - [pleasant, unpleasant], - ["Flowers", "Weapons"], - ["Pleasant", "Unpleasant"], - ) + base_metric.metric_name = "Test Metric" + base_metric.metric_short_name = "TM" with pytest.raises(TypeError, match="query should be a Query instance, got*"): - base_metric._check_input(None, test_model, {}) + base_metric._check_input(None, model, {}) with pytest.raises( TypeError, match="word_embedding should be a WordEmbeddingModel instance, got*" ): - base_metric._check_input(bad_template_query, None, {}) - - bad_template_query = Query( - [flowers, insects, insects], - [pleasant, unpleasant], - ["Flowers", "Weapons", "Instruments"], - ["Pleasant", "Unpleasant"], - ) + base_metric._check_input(query_2t2a_1, None, {}) + with pytest.raises( Exception, match="The cardinality of the set of target words of the 'Flowers, Weapons and " "Instruments wrt Pleasant and Unpleasant' query does not match with the " - "cardinality required by EM. Provided query: 3, metric: 2", + "cardinality required by TM. 
Provided query: 3, metric: 2", ): - base_metric._check_input(bad_template_query, test_model, {}) - - bad_template_query = Query( - [flowers, insects], - [pleasant, unpleasant], - ["Flowers", "Weapons"], - ["Pleasant", "Unpleasant"], - ) + base_metric._check_input(query_3t2a_1, model, {}) + with pytest.raises( Exception, - match="The cardinality of the set of attribute words of the 'Flowers and Weapons " - "wrt Pleasant and Unpleasant' query does not match with the cardinality " - "required by EM. Provided query: 2, metric: 3", + match=( + "The cardinality of the set of attribute words of the 'Flowers and Insects " + "wrt Pleasant and Unpleasant' query does not match with the cardinality " + "required by TM. Provided query: 2, metric: 3" + ), ): - base_metric._check_input(bad_template_query, test_model, {}) + base_metric._check_input(query_2t2a_1, model, {}) + + +def test_validate_old_preprocessor_args_inputs( + model: WordEmbeddingModel, query_2t2a_1: Query, +): + # instance test metric + BaseMetric.__abstractmethods__ = frozenset() + base_metric = BaseMetric() base_metric.metric_template = (2, 2) - query_ok = Query( - [flowers, insects], - [pleasant, unpleasant], - ["Flowers", "Weapons"], - ["Pleasant", "Unpleasant"], - ) + base_metric.metric_name = "Test Metric" + base_metric.metric_short_name = "TM" with pytest.raises( DeprecationWarning, @@ -103,7 +63,7 @@ def test_validate_metric_input(simple_model_and_query): ), ): base_metric._check_input( - query_ok, test_model, {"preprocessor_args": {"uppercase": True}} + query_2t2a_1, model, {"preprocessor_args": {"uppercase": True}} ) with pytest.raises( @@ -114,7 +74,7 @@ def test_validate_metric_input(simple_model_and_query): ), ): base_metric._check_input( - query_ok, test_model, {"secondary_preprocessor_args": {"uppercase": True}} + query_2t2a_1, model, {"secondary_preprocessor_args": {"uppercase": True}} ) with pytest.raises( @@ -126,8 +86,8 @@ def test_validate_metric_input(simple_model_and_query): ), ): base_metric._check_input( - query_ok, - test_model, + query_2t2a_1, + model, { "preprocessor_args": {"uppercase": True}, "secondary_preprocessor_args": {"uppercase": True}, @@ -135,18 +95,15 @@ def test_validate_metric_input(simple_model_and_query): ) -def test_run_query(simple_model_and_query): - - # only for testing, disable abstract methods. - BaseMetric.__abstractmethods__ = set() +def test_run_query(model: WordEmbeddingModel, query_2t2a_1: Query): + # disable abstract methods. 
+    BaseMetric.__abstractmethods__ = frozenset()
     base_metric = BaseMetric()
     base_metric.metric_template = (2, 2)
-    base_metric.metric_name = "Example Metric"
-    base_metric.metric_short_name = "EM"
-
-    test_model, query, _, _, _, _ = simple_model_and_query
+    base_metric.metric_name = "Test Metric"
+    base_metric.metric_short_name = "TM"
 
     with pytest.raises(NotImplementedError,):
-        base_metric.run_query(query, test_model)
+        base_metric.run_query(query_2t2a_1, model)
diff --git a/tests/test_datasets.py b/tests/test_datasets.py
index 73a2f7e..32833e0 100644
--- a/tests/test_datasets.py
+++ b/tests/test_datasets.py
@@ -180,4 +180,3 @@ def test_load_gn_glove():
         for word in set_:
             assert isinstance(word, str)
             assert len(word) > 0
-
diff --git a/tests/test_preprocessing.py b/tests/test_preprocessing.py
index 625cf22..a66f3cc 100644
--- a/tests/test_preprocessing.py
+++ b/tests/test_preprocessing.py
@@ -666,4 +666,3 @@ def test_threshold_param_on_get_embeddings_from_query(caplog, simple_query, mode
 
     embeddings = get_embeddings_from_query(model, query, lost_vocabulary_threshold=0.5)
     assert embeddings is not None
-
diff --git a/tests/test_query.py b/tests/test_query.py
index 436ea4d..6338a17 100644
--- a/tests/test_query.py
+++ b/tests/test_query.py
@@ -342,4 +342,3 @@ def test_wrong_target_and_attribute_sets_and_names(caplog):
         ["Flowers"],
         ["Pleasant", "asdf"],
     )
-
diff --git a/tests/test_utils.py b/tests/test_utils.py
index b081085..59c4d56 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -528,4 +528,3 @@ def test_correlations(queries_and_models):
         ]
     ),
 )
-
diff --git a/tests/w2v_test.kv b/tests/w2v_test.kv
deleted file mode 100644
index e4b9d2a54c31899a7bc13dc0c2960a2c6d61e2fe..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 16843925
[~16.8 MB of base85-encoded binary patch data for the deleted tests/w2v_test.kv omitted]