From 6c89d125155bfb071619426bcd47e0d010189be9 Mon Sep 17 00:00:00 2001 From: Rohan Modi Date: Thu, 5 Sep 2024 11:38:03 -0400 Subject: [PATCH 1/7] Implemented LLM Fair Eval example using llments --- examples/llm_fair_eval/FairEval.ipynb | 531 ++++++++------------------ llments/lm/base/api.py | 82 ++-- 2 files changed, 201 insertions(+), 412 deletions(-) diff --git a/examples/llm_fair_eval/FairEval.ipynb b/examples/llm_fair_eval/FairEval.ipynb index 0beb7f3..871309e 100644 --- a/examples/llm_fair_eval/FairEval.ipynb +++ b/examples/llm_fair_eval/FairEval.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 48, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -10,21 +10,20 @@ "import os\n", "import time\n", "\n", - "import openai\n", + "from llments.lm.base.api import APIBasedLM\n", "from tqdm import tqdm" ] }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "MAX_API_RETRY = 10000\n", "REQ_TIME_GAP = 4\n", "\n", - "os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key-here\"\n", - "openai.api_key = os.environ[\"OPENAI_API_KEY\"]" + "os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key\"" ] }, { @@ -37,125 +36,150 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "def gen_prompt(ques, ans1, ans2):\n", - " sys_prompt = 'You are a helpful and precise assistant for checking the quality of the answer.'\n", " prompt_template = \"[Question]\\n{question}\\n\\n[The Start of Assistant 1's Answer]\\n{answer_1}\\n[The End of Assistant 1's Answer]\\n\\n[The Start of Assistant 2's Answer]\\n{answer_2}\\n[The End of Assistant 2's Answer]\\n\\n[System]\\n{prompt}\\n\"\n", - " default_prompt = \"\"\"We would like to request your feedback on the per- formance of two AI assistants in response to the user question displayed above.\n", - " Please rate the helpfulness, relevance, accuracy, level of details of 
their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\n", - " Please first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively.\n", - " The two scores are separated by a space. In the sub- sequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment.\"\"\"\n", - " return sys_prompt, prompt_template.format(question=ques, answer_1=ans1, answer_2=ans2, prompt=default_prompt)\n", - "\n", - "def query_gpt(eval_model, system_prompt, user_prompt):\n", - " for i in range(MAX_API_RETRY):\n", - " try:\n", - " response = openai.ChatCompletion.create(\n", - " model=eval_model,\n", - " messages=[\n", - " {\"role\": \"system\", \"content\": system_prompt},\n", - " {\"role\": \"user\", \"content\": user_prompt},\n", - " ],\n", - " temperature=1,\n", - " max_tokens=512,\n", - " )\n", - " return response\n", - " except openai.error.RateLimitError:\n", - " print('rate limit')\n", - " time.sleep(30)\n", - " except Exception as e:\n", - " print('error')\n", - " raise RuntimeError(f\"Failed after {MAX_API_RETRY} retries.\")\n", - "\n", - "def get_eval(ques, ans1, ans2, eval_model):\n", - " system_prompt, user_prompt = gen_prompt(ques, ans1, ans2)\n", - " response = query_gpt(eval_model, system_prompt, user_prompt)\n", - " all_scores = []\n", - " contents = []\n", - " contents_bpc = []\n", - " choice = response[\"choices\"][0]\n", - " content = choice[\"message\"][\"content\"]\n", - " score1, score2 = parse_score_from_review(content)\n", - " if score1 != -1 and score2 != -1:\n", - " all_scores.append([score1, score2])\n", - " contents.append(content)\n", + " default_prompt = \"\"\"We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\n", 
+ " Please rate the helpfulness, relevance, accuracy, level of details of their responses. \n", + "\n", + " Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\n", + " Please first provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment. \n", + " Then, output two lines indicating the scores for Assistant 1 and 2, respectively.\n", + "\n", + " Output with the following format:\n", + " Evaluation evidence: \n", + " Score of the Assistant 1: \n", + " Score of the Assistant 2: \"\"\"\n", + " return prompt_template.format(question=ques, answer_1=ans1, answer_2=ans2, prompt=default_prompt)\n", + "\n", + "def query_gpt(system_prompt, user_prompts, eval_model, num_sequences):\n", + " try:\n", + " responses = APIBasedLM(eval_model).chat_generate(\n", + " messages=[[{\"role\": \"system\", \"content\": system_prompt}, {\"role\": \"user\", \"content\": user_prompt}] for user_prompt in user_prompts],\n", + " temperature=1,\n", + " max_new_tokens=512,\n", + " num_return_sequences=num_sequences\n", + " )\n", + " return responses\n", + " except Exception as e:\n", + " print(f'Error: {e}')\n", + " raise RuntimeError(f\"Failed during query processing.\")\n", " \n", - " return contents, contents_bpc, [score1, score2]\n", + "def get_eval(question_jsons, answer1_jsons, answer2_jsons, eval_model, bpc, k):\n", + " system_prompt = 'You are a helpful and precise assistant for checking the quality of the answer.'\n", + " user_prompts = []\n", + " user_prompts_bpc = []\n", + "\n", + " for question_json, answer1_json, answer2_json in zip(question_jsons, answer1_jsons, answer2_jsons):\n", + " ques = question_json[\"text\"]\n", + " ans1 = answer1_json[\"text\"]\n", + " ans2 = answer2_json[\"text\"]\n", + "\n", + " user_prompt = gen_prompt(ques, ans1, ans2)\n", + " user_prompts.append(user_prompt)\n", + 
"\n", + " if bpc:\n", + " user_prompt_bpc = gen_prompt(ques, ans2, ans1)\n", + " user_prompts_bpc.append(user_prompt_bpc)\n", + "\n", + " responses = query_gpt(system_prompt, user_prompts, eval_model, k)\n", + "\n", + " if bpc == 1:\n", + " responses_bpc = query_gpt(system_prompt, user_prompts_bpc, eval_model, k)\n", + " else:\n", + " responses_bpc = [[] for _ in range(len(user_prompts))]\n", + "\n", + " results = []\n", + "\n", + " for i, response_choices in enumerate(responses):\n", + " question_id = question_jsons[i][\"question_id\"]\n", + " question_content = []\n", + " question_scores_list = []\n", + "\n", + " for content in response_choices:\n", + " score1, score2 = parse_score_from_review(content)\n", + " if score1 != -1 and score2 != -1:\n", + " question_scores_list.append([score1, score2])\n", + " question_content.append(content)\n", + "\n", + " if question_scores_list:\n", + " avg_score1 = sum([score[0] for score in question_scores_list]) / len(question_scores_list)\n", + " avg_score2 = sum([score[1] for score in question_scores_list]) / len(question_scores_list)\n", + " else:\n", + " avg_score1, avg_score2 = -1, -1\n", + "\n", + " result = {\n", + " \"question_id\": question_id,\n", + " \"question\": question_jsons[i][\"text\"],\n", + " \"review\": question_content,\n", + " \"score\": [avg_score1, avg_score2]\n", + " }\n", + "\n", + " if bpc == 1:\n", + " question_content_bpc = []\n", + " question_scores_list_bpc = []\n", + " for content_bpc in responses_bpc[i]:\n", + " score2_bpc, score1_bpc = parse_score_from_review(content_bpc)\n", + " if score1_bpc != -1 and score2_bpc != -1:\n", + " question_scores_list_bpc.append([score1_bpc, score2_bpc])\n", + " question_content_bpc.append(content_bpc)\n", + "\n", + " if question_scores_list_bpc:\n", + " avg_score1_bpc = sum([score[0] for score in question_scores_list_bpc]) / len(question_scores_list_bpc)\n", + " avg_score2_bpc = sum([score[1] for score in question_scores_list_bpc]) / 
len(question_scores_list_bpc)\n", + " result[\"score\"] = [(result[\"score\"][0] + avg_score1_bpc) / 2, (result[\"score\"][1] + avg_score2_bpc) / 2]\n", + " result[\"review_bpc\"] = question_content_bpc\n", + " else:\n", + " result[\"review_bpc\"] = []\n", + " \n", + " results.append(result)\n", + "\n", + " return results\n", "\n", "def parse_score_from_review(review):\n", " try:\n", - " scores = review.split(\"\\n\")[0]\n", - " score1 = scores.split(\" \")[0].strip()\n", - " score2 = scores.split(\" \")[1].strip()\n", + " score1 = review.split(\"\\n\")[-2]\n", + " score2 = review.split(\"\\n\")[-1]\n", + " score1 = score1.split(\":\")[-1].strip()\n", + " score2 = score2.split(\":\")[-1].strip()\n", " return [float(score1), float(score2)]\n", " except:\n", " return [-1, -1]\n", - "\n", + " \n", "def get_json_list(file_path):\n", " file_path = os.path.expanduser(file_path)\n", " with open(file_path, \"r\") as f:\n", " json_list = []\n", " for line in f:\n", " json_list.append(json.loads(line))\n", - " return json_list" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "def get_results(m1, m2, eval_model):\n", + " return json_list\n", + " \n", + "def get_results(m1, m2, eval_model, bpc=0, k=1):\n", " question_jsons = get_json_list(\"question.jsonl\")\n", " answer1_jsons = get_json_list(f\"answer/answer_{m1}.jsonl\")\n", " answer2_jsons = get_json_list(f\"answer/answer_{m2}.jsonl\")\n", - " output = f\"review/review_{m1}_{m2}_{eval_model}.json\"\n", + " output = f\"review/review_{m1}_vs_{m2}_eval={eval_model}_mec={k}_bpc={bpc}.json\"\n", "\n", " assert len(question_jsons) == len(answer1_jsons) == len(answer2_jsons)\n", "\n", - " reviews = []\n", - " total_len = len(question_jsons)\n", - " question_idx_list = list(range(total_len))\n", - "\n", - " for i in tqdm(question_idx_list):\n", - " assert (\n", - " answer1_jsons[i][\"question_id\"]\n", - " == question_jsons[i][\"question_id\"]\n", - " == 
answer2_jsons[i][\"question_id\"]\n", - " )\n", - "\n", - " ques = question_jsons[i][\"text\"]\n", - " ans1 = answer1_jsons[i][\"text\"]\n", - " ans2 = answer2_jsons[i][\"text\"]\n", - " \n", - " reviews.append(get_eval(ques, ans1, ans2, eval_model))\n", - " \n", - " # To avoid the rate limit set by OpenAI\n", - " time.sleep(REQ_TIME_GAP)\n", + " reviews = get_eval(question_jsons, answer1_jsons, answer2_jsons, eval_model, bpc, k)\n", "\n", " model1_vs_model2 = {\n", " 'win': 0,\n", " 'tie': 0,\n", " 'loss': 0\n", " }\n", + " \n", " with open(f\"{output}\", \"w\") as output_review_file:\n", - " for idx, (contents, contents_bpc, [score1, score2]) in enumerate(reviews):\n", - " results = {\n", - " \"question_id\": question_jsons[idx][\"question_id\"],\n", - " \"question\": question_jsons[idx][\"text\"],\n", - " \"review\": contents,\n", - " \"review_bpc\": contents_bpc,\n", - " \"score\": [score1, score2],\n", - " }\n", - " output_review_file.write(json.dumps(results) + \"\\n\")\n", + " for review in reviews:\n", + " output_review_file.write(json.dumps(review) + \"\\n\")\n", " \n", + " score1, score2 = review['score']\n", " if score1 == score2:\n", " model1_vs_model2['tie'] += 1\n", - " \n", " elif score1 > score2:\n", " model1_vs_model2['win'] += 1\n", " else:\n", @@ -181,29 +205,15 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 4, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████| 80/80 [07:37<00:00, 5.72s/it]" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ "Evaluation results (model1_vs_model2):\n", - "{'win': 21, 'tie': 1, 'loss': 58}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" + "{'win': 22, 'tie': 11, 'loss': 47}\n" ] } ], @@ -217,29 +227,15 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 5, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - 
"100%|██████████| 80/80 [07:31<00:00, 5.65s/it]" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ "Evaluation results (model1_vs_model2):\n", - "{'win': 18, 'tie': 1, 'loss': 61}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" + "{'win': 40, 'tie': 11, 'loss': 29}\n" ] } ], @@ -253,7 +249,7 @@ }, { "cell_type": "code", - "execution_count": 77, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -261,23 +257,23 @@ "output_type": "stream", "text": [ "Vicuna-13B v.s. ChatGPT | Evaluator: ChatGPT\n", - "Vicuna-13b win rate as assistant 1: 22.5%\n", - "Vicuna-13b win rate as assistant 2: 72.5%\n", - "Conflict rate: 56/80 (70.0%)\n" + "Vicuna-13b win rate as assistant 1: 50.0%\n", + "Vicuna-13b win rate as assistant 2: 58.75%\n", + "Conflict rate: 29/80 (36.25%)\n" ] } ], "source": [ "gpt35_vs_vicuna13b_results = []\n", "\n", - "with open('review/review_gpt35_vicuna-13b_gpt-3.5-turbo-0301.json', 'r') as file:\n", + "with open('review/review_gpt35_vs_vicuna-13b_eval=gpt-3.5-turbo-0301_mec=1_bpc=0.json', 'r') as file:\n", " for line in file:\n", " json_object = json.loads(line)\n", " gpt35_vs_vicuna13b_results.append(json_object)\n", "\n", "vicuna13b_vs_gpt35_results = []\n", "\n", - "with open('review/review_vicuna-13b_gpt35_gpt-3.5-turbo-0301.json', 'r') as file:\n", + "with open('review/review_vicuna-13b_vs_gpt35_eval=gpt-3.5-turbo-0301_mec=1_bpc=0.json', 'r') as file:\n", " for line in file:\n", " json_object = json.loads(line)\n", " vicuna13b_vs_gpt35_results.append(json_object)\n", @@ -315,29 +311,15 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████| 80/80 [07:33<00:00, 5.67s/it]" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ "Evaluation results (model1_vs_model2):\n", - "{'win': 7, 'tie': 0, 'loss': 73}\n" - ] - }, - { - "name": "stderr", - "output_type": 
"stream", - "text": [ - "\n" + "{'win': 6, 'tie': 3, 'loss': 71}\n" ] } ], @@ -351,29 +333,15 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████| 80/80 [07:18<00:00, 5.49s/it]" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ "Evaluation results (model1_vs_model2):\n", - "{'win': 55, 'tie': 0, 'loss': 25}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" + "{'win': 73, 'tie': 3, 'loss': 4}\n" ] } ], @@ -387,7 +355,7 @@ }, { "cell_type": "code", - "execution_count": 78, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -395,23 +363,23 @@ "output_type": "stream", "text": [ "Vicuna-13B v.s. Alpaca-13B | Evaluator: ChatGPT\n", - "Vicuna-13b win rate as assistant 1: 68.75%\n", - "Vicuna-13b win rate as assistant 2: 91.25%\n", - "Conflict rate: 26/80 (32.5%)\n" + "Vicuna-13b win rate as assistant 1: 91.25%\n", + "Vicuna-13b win rate as assistant 2: 88.75%\n", + "Conflict rate: 10/80 (12.5%)\n" ] } ], "source": [ "alpaca13b_vs_vicuna13b_results = []\n", "\n", - "with open('review/review_alpaca-13b_vicuna-13b_gpt-3.5-turbo-0301.json', 'r') as file:\n", + "with open('review/review_alpaca-13b_vs_vicuna-13b_eval=gpt-3.5-turbo-0301_mec=1_bpc=0.json', 'r') as file:\n", " for line in file:\n", " json_object = json.loads(line)\n", " alpaca13b_vs_vicuna13b_results.append(json_object)\n", "\n", "vicuna13b_vs_alpaca13b_results = []\n", "\n", - "with open('review/review_vicuna-13b_alpaca-13b_gpt-3.5-turbo-0301.json', 'r') as file:\n", + "with open('review/review_vicuna-13b_vs_alpaca-13b_eval=gpt-3.5-turbo-0301_mec=1_bpc=0.json', 'r') as file:\n", " for line in file:\n", " json_object = json.loads(line)\n", " vicuna13b_vs_alpaca13b_results.append(json_object)\n", @@ -447,161 +415,6 @@ "### Calibrating the Positional Bias using Multiple Evidence Calibration (MEC) and Balanced Position Calibration 
(BPC)\n" ] }, - { - "cell_type": "code", - "execution_count": 65, - "metadata": {}, - "outputs": [], - "source": [ - "def gen_prompt(ques, ans1, ans2):\n", - " sys_prompt = 'You are a helpful and precise assistant for checking the quality of the answer.'\n", - " prompt_template = \"[Question]\\n{question}\\n\\n[The Start of Assistant 1's Answer]\\n{answer_1}\\n[The End of Assistant 1's Answer]\\n\\n[The Start of Assistant 2's Answer]\\n{answer_2}\\n[The End of Assistant 2's Answer]\\n\\n[System]\\n{prompt}\\n\"\n", - " default_prompt = \"\"\"We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\n", - " Please rate the helpfulness, relevance, accuracy, level of details of their responses. \n", - "\n", - " Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\n", - " Please first provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment. 
\n", - " Then, output two lines indicating the scores for Assistant 1 and 2, respectively.\n", - "\n", - " Output with the following format:\n", - " Evaluation evidence: \n", - " Score of the Assistant 1: \n", - " Score of the Assistant 2: \"\"\"\n", - " return sys_prompt, prompt_template.format(question=ques, answer_1=ans1, answer_2=ans2, prompt=default_prompt)\n", - "\n", - "def query_gpt(eval_model, k, system_prompt, user_prompt):\n", - " for i in range(MAX_API_RETRY):\n", - " try:\n", - " response = openai.ChatCompletion.create(\n", - " model=eval_model,\n", - " messages=[\n", - " {\"role\": \"system\", \"content\": system_prompt},\n", - " {\"role\": \"user\", \"content\": user_prompt},\n", - " ],\n", - " temperature=1,\n", - " max_tokens=512,\n", - " n=k\n", - " )\n", - " return response\n", - " except openai.error.RateLimitError:\n", - " print('rate limit')\n", - " time.sleep(30)\n", - " except Exception as e:\n", - " print('error')\n", - " raise RuntimeError(f\"Failed after {MAX_API_RETRY} retries.\")\n", - "\n", - "\n", - "def get_eval(ques, ans1, ans2, eval_model, k, bpc=1):\n", - " cost = 0\n", - " system_prompt, user_prompt = gen_prompt(ques, ans1, ans2)\n", - " response = query_gpt(eval_model, k, system_prompt, user_prompt)\n", - " all_scores = []\n", - " contents = []\n", - " contents_bpc = []\n", - " for choice in response[\"choices\"]:\n", - " content = choice[\"message\"][\"content\"]\n", - " score1, score2 = parse_score_from_review(content)\n", - " if score1 == -1 or score2 == -1:\n", - " continue\n", - " all_scores.append([score1, score2])\n", - " contents.append(content)\n", - " \n", - " if bpc == 1:\n", - " system_prompt, user_prompt_bpc = gen_prompt(ques, ans2, ans1)\n", - " response_bpc = query_gpt(eval_model, k, system_prompt, user_prompt)\n", - " for choice in response_bpc[\"choices\"]:\n", - " content = choice[\"message\"][\"content\"]\n", - " score2, score1 = parse_score_from_review(content)\n", - " if score1 == -1 or score2 == -1:\n", - " 
continue\n", - " all_scores.append([score1, score2])\n", - " contents_bpc.append(content)\n", - " \n", - " score1 = sum([score[0] for score in all_scores]) / len(all_scores)\n", - " score2 = sum([score[1] for score in all_scores]) / len(all_scores)\n", - " return contents, contents_bpc, [score1, score2]\n", - "\n", - "\n", - "def parse_score_from_review(review):\n", - " try:\n", - " score1 = review.split(\"\\n\")[-2]\n", - " score2 = review.split(\"\\n\")[-1]\n", - " score1 = score1.split(\":\")[-1].strip()\n", - " score2 = score2.split(\":\")[-1].strip()\n", - " return [float(score1), float(score2)]\n", - " except:\n", - " return [-1, -1]\n", - "\n", - "def get_json_list(file_path):\n", - " file_path = os.path.expanduser(file_path)\n", - " with open(file_path, \"r\") as f:\n", - " json_list = []\n", - " for line in f:\n", - " json_list.append(json.loads(line))\n", - " return json_list" - ] - }, - { - "cell_type": "code", - "execution_count": 66, - "metadata": {}, - "outputs": [], - "source": [ - "def get_results(m1, m2, eval_model, bpc, k):\n", - " question_jsons = get_json_list(\"question.jsonl\")\n", - " answer1_jsons = get_json_list(f\"answer/answer_{m1}.jsonl\")\n", - " answer2_jsons = get_json_list(f\"answer/answer_{m2}.jsonl\")\n", - " output = f\"review/review_{m1}_{m2}_{eval_model}_mec{k}_bpc{bpc}.json\"\n", - "\n", - " assert len(question_jsons) == len(answer1_jsons) == len(answer2_jsons)\n", - "\n", - " reviews = []\n", - " total_len = len(question_jsons)\n", - " question_idx_list = list(range(total_len))\n", - "\n", - " for i in tqdm(question_idx_list):\n", - " assert (\n", - " answer1_jsons[i][\"question_id\"]\n", - " == question_jsons[i][\"question_id\"]\n", - " == answer2_jsons[i][\"question_id\"]\n", - " )\n", - "\n", - " ques = question_jsons[i][\"text\"]\n", - " ans1 = answer1_jsons[i][\"text\"]\n", - " ans2 = answer2_jsons[i][\"text\"]\n", - " \n", - " reviews.append(get_eval(ques, ans1, ans2, eval_model, k, bpc))\n", - " \n", - " # To avoid the 
rate limit set by OpenAI\n", - " time.sleep(REQ_TIME_GAP)\n", - "\n", - " model1_vs_model2 = {\n", - " 'win': 0,\n", - " 'tie': 0,\n", - " 'loss': 0\n", - " }\n", - " with open(f\"{output}\", \"w\") as output_review_file:\n", - " for idx, (contents, contents_bpc, [score1, score2]) in enumerate(reviews):\n", - " results = {\n", - " \"question_id\": question_jsons[idx][\"question_id\"],\n", - " \"question\": question_jsons[idx][\"text\"],\n", - " \"review\": contents,\n", - " \"review_bpc\": contents_bpc,\n", - " \"score\": [score1, score2],\n", - " }\n", - " output_review_file.write(json.dumps(results) + \"\\n\")\n", - " \n", - " if score1 == score2:\n", - " model1_vs_model2['tie'] += 1\n", - " \n", - " elif score1 > score2:\n", - " model1_vs_model2['win'] += 1\n", - " else:\n", - " model1_vs_model2['loss'] += 1\n", - "\n", - " print(f'Evaluation results (model1_vs_model2):\\n{model1_vs_model2}')" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -611,29 +424,15 @@ }, { "cell_type": "code", - "execution_count": 67, + "execution_count": 10, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████| 80/80 [12:06<00:00, 9.08s/it]" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ "Evaluation results (model1_vs_model2):\n", - "{'win': 35, 'tie': 6, 'loss': 39}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" + "{'win': 32, 'tie': 3, 'loss': 45}\n" ] } ], @@ -649,29 +448,15 @@ }, { "cell_type": "code", - "execution_count": 68, + "execution_count": 11, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████| 80/80 [10:45<00:00, 8.07s/it]" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ "Evaluation results (model1_vs_model2):\n", - "{'win': 33, 'tie': 21, 'loss': 26}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" + "{'win': 30, 'tie': 11, 'loss': 39}\n" ] } ], @@ 
-687,29 +472,15 @@ }, { "cell_type": "code", - "execution_count": 69, + "execution_count": 12, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████| 80/80 [13:21<00:00, 10.02s/it]" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ "Evaluation results (model1_vs_model2):\n", - "{'win': 38, 'tie': 10, 'loss': 32}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" + "{'win': 34, 'tie': 1, 'loss': 45}\n" ] } ], @@ -725,14 +496,14 @@ }, { "cell_type": "code", - "execution_count": 81, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "def get_MEC_BPC_results(k):\n", " gpt35_vs_vicuna13b_results = []\n", "\n", - " with open(f'review/review_gpt35_vicuna-13b_gpt-3.5-turbo-0301_mec{k}_bpc1.json', 'r') as file:\n", + " with open(f'review/review_gpt35_vs_vicuna-13b_eval=gpt-3.5-turbo-0301_mec={k}_bpc=1.json', 'r') as file:\n", " for line in file:\n", " json_object = json.loads(line)\n", " gpt35_vs_vicuna13b_results.append(json_object)\n", @@ -770,7 +541,7 @@ }, { "cell_type": "code", - "execution_count": 82, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -778,16 +549,16 @@ "output_type": "stream", "text": [ "Vicuna-13B v.s. ChatGPT | Evaluator: ChatGPT | MEC (k=1) + BPC (k=1)\n", - "Vicuna-13b win rate: 32.5%\n", - "Accuracy in terms of closeness with human annotations: 37/80 (46.25%)\n", + "Vicuna-13b win rate: 48.75%\n", + "Accuracy in terms of closeness with human annotations: 41/80 (51.24999999999999%)\n", "\n", "Vicuna-13B v.s. ChatGPT | Evaluator: ChatGPT | MEC (k=3) + BPC (k=3)\n", - "Vicuna-13b win rate: 48.75%\n", - "Accuracy in terms of closeness with human annotations: 32/80 (40.0%)\n", + "Vicuna-13b win rate: 56.25%\n", + "Accuracy in terms of closeness with human annotations: 46/80 (57.49999999999999%)\n", "\n", "Vicuna-13B v.s. 
ChatGPT | Evaluator: ChatGPT | MEC (k=5) + BPC (k=5)\n", - "Vicuna-13b win rate: 40.0%\n", - "Accuracy in terms of closeness with human annotations: 31/80 (38.75%)\n", + "Vicuna-13b win rate: 56.25%\n", + "Accuracy in terms of closeness with human annotations: 47/80 (58.75%)\n", "\n" ] } @@ -800,12 +571,12 @@ }, { "cell_type": "code", - "execution_count": 84, + "execution_count": 15, "metadata": {}, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjIAAAGwCAYAAACzXI8XAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8fJSN1AAAACXBIWXMAAA9hAAAPYQGoP6dpAABDfklEQVR4nO3dd3xUVd7H8c+kQjChRiESiqIUY6QEAZUimCjwIF0FFGQtqIgELBQLoiDIA1JEWRYWxEcgClJ0VxeilAiCBhAJoCigS2hGZEkoEoYwzx9nkxApJjCTMzP5vl+v89qbO5eTH+pNvnvOufc4XC6XCxEREREfFGC7ABEREZFLpSAjIiIiPktBRkRERHyWgoyIiIj4LAUZERER8VkKMiIiIuKzFGRERETEZwXZLsDTzpw5w/79+wkPD8fhcNguR0RERArB5XJx9OhRoqKiCAi48LiL3weZ/fv3Ex0dbbsMERERuQTp6elUrVr1gp/7fZAJDw8HzD+IiIgIt/XrdDpZvnw5CQkJBAcHu61fESk83YcidnnyHszKyiI6Ojrv9/iF+H2QyZ1OioiIcHuQCQsLIyIiQj9ARSzRfShiV3Hcg3+2LESLfUVERMRnKciIiIiIz1KQEREREZ+lICMiIiI+S0FGREREfJaCjIiIiPgsBRkRERHxWQoyIiIi4rMUZERERMRnKchcipwcHKtXc3VKCo7VqyEnx3ZFIiIiJZKCTFEtWgQ1ahAUH0/cG28QFB8PNWqY8yIiIlKsFGSKYtEi6NYN9u4teH7fPnNeYUZERKRYKcgUVk4ODBwILte5n+WeS0zUNJOIiEgxUpAprC++OHck5mwuF6Snm+tERESkWCjIFNaBA+69TkRERC6bgkxhVani3utERETksinIFFbz5lC1KjgcF76malVznYiIiBQLBZnCCgyEyZPN8YXCTHQ0BOgfqYiISHHRb92i6NIFFi6Eq68ueP7KK03QWbcOXn/dTm0iIiIlkIJMUXXpAj//zOnkZDYMHszp5GTYvx/eest8Pnw4fPqp3RpFRERKCAWZSxEYiKtlS/a1aIGrZUszGtOvn2kuF/ToAT/+aLtKERERv6cg405TpsCtt0JmJnTsCFlZtisSERHxa14TZMaOHYvD4SAxMbHA+XXr1tG6dWvKlClDREQELVq04Pfff7dT5J8JCTFraKKi4LvvoHdvOHPGdlUiIiJ+yyuCTGpqKtOnTyc2NrbA+XXr1nHXXXeRkJDA119/TWpqKk8++SQB3vxkUOXKsHixCTVLl8Krr9quSERExG8F2S7g2LFj9OrVixkzZjBq1KgCnw0aNIinnnqKoUOH5p2rXbv2RfvLzs4mOzs77+us/07vOJ1OnE6n2+rO7eu8fTZogOOttwh65BF4+WVOx8Tguvtut31vETEueh+KiMd58h4sbJ8Ol+t8uyAWnz59+lChQgUmTpxIq1atqF+/PpMmTSIjI4OrrrqKKVOmMH/+fHbt2kWdOnUYPXo0t9122wX7e/nllxk5cuQ55+fNm0dYWJgn/yrnuHHGDK755z85XaoU
Kf/7vxyNji7W7y8iIuKrTpw4Qc+ePcnMzCQiIuKC11kNMklJSYwePZrU1FRKlSpVIMisX7+eZs2aUaFCBcaPH0/9+vV59913efvtt9m6dSvXXXfdefs834hMdHQ0hw4duug/iKJyOp0kJycTHx9PcHDwhS4isG1bAlJScNWqxekvv4Ry5dxWg0hJV6j7UEQ8xpP3YFZWFpUqVfrTIGNtaik9PZ2BAweSnJxMqVKlzvn8zH8Xyfbr14++ffsC0KBBAz7//HNmzZrFmDFjzttvaGgooaGh55wPDg72yA+6i/YbHGwW/8bF4di5k+A+feDjj83j2iLiNp66v0WkcDxxDxa2P2urZjdu3EhGRgYNGzYkKCiIoKAgVq9ezZQpUwgKCuKqq64CoF69egX+XN26ddmzZ4+Nki9NZCQsWQKlS5sX5b34ou2KRERE/Ia1INOmTRvS0tLYvHlzXouLi6NXr15s3ryZa665hqioKHbs2FHgz/3www9Ur17dUtWXqEEDmDnTHI8ZAx98YLceERERP2Ftaik8PJyYmJgC58qUKUPFihXzzj/77LOMGDGCm266ifr16zNnzhy+//57Fi5caKPky9OzJ3zzDYwfD337Qp068IfHzUVERKRorD9+fTGJiYmcPHmSQYMGcfjwYW666SaSk5O59tprbZd2acaMgW+/heRk6NQJUlOhYkXbVYmIiPgsrwoyq1atOufc0KFDC7xHxqcFBUFSEjRuDLt3w333mXUzQV71r0FERMRnePErcv1UhQpm8W9YGHz2GfhLSBMREbFAQcaGG2+EOXPM8YQJMHeu3XpERER8lIKMLd26wfPPm+OHH4ZNm+zWIyIi4oMUZGwaORLat4eTJ83i34wM2xWJiIj4FAUZmwID4b334PrrIT0duncHbX4nIiJSaAoytpUrZxb/hodDSgoMHmy7IhEREZ+hIOMN6tY1IzMAU6fCrFl26xEREfERCjLe4u67zZoZgMcfh6++sluPiIiID1CQ8SYvvGAW/Z46BV26wIEDtisSERHxagoy3iQgAN59F+rVg/37oWtXyM62XZWIiIjXUpDxNuHhsHSpWQS8bh0MGAAul+2qREREvJKCjDeqVQvmzweHA2bMgOnTbVckIiLilRRkvNVdd5ndssGMynzxhd16REREvJCCjDd77jm45x44fdpsabB3r+2KREREvIqCjDdzOMw7ZWJjzfYFnTub7QxEREQEUJDxfmXKmDf/VqgAGzbAY49p8a+IiMh/Kcj4gpo14YMPzOPZc+bAm2/arkhERMQrKMj4ijZtYPx4czx4MKxcabceERERL6Ag40sSE+GBByAnx+yU/fPPtisSERGxSkHGlzgc5p0yjRrBb7+Z7QxOnLBdlYiIiDUKMr6mdGlYvBgiI+Hbb+Ghh7T4V0RESiwFGV8UHQ0LF0JQECQl5a+dERERKWEUZHxVixYwebI5HjoUli2zW4+IiIgFCjK+7PHHzdTSmTNw332wc6ftikRERIqVgowvczjgrbegaVM4csQs/j12zHZVIiIixUZBxteFhsKHH0KVKrBtG/Tpo8W/IiJSYijI+IOoKFi0CEJCzP+OHm27IhERkWKhIOMvmjY100wAL70EH39stx4REZFioCDjTx5+GJ54wkwt3X8/fP+97YpEREQ8SkHG30ycCM2bQ1aWWfybmWm7IhEREY9RkPE3ISGwYAFUrQo7dpiRmTNnbFclIiLiEQoy/uiqq8w2BqGh8I9/wMsv265IRETEIxRk/FVcHMyYYY5ffdU8zSQiIuJnvCbIjB07FofDQWJiYt65Vq1a4XA4CrTHHnvMXpG+5oEHYNAgc9y7N2zdarceERERN/OKIJOamsr06dOJjY0957NHHnmEAwcO5LVx48ZZqNCHjRsHrVvD8ePQsSMcPmy7IhEREbexHmSOHTtGr169mDFjBuXLlz/n87CwMCpXrpzXIiIiLFTpw4KC4P33oUYN2L0bevSAnBzbVYmIiLhF
kO0C+vfvT/v27bnjjjsYNWrUOZ/PnTuX9957j8qVK9OhQwdefPFFwsLCLthfdnY22dnZeV9nZWUB4HQ6cTqdbqs7ty939ukxZcvCggUEtWiBY/lycoYM4cyYMbarErlsPnUfivghT96Dhe3TapBJSkpi06ZNpKamnvfznj17Ur16daKiotiyZQtDhgxhx44dLLrIwtUxY8YwcuTIc84vX778ogHoUiUnJ7u9T0+J6t+fxuPHEzhhAt8A+5o3t12SiFv40n0o4o88cQ+eOHGiUNc5XC47Owymp6cTFxdHcnJy3tqYVq1aUb9+fSZNmnTeP7NixQratGnDzp07ufbaa897zflGZKKjozl06JBbp6WcTifJycnEx8cTHBzstn49LWD4cALHj8dVujSnV6+G+vVtlyRyyXz1PhTxF568B7OysqhUqRKZmZkX/f1tbURm48aNZGRk0LBhw7xzOTk5pKSkMHXqVLKzswkMDCzwZ5o0aQJw0SATGhpKaGjoOeeDg4M98oPOU/16zNixkJaGY9kygrt3hw0boFIl21WJXBafuw9F/Iwn7sHC9mctyLRp04a0tLQC5/r27UudOnUYMmTIOSEGYPPmzQBUqVKlOEr0T4GBMH8+3Hwz7NwJ99wDy5ebRcEiIiI+xtpvr/DwcGJiYgqcK1OmDBUrViQmJoZdu3Yxb9482rVrR8WKFdmyZQuDBg2iRYsW531MW4qgfHlYssTsmL1yJTzzDFxgOk9ERMSbWX/8+kJCQkL47LPPSEhIoE6dOjz99NN07dqVjz/+2HZp/uGGG+Ddd83x5MkwZ47dekRERC6BV80nrFq1Ku84Ojqa1atX2yumJOjcGV56CV55Bfr1g3r1oHFj21WJiIgUmteOyEgxGTECOnSA7GwTbH75xXZFIiIihaYgU9IFBMB770GdOrBvH3TrBqdO2a5KRESkUBRkBCIizOLfiAhYswbO2rhTRETEmynIiFG7NsybBw4HTJsGM2bYrkhERORPKchIvvbtIXe/q/794csv7dYjIiLyJxRkpKBhw8w6GacTunY162ZERES8lIKMFORwwOzZEBMDBw9Cly5w8qTtqkRERM5LQUbOdcUVZvFv+fLw9ddmmsnO3qIiIiIXpSAj53fttZCUZB7PnjUL3n7bdkUiIiLnUJCRC0tIgNdfN8eJiaA3LYuIiJdRkJGLe/pp6NEDTp+G7t1hzx7bFYmIiORRkJGLczhg5kxo0AB+/dVsY/D777arEhERARRkpDDCwmDxYqhUCTZtgkce0eJfERHxCgoyUjjVq8OCBRAYCHPnwsSJtisSERFRkJEiaNUqP8A8+yx89pnVckRERBRkpGiefBIefBDOnIF774Xdu21XJCIiJZiCjBRN7qaSjRvD4cPQqRMcP267KhERKaEUZKToSpWCRYvgqqsgLQ369tXiXxERsUJBRi5N1arw4YcQHGwWAee+OE9ERKQYKcjIpbv1VnjzTXM8fDh8+qndekREpMRRkJHL06+faS6XeQPwDz/YrkhEREoQBRm5fFOmmNGZzEyz+Dcry3ZFIiJSQijIyOULCYGFCyEqCr77Dnr3No9ni4iIeJiCjLhH5cpmG4OQEFi6FF591XZFIiJSAijIiPvcfDP89a/m+OWXTaARERHxIAUZca++fWHAAHN8//2wfbvdekRExK8pyIj7TZgALVvCsWNm8e+RI7YrEhERP6UgI+6X+5K8atXgxx+hZ0/IybFdlYiI+CEFGfGMyEhYsgRKlzYvynvxRdsViYiIH1KQEc9p0ABmzjTHY8bABx/YrUdERPyOgox4Vs+e8Mwz5rhvX9iyxW49IiLiVxRkxPPGjIH4eDhxwiz+/e032xWJiIifUJARzwsKgqQkuOYa+OknuO8+OH3adlUiIuIHFGSkeFSoYBb/hoXBZ5/B0KG2KxIRET/gNUFm7NixOBwOEhMTz/nM5XLRtm1bHA4HS5YsKfbaxE1uvBHmzDHHEybA3Ll26xER
EZ/nFUEmNTWV6dOnExsbe97PJ02ahMPhKOaqxCO6dYPnnzfHDz8MGzfarUdERHya9SBz7NgxevXqxYwZMyhfvvw5n2/evJkJEyYwa9YsC9WJR4wcCe3bw8mT0LkzZGTYrkhERHxUkO0C+vfvT/v27bnjjjsYNWpUgc9OnDhBz549eeutt6hcuXKh+svOziY7Ozvv66ysLACcTidOp9Ntdef25c4+S5TZswm69VYcP/7ImW7dyPnXv8wbgUWKQPehiF2evAcL26fVIJOUlMSmTZtITU097+eDBg3illtuoWPHjoXuc8yYMYwcOfKc88uXLycsLOySa72Q5ORkt/dZUlzx1FO0eO45gr/4gp+7dyft0UdtlyQ+SvehiF2euAdPnDhRqOusBZn09HQGDhxIcnIypUqVOufzjz76iBUrVvDNN98Uqd9hw4YxePDgvK+zsrKIjo4mISGBiIiIy647l9PpJDk5mfj4eII1knDJHFFR0LUr13zyCdU6dcL14IO2SxIfovtQxC5P3oO5Myp/xlqQ2bhxIxkZGTRs2DDvXE5ODikpKUydOpXHH3+cXbt2Ua5cuQJ/rmvXrjRv3pxVq1adt9/Q0FBCQ0PPOR8cHOyRH3Se6rfE6NLFrJkZMYKgJ5+E2Fho0sR2VeJjdB+K2OWJe7Cw/VkLMm3atCEtLa3Aub59+1KnTh2GDBlCpUqV6NevX4HPb7zxRiZOnEiHDh2Ks1TxtBdegG++Me+Z6dIFNmyAKlVsVyUiIj7AWpAJDw8nJiamwLkyZcpQsWLFvPPnW+BbrVo1atasWSw1SjEJCIB334WmTWH7dujaFVauhPOMrImIiJzN+uPXIgCEh8PSpVCuHKxbB08+CS6X7apERMTLWX/8+mwXWveSy6VfbP6tVi2YPx/atYOZM6FRI3jsMdtViYiIF9OIjHiXu+4yu2UDDBgAX3xhtx4REfFqCjLifZ57Du65x+yQ3a0b7N1ruyIREfFSCjLifRwOmDXLPIqdkWG2MTh50nZVIiLihRRkxDuVKWMex65QwTyO/dhjWvwrIiLnUJAR71WzJnzwgXk8e84cePNN2xWJiIiXUZAR79amDYwfb44HD4YVK+zWIyIiXkVBRrxfYiI88ADk5JhFwD//bLsiERHxEgoy4v0cDpg+3bxX5rffoFMnKOSuqCIi4t8UZMQ3lC4NixdDZCR8+y089JAW/4qIiIKM+JDoaFi4EIKCICkpf+2MiIiUWAoy4ltatIDJk83x0KGwbJndekRExCoFGfE9jz9uppbOnIH77oOdO21XJCIilijIiO9xOOCtt6BpUzhyxCz+PXbMdlUiImKBgoz4ptBQ+PBDqFIFtm2DPn3MCI2IiJQoCjLiu6KiYNEiCAkx//vaa7YrEhGRYqYgI76taVMzzQTw0kvw8cd26xERkWKlICO+7+GH4YknzHtl7r8fvv/edkUiIlJMFGTEP0ycCM2bQ1aWWfybmWm7IhERKQYKMuIfQkJgwQKoWhV27DAjM1r8KyLi9xRkxH9cdZXZxiA0FP7xD3j5ZdsViYiIhynIiH+Ji4MZM8zxq6+ap5lERMRvKciI/3ngARg0yBz37g1bt9qtR0REPEZBRvzTuHHQujUcPw4dO8Lhw7YrEhERD1CQEf8UFATvvw81asDu3dCjB+Tk2K5KRETcTEFG/FelSrBkCZQuDcuXw/DhtisSERE3U5AR/3bTTTB7tjkeNw6SkuzWIyIibqUgI/7v3nthyBBz/Je/wObNVssRERH3UZCRkmH0aLjzTvj9d/Pm30OHbFckIiJuoCAjJUNgIMyfD7Vqwb//DffcA06n7apEROQyKchIyVG+vFn8e8UVsHIlPPus7YpEROQyKchIyXLDDfDuu+Z48mSYM8duPSIiclkUZKTk6dwZXnrJHPfrB6mpdusREZFLVuQgU6NGDV555RX27NnjiXpEiseIEdChA2Rnm2Dzyy+2KxIRkUtQ5CCTmJjIokWLuOaaa4iP
jycpKYns7GxP1CbiOQEB8N57UKcO7NsH3brBqVO2qxIRkSK6pCCzefNmvv76a+rWrcuAAQOoUqUKTz75JJs2bbrkQsaOHYvD4SAxMTHvXL9+/bj22mspXbo0kZGRdOzYke+///6Sv4dIARERZvFvRASsWQNn/bcnIiK+4ZLXyDRs2JApU6awf/9+RowYwcyZM2ncuDH169dn1qxZuFyuQveVmprK9OnTiY2NLXC+UaNGzJ49m++++45ly5bhcrlISEggR3vmiLvUrg3z5oHDAdOmwYwZtisSEZEiuOQg43Q6+eCDD7j77rt5+umniYuLY+bMmXTt2pXhw4fTq1evQvVz7NgxevXqxYwZMyhfvnyBzx599FFatGhBjRo1aNiwIaNGjSI9PZ2ff/75UssWOVf79vDqq+a4f3/48ku79YiISKEFFfUPbNq0idmzZzN//nwCAgLo3bs3EydOpE6dOnnXdO7cmcaNGxeqv/79+9O+fXvuuOMORo0adcHrjh8/zuzZs6lZsybR0dEXvC47O7vAmp2srCzABC+nG1+AltuXO/sUi559lsBNmwhYtAhX166cXrcOrr7adlXyJ3QfitjlyXuwsH0WOcg0btyY+Ph4pk2bRqdOnQgODj7nmpo1a3Lffff9aV9JSUls2rSJ1Is8/vr222/z3HPPcfz4cWrXrk1ycjIhISEXvH7MmDGMHDnynPPLly8nLCzsT2sqquTkZLf3KXYEdu9Oiw0biNizh2MJCawZPZozF/lvTbyH7kMRuzxxD544caJQ1zlcRVnMAvz73/+mevXql1TU2dLT04mLiyM5OTlvbUyrVq2oX78+kyZNyrsuMzOTjIwMDhw4wPjx49m3bx9r166lVKlS5+33fCMy0dHRHDp0iIiIiMuuO5fT6SQ5OZn4+PjzhjnxUbt2EXTLLTj+8x/OPPggOdOnm/Uz4pV0H4rY5cl7MCsri0qVKpGZmXnR399FHpHJyMjg4MGDNGnSpMD5r776isDAQOLi4grVz8aNG8nIyKBhw4Z553JyckhJSWHq1KlkZ2cTGBhI2bJlKVu2LNdddx1NmzalfPnyLF68mB49epy339DQUEJDQ885Hxwc7JEfdJ7qVyypUweSkqBtWwLeeYeAuDizbka8mu5DEbs8cQ8Wtr8iL/bt378/6enp55zft28f/YvwA79NmzakpaWxefPmvBYXF0evXr3YvHkzgYGB5/wZl8uFy+XSe2vEsxIS4PXXzXFiIqxebbUcERG5sCKPyGzfvr3AKEquBg0asH379kL3Ex4eTkxMTIFzZcqUoWLFisTExLB7927ef/99EhISiIyMZO/evYwdO5bSpUvTrl27opYtUjRPPw2bNpkds7t3hw0boFo121WJiMgfFHlEJjQ0lF/O8zr3AwcOEBRU5Fx0QaVKleKLL76gXbt21KpVi3vvvZfw8HC+/PJLrrzySrd9H5Hzcjhg5kxo0AB+/dVsY1DIhWciIlJ8ipw8EhISGDZsGEuXLqVs2bIAHDlyhOHDhxMfH39ZxaxatSrvOCoqik8++eSy+hO5LGFhsHgxxMWZ0ZlHH4X/+z8t/hUR8SJFHpEZP3486enpVK9endtvv53bb7+dmjVrcvDgQSZMmOCJGkXsqV4dFiyAwECYOxcmTrRdkYiInKXIQebqq69my5YtjBs3jnr16tGoUSMmT55MWlraRV9UJ+KzWrXKDzDPPguffWa1HBERyXdJi1rKlCnDo48+6u5aRLzXk0+a6aV33oF774XUVLjmGttViYiUeJe8Onf79u3s2bOHU6dOFTh/9913X3ZRIl4nd1PJbdtMiOnUCdatgzJlbFcmIlKiFTnI7N69m86dO5OWlobD4cjb5drx3wWQ2pla/FapUrBokVn8m5YGffvC++9r8a+IiEVFXiMzcOBAatasSUZGBmFhYWzbto2UlBTi4uIKPHUk4peqVoUPP4TgYLMIOPfFeSIiYkWRg8y6det45ZVXqFSpEgEB
AQQEBHDbbbcxZswYnnrqKU/UKOJdbr0V3nzTHA8fDnpNgIiINUUOMjk5OYSHhwNQqVIl9u/fD0D16tXZsWOHe6sT8Vb9+pnmckHPnvDDD7YrEhEpkYocZGJiYvj2228BaNKkCePGjWPt2rW88sorXKOnOKQkmTLFjM5kZprFv1lZtisSESlxihxkXnjhBc6cOQPAK6+8wk8//UTz5s355JNPmDJlitsLFPFaISGwcCFERcF330Hv3vDfe0NERIpHkZ9auvPOO/OOa9Wqxffff8/hw4cpX7583pNLIiVG5cpmG4PmzWHpUnj1VRgxwnZVIiIlRpFGZJxOJ0FBQWzdurXA+QoVKijESMl1883w17+a45dfNoFGRESKRZGCTHBwMNWqVdO7YkT+qG9fGDDAHN9/P2zfbrceEZESoshrZJ5//nmGDx/O4cOHPVGPiO+aMAFatoRjx8zi3yNHbFckIuL3irxGZurUqezcuZOoqCiqV69OmT+8on3Tpk1uK07Ep+S+JC8uDn780TyW/fHHZudsERHxiCIHmU6dOnmgDBE/ERkJS5aYx7I//RRefBFee812VSIifqvIQWaEnsgQubgGDWDmTOjVC8aMgfr14Z57bFclIuKXirxGRkQKoWdPeOYZc9y3L2zZYrceERE/VeQgExAQQGBg4AWbiPzXmDEQHw8nTpjFv7/9ZrsiERG/U+SppcWLFxf42ul08s033zBnzhxGjhzptsJEfF5QECQlQePGsHs33HefWTcTVOTbTkRELqDIP1E7dux4zrlu3bpxww038P777/PQQw+5pTARv1Chgln827QpfPYZDB0K48fbrkpExG+4bY1M06ZN+fzzz93VnYj/uPFGmDPHHE+YAHPn2q1HRMSPuCXI/P7770yZMoWrr77aHd2J+J9u3eD5583xww/Dxo126xER8RNFnlr64+aQLpeLo0ePEhYWxnvvvefW4kT8ysiRsHkz/POf0LkzbNgAV15puyoREZ9W5CAzceLEAkEmICCAyMhImjRpQvny5d1anIhfCQyE996DJk3ghx+ge3ezbiY42HZlIiI+q8hB5sEHH/RAGSIlRLlyZvFvkyaQkgKDB8Obb9quSkTEZxV5jczs2bNZsGDBOecXLFjAnNwFjSJyYXXrmpEZgKlTYdYsu/WIiPiwIgeZMWPGUKlSpXPOX3nllbymPWVECufuu82aGYDHH4evvrJbj4iIjypykNmzZw81a9Y853z16tXZs2ePW4oSKRFeeMG88ffUKejSBQ4csF2RiIjPKXKQufLKK9lynn1jvv32WypWrOiWokRKhIAAePddqFcP9u+Hrl0hO9t2VSIiPqXIQaZHjx489dRTrFy5kpycHHJyclixYgUDBw7kvvvu80SNIv4rPByWLjWLgNetgyefBJfLdlUiIj6jyEHm1VdfpUmTJrRp04bSpUtTunRpEhISaN26tdbIiFyKWrVg/nxwOGDmTJg+3XZFIiI+o8hBJiQkhPfff58dO3Ywd+5cFi1axK5du5g1axYhISGeqFHE/911l9ktG2DAAPjiC7v1iIj4iEvehve6667juuuuc2ctIiXbc8/Bpk3wwQdmS4ONG6FqVdtViYh4tSKPyHTt2pXXX3/9nPPjxo2je/ful1zI2LFjcTgcJCYmAnD48GEGDBhA7dq1KV26NNWqVeOpp54iMzPzkr+HiFdzOMw7ZWJjISPDbGNw8qTtqkREvFqRg0xKSgrt2rU753zbtm1JSUm5pCJSU1OZPn06sbGxeef279/P/v37GT9+PFu3buWdd97hX//6Fw899NAlfQ8Rn1CmjHnzb4UKZi+mxx7T4l8RkYso8tTSsWPHzrsWJjg4mKysrCIXcOzYMXr16sWMGTMYNWpU3vmYmBg+/PDDvK+vvfZaRo8ezf3338/p06cJCjp/6dnZ2WSf9Qhrbk1OpxOn01nk+i4kty939ikCQNWqOObNI7BdOxxz5pATG8uZAQNsV+WVdB+K
2OXJe7CwfRY5yNx44428//77vPTSSwXOJyUlUa9evaJ2R//+/Wnfvj133HFHgSBzPpmZmURERFwwxIB58/DI3DemnmX58uWEhYUVub4/k5yc7PY+RQCuefBBbpw1C8ezz/L18eMcOmvEUgrSfShilyfuwRMnThTquiIHmRdffJEuXbqwa9cuWrduDcDnn3/OvHnzWLhwYZH6SkpKYtOmTaSmpv7ptYcOHeLVV1/l0Ucfveh1w4YNY/DgwXlfZ2VlER0dTUJCAhEREUWq72KcTifJycnEx8cTrN2LxRPatuVMdjYBc+dyy+TJnF63DmrUsF2VV9F9KGKXJ+/Bws7yFDnIdOjQgSVLlvDaa6+xcOFCSpcuzU033cSKFSuoUKFCoftJT09n4MCBJCcnU6pUqYtem5WVRfv27alXrx4vv/zyRa8NDQ0lNDT0nPPBwcEe+UHnqX5FAJgxA77/HsfGjQR37w5ffgkeGFn0dboPRezyxD1Y2P6KvNgXoH379qxdu5bjx4+ze/du7rnnHp555hluuummQvexceNGMjIyaNiwIUFBQQQFBbF69WqmTJlCUFAQOTk5ABw9epS77rqL8PBwFi9erB9WUrKULg2LF0NkJHz7LTz0kBb/ioic5ZKCDJinl/r06UNUVBQTJkygdevWrF+/vtB/vk2bNqSlpbF58+a8FhcXR69evdi8eTOBgYFkZWWRkJBASEgIH3300Z+O3Ij4pehoWLgQgoIgKQnGj7ddkYiI1yjS1NLBgwd55513+Pvf/05WVhb33HMP2dnZLFmypMgLfcPDw4mJiSlwrkyZMlSsWJGYmJi8EHPixAnee+89srKy8ubLIiMjCQwMLNL3E/FpLVrA5MnQvz8MHWreNXPnnbarEhGxrtAjMh06dKB27dps2bKFSZMmsX//ft58802PFbZp0ya++uor0tLSqFWrFlWqVMlr6enpHvu+Il7r8cfN1NKZM3DffbBzp+2KRESsK/SIzKeffspTTz3F448/7rGtCVatWpV33KpVK1xaCyCSz+GAt96Cbdtg/Xro1MnsmB0ebrsyERFrCj0is2bNGo4ePUqjRo1o0qQJU6dO5dChQ56sTUT+KDQUPvwQqlQxgaZPHzNCIyJSQhU6yDRt2pQZM2Zw4MAB+vXrR1JSElFRUZw5c4bk5GSOHj3qyTpFJFdUFCxaBCEh5omm116zXZGIiDVFfmqpTJky/OUvf2HNmjWkpaXx9NNPM3bsWK688kruvvtuT9QoIn/UtKmZZgJ46SX4+GO79YiIWHLJj18D1K5dm3HjxrF3717mz5/vrppEpDAefhieeMK8V+b+++H7721XJCJS7C4ryOQKDAykU6dOfPTRR+7oTkQKa+JEaN4csrLM4t/MTNsViYgUK7cEGRGxJCQEFiyAqlVhxw4zMqPFvyJSgijIiPi6q64yi35DQ+Ef/4ARI2xXJCJSbBRkRPxBXJzZYBJg1CjziLaISAmgICPiLx54AAYNMsd9+sDWrXbrEREpBgoyIv5k3Dho3RqOH4eOHeHwYdsViYh4lIKMiD8JCoL334caNWD3bujRA3JybFclIuIxCjIi/qZSJViyBEqXhuXLYfhw2xWJiHiMgoyIP7rpJpg92xyPGwdJSXbrERHxEAUZEX91770wZIg5/stfYPNmq+WIiHiCgoyIPxs9Gu68E37/3bz599dfbVckIuJWCjIi/iwwEObPh1q14N//NqM0TqftqkRE3EZBRsTflS9vFv9ecQWsXAnPPmu7IhERt1GQESkJbrgB3n3XHE+eDHPm2K1HRMRNFGRESorOneGll8xxv36Qmmq3HhERN1CQESlJRoyADh0gO9sEm19+sV2RiMhlUZARKUkCAuC996BOHdi3D7p1g1OnbFclInLJFGRESpqICLP4NyIC1qyBgQNtVyQicskUZERKotq1Yd48cDjgr3+Fv/3NdkUiIpdEQUakpGrfHl591Rw/+SR8+aXdekRELoGCjEhJ
Nny4WSfjdELXrmbdjIiID1GQESnJHA6zuWRMDBw8CF26wMmTtqsSESk0BRmRku6KK8zi3/Ll4euvoX9/cLlsVyUiUigKMiIC114LSUnm8exZs+Dtt21XJCJSKAoyImIkJMDrr5vjxERYvdpqOSIihaEgIyL5nn4aevSA06ehe3fYs8d2RSIiF6UgIyL5HA6YORMaNIBff4VOneDECdtViYhckIKMiBQUFgaLF0OlSvDNN/Doo1r8KyJeS0FGRM5VvTosWACBgTB3LkycaLsiEZHz8pogM3bsWBwOB4mJiXnn/va3v9GqVSsiIiJwOBwcOXLEWn0iJU6rVvkB5tln4bPPrJYjInI+XhFkUlNTmT59OrGxsQXOnzhxgrvuuovhw4dbqkykhHvySXjwQThzBu69F3bvtl2RiEgB1oPMsWPH6NWrFzNmzKB8+fIFPktMTGTo0KE0bdrUUnUiJZzDAdOmQePGcPiwWfx7/LjtqkRE8gTZLqB///60b9+eO+64g1GjRl12f9nZ2WRnZ+d9nZWVBYDT6cTpdF52/7ly+3JnnyJeKTAQ3n+foGbNcKSlcaZPH3Jyd862TPehiF2evAcL26fVIJOUlMSmTZtITU11W59jxoxh5MiR55xfvnw5YWFhbvs+uZKTk93ep4g3qpCYyK0vvkjAhx/y/UMP8WO3brZLyqP7UMQuT9yDJwr56gdrQSY9PZ2BAweSnJxMqVKl3NbvsGHDGDx4cN7XWVlZREdHk5CQQEREhNu+j9PpJDk5mfj4eIKDg93Wr4jXatcOV0QE9O9P3blzub57d1xt21otSfehiF2evAdzZ1T+jLUgs3HjRjIyMmjYsGHeuZycHFJSUpg6dSrZ2dkEBgYWud/Q0FBCQ0PPOR8cHOyRH3Se6lfEKz3xBGzZgmP6dIJ69zabTF5/ve2qdB+KWOaJe7Cw/VkLMm3atCEtLa3Aub59+1KnTh2GDBlySSFGRIrBlCmwdSusXWsW/65fD24c7RQRKQprQSY8PJyYmJgC58qUKUPFihXzzh88eJCDBw+yc+dOANLS0ggPD6datWpUqFCh2GsWESAkBBYuhEaN4LvvoHdvWLTI7JwtIlLMvPonz1//+lcaNGjAI488AkCLFi1o0KABH330keXKREq4ypXNNgYhIbB0Kbz6qu2KRKSE8qogs2rVKiZNmpT39csvv4zL5TqnPfjgg9ZqFJH/uvlm+OtfzfHLL5tAIyJSzLwqyIiIj+nbFwYMMMf33w/bt9utR0RKHAUZEbk8EyZAy5Zw7Bh07AjaE01EipGCjIhcnuBgs1N2tWqwcyf07Ak5ObarEpESQkFGRC5fZCQsWQKlS8Onn8KLL9quSERKCAUZEXGPBg1g5kxzPGYMfPCB3XpEpERQkBER9+nZE555xhz37QtbttitR0T8noKMiLjXmDEQHw8nTpg3//72m+2KRMSPKciIiHsFBUFSElxzDfz0E9x3H5w+bbsqEfFTCjIi4n4VKpjFv2Fh8NlnMGSI7YpExE8pyIiIZ9x4I8yZY47feAPee89uPSLilxRkRMRzunWD5583x488Ahs32q1HRPyOgoyIeNbIkdC+PZw8CZ07Q0aG7YpExI8oyIiIZwUGmmml66+H9HTo3h2cTttViYifUJAREc8rV84s/g0Ph5QUGDzYdkUi4icUZESkeNStm7/gd+pUmDXLbj0i4hcUZESk+Nx9t1kzA/D447B+vd16RMTnKciISPF64QXzxt9Tp6BLFzhwwHZFIuLDFGREpHgFBMC770K9eibEdO0K2dm2qxIRH6UgIyLFLzwcli41i4DXrYMnnwSXy3ZVIuKDFGRExI5atWD+fHA4YOZMmD7ddkUi4oMUZETEnrvuMrtlAwwYAF98YbceEfE5CjIiYtdzz8E995gdsrt1g717bVckIj5EQUZE7HI4zDtlYmPN9gWdO5vtDERECkFBRkTsK1PGvPm3QgXYsAH69dPiXxEpFAUZEfEONWvC
Bx/kP549ZYrtikTEByjIiIj3aNMGxo83x08/DStW2K1HRLyegoyIeJfERHjgAcjJMYuAf/7ZdkUi4sUUZETEuzgc5p0yjRrBb7+Z7QxOnLBdlYh4KQUZEfE+pUvD4sUQGQnffgsPPaTFvyJyXgoyIuKdoqNh4UIICoKkpPy1MyIiZ1GQERHv1aIFTJ5sjocOhWXL7NYjIl5HQUZEvNvjj5uppTNn4L77YOdO2xWJiBdRkBER7+ZwwFtvQdOmcOSIWfx79KjtqkTESyjIiIj3Cw2FDz+EKlVg2zbo0wecThyrV3N1SgqO1avN49oiUuJ4TZAZO3YsDoeDxMTEvHMnT56kf//+VKxYkSuuuIKuXbvyyy+/2CtSROyJioJFiyAkxDzRVKkSQfHxxL3xBkHx8VCjhvlcREoUrwgyqampTJ8+ndjY2ALnBw0axMcff8yCBQtYvXo1+/fvp0uXLpaqFBHrmjY162UAsrIKfrZvn9k9W2FGpESxHmSOHTtGr169mDFjBuXLl887n5mZyd///nfeeOMNWrduTaNGjZg9ezZffvkl69evt1ixiFiTkwMff3z+z3LfM5OYqGkmkRIkyHYB/fv3p3379txxxx2MGjUq7/zGjRtxOp3ccccdeefq1KlDtWrVWLduHU2bNj1vf9nZ2WRnZ+d9nfXf/9fmdDpxOp1uqzu3L3f2KSIX51i9mqC9ey98gcsF6emcXrkSV8uWxVeYSAnlyd+Fhe3TapBJSkpi06ZNpKamnvPZwYMHCQkJoVy5cgXOX3XVVRw8ePCCfY4ZM4aRI0eec3758uWEhYVdds1/lJyc7PY+ReT8rk5JIa4Q1/3epw/7b72V32JiOFy7NjmlSnm8NpGSzBO/C08UcmsSa0EmPT2dgQMHkpycTCk3/pAZNmwYgwcPzvs6KyuL6OhoEhISiIiIcNv3cTqdJCcnEx8fT3BwsNv6FZELc5QpA2+88afXhe/fT+0FC2DBAlxBQbgaN8bVooVpzZrBFVcUQ7Ui/s+Tvwuz/rgO7gKsBZmNGzeSkZFBw4YN887l5OSQkpLC1KlTWbZsGadOneLIkSMFRmV++eUXKleufMF+Q0NDCQ0NPed8cHCwRwKHp/oVkfO4/XaoWtUs7D3f3ksOB1SuDCNHwhdfwKpVONLTcaxbB+vWweuvmy0P4uKgVSvTbr1VwUbkMnnid2Fh+7MWZNq0aUNaWlqBc3379qVOnToMGTKE6OhogoOD+fzzz+natSsAO3bsYM+ePTRr1sxGySJiW2Cg2bKgWzcTWs4OMw6H+d+pU6FLF3jkEfP5zz/DqlX5bc8eWL/etLFjTZ9xcdCyZX6wcePorYh4lrUgEx4eTkxMTIFzZcqUoWLFinnnH3roIQYPHkyFChWIiIhgwIABNGvW7IILfUWkBOjSxWwmOXAgnL3wt2pVmDTJfJ7L4YCaNU3r29ec+/lnWL3ahJrVq+Gnn+Crr0wbNw4CAqBRIxNqWraE226DsmWL7+8nIkVi/amli5k4cSIBAQF07dqV7Oxs7rzzTt5++23bZYmIbV26QMeOnF65ks2ffkr9tm0Juv12M7ryZ2rUMK1PH/P1v/9tAk1uuNm9G1JTTfvf/zXBpkGD/Kmo226DPzyEICL2OFyu8000+4+srCzKli1LZmam2xf7fvLJJ7Rr105rZEQs8ch9mJ5ecMTmj5tUOhwm2ORORTVvDme9A0ukJPHk78LC/v726hEZEZFiFx0N999vGpiFxWcHmx9+gE2bTJs40QSbm27Kn4pq0QIqVLD5NxApURRkREQu5uqroWdP0wD27y84FbVjB2zebNqkSSbY3Hhj/lRUixZQsaKt6kX8noKMiEhRREVBjx6mARw4ACkp+SM2330HW7aYNmWKuebGG/Onolq0gMhIW9WL+B0FGRGRy1GlCtx7r2kAv/ySH2xWrYLt2yEtzbSpU801N9yQPxXVsiVceaWl4kV8n4KMiIg7XXUVdO9uGkBGhgk2uVNR
W7fCtm2mvfWWuaZu3fypqJYtTR8iUigKMiIinnTlleYFft26ma8PHSo4FbVli5mO+u47mDbNXFOnTv5UVMuWZtRHRM5LQUZEpDhVqmTeg5P74r7ffsvbToFVq0yw+f5706ZPN9dcf33Bqairr7ZUvIj3UZAREbGpYkXo1Mk0gMOHTbDJnYravNk88v3DD/C3v5lrrruu4IhN1apWShfxBgoyIiLepEIF6NjRNID//AfWrMmfivrmG/jxR9NmzjTXXHttfqhp1cq8C0ekhFCQERHxZuXLQ4cOpgFkZuYHm1WrzIv5du0y7e9/N9fUrFkw2FSvbqd2kWKgICMi4kvKloX27U0DyMoywSZ3KmrjRrMR5k8/wezZ5poaNfJDTatW5msRP6EgIyLiyyIioF070wCOHoW1a/OnolJTzY7fP/8Mc+aYa6pVKzhiU7OmeSOxiA9SkBER8Sfh4XDXXaYBHDsGX36ZPxWVmgp79sC775oGZrHw2cHm2msVbMRnKMiIiPizK66AhATTAI4fN8Emdyrq669h71547z3TwDzeffZUVK1aCjbitRRkRERKkjJlID7eNIATJ2DduvypqPXrzY7f8+aZBuaFfGeP2Fx/vYKNeA0FGRGRkiwsDNq0MQ1MsFm/Pn/EZv16szHm/PmmAVSuXPA9NnXqKNiINQoyIiKSLywMWrc2DeD33+Grr/KDzbp1cPAgvP++aWC2YTh7KqpuXQUbKTYKMiIicmGlS+cHlBEj4ORJs64mdyrqyy/NxpgLFpgGEBmZv51Cq1ZQrx4EBNj7O4hfU5AREZHCK1UKWrQwDSA72wSb3BGbL7+EX3+FhQtNA7MNw9lTUTExCjbiNgoyIiJy6UJDoXlz0154AU6dMo945wabtWvNxpiLFpkGZhuGFi3yR3puvFHBRi6ZgoyIiLhPSAjceqtpw4ebYLNxY/5U1Jo1ZmPMJUtMA7MNQ4sW+aM2sbEQGGjv7yA+RUFGREQ8JyQEmjUzbdgwcDpNsMkdsVmzxmyMuXSpaWC2YcgdsWnZEurXV7CRC1KQERGR4hMcDE2bmjZkCJw+bTa+zA02X3xhNsb8+GPTwASb5s3zR2zq14cg/foSQ/8liIiIPUFBcPPNpj37rAk2mzfnT0WlpJhg849/mAZmG4azg03Dhgo2JZj+zYuIiPcICoK4ONOeeQZyckywOXvE5sgR+OQT08Bsw3DbbflTUY0amZEfKREUZERExHsFBppg0qgRDB5sgs2WLQVHbP7zH/jXv0wDsw3Dbbflj9jExSnY+DEFGRER8R2BgdCggWmDBsGZM5CWlr+7d0qKeSpq2TLTwLyt+NZb80dsGjc2i5DFLyjIiIiI7woIgJtuMm3gQBNstm7Nn4pavdq8xyY52TQwbyu+5Zb8YHPzzeZ9OOKTFGRERMR/BASY99DExsKAASbYbN+eH2pWrYJDh+Dzz00D87biW27Jn4q6+WZzTnyCgoyIiPivgACzJUJMDDz5JLhc8N13+VNRq1ebvaJWrDANzOhMs2b5IzZNmyrYeDEFGRERKTkcDrOJZb168MQTJth8/33BqaiDB/ODDphg06RJfrBp1sxMT4lXUJAREZGSy+GAunVNe+wxE2x++KHgVNSBA2YRcUqK+TMhISbY5E5FNWtmFhSLFQoyIiIiuRwOqF3btH79TLDZubPgVNS+feZ9Nl98AaNGmUe7b745f8TmllvMI+BSLBRkRERELsThgOuuM+2RR0yw2bUrf7Rm1SrYu9fs8r12LYwebV7q17hx/u7et9xiXtonHmF13/Rp06YRGxtLREQEERERNGvWjE8//TTv8127dtG5c2ciIyOJiIjgnnvu4ZdffrFYsYiIlGgOB9SqBQ89BP/3f7Bnjwk2f/879O4N1aqZbRbWrYMxY+DOO83u3s2awdCh5qV9R4/a/lv4FatBpmrVqowdO5aNGzeyYcMGWrduTceOHdm2bRvHjx8nISEBh8PBihUrWLt2LadOnaJD
hw6cOXPGZtkiIiKGwwHXXAN/+QvMmQP//jf89BPMng19+kCNGibYrF8Pr78ObduaYNOkidk085NPICvL9t/Cp1mdWurQoUOBr0ePHs20adNYv349+/bt4+eff+abb74hIiICgDlz5lC+fHlWrFjBHXfcYaNkERGRi6tRAx580DQw4ebsp6J274avvzZt3DjziHjDhvlTUbfdZnb8lkLxmjUyOTk5LFiwgOPHj9OsWTN27dqFw+Eg9Ky3LZYqVYqAgADWrFlzwSCTnZ1NdnZ23tdZ/026TqcTp9Pptnpz+3JnnyJSNLoPxSdERUGPHqYB7NmDIyWFgC++wJGSgmPXLtiwwbTx43EFBOCqXx9Xixam3XYblCtn9a9wIZ68Bwvbp/Ugk5aWRrNmzTh58iRXXHEFixcvpl69ekRGRlKmTBmGDBnCa6+9hsvlYujQoeTk5HDgwIEL9jdmzBhGjhx5zvnly5cT5oHH45JzX3ktItboPhSfU6ECdOwIHTtS6tAhKm3bRsWtW6m0bRtX7N+PY9Mm2LQJJk3C5XCQWbMmh2Ji+O2GG/itXj2c4eG2/wYFeOIePHHiRKGuc7hcLpfbv3sRnDp1ij179pCZmcnChQuZOXMmq1evpl69eixfvpzHH3+cn376iYCAAHr06MH27du5+eabmTZt2nn7O9+ITHR0NIcOHcqbonIHp9NJcnIy8fHxBGtXVRErdB+KX9q/34zUpKQQkJKC44cfCnzscjggNpYzuSM2zZubYGSBJ+/BrKwsKlWqRGZm5kV/f1sfkQkJCaFWrVoANGrUiNTUVCZPnsz06dNJSEhg165dHDp0iKCgIMqVK0flypW55pprLthfaGhogemoXMHBwR75QeepfkWk8HQfil+pXh0eeMA0MC/kW706b52N4/vv4dtvCfz2W3jzTXNNbGz+e2xatIBKlYq1ZE/cg4Xtz3qQ+aMzZ84UGFEBqPTffyErVqwgIyODu+++20ZpIiIixa9KFbjvPtPAbKGQkpK/eHj7dtiyxbQpU8w1MTH5waZlS4iMtFW9x1kNMsOGDaNt27ZUq1aNo0ePMm/ePFatWsWyZcsAmD17NnXr1iUyMpJ169YxcOBABg0aRO3atW2WLSIiYk/lynDPPaaB2fQyN9isWgXbtsHWraZNnWquueGG/C0VWraEK6+0VLz7WQ0yGRkZ9O7dmwMHDlC2bFliY2NZtmwZ8fHxAOzYsYNhw4Zx+PBhatSowfPPP8+gQYNsliwiIuJdrrwSunUzDeDXX02wyX3kOy3NhJtt2+Dtt801desWHLGpXNlW9ZfN+mJfT8vKyqJs2bJ/ulioqJxOJ5988gnt2rXT3LyIJboPRQrh0CGzL1TuVNS33557Te3aBYNNVNSf95uTw+mVK9n86afUb9uWoNtvh8BAt5Vd2N/fXrdGRkRERNyoUiXo3Nk0gMOH84PNqlUm2OzYYdr06eaa668vOBV19dUF+1y0CAYOJGjvXuIA3ngDqlaFyZOhS5di+6uBgoyIiEjJctY7bAD4z39MsMmdivrmG/jhB9NmzDDX1KqVH2pOnIDHHjMbaJ5t3z4zvbVwYbGGGQUZERGRkqx8ebj7btMAjhyBNWvyp6I2bYKdO02bOfPC/bhcZu+pxEQTktw4zXQxCjIiIiKSr1w5+J//MQ0gMxPWrjXB5qOPzBTUhbhckJ5uRnhatSqGYhVkRERE5GLKloV27Uxr0AB69vzzP3ORrYTcLaDYvpOIiIj4tipV3HudGyjIiIiISOE0b26eTnI4zv+5wwHR0ea6YqIgIyIiIoUTGGgesYZzw0zu15MmFdtCX1CQERERkaLo0sU8Yv3Hd8tUrVrsj16DFvuKiIhIUXXpAh07evTNvoWlICMiIiJFFxiIq2VL9h0/zk0tW1oJMaCpJREREfFhCjIiIiLisxRkRERExGcpyIiIiIjPUpARERERn6UgIyIiIj5LQUZERER8loKMiIiI+CwFGREREfFZfv9m
X5fLBUBWVpZb+3U6nZw4cYKsrCyCg4Pd2reIFI7uQxG7PHkP5v7ezv09fiF+H2SOHj0KQHR0tOVKREREpKiOHj1K2bJlL/i5w/VnUcfHnTlzhv379xMeHo7jj1uOX4asrCyio6NJT08nIiLCbf2KSOHpPhSxy5P3oMvl4ujRo0RFRREQcOGVMH4/IhMQEEDVqlU91n9ERIR+gIpYpvtQxC5P3YMXG4nJpcW+IiIi4rMUZERERMRnKchcotDQUEaMGEFoaKjtUkRKLN2HInZ5wz3o94t9RURExH9pREZERER8loKMiIiI+CwFGREREfFZCjIiIiLisxRkLkFKSgodOnQgKioKh8PBkiVLbJckUqJMmzaN2NjYvJdwNWvWjE8//dR2WSIlxssvv4zD4SjQ6tSpY6UWBZlLcPz4cW666Sbeeust26WIlEhVq1Zl7NixbNy4kQ0bNtC6dWs6duzItm3bbJcmUmLccMMNHDhwIK+tWbPGSh1+v0WBJ7Rt25a2bdvaLkOkxOrQoUOBr0ePHs20adNYv349N9xwg6WqREqWoKAgKleubLsMjciIiG/LyckhKSmJ48eP06xZM9vliJQYP/74I1FRUVxzzTX06tWLPXv2WKlDIzIi4pPS0tJo1qwZJ0+e5IorrmDx4sXUq1fPdlkiJUKTJk145513qF27NgcOHGDkyJE0b96crVu3Eh4eXqy16M2+l8nhcLB48WI6depkuxSREuXUqVPs2bOHzMxMFi5cyMyZM1m9erXCjIgFR44coXr16rzxxhs89NBDxfq9NSIjIj4pJCSEWrVqAdCoUSNSU1OZPHky06dPt1yZSMlTrlw5rr/+enbu3Fns31trZETEL5w5c4bs7GzbZYiUSMeOHWPXrl1UqVKl2L+3RmQuwbFjxwqkzp9++onNmzdToUIFqlWrZrEykZJh2LBhtG3blmrVqnH06FHmzZvHqlWrWLZsme3SREqEZ555hg4dOlC9enX279/PiBEjCAwMpEePHsVei4LMJdiwYQO333573teDBw8GoE+fPrzzzjuWqhIpOTIyMujduzcHDhygbNmyxMbGsmzZMuLj422XJlIi7N27lx49evDbb78RGRnJbbfdxvr164mMjCz2WrTYV0RERHyW1siIiIiIz1KQEREREZ+lICMiIiI+S0FGREREfJaCjIiIiPgsBRkRERHxWQoyIiIi4rMUZERERMRnKciIiM9p1aoViYmJtssQES+gICMiIiI+S0FGREREfJaCjIj4vH/+85+ULVuWuXPn2i5FRIqZdr8WEZ82b948HnvsMebNm8f//M//2C5HRIqZRmRExGe99dZbPPHEE3z88ccKMSIllEZkRMQnLVy4kIyMDNauXUvjxo1tlyMilmhERkR8UoMGDYiMjGTWrFm4XC7b5YiIJQoyIuKTrr32WlauXMnSpUsZMGCA7XJExBJNLYmIz7r++utZuXIlrVq1IigoiEmTJtkuSUSKmYKMiPi02rVrs2LFClq1akVgYCATJkywXZKIFCOHS5PLIiIi4qO0RkZERER8loKMiIiI+CwFGREREfFZCjIiIiLisxRkRERExGcpyIiIiIjPUpARERERn6UgIyIiIj5LQUZERER8loKMiIiI+CwFGREREfFZ/w85QlhesOb6FQAAAABJRU5ErkJggg==", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAjIAAAGwCAYAAACzXI8XAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAABC20lEQVR4nO3deVxVZeLH8c9lEURBVDBBATMVk9SscWvcqp9OVuSaRus0tk2LWumMZY3Sr6IpR63JctrMKc1da37ljJi5S42mJjmWUI6yOA4aIKB4hfv74wRIgAHey3Mv9/t+vXh1OPf08K3p4HfOec5zbA6Hw4GIiIiIB/IxHUBERESkvlRkRERExGOpyIiIiIjHUpERERERj6UiIyIiIh5LRUZEREQ8loqMiIiIeCw/0wFcrbS0lKysLIKDg7HZbKbjiIiISC04HA5OnjxJZGQkPj41X3dp9EUmKyuLqKgo0zFERESkHo4cOUL79u1r/LzRF5ng4GDA+hcREhLitHHtdjvr1q1j2LBh+Pv7O21cEak9nYciZrnyHMzPzycqKqr8z/GaGC0yM2fOJDExsdK+2NhYDhw4AEB6ejpTpkxh69atFBcXc9111/HnP/+Ziy66qNY/o+x2UkhIiNOLTFBQECEhIfoFKmKIzkMRsxriHPy5aSHGJ/vGxcWRnZ1d/rV161YACgsLGTZsGDabjQ0bNrBt2zbOnDlDfHw8paWlhlOLiIiIOzB+a8nPz4+2bdtW2b9t2zYOHTrE7t27y6+kLFy4kJYtW7Jhwwb+53/+p6GjioiIiJsxXmQOHjxIZGQkgYGB9O/fn6SkJKKjoykuLsZmsxEQEFB+bGBgID4+PmzdurXGIlNcXExxcXH59/n5+YB1+ctutzstd9lYzhxTROpG56GIWa48B2s7ptEi07dvX959911iY2PJzs4mMTGRgQMHkpqaSr9+/WjWrBm///3vef7553E4HEybNo2SkhKys7NrHDMpKanKvBuAdevWERQU5PR/huTkZKePKSJ1o/NQxCxXnINFRUW1Os7mcDgcTv/p9ZSbm0tMTAyzZ89mwoQJrFu3jt/+9rd8//33+Pj4kJCQwP79++nTpw+vv/56tWNUd0UmKiqKnJwcp0/2TU5OZujQoZpkKGKIzkMRs1x5Dubn5xMWFkZeXt55//w2fmvpXKGhoXTp0oW0tDQAhg0bRnp6Ojk5Ofj5+REaGkrbtm3p2LFjjWMEBARUuh1Vxt/f3yW/6Fw1rojUns5DEbNccQ7WdjzjTy2dq6CggPT0dCIiIirtDwsLIzQ0lA0bNnDs2DFuuukmQwlFRETEnRi9IjNlyhTi4+OJiYkhKyuLGTNm4OvrS0JCAgALFizg0ksvJTw8nB07djBp0iQeffRRYmNjTcYWERERN2G0yGRkZJCQkMDx48cJDw9nwIABpKSkEB4eDsA333zDE088wYkTJ+jQoQPTp0/n0UcfNRlZRERE3IjRIrNkyZLzfv7CCy/wwgsvNFAaERERqbWSEmybNtFu82ZszZrB1VeDr2+Dx3CrOTIiIiLiAVatgg4d8Bs6lF/Mno3f0KHQoYO1v4GpyIiIiEjtrVoFY8dCRkbl/ZmZ1v4GLjMqMiIiIlI7JSUwaRJUtwRd2b7Jk63jGoiKjIiIiNTOxo1Vr8Scy+GAI0dgy5YGi+RWC+KJiIiIGzl7FvbsgU2brBLz6ae1+/vO8yohZ1OREREREcvZs/DllxXFZetW+PHly3Xyk4VtXUlFRkRExFvZ7VZx2bjRKi9bt8LJk5WPCQmBQYNgyBAYMMCa0JuZWf08GZsN2reHgQMbIj2gIiMiIuI97HbYubPyFZfCwsrHhIZaxWXwYKu89OxZeX2Yl1+2yozNVrnM2GzWX+fObdD1ZFRkREREGqszZ+Cf/6woLtu3Vy0uLVtWXHEZMgS6dz9/ERk9GlassJ5eOnfib/v2VokZPdr5/xznoSIjIiLSWBQXW8Wl7FbRtm1w6lTlY1q
1qrjaMniwVVx86vgQ8+jRMGIEZz/7jD1r13L58OH4GVrZV0VGRETEUxUXw+efV1xx2bGjanEJC7MKS1l5iYure3Gpjq8vjsGDySwspOfgwUZKDKjIiIiIeI7TpyElpaK4pKRY+84VHl5RWoYMgUsvdU5xcVMqMiIiIu7q1CmrrJTdKkpJsa7CnKtNm4rSMniwVVzKJt56ARUZERERd1FUZN0eKrvi8vnn1oTdc7VtW1FahgyB2FivKi4/pSIjIiJiSmGh9SRRWXH54gvrEelzRUZWvlXUubNXF5efUpERERFpKAUFVnEpu1X0xRfWarrnateu8q2iTp1UXM5DRUZERMRVTp60HoEuu+Kyc2fV4hIVVflWUceOKi51oCIjIiLiLPn5VnHZuNH62rULSkoqHxMdXXHFZcgQ6NBBxeUCqMiIiIjUV16etcx/2a2iXbugtLTyMR06VL5V1KFDg8dszFRkREREais3F7ZsqbhVtHt31eLSsWNFaRk8GGJiDAT1HioyIiIiNfnhB6u4lN0q2rOn6lufO3WqvOR/VFTD5/RiKjIiIiJlTpyAzZsrbhXt3Vu1uHTuXPlWUbt2BoJKGRUZERHxXjk5VnEpu1W0b1/V4hIbW/lWUWSkiaRSAxUZERHxHv/9b8UVl40bITW16jGXXlpxq2jQIIiIaOCQUhcqMiIi0ngdO1ZxtWXTJvj666rHdOtWcato0CC46KIGDikXQkVGREQaj6NHrcJSVl7+9a+qx1x2WcWtokGDrJcuisdSkREREc+VnV35isuBA1WP6dGj8q2isLCGTikupCIjIiKeIzOzcnH59tvKn9tsVnEpu1U0cCC0bm0gqDQUFRkREXFfGRkVpWXjRkhLq/y5zQaXX15xq2jgQGjVquFzijEqMiIi4j4OH658xSU9vfLnPj7Qq1fFraIBA6BlSxNJxU2oyIiIiDn//nfFo9CbNsH331f+3McHrrii4lbRgAHQokXD5xS3pSIjIiINw+GAQ4cq3yr6978rH+PrC1deWXGraMAACAlp+KziMVRkRETENRwO+O67yreKDh+ufIyvL/TuXXGr6Je/hOBgE2nFQ6nIiIiIczgc1pyWc6+4ZGRUPsbPzyouZbeKrroKmjdv+KzSaBgtMjNnziQxMbHSvtjYWA78uA7A0aNHmTp1KsnJyZw8eZLY2FimT5/OmDFjTMQVEZFzORxw8GDl4pKVVfkYf3/o06fiVtFVV0GzZgbCSmNl/IpMXFwc69evL//ez68i0p133klubi4fffQRYWFhLF68mHHjxrFz50569eplIq6IiPdyOOCbbyrfKsrOrnyMvz/061dxq6h/fwgKMpFWvITxIuPn50fbtm2r/Wz79u28/vrr9OnTB4CnnnqKOXPmsGvXLhUZERFXczislXLPveLyn/9UPqZJE6u4lF1x6ddPxUUalPEic/DgQSIjIwkMDKR///4kJSURHR0NwFVXXcXSpUu54YYbCA0NZdmyZZw+fZohQ4bUOF5xcTHFxcXl3+fn5wNgt9ux2+1Oy102ljPHFJG60XnoZA4H7N+Pz5Yt2DZtwrZlC7ZjxyofEhCAo18/HIMGWV99+kDTppXH0f8eXsOV52Btx7Q5HA6H0396La1du5aCggJiY2PJzs4mMTGRzMxMUlNTCQ4OJjc3l/Hjx7Nu3Tr8/PwICgpi+fLlDBs2rMYxq5t3A7B48WKC9P8SREQqlJYSfPgwYV9/TVhqKq337ycgL6/SISVNmnAiNpacyy7j+GWX8UPnzpQ2aWIosHiToqIibr31VvLy8gg5zyP4RovMT+Xm5hITE8Ps2bOZMGECjzzyCF988QXPP/88YWFhrFmzhjlz5rBlyxa6d+9e7RjVXZGJiooiJyfnvP8i6sput5OcnMzQoUPx9/d32rgiUns6D+uotBRSU/HZvBnb5s3WFZfjxysd4mjaFEf//hVXXHr3hoAAQ4HF3bnyHMzPzycsLOxni4zxW0vnCg0NpUu
XLqSlpZGens6rr75KamoqcXFxAPTs2ZMtW7Ywb9485s+fX+0YAQEBBFRz0vn7+7vkF52rxhWR2tN5WIPSUvjqq4r5LZs3w4kTlY8JCrLWbvlxjoutd29suuIideSKc7C247lVkSkoKCA9PZ077riDoqIiAHx8fCod4+vrS2lpqYl4IiLuraQE9u6tKC5btsAPP1Q+plkza7XcsqeKrrzSmrAr4qGMFpkpU6YQHx9PTEwMWVlZzJgxA19fXxISEggNDaVTp07cf//9zJo1i9atW7NmzRqSk5P5v//7P5OxRUTcQ0kJ7NlT8VTR5s3wkzkuNG9uFZeyp4quvNJ6RFqkkTBaZDIyMkhISOD48eOEh4czYMAAUlJSCA8PB+CTTz5h2rRpxMfHU1BQQKdOnVi4cCHXX3+9ydgiImacPQu7d1e+4vLjk5nlgoNh4MCK4nLFFdZquiKNlNH/upcsWXLezzt37szKlSsbKI2IiJs5exZ27aooLlu3wsmTlY8JCYFBgypuFV1+uYqLeBX91y4i4i7sdqu4lN0q2roVCgoqHxMaWvmKy+WXWy9eFPFSKjIiIqacOQM7d1Zccdm2DQoLKx/TsqV1xaWsuPTooeIicg4VGRGRhnLmDHzxRUVx2b4dfnxCs1yrVlZhKbtV1L07/OTpTRGpoCIjIuIqxcVWcSm7VbR9O5w6VfmY1q0rSsvgwXDZZSouInWgIiMi4iynT8Pnn1dccdmxw9p3rvDwysWlWzcVF5ELoCIjIlJfp05BSkpFcUlJsa7CnKtNm4rSMmQIXHop2GwGwoo0TioyIiK1deqUdZWl7FZRSoo17+VcbdtWlJYhQyA2VsVFxIVUZEREalJUZM1rKbvi8sUXVYtLRETlKy5duqi4iDQgFRkRkTKFhVZx2bjR+vrnP621Xc7Vrl3l4tKpk4qLiEEqMiLivQoKrLVbym4V/fOf1mq652rfvuI20ZAh0LGjiouIG1GRERHPVFKCbdMm2m3ejK1ZM7j66p9fKO7kSWu13LJbRTt3Wi9ePFd0dOUrLhdfrOIi4sZUZETE86xaBZMm4ZeRwS8AZs+2rpy8/DKMHl1xXH6+VVzKbhV9+WXV4tKhQ+Xi0qFDw/wziIhTqMiIiGdZtQrGjgWHo/L+zExr/7Rp1oTcTZus4lJaWvm4jh0rr+MSE9Ng0UXE+VRkRMRzlJTApElVSwxU7EtKqrz/kksqSsvgwdatIxFpNFRkRMRzbNkCGRk/f9wNN0BCglVc2rd3fS4RMUZFRkQ8R3Z27Y677TaryIhIo6cXfIiI54iIcO5xIuLxVGRExHMMHAjBwTV/brNBVJR1nIh4BRUZEfEca9ZYa8FUp2ytl7lzf349GRFpNFRkRMQz7NsHd91lbd94Y9VJvO3bw4oVldeREZFGT5N9RcT9nTgBI0da70K69lpYvRpsNs5+9hl71q7l8uHD8avNyr4i0uioyIiIezt7Fm65Bb77zlp1d+lS8LN+dTkGDyazsJCegwerxIh4Kd1aEhH39uSTkJwMQUHWHJnWrU0nEhE3oiIjIu7rgw/gpZes7QULoGdPs3lExO2oyIiIe9q9GyZMsLanTYNx48zmERG3pCIjIu7nv/+1JveeOgXXXQfPPms6kYi4KRUZEXEvdrt19eXwYejUCRYv1kReEamRioyIuJcpU2DjRmje3Jrc27Kl6UQi4sZUZETEfbz7LrzyirX93nsQF2c0joi4PxUZEXEPX3wBDzxgbc+YYc2RERH5GSoyImLe0aPWqwWKi+Gmm+APfzCdSEQ8hIqMiJh15gyMHQuZmdC1q3VLyUe/mkSkdvTbQkTMmjQJtm2DkBD48EPrryIitaQiIyLmvPEGzJ8PNpv1mHWXLqYTiYiHMVpkZs6cic1mq/TVtWtXAA4dOlTls7Kv5cuXm4wtIs6wbRs8/LC1/eyzcMMNZvOIiEcy/vbruLg41q9fX/69349vtY2KiiI7O7vSsW+88QYvvfQSw4cPb9C
MIuJkmZkwZoy1+N3YsfDEE6YTiYiHMl5k/Pz8aNu2bZX9vr6+VfavXr2acePG0bx58xrHKy4upri4uPz7/Px8AOx2O3a73UmpKR/LmWOKeIXTp/EdNQqf//wHx2WXcfaNN+Ds2XoNpfNQxCxXnoO1HdN4kTl48CCRkZEEBgbSv39/kpKSiI6OrnLcrl272LNnD/PmzTvveElJSSQmJlbZv27dOoKCgpyWu0xycrLTxxRptBwOev35z0T/85+cad6cTQ8/TNHmzRc8rM5DEbNccQ4WFRXV6jibw+FwOP2n19LatWspKCggNjaW7OxsEhMTyczMJDU1leDg4ErHPvjgg2zcuJH9+/efd8zqrshERUWRk5NDiBOfhrDb7SQnJzN06FD8/f2dNq5IY+bz2mv4Tp6Mw8eHkv/7Pxz/8z8XNJ7OQxGzXHkO5ufnExYWRl5e3nn//DZ6RebcuS49evSgb9++xMTEsGzZMiZMmFD+2alTp1i8eDFPP/30z44ZEBBAQEBAlf3+/v4u+UXnqnFFGp2NG+HxxwGwvfgifk6c66bzUMQsV5yDtR3PrR6/Dg0NpUuXLqSlpVXav2LFCoqKirjzzjsNJRORC3L4MNx8M5SUwK23wmOPmU4kIo2EWxWZgoIC0tPTiYiIqLT/7bff5qabbiI8PNxQMhGpt6Ii671JOTnQqxe8+aa1boyIiBMYLTJTpkxh06ZNHDp0iO3btzNq1Ch8fX1JSEgoPyYtLY3Nmzdzzz33GEwqIvXicMC998Lu3RAWBqtXgwsm3YuI9zI6RyYjI4OEhASOHz9OeHg4AwYMICUlpdKVl3feeYf27dszbNgwg0lFpF5mz7ZW7PX1heXLISbGdCIRaWSMFpklS5b87DHPP/88zz//fAOkERGnSk6G3/3O2p47F4YMMZlGRBopt5ojIyKNxHffwfjxUFoKd98NDz1kOpGINFIqMiLiXAUF1uTeH36APn3gtdc0uVdEXEZFRkScx+GwrsDs2wcXXQSrVkFgoOlUItKIqciIiPO88AKsWAH+/rByJbRrZzqRiDRyKjIi4hwffwzTp1vbr74Kv/yl2Twi4hVUZETkwn3zjbVir8MB998P991nOpGIeAkVGRG5MPn51uTe/HzrKswrr5hOJCJeREVGROqvtBTuuAMOHLDmw6xYAU2amE4lIl5ERUZE6u+ZZ+CjjyAgwHr9QNu2phOJiJdRkRGR+lmzBhITre3586F3b6NxRMQ7qciISN3t32/dUgKYOBF+/WujcUTEe6nIiEjd5ObCiBHWCr5DhsCsWaYTiYgXU5ERkdorKYGEBEhLg+hoWLbMWvxORMQQFRkRqb2nnoK//x2aNrXmyISHm04kIl5ORUZEamfZMusVBABvvw29epnNIyKCioyI1MbevdbLIAGmTrVuL4mIuAEVGRE5v+PHrZV7i4pg2DBISjKdSESknIqMiNTs7FkYPx4OHYKOHeGDD8DX13QqEZFyKjIiUrPf/x4+/RSaNbMm97ZqZTqRiEglKjIiUr3334fZs63thQuhe3ezeUREqqEiIyJV7doF995rbU+fDmPGmM0jIlIDFRkRqew//7Em954+DTfcYL0YUkTETanIiEgFux1uvhkyMiA2FhYtAh/9mhAR96XfUCJS4dFHYcsWCA62Jve2aGE6kYjIeanIiIjl7bdh3jxre9Ei6NrVbB4RkVpQkRERSEmBBx+0tp95BuLjzeYREaklFRkRb5edDaNHw5kzMGqU9ZSSiIiHUJER8WbFxdaj1dnZ0K2btV6MJveKiAfRbywRb+VwwMMPw44dEBoKH35oTfIVEfEgKjIi3mr+fHjrLesKzJIl0KmT6UQiInWmIiPijbZsgYkTre2kJPjVr8zmERGpJxUZEW9z5AiMHVvxZuupU00nEhGpNxUZEW9y6pT1ZNKxY9Czp7V2jM1mOpWISL2pyIh4C4cDHnjAeiFk69bWyr3NmplOJSJyQYwWmZkzZ2Kz2Sp9df3
JaqI7duzgmmuuoVmzZoSEhDBo0CBOnTplKLGIB3vlFfjrX8HXF5YuhQ4dTCcSEblgfqYDxMXFsX79+vLv/fwqIu3YsYPrrruOJ554gj//+c/4+fmxd+9efLTOhUjdbNgAjz9ubc+aBddeazaPiIiTGC8yfn5+tG3bttrPHn30USZOnMi0adPK98XGxp53vOLiYoqLi8u/z8/PB8But2O3252QmPLxzv2riNv6/nv8xo3DVlJC6W23UfLgg9ZbrhsBnYciZrnyHKztmMaLzMGDB4mMjCQwMJD+/fuTlJREdHQ0x44d4/PPP+e2227jqquuIj09na5du/Lcc88xYMCAGsdLSkoiMTGxyv5169YRFBTk9PzJyclOH1PEWXxPn2bgtGm0OH6cHzp1YutNN1G6dq3pWE6n81DELFecg0VFRbU6zuZwOBxO/+m1tHbtWgoKCoiNjSU7O5vExEQyMzNJTU3l66+/pn///rRq1YpZs2Zx+eWX89e//pXXXnuN1NRUOnfuXO2Y1V2RiYqKIicnh5CQEKdlt9vtJCcnM3ToUPz9/Z02rojTOBz43n47PsuX42jThrM7dkBUlOlUTqXzUMQsV56D+fn5hIWFkZeXd94/v41ekRk+fHj5do8ePejbty8xMTEsW7aMSy+9FID777+fu+++G4BevXrx6aef8s4775CUlFTtmAEBAQQEBFTZ7+/v75JfdK4aV+SCvfgiLF8Ofn7YVqzAv2NH04lcRuehiFmuOAdrO55bzZoNDQ2lS5cupKWlERERAUC3bt0qHXPppZdy+PBhE/FEPMff/w5lc8teeQUGDjSbR0TERdyqyBQUFJCenk5ERAQdOnQgMjKSb775ptIx3377LTExMYYSiniAtDRISLDWjbnnHmvtGBGRRsroraUpU6YQHx9PTEwMWVlZzJgxA19fXxISErDZbEydOpUZM2bQs2dPLr/8chYuXMiBAwdYsWKFydgi7uvkSRg5EnJzoV8/ePVVrdwrIo2a0SKTkZFBQkICx48fJzw8nAEDBpCSkkJ4eDgAkydP5vTp0zz66KOcOHGCnj17kpyczCWXXGIytoh7Ki2Fu+6Cr7+GiAhYuRKqmS8mItKYGC0yS5Ys+dljpk2bVmkdGRGpwXPPwerV0KQJrFoFkZGmE4mIuJxbzZERkXr66CP4wx+s7ddft24riYh4ARUZEU934ADcfru1/dBD8JvfmM0jItKAVGREPFleHowYYU3yHTQI5swxnUhEpEGpyIh4qtJSuO02+PZbaN/eWvxOi8KJiJdRkRHxVDNmwMcfQ2AgrFkDbdqYTiQi0uBUZEQ80cqV8Oyz1vYbb8CVV5rNIyJiiIqMiKdJTbXWiwF49FG44w6zeUREDFKREfEkJ05Yk3sLC+Haa60XQ4qIeDEVGRFPcfYs3HILfPcddOgAS5eCn9E1LUVEjKtzkenQoQPPPPOM3kAt0tCefBKSkyEoyJrc27q16UQiIsbVuchMnjyZVatW0bFjR4YOHcqSJUsoLi52RTYRKfPBB/DSS9b2ggXQs6fZPCIibqJeRWbPnj188cUXXHrppTzyyCNERETw8MMP8+WXX7oio4h3270bJkywtqdNg3HjzOYREXEj9Z4jc8UVV/DKK6+QlZXFjBkzeOutt+jduzeXX34577zzDg6Hw5k5RbzTf/8LI0fCqVNw3XUVj1yLiAhwAW+/ttvtrF69mgULFpCcnEy/fv2YMGECGRkZPPnkk6xfv57Fixc7M6uId7HbYfx4OHwYOnWCxYvB19d0KhERt1LnIvPll1+yYMECPvjgA3x8fLjzzjuZM2cOXbt2LT9m1KhR9O7d26lBRbzO1Knw2WfQvLk1ubdlS9OJRETcTp2LTO/evRk6dCivv/46I0eOxL+ad7tcfPHF3HLLLU4JKOKVFi6El1+2tt97D+LizOYREXFTdS4y3333HTExMec9plmzZixYsKDeoUS82hdfwP33W9szZlhzZEREpFp1nux
77NgxPv/88yr7P//8c3bu3OmUUCJe6+hRGD0aiovhppvgD38wnUhExK3Vucg89NBDHDlypMr+zMxMHnroIaeEEvFKZ87A2LGQmQldu1q3lHy0+LaIyPnU+bfk/v37ueKKK6rs79WrF/v373dKKBGvNGkSbNsGISHw4YfWX0VE5LzqXGQCAgL4z3/+U2V/dnY2fnrvi0j9vPEGzJ8PNpv1mHWXLqYTiYh4hDoXmWHDhvHEE0+Ql5dXvi83N5cnn3ySoUOHOjWciFfYvh0eftjafvZZuOEGs3lERDxInS+hzJo1i0GDBhETE0OvXr0A2LNnDxdddBHvvfee0wOKNGqZmTBmjLX43dix8MQTphOJiHiUOheZdu3a8dVXX7Fo0SL27t1L06ZNufvuu0lISKh2TRkRqcHp09YTSkePQvfu1ssgbTbTqUREPEq9JrU0a9aM++67z9lZRLyHwwG//a21ZkzLltbKvc2bm04lIuJx6j07d//+/Rw+fJgzZ85U2n/TTTddcCiRRm/ePHj3Xevx6qVLoWNH04lERDxSvVb2HTVqFPv27cNms5W/5dr24yXxkpIS5yYUaWw2boTJk63tF18ETZIXEam3Oj+1NGnSJC6++GKOHTtGUFAQX3/9NZs3b+YXv/gFGzdudEFEkUbk8GG4+WYoKYFbb4XHHjOdSETEo9X5isyOHTvYsGEDYWFh+Pj44OPjw4ABA0hKSmLixIns3r3bFTlFPF9RkfXepJwc6NUL3nxTk3tFRC5Qna/IlJSUEBwcDEBYWBhZWVkAxMTE8M033zg3nUhj4XDAfffB7t0QFgarV0NQkOlUIiIer85XZC677DL27t3LxRdfTN++fXnxxRdp0qQJb7zxBh01YVGkerNnw6JF4OsLK1bAz7xBXkREaqfOReapp56isLAQgGeeeYYbb7yRgQMH0rp1a5YuXer0gCIeLzkZfvc7a3vuXBg82GgcEZHGpM5F5le/+lX5dqdOnThw4AAnTpygZcuW5U8uiciPvvsOxo+H0lK4+27QG+JFRJyqTnNk7HY7fn5+pKamVtrfqlWrepWYmTNnYrPZKn117dq1/PMhQ4ZU+fyBBx6o888RMaKgwJrc+8MP0KcPvPaaJveKiDhZna7I+Pv7Ex0d7dS1YuLi4li/fn1FoJ+8Qfvee+/lmWeeKf8+SBMkxRM4HNYVmH374KKLYNUqCAw0nUpEpNGp862l6dOn8+STT/Lee+/RqlWrCw/g50fbtm1r/DwoKOi8n4u4pRdesCb1+vvDypXQrp3pRCIijVKdi8yrr75KWloakZGRxMTE0KxZs0qff/nll3Ua7+DBg0RGRhIYGEj//v1JSkoiOjq6/PNFixbx/vvv07ZtW+Lj43n66afPe1WmuLiY4uLi8u/z8/MB67aY3W6vU7bzKRvLmWNK42Bbuxbf6dOxAWdffhlHnz7W263F6XQeipjlynOwtmPWuciMHDmyrn9Ljfr27cu7775LbGws2dnZJCYmMnDgQFJTUwkODubWW28lJiaGyMhIvvrqK37/+9/zzTffsGrVqhrHTEpKIjExscr+devWueS2VHJystPHFM/VLDOTwVOnYnM4+P666/gqMhI++cR0rEZP56GIWa44B4uKimp1nM1R9rIkN5Cbm0tMTAyzZ89mwoQJVT7fsGED1157LWlpaVxyySXVjlHdFZmoqChycnIICQlxWla73U5ycjJDhw7F39/faeOKB8vPx2/AAGwHDlB61VWUrFsHTZqYTtWo6TwUMcuV52B+fj5hYWHk5eWd98/ver/92hVCQ0Pp0qULaWlp1X7et29fgPMWmYCAAAICAqrs9/f3d8kvOleNKx6mtBR+8xs4cADatcNn5Up8fnLbVVxH56GIWa44B2s7Xp1fUeDj44Ovr2+NXxeioKCA9PR0IiIiqv18z549ADV+LmLMM8/ARx9BQID1+gFNUBcRaRB1viKzevXqSt/b7XZ2797NwoULq52bcj5Tpkw
hPj6emJgYsrKymDFjBr6+viQkJJCens7ixYu5/vrrad26NV999RWPPvoogwYNokePHnWNLeI6a9ZA2X/78+dD795G44iIeJM6F5kRI0ZU2Td27Fji4uJYunRptXNbapKRkUFCQgLHjx8nPDycAQMGkJKSQnh4OKdPn2b9+vXMnTuXwsJCoqKiGDNmDE899VRdI4u4zv79cMcd1vbEifDrXxuNIyLibZw2R6Zfv37cd999dfp7lixZUuNnUVFRbNq06UJjibhObi6MGGGt4DtkCMyaZTqRiIjXqfMcmeqcOnWKV155hXZa9Eu8RUkJ3HorpKVZb7Jetsxa/E5ERBpUna/I/PTlkA6Hg5MnTxIUFMT777/v1HAibuvpp2HtWmja1JrcGx5uOpGIiFeqc5GZM2dOpSLj4+NDeHg4ffv2pWXLlk4NJ+KWli2DpCRr++23oVcvs3lERLxYnYvMrzWZUbzZ3r3WyyABpk6FhASzeUREvFyd58gsWLCA5cuXV9m/fPlyFi5c6JRQIm7p+HEYORKKimDYsIqrMiIiYkydi0xSUhJhYWFV9rdp04bnn3/eKaFE3M7ZszB+PBw6BB07wgcfwAUuACkiIheuzkXm8OHDXHzxxVX2x8TEcPjwYaeEEnE7v/89fPopNGtmLYDXqpXpRCIiQj2KTJs2bfjqq6+q7N+7dy+tW7d2SigRt/L++zB7trW9cCF07242j4iIlKtzkUlISGDixIl89tlnlJSUUFJSwoYNG5g0aRK33HKLKzKKmLNrF9x7r7U9fTqMGWM2j4iIVFLnp5b+93//l0OHDnHttdfi52f97aWlpdx5552aIyONy7FjMGoUnD4NN95ovRhSRETcSp2LTJMmTVi6dCnPPvsse/bsoWnTpnTv3p2YmBhX5BMxw26Hm2+GI0cgNta6veTjlIWwRUTEier9rqXOnTvTuXNnZ2YRcR+PPgqbN0NwsDW5t0UL04lERKQadf6/mGPGjOGPf/xjlf0vvvgiN998s1NCiRj19tswb561vWgRdO1qNo+IiNSozkVm8+bNXH/99VX2Dx8+nM2bNzsllIgxKSnw4IPW9jPPQHy82TwiInJedS4yBQUFNGnSpMp+f39/8vPznRJKxIjsbBg9Gs6csSb5Tp9uOpGIiPyMOheZ7t27s3Tp0ir7lyxZQrdu3ZwSSqTBFRdbj1ZnZ0NcnLVejCb3ioi4vTpP9n366acZPXo06enpXHPNNQB8+umnLF68mBUrVjg9oIjLORzw8MOwYweEhlqTe4ODTacSEZFaqHORiY+PZ82aNTz//POsWLGCpk2b0rNnTzZs2EArLdsunugvf4G33rKuwCxZAp06mU4kIiK1VK/Hr2+44QZuuOEGAPLz8/nggw+YMmUKu3btoqSkxKkBRVxqyxZ45BFrOykJfvUrs3lERKRO6j0JYPPmzdx1111ERkbypz/9iWuuuYaUlBRnZhNxrSNHYOzYijdbT51qOpGIiNRRna7IHD16lHfffZe3336b/Px8xo0bR3FxMWvWrNFEX/Esp05ZTyYdOwY9e1prx9hsplOJiEgd1fqKTHx8PLGxsXz11VfMnTuXrKws/vznP7sym4hrOBzwwAPWCyFbt7Ym9zZrZjqViIjUQ62vyKxdu5aJEyfy29/+Vq8mEM/2yivw17+Cry8sXQodOphOJCIi9VTrKzJbt27l5MmTXHnllfTt25dXX32VnJwcV2YTcb4NG+Dxx63tWbPg2mvN5hERkQtS6yLTr18/3nzzTbKzs7n//vtZsmQJkZGRlJaWkpyczMmTJ12ZU+TCHToE48ZBSQnceSdMmmQ6kYiIXKA6P7XUrFkzfvOb37B161b27dvH448/zgsvvECbNm246aabXJFR5MIVFcHIkXD8OPziFzB/vib3iog0Ahe0BntsbCwvvvgiGRkZfPDBB87KJOJcDgf85jewdy+0aQOrVkHTpqZTiYiIEzjlZTK+vr6MHDmSjz76yBnDiTj
XSy9Zk3r9/GDFCoiKMp1IREScRG/Fk8bt73+HadOs7VdegYEDzeYRERGnUpGRxistDRISrFtL99xjrR0jIiKNioqMNE4nT1qTe3NzoX9/ePVVTe4VEWmEVGSk8Skthbvugq+/hogIWLkSAgJMpxIRERdQkZHG5/nnYfVqaNLEekIpIsJ0IhERcREVGWlc/vY3+MMfrO3XX4d+/czmERERlzJaZGbOnInNZqv01bVr1yrHORwOhg8fjs1mY82aNQ0fVDzDgQNw223W5N6HHrLWjhERkUat1i+NdJW4uDjWr19f/r2fX9VIc+fOxaaJmnI+eXkwYoQ1yXfQIJgzx3QiERFpAMaLjJ+fH23btq3x8z179vCnP/2JnTt3EqG5DlKd0lLrSsy330L79rB8Ofj7m04lIiINwHiROXjwIJGRkQQGBtK/f3+SkpKIjo4GoKioiFtvvZV58+adt+ycq7i4mOLi4vLv8/PzAbDb7djtdqflLhvLmWNK/fjMmIHvxx/jCAzk7IoV0LIl6H8Xr6DzUMQsV56DtR3T5nA4HE7/6bW0du1aCgoKiI2NJTs7m8TERDIzM0lNTSU4OJj777+fkpIS3nrrLSuszcbq1asZOXJkjWPOnDmTxMTEKvsXL15MUFCQq/5RxJCI7dvp8+KLAOyaPJmMIUPMBhIREacou5iRl5dHSEhIjccZLTI/lZubS0xMDLNnzyY8PJzHH3+c3bt307x5c6B2Raa6KzJRUVHk5OSc919EXdntdpKTkxk6dCj+uo1hRmoqfgMHYisspGTyZEp/LDTiPXQeipjlynMwPz+fsLCwny0yxm8tnSs0NJQuXbqQlpbGvn37SE9PJzQ0tNIxY8aMYeDAgWzcuLHaMQICAgioZvEzf39/l/yic9W48jNOnICxY6GwEK69Ft+XXsK3moni4h10HoqY5YpzsLbjudVv/oKCAtLT07njjjsYN24c99xzT6XPu3fvzpw5c4iPjzeUUNxCSYn1DqXvvoMOHSrebC0iIl7H6G//KVOmEB8fT0xMDFlZWcyYMQNfX18SEhIIDw+vdoJvdHQ0F198sYG04jaeeALWrYOgIFizBlq3Np1IREQMMVpkMjIySEhI4Pjx44SHhzNgwABSUlIIDw83GUvc2QcfwEsvWdsLFkDPnmbziIiIUUaLzJIlS+p0vBvNSxYTdu+GCROs7WnTYNw4s3lERMQ4vWtJPMN//wsjR8KpUzB8ODz7rOlEIiLiBlRkxP3Z7TB+PBw+DJ07w+LF4OtrOpWIiLgBFRlxf1OnwmefQfPm1uTenzySLyIi3ktFRtzbwoXw8svW9nvvQbduZvOIiIhbUZER9/XFF3D//db2jBnWHBkREZFzqMiIezp6FEaPhuJiuOkm+MMfTCcSERE3pCIj7ufMGev1A5mZ0LWrdUvJR/+piohIVfrTQdzPpEmwbRuEhMCHH1p/FRERqYaKjLiXN96A+fPBZrNW8e3SxXQiERFxYyoy4j62b4eHH7a2n3sOrr/ebB4REXF7KjLiHjIzYcwYa/G7sWOtVxCIiIj8DBUZMe/0aesJpaNHoXt362WQNpvpVCIi4gFUZMQshwMefNBaM6ZlS2vl3ubNTacSEREPoSIjZs2bZ12B8fGBpUuhY0fTiURExIOoyIg5GzfC5MnW9osvwtChJtOIiIgHUpERMw4fhptvhpISuPVWeOwx04lERMQDqchIwysqst6blJMDvXrBm29qcq+IiNSLiow0LIcD7rsPdu+G8HBrcm9QkOlUIiLioVRkpGHNmQOLFoGvLyxfDtHRphOJiIgHU5GRhrN+PUydam3PnQuDBxuNIyIink9FRhrGd9/B+PFQWgp33w0PPWQ6kYiINAIqMuJ6BQXW5N4TJ6BPH3jtNU3uFRERp1CREddyOKwrMPv2wUUXwapVEBhoOpWIiDQSKjLiWi+8ACtWgL+/VWLatTOdSEREGhEVGXGdTz6B6dOt7Xnz4KqrzOYREZFGR0V
GXOPbb60Vex0OeOABuPde04lERKQRUpER58vPtyb35uXBL38JL79sOpGIiDRSKjLiXKWlcOed8K9/WfNhVqyAJk1MpxIRkUZKRUac65ln4MMPISAAVq+Gtm1NJxIRkUZMRUacZ80aSEy0tufPh969jcYREZHGT0VGnGP/frjjDmt74kT49a+NxhEREe+gIiMXLjcXRoywVvC9+mqYNct0IhER8RIqMnJhSkqsx6zT0iAmBpYutRa/ExERaQAqMnJhnn4a1q6Fpk2tyb3h4aYTiYiIFzFaZGbOnInNZqv01bVr1/LP77//fi655BKaNm1KeHg4I0aM4MCBAwYTSyXLlkFSkrX99tvQq5fZPCIi4nWMX5GJi4sjOzu7/Gvr1q3ln1155ZUsWLCAf/3rX/zjH//A4XAwbNgwSkpKDCYWAL76ynoZJMDUqZCQYDaPiIh4JT/jAfz8aFvDWiP33Xdf+XaHDh149tln6dmzJ4cOHeKSSy5pqIjyU8ePWyv3FhXBsGEVV2VEREQamPEic/DgQSIjIwkMDKR///4kJSURHR1d5bjCwkIWLFjAxRdfTFRUVI3jFRcXU1xcXP59fn4+AHa7Hbvd7rTcZWM5c0yPcPYsvuPG4fP99zg6duTsX/9qreZbWmo6mXghrz0PRdyEK8/B2o5pczgcDqf/9Fpau3YtBQUFxMbGkp2dTWJiIpmZmaSmphIcHAzAa6+9xu9+9zsKCwuJjY3l448/Pu/VmJkzZ5JYtijbORYvXkxQUJDL/lm8Rdw779Dpo484GxjI5j/+kZMxMaYjiYhII1RUVMStt95KXl4eISEhNR5ntMj8VG5uLjExMcyePZsJEyYAkJeXx7Fjx8jOzmbWrFlkZmaybds2AgMDqx2juisyUVFR5OTknPdfRF3Z7XaSk5MZOnQo/l7yuLFt0SL8fpwXc3bpUhyjRhlOJN7OG89DEXfiynMwPz+fsLCwny0yxm8tnSs0NJQuXbqQlpZWvq9Fixa0aNGCzp07069fP1q2bMnq1atJqGFyaUBAAAEBAVX2+/v7u+QXnavGdTu7dsFvf2ttP/UUfuPGmc0jcg6vOQ9F3JQrzsHajmf8qaVzFRQUkJ6eTkRERLWfOxwOHA5HpSsu0gCOHYNRo+D0abjxxor3KYmIiBhmtMhMmTKFTZs2cejQIbZv386oUaPw9fUlISGB7777jqSkJHbt2sXhw4fZvn07N998M02bNuX66683Gdu72O1w881w5AjExsL774OPW/VfERHxYkZvLWVkZJCQkMDx48cJDw9nwIABpKSkEB4ejt1uZ8uWLcydO5cffviBiy66iEGDBrF9+3batGljMrZ3eewx2LwZgoOtt1u3aGE6kYiISDmjRWbJkiU1fhYZGcknn3zSgGmkinfegVdftbYXLYJzVl0WERFxB7pHINVLSamY3PvMMxAfbzaPiIhINVRkpKrsbBg9Gs6csSb5Tp9uOpGIiEi1VGSksuJiGDPGKjNxcbBwoSb3ioiI29KfUFLB4YCHH4YdOyA01Jrc++MKyyIiIu5IRUYq/OUv8NZb1hWYJUugUyfTiURERM5LRUYsW7bAI49Y20lJ8Ktfmc0jIiJSCyoyAhkZMHYsnD0L48fD1KmmE4mIiNSKioy3O3XKejLp2DHo2RPefhtsNtOpREREakVFxps5HPDAA7BzJ7RubU3ubdbMdCoREZFaU5HxZq+8An/9K/j6wrJl0KGD6UQiIiJ1oiLjrTZsgMcft7b/9Ce45hqzeUREROpBRcYbHToE48ZBSQnceSdMnGg6kYiISL2oyHiboiIYORKOH4df/ALmz9fkXhER8VgqMt7E4YAJE2DvXmjTBlatgqZNTacSERGpNxUZbzJrlrVir58frFgBUVGmE4mIiFwQFRlv8Y9/wLRp1vYrr8DAgWbziIiIOIGKjDdIS4NbboHSUrjnHmvtGBERkUZARaaxO3nSmtybmwv9+8Orr2pyr4iINBoqMo1ZaSn
cdRd8/TVERMDKlRAQYDqViIiI06jINGbPPw+rV0OTJtYTShERphOJiIg4lYpMY/W3v8Ef/mBtv/469OtnNo+IiIgLqMg0RgcOwO23W+vGPPQQ/OY3phOJiIi4hIpMY5OXZ03uzc+HQYNgzhzTiURERFxGRaYxKS2F226Db76B9u1h+XLw9zedSkRExGVUZBqTGTPg448hMBDWrLFeQyAiItKIqcg0FitXwrPPWttvvglXXmk2j4iISANQkWkMUlOt9WIAHnvMmugrIiLiBVRkPN2JEzBiBBQWwrXXwh//aDqRiIhIg1GR8WQlJZCQAN99Bx06wNKl1putRUREvISKjCd78klYtw6CgqzJva1bm04kIiLSoFRkPNUHH8CLL1rbCxZAz55m84iIiBigIuOJdu+GCROs7WnTYNw4s3lEREQMUZHxNP/9r7Vy76lTMHx4xSPXIiIiXkhFxpPY7TB+PBw+DJ07w+LF4OtrOpWIiIgxRovMzJkzsdlslb66du0KwIkTJ3jkkUeIjY2ladOmREdHM3HiRPLy8kxGNmvqVPjsM2je3JrcGxpqOpGIiIhRxp/VjYuLY/369eXf+/34+HBWVhZZWVnMmjWLbt268e9//5sHHniArKwsVqxYYSquOQsXwssvW9vvvQfdupnNIyIi4gaMFxk/Pz/atm1bZf9ll13GypUry7+/5JJLeO6557j99ts5e/ZseeH5qeLiYoqLi8u/z8/PB8But2O3252Wu2wsZ45ZE9vOnfjefz82oOSppyi94QbrNpOIl2vI81BEqnLlOVjbMY0XmYMHDxIZGUlgYCD9+/cnKSmJ6Ojoao/Ny8sjJCSkxhIDkJSURGJiYpX969atIygoyGm5yyQnJzt9zHMF5OYy+PHH8SsuJrtPH7644gr45BOX/kwRT+Pq81BEzs8V52BRUVGtjrM5HA6H0396La1du5aCggJiY2PJzs4mMTGRzMxMUlNTCQ4OrnRsTk4OV155JbfffjvPPfdcjWNWd0UmKiqKnJwcQkJCnJbdbreTnJzM0KFD8ff3d9q4lZw5g++wYfhs344jNpaz27aBE/8ZRDxdg5yHIlIjV56D+fn5hIWFlV/EqInRKzLDhw8v3+7Rowd9+/YlJiaGZcuWMaFsnRSsf5gbbriBbt26MXPmzPOOGRAQQEBAQJX9/v7+LvlF56pxAZg4EbZvhxYtsH30Ef5auVekWi49D0XkZ7niHKzteG71+HVoaChdunQhLS2tfN/Jkye57rrrCA4OZvXq1d7zy+qNN2D+fLDZrMesu3QxnUhERMTtuFWRKSgoID09nYiICMC6EjNs2DCaNGnCRx99RGBgoOGEDWT7dnj4YWv7uefg+uvN5hEREXFTRovMlClT2LRpE4cOHWL79u2MGjUKX19fEhISyktMYWEhb7/9Nvn5+Rw9epSjR49SUlJiMrZrZWbCmDHWU0ljx1qvIBAREZFqGZ0jk5GRQUJCAsePHyc8PJwBAwaQkpJCeHg4Gzdu5PPPPwegU6dOlf6+77//ng4dOhhI7GKnT8Po0XD0KHTvbr0M0mYznUpERMRtGS0yS5YsqfGzIUOGYPCBqobncMBDD8EXX0DLltbKvc2bm04lIiLi1txqjoxXe+01eOcd8PGBpUuhY0fTiURERNyeiow72LQJJk+2tl98EYYONRpHRETEU6jImHb4sDWp9+xZuO02eOwx04lEREQ8hoqMSUVFMHIk5OTAFVfAm29qcq+IiEgdqMiY4nDAfffB7t0QHg6rV0PTpqZTiYiIeBQVGVPmzIFFi8DXF5YvhxpelCkiIiI1U5ExYf16mDrV2p47FwYPNhpHRETEU6nINLTvvoPx46G0FO6+21o7RkREROpFRaYhFRZak3tPnIA+fay1YzS5V0REpN5UZBqKw2Fdgdm3D9q2hVWrwFtegikiIuIiKjIN5YUXrEm9/v6wciW0a2c6kYiIiMdTkWkIn3wC06db2/PmwVVXmc0jIiL
SSKjIuNq338Ktt1q3lh54AO6913QiERGRRkNFxpXy863JvXl58Mtfwssvm04kIiLSqKjIuEppKdx5J/zrX9Z8mBUroEkT06lEREQaFRUZV/nf/4UPP4SAAOv1A23bmk4kIiLS6KjIuMKHH8LMmdb2/PnQu7fROCIiIo2Vioyz7d8Pt99ubU+cCL/+tdE4IiIijZmKjDPl5sKIEVBQAFdfDbNmmU4kIiLSqKnIOEtJifWYdVoaxMTA0qXW4nciIiLiMioyzvL007B2LTRtak3uDQ83nUhERKTR8zMdwCOVlGDbtIl2mzdja9YMcnIgKcn67O23oVcvs/lERES8hIpMXa1aBZMm4ZeRwS8AZs+ueIP11KmQkGAynYiIiFdRkamLVatg7FjrdQPnKvu+T5+GzyQiIuLFNEemtkpKYNKkqiWmjM0Gjz1mHSciIiINQkWmtrZsgYyMmj93OODIEes4ERERaRAqMrWVne3c40REROSCqcjUVkSEc48TERGRC6YiU1sDB0L79hVPKP2UzQZRUdZxIiIi0iBUZGrL1xdeftna/mmZKft+7lzrOBEREWkQKjJ1MXo0rFgB7dpV3t++vbV/9GgzuURERLyU1pGpq9GjYcQIzn72GXvWruXy4cPxu/pqXYkRERExQEWmPnx9cQweTGZhIT0HD1aJERERMUS3lkRERMRjGS0yM2fOxGazVfrq2rVr+edvvPEGQ4YMISQkBJvNRm5urrmwIiIi4naMX5GJi4sjOzu7/Gvr1q3lnxUVFXHdddfx5JNPGkwoIiIi7sr4HBk/Pz/atm1b7WeTJ08GYOPGjbUer7i4mOLi4vLv8/PzAbDb7djt9nrn/KmysZw5pojUjc5DEbNceQ7WdkzjRebgwYNERkYSGBhI//79SUpKIjo6ut7jJSUlkZiYWGX/unXrCAoKupCo1UpOTnb6mCJSNzoPRcxyxTlYVFRUq+NsDkdNr3N2vbVr11JQUEBsbCzZ2dkkJiaSmZlJamoqwcHB5cdt3LiRq6++mh9++IHQ0NDzjlndFZmoqChycnIICQlxWna73U5ycjJDhw7F39/faeOKSO3pPBQxy5XnYH5+PmFhYeTl5Z33z2+jV2SGDx9evt2jRw/69u1LTEwMy5YtY8KECfUaMyAggICAgCr7/f39XfKLzlXjikjt6TwUMcsV52BtxzM+2fdcoaGhdOnShbS0NNNRRERExAO4VZEpKCggPT2dCL1BWkRERGrB6K2lKVOmEB8fT0xMDFlZWcyYMQNfX18SEhIAOHr0KEePHi2/QrNv3z6Cg4OJjo6mVatWtfoZZVOAyp5echa73U5RURH5+fm6pC1iiM5DEbNceQ6W/bn9s1N5HQaNHz/eERER4WjSpImjXbt2jvHjxzvS0tLKP58xY4YDqPK1YMGCWv+MI0eOVDuGvvSlL33pS1/6cv+vI0eOnPfPeaNPLTWE0tJSsrKyCA4OxmazOW3csqehjhw54tSnoUSk9nQeipjlynPQ4XBw8uRJIiMj8fGpeSaM8XVkXM3Hx4f27du7bPyQkBD9AhUxTOehiFmuOgdbtGjxs8e41WRfERERkbpQkRERERGPpSJTTwEBAcyYMaPaxfdEpGHoPBQxyx3OwUY/2VdEREQaL12REREREY+lIiMiIiIeS0VGREREPJaKjIiIiHgsFZl62Lx5M/Hx8URGRmKz2VizZo3pSCJe5fXXX6dHjx7li3D179+ftWvXmo4l4jVmzpyJzWar9NW1a1cjWVRk6qGwsJCePXsyb94801FEvFL79u154YUX2LVrFzt37uSaa65hxIgRfP3116ajiXiNuLg4srOzy7+2bt1qJEejf0WBKwwfPpzhw4ebjiHiteLj4yt9/9xzz/H666+TkpJCXFycoVQi3sXPz4+2bduajqErMiLi2UpKSliyZAmFhYX079/fdBwRr3Hw4EEiIyPp2LEjt912G4cPHzaSQ1dkRMQj7du3j/79+3P69GmaN2/
O6tWr6datm+lYIl6hb9++vPvuu8TGxpKdnU1iYiIDBw4kNTWV4ODgBs2ilX0vkM1mY/Xq1YwcOdJ0FBGvcubMGQ4fPkxeXh4rVqzgrbfeYtOmTSozIgbk5uYSExPD7NmzmTBhQoP+bF2RERGP1KRJEzp16gTAlVdeyT//+U9efvll/vKXvxhOJuJ9QkND6dKlC2lpaQ3+szVHRkQahdLSUoqLi03HEPFKBQUFpKenExER0eA/W1dk6qGgoKBS6/z+++/Zs2cPrVq1Ijo62mAyEe/wxBNPMHz4cKKjozl58iSLFy9m48aN/OMf/zAdTcQrTJkyhfj4eGJiYsjKymLGjBn4+vqSkJDQ4FlUZOph586dXH311eXfP/bYYwDcddddvPvuu4ZSiXiPY8eOceedd5KdnU2LFi3o0aMH//jHPxg6dKjpaCJeISMjg4SEBI4fP054eDgDBgwgJSWF8PDwBs+iyb4iIiLisTRHRkRERDyWioyIiIh4LBUZERER8VgqMiIiIuKxVGRERETEY6nIiIiIiMdSkRERERGPpSIjIiIiHktFRkQ8zpAhQ5g8ebLpGCLiBlRkRERExGOpyIiIiIjHUpEREY/38ccf06JFCxYtWmQ6iog0ML39WkQ82uLFi3nggQdYvHgxN954o+k4ItLAdEVGRDzWvHnzePDBB/nb3/6mEiPipXRFRkQ80ooVKzh27Bjbtm2jd+/epuOIiCG6IiMiHqlXr16Eh4fzzjvv4HA4TMcREUNUZETEI11yySV89tlnfPjhhzzyyCOm44iIIbq1JCIeq0uXLnz22WcMGTIEPz8/5s6dazqSiDQwFRkR8WixsbFs2LCBIUOG4Ovry5/+9CfTkUSkAdkcurksIiIiHkpzZERERMRjqciIiIiIx1KREREREY+lIiMiIiIeS0VGREREPJaKjIiIiHgsFRkRERHxWCoyIiIi4rFUZERERMRjqciIiIiIx1KREREREY/1//OmGOTOmg+3AAAAAElFTkSuQmCC", "text/plain": [ "
" ] diff --git a/llments/lm/base/api.py b/llments/lm/base/api.py index 8cb7f57..4872fbb 100644 --- a/llments/lm/base/api.py +++ b/llments/lm/base/api.py @@ -6,7 +6,8 @@ from llments.lm.lm import LanguageModel from litellm import completion, batch_completion, ModelResponse -class APIBasedLM(): + +class APIBasedLM: """Base class for API-Based Language Models. Represents a language model that interacts with an API for generating responses. @@ -52,8 +53,8 @@ def generate( max_length: int | None = None, max_new_tokens: int | None = None, temperature: float = 1.0, - num_return_sequences: int = 1 - ) -> list[str]: + num_return_sequences: int = 1, + ) -> list[str]: """Generate a response based on the given prompt. This method sends a prompt to the language model API and retrieves @@ -67,77 +68,94 @@ def generate( max_length (int): The maximum length of the output sequence, (defaults to model max). max_new_tokens (float): The maximum number of tokens to generate in the chat completion. - temperature (float): The sampling temperature to be used, between 0 and 2. + temperature (float): The sampling temperature to be used, between 0 and 2. num_return_sequences (int): The number of chat completion choices to generate for each input message. Returns: ModelResponse: The generated response object from the language model. 
""" if condition is not None: - warnings.warn("A non-default value for 'condition' was provided.", UserWarning) + warnings.warn( + "A non-default value for 'condition' was provided.", UserWarning + ) if do_sample: - warnings.warn("A non-default value for 'do_sample' was provided.", UserWarning) + warnings.warn( + "A non-default value for 'do_sample' was provided.", UserWarning + ) if max_length is not None: - warnings.warn("A non-default value for 'max_length' was provided.", UserWarning) - + warnings.warn( + "A non-default value for 'max_length' was provided.", UserWarning + ) + responses = [] response = completion( - model = self.model_name, - temperature = temperature, - max_tokens = max_new_tokens, - n = num_return_sequences, - messages=[{"content": message, "role": "user"}] + model=self.model_name, + temperature=temperature, + max_tokens=max_new_tokens, + n=num_return_sequences, + messages=[{"content": message, "role": "user"}], ) - for choice in response['choices']: - responses.append(choice['message']['content']) + for choice in response["choices"]: + responses.append(choice["message"]["content"]) return responses @abc.abstractmethod def chat_generate( self, - messages: list[str], - condition: str | None, + messages: list[list[dict[str, str]]], + condition: str | None = None, do_sample: bool = False, max_length: int | None = None, max_new_tokens: int | None = None, temperature: float = 1.0, - num_return_sequences: int = 1 - ) -> list[str]: + num_return_sequences: int = 1, + ) -> list[list[str]]: """Generate responses to multiple prompts using the batch_completion function. This method sends multiple prompts to the language model API and retrieves the generated response for each of the prompts. Args: - messages (list): The list of prompts for generating responses. + messages (list of list): The list of prompts, where each prompt contains a sequence of messages + with roles (either 'system' or 'user') and their corresponding content. 
condition (str): The conditioning sequence for the output. If None, the output is not conditioned. do_sample (bool): Whether to use sampling or greedy decoding. max_length (int): The maximum length of the output sequence, (defaults to model max). max_new_tokens (float): The maximum number of tokens to generate in the chat completion. - temperature (float): The sampling temperature to be used, between 0 and 2. + temperature (float): The sampling temperature to be used, between 0 and 2. num_return_sequences (int): The number of chat completion choices to generate for each input message. Returns: list: List of responses generated by the language model for all the prompts. """ if condition is not None: - warnings.warn("A non-default value for 'condition' was provided.", UserWarning) + warnings.warn( + "A non-default value for 'condition' was provided.", UserWarning + ) if do_sample: - warnings.warn("A non-default value for 'do_sample' was provided.", UserWarning) + warnings.warn( + "A non-default value for 'do_sample' was provided.", UserWarning + ) if max_length is not None: - warnings.warn("A non-default value for 'max_length' was provided.", UserWarning) - + warnings.warn( + "A non-default value for 'max_length' was provided.", UserWarning + ) + responses = batch_completion( - model = self.model_name, - temperature = temperature, - max_tokens = max_new_tokens, - n = num_return_sequences, - messages=[[{"content": content, "role": "user"}] for content in messages] + model=self.model_name, + temperature=temperature, + max_tokens=max_new_tokens, + n=num_return_sequences, + messages=messages, ) - return [response['choices'][0]['message']['content'] for response in responses] - + + return [ + [choice["message"]["content"] for choice in response["choices"]] + for response in responses + ] + @abc.abstractmethod def set_seed(self, seed: int) -> None: """Set the seed for the language model. 
From 1115ec5ca59980689e473ede3d003e7b5c7eb091 Mon Sep 17 00:00:00 2001 From: Rohan Modi Date: Thu, 5 Sep 2024 12:22:13 -0400 Subject: [PATCH 2/7] Fix ruff errors --- examples/llm_fair_eval/FairEval.ipynb | 83 +++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/examples/llm_fair_eval/FairEval.ipynb b/examples/llm_fair_eval/FairEval.ipynb index 871309e..615952a 100644 --- a/examples/llm_fair_eval/FairEval.ipynb +++ b/examples/llm_fair_eval/FairEval.ipynb @@ -41,6 +41,18 @@ "outputs": [], "source": [ "def gen_prompt(ques, ans1, ans2):\n", + " \"\"\"\n", + " Generates a prompt that compares two AI assistants' answers to a question.\n", + "\n", + " Args:\n", + " ques (str): The question being asked.\n", + " ans1 (str): The first assistant's answer to the question.\n", + " ans2 (str): The second assistant's answer to the question.\n", + "\n", + " Returns:\n", + " str: A formatted prompt including the question, both answers, and instructions \n", + " for evaluation (how to score both assistants).\n", + " \"\"\"\n", " prompt_template = \"[Question]\\n{question}\\n\\n[The Start of Assistant 1's Answer]\\n{answer_1}\\n[The End of Assistant 1's Answer]\\n\\n[The Start of Assistant 2's Answer]\\n{answer_2}\\n[The End of Assistant 2's Answer]\\n\\n[System]\\n{prompt}\\n\"\n", " default_prompt = \"\"\"We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\n", " Please rate the helpfulness, relevance, accuracy, level of details of their responses. 
\n", @@ -56,6 +68,18 @@ " return prompt_template.format(question=ques, answer_1=ans1, answer_2=ans2, prompt=default_prompt)\n", "\n", "def query_gpt(system_prompt, user_prompts, eval_model, num_sequences):\n", + " \"\"\"\n", + " Queries language model API with the provided prompts.\n", + "\n", + " Args:\n", + " system_prompt (str): The system-level prompt setting the context for the responses.\n", + " user_prompts (list): A list of prompts (for the user part of the interaction).\n", + " eval_model (str): The name of the model to be queried.\n", + " num_sequences (int): The number of response sequences to generate for each input.\n", + "\n", + " Returns:\n", + " list: A list of responses generated by the language model.\n", + " \"\"\"\n", " try:\n", " responses = APIBasedLM(eval_model).chat_generate(\n", " messages=[[{\"role\": \"system\", \"content\": system_prompt}, {\"role\": \"user\", \"content\": user_prompt}] for user_prompt in user_prompts],\n", @@ -69,6 +93,21 @@ " raise RuntimeError(f\"Failed during query processing.\")\n", " \n", "def get_eval(question_jsons, answer1_jsons, answer2_jsons, eval_model, bpc, k):\n", + " \"\"\"\n", + " Evaluates multiple questions and their corresponding answers from two assistants, \n", + " optionally performing BPC.\n", + "\n", + " Args:\n", + " question_jsons (list): A list of question JSON objects.\n", + " answer1_jsons (list): A list of JSON objects for the first assistant's answers.\n", + " answer2_jsons (list): A list of JSON objects for the second assistant's answers.\n", + " eval_model (str): The model to be used for generating responses.\n", + " bpc (bool): If True, perform BPC (swapping answers).\n", + " k (int): Number of response sequences to generate.\n", + "\n", + " Returns:\n", + " list: A list of evaluation results for each question and the corresponding scores.\n", + " \"\"\"\n", " system_prompt = 'You are a helpful and precise assistant for checking the quality of the answer.'\n", " user_prompts = []\n", " 
user_prompts_bpc = []\n", @@ -140,6 +179,16 @@ " return results\n", "\n", "def parse_score_from_review(review):\n", + " \"\"\"\n", + " Parses the score for two assistants from the review text.\n", + "\n", + " Args:\n", + " review (str): The review text that includes the scores.\n", + "\n", + " Returns:\n", + " list: A list containing the scores for Assistant 1 and Assistant 2.\n", + " If parsing fails, returns [-1, -1].\n", + " \"\"\"\n", " try:\n", " score1 = review.split(\"\\n\")[-2]\n", " score2 = review.split(\"\\n\")[-1]\n", @@ -150,6 +199,15 @@ " return [-1, -1]\n", " \n", "def get_json_list(file_path):\n", + " \"\"\"\n", + " Reads a JSON lines file and returns a list of JSON objects.\n", + "\n", + " Args:\n", + " file_path (str): Path to the JSONL file.\n", + "\n", + " Returns:\n", + " list: A list of JSON objects from the file.\n", + " \"\"\"\n", " file_path = os.path.expanduser(file_path)\n", " with open(file_path, \"r\") as f:\n", " json_list = []\n", @@ -158,6 +216,20 @@ " return json_list\n", " \n", "def get_results(m1, m2, eval_model, bpc=0, k=1):\n", + " \"\"\"\n", + " Retrieves and processes results for multiple questions and answers from two assistants,\n", + " performs evaluation, and writes the results to a file.\n", + "\n", + " Args:\n", + " m1 (str): Identifier for the first model or assistant.\n", + " m2 (str): Identifier for the second model or assistant.\n", + " eval_model (str): The evaluation model to be used.\n", + " bpc (bool): If True, perform back-and-forth comparisons.\n", + " k (int): Number of response sequences to generate.\n", + "\n", + " Returns:\n", + " None\n", + " \"\"\"\n", " question_jsons = get_json_list(\"question.jsonl\")\n", " answer1_jsons = get_json_list(f\"answer/answer_{m1}.jsonl\")\n", " answer2_jsons = get_json_list(f\"answer/answer_{m2}.jsonl\")\n", @@ -501,6 +573,17 @@ "outputs": [], "source": [ "def get_MEC_BPC_results(k):\n", + " \"\"\"\n", + " Compares the evaluation results between two language models\n", + " 
based on automatic evaluations (MEC + BPC) and human annotations.\n", + "\n", + " Args:\n", + " k (int): The number of response sequences to generate for MEC\n", + " and BPC during the model evaluation.\n", + "\n", + " Returns:\n", + " float: The percentage accuracy of the model's evaluations in terms of closeness to human annotations.\n", + " \"\"\"\n", " gpt35_vs_vicuna13b_results = []\n", "\n", " with open(f'review/review_gpt35_vs_vicuna-13b_eval=gpt-3.5-turbo-0301_mec={k}_bpc=1.json', 'r') as file:\n", From b720c53f685deb86688256f18e64b6e9b3fa1db3 Mon Sep 17 00:00:00 2001 From: Rohan Modi Date: Thu, 5 Sep 2024 12:27:30 -0400 Subject: [PATCH 3/7] Fix ruff errors --- examples/llm_fair_eval/FairEval.ipynb | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/examples/llm_fair_eval/FairEval.ipynb b/examples/llm_fair_eval/FairEval.ipynb index 615952a..f5eb4bf 100644 --- a/examples/llm_fair_eval/FairEval.ipynb +++ b/examples/llm_fair_eval/FairEval.ipynb @@ -41,8 +41,7 @@ "outputs": [], "source": [ "def gen_prompt(ques, ans1, ans2):\n", - " \"\"\"\n", - " Generates a prompt that compares two AI assistants' answers to a question.\n", + " \"\"\"Generates a prompt that compares two AI assistants' answers to a question.\n", "\n", " Args:\n", " ques (str): The question being asked.\n", @@ -68,8 +67,7 @@ " return prompt_template.format(question=ques, answer_1=ans1, answer_2=ans2, prompt=default_prompt)\n", "\n", "def query_gpt(system_prompt, user_prompts, eval_model, num_sequences):\n", - " \"\"\"\n", - " Queries language model API with the provided prompts.\n", + " \"\"\"Queries language model API with the provided prompts.\n", "\n", " Args:\n", " system_prompt (str): The system-level prompt setting the context for the responses.\n", @@ -93,8 +91,7 @@ " raise RuntimeError(f\"Failed during query processing.\")\n", " \n", "def get_eval(question_jsons, answer1_jsons, answer2_jsons, eval_model, bpc, k):\n", - " \"\"\"\n", - " Evaluates 
multiple questions and their corresponding answers from two assistants, \n", + " \"\"\"Evaluates multiple questions and their corresponding answers from two assistants, \n", " optionally performing BPC.\n", "\n", " Args:\n", @@ -179,8 +176,7 @@ " return results\n", "\n", "def parse_score_from_review(review):\n", - " \"\"\"\n", - " Parses the score for two assistants from the review text.\n", + " \"\"\"Parses the score for two assistants from the review text.\n", "\n", " Args:\n", " review (str): The review text that includes the scores.\n", @@ -199,8 +195,7 @@ " return [-1, -1]\n", " \n", "def get_json_list(file_path):\n", - " \"\"\"\n", - " Reads a JSON lines file and returns a list of JSON objects.\n", + " \"\"\"Reads a JSON lines file and returns a list of JSON objects.\n", "\n", " Args:\n", " file_path (str): Path to the JSONL file.\n", @@ -216,8 +211,7 @@ " return json_list\n", " \n", "def get_results(m1, m2, eval_model, bpc=0, k=1):\n", - " \"\"\"\n", - " Retrieves and processes results for multiple questions and answers from two assistants,\n", + " \"\"\"Retrieves and processes results for multiple questions and answers from two assistants,\n", " performs evaluation, and writes the results to a file.\n", "\n", " Args:\n", @@ -573,8 +567,7 @@ "outputs": [], "source": [ "def get_MEC_BPC_results(k):\n", - " \"\"\"\n", - " Compares the evaluation results between two language models\n", + " \"\"\"Compares the evaluation results between two language models\n", " based on automatic evaluations (MEC + BPC) and human annotations.\n", "\n", " Args:\n", From f7daba4ebec507c3ba6638917fcfd63abb91a352 Mon Sep 17 00:00:00 2001 From: Rohan Modi Date: Thu, 5 Sep 2024 12:33:10 -0400 Subject: [PATCH 4/7] Fix ruff errors --- examples/llm_fair_eval/FairEval.ipynb | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/examples/llm_fair_eval/FairEval.ipynb b/examples/llm_fair_eval/FairEval.ipynb index f5eb4bf..67c6a9e 100644 --- 
a/examples/llm_fair_eval/FairEval.ipynb +++ b/examples/llm_fair_eval/FairEval.ipynb @@ -91,8 +91,7 @@ " raise RuntimeError(f\"Failed during query processing.\")\n", " \n", "def get_eval(question_jsons, answer1_jsons, answer2_jsons, eval_model, bpc, k):\n", - " \"\"\"Evaluates multiple questions and their corresponding answers from two assistants, \n", - " optionally performing BPC.\n", + " \"\"\"Evaluates questions and their corresponding answers from two assistants.\n", "\n", " Args:\n", " question_jsons (list): A list of question JSON objects.\n", @@ -211,8 +210,7 @@ " return json_list\n", " \n", "def get_results(m1, m2, eval_model, bpc=0, k=1):\n", - " \"\"\"Retrieves and processes results for multiple questions and answers from two assistants,\n", - " performs evaluation, and writes the results to a file.\n", + " \"\"\"Processes results for multiple questions and answers from two assistants.\n", "\n", " Args:\n", " m1 (str): Identifier for the first model or assistant.\n", @@ -567,8 +565,7 @@ "outputs": [], "source": [ "def get_MEC_BPC_results(k):\n", - " \"\"\"Compares the evaluation results between two language models\n", - " based on automatic evaluations (MEC + BPC) and human annotations.\n", + " \"\"\"Compares the evaluation results.\n", "\n", " Args:\n", " k (int): The number of response sequences to generate for MEC\n", From 5766624f8aa516ab356c5b0a84a0812bc7d04ed5 Mon Sep 17 00:00:00 2001 From: Zaid Sheikh Date: Thu, 12 Sep 2024 14:36:44 -0400 Subject: [PATCH 5/7] ignore ipynb docstring errors (#66) --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 1108bab..ccfd3e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,3 +39,6 @@ select = ["D"] [tool.ruff.lint.pydocstyle] convention = "google" + +[tool.ruff.lint.per-file-ignores] +"*.ipynb" = ["D103", "D200", "D212"] From 2e4782b6f7caeb33402f87c45979ff5053f8ed6e Mon Sep 17 00:00:00 2001 From: "Xinran(Joy) Wan" 
<65579349+wanxinran@users.noreply.github.com> Date: Fri, 13 Sep 2024 08:49:48 -0400 Subject: [PATCH 6/7] feat: arguments added for LORA (#67) --- dist/llments-0.0.0a1-py3-none-any.whl | Bin 0 -> 6410 bytes dist/llments-0.0.0a1.tar.gz | Bin 0 -> 6052 bytes llments/lm/base/hugging_face.py | 28 ++++++++++++++++++++++++++ 3 files changed, 28 insertions(+) create mode 100644 dist/llments-0.0.0a1-py3-none-any.whl create mode 100644 dist/llments-0.0.0a1.tar.gz diff --git a/dist/llments-0.0.0a1-py3-none-any.whl b/dist/llments-0.0.0a1-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..a953152baf366230aa3f818c330bb3a31a9bc6f0 GIT binary patch literal 6410 zcmaKw1yq#X7KVpmB!@;ir6h+?LTW&|8v$vCZjh8_C=r1H0TB?TTN()^r9`BpQ)wg= z2?6imz5d9o|9V*GgT-3=S!bW~e&62b)lfzUkpch!Eaa5dKt2G#^9OQ}Ag80F6U^Du z17d1w=WOR`YRcp4iw@AZ6e1%uo_!8DpTvKJc){E~>|C6Y5x5?BIN!e=XjkZJ@DHN5 z$=qh)L17$9mExo!V|S60<`0IIorkBnvn34j`wt||_#q#D8!uS);Az`Bh*&1{CAx)K zIGnmZTAM=zJH~kbIH$S@WSW_IniY9CAd4nyqjFsY=#i6SAu<+XZZOBU&-DFUcjh?7 z5{2x%V3dXX2vtFYUSy|Q^|N7xoHmbG8HK_0D$7(QgU{2cS5G~ zP=hb74GP{oxcZ`$IQjvpQcQ9EVh%Lq@J*udUA5VphSZFK>Oi!$f`KRYU6zmX-V?Xd zu&3$K$eTs6^+d4dEQ!$AG10#CDEs^@CeMVxp0P%!dm+V;Qy+iHXFHv0rep!?#i&&A zAb90=)G$kFm#+7L=g4asIPn3lYl(i4r26#UL(bjZ=uDRJr6U?Md9MLQqO+`}wE=UD zHoZ;J%+x%=f*-vG?h6N!VHPsP=|#Rp^@+MIoydmOHC3Kg68^Pw$M`Exid~#av;&pVe5Cr`Hhe7s- z_8gu5b`bGNhnbsD8QsxDK!;N~CtgtMJI{N9ZitHIgy z`DH0+ji89^JJqGSkHLykh6M#`)d>Lwc`y#oq+IASNNm94rq1h=l%(0Wr9rg1I^ ziP^zjNzUZ1|BSNU#%seE{;-(UyoRBdwLryO)Nh)c;(G5S>MT=vp{_e)1ro6)^2?IzjjPow`TWHP-F*;;LbFZI5w|UMneCPFK!Q{qpFL?) 
zfhf0qwLyQEJ-vA0Ug-0!r_E`p()dICrU4vGtV;gw?U+7X1^`c7X?#1WiC27nQuc(p8-Nrx7^|>SQUsvAy`ZW$fv$}rEAf=9QOg9Z zLOw3Bf)i=-3@Ni+px0fvKLH$uY&8vyB%a8AaiVhawpr*g8XLNLp&M& z+SSKjk3Q9Hd31gllLO~(-IJg{D>e7)OG=62VF>{cXoK^p4l7}wMm4Xz&c;&>=|JQg zbaKwsClfS1x8-|K4E4;B<33hsk*b_9&fcIrylrwN_8pC(48e`sYYgwMA(jJ)g%H69 z&+pQ8tD48Vv6HSPWg)&>2CCmGJ0oAi@=5q6?A5Hjux6!t))H)OUsf9#8YV|Qek}2| zTPJAthTg&Sjf5S@<#$DMqGsk5@&kkb0RU8&Rd0LG#>URs#?;!}5{CQ?m1?)zFYyvK z|KP+hT~8XLC$wdZ+^UWLV!kGReQca8fhC)AnC$zKz9pO5rZofxulS(g_jNX1lQsZz z%xg7OX(|^x4_AWUG1$+ggx7Q0x7Oda^tM+-0OngIs9hd1NbLpv?* z^f6TL&r}yR4M#lmc3_Cx!3^|`T-lnMYHy6edzf%_TC(@$es$K$k-8_vRi+W?EF+M= z6);1iA#6Une>N}^k&Uqpm5REP{)j(YB}$XBts~Slyw)e=SUy&Q?8wLgYsfRRGj*8f z{#^JOGcOjYVv2(O=FtlI58PfA5ORQWLOLS``AZ@k$xz)c&Co|! z_S^oMjA%`LFCDaA^Cn2!WlfUc6jp5m%XvJ{3%hUyA|jtbHB_89Zc=c$%AbCvGG!(? zm2%_EU}F7Avhct^y*MJH|F}gn2<`HTU*D%5I6K>6JJmYd60#|EFtvQg?=vPQ^jyFeOaN`^{$$Oyl* z3pFKlHZ)Dp#0}j%duq^rYAL37cilHrQXOAIbUoMzBv6B8yy=E3?pv<>^=5iDu#zF$ z`_2Xei-TO2QRX{Dw^RfTi0^cbJRX9P^fpxTKF7D@;Q$;Qf+hZTmOAoh{_li#g4u(g z&LD;|FmuDwcp`1Z+&ahM2?LXSfm1*0%?Fh*=7!8jlPD%c`X4DW1KF3I`&)ItaKZCu z6`TU%y(!Mf@R~G$!d;ei8b3XOw|En4@k$mQv_5`4j`>RK>nbSqFp8u@%k-|g|1M)7 zN+a|U#G+Dy+-5H1mZOvvM<-K97jr9^J95|W#f#%0cOCAXC2eINxeTDhp}a*pgrDor z`|>G;m1uYGsYAYG4jd{@J`DtL;AtntH-KJds)I#`56<`M{tb{_tcRDmtjT)NNUMgOqpXJ<;lMN0J}b zsA^}@*b7K05=QGkt&i=Ezg*^s>DzA^ysNY(P836cdFr7B`%Jiz?>Zfpn)KuQXS|UT zsEdGG&Qn;h3EBHvPAb1=V$<(Z=r1oZCOjHJi7f3mkRQ2g7n0}ZVT` zXsiGFonSfXL2vt#ux`wi@%}#rt zl6`%vb)nLs{i}mIevB7~cVl(qSk3l~N2eEjJbeQM8r9aKJ7CHm=wZ{B1-mqoc z+tRB6Xt7+vqG3#|bkUsP2K_i6#zbX@C<4~By0}QGewrPV65JoQ;li7Bw`u6wt~yuL z@&NDWacSa9lfam7j4r1Vx7PVe&cL%6DPT5BOd$Z9j?dx zI&JJ9S40%HMc%cC_;d!v$x3)@X3NLl$K+Xo;T`DYgRop44k9Ps@ zm|4W>i52$%xAm&QMsp*+IbIt#Ckv9J%{V&P?i^az7(#)=UE$*Jp@BWAD}hbAl^S(r zTkxlJjODzh2Ma47WE9n>Lj0o*3pd2awjcBy!go=$KmXb~Ko2P>edMJ7tFZJHWMx(G zdid~z8Nh%y&E@mxEYGRwd7Zl`GCtgZN*ijs&L8+~WJDLtOBq&G2o2!#k{}{j*Hv-0 z)L1-DEIr3sS%+K;>sypqOlNNdV1FlU_B{^gQlY4C$Gog{vzG>PzDTX#LTa7ouS;^Y 
zvxGT&z&s!-innFev}JibeLRh|R9zO&i@biUaE;bT^Y%ODygSs3-Fz^Aa5SR{oMIv` zD(JO9h}URxeEeL0)1$Q1);2k+>&7IRM`Y7(3Ogg>w`U`I=xD!>yL}^ZWIHgw z?YY)<$Em%ibCuAG2!nd_4CC#fTF-Y%n$VhgfR$=i-aU$QkPQGOw` zQ3++&dv-f1pw=$ZyYe)Y^@Y?Baom~kX<$ETE2cH?A3REac*-@0Pt%V}gr)pWVoQMleV*(zWqNIxKH zaWIZ)bIa1aV1rd|(B>(0k8feYShQ^-(qQ{%Dx}~w*4oDrm#Kb(>Be_l8U=j0S zjvN}cL_v;ilp~X8OBb7Iuhx@;*wUQBD39D5t_2N({Bw`*-fkYPu?O{%D;V|I=W5D2 z%D_xCuJRGO90psz>r2d`n0xN8dd8r?-}t!P0o_5D4A`0rwYhH)(?}Z8tIQ)rNqWEC zv{NiMURJ=qu`z7MkWj{tCa58y=s870QEZn@1bfX9)l83^)teeeTSm@OLCP2R7Q>$? zx|R`plNsK;--uN^=sy(&k-iRsB;dZ+4iKHg{egQ~#L8#-JQYY0e@2R!;jbdrlD)02 zB~v=o;WqI$s;VCM$yK&5- zv)WJjolfrL`p+=A!R)38&PbO=v+ZozgUW28gc(}1`E~ObPv*sVs~%-hz5o1qjEXq+ zICO|+pHJn@ilwami=1!DViEgg&l4azlV7qUtk3~EiV4g%goGHX+a4sC!swEVH#in$HS`*FfW423-ppI|OuU`NwrYvUgy{%V{|VRED?4;Ei!L6to!D2m znnGwv0z!JdL+vYejSF4cBXcyi$JW z9ip4bok(k|xM@k7R++I|75tLx>r8a-Ej6jSMbEo>;+`I`{7FN9czHuSTph<)9nnaR z&3He3sL|JYpeXVKXxE5m9c}%is13C$Q|>e#4jkAnqZ>9=fb&e8KhA9#vbbDOg@Kka z)~;~uQ7OE;7ih8f`6JJgNYiL}#3oMSJPy6PU*nS#Vn7Q&c#?15Ep(GY!jRkYerE92 znf;Nu*IMm?ciQ3csmtmEj=o5OkE7~4+*_hHfK5`lHUoAli5-OI`Z}lIrj|?|x|5p9 z6SL_oV2Ur00l2#VEV^>78*(}lM7kFB?N)m}6uz=WBu6#ODw`BLNWUgQ+~D)pDOzq% z=PY=(-D8Blcf8W4ng#shyo#U!Nzwnm?}(KAuaOgS{_D3X^oI+H3P5!s{~Hbf~f9dj3d8qB&1&fg@kc5OHxx84``twYG`kn)TSIKrM#`=2WL2ct{ZL?8ZZ{YJGyvx zP*J2J6={WhAnN);UH_EVhbsREb^S5#2RxKj@%lnt|CHB`!D^b@M z>iVa=DxvjIIUf&I%MX=@@=$pp4~e?IDCk(EBCSgQCNDpu{%>FI{}-u9Mf#TY{Es_F zJ1-N5V5K4OK6QVU5%7Zy7XC-7Zea)%sYpdC(pQqK{QBS6q956_J8k3Y^*_wq$gTg? 
zHrI>wzk5l?lHPr7M$al1w^_wv_9U2+=j7QljhzL|9##0nc02};+V%AftzepOng3mn zRZM3#;{jakF`pO})FC336$t;0t1gf1j$?3XLH)`Vd)Nwuvh?pvbkD3IlEj_4= z9dAa1tD8>Rs?NCQIi6pcE!;(_;hOQxSih`cRrZPocIYV|wQS$9t263-9656vBq7Xy zi`O&5t6ed^RW+}|=g8yCK4sp7RguB4T4qEWjmw9Ws&GXUA02AZ8MUkalv$QJW#&hu z+X}M%a(Pop-A^V`N`eT0siqpn=H*SHrg30OaErm8Gv;M-W&b|_k+QRh{cp%UCWTmV z7ObcV6PE+FCYAY=2h4Y=$qF5Isr;8TtE{I}_hHOaPP2iy*w&?g-`U=xo99GK0qn}j@yMGhHppALaT(N+W*Zn0+Ig~1d> ztQ-1M$QDA2&$^3x1sn$Xt0a6o305cdxhZ6=bPB-Eba?BkYsBc;1P&7rNY?1@u zvmkUUvOftBLgv6L&G=W`W02}T$A0P?Mr~ccq|KB<6hmtgm%D^7=SCH~p}nFKK9Rza z7zxr)>3vH`ixqy-PDRcl*wW zU5Pgv3S5OX%ujGuduE*FT8a&dyTVxBzFCDnloFo{b$30WSfZ^M2bvjwmvJP9(W;RM z`>o=e9(RMk%ZL)2Gc%&XSohlzx%cSx{}T$qWoX8 z`~o^jNf7a_*p^bg!bGy5Z2`{xmPe7cQ+%rWJ`BK#I#Ns^kkr!0Ku3auiYDhuH)!_&?6i z0r#fI0^c}%ci0)U;9u|HxAs1{xF`hsS0w-08J)SHZL2dj;&d?LK(h3`!QF*Dyi@uA zbfcD!|88w<6#oCcq;rkvCYW2k4h}f*?m6kiSj3}vLSTn+BuHyxS&0OwrlUijm4JQ+BPX=RZvV?e< zdSFTf)Ha!6J6;T8x3;4mUpv5?Psv?}js$gvXyarl_hk`^9U1^4pJxzfSkU1CB5mNi z%v^h{iiuwadV7ycp9@WX=$*0!5kWefhh;+TQT($?ZGX-@n6u814!Okx4%{Kg%P80* zHkM1OFe zs(hiQN*4Oq@zO1T=}DGQmDITS4D;-edARf^IFg;Y%;P8+K){hPwYm}ey2_VlaneS4*c=tvo9<+ACVn4M$ZYm70Ev`hySS@IGEfC&eiOQ z%840rkz_~D2jkEdwmIY{;&ZLEN_!&FQ&e6_^k#ORfF6&;CXN?eBeIr|06XFFp}Eg$Rw2r;W9sWIlkTDoIP zkoUmB3B4S}qNylm?q$;vryn>?;Zn{DK9tP+=|sVlpB~4lO63g1J`8wiwydp?L1_^E za)d6#$!Fkx590}GwBD9E>$l*{6})!bPr9QKtd{`|kM)ntv%%;CZLFRMu5>NB5LCk+ zD{Dt}I^vwBPuQ*Zs-vX;u-^PkDa&`#Dm-iF+l4jdY-`HY3M{(@d3yY(GZ0e zVSuz-MGs;e8S zK+;uQ1gaUEx+DPh85)qAMIg74{yms+up)5|XhkUD?)-n3dI1O#DL6(fU_x?S^7!hg zz%~B=W}{Kh`v3JtvH$b$?f>t{nPE@Jkur36FeP1v@={nkhYmUps5%c}M)9=0ncVfz zo&XyIy>AEi9(m3vG^IlJm>*iw!#*BUE=|56wXn6(e<$Akd_J#y@B_!IfQN6Gv(Vt9 znccc@AENdUV@KD z@isn%4}lKTsVaDW^@s&vTvyTF1+9KcEhYD>r<{SsR8?AF$}Nj}i@fzy$OIry`3%^ea1;rQk|}!(A^>SN<0y0SkitKV{yp+dfnWb7eXt#y zUDbh(h*DxFY#{y|mx*x7yZ{#y91fQ;@RL10nFf$QkBZJXOc5xB4)e%!TtKod#Os{< zBWGsZq=RF6>UfzRmX@Z+k}sB>He~{rk6>cRy$di+p2G-}I4M#7n1fTql_Qs0#!Ive zmJ390jT8M#^YON1!bQ|`m=_q^#SM2!j987t|E!^a+sDPE8JMcO!)A_&TLw*8q07WV 
zp-Jsby>B%L(YC`G!*v1~7qA14%!iTCmBEM@4VddX=nY4hP~}%R{a7i%J61?hVMTed z1gpY}G0(wZCxaw(yI2F-McfKxOQahTU5J@}nT%7iiDX;SX)bHv5aL9wf zz;GA0ZsXeb|KL9N{tw&~{@<6RzY4|N1^KV1e?7nds{z-7{`Zl7Z+DM62m7ERbTtL^ zB8XP-e>E~I;%&sJlj;Q#wbDt<$Url#e>L#YT+wA)5(w$U_A#1ly5 zWXL^#F93!?hqztF48Ss*I&zcYh>hcAo;9CY<1(>kwPqG3KC9=Rl}P1RG6v&3NH!Jf zI(QZsWaK*@CB;^sV`|DRaZ@!)Bq8LqM=734@`0GuY) zFB6ZA#ocwKUpBVjRi*gI(u20-itX@{{0?E2~ z%~%6Oa!6?ap|OYsHIJckzbw}{)e|B4`{ZBX%Gr2eAzn&}$mV0UA`1Q-W@UwWv{-sv zBUNHCdrc^mK6#S-h|6PNgl2{E?#S4IyH_5av9)utWma#BewK-jK=r1CeNjpr--%GM zv0gJBd&C3rCNYdk4WLY(Joz}6havz~gl>b}jAlC$`!%EOK>1gj06Pw-;GwlhT99wa zQGDra%+E4zFyJRHwi-vCP^60BhBpPv|(913r@$2l8qlcX`NHL{wih0LG#^8x2NdT=|aMU+Sa*xHJSv zA3?6g;7a`rK)^~0rU7y>SY-R0gb;oQ3sTV&9Nppg?Ux$OVt`n~2ivwJaa5rlGz~ZCmV12!tWIauW(pTWZVq zjxM1MNC6EyziQt}d?#I*B)Asufl0R|xSkTMgj8TLGYb}RTo79^K#)bAUP4JcP7PQR zWwk&a8y=%0Jnq3?D=l|Ac?B)LyQ@ye09STeiCrut_U^2WOP@BN3^k9>q6sG~{xjq= zgbec!v()U)W~E5@?c0BuZL+s90k5(D>h=24`F|T*h5i3k#{b{QN%0Ei#O~guCYvve zlC_MrRE~eGwsqp{$mWqxRvJC8mGN{(X3t-b{$~_XcDT3DNXfu6bu%c4UVF?ATxcKF zaxUw!pVdcKg*BqC_f|9@?69d5T4Jv?j2qDrI7SXYevmEkjHI4PQ0&aLaL*xt$8FKW z^szqbSQ#u9d`eG{P$NeKPmsp!6ObNYW{W2;#IyZ8M+(Gvf%&bcZ;9|to2F4|r!bX+&8EA+XZ zL6Ck!jh?Gr==3kI)4%6!VOPrk0oo|c`^@qG^~UDX{_m}gV*l4y=Kn3*tTOns!f;)o zLB0XQRr_A~e}Jd4e#ZUZ_1g0O@6BTW&%Gq%Y9#Mq6>BGv+#VE>$~jvuW;j(rWcy28 zZhW82^*7{yJpWsM?(Oc{xOV@?()xdWqgI^%dM{}(a2F;ur))6LW%7ysASx9WNRuWc6bpLf~T4c7i8j5)ahlg z>zO#5!<=H=p)8lh7sQ3yP{A1nl-qJ^s0l4_e1o@flYwh$E)N zE(A@U%~=-xETcWc$9QF8Sn4?ZauSDuhrT%a0`V!1e5v9wkAOc7AmCY6VysE;0B%B6mE}+Ivjjt?5C-jXTf;}uaPSbzA?{EOIJF2mk6axclkPfr zW4$+Ra&+*j_eZPSCY>X4*gg2Yv(w%odg};&>t*ssr}yUIZI3{SZfn2yjvTxqt^If8 zx6b}fnY91su-iU5A_rZqvv;`LX~VP5{`T(MozDJi@)GLpAHY=Zz=Q^%y#s;`Do~yF z5dzz5cemfb&(_P%Zm0LIti9^=_7Uc*gDyCRhpld}v;B6r)g^~-yN3ryZD@W6!0vbU zUv;6C_Fj9xXF#j)jI@7;ALQswYj+o$(pqn!_g(BC**-XY*X_K1(<5&Vc6ZwF@MRl% z)_S?ymQBH+ws%{dy)xNp?X_OFMXdvX($%n->>c^zO&cF!doB2XyNB;;!!WiF_Iq9U zS%y(|dvVo2I!Em?X>~hCh>%y^gT1nb=!6;v0tnRGZ%ZhMYLcM{6v5wbkJ>RnveRzu z0xTfUeujL-iS|L!F{ih_Td_6Uz{JY|;S 
zinF*owmb|6w^wRCNA0~F$r7Mi%JCO##15}i%@K2%Jz}=WnLG+P`5IXC-B<1X`}afY zPqp-yZ*=1r!1Q%3RbJ&`d7;15(P=(3=Yg_0&Vx=4eoS)22Wm?)d@<+Z;||0a<<@AaqVbqBd~Ze5o{;zW-NpB$f?s9yGc{EB)JHYAR1+%Vrw8MT zc=DM1oKc*e`p#ZlEP9v$6n&T}F7>Xpx0{ssYy9snT+cyM!U&vOe)juQ`MwOs;s0^w35m*~fi-|K0k)>x6CHr9(GU!5Pe2u6!1JiYYmZLpjO?>;$R}X`u^HtSX4vA%iwXWl{?#waT3<`A zG^XL>K2-@Y)?a!&7IkReLe&{*iy?`(PvpN2}OtTq%=M?TlX4QTm^}r%8-AyJwW#!gi>u?}tX=t{nWzTxJCehJE>M>e%JG=x z>lwPh?E;9xO+oqSAntxbRl^Zzr&c8m3E`fmz{N5bn@sA4p5^!DcjkHEcndLsmOmm6 zM7QX$8k1E!4jjia#8fj>pZc0u^hv0nI;u)9?77?8e}})lu7Im^aF=ad8~=HlU;nK& z>c#rcy`<>9*~;(HhYagRP1~2>byb#OoTVm-b`;LQB3UFQ4ke#g-gbAJBw_P1xt7Vb zmQW29A^4jlqYub`<+Z@Cnv#8$S7Hq~6|K~^W0~G0pC&*vRj!w!nc|*7z#etb`lj_ z!B55mVk$kbVw>WHj%sA7f;&-o7T>jJQKTXjsYpdCQjv;Oq#_lmNJT1Ak&0BLA{D7f eMJiH}id3W`6{$!?DpHZ|EB!w^Jr3&t$N&H$W$PsX literal 0 HcmV?d00001 diff --git a/llments/lm/base/hugging_face.py b/llments/lm/base/hugging_face.py index a1d4cc3..a9d9737 100644 --- a/llments/lm/base/hugging_face.py +++ b/llments/lm/base/hugging_face.py @@ -216,6 +216,8 @@ def fit( prediction_loss_only: bool = False, optim: str = "adamw_torch", logging_steps: int = 500, + lora_r: int | None = None, + lora_alpha: int | None = None, ) -> LanguageModel: """Fit the language model to a target language model's distribution. @@ -239,6 +241,8 @@ def fit( prediction_loss_only: When performing evaluation and generating predictions, only returns the loss. optim: The optimizer to use. Can only choose from a list of names. logging_steps: Number of update steps between two logs if logging_strategy="steps". + lora_r: Lora attention dimension (the “rank”). + lora_alpha: The alpha parameter for Lora scaling. Returns: The fitted language model. 
@@ -285,6 +289,30 @@ def fit(
         )
         eval_dataset = Dataset.from_dict(eval_inputs)
 
+        # wrap the base model with peft
+        if lora_r and lora_alpha:
+            try:
+                from peft import (
+                    LoraConfig,
+                    get_peft_model,
+                    prepare_model_for_kbit_training,
+                )
+            except ImportError:
+                raise ImportError(
+                    "You need to install the 'peft' package to use this LoRA functionality."
+                )
+            lora_config = LoraConfig(
+                r=lora_r,
+                lora_alpha=lora_alpha,
+                # trainable layers: the linear projection layers of multi-head attention
+                target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
+                lora_dropout=0.05,
+                bias="none",
+                task_type="CAUSAL_LM",
+            )
+            model = prepare_model_for_kbit_training(base.model)
+            base.model = get_peft_model(model, lora_config)
+
         training_args = TrainingArguments(
             output_dir=output_dir,
             do_train=do_train,

From bac169f6f7d44f8c064eb9cc0e624261e8035889 Mon Sep 17 00:00:00 2001
From: Aakriti Kinra <52823721+AakritiKinra@users.noreply.github.com>
Date: Mon, 23 Sep 2024 10:40:23 -0400
Subject: [PATCH 7/7] Added API Proxy functionality to API LM (#68)

* added base_url

* Updated function descriptions

* Added api_base to the constructor

* matched structure with lm class

Pull latest changes
---
 llments/lm/base/api.py | 112 +++++++++++++++++++----------------------
 1 file changed, 52 insertions(+), 60 deletions(-)

diff --git a/llments/lm/base/api.py b/llments/lm/base/api.py
index 4872fbb..eb2be5f 100644
--- a/llments/lm/base/api.py
+++ b/llments/lm/base/api.py
@@ -6,19 +6,19 @@
 from llments.lm.lm import LanguageModel
 from litellm import completion, batch_completion, ModelResponse
 
-
-class APIBasedLM:
+class APIBasedLM(LanguageModel):
     """Base class for API-Based Language Models.
 
     Represents a language model that interacts with an API
     for generating responses.
 
     Usage:
-    - Instantiate this class with the model name.
+    - Instantiate this class with the model name and the api endpoint
     - Set the API key of the language model as an
       environment variable for secure access.
Attributes: model_name (str): The name of the language model. + api_base (str): The API endpoint to call the model. """ @abc.abstractmethod @@ -36,126 +36,118 @@ def calculate_probability(self, condition: str | None, output: str) -> float: raise NotImplementedError @abc.abstractmethod - def __init__(self, model_name: str) -> None: + def __init__(self, model_name: str, api_base: str) -> None: """Initialize the APIBasedLM instance. Args: model_name (str): The name of the language model. + api_base (str): The API endpoint to call the model. """ self.model_name = model_name + self.api_base = api_base @abc.abstractmethod def generate( self, - message: str, condition: str | None, do_sample: bool = False, max_length: int | None = None, max_new_tokens: int | None = None, temperature: float = 1.0, - num_return_sequences: int = 1, - ) -> list[str]: + num_return_sequences: int = 1 + ) -> list[str]: """Generate a response based on the given prompt. This method sends a prompt to the language model API and retrieves the generated response. Args: - message (str): The prompt for generating a response. condition (str): The conditioning sequence for the output. If None, the output is not conditioned. do_sample (bool): Whether to use sampling or greedy decoding. max_length (int): The maximum length of the output sequence, (defaults to model max). max_new_tokens (float): The maximum number of tokens to generate in the chat completion. - temperature (float): The sampling temperature to be used, between 0 and 2. + temperature (float): The sampling temperature to be used, between 0 and 2. num_return_sequences (int): The number of chat completion choices to generate for each input message. Returns: - ModelResponse: The generated response object from the language model. + str: Sampled output sequences from the language model. 
""" if condition is not None: - warnings.warn( - "A non-default value for 'condition' was provided.", UserWarning - ) + warnings.warn("A non-default value for 'condition' was provided.", UserWarning) if do_sample: - warnings.warn( - "A non-default value for 'do_sample' was provided.", UserWarning - ) + warnings.warn("A non-default value for 'do_sample' was provided.", UserWarning) if max_length is not None: - warnings.warn( - "A non-default value for 'max_length' was provided.", UserWarning - ) - + warnings.warn("A non-default value for 'max_length' was provided.", UserWarning) + responses = [] response = completion( - model=self.model_name, - temperature=temperature, - max_tokens=max_new_tokens, - n=num_return_sequences, - messages=[{"content": message, "role": "user"}], + model = self.model_name, + temperature = temperature, + max_tokens = max_new_tokens, + n = num_return_sequences, + api_base = self.api_base, + messages=[{"content": condition, "role": "user"}] ) - for choice in response["choices"]: - responses.append(choice["message"]["content"]) + for choice in response['choices']: + responses.append(choice['message']['content']) return responses @abc.abstractmethod def chat_generate( self, - messages: list[list[dict[str, str]]], - condition: str | None = None, + messages: list[dict[str, str]], do_sample: bool = False, max_length: int | None = None, max_new_tokens: int | None = None, temperature: float = 1.0, - num_return_sequences: int = 1, - ) -> list[list[str]]: + num_return_sequences: int = 1 + ) -> list[list[dict[str, str]]]: """Generate responses to multiple prompts using the batch_completion function. This method sends multiple prompts to the language model API and retrieves the generated response for each of the prompts. Args: - messages (list of list): The list of prompts, where each prompt contains a sequence of messages - with roles (either 'system' or 'user') and their corresponding content. 
- condition (str): The conditioning sequence for the output. - If None, the output is not conditioned. + messages: A list of dictionaries, each representing a message in the chat context. Each dictionary should contain the following keys: + - "role": The role of the entity sending the message. This can be "system", "user", etc. + - "content": The actual content of the message. Example: + [ + { + "role": "system", + "content": "You are a friendly chatbot", + }, + { + "role": "user", + "content": "How many helicopters can a human eat in one sitting?" + }, + ] do_sample (bool): Whether to use sampling or greedy decoding. max_length (int): The maximum length of the output sequence, (defaults to model max). max_new_tokens (float): The maximum number of tokens to generate in the chat completion. - temperature (float): The sampling temperature to be used, between 0 and 2. + temperature (float): The sampling temperature to be used, between 0 and 2. num_return_sequences (int): The number of chat completion choices to generate for each input message. Returns: - list: List of responses generated by the language model for all the prompts. + list[list[dict[str, str]]]: list of chat contexts with the generated responses. 
""" - if condition is not None: - warnings.warn( - "A non-default value for 'condition' was provided.", UserWarning - ) if do_sample: - warnings.warn( - "A non-default value for 'do_sample' was provided.", UserWarning - ) + warnings.warn("A non-default value for 'do_sample' was provided.", UserWarning) if max_length is not None: - warnings.warn( - "A non-default value for 'max_length' was provided.", UserWarning - ) - + warnings.warn("A non-default value for 'max_length' was provided.", UserWarning) + responses = batch_completion( - model=self.model_name, - temperature=temperature, - max_tokens=max_new_tokens, - n=num_return_sequences, - messages=messages, + model = self.model_name, + temperature = temperature, + max_tokens = max_new_tokens, + n = num_return_sequences, + api_base = self.api_base, + messages=messages ) - - return [ - [choice["message"]["content"] for choice in response["choices"]] - for response in responses - ] - + return [messages + [{"role": "assistant", "content": r}] for r in responses] + @abc.abstractmethod def set_seed(self, seed: int) -> None: """Set the seed for the language model. @@ -163,4 +155,4 @@ def set_seed(self, seed: int) -> None: Args: seed: The seed to set for the language model. """ - raise NotImplementedError + raise NotImplementedError \ No newline at end of file