diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..6e56931 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,120 @@ +# Contributing + +Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. + +You can contribute in many ways: + +## Types of Contributions + +### Report Bugs + +Report bugs at https://github.com/cubenlp/openai_api_call/issues. + +If you are reporting a bug, please include: + +- Your operating system name and version. +- Any details about your local setup that might be helpful in troubleshooting. +- Detailed steps to reproduce the bug. + +### Fix Bugs + +Look through the GitHub issues for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants to implement it. + +### Implement Features + +Look through the GitHub issues for features. Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it. + +### Write Documentation + +Openai API call could always use more documentation, whether as part of the +official Openai API call docs, in docstrings, or even on the web in blog posts, +articles, and such. + +### Submit Feedback + +The best way to send feedback is to file an issue at https://github.com/cubenlp/openai_api_call/issues. + +If you are proposing a feature: + +- Explain in detail how it would work. +- Keep the scope as narrow as possible, to make it easier to implement. +- Remember that this is a volunteer-driven project, and that contributions are welcome :) + +## Get Started! + +Ready to contribute? Here's how to set up `openai_api_call` for local development. + +1. Fork the `openai_api_call` repo on GitHub. +2. Clone your fork locally: + + ```shell + $ git clone git@github.com:your_name_here/openai_api_call.git + ``` + +3. Install your local copy into a virtualenv. 
Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development: + + ```shell + $ mkvirtualenv openai_api_call + $ cd openai_api_call/ + $ python setup.py develop + ``` + +4. Create a branch for local development: + + ```shell + $ git checkout -b name-of-your-bugfix-or-feature + ``` + + Now you can make your changes locally. + +5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox: + + ```shell + $ flake8 openai_api_call tests + $ python setup.py test or pytest + $ tox + ``` + + To get flake8 and tox, just pip install them into your virtualenv. + +6. Commit your changes and push your branch to GitHub: + + ```shell + $ git add . + $ git commit -m "Your detailed description of your changes." + $ git push origin name-of-your-bugfix-or-feature + ``` + +7. Submit a pull request through the GitHub website. + +## Pull Request Guidelines + +Before you submit a pull request, check that it meets these guidelines: + +1. The pull request should include tests. +2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the list in README.rst. +3. The pull request should work for Python 3.8 and 3.9, and for PyPy. Check https://github.com/cubenlp/openai_api_call/actions and make sure that the tests pass for all supported Python versions. + +## Tips + +To run a subset of tests: + +```shell +$ pytest tests.test_openai_api_call +``` + +## Deploying + +A reminder for the maintainers on how to deploy. Make sure all your changes are committed (including an entry in HISTORY.rst). Then run: + +```shell +$ bump2version patch # possible: major / minor / patch +$ git push +$ git push --tags +``` + +Travis will then deploy to PyPI if tests pass. + +--- + +Thank you for reading and considering contributing to `openai_api_call`! 
\ No newline at end of file diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md new file mode 100644 index 0000000..0672273 --- /dev/null +++ b/CONTRIBUTORS.md @@ -0,0 +1,17 @@ +# Contributors List + +- Rex (Main Developer) + - GitHub: [Rex's GitHub Profile](https://github.com/RexWzh) + +- Zhouhao (Code Tester) + - GitHub: [Zhouhao's GitHub Profile](https://github.com/zh-i9) + +- Shangqing (Code Tester) + - GitHub: [Shangqing's GitHub Profile](https://github.com/Qing25) + +- Binxuan (Code Tester) + - GitHub: [Binxuan's GitHub Profile](https://github.com/LuaMarke) + +Thank you all for your contributions! + +License: MIT \ No newline at end of file diff --git a/README.md b/README.md index 6e5b99d..f3419d3 100644 --- a/README.md +++ b/README.md @@ -155,4 +155,5 @@ This package is licensed under the MIT license. See the LICENSE file for more de ## update log - Since version `0.2.0`, `Chat` type is used to handle data -- Since version `0.3.0`, you can use different API Key to send requests. \ No newline at end of file +- Since version `0.3.0`, you can use different API Key to send requests. +- Since version `0.4.0`, this package is maintained by [cubenlp](https://github.com/cubenlp). 
\ No newline at end of file diff --git a/README_zh_CN.md b/README_zh_CN.md index f048eed..2f149d5 100644 --- a/README_zh_CN.md +++ b/README_zh_CN.md @@ -154,4 +154,5 @@ chat.show_usage_status() - 版本 `0.2.0` 改用 `Chat` 类型作为中心交互对象 - 版本 `0.3.0` 开始不依赖模块 `openai.py` ,而是直接使用 `requests` 发送请求 - 支持对每个 `Chat` 使用不同 API 密钥 - - 支持使用代理链接 \ No newline at end of file + - 支持使用代理链接 +- 版本 `0.4.0` 开始,工具维护转至 [CubeNLP](https://github.com/cubenlp) 组织账号 \ No newline at end of file diff --git a/openai_api_call/__init__.py b/openai_api_call/__init__.py index b8b44c4..6a575ba 100644 --- a/openai_api_call/__init__.py +++ b/openai_api_call/__init__.py @@ -2,11 +2,14 @@ __author__ = """Rex Wang""" __email__ = '1073853456@qq.com' -__version__ = '0.4.0' +__version__ = '0.4.1' import os from .chattool import Chat, Resp, chat_completion, usage_status from .proxy import proxy_on, proxy_off, proxy_status +import requests +from . import request + # read API key from the environment variable if os.environ.get('OPENAI_API_KEY') is not None: @@ -33,3 +36,50 @@ def default_prompt(msg:str): List[Dict]: default prompt message """ return [{"role": "user", "content": msg},] + +def show_base_url(): + """Show the base url of the API call""" + print(f"Base url:\t{request.base_url}") + +def debug_log( net_url:str="https://www.baidu.com" + , timeout:int=5 + , test_usage:bool=True + , test_response:bool=True): + # 1. Test whether the network is available + try: + requests.get(net_url, timeout=timeout) + except: + print("Warning: Network is not available.") + return False + + print("Your network is available.") + + # 2. Check the API key + print("\nPlease verify the API key:") + show_apikey() + + # 3. Check the proxy status + print("\nYour proxy status:") + proxy_status() + print("Note that, you don't need to set proxy if your `base_url` has done it!") + + # 4. 
Base url + print("\nCheck your base url:") + show_base_url() + if request.url is not None: + print("Warning: the `url` parameter is deprecated, please use `base_url` instead.") + + # 5. Get usage status + if test_usage: + print("\nThe usage status of your API key:") + Chat().show_usage_status(recent=3) + + # 6. Test hello world + if test_response: + print("\nTest hello world:") + chat = Chat("hello world") + chat.getresponse(max_requests=3) + chat.print_log() + + print("\nDebug is finished.") + return True \ No newline at end of file diff --git a/openai_api_call/chattool.py b/openai_api_call/chattool.py index 8f49317..136bd8f 100644 --- a/openai_api_call/chattool.py +++ b/openai_api_call/chattool.py @@ -192,7 +192,7 @@ def save(self, path:str, mode:str='a', end:str='\n'): assert mode in ['a', 'w'], "mode should be 'a' or 'w'" data = self.chat_log with open(path, mode, encoding='utf-8') as f: - f.write(json.dumps(data) + end) + f.write(json.dumps(data, ensure_ascii=False) + end) return True def print_log(self, sep: Union[str, None]=None): diff --git a/openai_api_call/request.py b/openai_api_call/request.py index 638c0b9..6eb87e3 100644 --- a/openai_api_call/request.py +++ b/openai_api_call/request.py @@ -81,7 +81,7 @@ def chat_completion(api_key:str, messages:List[Dict], model:str, **options) -> D chat_url = normalize_url(chat_url) # get response - response = requests.post(chat_url, headers=headers, data=json.dumps(payload)) + response = requests.post(chat_url, headers=headers, data=json.dumps(payload, ensure_ascii=False)) if response.status_code != 200: raise Exception(response.text) return response.json() diff --git a/setup.py b/setup.py index a3687d5..5128e03 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ with open('README.md') as readme_file: readme = readme_file.read() -VERSION = '0.4.0' +VERSION = '0.4.1' requirements = ['Click>=7.0', 'requests>=2.20'] diff --git a/tests/test_openai_api_call.py b/tests/test_openai_api_call.py index 03fe6b9..16f2449 
100644 --- a/tests/test_openai_api_call.py +++ b/tests/test_openai_api_call.py @@ -5,6 +5,7 @@ import pytest from click.testing import CliRunner from openai_api_call import cli +from openai_api_call import debug_log @pytest.fixture @@ -32,3 +33,6 @@ def test_command_line_interface(): help_result = runner.invoke(cli.main, ['--help']) assert help_result.exit_code == 0 assert '--help Show this message and exit.' in help_result.output + +def test_debug_log(): + assert debug_log(test_response=False, test_usage=False) \ No newline at end of file