{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"Language-Model-SAEs","owner":"OpenMOSS","isFork":false,"description":"For OpenMOSS Mechanistic Interpretability Team's Sparse Autoencoder (SAE) research.","allTopics":["sparse-autoencoders","interpretability","sparse-dictionary","mechanistic-interpretability"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":3,"starsCount":31,"forksCount":6,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-19T17:15:54.747Z"}},{"type":"Public","name":"CoLLiE","owner":"OpenMOSS","isFork":false,"description":"Collaborative Training of Large Language Models in an Efficient Way","allTopics":["nlp","deep-learning","deepspeed","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":5,"issueCount":19,"starsCount":407,"forksCount":58,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-28T11:48:33.861Z"}},{"type":"Public","name":"AnyGPT","owner":"OpenMOSS","isFork":false,"description":"Code for \"AnyGPT: Unified Multimodal LLM with Discrete Sequence Modeling\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":13,"starsCount":745,"forksCount":59,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,14,1,0,5,0,11,5,4,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,9,0,0,0,1,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-27T13:34:35.711Z"}},{"type":"Public","name":"MOSS","owner":"OpenMOSS","isFork":false,"description":"An open-source tool-augmented conversational language model from Fudan University","allTopics":["natural-language-processing","deep-learning","text-generation","dialogue-systems","large-language-models","chatgpt"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":8,"issueCount":233,"starsCount":11923,"forksCount":1145,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-13T14:52:59.832Z"}},{"type":"Public","name":"HalluQA","owner":"OpenMOSS","isFork":false,"description":"Dataset and evaluation script for \"Evaluating Hallucinations in Chinese Large Language Models\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":106,"forksCount":4,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T13:32:51.620Z"}},{"type":"Public","name":"GAOKAO-MM","owner":"OpenMOSS","isFork":false,"description":"[ACL'2024 Findings] GAOKAO-MM: A Chinese Human-Level Benchmark for Multimodal Models Evaluation","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":33,"forksCount":3,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-13T02:37:30.279Z"}},{"type":"Public","name":"Say-I-Dont-Know","owner":"OpenMOSS","isFork":false,"description":"[ICML'2024] Can AI Assistants Know What They Don't Know?","allTopics":["alignment","truthfulness","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":62,"forksCount":5,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-05T06:39:57.411Z"}}],"repositoryCount":7,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"OpenMOSS repositories"}