###################################################
#
# Define blog content characteristics.
# This is the main config file that drives the code.
# Keeping settings here avoids the need for code modifications and improves usability.
#
###################################################
[blog_characteristics]
# Length of the blog in words. Note: The count won't be exact; it depends on the GPT provider and the max token count.
blog_length = 3000
# company/brand-name
# professional, how-to, beginner, research, programming, casual, etc.
blog_tone = "professional"
# Target audience: Gen-Z, tech-savvy, working professionals, students, kids, etc.
blog_demographic = "All"
# informational, commercial, company, news, finance, competitor, programming, scholar, etc.
blog_type = "Informational"
# English, Spanish, German, Chinese, Arabic, Nepali, Hindi, Hindustani, etc.
blog_language = "English"
# Specify the output format of the blog as: HTML, markdown, plaintext. Defaults to markdown.
blog_output_format = "markdown"
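# Illustrative example (not active): a hypothetical alternative setup for a shorter,
# how-to blog aimed at Gen-Z readers, written in Spanish and output as HTML. The option
# values are taken from the comments above; the word count of 1500 is only an assumption.
# Uncomment these lines (and comment out the values above) to try it.
# blog_length = 1500
# blog_tone = "how-to"
# blog_demographic = "Gen-Z"
# blog_type = "Informational"
# blog_language = "Spanish"
# blog_output_format = "HTML"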
############################################################
#
# Blog Images details.
# Note: Images are generated from the blog content. The blog title is used,
# adapted into an image generation prompt.
#
############################################################
[img_details]
# Options are dalle2, dalle3, stable-diffusion.
image_gen_model = "stable-diffusion"
# Number of blog images to include.
num_images = 1
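# Illustrative example (not active): a hypothetical setup using DALL·E 3 with two images
# per blog. Both option values appear in the comments above; uncomment to try it.
# image_gen_model = "dalle3"
# num_images = 2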
###########################################################
#
# Define the LLM and its characteristics for fine control over the output.
#
###########################################################
[llm_options]
# Choose one of the following: openai, google, mistral
gpt_provider = google
# Specify which of the chosen provider's models to use.
model = gpt-3.5-turbo-0125
# Temperature controls the "creativity" or randomness of the text generated by the model.
# A lower temperature (e.g., 0.2) makes the output more deterministic and focused
# (and therefore more likely to be flagged as AI-generated content),
# while higher values produce more random, creative output.
temperature = 0.6
# Top-p (nucleus) sampling is useful when you want to control the diversity of the generated text.
# By adjusting the threshold p, you influence how much of the probability mass is sampled from.
# A lower top_p restricts sampling to the highest-probability tokens, producing more conservative, focused output,
# while a higher top_p allows more diverse but potentially less coherent output.
top_p = 0.9
# "Max tokens" is a parameter that determines the maximum length of the output sequence generated by a model,
# usually measured in the number of tokens (words or subwords).
# It helps control the length of generated text and manage computational resources during text generation tasks.
max_tokens = 4096
# "n" represents the number of words or characters grouped together in a sequence when analyzing text.
# For example, if "n" is 2, we're looking at pairs of words (bigrams),
# if "n" is 3, we're looking at groups of three words (trigrams), and so on.
# It helps us understand patterns and relationships between words in a piece of text.
n = 1
# The frequency penalty, ranging from -2 to 2, influences word selection during text generation.
# Higher values penalize tokens that already appear often, promoting diversity,
# while lower values favor common words, leading to more predictable text.
frequency_penalty = 1
# Presence penalty encourages the use of diverse words by discouraging repetition.
# It nudges the model to avoid reusing words it has already produced and to introduce new ones:
# "Try using different words instead of repeating the same ones."
# Ranges from -2 (more flexible, repetition allowed) to 2 (strong discouragement of repetition).
presence_penalty = 1
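# Illustrative example (not active): a hypothetical, more deterministic setup using the
# openai provider. The provider, model name, and 0.2 temperature come from the comments
# above; the remaining values are assumptions for illustration, not recommendations.
# gpt_provider = openai
# model = gpt-3.5-turbo-0125
# temperature = 0.2
# top_p = 0.9
# max_tokens = 4096
# n = 1
# frequency_penalty = 0
# presence_penalty = 0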
######################################################
#
# Search Engine Parameters.
# Alwrity does comprehensive web research for the given content topic.
# The search engine parameters below fine-tune the search results
# and make the generated content more accurate.
#
######################################################
# Visit https://serper.dev/playground and provide values from there.
# https://api.serper.dev/locations
[web_research]
# Geographic location (gl): this value restricts the web search to the given country.
# Examples: us for the United States, in for India, fr for France, cn for China, etc.
geo_location = us
# Locale (hl): the language you want the search results in.
# Examples: en for English, zh-cn for Chinese, de for German, hi for Hindi, etc.
search_language = en
# num_results: number of Google search results to fetch. Default: 10.
num_results = 10
# time_range: acceptable values are anytime, past day, past week, past month, past year.
# This limits the search results to the given time duration, counted back from today.
time_range = anytime
# include_domains (give full URLs, separated by commas): a list of domains to specifically include in the search results.
# Default is None, which includes all domains. Example: https://wikipedia.com,https://stackoverflow.com
include_domains =
# similar_url: a single URL; this instructs the search engine to return results similar to it.
similar_url =
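# Illustrative example (not active): a hypothetical setup that researches in Hindi,
# restricted to India, the past month, and Wikipedia only. The individual values come
# from the examples in the comments above; the combination itself is just for illustration.
# geo_location = in
# search_language = hi
# num_results = 10
# time_range = past month
# include_domains = https://wikipedia.com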
###########################################################
#
# Creating Your Virtual content writing Team.
#
# One can choose from the following roles and make a virtual team.
#
# Chief Editor - Oversees the research process and manages the team.
# Researcher (gpt-researcher) - A specialized autonomous agent that conducts in-depth research on a given topic.
# Editor - Responsible for planning the research outline and structure.
# Reviewer - Validates the correctness of the research results given a set of criteria.
# Revisor - Revises the research results based on the feedback from the reviewer.
# Writer - Responsible for compiling and writing the final report.
# Publisher - Responsible for publishing the final report in various formats.
#
###########################################################
# Choose the multi-agent framework; for now it defaults to CrewAI. More options (langgraph, agentgpt, autogpt, etc.) are coming.
# Step 1: Choose the team members from this list: chief_editor, researcher, editor, reviewer, writer, publisher
# Only the listed team members/agents will be included in your dream team.
your_content_team_members = chief_editor, researcher, editor, reviewer, writer
# Step 2: Edit the team members to fit your needs in the workspace/my_content_team folder.
# Personalize the AI team members for your requirements. Imagine a real team and think along those lines.
# To get started, refer to the template Agent files in the workspace folder; modify them for your company's content needs.
# Tip: Start small. Define roles and goals, and give each Agent a backstory that makes it work for you.
# Run the example and keep quality control: change an Agent's persona, tone of voice, personality, etc.
# Check whether the researcher is returning the right results and review the output of each Agent. Iterate and refine each Agent
# until they write to your requirements. This takes time, but you end up with an AI Agent content writing team tailored to your own needs, for free.
# where_your_team_at = the directory location where your team members are defined.
# See the template files in the workspace/my_content_team folder.
# where_your_team_at =
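# Illustrative example (not active): a hypothetical minimal team of three agents, with the
# team definitions kept in the template folder mentioned above. The directory value is an
# assumption for illustration only; point it at wherever your own agent files live.
# your_content_team_members = researcher, writer, reviewer
# where_your_team_at = workspace/my_content_team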