Added abstract functions for LM and distmod
rohanmodi2810 committed Feb 26, 2024
1 parent 17c7ae0 commit 3604178
Showing 2 changed files with 116 additions and 0 deletions.
95 changes: 95 additions & 0 deletions llments/lm/distmod.py
@@ -0,0 +1,95 @@
from llments.lm.lm import LanguageModel


class DistributionModifier:
    """Modifier that adapts a language model's probability distribution."""

    def fit(
self,
base_model: LanguageModel,
target: LanguageModel,
task_description: str | None = None,
) -> LanguageModel:
"""Fit a language model to match another language model's probability distribution.
Args:
base_model (LanguageModel): The language model to be fitted.
target (LanguageModel): The target language model whose probability distribution the base model should match.
task_description (str | None, optional): A task description providing additional context about the fitting process. Defaults to None.
Returns:
LanguageModel: The fitted language model.
"""
base_model.fit(target, task_description)
return base_model

def prompt(self, base_model: LanguageModel, prompt_text: str) -> LanguageModel:
"""Modify the base model's distribution using a textual prompt.
Args:
base_model (LanguageModel): The language model to be modified.
prompt_text (str): The textual prompt to influence the model's output.
Returns:
LanguageModel: The modified language model.
"""
# Prompting implementation
# Integrate prompt_text into base_model generation process
return base_model

def reinforcement_learning(
self, base_model: LanguageModel, reward_function
) -> LanguageModel:
"""Apply reinforcement learning to modify a model based on the provided reward function.
Args:
base_model (LanguageModel): The language model to be modified.
reward_function (_type_): The reward function for the reinforcement learning process.
Returns:
LanguageModel: The modified language model.
"""
# Reinforcement Learning implementation
return base_model

def retrieval_augmented_generation(
self, base_model: LanguageModel, data
) -> LanguageModel:
"""Apply retrieval-augmented generation over a dataset to enhance the model's generation.
Args:
base_model (LanguageModel): The language model to be enhanced.
data (_type_): The dataset to be used for retrieval-augmented generation.
Returns:
LanguageModel: The enhanced language model.
"""
# RAG implementation
return base_model

def ensemble(
self, models: list[LanguageModel], weights: list[float]
) -> LanguageModel:
"""Combine several models into one by ensembling their outputs based on specified weights.
Args:
models (list[LanguageModel]): A list of language models to be ensembled.
weights (list[float]): A list of weights corresponding to each model.
Returns:
LanguageModel: A new ensembled language model.
"""
# ensembled_model = LanguageModel()
# Ensembling implementation
return models[0] # Ideally return the ensembled model

def filter(self, base_model: LanguageModel, filtering_rule) -> LanguageModel:
"""Filter down the model's space according to a filtering rule.
Args:
base_model (LanguageModel): The language model to be filtered.
filtering_rule (_type_): A rule or criteria to filter the model's space.
Returns:
LanguageModel: The filtered language model.
"""
# Filtering implementation
return base_model
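
For illustration only (not part of this commit), one common way to realize ensemble() is as a weighted mixture of the member models' distributions. The sketch below assumes each member exposes the calculate_probability method added to LanguageModel in this commit; the SupportsProbability protocol and mixture_probability helper are hypothetical names, not llments API.

# Hypothetical sketch: weighted-mixture ensembling of model distributions.
from typing import Protocol


class SupportsProbability(Protocol):
    """Any model that can score an output, e.g. a LanguageModel subclass."""

    def calculate_probability(self, output: str) -> float: ...


def mixture_probability(
    models: list[SupportsProbability], weights: list[float], output: str
) -> float:
    """Return P(output) under a weighted mixture of the given models."""
    total = sum(weights)  # normalize so the weights form a proper mixture
    return sum(
        (w / total) * m.calculate_probability(output)
        for m, w in zip(models, weights)
    )

For example, mixture_probability([m1, m2], [0.5, 0.5], "hello") would average the two models' scores for "hello".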
21 changes: 21 additions & 0 deletions llments/lm/lm.py
@@ -26,3 +26,24 @@ def fit(
The fitted language model.
"""
...

@abc.abstractmethod
    def calculate_probability(self, output: str) -> float:
        """Calculate the probability of an output given the language model.

        Args:
            output (str): The output sequence for which the probability is calculated.

        Returns:
            float: The probability of the output given the language model.
        """
...

@abc.abstractmethod
def sample(self) -> str:
"""Sample an output given the language model.
Returns:
str: A sampled output sequence from the language model.
"""
...
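
For context (not part of this commit), here is a minimal sketch of how a concrete subclass could satisfy the two new abstract methods, backed by a toy table of output probabilities. ToyLanguageModelBase stands in for llments.lm.lm.LanguageModel so the snippet is self-contained, and all class names below are hypothetical.

# Hypothetical sketch: a stand-in base mirroring the two new abstract methods,
# plus a toy subclass backed by a fixed table of output probabilities.
import abc
import random


class ToyLanguageModelBase(abc.ABC):
    """Stand-in for the LanguageModel interface (probability and sampling only)."""

    @abc.abstractmethod
    def calculate_probability(self, output: str) -> float:
        ...

    @abc.abstractmethod
    def sample(self) -> str:
        ...


class TableModel(ToyLanguageModelBase):
    def __init__(self, probabilities: dict[str, float]):
        self.probabilities = probabilities  # output sequence -> probability

    def calculate_probability(self, output: str) -> float:
        # Outputs missing from the table get probability 0.0 in this toy model.
        return self.probabilities.get(output, 0.0)

    def sample(self) -> str:
        outputs = list(self.probabilities)
        weights = list(self.probabilities.values())
        return random.choices(outputs, weights=weights, k=1)[0]


model = TableModel({"hello world": 0.7, "goodbye world": 0.3})
print(model.calculate_probability("hello world"))  # 0.7
print(model.sample())  # usually "hello world"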
