Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 7 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,10 @@ build
dist
.nyunservice
tests/
nyuntam/text_generation/
*text_generation
/.nyunservices
/datasets
/jobs
/logs
/models
/.cache
13 changes: 13 additions & 0 deletions cli/cli.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
import typer
import time
import psutil
from pathlib import Path
from version import __version__
from cli.docs import NYUN_TRADEMARK
Expand Down Expand Up @@ -85,6 +87,9 @@ def run(
You need to provide the path to the YAML or JSON script file you want to run.
The script will be executed within the initialized workspace.
"""
start_time = time.time()
process = psutil.Process(os.getpid())
before_memory = process.memory_info().rss / (1024 ** 2)
if not file_paths:
typer.echo("Please provide the path(s) to the script file.")
raise typer.Abort()
Expand Down Expand Up @@ -152,6 +157,14 @@ def run(
except Exception as e:
typer.echo(f"Failed: {str(e)}")
raise typer.Abort()
end_time = time.time()

after_memory = process.memory_info().rss / (1024 ** 2)
memory_used = after_memory - before_memory
print(f"Memory Used: {memory_used:.2f} MB")

execution_time = end_time - start_time
print(f"Execution Time: {execution_time:.6f} seconds")


@app.command(help="Show the version of the Nyun CLI.")
Expand Down
8 changes: 6 additions & 2 deletions nyuntam/factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ def create_from_json(

@classmethod
def create_from_yaml(
cls, path: Union[str, Path]
cls, path: Union[str, Path], flag_dict = True
) -> Optional[Union["Factory", None]]:
"""Create a Factory instance from a YAML file."""

Expand All @@ -145,7 +145,11 @@ def create_from_yaml(

with open(path, "r") as f:
args = yaml.safe_load(f)
return cls.create_from_dict(args)
if flag_dict:
return cls.create_from_dict(args)
else:
return args


@property
def algorithm(self) -> Algorithm:
Expand Down
5 changes: 4 additions & 1 deletion nyuntam/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
from nyuntam.factory import Factory
from nyuntam.utils.logger import set_logger
from nyuntam.commands import get_args
from nyuntam.utils.benchmark import create_benchmarks

set_logger()

Expand All @@ -18,7 +19,8 @@ def main():
factory = Factory.create_from_yaml(args.yaml_path)
else:
factory = Factory.create_from_json(args.json_path)


arguments = Factory.create_from_yaml(args.yaml_path, flag_dict = False)
except Exception as e:
logging.exception(f"Failed to create Factory instance: {e}")
raise
Expand All @@ -32,6 +34,7 @@ def main():
try:
factory.run()
logging.info("Job completed successfully.")
create_benchmarks(arguments)
except Exception as e:
logging.exception(f"Failed to run job: {e}")
raise
Expand Down
49 changes: 49 additions & 0 deletions nyuntam/utils/benchmark.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
import os
import logging

def create_benchmarks(
    args: dict,
    log_file_path: str = "/user_data/logs/compression_results.log",
) -> None:
    """Compute and record model-compression benchmarks after a job run.

    Compares the on-disk size of the original model
    (``<OUTPUT_PATH>/<vendor>/<name>/model.safetensors``) with the
    compressed/quantised model (``<MODEL_PATH>/model.safetensors``) and
    appends the sizes plus their compression ratio to ``log_file_path``.

    Args:
        args: Job configuration mapping; must contain ``OUTPUT_PATH``,
            ``MODEL_PATH`` and ``MODEL`` (a ``"vendor/name"`` identifier).
        log_file_path: File the benchmark summary is appended to. Defaults
            to the historical hard-coded location, so existing callers are
            unaffected.

    Returns:
        None. File-related failures are logged rather than raised so that
        benchmarking never aborts an otherwise successful job.
    """
    # NOTE(review): deliberately no logging.basicConfig() here — calling it
    # from a library function is a silent no-op once the root logger is
    # configured (main.py calls set_logger()), and would otherwise hijack
    # global logging for the whole process. The benchmark summary is written
    # to log_file_path explicitly below instead.
    output_root = args["OUTPUT_PATH"]
    quantised_root = args["MODEL_PATH"]
    # MODEL is "vendor/name"; maxsplit=1 keeps any further "/" inside name.
    vendor, name = args["MODEL"].split("/", 1)

    original_model_path = os.path.join(output_root, vendor, name, "model.safetensors")
    quantised_model_path = os.path.join(quantised_root, "model.safetensors")

    logging.info(f"Model Path: {original_model_path}")
    logging.info(f"Quantised Model Path: {quantised_model_path}")

    try:
        mb = 1024**2
        original_size = os.path.getsize(original_model_path) / mb  # MB
        quantized_size = os.path.getsize(quantised_model_path) / mb  # MB

        if quantized_size > 0:
            # The original code attached the ternary to the whole implicitly
            # concatenated f-string, so the size lines were silently dropped
            # on the error path; build the two messages explicitly instead.
            log_message = (
                f"Original Model Size: {original_size:.2f} MB\n"
                f"Quantized Model Size: {quantized_size:.2f} MB\n"
                f"Compression Ratio: {original_size / quantized_size:.2f}\n"
            )
        else:
            log_message = "Error: Quantized model size is zero"

        logging.info(log_message)

        # Append the summary to the results file exactly once (the original
        # wrote it twice: via the root file handler and again manually).
        with open(log_file_path, "a") as log_file:
            log_file.write("\n" + log_message + "\n" + "=" * 40 + "\n")

    except FileNotFoundError as e:
        logging.error(f"File not found: {e}")
    except Exception as e:
        logging.error(f"Unexpected error: {e}")