diff --git a/config_runner/configs/1_31_23/door/sacadrl.json b/config_runner/configs/1_31_23/door/sacadrl.json
index d09fd05..cba8866 100644
--- a/config_runner/configs/1_31_23/door/sacadrl.json
+++ b/config_runner/configs/1_31_23/door/sacadrl.json
@@ -1,10 +1,4 @@
 {
-  "num_agents": [[0,2]],
-  "eval_num_agents": [3, 4, 5, 7, 10, 20],
-  "train_length": 250000,
-  "ending_eval_trials": 25,
-  "eval_frequency": 0,
-  "intermediate_eval_trials": 25,
   "policy_algo_sb3_contrib": false,
   "policy_algo_name": "PPO",
   "policy_name": "MlpPolicy",
diff --git a/log/.DS_Store b/log/.DS_Store
new file mode 100644
index 0000000..df37fdf
Binary files /dev/null and b/log/.DS_Store differ
diff --git a/log/leaderboard.py b/log/leaderboard.py
index 6ac90da..30058b7 100644
--- a/log/leaderboard.py
+++ b/log/leaderboard.py
@@ -1,9 +1,60 @@
-from flask import Flask, render_template
+from flask import Flask, render_template, request
 import json
 import os
+import threading
+from queue import Queue
+from pathlib import Path
+import sys
+parent_dir = Path(__file__).resolve().parent.parent
+sys.path.insert(1, str(parent_dir))  # make config_runner importable from the repo root
+from config_runner.run import run
+
 app = Flask(__name__)
 
+configs = Queue()  # training configs consumed by the background runner thread
+
+@app.route('/form')
+def form():
+    reports = get_eval_reports()
+    return render_template("form.html", reports=reports)
+
+@app.route('/upload', methods=['POST'])
+def upload():
+    # Access the uploaded file and the form fields.
+    checkpoint_file = request.files['checkpoint']
+    policy_algo_sb3_contrib = request.form['policy_algo_sb3_contrib']
+    policy_algo_name = request.form['policy_algo_name']
+    policy_name = request.form['policy_name']
+    n_steps = request.form['n_steps']
+
+    # The file and form data could be saved and processed here; for now
+    # this route only returns a confirmation page.
+
+    # Process and save the file, etc.:
+    # with open("log/template.json", 'r') as json_file:
+    #     json_data = json.load(json_file)
+
+    # os.makedirs("data", exist_ok=True)
+    # checkpoint_file.save(f"data/{policy_name}.zip")
+
+    # json_data['policy_algo_name'] = policy_algo_name
+    # json_data['policy_name'] = policy_name
+    # json_data['continue_from'] = f"data/{policy_name}.zip"
+    # json_data['policy_algo_sb3_contrib'] = policy_algo_sb3_contrib
+    # json_data['policy_algo_kwargs'] = {"n_steps": n_steps}
+
+    # os.makedirs("config_runner/configs/log", exist_ok=True)
+    # with open(f"config_runner/configs/log/{policy_name}.json", 'w') as updated_json_file:
+    #     json.dump(json_data, updated_json_file, indent=2)
+    # configs.put(f"log/{policy_name}.json")
+
+    return render_template('popup.html',
+                           policy_algo_sb3_contrib=policy_algo_sb3_contrib,
+                           policy_algo_name=policy_algo_name,
+                           policy_name=policy_name,
+                           n_steps=n_steps)
+
 def get_eval_reports():
     log_directory = os.path.join('..', 'log')
     reports = []
@@ -22,10 +73,21 @@ def get_eval_reports():
             reports.append(report_data['3'])
     return reports
 
-@app.route('/')
+@app.route('/leaderboard')
 def leaderboard():
     reports = get_eval_reports()
     return render_template('leaderboard.html', reports=reports)
 
+def runner():
+    print("Started Thread")
+    while True:
+        print("Waiting")
+        config = configs.get(block=True)  # block until /upload queues a config
+        print("Got config: ", config)
+        run([config])
+
 if __name__ == "__main__":
-    app.run(debug=True, port=8000)
\ No newline at end of file
+    runner_thread = threading.Thread(target=runner)
+    runner_thread.start()
+    app.run(debug=True, port=8080)
+    runner_thread.join()
\ No newline at end of file
diff --git a/log/template.json b/log/template.json
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/log/template.json
@@ -0,0 +1 @@
+{}
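Not part of the diff above: a minimal sketch of how the new `/upload` endpoint might be exercised once `log/leaderboard.py` is running on port 8080. The multipart field names mirror the ones read in `upload()`; the checkpoint path, policy name, and `n_steps` value are hypothetical placeholders.

```python
import requests

# Hypothetical SB3 checkpoint archive; any .zip saved by the training code would do.
with open("my_policy.zip", "rb") as checkpoint:
    resp = requests.post(
        "http://localhost:8080/upload",
        files={"checkpoint": checkpoint},
        data={
            "policy_algo_sb3_contrib": "false",
            "policy_algo_name": "PPO",
            "policy_name": "my_policy",
            "n_steps": "4096",
        },
    )
print(resp.status_code)  # expect 200, with the rendered popup.html as the body
```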