diff --git a/README.md b/README.md
index 8397c605..e58d107f 100644
--- a/README.md
+++ b/README.md
@@ -278,13 +278,15 @@ dynamic benchmarks.
 ## Variables
 
 Here's a list of relevant env. variables that are used by AgentLab:
-- `OPEAI_API_KEY` which is used by default for OpenAI LLMs.
+- `OPENAI_API_KEY` which is used by default for OpenAI LLMs.
 - `AZURE_OPENAI_API_KEY`, used by default for AzureOpenAI LLMs.
 - `AZURE_OPENAI_ENDPOINT` to specify your Azure endpoint.
 - `OPENAI_API_VERSION` for the Azure API.
 - `OPENROUTER_API_KEY` for the Openrouter API
 - `AGENTLAB_EXP_ROOT`, desired path for your experiments to be stored, defaults to `~/agentlab-results`.
 - `AGENTXRAY_SHARE_GRADIO`, which prompts AgentXRay to open a public tunnel on launch.
+- `RAY_PUBLIC_DASHBOARD` (true / false), used to specify whether the ray dashboard should be made publicly accessible (`0.0.0.0`) or not (`127.0.0.1`).
+- `RAY_DASHBOARD_PORT` (int), used to specify the port on which the ray dashboard should be accessible.
 
 ## Misc
 
diff --git a/src/agentlab/experiments/launch_exp.py b/src/agentlab/experiments/launch_exp.py
index f14ae4c7..199454c5 100644
--- a/src/agentlab/experiments/launch_exp.py
+++ b/src/agentlab/experiments/launch_exp.py
@@ -9,6 +9,7 @@
 from agentlab.experiments.loop import ExpArgs, yield_all_exp_results
 
 RAY_PUBLIC_DASHBOARD = os.environ.get("RAY_PUBLIC_DASHBOARD", "false") == "true"
+RAY_DASHBOARD_PORT = os.environ.get("RAY_DASHBOARD_PORT")
 
 
 def run_experiments(
@@ -86,7 +87,9 @@ def run_experiments(
         from agentlab.experiments.graph_execution_ray import execute_task_graph, ray
 
         ray.init(
-            num_cpus=n_jobs, dashboard_host="0.0.0.0" if RAY_PUBLIC_DASHBOARD else "127.0.0.1"
+            num_cpus=n_jobs,
+            dashboard_host="0.0.0.0" if RAY_PUBLIC_DASHBOARD else "127.0.0.1",
+            dashboard_port=None if RAY_DASHBOARD_PORT is None else int(RAY_DASHBOARD_PORT),
         )
         try:
             execute_task_graph(exp_args_list, avg_step_timeout=avg_step_timeout)
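
Usage note (not part of the patch): both new variables are read once at module import time by the `os.environ.get` calls at the top of `launch_exp.py`, so they must be present in the environment before `agentlab.experiments.launch_exp` is imported. Below is a minimal sketch of how one might set them from Python; the values `true` and `8266` are purely illustrative choices, not defaults of the library.

```python
import os

# Hypothetical example values: "true" binds the Ray dashboard to 0.0.0.0,
# and the port string is forwarded to ray.init(dashboard_port=...).
os.environ["RAY_PUBLIC_DASHBOARD"] = "true"
os.environ["RAY_DASHBOARD_PORT"] = "8266"

# Import only after setting the variables, since the module-level
# os.environ.get calls in launch_exp.py run at import time.
from agentlab.experiments import launch_exp
```

Leaving `RAY_DASHBOARD_PORT` unset keeps `dashboard_port=None`, so Ray falls back to its own default port selection, and leaving `RAY_PUBLIC_DASHBOARD` unset keeps the dashboard on `127.0.0.1` as before.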