Skip to content

Commit 2a7d376

Browse files
feat: add list-models command
1 parent 9682896 commit 2a7d376

3 files changed

Lines changed: 194 additions & 0 deletions

File tree

packages/uipath/src/uipath/_cli/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@
4545
"server": "cli_server",
4646
"register": "cli_register",
4747
"debug": "cli_debug",
48+
"list-models": "cli_list_models",
4849
"assets": "services.cli_assets",
4950
"buckets": "services.cli_buckets",
5051
"context-grounding": "services.cli_context_grounding",
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import click
2+
3+
from ._utils._service_base import ServiceCommandBase, service_command
4+
5+
6+
@click.command(name="list-models")
@click.option(
    "--format",
    type=click.Choice(["json", "table", "csv"]),
    help="Output format (overrides global)",
)
@click.option("--output", "-o", type=click.Path(), help="Output file")
@service_command
async def list_models(ctx, format, output):
    """List available LLM models."""
    # NOTE(review): `format` and `output` are accepted but never read in this
    # body; presumably the @service_command wrapper consumes them (or the ctx)
    # to render and write the returned payload — confirm in _service_base.
    client = ServiceCommandBase.get_client(ctx)
    # Fetch the model catalog from the AgentHub service and hand the raw
    # result back to the wrapper for formatting.
    return await client.agenthub.get_available_llm_models_async()
Lines changed: 176 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,176 @@
1+
"""Integration tests for the list-models CLI command.
2+
3+
These tests verify end-to-end functionality of the list-models command,
4+
including output formatting, error handling, and client invocation.
5+
"""
6+
7+
import json
8+
import os
9+
from unittest.mock import AsyncMock, MagicMock, patch
10+
11+
import pytest
12+
from click.testing import CliRunner
13+
14+
from uipath._cli import cli
15+
from uipath.platform.agenthub import LlmModel
16+
17+
18+
@pytest.fixture
def runner():
    """Yield a fresh Click test runner for invoking CLI commands."""
    return CliRunner()
22+
23+
24+
@pytest.fixture
def mock_client():
    """Patch the UiPath client class and yield the mocked instance.

    The mock exposes an ``agenthub`` attribute whose
    ``get_available_llm_models_async`` coroutine is configured per test.
    """
    with patch("uipath.platform._uipath.UiPath") as patched_cls:
        instance = MagicMock()
        instance.agenthub = MagicMock()
        instance.agenthub.get_available_llm_models_async = AsyncMock()
        patched_cls.return_value = instance
        yield instance
36+
37+
def _make_models() -> list[LlmModel]:
    """Return two sample LlmModel records covering distinct vendors."""
    specs = [("gpt-4o-mini", "openai"), ("claude-sonnet-4-5", "anthropic")]
    return [LlmModel(modelName=name, vendor=vendor) for name, vendor in specs]
43+
44+
45+
def test_list_models_basic(runner, mock_client, mock_env_vars):
    """Default (table) output contains every model name and vendor."""
    mock_client.agenthub.get_available_llm_models_async.return_value = _make_models()

    result = runner.invoke(cli, ["list-models"])

    assert result.exit_code == 0
    for fragment in ("gpt-4o-mini", "claude-sonnet-4-5", "openai", "anthropic"):
        assert fragment in result.output
    mock_client.agenthub.get_available_llm_models_async.assert_awaited_once()
57+
58+
59+
def test_list_models_empty(runner, mock_client, mock_env_vars):
    """An empty model list renders the formatter's no-results placeholder."""
    mock_client.agenthub.get_available_llm_models_async.return_value = []

    result = runner.invoke(cli, ["list-models"])

    assert result.exit_code == 0
    assert "No results" in result.output
67+
68+
69+
def test_list_models_json_format(runner, mock_client, mock_env_vars):
    """--format json produces a JSON array with model_name/vendor fields."""
    mock_client.agenthub.get_available_llm_models_async.return_value = _make_models()

    result = runner.invoke(cli, ["list-models", "--format", "json"])

    assert result.exit_code == 0
    records = json.loads(result.output)
    assert isinstance(records, list)
    names = {record["model_name"] for record in records}
    vendors = {record["vendor"] for record in records}
    assert names == {"gpt-4o-mini", "claude-sonnet-4-5"}
    assert vendors == {"openai", "anthropic"}
80+
81+
82+
def test_list_models_csv_format(runner, mock_client, mock_env_vars):
    """--format csv writes a header row followed by one row per model."""
    mock_client.agenthub.get_available_llm_models_async.return_value = _make_models()

    result = runner.invoke(cli, ["list-models", "--format", "csv"])

    assert result.exit_code == 0
    rows = [row for row in result.output.splitlines() if row.strip()]
    assert len(rows) == 3  # header plus two data rows
    header, data = rows[0], rows[1:]
    assert "model_name" in header
    assert "vendor" in header
    joined_data = "\n".join(data)
    assert "gpt-4o-mini" in joined_data
    assert "claude-sonnet-4-5" in joined_data
96+
97+
98+
def test_list_models_global_format_flag(runner, mock_client, mock_env_vars):
    """The group-level --format option applies to the subcommand output."""
    mock_client.agenthub.get_available_llm_models_async.return_value = _make_models()

    result = runner.invoke(cli, ["--format", "json", "list-models"])

    assert result.exit_code == 0
    parsed = json.loads(result.output)
    assert isinstance(parsed, list)
    assert len(parsed) == 2
108+
109+
110+
def test_list_models_output_to_file(runner, mock_client, mock_env_vars, tmp_path):
    """--output writes the formatted result to the target path and confirms it."""
    mock_client.agenthub.get_available_llm_models_async.return_value = _make_models()
    destination = tmp_path / "models.json"

    result = runner.invoke(
        cli,
        ["list-models", "--format", "json", "--output", str(destination)],
    )

    assert result.exit_code == 0
    assert destination.exists()
    saved = json.loads(destination.read_text(encoding="utf-8"))
    assert {entry["model_name"] for entry in saved} == {
        "gpt-4o-mini",
        "claude-sonnet-4-5",
    }
    assert f"Output written to {destination}" in result.output
125+
126+
127+
def test_list_models_missing_url(runner, monkeypatch):
    """Without UIPATH_URL the command fails with a configuration error."""
    monkeypatch.setenv("UIPATH_ACCESS_TOKEN", "mock_token")
    monkeypatch.delenv("UIPATH_URL", raising=False)

    result = runner.invoke(cli, ["list-models"])

    assert result.exit_code != 0
    assert "UIPATH_URL not configured" in result.output
136+
137+
138+
def test_list_models_missing_token(runner, monkeypatch):
    """Without UIPATH_ACCESS_TOKEN the command fails with an auth error."""
    monkeypatch.delenv("UIPATH_ACCESS_TOKEN", raising=False)
    monkeypatch.setenv("UIPATH_URL", "https://cloud.uipath.com/org/tenant")

    result = runner.invoke(cli, ["list-models"])

    assert result.exit_code != 0
    assert "Authentication required" in result.output
147+
148+
149+
def test_list_models_service_error(runner, mock_client, mock_env_vars):
    """A service-layer exception surfaces as a non-zero-exit CLI error."""
    failure = RuntimeError("boom")
    mock_client.agenthub.get_available_llm_models_async.side_effect = failure

    result = runner.invoke(cli, ["list-models"])

    assert result.exit_code != 0
    assert "boom" in result.output
159+
160+
161+
def test_list_models_help_text(runner):
    """--help shows the command's docstring summary and both options."""
    result = runner.invoke(cli, ["list-models", "--help"])

    assert result.exit_code == 0
    for fragment in ("List available LLM models", "--format", "--output"):
        assert fragment in result.output
169+
170+
171+
def test_list_models_registered_in_cli(runner):
    """The top-level group's help output lists the list-models subcommand."""
    result = runner.invoke(cli, ["--help"])

    assert result.exit_code == 0
    assert "list-models" in result.output

0 commit comments

Comments
 (0)