diff --git a/CLAUDE.md b/CLAUDE.md index ad14adb0..8c5743d9 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -84,13 +84,33 @@ The Roboflow Python SDK follows a hierarchical object model that mirrors the Rob - **rfapi** (`roboflow/adapters/rfapi.py`) - Low-level API communication - **deploymentapi** (`roboflow/adapters/deploymentapi.py`) - Model deployment operations -### CLI Interface - -The `roboflow` command line tool (`roboflow/roboflowpy.py`) provides: -- Authentication: `roboflow login` -- Dataset operations: `roboflow download`, `roboflow upload`, `roboflow import` -- Inference: `roboflow infer` -- Project/workspace management: `roboflow project`, `roboflow workspace` +### CLI Package (`roboflow/cli/`) + +The CLI is a modular package with auto-discovered handler modules. `roboflow/roboflowpy.py` is a backwards-compatibility shim that delegates to `roboflow.cli.main`. + +**Package structure:** +- `__init__.py` — Root parser with global flags (`--json`, `--workspace`, `--api-key`, `--quiet`), auto-discovery via `pkgutil.iter_modules`, custom `_CleanHelpFormatter`, and `_reorder_argv` for flexible flag positioning +- `_output.py` — `output(args, data, text)` for JSON/text output, `output_error(args, msg, hint, exit_code)` for structured errors, `suppress_sdk_output()` to silence SDK noise, `stub()` for unimplemented commands +- `_table.py` — `format_table(rows, columns)` for columnar list output +- `_resolver.py` — `resolve_resource(shorthand)` for parsing `project`, `ws/project`, `ws/project/3` +- `handlers/` — One file per command group (auto-discovered). `_aliases.py` registers backwards-compat top-level commands (loaded last) + +**Adding a new command:** +1. Create `roboflow/cli/handlers/mycommand.py` +2. Export `register(subparsers)` — it will be auto-discovered +3. Use lazy imports for heavy dependencies (inside handler functions, not at module top level) +4. Use `output()` for all output, `output_error()` for all errors +5. 
Wrap SDK calls in `with suppress_sdk_output():` to prevent "loading..." noise +6. Add tests in `tests/cli/test_mycommand_handler.py` + +**Agent experience requirements for all CLI commands:** +- Support `--json` for structured output (stable schema) +- No interactive prompts when all required flags are provided +- Structured error output: `{"error": {"message": "...", "hint": "..."}}` on stderr +- Exit codes: 0 = success, 1 = error, 2 = auth error, 3 = not found +- Actionable error messages: always tell the user what went wrong AND what to do + +**Documentation policy:** `CLI-COMMANDS.md` in this repo is a quickstart only. The full command reference lives in `roboflow-product-docs` (published to docs.roboflow.com). When adding commands, update both. ### Key Design Patterns @@ -98,12 +118,15 @@ The `roboflow` command line tool (`roboflow/roboflowpy.py`) provides: 2. **API Key Flow**: API key is passed down through the object hierarchy 3. **Format Flexibility**: Supports multiple dataset formats (YOLO, COCO, Pascal VOC, etc.) 4. **Batch Operations**: Upload and download operations support concurrent processing +5. **CLI Noun-Verb Pattern**: Commands follow `roboflow ` (e.g. `roboflow project list`). Common operations have top-level aliases (`login`, `upload`, `download`) +6. **CLI Auto-Discovery**: Handler modules in `roboflow/cli/handlers/` are loaded automatically — no registration list to maintain +7. 
**Backwards Compatibility**: Legacy command names and flag signatures are preserved as hidden aliases ## Project Configuration - **Python Version**: 3.8+ - **Main Dependencies**: See `requirements.txt` -- **Entry Point**: `roboflow=roboflow.roboflowpy:main` +- **Entry Point**: `roboflow=roboflow.roboflowpy:main` (shim delegates to `roboflow.cli.main`) - **Code Style**: Enforced by ruff with Google docstring convention - **Type Checking**: mypy configured for Python 3.8 diff --git a/CLI-COMMANDS.md b/CLI-COMMANDS.md index 1d24c277..fde4c621 100644 --- a/CLI-COMMANDS.md +++ b/CLI-COMMANDS.md @@ -1,310 +1,130 @@ -# The roboflow-python command line -This has the same capabilities of the [roboflow node cli](https://www.npmjs.com/package/roboflow-cli) so that our users don't need to install two different tools. +# Roboflow CLI -## See available commands +The `roboflow` command line tool provides access to the Roboflow platform for managing computer vision projects, datasets, models, and deployments. It's designed for both human developers and AI coding agents. -```bash -$ roboflow --help -``` - -``` -usage: roboflow [-h] {login,download,upload,import,infer,search-export,project,workspace} ... - -Welcome to the roboflow CLI: computer vision at your fingertips 🪄 - -options: - -h, --help show this help message and exit - -subcommands: - {login,download,upload,import,infer,search-export,project,workspace} - login Log in to Roboflow - download Download a dataset version from your workspace or Roboflow Universe. - upload Upload a single image to a dataset - import Import a dataset from a local folder - infer perform inference on an image - search-export Export search results as a dataset - project project related commands. type 'roboflow project' to see detailed command help - workspace workspace related commands. 
type 'roboflow workspace' to see detailed command help -``` +> **Full reference:** [docs.roboflow.com/deploy/sdks/python-cli](https://docs.roboflow.com/deploy/sdks/python-cli) -## Authentication - -You need to authenticate first +## Install & authenticate ```bash -$ roboflow login +pip install roboflow +export ROBOFLOW_API_KEY=rf_xxxxx # recommended for scripts and agents +roboflow auth login # or interactive login ``` -``` -visit https://app.roboflow.com/auth-cli to get your authentication token. -Paste the authentication token here: -``` -Open that link on your browser, get the token, paste it on the terminal. -The credentials get saved to `~/.config/roboflow/config.json` +## Global flags -## Display help usage for other commands +| Flag | Short | Description | +|------|-------|-------------| +| `--json` | `-j` | Structured JSON output (for agents and piping) | +| `--api-key` | `-k` | API key override | +| `--workspace` | `-w` | Workspace override | +| `--quiet` | `-q` | Suppress progress bars and status messages | +| `--version` | | Show version | -"How do I download stuff?" +Flags work in any position: `roboflow project list --json` and `roboflow --json project list` are equivalent. -```bash -$ roboflow download --help -``` -``` -usage: roboflow download [-h] [-f FORMAT] [-l LOCATION] datasetUrl - -positional arguments: - datasetUrl Dataset URL (e.g., `roboflow-100/cells-uyemf/2`) +## Quick examples -options: - -h, --help show this help message and exit - -f FORMAT Specify the format to download the version. Available options: [coco, yolov5pytorch, yolov7pytorch, my-yolov6, darknet, - voc, tfrecord, createml, clip, multiclass, coco-segmentation, yolo5-obb, png-mask-semantic, yolov8, yolov9] - -l LOCATION Location to download the dataset -``` - -"How do I import a dataset into my workspace?" 
+### Create a project and upload images ```bash -$ roboflow import --help -``` - -``` -usage: roboflow import [-h] [-w WORKSPACE] [-p PROJECT] [-c CONCURRENCY] [-f FORMAT] folder - -positional arguments: - folder filesystem path to a folder that contains your dataset - -options: - -h, --help show this help message and exit - -w WORKSPACE specify a workspace url or id (will use default workspace if not specified) - -p PROJECT project will be created if it does not exist - -c CONCURRENCY how many image uploads to perform concurrently (default: 10) - -n BATCH_NAME name of batch to upload to within project +roboflow project create my-project --type object-detection +roboflow image upload photo.jpg -p my-project +roboflow image upload ./dataset-folder/ -p my-project # smart: detects directory ``` -## Example: download dataset - -Download [Joseph's chess dataset](https://universe.roboflow.com/joseph-nelson/chess-pieces-new/dataset/25) from Roboflow Universe in VOC format: +### Download a dataset ```bash -$ roboflow download -f voc -l ~/tmp/chess joseph-nelson/chess-pieces-new/25 -``` +roboflow version download my-workspace/my-project/3 -f yolov8 +roboflow download my-workspace/my-project/3 -f coco # alias ``` -loading Roboflow workspace... -loading Roboflow project... 
-Downloading Dataset Version Zip in /Users/tony/tmp/chess to voc:: 100%|██████████████████████████| 19178/19178 [00:01<00:00, 10424.62it/s] -Extracting Dataset Version Zip to /Users/tony/tmp/chess in voc:: 100%|██████████████████████████████| 1391/1391 [00:00<00:00, 8992.30it/s] -``` -```bash -$ ls -lh ~/tmp/chess -total 16 --rw-r--r--@ 1 tony staff 1.8K Jan 5 10:32 README.dataset.txt --rw-r--r--@ 1 tony staff 562B Jan 5 10:32 README.roboflow.txt -drwxr-xr-x@ 60 tony staff 1.9K Jan 5 10:32 test -drwxr-xr-x@ 1214 tony staff 38K Jan 5 10:32 train -drwxr-xr-x@ 118 tony staff 3.7K Jan 5 10:32 valid -``` - -## Example: import a dataset - -Upload a dataset from a folder to a project in your workspace +### Run inference ```bash -roboflow import -w my-workspace -p my-chess ~/tmp/chess -``` - +roboflow infer photo.jpg -m my-project/3 ``` -loading Roboflow workspace... -loading Roboflow project... -Uploading to existing project my-workspace/my-chess -[UPLOADED] /home/jonny/tmp/chess/102_jpg.rf.205e2a0cb0fabbbf32b4a936e2d6f1e4.jpg (sFpTfnyLpLA8QcqPwdvf) / annotations = OK -[UPLOADED] /home/jonny/tmp/chess/2_jpg.rf.c1a4ed4e0c3947743b22ede09f7e1212.jpg (wDA2yxnLJWY5YwYwO7dP) / annotations = OK -[UPLOADED] /home/jonny/tmp/chess/221_jpg.rf.e841c9bbb31a135b8f6274643f522686.jpg (UCv7MeuvEqo7PYElatEn) / annotations = OK -[UPLOADED] /home/jonny/tmp/chess/10_jpg.rf.841f3ccdfc4b93ee68566e602025c03f.jpg (HnkCpUcYzxStvQF49VQW) / annotations = OK -[UPLOADED] /home/jonny/tmp/chess/130_jpg.rf.29f756d510d2e488eb5e12769c7707ff.jpg (WxrFIhfaJ9H1JvaXMgfF) / annotations = OK -[UPLOADED] /home/jonny/tmp/chess/112_jpg.rf.1a6e7b87410fa3f787f10e82bd02b54e.jpg (7tWtAn573cKrefeg5pIO) / annotations = OK -``` - -## Example: upload a single image -Upload a single image to a project, optionally with annotations, tags, and metadata: +### Search and export ```bash -roboflow upload image.jpg -p my-project -s train +roboflow search "tag:reviewed" --limit 100 +roboflow search "class:person" --export -f coco -l 
./export/ ``` -Upload with custom metadata (JSON string): +### Browse resources ```bash -roboflow upload image.jpg -p my-project -M '{"camera_id":"cam001","location":"warehouse-3"}' +roboflow workspace list +roboflow project list +roboflow project get my-project +roboflow version list -p my-project +roboflow model list -p my-project ``` -Upload with annotation and tags: - -```bash -roboflow upload image.jpg -p my-project -a annotation.xml -t "outdoor,daytime" -s valid -``` +## JSON output for agents -## Example: list workspaces -List the workspaces you have access to +Every command supports `--json` for structured output that's safe to pipe: ```bash -$ roboflow workspace list +# stdout: JSON data, stderr: JSON errors, exit codes: 0/1/2/3 +roboflow --json project list | python3 -c "import sys,json; print(json.load(sys.stdin))" +roboflow --json project get nonexistent 2>/dev/null # stderr gets the error JSON ``` -``` -tonyprivate - link: https://app.roboflow.com/tonyprivate - id: tonyprivate +Error schema is consistent: `{"error": {"message": "...", "hint": "..."}}` -wolfodorpythontests - link: https://app.roboflow.com/wolfodorpythontests - id: wolfodorpythontests +## Resource shorthand -test minimize - link: https://app.roboflow.com/test-minimize - id: test-minimize -``` +Resources can be addressed with compact identifiers: -## Example: get workspace details +| Shorthand | Resolves to | +|-----------|-------------| +| `my-project` | default workspace + project | +| `my-ws/my-project` | explicit workspace + project | +| `my-project/3` | default workspace + project + version 3 | +| `my-ws/my-project/3` | explicit workspace + project + version 3 | -```bash -$ roboflow workspace get tonyprivate -``` +Version numbers are always numeric — that's how `x/y` is disambiguated between `workspace/project` and `project/version`. 
-``` -{ - "workspace": { - "name": "tonyprivate", - "url": "tonyprivate", - "members": 4, - "projects": [ - { - "id": "tonyprivate/annotation-upload", - "type": "object-detection", - "name": "annotation-upload", - "created": 1685199749.708, - "updated": 1695910515.48, - "images": 1, - (...) - } - ] - } -} -``` - -## Example: list projects - -```bash -roboflow project list -w tonyprivate -``` -``` -annotation-upload - link: https://app.roboflow.com/tonyprivate/annotation-upload - id: tonyprivate/annotation-upload - type: object-detection - versions: 0 - images: 1 - classes: dict_keys(['0', 'Rabbits1', 'Rabbits2', 'minion1', 'minion0', '5075E']) - -hand-gestures - link: https://app.roboflow.com/tonyprivate/hand-gestures-fsph8 - id: tonyprivate/hand-gestures-fsph8 - type: object-detection - versions: 5 - images: 387 - classes: dict_keys(['zero', 'four', 'one', 'two', 'five', 'three', 'Guard']) -``` - -## Example: get project details - -```bash -roboflow project get -w tonyprivate annotation-upload -``` -``` -{ - "workspace": { - "name": "tonyprivate", - "url": "tonyprivate", - "members": 4 - }, - "project": { - "id": "tonyprivate/annotation-upload", - "type": "object-detection", - "name": "annotation-upload", - "created": 1685199749.708, - "updated": 1695910515.48, - "images": 1, - (...) - }, - "versions": [] -} -``` - -## Example: run inference - -If your project has a trained model (or you are using a dataset from Roboflow Universe that has a trained model), you can run inference from the command line. 
- -Let's use [Rock-Paper-Scissors sample public dataset]([url](https://universe.roboflow.com/roboflow-58fyf/rock-paper-scissors-sxsw/model/11)) from Roboflow universe - -(In my case, `~/scissors.png` is me holding two fingers to the camera, you can use your own image file ;-)) +## All command groups -```bash -roboflow infer -w roboflow-58fyf -m rock-paper-scissors-sxsw/11 ~/scissors.png -``` -``` -{ - "x": 1230.0, - "y": 814.5, - "width": 840.0, - "height": 1273.0, - "confidence": 0.8817358016967773, - "class": "Scissors", - "class_id": 2, - "image_path": "/Users/tony/scissors.png", - "prediction_type": "ObjectDetectionModel" -} -``` - -## Example: search and export a dataset - -Use Roboflow's search to query images across your workspace and export matching results as a dataset. This is useful when you want to create a dataset from specific search criteria (e.g. images with a certain class, tag, or other metadata). - -```bash -$ roboflow search-export --help -``` -``` -usage: roboflow search-export [-h] [-f FORMAT] [-w WORKSPACE] [-l LOCATION] [-d DATASET] [-g ANNOTATION_GROUP] [-n NAME] [--no-extract] query - -positional arguments: - query Search query (e.g. 
'tag:annotate' or '*') - -options: - -h, --help show this help message and exit - -f FORMAT Annotation format (default: coco) - -w WORKSPACE Workspace url or id (uses default workspace if not specified) - -l LOCATION Local directory to save the export - -d DATASET Limit export to a specific dataset (project slug) - -g ANNOTATION_GROUP Limit export to a specific annotation group - -n NAME Optional name for the export - --no-extract Skip extraction, keep the zip file -``` +| Command | Description | +|---------|-------------| +| `auth` | Login, logout, status, set default workspace | +| `workspace` | List and inspect workspaces | +| `project` | List, get, create projects | +| `version` | List, get, download, export dataset versions | +| `image` | Upload, get, search, tag, delete, annotate images | +| `model` | List, get, upload trained models | +| `train` | Start model training | +| `infer` | Run inference on images | +| `search` | Search workspace images (RoboQL), export results | +| `deployment` | Manage dedicated deployments | +| `workflow` | Manage workflows *(coming soon)* | +| `folder` | Manage project folders *(coming soon)* | +| `batch` | Batch processing jobs *(coming soon)* | +| `universe` | Browse Roboflow Universe *(coming soon)* | +| `video` | Video inference *(coming soon)* | +| `annotation` | Annotation batches and jobs *(coming soon)* | +| `completion` | Shell completion scripts *(coming soon)* | -Export all images tagged "annotate" in COCO format: +Run `roboflow --help` for details on any command. -```bash -$ roboflow search-export "tag:annotate" -``` +## Backwards compatibility -Export images containing a specific class, limited to one dataset, in COCO format: +All legacy command names still work: -```bash -$ roboflow search-export "class:person" -f coco -d my-dataset -l ~/exports/people -``` - -``` -Export started (id=abc123). Polling for completion... 
-Downloading search export to /Users/tony/exports/people: 100%|██████████| 5420/5420 [00:02<00:00, 2710.00it/s] -Search export extracted to /Users/tony/exports/people -``` +| Legacy | Current | +|--------|---------| +| `roboflow login` | `roboflow auth login` | +| `roboflow whoami` | `roboflow auth status` | +| `roboflow upload ` | `roboflow image upload ` | +| `roboflow import ` | `roboflow image upload ` | +| `roboflow download ` | `roboflow version download ` | +| `roboflow search-export` | `roboflow search --export` | +| `roboflow train` | `roboflow train start` | +| `roboflow deployment add` | `roboflow deployment create` | +| `roboflow deployment machine_type` | `roboflow deployment machine-type` | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index da367956..dfa08230 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -76,6 +76,55 @@ Before that, install the dependencies: python -m pip install mkdocs mkdocs-material mkdocstrings mkdocstrings[python] ``` +### CLI Development + +The CLI lives in `roboflow/cli/` with auto-discovered handler modules. To add a new command: + +1. Create `roboflow/cli/handlers/mycommand.py`: + +```python +"""My command description.""" +from __future__ import annotations +from typing import TYPE_CHECKING +if TYPE_CHECKING: + import argparse + +def register(subparsers: argparse._SubParsersAction) -> None: + parser = subparsers.add_parser("mycommand", help="Do something") + sub = parser.add_subparsers(title="mycommand commands") + + p = sub.add_parser("list", help="List things") + p.add_argument("-p", "--project", required=True, help="Project ID") + p.set_defaults(func=_list) + + parser.set_defaults(func=lambda args: parser.print_help()) + +def _list(args: argparse.Namespace) -> None: + from roboflow.cli._output import output, output_error, suppress_sdk_output + + with suppress_sdk_output(): + try: + # ... your logic here ... 
+ data = [{"id": "example"}] + except Exception as exc: + output_error(args, str(exc), hint="Check your project ID.", exit_code=3) + return + + output(args, data, text="Found 1 result.") +``` + +2. Add tests in `tests/cli/test_mycommand_handler.py` +3. Run `make check_code_quality` and `python -m unittest` + +**Agent experience checklist** (every command must satisfy): +- [ ] Supports `--json` via `output()` helper +- [ ] No interactive prompts when all required flags are provided +- [ ] Errors use `output_error(args, message, hint=..., exit_code=N)` +- [ ] SDK calls wrapped in `with suppress_sdk_output():` +- [ ] Exit codes: 0=success, 1=error, 2=auth, 3=not found + +**Documentation policy:** `CLI-COMMANDS.md` in this repo is a quickstart only. The comprehensive command reference lives in [`roboflow-product-docs`](https://github.com/roboflow/roboflow-product-docs) and is published to docs.roboflow.com. When adding a new command, update both: add a quick example to `CLI-COMMANDS.md` and the full reference to the product docs CLI page. + ### Pre-commit Hooks To ensure code quality and consistency, we use pre-commit hooks. Follow these steps to set up pre-commit in your development environment: diff --git a/roboflow/cli/__init__.py b/roboflow/cli/__init__.py new file mode 100644 index 00000000..0b7ff622 --- /dev/null +++ b/roboflow/cli/__init__.py @@ -0,0 +1,184 @@ +# PYTHON_ARGCOMPLETE_OK +"""Roboflow CLI — computer vision at your fingertips. + +This package implements the modular CLI for the Roboflow Python SDK. +Commands are auto-discovered from the ``handlers`` sub-package: any module +that exposes a ``register(subparsers)`` callable is loaded automatically. 
+""" + +from __future__ import annotations + +import argparse +import importlib +import pkgutil +import sys +from typing import Any + +import roboflow +from roboflow.cli import handlers as _handlers_pkg + + +class _CleanHelpFormatter(argparse.HelpFormatter): + """Custom formatter that hides SUPPRESS-ed subparser choices. + + The default argparse formatter includes *all* subparser names in the + ``{a,b,c,...}`` usage line and shows ``==SUPPRESS==`` in the command + list. This formatter filters both so that hidden legacy aliases are + truly invisible. + """ + + def _format_action(self, action: argparse.Action) -> str: + # Hide subparser entries whose help is SUPPRESS + if action.help == argparse.SUPPRESS: + return "" + return super()._format_action(action) + + def _metavar_formatter( + self, + action: argparse.Action, + default_metavar: str, + ) -> Any: + if isinstance(action, argparse._SubParsersAction): + # Filter choices to only those with visible help + visible = [ + name + for name, parser in action.choices.items() + if not any(ca.dest == name and ca.help == argparse.SUPPRESS for ca in action._choices_actions) + and name in [ca.dest for ca in action._choices_actions if ca.help != argparse.SUPPRESS] + ] + if visible: + + def _fmt(tuple_size: int) -> tuple[str, ...]: + result = "{" + ",".join(visible) + "}" + return (result,) * tuple_size if tuple_size > 1 else (result,) + + return _fmt + return super()._metavar_formatter(action, default_metavar) + + +def build_parser() -> argparse.ArgumentParser: + """Build the root argument parser with global flags and auto-discovered handlers.""" + parser = argparse.ArgumentParser( + prog="roboflow", + description="Roboflow CLI: computer vision at your fingertips", + formatter_class=_CleanHelpFormatter, + ) + + # --- global flags --- + parser.add_argument( + "--json", + "-j", + dest="json", + action="store_true", + default=False, + help="Output results as JSON (stable schema, for agents and piping)", + ) + parser.add_argument( 
+ "--api-key", + "-k", + dest="api_key", + default=None, + help="API key override (default: $ROBOFLOW_API_KEY or config file)", + ) + parser.add_argument( + "--workspace", + "-w", + dest="workspace", + default=None, + help="Workspace URL or ID override (default: configured default)", + ) + parser.add_argument( + "--quiet", + "-q", + dest="quiet", + action="store_true", + default=False, + help="Suppress non-essential output (progress bars, status messages)", + ) + parser.add_argument( + "--version", + action="store_true", + default=False, + help="Show package version and exit", + ) + + # --- subcommands --- + subparsers = parser.add_subparsers(title="commands", dest="command") + + # Auto-discover handler modules (skip private modules starting with _) + for _importer, modname, _ispkg in pkgutil.iter_modules(_handlers_pkg.__path__): + if modname.startswith("_"): + continue + try: + mod = importlib.import_module(f"roboflow.cli.handlers.{modname}") + if hasattr(mod, "register"): + mod.register(subparsers) + except Exception as exc: # noqa: BLE001 + # A broken handler must not take down the entire CLI + import logging + + logging.getLogger("roboflow.cli").debug("Failed to load handler %s: %s", modname, exc) + + # Load aliases last so they can reference handler functions + from roboflow.cli.handlers import _aliases + + _aliases.register(subparsers) + + parser.set_defaults(func=None) + return parser + + +def _show_version(args: argparse.Namespace) -> None: + if getattr(args, "json", False): + import json + + print(json.dumps({"version": roboflow.__version__})) + else: + print(roboflow.__version__) + + +def _reorder_argv(argv: list[str]) -> list[str]: + """Move known global flags that appear after the subcommand to the front. + + argparse only recognises global flags when they appear *before* the + subcommand. Many users (and AI agents) naturally write them at the end, + e.g. ``roboflow project list --json``. 
This helper transparently + re-orders the argv so those flags are consumed by the root parser. + """ + # Note: -w is intentionally excluded — it collides with deployment's + # -w/--wait_on_pending (boolean). --workspace (long form) is safe. + global_flags_with_value = {"--api-key", "-k", "--workspace"} + global_flags_bool = {"--json", "-j", "--quiet", "-q", "--version"} + + reordered: list[str] = [] + rest: list[str] = [] + i = 0 + while i < len(argv): + arg = argv[i] + if arg in global_flags_bool: + reordered.append(arg) + elif arg in global_flags_with_value: + reordered.append(arg) + if i + 1 < len(argv): + i += 1 + reordered.append(argv[i]) + else: + rest.append(arg) + i += 1 + return reordered + rest + + +def main() -> None: + """CLI entry point.""" + parser = build_parser() + args = parser.parse_args(_reorder_argv(sys.argv[1:])) + + if args.version: + _show_version(args) + sys.exit(0) + + if args.func is not None: + args.func(args) + else: + parser.print_help() + sys.exit(0) diff --git a/roboflow/cli/_output.py b/roboflow/cli/_output.py new file mode 100644 index 00000000..48016a75 --- /dev/null +++ b/roboflow/cli/_output.py @@ -0,0 +1,129 @@ +"""Structured output helpers for the Roboflow CLI. + +Every command should use ``output()`` for its result and ``output_error()`` +for failures so that ``--json`` mode works uniformly. +""" + +from __future__ import annotations + +import contextlib +import io +import json +import sys +from typing import Any, Iterator, Optional + + +def output(args: Any, data: Any, text: Optional[str] = None) -> None: + """Print a command result in JSON or human-readable format. + + Parameters + ---------- + args: + The parsed argparse namespace (must have a ``json`` attribute). + data: + Structured data to emit when ``--json`` is active. Also used as + fallback when *text* is ``None``. + text: + Human-readable string printed in normal (non-JSON) mode. When + ``None``, *data* is pretty-printed as JSON regardless of mode. 
+ """ + if getattr(args, "json", False): + print(json.dumps(data, indent=2, default=str)) + elif text is not None: + print(text) + else: + # Fallback: pretty-print data even in non-JSON mode + print(json.dumps(data, indent=2, default=str)) + + +def _parse_error_message(raw: str) -> tuple[Optional[dict[str, Any]], str]: + """Try to parse a raw error string that may contain embedded JSON. + + Returns ``(parsed_dict_or_None, human_readable_message)``. + The *parsed_dict* is the deserialized JSON when the string is JSON, + otherwise ``None``. The *human_readable_message* drills into nested + ``error.message`` structures so the text-mode output is clean. + """ + text = raw.strip() + # Strip status-code prefix like "404: {...}" + colon_idx = text.find(": {") + if 0 < colon_idx < 5: + text = text[colon_idx + 2 :] + try: + parsed = json.loads(text) + if isinstance(parsed, dict): + err = parsed.get("error", parsed) + if isinstance(err, dict): + human = str(err.get("message") or err.get("hint") or err) + else: + human = str(err) + return parsed, human + except (json.JSONDecodeError, TypeError, ValueError): + pass + return None, raw + + +def output_error( + args: Any, + message: str, + hint: Optional[str] = None, + exit_code: int = 1, +) -> None: + """Print an error and exit. + + Parameters + ---------- + args: + The parsed argparse namespace. + message: + What went wrong. + hint: + Actionable suggestion for the user / agent. + exit_code: + Process exit code. Convention: 1 = general, 2 = auth, 3 = not found. + """ + parsed, human_message = _parse_error_message(message) + + if getattr(args, "json", False): + # Normalise error to always be {"error": {"message": "..."}} so + # consumers see a consistent schema regardless of error source. 
+ if parsed is not None and "error" in parsed: + inner: Any = parsed["error"] + elif parsed is not None: + inner = parsed + else: + inner = None + + if isinstance(inner, dict): + error_obj: dict[str, Any] = dict(inner) + error_obj.setdefault("message", human_message) + else: + error_obj = {"message": human_message} + + if hint: + error_obj.setdefault("hint", hint) + payload: dict[str, Any] = {"error": error_obj} + print(json.dumps(payload), file=sys.stderr) + else: + msg = f"Error: {human_message}" + if hint: + msg += f"\n Hint: {hint}" + print(msg, file=sys.stderr) + sys.exit(exit_code) + + +def stub(args: Any) -> None: + """Placeholder handler for not-yet-implemented commands.""" + output_error(args, "This command is not yet implemented.", hint="Coming soon.", exit_code=1) + + +@contextlib.contextmanager +def suppress_sdk_output(args: Any = None) -> Iterator[None]: + """Suppress SDK stdout noise (e.g. 'loading Roboflow workspace...'). + + Always active — the SDK's "loading Roboflow workspace..." messages + are not useful CLI output in any mode. The CLI controls its own + output via ``output()`` and ``output_error()``. + """ + with contextlib.redirect_stdout(io.StringIO()): + yield diff --git a/roboflow/cli/_resolver.py b/roboflow/cli/_resolver.py new file mode 100644 index 00000000..93317751 --- /dev/null +++ b/roboflow/cli/_resolver.py @@ -0,0 +1,115 @@ +"""Universal resource shorthand resolver. + +Parses compact resource identifiers into (workspace, project, version) +tuples, filling in the default workspace from configuration when omitted. + +Disambiguation rule: version numbers are always numeric. So ``x/y`` where +``y`` is numeric means project/version; where ``y`` is non-numeric means +workspace/project. 
+ +Examples +-------- +- ``"my-project"`` → (default_ws, "my-project", None) +- ``"my-ws/my-project"`` → ("my-ws", "my-project", None) +- ``"my-project/3"`` → (default_ws, "my-project", 3) +- ``"my-ws/my-project/3"`` → ("my-ws", "my-project", 3) +""" + +from __future__ import annotations + +import os +from typing import Optional, Tuple + +from roboflow.config import get_conditional_configuration_variable + + +def resolve_default_workspace(api_key: Optional[str] = None) -> Optional[str]: + """Return the default workspace URL, querying the API if necessary. + + Checks (in order): ``RF_WORKSPACE`` in config/env, then the API + validation endpoint using the supplied *api_key* (or ``ROBOFLOW_API_KEY``). + """ + ws = get_conditional_configuration_variable("RF_WORKSPACE", default=None) + if ws: + return ws + + key = api_key or os.getenv("ROBOFLOW_API_KEY") + if not key: + return None + + import requests + + from roboflow.config import API_URL + + try: + resp = requests.post(API_URL + "/?api_key=" + key) + if resp.status_code == 200: + return resp.json().get("workspace") or None + except Exception: # noqa: BLE001 + pass + return None + + +def resolve_resource( + shorthand: str, + workspace_override: Optional[str] = None, +) -> Tuple[str, str, Optional[int]]: + """Parse a resource shorthand into (workspace, project, version). + + Parameters + ---------- + shorthand: + The compact identifier (see module docstring for formats). + workspace_override: + Explicit workspace from ``--workspace`` / ``-w``. Takes precedence + over the shorthand's workspace segment when the shorthand is + ambiguous (single segment). + + Returns + ------- + tuple[str, str, int | None] + ``(workspace_url, project_slug, version_number_or_none)`` + + Raises + ------ + ValueError + If the shorthand cannot be parsed or no workspace can be resolved. 
+ """ + parts = shorthand.strip("/").split("/") + + default_ws = workspace_override or resolve_default_workspace() + + if len(parts) == 1: + # "my-project" + if not default_ws: + raise ValueError( + f"Cannot resolve '{shorthand}': no workspace specified and no default configured. " + "Use --workspace or run 'roboflow auth login'." + ) + return (default_ws, parts[0], None) + + if len(parts) == 2: + # Could be "workspace/project" OR "project/version" + if parts[1].isdigit(): + # "project/3" + if not default_ws: + raise ValueError( + f"Cannot resolve '{shorthand}': no workspace specified and no default configured. " + "Use --workspace or run 'roboflow auth login'." + ) + return (default_ws, parts[0], int(parts[1])) + # "workspace/project" + ws = workspace_override or parts[0] + return (ws, parts[1], None) + + if len(parts) == 3: + # "workspace/project/version" + if not parts[2].isdigit(): + raise ValueError(f"Cannot resolve '{shorthand}': expected numeric version but got '{parts[2]}'.") + ws = workspace_override or parts[0] + return (ws, parts[1], int(parts[2])) + + raise ValueError( + f"Cannot resolve '{shorthand}': expected 1-3 path segments " + "(project, workspace/project, or workspace/project/version)." + ) diff --git a/roboflow/cli/_table.py b/roboflow/cli/_table.py new file mode 100644 index 00000000..b02e9133 --- /dev/null +++ b/roboflow/cli/_table.py @@ -0,0 +1,79 @@ +"""Simple table formatter for CLI list commands. + +No external dependency — uses plain string formatting. Respects terminal +width when available and truncates long fields. +""" + +from __future__ import annotations + +import os +import shutil +from typing import Any, Dict, List, Optional, Sequence + + +def format_table( + rows: Sequence[Dict[str, Any]], + columns: Sequence[str], + headers: Optional[Sequence[str]] = None, + max_width: Optional[int] = None, +) -> str: + """Format a list of dicts as a columnar table. 
+ + Parameters + ---------- + rows: + Each row is a dict whose keys match *columns*. + columns: + Ordered list of dict keys to include as columns. + headers: + Display names for each column. Defaults to *columns* with + title-casing and hyphens replaced by spaces. + max_width: + Terminal width cap. ``None`` means auto-detect. + + Returns + ------- + str + The formatted table string (without trailing newline). + """ + if not rows: + return "(no results)" + + if headers is None: + headers = [c.replace("_", " ").replace("-", " ").upper() for c in columns] + + # Stringify all cell values + str_rows: List[List[str]] = [] + for row in rows: + str_rows.append([str(row.get(c, "")) for c in columns]) + + # Compute column widths + col_widths = [len(h) for h in headers] + for sr in str_rows: + for i, cell in enumerate(sr): + col_widths[i] = max(col_widths[i], len(cell)) + + # Optionally clamp to terminal width + if max_width is None: + max_width = shutil.get_terminal_size((120, 24)).columns + # Leave room for column separators (2 spaces between columns) + total = sum(col_widths) + 2 * (len(columns) - 1) + if total > max_width and len(columns) > 1: + # Shrink the widest column proportionally + excess = total - max_width + widest_idx = col_widths.index(max(col_widths)) + col_widths[widest_idx] = max(col_widths[widest_idx] - excess, 10) + + def _truncate(s: str, width: int) -> str: + return s if len(s) <= width else s[: width - 1] + "\u2026" + + # Build lines + lines: list[str] = [] + header_line = " ".join(h.ljust(col_widths[i]) for i, h in enumerate(headers)) + lines.append(header_line) + lines.append(" ".join("-" * col_widths[i] for i in range(len(columns)))) + for sr in str_rows: + line = " ".join(_truncate(sr[i], col_widths[i]).ljust(col_widths[i]) for i in range(len(columns))) + lines.append(line) + + return os.linesep.join(lines) diff --git a/roboflow/cli/handlers/__init__.py b/roboflow/cli/handlers/__init__.py new file mode 100644 index 00000000..89c9cb1c --- /dev/null 
+++ b/roboflow/cli/handlers/__init__.py @@ -0,0 +1,8 @@ +"""Handler modules for the Roboflow CLI. + +Each module in this package that exposes a ``register(subparsers)`` function +is auto-discovered and loaded by ``roboflow.cli.build_parser()``. + +Modules whose names start with ``_`` (e.g. ``_aliases.py``) are *not* +auto-discovered — they are loaded explicitly after all other handlers. +""" diff --git a/roboflow/cli/handlers/_aliases.py b/roboflow/cli/handlers/_aliases.py new file mode 100644 index 00000000..1d9e816b --- /dev/null +++ b/roboflow/cli/handlers/_aliases.py @@ -0,0 +1,129 @@ +"""Top-level backwards-compatibility aliases. + +Registers convenience commands at the root level (``roboflow login``, +``roboflow upload``, etc.) that delegate to the canonical noun-verb handlers. + +This module is loaded *after* all other handlers by ``build_parser()`` so +that it can import their handler functions. +""" + +from __future__ import annotations + +import argparse + +# Use SUPPRESS to hide legacy aliases from --help output +_HIDDEN = argparse.SUPPRESS + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register top-level aliases for common commands.""" + + # --- roboflow login (visible alias for auth login) --- + from roboflow.cli.handlers.auth import _login + + login_p = subparsers.add_parser("login", help="Log in to Roboflow (alias for 'auth login')") + login_p.add_argument("--api-key", dest="login_api_key", default=None, help="API key (skip interactive login)") + login_p.add_argument("--force", "-f", action="store_true", help="Force re-login") + login_p.set_defaults(func=_login) + + # --- roboflow whoami (visible alias for auth status) --- + from roboflow.cli.handlers.auth import _status + + whoami_p = subparsers.add_parser("whoami", help="Show current user (alias for 'auth status')") + whoami_p.set_defaults(func=_status) + + # --- roboflow upload (visible alias for image upload) --- + from roboflow.cli.handlers.image 
import _handle_upload + + upload_p = subparsers.add_parser("upload", help="Upload images to a project (alias for 'image upload')") + upload_p.add_argument("path", help="Path to image file or directory") + upload_p.add_argument("-p", "--project", dest="project", help="Project ID (required)", required=True) + upload_p.add_argument("-a", "--annotation", dest="annotation", help="Path to annotation file") + upload_p.add_argument("-m", "--labelmap", dest="labelmap", help="Path to labelmap file") + upload_p.add_argument("-s", "--split", dest="split", default="train", help="Split (train/valid/test)") + upload_p.add_argument("-r", "--retries", dest="num_retries", type=int, default=0, help="Retry count") + upload_p.add_argument("-b", "--batch", dest="batch", help="Batch name") + upload_p.add_argument("-t", "--tag", dest="tag_names", help="Comma-separated tag names") + upload_p.add_argument("-M", "--metadata", dest="metadata", help="JSON metadata string") + upload_p.add_argument("-c", "--concurrency", dest="concurrency", type=int, default=10, help="Upload concurrency") + upload_p.add_argument("--is-prediction", dest="is_prediction", action="store_true", help="Mark as prediction") + upload_p.set_defaults(func=_handle_upload) + + # --- roboflow import (hidden alias for image upload with directory) --- + from roboflow.cli.handlers.image import _handle_upload as _handle_import + + import_p = subparsers.add_parser("import", help="Import dataset from folder (alias for 'image upload')") + import_p.add_argument("path", metavar="folder", help="Path to dataset folder") + import_p.add_argument("-p", "--project", dest="project", help="Project ID (required)", required=True) + import_p.add_argument("-c", "--concurrency", dest="concurrency", type=int, default=10, help="Upload concurrency") + import_p.add_argument("-n", "--batch-name", dest="batch", help="Batch name") + import_p.add_argument("-r", "--retries", dest="num_retries", type=int, default=0, help="Retry count") + 
import_p.set_defaults(func=_handle_import) + + # --- roboflow download (visible alias for version download) --- + from roboflow.cli.handlers.version import _download + + download_p = subparsers.add_parser("download", help="Download a dataset version (alias for 'version download')") + download_p.add_argument("url_or_id", metavar="datasetUrl", help="Dataset URL (e.g. workspace/project/version)") + download_p.add_argument("-f", "--format", dest="format", default="voc", help="Export format") + download_p.add_argument("-l", "--location", dest="location", help="Download location") + download_p.set_defaults(func=_download) + + # --- roboflow search-export (hidden alias for search --export) --- + from roboflow.cli.handlers.search import _search as _search_handler + + search_export_p = subparsers.add_parser("search-export", help=_HIDDEN) + search_export_p.add_argument("query", help="Search query (e.g. 'tag:annotate' or '*')") + search_export_p.add_argument("-f", dest="format", default="coco", help="Annotation format") + search_export_p.add_argument("-l", dest="location", help="Local directory for export") + search_export_p.add_argument("-d", dest="dataset", help="Limit to specific dataset") + search_export_p.add_argument("-g", dest="annotation_group", help="Limit to annotation group") + search_export_p.add_argument("-n", dest="name", help="Export name") + search_export_p.add_argument( + "--no-extract", dest="no_extract", action="store_true", help="Keep zip, skip extraction" + ) + search_export_p.set_defaults(func=_search_handler, export=True) # Force --export mode + + # --- roboflow upload_model (hidden alias for model upload) --- + from roboflow.cli.handlers.model import _upload_model + + upload_model_p = subparsers.add_parser("upload_model", help=_HIDDEN) + upload_model_p.add_argument("-a", dest="api_key", help="API key") + upload_model_p.add_argument("-p", dest="project", action="append", help="Project ID") + upload_model_p.add_argument("-v", dest="version_number", 
type=int, default=None, help="Version number") + upload_model_p.add_argument("-t", dest="model_type", help="Model type") + upload_model_p.add_argument("-m", dest="model_path", help="Model file path") + upload_model_p.add_argument("-f", dest="filename", default="weights/best.pt", help="Model filename") + upload_model_p.add_argument("-n", dest="model_name", help="Model name") + upload_model_p.set_defaults(func=_upload_model) + + # --- roboflow get_workspace_info (hidden alias, preserved) --- + get_ws_info_p = subparsers.add_parser("get_workspace_info", help=_HIDDEN) + get_ws_info_p.add_argument("-a", dest="api_key", help="API key") + get_ws_info_p.add_argument("-p", dest="project", help="Project ID") + get_ws_info_p.add_argument("-v", dest="version_number", type=int, help="Version number") + get_ws_info_p.set_defaults(func=_get_workspace_info_compat) + + # --- roboflow run_video_inference_api (hidden alias for video infer) --- + from roboflow.cli.handlers.video import _video_infer + + video_api_p = subparsers.add_parser("run_video_inference_api", help=_HIDDEN) + video_api_p.add_argument("-a", dest="api_key", help="API key") + video_api_p.add_argument("-p", dest="project", help="Project ID") + video_api_p.add_argument("-v", dest="version_number", type=int, help="Version number") + video_api_p.add_argument("-f", dest="video_file", help="Video file path") + video_api_p.add_argument("-fps", dest="fps", type=int, default=5, help="FPS") + video_api_p.set_defaults(func=_video_infer) + + +def _get_workspace_info_compat(args: argparse.Namespace) -> None: + """Backwards-compat handler for the old get_workspace_info command.""" + import roboflow + + rf = roboflow.Roboflow(args.api_key) + workspace = rf.workspace() + print("workspace", workspace) + project = workspace.project(args.project) + print("project", project) + version = project.version(args.version_number) + print("version", version) diff --git a/roboflow/cli/handlers/annotation.py b/roboflow/cli/handlers/annotation.py 
new file mode 100644 index 00000000..862b5995 --- /dev/null +++ b/roboflow/cli/handlers/annotation.py @@ -0,0 +1,75 @@ +"""Annotation management commands: batch and job operations (stubs).""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import argparse + +from roboflow.cli._output import stub + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register the ``annotation`` command group.""" + ann_parser = subparsers.add_parser("annotation", help="Annotation management commands") + ann_sub = ann_parser.add_subparsers(title="annotation commands", dest="annotation_command") + + _add_batch(ann_sub) + _add_job(ann_sub) + + ann_parser.set_defaults(func=lambda args: ann_parser.print_help()) + + +# --------------------------------------------------------------------------- +# batch +# --------------------------------------------------------------------------- + + +def _add_batch(sub: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + batch_parser = sub.add_parser("batch", help="Annotation batch commands") + batch_sub = batch_parser.add_subparsers(title="batch commands", dest="batch_command") + + # batch list + p = batch_sub.add_parser("list", help="List annotation batches") + p.add_argument("-p", "--project", required=True, help="Project ID") + p.set_defaults(func=stub) + + # batch get + p = batch_sub.add_parser("get", help="Get annotation batch details") + p.add_argument("batch_id", help="Batch ID") + p.add_argument("-p", "--project", required=True, help="Project ID") + p.set_defaults(func=stub) + + batch_parser.set_defaults(func=lambda args: batch_parser.print_help()) + + +# --------------------------------------------------------------------------- +# job +# --------------------------------------------------------------------------- + + +def _add_job(sub: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + job_parser = sub.add_parser("job", 
help="Annotation job commands") + job_sub = job_parser.add_subparsers(title="job commands", dest="job_command") + + # job list + p = job_sub.add_parser("list", help="List annotation jobs") + p.add_argument("-p", "--project", required=True, help="Project ID") + p.set_defaults(func=stub) + + # job get + p = job_sub.add_parser("get", help="Get annotation job details") + p.add_argument("job_id", help="Job ID") + p.add_argument("-p", "--project", required=True, help="Project ID") + p.set_defaults(func=stub) + + # job create + p = job_sub.add_parser("create", help="Create an annotation job") + p.add_argument("-p", "--project", required=True, help="Project ID") + p.add_argument("--name", required=True, help="Job name") + p.add_argument("--batch", default=None, help="Batch ID to assign") + p.add_argument("--assignees", default=None, help="Comma-separated assignee emails") + p.set_defaults(func=stub) + + job_parser.set_defaults(func=lambda args: job_parser.print_help()) diff --git a/roboflow/cli/handlers/auth.py b/roboflow/cli/handlers/auth.py new file mode 100644 index 00000000..0cac073e --- /dev/null +++ b/roboflow/cli/handlers/auth.py @@ -0,0 +1,280 @@ +"""Auth commands: login, logout, status, set-workspace.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import argparse + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register the ``auth`` command group.""" + auth_parser = subparsers.add_parser("auth", help="Manage authentication and credentials") + auth_sub = auth_parser.add_subparsers(title="auth commands", dest="auth_command") + + # --- auth login --- + login_p = auth_sub.add_parser("login", help="Log in to Roboflow") + login_p.add_argument( + "--api-key", + dest="login_api_key", + default=None, + help="API key (skip interactive prompt)", + ) + login_p.add_argument( + "--workspace", + dest="login_workspace", + default=None, + help="Set default workspace during login", 
+ ) + login_p.add_argument( + "--force", + "-f", + action="store_true", + default=False, + help="Force re-login even if already logged in", + ) + login_p.set_defaults(func=_login) + + # --- auth status --- + status_p = auth_sub.add_parser("status", help="Show current auth status") + status_p.set_defaults(func=_status) + + # --- auth set-workspace --- + sw_p = auth_sub.add_parser("set-workspace", help="Set the default workspace") + sw_p.add_argument("workspace_id", help="Workspace URL or ID to set as default") + sw_p.set_defaults(func=_set_workspace) + + # --- auth logout --- + logout_p = auth_sub.add_parser("logout", help="Remove stored credentials") + logout_p.set_defaults(func=_logout) + + # Default: show help when no subcommand given + auth_parser.set_defaults(func=lambda args: auth_parser.print_help()) + + +def _get_config_path() -> str: + import os + from pathlib import Path + + if os.name == "nt": + default_path = str(Path.home() / "roboflow" / "config.json") + else: + default_path = str(Path.home() / ".config" / "roboflow" / "config.json") + return os.getenv("ROBOFLOW_CONFIG_DIR", default=default_path) + + +def _load_config() -> dict: + import json + import os + + path = _get_config_path() + if os.path.exists(path): + with open(path) as f: + return json.load(f) + return {} + + +def _save_config(config: dict) -> None: + import json + import os + import stat + + path = _get_config_path() + os.makedirs(os.path.dirname(path), exist_ok=True) + # Write with owner-only permissions (0600) since the file contains API keys + fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, stat.S_IRUSR | stat.S_IWUSR) + with os.fdopen(fd, "w") as f: + json.dump(config, f, indent=2) + + +def _mask_key(key: str) -> str: + if not key or len(key) <= 4: + return "****" + return key[:2] + "*" * (len(key) - 4) + key[-2:] + + +def _login(args: argparse.Namespace) -> None: + from roboflow.cli._output import output, output_error + + api_key = getattr(args, "login_api_key", None) or 
getattr(args, "api_key", None) + workspace_id = getattr(args, "login_workspace", None) or getattr(args, "workspace", None) + force = getattr(args, "force", False) + + if api_key: + # Non-interactive: validate key and fetch workspace info + import requests + + from roboflow.config import API_URL + + resp = requests.post(API_URL + "/?api_key=" + api_key) + if resp.status_code == 401: + output_error(args, "Invalid API key.", hint="Check your key at app.roboflow.com/settings", exit_code=2) + return + if resp.status_code != 200: + output_error(args, f"API error ({resp.status_code}).", exit_code=1) + return + + r_login = resp.json() + if r_login is None: + output_error(args, "Invalid API key.", exit_code=2) + return + + # The validation endpoint returns {"workspace": "", ...} + ws_url = workspace_id or r_login.get("workspace", "") + if not ws_url: + output_error(args, "Could not determine workspace.", hint="Pass --workspace explicitly.", exit_code=1) + return + + # Fetch workspace name from the API + ws_name = ws_url + try: + from roboflow.adapters import rfapi + + ws_json = rfapi.get_workspace(api_key, ws_url) + ws_detail = ws_json.get("workspace", ws_json) + ws_name = ws_detail.get("name", ws_url) + except Exception: # noqa: BLE001 + pass # Fall back to using the URL as the name + + # Build config with workspace info + config = _load_config() + workspaces = config.get("workspaces", {}) + workspaces[ws_url] = {"url": ws_url, "name": ws_name, "apiKey": api_key} + config["workspaces"] = workspaces + config["RF_WORKSPACE"] = ws_url + _save_config(config) + + note = "" + if len(workspaces) == 1: + note = "\n Note: API key login stores only the key's workspace. Use interactive login for all workspaces." + output( + args, + {"status": "logged_in", "workspace": ws_url, "api_key": _mask_key(api_key)}, + text=f"Logged in. 
Default workspace: {ws_url}{note}", + ) + else: + # Interactive flow + import roboflow + + conf_path = _get_config_path() + import os + + if os.path.isfile(conf_path) and not force: + # Already logged in — show status + config = _load_config() + ws = config.get("RF_WORKSPACE", "unknown") + output( + args, + {"status": "logged_in", "workspace": ws, "api_key": "****"}, + text=f"Already logged in. Default workspace: {ws}\nUse --force to re-login.", + ) + return + + roboflow.login(workspace=workspace_id, force=force) + # Re-read config after interactive login + config = _load_config() + ws = config.get("RF_WORKSPACE", "unknown") + output( + args, + {"status": "logged_in", "workspace": ws, "api_key": "****"}, + text=f"Logged in. Default workspace: {ws}", + ) + + +def _status(args: argparse.Namespace) -> None: + import os + + from roboflow.cli._output import output, output_error + + config = _load_config() + workspaces = config.get("workspaces", {}) + default_ws_url = config.get("RF_WORKSPACE") + + # Explicit --api-key flag takes priority, then env var + explicit_api_key = getattr(args, "api_key", None) + api_key = explicit_api_key or os.getenv("ROBOFLOW_API_KEY") + + # When an explicit --api-key is provided, always validate it against the + # API rather than showing saved config — the user wants to check *this* key. 
+ if explicit_api_key or (api_key and not default_ws_url): + import requests + + from roboflow.config import API_URL + + assert api_key is not None # guaranteed by the condition above + resp = requests.post(API_URL + "/?api_key=" + api_key) + if resp.status_code == 200: + ws_url = resp.json().get("workspace", "unknown") + data = {"url": ws_url, "name": ws_url, "apiKey": _mask_key(api_key)} + lines = [ + f"Workspace: {ws_url}", + f" URL: {ws_url}", + f" API Key: {_mask_key(api_key)}", + " (authenticated via --api-key or ROBOFLOW_API_KEY)", + ] + output(args, data, text="\n".join(lines)) + else: + output_error(args, "API key is invalid or expired.", exit_code=2) + return + + if not workspaces and not default_ws_url and not api_key: + output_error(args, "Not logged in.", hint="Run 'roboflow auth login' to authenticate.", exit_code=2) + return # unreachable, but helps mypy + + if not default_ws_url: + output_error(args, "No default workspace configured.", hint="Run 'roboflow auth set-workspace '.") + return # unreachable, but helps mypy + + workspaces_by_url = {w["url"]: w for w in workspaces.values()} + default_ws = workspaces_by_url.get(default_ws_url) + + if default_ws: + # Use stored API key, or fall back to env var + display_key = api_key or default_ws.get("apiKey", "") + masked = dict(default_ws) + masked["apiKey"] = _mask_key(display_key) + lines = [ + f"Workspace: {masked.get('name', 'unknown')}", + f" URL: {masked.get('url', 'unknown')}", + f" API Key: {masked['apiKey']}", + ] + output(args, masked, text="\n".join(lines)) + else: + # RF_WORKSPACE is set but no matching workspace details + data = {"url": default_ws_url, "name": default_ws_url} + output( + args, + data, + text=f"Workspace: {default_ws_url}\n (no detailed info available)", + ) + + +def _set_workspace(args: argparse.Namespace) -> None: + from roboflow.cli._output import output + + workspace_id = args.workspace_id + config = _load_config() + config["RF_WORKSPACE"] = workspace_id + 
_save_config(config) + output( + args, + {"default_workspace": workspace_id}, + text=f"Default workspace set to: {workspace_id}", + ) + + +def _logout(args: argparse.Namespace) -> None: + import os + + from roboflow.cli._output import output + + conf_path = _get_config_path() + if os.path.isfile(conf_path): + os.remove(conf_path) + + output( + args, + {"status": "logged_out"}, + text="Logged out. Credentials removed.", + ) diff --git a/roboflow/cli/handlers/batch.py b/roboflow/cli/handlers/batch.py new file mode 100644 index 00000000..3ce1b414 --- /dev/null +++ b/roboflow/cli/handlers/batch.py @@ -0,0 +1,45 @@ +"""Batch processing commands.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import argparse + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register the ``batch`` command group.""" + from roboflow.cli._output import stub + + batch_parser = subparsers.add_parser("batch", help="Batch processing operations") + batch_subs = batch_parser.add_subparsers(title="batch commands", dest="batch_command") + + # --- batch create --- + create_p = batch_subs.add_parser("create", help="Create a batch processing job") + create_p.add_argument("--workflow", dest="workflow", required=True, help="Workflow ID to run") + create_p.add_argument("--input", dest="input", required=True, help="Input path (image directory or video file)") + create_p.add_argument("--model", dest="model", default=None, help="Model ID override (default: workflow model)") + create_p.add_argument("--output", dest="output", default=None, help="Output directory for results") + create_p.set_defaults(func=stub) + + # --- batch status --- + status_p = batch_subs.add_parser("status", help="Check batch job status") + status_p.add_argument("job_id", help="Batch job ID") + status_p.set_defaults(func=stub) + + # --- batch list --- + list_p = batch_subs.add_parser("list", help="List batch jobs") + list_p.add_argument( + 
"--status", dest="status", default=None, help="Filter by status (pending, running, completed, failed)" + ) + list_p.set_defaults(func=stub) + + # --- batch results --- + results_p = batch_subs.add_parser("results", help="Get batch job results") + results_p.add_argument("job_id", help="Batch job ID") + results_p.add_argument("--format", dest="format", default=None, help="Output format (json, csv)") + results_p.set_defaults(func=stub) + + # Default + batch_parser.set_defaults(func=lambda args: batch_parser.print_help()) diff --git a/roboflow/cli/handlers/completion.py b/roboflow/cli/handlers/completion.py new file mode 100644 index 00000000..8f2acf85 --- /dev/null +++ b/roboflow/cli/handlers/completion.py @@ -0,0 +1,31 @@ +"""Shell completion commands.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import argparse + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register the ``completion`` command group.""" + from roboflow.cli._output import stub + + comp_parser = subparsers.add_parser("completion", help="Generate shell completions") + comp_subs = comp_parser.add_subparsers(title="completion commands", dest="completion_command") + + # --- completion bash --- + bash_p = comp_subs.add_parser("bash", help="Generate bash completions") + bash_p.set_defaults(func=stub) + + # --- completion zsh --- + zsh_p = comp_subs.add_parser("zsh", help="Generate zsh completions") + zsh_p.set_defaults(func=stub) + + # --- completion fish --- + fish_p = comp_subs.add_parser("fish", help="Generate fish completions") + fish_p.set_defaults(func=stub) + + # Default + comp_parser.set_defaults(func=lambda args: comp_parser.print_help()) diff --git a/roboflow/cli/handlers/deployment.py b/roboflow/cli/handlers/deployment.py new file mode 100644 index 00000000..f85bd4ba --- /dev/null +++ b/roboflow/cli/handlers/deployment.py @@ -0,0 +1,215 @@ +"""Deployment management commands. 
+ +Builds clean, kebab-case subcommands that delegate to the handler +functions in ``roboflow.deployment``. Legacy snake_case names are +registered as hidden aliases (``argparse.SUPPRESS``) so old scripts +keep working. +""" + +from __future__ import annotations + +import argparse +import io +import sys +from typing import Any, Callable + +# --------------------------------------------------------------------------- +# Wrapper that captures legacy handler stdout/exit and normalises output +# --------------------------------------------------------------------------- + + +def _wrap(func: Callable[..., Any]) -> Callable[..., None]: + """Wrap a legacy deployment handler for structured errors + JSON output.""" + + def _wrapped(args: argparse.Namespace) -> None: + from roboflow.cli._output import output, output_error + + captured = io.StringIO() + orig_stdout = sys.stdout + try: + sys.stdout = captured + func(args) + except SystemExit as exc: + sys.stdout = orig_stdout + code = exc.code if isinstance(exc.code, int) else 1 + # Map legacy exit codes to CLI conventions: 1=general, 2=auth, 3=not-found + exit_code = {0: 1, 1: 1, 2: 2, 3: 3}.get(code, 1) if code else 1 + text = captured.getvalue().strip() + if text: + output_error(args, text, exit_code=exit_code) + else: + output_error(args, "Deployment command failed.", exit_code=1) + return + finally: + sys.stdout = orig_stdout + + text = captured.getvalue() + if text: + if getattr(args, "json", False): + import json + + try: + data = json.loads(text) + output(args, data) + except (ValueError, TypeError): + print(text, end="") + else: + print(text, end="") + + return _wrapped + + +# --------------------------------------------------------------------------- +# Hidden-alias helper +# --------------------------------------------------------------------------- + +_HIDDEN = argparse.SUPPRESS + + +# --------------------------------------------------------------------------- +# Register +# 
--------------------------------------------------------------------------- + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register the ``deployment`` command group with clean kebab-case names.""" + from roboflow.cli import _CleanHelpFormatter + from roboflow.deployment import ( + add_deployment, + delete_deployment, + get_deployment, + get_deployment_log, + get_deployment_usage, + get_workspace_usage, + list_deployment, + list_machine_types, + pause_deployment, + resume_deployment, + ) + + dep = subparsers.add_parser("deployment", help="Manage dedicated deployments", formatter_class=_CleanHelpFormatter) + sub = dep.add_subparsers(title="deployment commands", dest="deployment_command") + + # --- machine-type (canonical) --- + mt = sub.add_parser("machine-type", help="List available machine types") + mt.set_defaults(func=_wrap(list_machine_types)) + + # --- create (canonical, replaces "add") --- + create = sub.add_parser("create", help="Create a dedicated deployment") + create.add_argument("deployment_name", help="Deployment name (5-15 lowercase chars, starts with letter)") + create.add_argument( + "-m", + "--machine-type", + dest="machine_type", + required=True, + help="Machine type (run 'roboflow deployment machine-type' to list options)", + ) + create.add_argument( + "-e", + "--email", + dest="creator_email", + required=True, + help="Your email (must be a workspace member)", + ) + create.add_argument("--duration", type=float, default=3, help="Duration in hours (default: 3)") + create.add_argument( + "--no-delete-on-expiration", + dest="no_delete_on_expiration", + action="store_true", + help="Keep deployment when it expires", + ) + create.add_argument( + "--inference-version", + dest="inference_version", + default="latest", + help="Inference server version (default: latest)", + ) + create.add_argument("--wait", dest="wait_on_pending", action="store_true", help="Wait until deployment is ready") + 
create.set_defaults(func=_wrap(add_deployment)) + + # --- get --- + get = sub.add_parser("get", help="Show details for a deployment") + get.add_argument("deployment_name", help="Deployment name") + get.add_argument("--wait", dest="wait_on_pending", action="store_true", help="Wait if deployment is pending") + get.set_defaults(func=_wrap(get_deployment)) + + # --- list --- + ls = sub.add_parser("list", help="List deployments in workspace") + ls.set_defaults(func=_wrap(list_deployment)) + + # --- usage --- + usage = sub.add_parser("usage", help="Show usage statistics") + usage.add_argument("deployment_name", nargs="?", default=None, help="Deployment name (omit for workspace-wide)") + usage.add_argument("--from", dest="from_timestamp", default=None, help="Start time (ISO 8601)") + usage.add_argument("--to", dest="to_timestamp", default=None, help="End time (ISO 8601)") + usage.set_defaults(func=_usage_handler) + + # --- pause --- + pause = sub.add_parser("pause", help="Pause a deployment") + pause.add_argument("deployment_name", help="Deployment name") + pause.set_defaults(func=_wrap(pause_deployment)) + + # --- resume --- + resume = sub.add_parser("resume", help="Resume a paused deployment") + resume.add_argument("deployment_name", help="Deployment name") + resume.set_defaults(func=_wrap(resume_deployment)) + + # --- delete --- + delete = sub.add_parser("delete", help="Delete a deployment") + delete.add_argument("deployment_name", help="Deployment name") + delete.set_defaults(func=_wrap(delete_deployment)) + + # --- log --- + log = sub.add_parser("log", help="Show deployment logs") + log.add_argument("deployment_name", help="Deployment name") + log.add_argument("-d", "--duration", type=int, default=3600, help="Log window in seconds (default: 3600)") + log.add_argument("-n", "--tail", type=int, default=10, help="Lines to show from end (max 50)") + log.add_argument("-f", "--follow", action="store_true", help="Follow log output") + 
log.set_defaults(func=_wrap(get_deployment_log)) + + # --- hidden legacy aliases (exact old flag signatures for backwards compat) --- + + # machine_type → machine-type + legacy_mt = sub.add_parser("machine_type", help=_HIDDEN) + legacy_mt.add_argument("-a", "--api_key", default=None) + legacy_mt.set_defaults(func=_wrap(list_machine_types)) + + # add → create (with old flag names: -m/--machine_type, -e/--creator_email, etc.) + legacy_add = sub.add_parser("add", help=_HIDDEN) + legacy_add.add_argument("deployment_name") + legacy_add.add_argument("-a", "--api_key", default=None) + legacy_add.add_argument("-m", "--machine_type", required=True) + legacy_add.add_argument("-e", "--creator_email", required=True) + legacy_add.add_argument("-t", "--duration", type=float, default=3) + legacy_add.add_argument("-nodel", "--no_delete_on_expiration", action="store_true") + legacy_add.add_argument("-v", "--inference_version", default="latest") + legacy_add.add_argument("-w", "--wait_on_pending", action="store_true") + legacy_add.set_defaults(func=_wrap(add_deployment)) + + # usage_workspace + legacy_uw = sub.add_parser("usage_workspace", help=_HIDDEN) + legacy_uw.add_argument("-a", "--api_key", default=None) + legacy_uw.add_argument("-f", "--from_timestamp", default=None) + legacy_uw.add_argument("-t", "--to_timestamp", default=None) + legacy_uw.set_defaults(func=_wrap(get_workspace_usage)) + + # usage_deployment + legacy_ud = sub.add_parser("usage_deployment", help=_HIDDEN) + legacy_ud.add_argument("-a", "--api_key", default=None) + legacy_ud.add_argument("deployment_name") + legacy_ud.add_argument("-f", "--from_timestamp", default=None) + legacy_ud.add_argument("-t", "--to_timestamp", default=None) + legacy_ud.set_defaults(func=_wrap(get_deployment_usage)) + + # Default: show help when no subcommand given + dep.set_defaults(func=lambda args: dep.print_help()) + + +def _usage_handler(args: argparse.Namespace) -> None: + """Dispatch to workspace or deployment usage based on 
whether a name was given.""" + from roboflow.deployment import get_deployment_usage, get_workspace_usage + + if args.deployment_name: + _wrap(get_deployment_usage)(args) + else: + _wrap(get_workspace_usage)(args) diff --git a/roboflow/cli/handlers/folder.py b/roboflow/cli/handlers/folder.py new file mode 100644 index 00000000..c2dc7f3a --- /dev/null +++ b/roboflow/cli/handlers/folder.py @@ -0,0 +1,44 @@ +"""Folder management commands.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import argparse + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register the ``folder`` command group.""" + from roboflow.cli._output import stub + + folder_parser = subparsers.add_parser("folder", help="Manage workspace folders") + folder_subs = folder_parser.add_subparsers(title="folder commands", dest="folder_command") + + # --- folder list --- + list_p = folder_subs.add_parser("list", help="List folders") + list_p.set_defaults(func=stub) + + # --- folder get --- + get_p = folder_subs.add_parser("get", help="Show folder details") + get_p.add_argument("folder_id", help="Folder ID") + get_p.set_defaults(func=stub) + + # --- folder create --- + create_p = folder_subs.add_parser("create", help="Create a folder") + create_p.add_argument("name", help="Folder name") + create_p.set_defaults(func=stub) + + # --- folder update --- + update_p = folder_subs.add_parser("update", help="Update a folder") + update_p.add_argument("folder_id", help="Folder ID") + update_p.add_argument("--name", help="New folder name") + update_p.set_defaults(func=stub) + + # --- folder delete --- + delete_p = folder_subs.add_parser("delete", help="Delete a folder") + delete_p.add_argument("folder_id", help="Folder ID") + delete_p.set_defaults(func=stub) + + # Default + folder_parser.set_defaults(func=lambda args: folder_parser.print_help()) diff --git a/roboflow/cli/handlers/image.py b/roboflow/cli/handlers/image.py new 
file mode 100644 index 00000000..58f8eb04 --- /dev/null +++ b/roboflow/cli/handlers/image.py @@ -0,0 +1,377 @@ +"""Image management commands: upload, get, search, tag, delete, annotate.""" + +from __future__ import annotations + +import json +import os +from typing import TYPE_CHECKING + +from roboflow.adapters import rfapi +from roboflow.cli._output import output, output_error +from roboflow.config import API_URL, load_roboflow_api_key + +if TYPE_CHECKING: + import argparse + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register the ``image`` command group.""" + image_parser = subparsers.add_parser("image", help="Image management commands") + image_sub = image_parser.add_subparsers(title="image commands", dest="image_command") + + _add_upload(image_sub) + _add_get(image_sub) + _add_search(image_sub) + _add_tag(image_sub) + _add_delete(image_sub) + _add_annotate(image_sub) + + image_parser.set_defaults(func=lambda args: image_parser.print_help()) + + +# --------------------------------------------------------------------------- +# upload +# --------------------------------------------------------------------------- + + +def _add_upload(sub: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + p = sub.add_parser("upload", help="Upload an image file or import a directory") + p.add_argument("path", help="Path to image file or directory (auto-detects single file vs. 
directory bulk import)") + p.add_argument("-p", "--project", required=True, help="Project ID") + p.add_argument("-a", "--annotation", default=None, help="Path to annotation file (single upload)") + p.add_argument("-s", "--split", default="train", help="Dataset split (default: train)") + p.add_argument("-b", "--batch", default=None, help="Batch name") + p.add_argument("-t", "--tag", default=None, help="Comma-separated tag names") + p.add_argument("--metadata", default=None, help="JSON string of key-value metadata") + p.add_argument("-c", "--concurrency", type=int, default=10, help="Concurrency for directory import (default: 10)") + p.add_argument("-r", "--retries", type=int, default=0, help="Retry failed uploads N times (default: 0)") + p.add_argument("--labelmap", default=None, help="Path to labelmap file") + p.add_argument("--is-prediction", action="store_true", default=False, help="Mark upload as prediction") + p.set_defaults(func=_handle_upload) + + +def _handle_upload(args: argparse.Namespace) -> None: + api_key = args.api_key or load_roboflow_api_key(args.workspace) + if not api_key: + output_error(args, "No API key found", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'", exit_code=2) + return + + path = args.path + if os.path.isdir(path): + _handle_upload_directory(args, api_key, path) + elif os.path.isfile(path): + _handle_upload_single(args, api_key, path) + else: + output_error(args, f"Path not found: {path}", hint="Provide a valid file or directory path") + return + + +def _handle_upload_single(args: argparse.Namespace, api_key: str, path: str) -> None: + import roboflow + from roboflow.cli._output import suppress_sdk_output + + metadata_raw = getattr(args, "metadata", None) + metadata = json.loads(metadata_raw) if metadata_raw else None + tag_raw = getattr(args, "tag", None) or getattr(args, "tag_names", None) + tag_names = tag_raw.split(",") if tag_raw else [] + retries = getattr(args, "retries", None) or getattr(args, "num_retries", 0) or 0 + 
+ # Always suppress SDK "loading..." noise during workspace/project init + with suppress_sdk_output(): + try: + rf = roboflow.Roboflow(api_key) + workspace = rf.workspace(args.workspace) + project = workspace.project(args.project) + except Exception as exc: + output_error(args, str(exc), exit_code=3) + return + + try: + project.single_upload( + image_path=path, + annotation_path=args.annotation, + annotation_labelmap=getattr(args, "labelmap", None), + split=args.split, + num_retry_uploads=retries, + batch_name=args.batch, + tag_names=tag_names, + is_prediction=getattr(args, "is_prediction", False), + metadata=metadata, + ) + except Exception as exc: + msg = str(exc) + hint = None + if "cannot identify image file" in msg: + hint = "Supported formats: JPEG, PNG, BMP, GIF, TIFF, WebP." + output_error(args, msg, hint=hint) + return + + data = {"status": "uploaded", "path": path, "project": args.project} + output(args, data, text=f"Uploaded {path} to {args.project}") + + +def _handle_upload_directory(args: argparse.Namespace, api_key: str, path: str) -> None: + import roboflow + from roboflow.cli._output import suppress_sdk_output + + # Always suppress SDK "loading..." 
noise during workspace init + with suppress_sdk_output(): + try: + rf = roboflow.Roboflow(api_key) + workspace = rf.workspace(args.workspace) + except Exception as exc: + output_error(args, str(exc), exit_code=3) + return + + retries = getattr(args, "retries", None) or getattr(args, "num_retries", 0) or 0 + + try: + workspace.upload_dataset( + dataset_path=path, + project_name=args.project, + num_workers=args.concurrency, + batch_name=getattr(args, "batch", None), + num_retries=retries, + ) + except Exception as exc: + output_error(args, str(exc)) + return + + # Count files uploaded (approximate via image extensions) + count = 0 + image_exts = {".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff", ".webp"} + for root, _dirs, files in os.walk(path): + for f in files: + if os.path.splitext(f)[1].lower() in image_exts: + count += 1 + + data = {"status": "imported", "path": path, "count": count} + output(args, data, text=f"Imported {count} images from {path} to {args.project}") + + +# --------------------------------------------------------------------------- +# get +# --------------------------------------------------------------------------- + + +def _add_get(sub: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + p = sub.add_parser("get", help="Get image details") + p.add_argument("image_id", help="Image ID") + p.add_argument("-p", "--project", required=True, help="Project ID") + p.set_defaults(func=_handle_get) + + +def _handle_get(args: argparse.Namespace) -> None: + import requests + + api_key = args.api_key or load_roboflow_api_key(args.workspace) + if not api_key: + output_error(args, "No API key found", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'", exit_code=2) + return + + workspace_url = args.workspace or _default_workspace() + if not workspace_url: + output_error(args, "No workspace specified", hint="Use --workspace or run 'roboflow auth login'") + return + + url = f"{API_URL}/{workspace_url}/{args.project}/images/{args.image_id}" + 
response = requests.get(url, params={"api_key": api_key}) + if response.status_code != 200: + output_error(args, f"Failed to get image: {response.text}", exit_code=3) + return + + data = response.json() + output(args, data, text=json.dumps(data, indent=2)) + + +# --------------------------------------------------------------------------- +# search +# --------------------------------------------------------------------------- + + +def _add_search(sub: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + p = sub.add_parser("search", help="Search images in workspace") + p.add_argument("query", help="RoboQL search query") + p.add_argument("-p", "--project", required=True, help="Project ID (used in query filter)") + p.add_argument("--limit", type=int, default=50, help="Number of results (default: 50)") + p.add_argument("--cursor", default=None, help="Continuation token for pagination") + p.set_defaults(func=_handle_search) + + +def _handle_search(args: argparse.Namespace) -> None: + api_key = args.api_key or load_roboflow_api_key(args.workspace) + if not api_key: + output_error(args, "No API key found", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'", exit_code=2) + return + + workspace_url: str = args.workspace or _default_workspace() or "" + if not workspace_url: + output_error(args, "No workspace specified", hint="Use --workspace or run 'roboflow auth login'") + return + + result = rfapi.workspace_search( + api_key=api_key, + workspace_url=workspace_url, + query=args.query, + page_size=args.limit, + continuation_token=args.cursor, + ) + output(args, result, text=json.dumps(result, indent=2)) + + +# --------------------------------------------------------------------------- +# tag +# --------------------------------------------------------------------------- + + +def _add_tag(sub: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + p = sub.add_parser("tag", help="Add or remove tags on an image") + p.add_argument("image_id", help="Image 
ID") + p.add_argument("-p", "--project", required=True, help="Project ID") + p.add_argument("--add", default=None, dest="add_tags", help="Comma-separated tags to add") + p.add_argument("--remove", default=None, dest="remove_tags", help="Comma-separated tags to remove") + p.set_defaults(func=_handle_tag) + + +def _handle_tag(args: argparse.Namespace) -> None: + import requests + + if not args.add_tags and not args.remove_tags: + output_error(args, "Nothing to do", hint="Specify --add and/or --remove with comma-separated tags") + return + + api_key = args.api_key or load_roboflow_api_key(args.workspace) + if not api_key: + output_error(args, "No API key found", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'", exit_code=2) + return + + workspace_url = args.workspace or _default_workspace() + if not workspace_url: + output_error(args, "No workspace specified", hint="Use --workspace or run 'roboflow auth login'") + return + + base = f"{API_URL}/{workspace_url}/{args.project}/images/{args.image_id}/tags" + added = [] + removed = [] + + if args.add_tags: + for tag in args.add_tags.split(","): + tag = tag.strip() + if not tag: + continue + resp = requests.post(base, params={"api_key": api_key}, json={"tag": tag}) + if resp.status_code == 200: + added.append(tag) + + if args.remove_tags: + for tag in args.remove_tags.split(","): + tag = tag.strip() + if not tag: + continue + resp = requests.delete(f"{base}/{tag}", params={"api_key": api_key}) + if resp.status_code == 200: + removed.append(tag) + + data = {"added": added, "removed": removed} + parts = [] + if added: + parts.append(f"Added tags: {', '.join(added)}") + if removed: + parts.append(f"Removed tags: {', '.join(removed)}") + text = "; ".join(parts) if parts else "No tags modified" + output(args, data, text=text) + + +# --------------------------------------------------------------------------- +# delete +# --------------------------------------------------------------------------- + + +def _add_delete(sub: 
argparse._SubParsersAction) -> None: # type: ignore[type-arg] + p = sub.add_parser("delete", help="Delete images from workspace") + p.add_argument("image_ids", help="Comma-separated image IDs") + p.add_argument("-p", "--project", required=True, help="Project ID") + p.set_defaults(func=_handle_delete) + + +def _handle_delete(args: argparse.Namespace) -> None: + api_key = args.api_key or load_roboflow_api_key(args.workspace) + if not api_key: + output_error(args, "No API key found", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'", exit_code=2) + return + + workspace_url: str = args.workspace or _default_workspace() or "" + if not workspace_url: + output_error(args, "No workspace specified", hint="Use --workspace or run 'roboflow auth login'") + return + + ids = [i.strip() for i in args.image_ids.split(",") if i.strip()] + result = rfapi.workspace_delete_images( + api_key=api_key, + workspace_url=workspace_url, + image_ids=ids, + ) + + deleted = result.get("deleted", 0) + skipped = result.get("skipped", 0) + data = {"deleted": deleted, "skipped": skipped} + output(args, data, text=f"Deleted {deleted}, skipped {skipped}") + + +# --------------------------------------------------------------------------- +# annotate +# --------------------------------------------------------------------------- + + +def _add_annotate(sub: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + p = sub.add_parser("annotate", help="Upload annotation for an image") + p.add_argument("image_id", help="Image ID") + p.add_argument("-p", "--project", required=True, help="Project ID") + p.add_argument("--annotation-file", required=True, help="Path to annotation file") + p.add_argument("--format", default=None, dest="annotation_format", help="Annotation format name") + p.add_argument("--labelmap", default=None, help="Path to labelmap file") + p.set_defaults(func=_handle_annotate) + + +def _handle_annotate(args: argparse.Namespace) -> None: + api_key = args.api_key or 
load_roboflow_api_key(args.workspace) + if not api_key: + output_error(args, "No API key found", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'", exit_code=2) + return + + annotation_path = args.annotation_file + if not os.path.isfile(annotation_path): + output_error(args, f"Annotation file not found: {annotation_path}") + return + + with open(annotation_path) as f: + annotation_string = f.read() + + annotation_name = os.path.basename(annotation_path) + labelmap = None + if args.labelmap: + with open(args.labelmap) as f: + labelmap = json.load(f) + + rfapi.save_annotation( + api_key=api_key, + project_url=args.project, + annotation_name=annotation_name, + annotation_string=annotation_string, + image_id=args.image_id, + annotation_labelmap=labelmap, + ) + + data = {"status": "saved"} + output(args, data, text=f"Annotation saved for image {args.image_id}") + + +# --------------------------------------------------------------------------- +# helpers +# --------------------------------------------------------------------------- + + +def _default_workspace() -> str | None: + from roboflow.config import get_conditional_configuration_variable + + return get_conditional_configuration_variable("RF_WORKSPACE", default=None) diff --git a/roboflow/cli/handlers/infer.py b/roboflow/cli/handlers/infer.py new file mode 100644 index 00000000..79a40b1d --- /dev/null +++ b/roboflow/cli/handlers/infer.py @@ -0,0 +1,139 @@ +"""Infer command: run inference on an image.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import argparse + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register the top-level ``infer`` command.""" + infer_parser = subparsers.add_parser("infer", help="Run inference on an image") + infer_parser.add_argument( + "file", + help="Path to an image file", + ) + infer_parser.add_argument( + "-m", + "--model", + dest="model", + required=True, + help="Model ID 
(project/version, e.g. my-project/3)", + ) + infer_parser.add_argument( + "-c", + "--confidence", + dest="confidence", + type=float, + default=0.5, + help="Confidence threshold 0.0-1.0 (default: 0.5)", + ) + infer_parser.add_argument( + "-o", + "--overlap", + dest="overlap", + type=float, + default=0.5, + help="Overlap threshold 0.0-1.0 (default: 0.5)", + ) + infer_parser.add_argument( + "-t", + "--type", + dest="type", + default=None, + choices=[ + "object-detection", + "classification", + "instance-segmentation", + "semantic-segmentation", + "keypoint-detection", + ], + help="Model type (auto-detected if not specified)", + ) + infer_parser.set_defaults(func=_infer) + + +def _infer(args: argparse.Namespace) -> None: + from roboflow.adapters import rfapi + from roboflow.cli._output import output, output_error + from roboflow.cli._resolver import resolve_resource + from roboflow.config import load_roboflow_api_key + + try: + workspace_url, project_slug, version = resolve_resource(args.model, workspace_override=args.workspace) + except ValueError as exc: + output_error(args, str(exc)) + return + + api_key = args.api_key or load_roboflow_api_key(workspace_url) + if not api_key: + output_error(args, "No API key found.", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'.", exit_code=2) + return + + project_type = args.type + if not project_type: + try: + dataset_json = rfapi.get_project(api_key, workspace_url, project_slug) + project_type = dataset_json["project"]["type"] + except (rfapi.RoboflowError, KeyError) as exc: + output_error(args, f"Could not determine project type: {exc}", hint="Use -t/--type to specify.") + return + + # Lazy imports of model classes + from roboflow.models.classification import ClassificationModel + from roboflow.models.instance_segmentation import InstanceSegmentationModel + from roboflow.models.keypoint_detection import KeypointDetectionModel + from roboflow.models.object_detection import ObjectDetectionModel + from 
roboflow.models.semantic_segmentation import SemanticSegmentationModel + + model_class_map = { + "object-detection": ObjectDetectionModel, + "classification": ClassificationModel, + "instance-segmentation": InstanceSegmentationModel, + "semantic-segmentation": SemanticSegmentationModel, + "keypoint-detection": KeypointDetectionModel, + } + + model_cls = model_class_map.get(project_type) + if model_cls is None: + output_error(args, f"Unsupported project type: {project_type}") + return + + if version is not None: + project_url = f"{workspace_url}/{project_slug}/{version}" + else: + project_url = f"{workspace_url}/{project_slug}" + + model = model_cls(api_key, project_url) + + kwargs = {} + if args.confidence is not None and project_type in [ + "object-detection", + "instance-segmentation", + "semantic-segmentation", + ]: + kwargs["confidence"] = int(args.confidence * 100) + if args.overlap is not None and project_type == "object-detection": + kwargs["overlap"] = int(args.overlap * 100) + + try: + group = model.predict(args.file, **kwargs) + except Exception as exc: + output_error(args, f"Inference failed: {exc}") + return + + # Serialize predictions for JSON output + if getattr(args, "json", False): + predictions = [] + for pred in group: + if hasattr(pred, "json"): + predictions.append(pred.json()) + elif hasattr(pred, "__dict__"): + predictions.append(pred.__dict__) + else: + predictions.append(str(pred)) + output(args, predictions) + else: + output(args, None, text=str(group)) diff --git a/roboflow/cli/handlers/model.py b/roboflow/cli/handlers/model.py new file mode 100644 index 00000000..5f51bbbc --- /dev/null +++ b/roboflow/cli/handlers/model.py @@ -0,0 +1,205 @@ +"""Model management commands: list, get, upload.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import argparse + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register ``model`` subcommand and its 
verbs.""" + model_parser = subparsers.add_parser("model", help="Manage trained models") + model_subs = model_parser.add_subparsers(title="model commands", dest="model_command") + + # --- model list --- + list_parser = model_subs.add_parser("list", help="List trained models for a project") + list_parser.add_argument( + "-p", + "--project", + dest="project", + required=True, + help="Project ID or shorthand (e.g. my-ws/my-project)", + ) + list_parser.set_defaults(func=_list_models) + + # --- model get --- + get_parser = model_subs.add_parser("get", help="Show details for a trained model") + get_parser.add_argument( + "model_url", + help="Model URL (e.g. workspace/model-name)", + ) + get_parser.set_defaults(func=_get_model) + + # --- model upload --- + upload_parser = model_subs.add_parser("upload", help="Upload a trained model") + upload_parser.add_argument( + "-p", + "--project", + dest="project", + action="append", + help="Project ID (can be specified multiple times for multi-project deploy)", + ) + upload_parser.add_argument( + "-v", + "--version", + dest="version_number", + type=int, + default=None, + help="Version number to deploy to (for single-version deploy)", + ) + upload_parser.add_argument( + "-t", + "--type", + dest="model_type", + required=True, + help="Model type (e.g. 
yolov8, yolov5)", + ) + upload_parser.add_argument( + "-m", + "--model-path", + dest="model_path", + required=True, + help="Path to the trained model file", + ) + upload_parser.add_argument( + "-f", + "--filename", + dest="filename", + default="weights/best.pt", + help="Name of the model file (default: weights/best.pt)", + ) + upload_parser.add_argument( + "-n", + "--model-name", + dest="model_name", + default=None, + help="Name for the model (used in multi-project deploy)", + ) + upload_parser.set_defaults(func=_upload_model) + + # Default when no verb is given + model_parser.set_defaults(func=lambda args: model_parser.print_help()) + + +def _list_models(args: argparse.Namespace) -> None: + import roboflow + from roboflow.cli._output import output, output_error + from roboflow.cli._resolver import resolve_resource + from roboflow.cli._table import format_table + + try: + workspace_url, project_slug, _version = resolve_resource(args.project, workspace_override=args.workspace) + except ValueError as exc: + output_error(args, str(exc)) + return + + api_key = args.api_key or None + + try: + from roboflow.cli._output import suppress_sdk_output + + with suppress_sdk_output(args): + rf = roboflow.Roboflow(api_key=api_key) + workspace = rf.workspace(workspace_url) + project = workspace.project(project_slug) + versions = project.versions() + except Exception as exc: + output_error(args, str(exc), exit_code=3) + return + + models = [] + for v in versions: + if v.model: + models.append( + { + "version": v.version, + "id": v.id, + "model": getattr(v, "model_format", ""), + "map": getattr(v, "model", {}).get("map", "") + if isinstance(getattr(v, "model", None), dict) + else "", + } + ) + + table = format_table( + models, + columns=["version", "id", "model", "map"], + headers=["VERSION", "ID", "MODEL", "MAP"], + ) + output(args, models, text=table) + + +def _get_model(args: argparse.Namespace) -> None: + import json + + from roboflow.adapters import rfapi + from 
roboflow.cli._output import output, output_error + from roboflow.cli._resolver import resolve_resource + from roboflow.config import load_roboflow_api_key + + try: + workspace_url, project_slug, version = resolve_resource(args.model_url, workspace_override=args.workspace) + except ValueError as exc: + output_error(args, str(exc)) + return + + api_key = args.api_key or load_roboflow_api_key(workspace_url) + if not api_key: + output_error(args, "No API key found.", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'.", exit_code=2) + return + + try: + if version is not None: + data = rfapi.get_version(api_key, workspace_url, project_slug, str(version)) + else: + data = rfapi.get_project(api_key, workspace_url, project_slug) + except rfapi.RoboflowError as exc: + output_error(args, str(exc), exit_code=3) + return + + output(args, data, text=json.dumps(data, indent=2, default=str)) + + +def _upload_model(args: argparse.Namespace) -> None: + import roboflow + from roboflow.cli._output import output, output_error + + api_key = args.api_key or None + rf = roboflow.Roboflow(api_key=api_key) + workspace = rf.workspace(args.workspace) + + if args.version_number is not None: + # Deploy to a specific version + project_id = args.project[0] if isinstance(args.project, list) else args.project + if not project_id: + output_error(args, "Project is required for model upload.", hint="Use -p/--project.") + return + + try: + project = workspace.project(project_id) + version = project.version(args.version_number) + version.deploy(str(args.model_type), str(args.model_path), str(args.filename)) + except Exception as exc: + output_error(args, str(exc)) + return + else: + # Deploy to multiple projects + if not args.project: + output_error(args, "At least one project is required.", hint="Use -p/--project.") + return + + try: + workspace.deploy_model( + model_type=str(args.model_type), + model_path=str(args.model_path), + project_ids=args.project, + model_name=str(args.model_name) if 
args.model_name else "", + filename=str(args.filename), + ) + except Exception as exc: + output_error(args, str(exc)) + return + + output(args, {"status": "uploaded"}, text="Model uploaded successfully.") diff --git a/roboflow/cli/handlers/project.py b/roboflow/cli/handlers/project.py new file mode 100644 index 00000000..c7e5d0b1 --- /dev/null +++ b/roboflow/cli/handlers/project.py @@ -0,0 +1,187 @@ +"""Project management commands: list, get, create.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import argparse + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register ``project`` subcommand and its verbs.""" + project_parser = subparsers.add_parser("project", help="Manage projects") + project_subs = project_parser.add_subparsers(title="project commands", dest="project_command") + + # --- project list --- + list_parser = project_subs.add_parser("list", help="List projects in a workspace") + list_parser.add_argument("--type", dest="type", default=None, help="Filter by project type") + list_parser.set_defaults(func=_list_projects) + + # --- project get --- + get_parser = project_subs.add_parser("get", help="Show detailed info for a project") + get_parser.add_argument("project_id", help="Project ID or shorthand (e.g. 
my-ws/my-project)") + get_parser.set_defaults(func=_get_project) + + # --- project create --- + create_parser = project_subs.add_parser("create", help="Create a new project") + create_parser.add_argument("name", help="Project name") + create_parser.add_argument( + "--type", + dest="type", + required=True, + choices=[ + "object-detection", + "single-label-classification", + "multi-label-classification", + "instance-segmentation", + "semantic-segmentation", + "keypoint-detection", + ], + help="Project type", + ) + create_parser.add_argument("--license", dest="license", default="Private", help="Project license") + create_parser.add_argument("--annotation", dest="annotation", default="", help="Annotation group name") + create_parser.set_defaults(func=_create_project) + + # Default when no verb is given + project_parser.set_defaults(func=lambda args: project_parser.print_help()) + + +def _list_projects(args: argparse.Namespace) -> None: + from roboflow.adapters import rfapi + from roboflow.cli._output import output, output_error + from roboflow.cli._table import format_table + from roboflow.config import load_roboflow_api_key + + workspace_url = args.workspace + if not workspace_url: + from roboflow.cli._resolver import resolve_default_workspace + + workspace_url = resolve_default_workspace(api_key=args.api_key) + + if not workspace_url: + output_error(args, "No workspace specified.", hint="Use --workspace or run 'roboflow auth login'.") + return + + api_key = args.api_key or load_roboflow_api_key(workspace_url) + if not api_key: + output_error(args, "No API key found.", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'.", exit_code=2) + return + + try: + data = rfapi.get_workspace(api_key, workspace_url) + except rfapi.RoboflowError as exc: + output_error(args, str(exc), exit_code=3) + return + + projects = data.get("workspace", {}).get("projects", []) + + if args.type: + projects = [p for p in projects if p.get("type") == args.type] + + table = format_table( + 
projects, + columns=["name", "id", "type", "versions", "images"], + headers=["NAME", "ID", "TYPE", "VERSIONS", "IMAGES"], + ) + output(args, projects, text=table) + + +def _get_project(args: argparse.Namespace) -> None: + from roboflow.adapters import rfapi + from roboflow.cli._output import output, output_error + from roboflow.cli._resolver import resolve_resource + from roboflow.config import load_roboflow_api_key + + try: + workspace_url, project_slug, _version = resolve_resource(args.project_id, workspace_override=args.workspace) + except ValueError as exc: + output_error(args, str(exc)) + return + + api_key = args.api_key or load_roboflow_api_key(workspace_url) + if not api_key: + output_error(args, "No API key found.", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'.", exit_code=2) + return + + try: + data = rfapi.get_project(api_key, workspace_url, project_slug) + except rfapi.RoboflowError as exc: + output_error(args, str(exc), exit_code=3) + return + + project = data.get("project", data) + lines = [] + field_map = [ + ("Name", "name"), + ("ID", "id"), + ("Type", "type"), + ("License", "license"), + ("Annotation", "annotation"), + ("Classes", "classes"), + ("Images", "images"), + ("Versions", "versions"), + ("Created", "created"), + ("Updated", "updated"), + ("Public", "public"), + ] + epoch_keys = {"created", "updated"} + for label, key in field_map: + if key in project: + val = project[key] + if key in epoch_keys and isinstance(val, (int, float)): + import datetime + + val = datetime.datetime.fromtimestamp(val).strftime("%Y-%m-%d %H:%M:%S") + elif isinstance(val, dict): + val = ", ".join(f"{k}: {v}" for k, v in val.items()) + lines.append(f" {label:12s} {val}") + text = "\n".join(lines) if lines else "(no project details)" + + output(args, data, text=text) + + +def _create_project(args: argparse.Namespace) -> None: + import roboflow + from roboflow.cli._output import output, output_error, suppress_sdk_output + + annotation = args.annotation if 
args.annotation else args.name + + with suppress_sdk_output(args): + try: + rf = roboflow.Roboflow() + workspace = rf.workspace(args.workspace) + except Exception as exc: + output_error(args, str(exc)) + return + + try: + project = workspace.create_project( + project_name=args.name, + project_type=args.type, + project_license=args.license, + annotation=annotation, + ) + except Exception as exc: + msg = str(exc) + hint = None + # Try to extract a useful message from HTTP 422 responses + if hasattr(exc, "response"): + try: + body = exc.response.json() # type: ignore[union-attr] + if "error" in body: + hint = body["error"].get("message", None) if isinstance(body["error"], dict) else str(body["error"]) + elif "message" in body: + hint = str(body["message"]) + except Exception: + pass + output_error(args, msg, hint=hint) + return + + data = { + "id": project.id, + "name": project.name, + "type": project.type, + } + output(args, data, text=f"Created project: {project.name} ({project.id})") diff --git a/roboflow/cli/handlers/search.py b/roboflow/cli/handlers/search.py new file mode 100644 index 00000000..c3152f77 --- /dev/null +++ b/roboflow/cli/handlers/search.py @@ -0,0 +1,110 @@ +"""Search commands: query workspace images and export search results.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + import argparse + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register the ``search`` command.""" + search_parser = subparsers.add_parser("search", help="Search workspace images or export results as a dataset") + search_parser.add_argument("query", help="Search query (e.g. 
'tag:review' or '*')") + search_parser.add_argument("--limit", type=int, default=50, help="Max results to return (default: 50)") + search_parser.add_argument("--cursor", default=None, help="Continuation token for pagination") + search_parser.add_argument("--fields", default=None, help="Comma-separated list of fields to include") + search_parser.add_argument( + "--export", action="store_true", default=False, help="Export search results as a dataset" + ) + search_parser.add_argument( + "-f", "--format", dest="format", default="coco", help="Annotation format for export (default: coco)" + ) + search_parser.add_argument("-l", "--location", dest="location", default=None, help="Local directory for export") + search_parser.add_argument( + "-d", "--dataset", dest="dataset", default=None, help="Limit to a specific dataset (project slug)" + ) + search_parser.add_argument( + "-g", + "--annotation-group", + dest="annotation_group", + default=None, + help="Limit export to a specific annotation group", + ) + search_parser.add_argument("--name", dest="name", default=None, help="Optional name for the export") + search_parser.add_argument( + "--no-extract", dest="no_extract", action="store_true", default=False, help="Keep zip file, skip extraction" + ) + search_parser.set_defaults(func=_search) + + +def _search(args: argparse.Namespace) -> None: + import roboflow + from roboflow.cli._output import output_error, suppress_sdk_output + + try: + with suppress_sdk_output(): + rf = roboflow.Roboflow() + workspace = rf.workspace(args.workspace) + except Exception as exc: + output_error(args, str(exc), exit_code=2) + return + + if args.export: + _do_export(args, workspace) + else: + _do_search(args, workspace) + + +def _do_search(args: argparse.Namespace, workspace: Any) -> None: + from roboflow.cli._output import output, output_error + + fields = args.fields.split(",") if args.fields else None + try: + result = workspace.search( + query=args.query, + page_size=args.limit, + fields=fields, 
+ continuation_token=args.cursor, + ) + except Exception as exc: + output_error(args, str(exc)) + return + + results = result.get("results", []) + total = result.get("total", len(results)) + token = result.get("continuationToken") + + data = {"results": results, "total": total} + if token: + data["cursor"] = token + + text_lines = [f"Found {total} result(s)."] + for r in results: + text_lines.append(f" {r.get('filename', r.get('id', ''))}") + if token: + text_lines.append(f"\nNext page: --cursor {token}") + + output(args, data, text="\n".join(text_lines)) + + +def _do_export(args: argparse.Namespace, workspace: Any) -> None: + from roboflow.cli._output import output, output_error + + try: + result_path = workspace.search_export( + query=args.query, + format=args.format, + location=args.location, + dataset=args.dataset, + annotation_group=getattr(args, "annotation_group", None), + name=args.name, + extract_zip=not args.no_extract, + ) + except Exception as exc: + output_error(args, str(exc)) + return + + data = {"status": "completed", "path": str(result_path)} + output(args, data, text=f"Export completed: {result_path}") diff --git a/roboflow/cli/handlers/train.py b/roboflow/cli/handlers/train.py new file mode 100644 index 00000000..e126fd3e --- /dev/null +++ b/roboflow/cli/handlers/train.py @@ -0,0 +1,115 @@ +"""Train commands: start training for a dataset version.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import argparse + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register ``train`` subcommand and its verbs.""" + train_parser = subparsers.add_parser("train", help="Train a model") + train_subs = train_parser.add_subparsers(title="train commands", dest="train_command") + + # --- train start --- + start_parser = train_subs.add_parser("start", help="Start training for a dataset version") + _add_start_args(start_parser, required=True) + 
start_parser.set_defaults(func=_start) + + # Default: `train` without subcommand behaves like `train start` + _add_start_args(train_parser, required=False) + train_parser.set_defaults(func=_start) + + +def _add_start_args(parser: argparse.ArgumentParser, *, required: bool = True) -> None: + """Add shared arguments for the train start command.""" + parser.add_argument( + "-p", + "--project", + dest="project", + required=required, + help="Project ID to train", + ) + parser.add_argument( + "-v", + "--version", + dest="version_number", + type=int, + required=required, + help="Version number to train", + ) + parser.add_argument( + "-t", + "--type", + dest="model_type", + default=None, + help="Model type (e.g. rfdetr-nano, yolov8n)", + ) + parser.add_argument( + "--checkpoint", + dest="checkpoint", + default=None, + help="Checkpoint to resume training from", + ) + parser.add_argument( + "--speed", + dest="speed", + default=None, + help="Training speed preset", + ) + parser.add_argument( + "--epochs", + dest="epochs", + type=int, + default=None, + help="Number of training epochs", + ) + + +def _start(args: argparse.Namespace) -> None: + from roboflow.adapters import rfapi + from roboflow.cli._output import output, output_error + from roboflow.cli._resolver import resolve_resource + from roboflow.config import load_roboflow_api_key + + if not getattr(args, "project", None): + output_error(args, "Project is required.", hint="Use -p/--project.") + return + if getattr(args, "version_number", None) is None: + output_error(args, "Version is required.", hint="Use -v/--version.") + return + + try: + workspace_url, project_slug, _version = resolve_resource(args.project, workspace_override=args.workspace) + except ValueError as exc: + output_error(args, str(exc)) + return + + api_key = args.api_key or load_roboflow_api_key(workspace_url) + if not api_key: + output_error(args, "No API key found.", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'.", exit_code=2) + return + + 
try: + rfapi.start_version_training( + api_key, + workspace_url, + project_slug, + str(args.version_number), + speed=args.speed, + checkpoint=args.checkpoint, + model_type=args.model_type, + epochs=args.epochs, + ) + except rfapi.RoboflowError as exc: + output_error(args, str(exc)) + return + + data = { + "status": "training_started", + "project": project_slug, + "version": args.version_number, + } + output(args, data, text=f"Training started for {project_slug} version {args.version_number}.") diff --git a/roboflow/cli/handlers/universe.py b/roboflow/cli/handlers/universe.py new file mode 100644 index 00000000..fea90c8d --- /dev/null +++ b/roboflow/cli/handlers/universe.py @@ -0,0 +1,26 @@ +"""Universe search commands.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import argparse + + +def register(subparsers: argparse._SubParsersAction) -> None: # type: ignore[type-arg] + """Register the ``universe`` command group.""" + from roboflow.cli._output import stub + + uni_parser = subparsers.add_parser("universe", help="Browse Roboflow Universe") + uni_subs = uni_parser.add_subparsers(title="universe commands", dest="universe_command") + + # --- universe search --- + search_p = uni_subs.add_parser("search", help="Search Roboflow Universe") + search_p.add_argument("query", help="Search query") + search_p.add_argument("--type", dest="type", choices=["dataset", "model"], default=None, help="Filter by type") + search_p.add_argument("--limit", type=int, default=20, help="Max results (default: 20)") + search_p.set_defaults(func=stub) + + # Default + uni_parser.set_defaults(func=lambda args: uni_parser.print_help()) diff --git a/roboflow/cli/handlers/version.py b/roboflow/cli/handlers/version.py new file mode 100644 index 00000000..205abf82 --- /dev/null +++ b/roboflow/cli/handlers/version.py @@ -0,0 +1,245 @@ +"""Version management commands: list, get, download, export, create.""" + +from __future__ import annotations + 
# (continuation of roboflow/cli/handlers/version.py — module docstring precedes this block)

import re
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import argparse


def register(subparsers: "argparse._SubParsersAction") -> None:  # type: ignore[type-arg]
    """Register ``version`` subcommand and its verbs."""
    version_parser = subparsers.add_parser("version", help="Manage dataset versions")
    version_subs = version_parser.add_subparsers(title="version commands", dest="version_command")

    # --- version list ---
    list_parser = version_subs.add_parser("list", help="List versions for a project")
    list_parser.add_argument("-p", "--project", dest="project", required=True, help="Project ID")
    list_parser.set_defaults(func=_list_versions)

    # --- version get ---
    get_parser = version_subs.add_parser("get", help="Show detailed info for a version")
    get_parser.add_argument("version_num", help="Version number or shorthand (e.g. my-project/3)")
    get_parser.add_argument("-p", "--project", dest="project", default=None, help="Project ID")
    get_parser.set_defaults(func=_get_version)

    # --- version download ---
    dl_parser = version_subs.add_parser("download", help="Download a dataset version")
    dl_parser.add_argument("url_or_id", help="Dataset URL or shorthand (e.g. ws/project/3)")
    dl_parser.add_argument("-f", "--format", dest="format", default="voc", help="Export format (default: voc)")
    dl_parser.add_argument("-l", "--location", dest="location", default=None, help="Download location")
    dl_parser.set_defaults(func=_download)

    # --- version export ---
    export_parser = version_subs.add_parser("export", help="Trigger an async export")
    export_parser.add_argument("version_num", help="Version number")
    export_parser.add_argument("-p", "--project", dest="project", required=True, help="Project ID")
    export_parser.add_argument("-f", "--format", dest="format", default="voc", help="Export format (default: voc)")
    export_parser.set_defaults(func=_export)

    # --- version create (stub) ---
    create_parser = version_subs.add_parser("create", help="Create a new version (coming soon)")
    create_parser.add_argument("-p", "--project", dest="project", required=True, help="Project ID")
    create_parser.add_argument("--settings", dest="settings", default=None, help="Version settings as JSON string")
    create_parser.set_defaults(func=_create)

    # Default when no verb is given
    version_parser.set_defaults(func=lambda args: version_parser.print_help())


def _list_versions(args: "argparse.Namespace") -> None:
    """List every version of a project as a table (or JSON with --json)."""
    from roboflow.adapters import rfapi
    from roboflow.cli._output import output, output_error
    from roboflow.cli._resolver import resolve_resource
    from roboflow.cli._table import format_table
    from roboflow.config import load_roboflow_api_key

    try:
        workspace_url, project_slug, _ver = resolve_resource(args.project, workspace_override=args.workspace)
    except ValueError as exc:
        output_error(args, str(exc))
        return

    api_key = args.api_key or load_roboflow_api_key(workspace_url)
    if not api_key:
        output_error(args, "No API key found.", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'.", exit_code=2)
        return

    try:
        project_data = rfapi.get_project(api_key, workspace_url, project_slug)
    except rfapi.RoboflowError as exc:
        output_error(args, str(exc), exit_code=3)
        return

    versions = project_data.get("versions", [])
    rows = []
    for v in versions:
        rows.append(
            {
                "id": v.get("id", ""),
                "name": v.get("name", ""),
                "images": v.get("images", 0),
                "splits": _format_splits(v.get("splits", {})),
                "created": v.get("created", ""),
            }
        )

    table = format_table(
        rows,
        columns=["id", "name", "images", "splits", "created"],
        headers=["ID", "NAME", "IMAGES", "SPLITS", "CREATED"],
    )
    output(args, versions, text=table)


def _format_splits(splits: dict) -> str:
    """Render a splits dict as e.g. ``"train:100 valid:20 test:10"``.

    Zero/missing splits are omitted; an empty dict yields "".
    """
    if not splits:
        return ""
    parts = []
    for key in ("train", "valid", "test"):
        count = splits.get(key, 0)
        if count:
            parts.append(f"{key}:{count}")
    return " ".join(parts)


def _get_version(args: "argparse.Namespace") -> None:
    """Show the raw API record for one version."""
    from roboflow.adapters import rfapi
    from roboflow.cli._output import output, output_error
    from roboflow.cli._resolver import resolve_resource
    from roboflow.config import load_roboflow_api_key

    # Build shorthand: if --project is given, combine with version_num
    shorthand = args.version_num
    if args.project:
        shorthand = f"{args.project}/{args.version_num}"

    try:
        workspace_url, project_slug, version_num = resolve_resource(shorthand, workspace_override=args.workspace)
    except ValueError as exc:
        output_error(args, str(exc))
        return

    if version_num is None:
        output_error(args, "Version number is required.", hint="Use e.g. 'version get 3 -p my-project'.")
        return

    api_key = args.api_key or load_roboflow_api_key(workspace_url)
    if not api_key:
        output_error(args, "No API key found.", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'.", exit_code=2)
        return

    try:
        data = rfapi.get_version(api_key, workspace_url, project_slug, str(version_num))
    except rfapi.RoboflowError as exc:
        output_error(args, str(exc), exit_code=3)
        return

    import json

    # Text mode mirrors JSON mode here: the record has no fixed schema.
    output(args, data, text=json.dumps(data, indent=2, default=str))


def _parse_url(url: str) -> tuple:
    """Parse a Roboflow URL or shorthand into (workspace, project, version).

    Accepts app/universe URLs (``https://universe.roboflow.com/ws/proj/3``)
    or plain ``ws/proj[/3]`` shorthand; any component not present comes back
    as None, and a non-matching input yields (None, None, None).
    """
    regex = (
        r"(?:https?://)?(?:universe|app)\.roboflow\.(?:com|one)/([^/]+)/([^/]+)"
        r"(?:/dataset)?(?:/(\d+))?"
        r"|([^/]+)/([^/]+)(?:/(\d+))?"
    )
    match = re.match(regex, url)
    if match:
        # Groups 1-3 come from the URL alternative, 4-6 from the shorthand.
        organization = match.group(1) or match.group(4)
        dataset = match.group(2) or match.group(5)
        version = match.group(3) or match.group(6)
        return organization, dataset, version
    return None, None, None


def _download(args: "argparse.Namespace") -> None:
    """Download a dataset version to disk.

    Accepts a full Roboflow URL or ``ws/project[/version]`` shorthand; when
    no version is given, the most recent version is used.
    """
    import roboflow
    from roboflow.cli._output import output, output_error, suppress_sdk_output

    w, p, v = _parse_url(args.url_or_id)

    if not w or not p:
        output_error(args, f"Could not parse URL or shorthand: {args.url_or_id}")
        return

    # Always suppress SDK "loading..." noise during workspace/project init
    with suppress_sdk_output():
        try:
            rf = roboflow.Roboflow()
            project = rf.workspace(w).project(p)
        except Exception as exc:
            output_error(args, str(exc), exit_code=3)
            return

    try:
        if not v:
            versions = project.versions()
            if not versions:
                output_error(args, f"Project {p} does not have any versions.")
                return
            # No explicit version: fall back to the most recent one.
            version_obj = versions[-1]
        else:
            version_obj = project.version(int(v))

        version_obj.download(args.format, location=args.location, overwrite=True)
    except SystemExit:
        # The SDK may sys.exit() on fatal download errors; let that through.
        raise
    except Exception as exc:
        output_error(args, str(exc), exit_code=3)
        return

    data = {
        "workspace": w,
        "project": p,
        "version": int(v) if v else version_obj.version,
        "format": args.format,
        "location": args.location or "",
    }
    output(args, data, text=f"Downloaded {w}/{p}/{data['version']} in {args.format} format")


def _export(args: "argparse.Namespace") -> None:
    """Trigger (or poll) an asynchronous export for a version."""
    from roboflow.adapters import rfapi
    from roboflow.cli._output import output, output_error
    from roboflow.cli._resolver import resolve_resource
    from roboflow.config import load_roboflow_api_key

    shorthand = f"{args.project}/{args.version_num}"
    try:
        workspace_url, project_slug, version_num = resolve_resource(shorthand, workspace_override=args.workspace)
    except ValueError as exc:
        output_error(args, str(exc))
        return

    if version_num is None:
        output_error(args, "Version number is required.")
        return

    api_key = args.api_key or load_roboflow_api_key(workspace_url)
    if not api_key:
        output_error(args, "No API key found.", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'.", exit_code=2)
        return

    try:
        data = rfapi.get_version_export(api_key, workspace_url, project_slug, str(version_num), args.format)
    except rfapi.RoboflowError as exc:
        output_error(args, str(exc), exit_code=3)
        return

    if data.get("ready") is False:
        # NOTE(review): assumes `progress` is a 0..1 fraction (":.0%" scales
        # by 100) — confirm against the export API response schema.
        progress = data.get("progress", 0)
        output(args, data, text=f"Export in progress ({progress:.0%})...")
    else:
        output(args, data, text=f"Export ready for {project_slug}/{version_num} in {args.format} format")


def _create(args: "argparse.Namespace") -> None:
    """Placeholder for ``version create`` until the API wiring lands."""
    from roboflow.cli._output import output_error

    output_error(args, "This command is not yet implemented.", hint="Coming soon.", exit_code=1)


# ============================================================================
# roboflow/cli/handlers/video.py  (separate module in the package)
# ============================================================================
"""Video inference commands."""


def register(subparsers: "argparse._SubParsersAction") -> None:  # type: ignore[type-arg]
    """Register the ``video`` command group."""
    from roboflow.cli._output import stub

    video_parser = subparsers.add_parser("video", help="Video inference operations")
    video_subs = video_parser.add_subparsers(title="video commands", dest="video_command")

    # --- video infer ---
    infer_p = video_subs.add_parser("infer", help="Run video inference")
    infer_p.add_argument("-p", "--project", dest="project", required=True, help="Project ID")
    infer_p.add_argument("-v", "--version", dest="version_number", type=int, required=True, help="Model version number")
    infer_p.add_argument("-f", "--file", dest="video_file", required=True, help="Path to video file")
    infer_p.add_argument("--fps", dest="fps", type=int, default=5, help="Frames per second (default: 5)")
    infer_p.set_defaults(func=_video_infer)

    # --- video status ---
    status_p = video_subs.add_parser("status", help="Check video inference job status")
    status_p.add_argument("job_id", help="Job ID to check")
    status_p.set_defaults(func=stub)

    # Default
    video_parser.set_defaults(func=lambda args: video_parser.print_help())


def _video_infer(args: "argparse.Namespace") -> None:
    """Submit a video for asynchronous batch inference and print the job ID."""
    import roboflow
    from roboflow.cli._output import output, output_error
    from roboflow.config import load_roboflow_api_key

    api_key = args.api_key or load_roboflow_api_key(None)
    if not api_key:
        output_error(args, "No API key found.", hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'.", exit_code=2)
        return

    try:
        rf = roboflow.Roboflow(api_key)
        # Honor the global --workspace flag like the other handlers do
        # (the SDK falls back to the default workspace when this is None).
        project = rf.workspace(args.workspace).project(args.project)
        version = project.version(args.version_number)
        model = version.model

        job_id, _signed_url, _expire_time = model.predict_video(
            args.video_file,
            args.fps,
            prediction_type="batch-video",
        )
    except Exception as exc:
        output_error(args, str(exc))
        return

    data = {"job_id": job_id, "status": "submitted"}
    output(args, data, text=f"Video inference submitted. Job ID: {job_id}")


# ============================================================================
# roboflow/cli/handlers/workflow.py  (separate module in the package)
# ============================================================================
"""Workflow management commands."""


def register(subparsers: "argparse._SubParsersAction") -> None:  # type: ignore[type-arg]
    """Register the ``workflow`` command group (all verbs currently stubbed)."""
    from roboflow.cli._output import stub

    wf_parser = subparsers.add_parser("workflow", help="Manage workflows")
    wf_subs = wf_parser.add_subparsers(title="workflow commands", dest="workflow_command")

    # --- workflow list ---
    list_p = wf_subs.add_parser("list", help="List workflows in a workspace")
    list_p.set_defaults(func=stub)

    # --- workflow get ---
    get_p = wf_subs.add_parser("get", help="Show details for a workflow")
    get_p.add_argument("workflow_url", help="Workflow URL or ID")
    get_p.set_defaults(func=stub)

    # --- workflow create ---
    create_p = wf_subs.add_parser("create", help="Create a new workflow")
    create_p.add_argument("--name", required=True, help="Workflow name")
    create_p.add_argument("--definition", help="Path to JSON definition file")
    create_p.add_argument("--description", default=None, help="Workflow description")
    create_p.set_defaults(func=stub)

    # --- workflow update ---
    update_p = wf_subs.add_parser("update", help="Update an existing workflow")
    update_p.add_argument("workflow_url", help="Workflow URL or ID")
    update_p.add_argument("--definition", help="Path to JSON definition file")
    update_p.set_defaults(func=stub)

    # --- workflow version ---
    version_p = wf_subs.add_parser("version", help="Manage workflow versions")
    version_subs = version_p.add_subparsers(title="workflow version commands", dest="workflow_version_command")
    version_list_p = version_subs.add_parser("list", help="List versions of a workflow")
    version_list_p.add_argument("workflow_url", help="Workflow URL or ID")
    version_list_p.set_defaults(func=stub)
    version_p.set_defaults(func=lambda args: version_p.print_help())

    # --- workflow fork ---
    fork_p = wf_subs.add_parser("fork", help="Fork a workflow")
    fork_p.add_argument("workflow_url", help="Workflow URL or ID")
    fork_p.set_defaults(func=stub)

    # --- workflow build ---
    build_p = wf_subs.add_parser("build", help="Build a workflow from a prompt")
    build_p.add_argument("prompt", help="Natural language prompt describing the workflow")
    build_p.set_defaults(func=stub)

    # --- workflow run ---
    run_p = wf_subs.add_parser("run", help="Run a workflow")
    run_p.add_argument("workflow_url", help="Workflow URL or ID")
    run_p.add_argument("--input", dest="input", help="Input file or URL")
    run_p.set_defaults(func=stub)

    # --- workflow deploy ---
    deploy_p = wf_subs.add_parser("deploy", help="Deploy a workflow")
    deploy_p.add_argument("workflow_url", help="Workflow URL or ID")
    deploy_p.set_defaults(func=stub)

    # Default
    wf_parser.set_defaults(func=lambda args: wf_parser.print_help())
"""Workspace commands: list, get."""

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import argparse


def register(subparsers: "argparse._SubParsersAction") -> None:  # type: ignore[type-arg]
    """Register the ``workspace`` command group."""
    ws_parser = subparsers.add_parser("workspace", help="Manage workspaces")
    ws_sub = ws_parser.add_subparsers(title="workspace commands", dest="workspace_command")

    # workspace list
    list_cmd = ws_sub.add_parser("list", help="List configured workspaces")
    list_cmd.set_defaults(func=_list_workspaces)

    # workspace get
    get_cmd = ws_sub.add_parser("get", help="Get workspace details")
    get_cmd.add_argument("workspace_id", help="Workspace URL or ID")
    get_cmd.set_defaults(func=_get_workspace)

    # Bare `roboflow workspace` just prints usage.
    ws_parser.set_defaults(func=lambda args: ws_parser.print_help())


def _list_workspaces(args: "argparse.Namespace") -> None:
    """Print every workspace known to the local config (or, failing that, the API)."""
    import os

    from roboflow.cli._output import output
    from roboflow.cli._resolver import resolve_default_workspace
    from roboflow.cli._table import format_table
    from roboflow.config import APP_URL, get_conditional_configuration_variable

    known = get_conditional_configuration_variable("workspaces", default={})
    default_url = get_conditional_configuration_variable("RF_WORKSPACE", default=None)

    if not known:
        # Nothing cached locally: fall back to asking the API with whatever
        # key is available from the flag or the environment.
        key = getattr(args, "api_key", None) or os.getenv("ROBOFLOW_API_KEY")
        url = resolve_default_workspace(api_key=key)
        if url:
            display_name = url
            if key:
                try:
                    from roboflow.adapters import rfapi

                    payload = rfapi.get_workspace(key, url)
                    detail = payload.get("workspace", payload)
                    display_name = detail.get("name", url)
                except Exception:  # noqa: BLE001
                    # Best effort only; show the URL as the name instead.
                    pass
            known = {url: {"url": url, "name": display_name}}
            if not default_url:
                default_url = url

    rows = [
        {
            "name": entry.get("name", ""),
            "url": entry.get("url", ""),
            "link": f"{APP_URL}/{entry.get('url', '')}",
            "default": "yes" if entry.get("url") == default_url else "",
        }
        for entry in known.values()
    ]

    # `link` is omitted from the table columns but still ships in the
    # --json payload, since `rows` is passed to output() as the data.
    table = format_table(rows, columns=["name", "url", "default"], headers=["NAME", "ID", "DEFAULT"])
    output(args, rows, text=table)


def _get_workspace(args: "argparse.Namespace") -> None:
    """Fetch one workspace from the API and print a summary (raw JSON with --json)."""
    from roboflow.adapters import rfapi
    from roboflow.adapters.rfapi import RoboflowError
    from roboflow.cli._output import output, output_error
    from roboflow.config import APP_URL, load_roboflow_api_key

    workspace_id = args.workspace_id
    api_key = getattr(args, "api_key", None) or load_roboflow_api_key(workspace_id)
    if not api_key:
        output_error(
            args,
            "No API key found.",
            hint="Run 'roboflow auth login' or pass --api-key.",
            exit_code=2,
        )
        return  # unreachable, but helps mypy

    try:
        workspace_json = rfapi.get_workspace(api_key, workspace_id)
    except RoboflowError:
        # Any API error is surfaced as "not found" with exit code 3.
        output_error(
            args,
            f"Workspace '{workspace_id}' not found.",
            hint=f"Check the workspace ID and try again. Browse workspaces at {APP_URL}.",
            exit_code=3,
        )
        return  # unreachable, but helps mypy

    # Human-readable text for non-JSON mode; members/projects may arrive as
    # either counts or lists, so normalize both to a count.
    ws = workspace_json.get("workspace", workspace_json)
    member_info = ws.get("members", 0)
    project_info = ws.get("projects", [])
    summary = "\n".join(
        [
            f"Workspace: {ws.get('name', workspace_id)}",
            f" URL: {workspace_id}",
            f" Link: {APP_URL}/{workspace_id}",
            f" Members: {member_info if isinstance(member_info, int) else len(member_info)}",
            f" Projects: {len(project_info) if isinstance(project_info, list) else project_info}",
        ]
    )
    output(args, workspace_json, text=summary)
+""" -def login(args): - roboflow.login(force=args.force) +from roboflow.cli import build_parser, main +# Legacy alias: some scripts import _argparser directly +_argparser = build_parser -def train(args): - rf = roboflow.Roboflow() - workspace = rf.workspace(args.workspace) # handles None internally - project = workspace.project(args.project) - version = project.version(args.version_number) - model = version.train(model_type=args.model_type, checkpoint=args.checkpoint) - print(model) - - -def _parse_url(url): - regex = r"(?:https?://)?(?:universe|app)\.roboflow\.(?:com|one)/([^/]+)/([^/]+)(?:/dataset)?(?:/(\d+))?|([^/]+)/([^/]+)(?:/(\d+))?" # noqa: E501 - match = re.match(regex, url) - if match: - organization = match.group(1) or match.group(4) - dataset = match.group(2) or match.group(5) - version = match.group(3) or match.group(6) # This can be None if not present in the URL - return organization, dataset, version - return None, None, None - - -def download(args): - rf = roboflow.Roboflow() - w, p, v = _parse_url(args.datasetUrl) - project = rf.workspace(w).project(p) - if not v: - versions = project.versions() - if not versions: - print(f"project {p} does not have any version. exiting") - exit(1) - version = versions[-1] - print(f"Version not provided. 
Downloading last one ({version.version})") - else: - version = project.version(int(v)) - version.download(args.format, location=args.location, overwrite=True) - - -def import_dataset(args): - api_key = load_roboflow_api_key(args.workspace) - rf = roboflow.Roboflow(api_key) - workspace = rf.workspace(args.workspace) - workspace.upload_dataset( - dataset_path=args.folder, - project_name=args.project, - num_workers=args.concurrency, - batch_name=args.batch_name, - num_retries=args.num_retries, - ) - - -def upload_image(args): - rf = roboflow.Roboflow() - workspace = rf.workspace(args.workspace) - project = workspace.project(args.project) - metadata = json.loads(args.metadata) if args.metadata else None - project.single_upload( - image_path=args.imagefile, - annotation_path=args.annotation, - annotation_labelmap=args.labelmap, - split=args.split, - num_retry_uploads=args.num_retries, - batch_name=args.batch, - tag_names=args.tag_names.split(",") if args.tag_names else [], - is_prediction=args.is_prediction, - metadata=metadata, - ) - - -def upload_model(args): - rf = roboflow.Roboflow(args.api_key) - workspace = rf.workspace(args.workspace) - - if args.version_number is not None: - # Deploy to specific version - project_id = args.project[0] if isinstance(args.project, list) else args.project - project = workspace.project(project_id) - version = project.version(args.version_number) - version.deploy(str(args.model_type), str(args.model_path), str(args.filename)) - else: - # Deploy to multiple projects - workspace.deploy_model( - model_type=str(args.model_type), - model_path=str(args.model_path), - project_ids=args.project, - model_name=str(args.model_name), - filename=str(args.filename), - ) - - -def list_projects(args): - rf = roboflow.Roboflow() - workspace = rf.workspace(args.workspace) - projects = workspace.project_list - for p in projects: - print() - print(p["name"]) - print(f" link: {APP_URL}/{p['id']}") - print(f" id: {p['id']}") - print(f" type: {p['type']}") - 
print(f" versions: {p['versions']}") - print(f" images: {p['images']}") - print(f" classes: {p['classes'].keys()}") - - -def list_workspaces(args): - workspaces = roboflow_config.RF_WORKSPACES.values() - rf_workspace = get_conditional_configuration_variable("RF_WORKSPACE", default=None) - for w in workspaces: - print() - print(f"{w['name']}{' (default workspace)' if w['url'] == rf_workspace else ''}") - print(f" link: {APP_URL}/{w['url']}") - print(f" id: {w['url']}") - - -def get_workspace(args): - api_key = load_roboflow_api_key(args.workspaceId) - workspace_json = rfapi.get_workspace(api_key, args.workspaceId) - print(json.dumps(workspace_json, indent=2)) - - -def run_video_inference_api(args): - rf = roboflow.Roboflow(args.api_key) - project = rf.workspace().project(args.project) - version = project.version(args.version_number) - model = project.version(version).model - - # model = VideoInferenceModel(args.api_key, project.id, version.version, project.id) # Pass dataset_id - # Pass model_id and version - job_id, signed_url, expire_time = model.predict_video( - args.video_file, - args.fps, - prediction_type="batch-video", - ) - results = model.poll_until_video_results(job_id) - with open("test_video.json", "w") as f: - json.dump(results, f) - - -def get_workspace_project_version(args): - # api_key = load_roboflow_api_key(args.workspaceId) - rf = roboflow.Roboflow(args.api_key) - workspace = rf.workspace() - print("workspace", workspace) - project = workspace.project(args.project) - print("project", project) - version = project.version(args.version_number) - print("version", version) - - -def get_project(args): - workspace_url = args.workspace or get_conditional_configuration_variable("RF_WORKSPACE", default=None) - api_key = load_roboflow_api_key(workspace_url) - dataset_json = rfapi.get_project(api_key, workspace_url, args.projectId) - print(json.dumps(dataset_json, indent=2)) - - -def infer(args): - workspace_url = args.workspace or 
get_conditional_configuration_variable("RF_WORKSPACE", default=None) - api_key = load_roboflow_api_key(workspace_url) - project_url = f"{workspace_url}/{args.model}" - projectType = args.type - if not projectType: - projectId, _ = args.model.split("/") - dataset_json = rfapi.get_project(api_key, workspace_url, projectId) - projectType = dataset_json["project"]["type"] - modelClass = { - "object-detection": ObjectDetectionModel, - "classification": ClassificationModel, - "instance-segmentation": InstanceSegmentationModel, - "semantic-segmentation": SemanticSegmentationModel, - "keypoint-detection": KeypointDetectionModel, - }[projectType] - model = modelClass(api_key, project_url) - kwargs = {} - if args.confidence is not None and projectType in [ - "object-detection", - "instance-segmentation", - "semantic-segmentation", - ]: - kwargs["confidence"] = int(args.confidence * 100) - if args.overlap is not None and projectType == "object-detection": - kwargs["overlap"] = int(args.overlap * 100) - group = model.predict(args.file, **kwargs) - print(group) - - -def search_export(args): - rf = roboflow.Roboflow() - workspace = rf.workspace(args.workspace) - result = workspace.search_export( - query=args.query, - format=args.format, - location=args.location, - dataset=args.dataset, - annotation_group=args.annotation_group, - name=args.name, - extract_zip=not args.no_extract, - ) - print(result) - - -def _argparser(): - parser = argparse.ArgumentParser(description="Welcome to the roboflow CLI: computer vision at your fingertips 🪄") - subparsers = parser.add_subparsers(title="subcommands") - _add_login_parser(subparsers) - _add_download_parser(subparsers) - _add_train_parser(subparsers) - _add_upload_parser(subparsers) - _add_import_parser(subparsers) - _add_infer_parser(subparsers) - _add_projects_parser(subparsers) - _add_workspaces_parser(subparsers) - _add_upload_model_parser(subparsers) - _add_get_workspace_project_version_parser(subparsers) - 
_add_run_video_inference_api_parser(subparsers) - deployment.add_deployment_parser(subparsers) - _add_whoami_parser(subparsers) - _add_search_export_parser(subparsers) - - parser.add_argument("-v", "--version", help="show version info", action="store_true") - parser.set_defaults(func=show_version) - - return parser - - -def show_version(args): - print(roboflow.__version__) - - -def show_whoami(args): - RF_WORKSPACES = get_conditional_configuration_variable("workspaces", default={}) - workspaces_by_url = {w["url"]: w for w in RF_WORKSPACES.values()} - default_workspace_url = get_conditional_configuration_variable("RF_WORKSPACE", default=None) - default_workspace = workspaces_by_url.get(default_workspace_url, None) - default_workspace["apiKey"] = "**********" - print(json.dumps(default_workspace, indent=2)) - - -def _add_whoami_parser(subparsers): - download_parser = subparsers.add_parser("whoami", help="show current user info") - download_parser.set_defaults(func=show_whoami) - - -def _add_download_parser(subparsers): - download_parser = subparsers.add_parser( - "download", - help="Download a dataset version from your workspace or Roboflow Universe.", - ) - download_parser.add_argument("datasetUrl", help="Dataset URL (e.g., `roboflow-100/cells-uyemf/2`)") - download_parser.add_argument( - "-f", - dest="format", - default="voc", - help="Specify the format to download the version. 
Available options: [coco, " - "yolov5pytorch, yolov7pytorch, my-yolov6, darknet, voc, tfrecord, " - "createml, clip, multiclass, coco-segmentation, yolo5-obb, " - "png-mask-semantic, yolov8, yolov9]", - ) - download_parser.add_argument("-l", dest="location", help="Location to download the dataset") - download_parser.set_defaults(func=download) - - -def _add_upload_parser(subparsers): - upload_parser = subparsers.add_parser("upload", help="Upload a single image to a dataset") - upload_parser.add_argument( - "imagefile", - help="path to image file", - ) - upload_parser.add_argument( - "-w", - dest="workspace", - help="specify a workspace url or id (will use default workspace if not specified)", - ) - upload_parser.add_argument( - "-p", - dest="project", - help="project_id to upload the image into", - ) - upload_parser.add_argument( - "-a", - dest="annotation", - help="path to annotation file (optional)", - ) - upload_parser.add_argument( - "-m", - dest="labelmap", - help="path to labelmap file (optional)", - ) - upload_parser.add_argument( - "-s", - dest="split", - help="split set (train, valid, test) - optional", - default="train", - ) - upload_parser.add_argument( - "-r", - dest="num_retries", - help="Retry failed uploads this many times (default: 0)", - type=int, - default=0, - ) - upload_parser.add_argument( - "-b", - dest="batch", - help="Batch name to upload to (optional)", - ) - upload_parser.add_argument( - "-t", - dest="tag_names", - help="Tag names to apply to the image (optional)", - ) - upload_parser.add_argument( - "-i", - dest="is_prediction", - help="Whether this upload is a prediction (optional)", - action="store_true", - ) - upload_parser.add_argument( - "-M", - "--metadata", - dest="metadata", - help='JSON string of metadata to attach to the image (e.g. 
\'{"camera_id":"cam001"}\')', - ) - upload_parser.set_defaults(func=upload_image) - - -def _add_train_parser(subparsers): - train_parser = subparsers.add_parser("train", help="Train a model for a dataset version") - train_parser.add_argument( - "-w", - dest="workspace", - help="specify a workspace url or id (will use default workspace if not specified)", - ) - train_parser.add_argument( - "-p", - dest="project", - help="project_id to train the model for", - ) - train_parser.add_argument( - "-v", - dest="version_number", - type=int, - help="version number to train", - ) - train_parser.add_argument( - "-t", - dest="model_type", - help="type of the model to train (e.g., rfdetr-nano, yolov8n)", - ) - train_parser.add_argument( - "--checkpoint", - dest="checkpoint", - help="checkpoint to resume training from", - ) - train_parser.set_defaults(func=train) - - -def _add_import_parser(subparsers): - import_parser = subparsers.add_parser("import", help="Import a dataset from a local folder") - import_parser.add_argument( - "folder", - help="filesystem path to a folder that contains your dataset", - ) - import_parser.add_argument( - "-w", - dest="workspace", - help="specify a workspace url or id (will use default workspace if not specified)", - ) - import_parser.add_argument( - "-p", - dest="project", - help="project will be created if it does not exist", - ) - import_parser.add_argument( - "-c", - dest="concurrency", - type=int, - help="how many image uploads to perform concurrently (default: 10)", - default=10, - ) - import_parser.add_argument( - "-n", - dest="batch_name", - help="name of batch to upload to within project", - ) - import_parser.add_argument( - "-r", dest="num_retries", type=int, help="Retry failed uploads this many times (default=0)", default=0 - ) - import_parser.set_defaults(func=import_dataset) - - -def _add_projects_parser(subparsers): - project_parser = subparsers.add_parser( - "project", - help="project related commands. 
type 'roboflow project' to see detailed command help", - ) - projectsubparsers = project_parser.add_subparsers(title="project subcommands") - projectlist_parser = projectsubparsers.add_parser("list", help="list projects") - projectlist_parser.add_argument( - "-w", - dest="workspace", - help="specify a workspace url or id (will use default workspace if not specified)", - ) - projectlist_parser.set_defaults(func=list_projects) - projectget_parser = projectsubparsers.add_parser("get", help="show detailed info for a project") - projectget_parser.add_argument( - "projectId", - help="project ID", - ) - projectget_parser.add_argument( - "-w", - dest="workspace", - help="specify a workspace url or id (will use default workspace if not specified)", - ) - projectget_parser.set_defaults(func=get_project) - - -def _add_workspaces_parser(subparsers): - workspace_parser = subparsers.add_parser( - "workspace", - help="workspace related commands. type 'roboflow workspace' to see detailed command help", - ) - workspacesubparsers = workspace_parser.add_subparsers(title="workspace subcommands") - workspacelist_parser = workspacesubparsers.add_parser("list", help="list workspaces") - workspacelist_parser.set_defaults(func=list_workspaces) - workspaceget_parser = workspacesubparsers.add_parser("get", help="show detailed info for a workspace") - workspaceget_parser.add_argument( - "workspaceId", - help="project ID", - ) - workspaceget_parser.set_defaults(func=get_workspace) - - -def _add_run_video_inference_api_parser(subparsers): - run_video_inference_api_parser = subparsers.add_parser( - "run_video_inference_api", - help="run video inference api", - ) - - run_video_inference_api_parser.add_argument( - "-a", - dest="api_key", - help="api_key", - ) - run_video_inference_api_parser.add_argument( - "-p", - dest="project", - help="project_id to upload the image into", - ) - run_video_inference_api_parser.add_argument( - "-v", - dest="version_number", - type=int, - help="version number to 
upload the model to", - ) - run_video_inference_api_parser.add_argument( - "-f", - dest="video_file", - help="path to video file", - ) - run_video_inference_api_parser.add_argument( - "-fps", - dest="fps", - type=int, - help="fps", - default=5, - ) - run_video_inference_api_parser.set_defaults(func=run_video_inference_api) - - -def _add_infer_parser(subparsers): - infer_parser = subparsers.add_parser( - "infer", - help="perform inference on an image", - ) - infer_parser.add_argument( - "file", - help="filesystem path to an image file", - ) - infer_parser.add_argument( - "-w", - dest="workspace", - help="specify a workspace url or id (will use default workspace if not specified)", - ) - infer_parser.add_argument( - "-m", - dest="model", - help="model id (id of a version with trained model e.g. my-project/3)", - ) - infer_parser.add_argument( - "-c", - dest="confidence", - type=float, - help="specify a confidence threshold between 0.0 and 1.0, default is 0.5" - "(only applies to object-detection models)", - default=0.5, - ) - infer_parser.add_argument( - "-o", - dest="overlap", - type=float, - help="specify an overlap threshold between 0.0 and 1.0, default is 0.5" - "(only applies to object-detection models)", - default=0.5, - ) - infer_parser.add_argument( - "-t", - dest="type", - help="specify the model type to skip api call to look it up", - choices=[ - "object-detection", - "classification", - "instance-segmentation", - "semantic-segmentation", - ], - ) - infer_parser.set_defaults(func=infer) - - -def _add_upload_model_parser(subparsers): - upload_model_parser = subparsers.add_parser( - "upload_model", - help="Upload a trained model to Roboflow", - ) - upload_model_parser.add_argument( - "-a", - dest="api_key", - help="api_key", - ) - upload_model_parser.add_argument( - "-w", - dest="workspace", - help="specify a workspace url or id (will use default workspace if not specified)", - ) - upload_model_parser.add_argument( - "-p", - dest="project", - action="append", 
# Allow multiple projects - help="project_id to upload the model into (can be specified multiple times)", - ) - upload_model_parser.add_argument( - "-v", - dest="version_number", - type=int, - help="version number to upload the model to (optional)", - default=None, - ) - upload_model_parser.add_argument( - "-t", - dest="model_type", - help="type of the model (e.g., yolov8, yolov5)", - ) - upload_model_parser.add_argument( - "-m", - dest="model_path", - help="path to the trained model file", - ) - upload_model_parser.add_argument( - "-f", - dest="filename", - default="weights/best.pt", - help="name of the model file", - ) - upload_model_parser.add_argument( - "-n", - dest="model_name", - help="name of the model", - ) - upload_model_parser.set_defaults(func=upload_model) - - -def _add_get_workspace_project_version_parser(subparsers): - workspace_project_version_parser = subparsers.add_parser( - "get_workspace_info", - help="get workspace project version info", - ) - workspace_project_version_parser.add_argument( - "-a", - dest="api_key", - help="api_key", - ) - workspace_project_version_parser.add_argument( - "-w", - dest="workspace", - help="specify a workspace url or id (will use default workspace if not specified)", - ) - workspace_project_version_parser.add_argument( - "-p", - dest="project", - help="project_id to upload the model into", - ) - workspace_project_version_parser.add_argument( - "-v", - dest="version_number", - type=int, - help="version number to upload the model to", - ) - workspace_project_version_parser.set_defaults(func=get_workspace_project_version) - - -def _add_search_export_parser(subparsers): - p = subparsers.add_parser("search-export", help="Export search results as a dataset") - p.add_argument("query", help="Search query (e.g. 
'tag:annotate' or '*')") - p.add_argument("-f", dest="format", default="coco", help="Annotation format (default: coco)") - p.add_argument("-w", dest="workspace", help="Workspace url or id (uses default workspace if not specified)") - p.add_argument("-l", dest="location", help="Local directory to save the export") - p.add_argument("-d", dest="dataset", help="Limit export to a specific dataset (project slug)") - p.add_argument("-g", dest="annotation_group", help="Limit export to a specific annotation group") - p.add_argument("-n", dest="name", help="Optional name for the export") - p.add_argument("--no-extract", dest="no_extract", action="store_true", help="Skip extraction, keep the zip file") - p.set_defaults(func=search_export) - - -def _add_login_parser(subparsers): - login_parser = subparsers.add_parser("login", help="Log in to Roboflow") - login_parser.add_argument( - "-f", - dest="force", - help="force login", - action="store_true", - ) - login_parser.set_defaults(func=login) - - -def main(): - parser = _argparser() - args = parser.parse_args() - if hasattr(args, "func"): - args.func(args) - else: - parser.print_help() - +__all__ = ["main", "_argparser"] if __name__ == "__main__": main() diff --git a/tests/cli/__init__.py b/tests/cli/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/cli/test_annotation_handler.py b/tests/cli/test_annotation_handler.py new file mode 100644 index 00000000..bcd9aa4b --- /dev/null +++ b/tests/cli/test_annotation_handler.py @@ -0,0 +1,114 @@ +"""Unit tests for roboflow.cli.handlers.annotation.""" + +import argparse +import io +import sys +import types +import unittest + + +def _build_annotation_parser(): + """Build a minimal parser with just the annotation handler registered.""" + parser = argparse.ArgumentParser() + parser.add_argument("--json", "-j", action="store_true", default=False) + parser.add_argument("--api-key", "-k", dest="api_key", default=None) + parser.add_argument("--workspace", "-w", 
dest="workspace", default=None) + parser.add_argument("--quiet", "-q", action="store_true", default=False) + sub = parser.add_subparsers(title="commands", dest="command") + + from roboflow.cli.handlers.annotation import register + + register(sub) + return parser + + +class TestAnnotationParserRegistration(unittest.TestCase): + """Verify the annotation handler registers its subcommands.""" + + def test_annotation_subcommand_exists(self): + parser = _build_annotation_parser() + args = parser.parse_args(["annotation", "batch", "list", "-p", "proj"]) + self.assertEqual(args.project, "proj") + self.assertTrue(callable(args.func)) + + def test_annotation_batch_get(self): + parser = _build_annotation_parser() + args = parser.parse_args(["annotation", "batch", "get", "batch-1", "-p", "proj"]) + self.assertEqual(args.batch_id, "batch-1") + self.assertEqual(args.project, "proj") + + def test_annotation_job_list(self): + parser = _build_annotation_parser() + args = parser.parse_args(["annotation", "job", "list", "-p", "proj"]) + self.assertEqual(args.project, "proj") + + def test_annotation_job_get(self): + parser = _build_annotation_parser() + args = parser.parse_args(["annotation", "job", "get", "job-1", "-p", "proj"]) + self.assertEqual(args.job_id, "job-1") + + def test_annotation_job_create(self): + parser = _build_annotation_parser() + args = parser.parse_args( + [ + "annotation", + "job", + "create", + "-p", + "proj", + "--name", + "my-job", + "--batch", + "batch-1", + "--assignees", + "a@b.com,c@d.com", + ] + ) + self.assertEqual(args.name, "my-job") + self.assertEqual(args.batch, "batch-1") + self.assertEqual(args.assignees, "a@b.com,c@d.com") + + +class TestAnnotationStub(unittest.TestCase): + """Verify stub handlers print not-yet-implemented.""" + + def test_stub_prints_message(self): + from roboflow.cli._output import stub as _stub + + args = types.SimpleNamespace(json=False) + + buf = io.StringIO() + old = sys.stderr + sys.stderr = buf + try: + with 
self.assertRaises(SystemExit) as ctx: + _stub(args) + self.assertEqual(ctx.exception.code, 1) + finally: + sys.stderr = old + + self.assertIn("not yet implemented", buf.getvalue()) + + def test_stub_json_mode(self): + import json + + from roboflow.cli._output import stub as _stub + + args = types.SimpleNamespace(json=True) + + buf = io.StringIO() + old = sys.stderr + sys.stderr = buf + try: + with self.assertRaises(SystemExit) as ctx: + _stub(args) + self.assertEqual(ctx.exception.code, 1) + finally: + sys.stderr = old + + result = json.loads(buf.getvalue()) + self.assertIn("not yet implemented", result["error"]["message"]) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_auth.py b/tests/cli/test_auth.py new file mode 100644 index 00000000..83ee87ee --- /dev/null +++ b/tests/cli/test_auth.py @@ -0,0 +1,70 @@ +"""Tests for the auth CLI handler.""" + +import unittest + + +class TestAuthRegistration(unittest.TestCase): + """Verify auth handler registers expected subcommands.""" + + def test_register_callable(self) -> None: + from roboflow.cli.handlers.auth import register + + self.assertTrue(callable(register)) + + def test_auth_subcommand_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["auth", "status"]) + self.assertIsNotNone(args.func) + + def test_auth_login_defaults(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["auth", "login"]) + self.assertFalse(args.force) + self.assertIsNone(args.login_api_key) + self.assertIsNone(args.login_workspace) + + def test_auth_login_flags(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["auth", "login", "--api-key", "test123", "--force"]) + self.assertEqual(args.login_api_key, "test123") + self.assertTrue(args.force) + + def test_auth_set_workspace_positional(self) -> None: + from roboflow.cli import 
build_parser + + parser = build_parser() + args = parser.parse_args(["auth", "set-workspace", "my-ws"]) + self.assertEqual(args.workspace_id, "my-ws") + + def test_auth_logout_has_func(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["auth", "logout"]) + self.assertIsNotNone(args.func) + + def test_handler_functions_exist(self) -> None: + from roboflow.cli.handlers import auth + + # All handler functions should be importable + self.assertTrue(callable(auth._login)) + self.assertTrue(callable(auth._status)) + self.assertTrue(callable(auth._set_workspace)) + self.assertTrue(callable(auth._logout)) + + def test_mask_key(self) -> None: + from roboflow.cli.handlers.auth import _mask_key + + self.assertEqual(_mask_key("abcdefgh"), "ab****gh") + self.assertEqual(_mask_key("ab"), "****") + self.assertEqual(_mask_key(""), "****") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_backwards_compat.py b/tests/cli/test_backwards_compat.py new file mode 100644 index 00000000..feabc0ff --- /dev/null +++ b/tests/cli/test_backwards_compat.py @@ -0,0 +1,63 @@ +"""Tests that the roboflowpy.py backwards-compatibility shim works. + +Ensures that existing scripts and integrations that import from the old +monolithic module continue to work after the CLI modularization. 
+""" + +import unittest + + +class TestRoboflowpyShim(unittest.TestCase): + """Verify the roboflowpy.py shim re-exports work.""" + + def test_main_importable(self) -> None: + from roboflow.roboflowpy import main + + self.assertTrue(callable(main)) + + def test_argparser_importable(self) -> None: + """debugme.py imports _argparser — this must not break.""" + from roboflow.roboflowpy import _argparser + + self.assertTrue(callable(_argparser)) + + def test_argparser_returns_parser(self) -> None: + import argparse + + from roboflow.roboflowpy import _argparser + + parser = _argparser() + self.assertIsInstance(parser, argparse.ArgumentParser) + + def test_argparser_has_subcommands(self) -> None: + """The parser returned by _argparser should have the new CLI subcommands.""" + from roboflow.roboflowpy import _argparser + + parser = _argparser() + # Parse a known new-style command (--json must come before subcommand + # when using parse_args directly; _reorder_argv handles end-position + # in the real main() entry point) + args = parser.parse_args(["--json", "project", "list"]) + self.assertTrue(args.json) + + def test_argparser_has_legacy_aliases(self) -> None: + """Legacy command names should still parse.""" + from roboflow.roboflowpy import _argparser + + parser = _argparser() + + # 'login' was a top-level command in the old CLI + args = parser.parse_args(["login"]) + self.assertIsNotNone(args.func) + + # 'whoami' was a top-level command + args = parser.parse_args(["whoami"]) + self.assertIsNotNone(args.func) + + # 'download' was a top-level command + args = parser.parse_args(["download", "ws/proj/1"]) + self.assertIsNotNone(args.func) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_batch_handler.py b/tests/cli/test_batch_handler.py new file mode 100644 index 00000000..0162a508 --- /dev/null +++ b/tests/cli/test_batch_handler.py @@ -0,0 +1,48 @@ +"""Tests for the batch CLI handler.""" + +import unittest + + +class 
TestBatchRegistration(unittest.TestCase): + """Verify batch handler registers expected subcommands.""" + + def test_register_callable(self) -> None: + from roboflow.cli.handlers.batch import register + + self.assertTrue(callable(register)) + + def test_batch_create_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["batch", "create", "--workflow", "wf-1", "--input", "/tmp/imgs"]) + self.assertIsNotNone(args.func) + self.assertEqual(args.workflow, "wf-1") + self.assertEqual(args.input, "/tmp/imgs") + + def test_batch_status_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["batch", "status", "job-abc"]) + self.assertIsNotNone(args.func) + self.assertEqual(args.job_id, "job-abc") + + def test_batch_list_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["batch", "list"]) + self.assertIsNotNone(args.func) + + def test_batch_results_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["batch", "results", "job-abc"]) + self.assertIsNotNone(args.func) + self.assertEqual(args.job_id, "job-abc") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_completion_handler.py b/tests/cli/test_completion_handler.py new file mode 100644 index 00000000..387f8ee2 --- /dev/null +++ b/tests/cli/test_completion_handler.py @@ -0,0 +1,37 @@ +"""Tests for the completion CLI handler.""" + +import unittest + + +class TestCompletionRegistration(unittest.TestCase): + """Verify completion handler registers expected subcommands.""" + + def test_register_callable(self) -> None: + from roboflow.cli.handlers.completion import register + + self.assertTrue(callable(register)) + + def test_completion_bash_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = 
parser.parse_args(["completion", "bash"]) + self.assertIsNotNone(args.func) + + def test_completion_zsh_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["completion", "zsh"]) + self.assertIsNotNone(args.func) + + def test_completion_fish_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["completion", "fish"]) + self.assertIsNotNone(args.func) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_deployment_handler.py b/tests/cli/test_deployment_handler.py new file mode 100644 index 00000000..48d90dcc --- /dev/null +++ b/tests/cli/test_deployment_handler.py @@ -0,0 +1,149 @@ +"""Tests for the deployment CLI handler.""" + +import io +import unittest +from unittest.mock import patch + + +class TestDeploymentRegistration(unittest.TestCase): + """Verify deployment handler registers expected subcommands.""" + + def test_register_callable(self) -> None: + from roboflow.cli.handlers.deployment import register + + self.assertTrue(callable(register)) + + def test_deployment_subcommand_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["deployment", "list"]) + self.assertIsNotNone(args.func) + + def test_deployment_add_hidden_alias(self) -> None: + """Legacy 'add' alias should still work (hidden from help).""" + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["deployment", "add", "mydepl", "-m", "gpu-small", "-e", "test@example.com"]) + self.assertIsNotNone(args.func) + + def test_deployment_create_canonical(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["deployment", "create", "mydepl", "-m", "gpu-small", "-e", "test@example.com"]) + self.assertIsNotNone(args.func) + + def test_deployment_machine_type_canonical(self) -> None: + from 
roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["deployment", "machine-type"]) + self.assertIsNotNone(args.func) + + def test_deployment_machine_type_legacy_alias(self) -> None: + """Legacy 'machine_type' alias should still work.""" + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["deployment", "machine_type"]) + self.assertIsNotNone(args.func) + + def test_deployment_get_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["deployment", "get", "mydepl"]) + self.assertIsNotNone(args.func) + + def test_deployment_delete_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["deployment", "delete", "mydepl"]) + self.assertIsNotNone(args.func) + + def test_deployment_subparser_registered(self) -> None: + """The 'deployment' subparser should be registered on the root parser.""" + from roboflow.cli import build_parser + + parser = build_parser() + # Find the subparsers action + for action in parser._actions: + if isinstance(action, type(parser._subparsers._group_actions[0])): + self.assertIn("deployment", action.choices) + return + self.fail("No subparsers action found") + + def test_deployment_usage_canonical(self) -> None: + """The new 'usage' command accepts optional deployment name.""" + from roboflow.cli import build_parser + + parser = build_parser() + # Workspace-wide usage (no deployment name) + args = parser.parse_args(["deployment", "usage"]) + self.assertIsNotNone(args.func) + self.assertIsNone(args.deployment_name) + + # Deployment-specific usage + args = parser.parse_args(["deployment", "usage", "mydepl"]) + self.assertEqual(args.deployment_name, "mydepl") + + def test_deployment_usage_legacy_aliases(self) -> None: + """Legacy usage_workspace and usage_deployment aliases should still work.""" + from roboflow.cli import build_parser + + parser = 
build_parser() + args = parser.parse_args(["deployment", "usage_workspace"]) + self.assertIsNotNone(args.func) + + args = parser.parse_args(["deployment", "usage_deployment", "mydepl"]) + self.assertIsNotNone(args.func) + + +class TestDeploymentErrorWrapping(unittest.TestCase): + """Verify deployment errors produce structured output.""" + + def test_wrapped_error_uses_structured_output(self) -> None: + """Deployment errors should go through output_error, not bare print.""" + from roboflow.cli.handlers.deployment import _wrap + + def _fake_handler(args: object) -> None: + print("401: Unauthorized (invalid api_key)") + raise SystemExit(401) + + import argparse + + ns = argparse.Namespace(json=True, api_key=None, workspace=None, quiet=False) + wrapped = _wrap(_fake_handler) + stderr = io.StringIO() + with patch("sys.stderr", stderr): + with self.assertRaises(SystemExit) as ctx: + wrapped(ns) + self.assertLessEqual(ctx.exception.code, 3) + import json + + err_output = stderr.getvalue().strip() + parsed = json.loads(err_output) + self.assertIn("error", parsed) + + def test_wrapped_success_prints_output(self) -> None: + """On success, wrapped func should replay captured stdout.""" + from roboflow.cli.handlers.deployment import _wrap + + def _fake_handler(args: object) -> None: + print('{"machines": []}') + + import argparse + + ns = argparse.Namespace(json=False, api_key=None, workspace=None, quiet=False) + wrapped = _wrap(_fake_handler) + captured = io.StringIO() + with patch("sys.stdout", captured): + wrapped(ns) + self.assertIn('{"machines": []}', captured.getvalue()) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_discovery.py b/tests/cli/test_discovery.py new file mode 100644 index 00000000..38f33d89 --- /dev/null +++ b/tests/cli/test_discovery.py @@ -0,0 +1,159 @@ +"""Tests that the CLI auto-discovery mechanism works correctly.""" + +import unittest + + +class TestCLIDiscovery(unittest.TestCase): + """Verify build_parser discovers 
handlers and creates expected subcommands.""" + + def test_build_parser_returns_parser(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + self.assertIsNotNone(parser) + + def test_parser_has_global_flags(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + # Parse with no args should work (defaults to help / version) + args = parser.parse_args(["--json"]) + self.assertTrue(args.json) + + def test_version_flag(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["--version"]) + self.assertTrue(args.version) + + def test_handlers_package_importable(self) -> None: + import roboflow.cli.handlers + + self.assertIsNotNone(roboflow.cli.handlers) + + def test_output_module_importable(self) -> None: + from roboflow.cli._output import output, output_error + + self.assertTrue(callable(output)) + self.assertTrue(callable(output_error)) + + def test_resolver_module_importable(self) -> None: + from roboflow.cli._resolver import resolve_resource + + self.assertTrue(callable(resolve_resource)) + + def test_table_module_importable(self) -> None: + from roboflow.cli._table import format_table + + self.assertTrue(callable(format_table)) + + +class TestReorderArgv(unittest.TestCase): + """Verify _reorder_argv moves global flags before subcommands.""" + + def _reorder(self, argv: list[str]) -> list[str]: + from roboflow.cli import _reorder_argv + + return _reorder_argv(argv) + + def test_no_flags(self) -> None: + self.assertEqual(self._reorder(["project", "list"]), ["project", "list"]) + + def test_empty(self) -> None: + self.assertEqual(self._reorder([]), []) + + def test_bool_flag_after_subcommand(self) -> None: + result = self._reorder(["project", "list", "--json"]) + self.assertEqual(result, ["--json", "project", "list"]) + + def test_bool_flag_already_first(self) -> None: + result = self._reorder(["--json", "project", "list"]) + self.assertEqual(result, 
["--json", "project", "list"]) + + def test_short_bool_flag(self) -> None: + result = self._reorder(["project", "list", "-j"]) + self.assertEqual(result, ["-j", "project", "list"]) + + def test_value_flag_after_subcommand(self) -> None: + result = self._reorder(["project", "list", "--api-key", "abc123"]) + self.assertEqual(result, ["--api-key", "abc123", "project", "list"]) + + def test_short_value_flag(self) -> None: + result = self._reorder(["project", "list", "-k", "abc123"]) + self.assertEqual(result, ["-k", "abc123", "project", "list"]) + + def test_multiple_flags_mixed(self) -> None: + # -w is NOT reordered (collides with deployment's -w/--wait_on_pending) + # but --workspace (long form) and --json are reordered + result = self._reorder(["project", "list", "--json", "-w", "my-ws"]) + self.assertEqual(result, ["--json", "project", "list", "-w", "my-ws"]) + + def test_value_flag_at_end_without_value(self) -> None: + """A value flag at the very end with no following arg should still be moved.""" + result = self._reorder(["project", "list", "--api-key"]) + self.assertEqual(result, ["--api-key", "project", "list"]) + + def test_non_global_flags_preserved(self) -> None: + """Flags not in the global set stay in place.""" + result = self._reorder(["image", "upload", "--project", "my-proj", "--json"]) + self.assertEqual(result, ["--json", "image", "upload", "--project", "my-proj"]) + + def test_quiet_and_version_flags(self) -> None: + result = self._reorder(["project", "list", "--quiet", "--version"]) + self.assertEqual(result, ["--quiet", "--version", "project", "list"]) + + def test_workspace_flag(self) -> None: + result = self._reorder(["project", "list", "--workspace", "ws-1"]) + self.assertEqual(result, ["--workspace", "ws-1", "project", "list"]) + + def test_preserves_subcommand_positional_args(self) -> None: + result = self._reorder(["version", "download", "ws/proj/3", "--json", "-f", "yolov8"]) + self.assertEqual(result, ["--json", "version", "download", 
"ws/proj/3", "-f", "yolov8"]) + + +class TestAliases(unittest.TestCase): + """Verify top-level aliases parse correctly and delegate to the right handler.""" + + def _parse(self, argv: list[str]): + from roboflow.cli import build_parser + + parser = build_parser() + return parser.parse_args(argv) + + def test_login_alias_exists(self) -> None: + args = self._parse(["login"]) + self.assertIsNotNone(args.func) + + def test_whoami_alias_exists(self) -> None: + args = self._parse(["whoami"]) + self.assertIsNotNone(args.func) + + def test_upload_alias_exists(self) -> None: + args = self._parse(["upload", "img.jpg", "-p", "my-project"]) + self.assertIsNotNone(args.func) + self.assertEqual(args.path, "img.jpg") + self.assertEqual(args.project, "my-project") + + def test_import_alias_exists(self) -> None: + args = self._parse(["import", "/data/images", "-p", "my-project"]) + self.assertIsNotNone(args.func) + self.assertEqual(args.path, "/data/images") + self.assertEqual(args.project, "my-project") + + def test_download_alias_parses_url(self) -> None: + """Regression: download alias must use url_or_id as dest, not datasetUrl.""" + args = self._parse(["download", "my-ws/my-proj/3"]) + self.assertIsNotNone(args.func) + self.assertEqual(args.url_or_id, "my-ws/my-proj/3") + + def test_download_alias_delegates_to_version_download(self) -> None: + """The download alias should use the same handler as 'version download'.""" + from roboflow.cli.handlers.version import _download + + args = self._parse(["download", "my-ws/my-proj/3"]) + self.assertIs(args.func, _download) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_folder_handler.py b/tests/cli/test_folder_handler.py new file mode 100644 index 00000000..c75ba939 --- /dev/null +++ b/tests/cli/test_folder_handler.py @@ -0,0 +1,53 @@ +"""Tests for the folder CLI handler.""" + +import unittest + + +class TestFolderRegistration(unittest.TestCase): + """Verify folder handler registers expected subcommands.""" 
+ + def test_register_callable(self) -> None: + from roboflow.cli.handlers.folder import register + + self.assertTrue(callable(register)) + + def test_folder_list_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["folder", "list"]) + self.assertIsNotNone(args.func) + + def test_folder_get_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["folder", "get", "folder-123"]) + self.assertIsNotNone(args.func) + self.assertEqual(args.folder_id, "folder-123") + + def test_folder_create_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["folder", "create", "My Folder"]) + self.assertIsNotNone(args.func) + self.assertEqual(args.name, "My Folder") + + def test_folder_update_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["folder", "update", "folder-123", "--name", "New Name"]) + self.assertIsNotNone(args.func) + + def test_folder_delete_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["folder", "delete", "folder-123"]) + self.assertIsNotNone(args.func) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_image_handler.py b/tests/cli/test_image_handler.py new file mode 100644 index 00000000..3be1454b --- /dev/null +++ b/tests/cli/test_image_handler.py @@ -0,0 +1,354 @@ +"""Unit tests for roboflow.cli.handlers.image.""" + +import argparse +import io +import json +import os +import sys +import tempfile +import types +import unittest +from unittest.mock import MagicMock, patch + + +def _make_args(**overrides): + defaults = { + "json": False, + "api_key": "test-key", + "workspace": "test-ws", + "quiet": False, + } + defaults.update(overrides) + return types.SimpleNamespace(**defaults) + + +def _build_image_parser(): + 
"""Build a minimal parser with just the image handler registered.""" + parser = argparse.ArgumentParser() + parser.add_argument("--json", "-j", action="store_true", default=False) + parser.add_argument("--api-key", "-k", dest="api_key", default=None) + parser.add_argument("--workspace", "-w", dest="workspace", default=None) + parser.add_argument("--quiet", "-q", action="store_true", default=False) + sub = parser.add_subparsers(title="commands", dest="command") + + from roboflow.cli.handlers.image import register + + register(sub) + return parser + + +class TestImageParserRegistration(unittest.TestCase): + """Verify the image handler registers its subcommands.""" + + def test_image_subcommand_exists(self): + parser = _build_image_parser() + args = parser.parse_args(["image", "upload", "test.jpg", "-p", "my-proj"]) + self.assertEqual(args.path, "test.jpg") + self.assertEqual(args.project, "my-proj") + + def test_image_upload_defaults(self): + parser = _build_image_parser() + args = parser.parse_args(["image", "upload", "test.jpg", "-p", "proj"]) + self.assertEqual(args.split, "train") + self.assertEqual(args.concurrency, 10) + self.assertEqual(args.retries, 0) + self.assertFalse(args.is_prediction) + + def test_image_get_parser(self): + parser = _build_image_parser() + args = parser.parse_args(["image", "get", "img-123", "-p", "proj"]) + self.assertEqual(args.image_id, "img-123") + self.assertEqual(args.project, "proj") + + def test_image_search_parser(self): + parser = _build_image_parser() + args = parser.parse_args(["image", "search", "tag:review", "-p", "proj", "--limit", "10"]) + self.assertEqual(args.query, "tag:review") + self.assertEqual(args.limit, 10) + + def test_image_tag_parser(self): + parser = _build_image_parser() + args = parser.parse_args(["image", "tag", "img-1", "-p", "proj", "--add", "a,b", "--remove", "c"]) + self.assertEqual(args.image_id, "img-1") + self.assertEqual(args.add_tags, "a,b") + self.assertEqual(args.remove_tags, "c") + + def 
test_image_delete_parser(self): + parser = _build_image_parser() + args = parser.parse_args(["image", "delete", "id1,id2", "-p", "proj"]) + self.assertEqual(args.image_ids, "id1,id2") + + def test_image_annotate_parser(self): + parser = _build_image_parser() + args = parser.parse_args(["image", "annotate", "img-1", "-p", "proj", "--annotation-file", "ann.txt"]) + self.assertEqual(args.image_id, "img-1") + self.assertEqual(args.annotation_file, "ann.txt") + + +class TestImageUploadSingle(unittest.TestCase): + """Test the single-file upload path.""" + + @patch("roboflow.Roboflow") + def test_upload_single_file(self, mock_rf_cls): + from roboflow.cli.handlers.image import _handle_upload + + with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f: + f.write(b"fake-image") + tmp = f.name + try: + mock_project = MagicMock() + mock_rf_cls.return_value.workspace.return_value.project.return_value = mock_project + + args = _make_args( + path=tmp, + project="proj", + annotation=None, + split="train", + batch=None, + tag=None, + metadata=None, + concurrency=10, + retries=0, + labelmap=None, + is_prediction=False, + ) + + buf = io.StringIO() + old = sys.stdout + sys.stdout = buf + try: + _handle_upload(args) + finally: + sys.stdout = old + + mock_project.single_upload.assert_called_once() + self.assertIn("Uploaded", buf.getvalue()) + finally: + os.unlink(tmp) + + @patch("roboflow.Roboflow") + def test_upload_single_json_mode(self, mock_rf_cls): + from roboflow.cli.handlers.image import _handle_upload + + with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f: + f.write(b"fake-image") + tmp = f.name + try: + mock_project = MagicMock() + mock_rf_cls.return_value.workspace.return_value.project.return_value = mock_project + + args = _make_args( + json=True, + path=tmp, + project="proj", + annotation=None, + split="train", + batch=None, + tag=None, + metadata=None, + concurrency=10, + retries=0, + labelmap=None, + is_prediction=False, + ) + + buf = 
io.StringIO() + old = sys.stdout + sys.stdout = buf + try: + _handle_upload(args) + finally: + sys.stdout = old + + result = json.loads(buf.getvalue()) + self.assertEqual(result["status"], "uploaded") + finally: + os.unlink(tmp) + + +class TestImageUploadDirectory(unittest.TestCase): + """Test the directory import path.""" + + @patch("roboflow.Roboflow") + def test_upload_directory(self, mock_rf_cls): + from roboflow.cli.handlers.image import _handle_upload + + with tempfile.TemporaryDirectory() as tmpdir: + # Create some fake images + for name in ["a.jpg", "b.png", "c.txt"]: + with open(os.path.join(tmpdir, name), "w") as f: + f.write("x") + + mock_ws = MagicMock() + mock_rf_cls.return_value.workspace.return_value = mock_ws + + args = _make_args( + json=True, + path=tmpdir, + project="proj", + annotation=None, + split="train", + batch=None, + tag=None, + metadata=None, + concurrency=5, + retries=1, + labelmap=None, + is_prediction=False, + ) + + buf = io.StringIO() + old = sys.stdout + sys.stdout = buf + try: + _handle_upload(args) + finally: + sys.stdout = old + + mock_ws.upload_dataset.assert_called_once() + result = json.loads(buf.getvalue()) + self.assertEqual(result["status"], "imported") + self.assertEqual(result["count"], 2) # .jpg and .png only + + +class TestImageDelete(unittest.TestCase): + """Test the delete handler.""" + + @patch("roboflow.cli.handlers.image.rfapi") + def test_delete_images(self, mock_rfapi): + from roboflow.cli.handlers.image import _handle_delete + + mock_rfapi.workspace_delete_images.return_value = {"deleted": 2, "skipped": 0} + + args = _make_args(json=True, image_ids="id1,id2", project="proj") + + buf = io.StringIO() + old = sys.stdout + sys.stdout = buf + try: + _handle_delete(args) + finally: + sys.stdout = old + + mock_rfapi.workspace_delete_images.assert_called_once_with( + api_key="test-key", + workspace_url="test-ws", + image_ids=["id1", "id2"], + ) + result = json.loads(buf.getvalue()) + self.assertEqual(result["deleted"], 
2) + + +class TestImageSearch(unittest.TestCase): + """Test the search handler.""" + + @patch("roboflow.cli.handlers.image.rfapi") + def test_search(self, mock_rfapi): + from roboflow.cli.handlers.image import _handle_search + + mock_rfapi.workspace_search.return_value = {"results": [], "total": 0} + + args = _make_args(json=True, query="tag:test", project="proj", limit=10, cursor=None) + + buf = io.StringIO() + old = sys.stdout + sys.stdout = buf + try: + _handle_search(args) + finally: + sys.stdout = old + + mock_rfapi.workspace_search.assert_called_once() + result = json.loads(buf.getvalue()) + self.assertEqual(result["total"], 0) + + +class TestImageAnnotate(unittest.TestCase): + """Test the annotate handler.""" + + @patch("roboflow.cli.handlers.image.rfapi") + def test_annotate(self, mock_rfapi): + from roboflow.cli.handlers.image import _handle_annotate + + mock_rfapi.save_annotation.return_value = {"success": True} + + with tempfile.NamedTemporaryFile(suffix=".txt", delete=False, mode="w") as f: + f.write("annotation data") + ann_path = f.name + + try: + args = _make_args( + json=True, + image_id="img-1", + project="proj", + annotation_file=ann_path, + annotation_format=None, + labelmap=None, + ) + + buf = io.StringIO() + old = sys.stdout + sys.stdout = buf + try: + _handle_annotate(args) + finally: + sys.stdout = old + + mock_rfapi.save_annotation.assert_called_once() + result = json.loads(buf.getvalue()) + self.assertEqual(result["status"], "saved") + finally: + os.unlink(ann_path) + + +class TestUploadPathNotFound(unittest.TestCase): + """Test error when path doesn't exist.""" + + def test_nonexistent_path(self): + from roboflow.cli.handlers.image import _handle_upload + + args = _make_args( + path="/nonexistent/path.jpg", + project="proj", + annotation=None, + split="train", + batch=None, + tag=None, + metadata=None, + concurrency=10, + retries=0, + labelmap=None, + is_prediction=False, + ) + + with self.assertRaises(SystemExit): + _handle_upload(args) + 
+ +class TestImageTagValidation(unittest.TestCase): + """Test that tag command validates --add/--remove presence.""" + + def test_tag_no_add_or_remove(self): + from roboflow.cli.handlers.image import _handle_tag + + args = _make_args( + image_id="img-1", + project="proj", + add_tags=None, + remove_tags=None, + ) + + buf = io.StringIO() + old = sys.stderr + sys.stderr = buf + try: + with self.assertRaises(SystemExit): + _handle_tag(args) + finally: + sys.stderr = old + + self.assertIn("Nothing to do", buf.getvalue()) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_infer_handler.py b/tests/cli/test_infer_handler.py new file mode 100644 index 00000000..88daa071 --- /dev/null +++ b/tests/cli/test_infer_handler.py @@ -0,0 +1,184 @@ +"""Unit tests for roboflow.cli.handlers.infer.""" + +import io +import json +import sys +import types +import unittest +from unittest.mock import MagicMock, patch + + +class TestInferRegister(unittest.TestCase): + """Verify infer handler registers as a top-level command.""" + + def test_register_adds_infer_parser(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["infer", "image.jpg", "-m", "proj/1"]) + self.assertEqual(args.command, "infer") + self.assertEqual(args.file, "image.jpg") + self.assertEqual(args.model, "proj/1") + self.assertTrue(callable(args.func)) + + def test_infer_default_values(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["infer", "img.png", "-m", "proj/1"]) + self.assertEqual(args.confidence, 0.5) + self.assertEqual(args.overlap, 0.5) + self.assertIsNone(args.type) + + def test_infer_all_flags(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args( + [ + "infer", + "img.png", + "-m", + "proj/1", + "-c", + "0.7", + "-o", + "0.3", + "-t", + "object-detection", + ] + ) + self.assertAlmostEqual(args.confidence, 0.7) + 
self.assertAlmostEqual(args.overlap, 0.3) + self.assertEqual(args.type, "object-detection") + + def test_infer_type_choices(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + with self.assertRaises(SystemExit): + parser.parse_args(["infer", "img.png", "-m", "proj/1", "-t", "invalid-type"]) + + +class TestInferHandler(unittest.TestCase): + """Test _infer handler function.""" + + def _make_args(self, **kwargs: object) -> types.SimpleNamespace: + defaults = { + "json": False, + "api_key": "test-key", + "workspace": "test-ws", + "model": "test-project/1", + "file": "test.jpg", + "confidence": 0.5, + "overlap": 0.5, + "type": "object-detection", + } + defaults.update(kwargs) + return types.SimpleNamespace(**defaults) + + @patch("roboflow.models.object_detection.ObjectDetectionModel") + def test_infer_text_output(self, mock_model_cls: MagicMock) -> None: + from roboflow.cli.handlers.infer import _infer + + mock_group = MagicMock() + mock_group.__str__ = lambda self: "detection results" + mock_group.__iter__ = lambda self: iter([]) + mock_model = MagicMock() + mock_model.predict.return_value = mock_group + mock_model_cls.return_value = mock_model + + args = self._make_args() + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + _infer(args) + finally: + sys.stdout = old_stdout + + self.assertIn("detection results", buf.getvalue()) + + @patch("roboflow.models.object_detection.ObjectDetectionModel") + def test_infer_json_output(self, mock_model_cls: MagicMock) -> None: + from roboflow.cli.handlers.infer import _infer + + mock_pred = MagicMock() + mock_pred.json.return_value = {"class": "dog", "confidence": 0.9} + mock_group = MagicMock() + mock_group.__iter__ = lambda self: iter([mock_pred]) + mock_model = MagicMock() + mock_model.predict.return_value = mock_group + mock_model_cls.return_value = mock_model + + args = self._make_args(json=True) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + 
_infer(args) + finally: + sys.stdout = old_stdout + + result = json.loads(buf.getvalue()) + self.assertIsInstance(result, list) + self.assertEqual(result[0]["class"], "dog") + + @patch("roboflow.models.object_detection.ObjectDetectionModel") + @patch("roboflow.adapters.rfapi.get_project") + def test_infer_auto_detects_type(self, mock_get_project: MagicMock, mock_model_cls: MagicMock) -> None: + from roboflow.cli.handlers.infer import _infer + + mock_get_project.return_value = {"project": {"type": "object-detection"}} + mock_group = MagicMock() + mock_group.__str__ = lambda self: "results" + mock_group.__iter__ = lambda self: iter([]) + mock_model = MagicMock() + mock_model.predict.return_value = mock_group + mock_model_cls.return_value = mock_model + + args = self._make_args(type=None) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + _infer(args) + finally: + sys.stdout = old_stdout + + mock_get_project.assert_called_once() + + @patch("roboflow.config.load_roboflow_api_key", return_value=None) + def test_infer_no_api_key(self, _mock_key: MagicMock) -> None: + from roboflow.cli.handlers.infer import _infer + + args = self._make_args(api_key=None) + with self.assertRaises(SystemExit) as ctx: + _infer(args) + self.assertEqual(ctx.exception.code, 2) + + @patch("roboflow.models.object_detection.ObjectDetectionModel") + def test_infer_confidence_converted_to_percentage(self, mock_model_cls: MagicMock) -> None: + from roboflow.cli.handlers.infer import _infer + + mock_group = MagicMock() + mock_group.__str__ = lambda self: "results" + mock_group.__iter__ = lambda self: iter([]) + mock_model = MagicMock() + mock_model.predict.return_value = mock_group + mock_model_cls.return_value = mock_model + + args = self._make_args(confidence=0.7, overlap=0.3) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + _infer(args) + finally: + sys.stdout = old_stdout + + mock_model.predict.assert_called_once_with("test.jpg", confidence=70, 
overlap=30) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_model_handler.py b/tests/cli/test_model_handler.py new file mode 100644 index 00000000..33e9ceaf --- /dev/null +++ b/tests/cli/test_model_handler.py @@ -0,0 +1,286 @@ +"""Unit tests for roboflow.cli.handlers.model.""" + +import io +import json +import sys +import types +import unittest +from unittest.mock import MagicMock, patch + + +class TestModelRegister(unittest.TestCase): + """Verify model handler registers expected subcommands.""" + + def test_register_adds_model_parser(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["model"]) + self.assertEqual(args.command, "model") + + def test_model_list_parser(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["model", "list", "-p", "my-project"]) + self.assertEqual(args.project, "my-project") + self.assertTrue(callable(args.func)) + + def test_model_get_parser(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["model", "get", "my-ws/my-model"]) + self.assertEqual(args.model_url, "my-ws/my-model") + self.assertTrue(callable(args.func)) + + def test_model_upload_parser(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args( + [ + "model", + "upload", + "-p", + "proj1", + "-t", + "yolov8", + "-m", + "/path/to/model", + ] + ) + self.assertEqual(args.project, ["proj1"]) + self.assertEqual(args.model_type, "yolov8") + self.assertEqual(args.model_path, "/path/to/model") + self.assertEqual(args.filename, "weights/best.pt") + self.assertTrue(callable(args.func)) + + def test_model_upload_multiple_projects(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args( + [ + "model", + "upload", + "-p", + "proj1", + "-p", + "proj2", + "-t", + "yolov8", + "-m", + 
"/path/to/model", + ] + ) + self.assertEqual(args.project, ["proj1", "proj2"]) + + +class TestModelGet(unittest.TestCase): + """Test _get_model handler.""" + + def _make_args(self, **kwargs: object) -> types.SimpleNamespace: + defaults = { + "json": False, + "api_key": "test-key", + "workspace": "test-ws", + "model_url": "test-ws/test-project", + } + defaults.update(kwargs) + return types.SimpleNamespace(**defaults) + + @patch("roboflow.adapters.rfapi.get_project") + def test_get_model_success(self, mock_get_project: MagicMock) -> None: + from roboflow.cli.handlers.model import _get_model + + mock_get_project.return_value = {"project": {"name": "test"}} + + args = self._make_args(json=True) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + _get_model(args) + finally: + sys.stdout = old_stdout + + result = json.loads(buf.getvalue()) + self.assertEqual(result["project"]["name"], "test") + + @patch("roboflow.adapters.rfapi.get_version") + def test_get_model_with_version(self, mock_get_version: MagicMock) -> None: + from roboflow.cli.handlers.model import _get_model + + mock_get_version.return_value = {"version": {"id": "test/1"}} + + args = self._make_args(model_url="test-ws/test-project/1", json=True) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + _get_model(args) + finally: + sys.stdout = old_stdout + + result = json.loads(buf.getvalue()) + self.assertIn("version", result) + mock_get_version.assert_called_once() + + @patch("roboflow.config.load_roboflow_api_key", return_value=None) + def test_get_model_no_api_key(self, _mock_key: MagicMock) -> None: + from roboflow.cli.handlers.model import _get_model + + args = self._make_args(api_key=None) + with self.assertRaises(SystemExit) as ctx: + _get_model(args) + self.assertEqual(ctx.exception.code, 2) + + +class TestModelUpload(unittest.TestCase): + """Test _upload_model handler.""" + + def _make_args(self, **kwargs: object) -> types.SimpleNamespace: + defaults = { + 
"json": False, + "api_key": "test-key", + "workspace": "test-ws", + "project": ["proj1"], + "version_number": 1, + "model_type": "yolov8", + "model_path": "/path/to/model", + "filename": "weights/best.pt", + "model_name": None, + } + defaults.update(kwargs) + return types.SimpleNamespace(**defaults) + + @patch("roboflow.Roboflow") + def test_upload_single_version(self, mock_rf_cls: MagicMock) -> None: + from roboflow.cli.handlers.model import _upload_model + + mock_version = MagicMock() + mock_project = MagicMock() + mock_project.version.return_value = mock_version + mock_workspace = MagicMock() + mock_workspace.project.return_value = mock_project + mock_rf = MagicMock() + mock_rf.workspace.return_value = mock_workspace + mock_rf_cls.return_value = mock_rf + + args = self._make_args(json=True) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + _upload_model(args) + finally: + sys.stdout = old_stdout + + result = json.loads(buf.getvalue()) + self.assertEqual(result["status"], "uploaded") + mock_version.deploy.assert_called_once_with("yolov8", "/path/to/model", "weights/best.pt") + + @patch("roboflow.Roboflow") + def test_upload_multi_project(self, mock_rf_cls: MagicMock) -> None: + from roboflow.cli.handlers.model import _upload_model + + mock_workspace = MagicMock() + mock_rf = MagicMock() + mock_rf.workspace.return_value = mock_workspace + mock_rf_cls.return_value = mock_rf + + args = self._make_args(project=["proj1", "proj2"], version_number=None, model_name="my-model", json=True) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + _upload_model(args) + finally: + sys.stdout = old_stdout + + result = json.loads(buf.getvalue()) + self.assertEqual(result["status"], "uploaded") + mock_workspace.deploy_model.assert_called_once() + + @patch("roboflow.Roboflow") + def test_upload_no_project_errors(self, mock_rf_cls: MagicMock) -> None: + from roboflow.cli.handlers.model import _upload_model + + mock_workspace = 
MagicMock() + mock_rf = MagicMock() + mock_rf.workspace.return_value = mock_workspace + mock_rf_cls.return_value = mock_rf + + args = self._make_args(project=None, version_number=None) + with self.assertRaises(SystemExit): + _upload_model(args) + + +class TestParseErrorMessage(unittest.TestCase): + """Test _parse_error_message helper (centralized in _output.py).""" + + def test_plain_string(self) -> None: + from roboflow.cli._output import _parse_error_message + + parsed, human = _parse_error_message("something broke") + self.assertIsNone(parsed) + self.assertEqual(human, "something broke") + + def test_json_with_nested_error(self) -> None: + from roboflow.cli._output import _parse_error_message + + raw = '{"error": {"message": "Unsupported request"}}' + parsed, human = _parse_error_message(raw) + self.assertIsNotNone(parsed) + self.assertEqual(human, "Unsupported request") + + def test_json_with_string_error(self) -> None: + from roboflow.cli._output import _parse_error_message + + raw = '{"error": "Not found"}' + parsed, human = _parse_error_message(raw) + self.assertIsNotNone(parsed) + self.assertEqual(human, "Not found") + + +class TestModelListError(unittest.TestCase): + """Test _list_models handles API errors cleanly.""" + + def _make_args(self, **kwargs: object) -> types.SimpleNamespace: + defaults = { + "json": True, + "api_key": "test-key", + "workspace": "test-ws", + "project": "nonexistent-project", + } + defaults.update(kwargs) + return types.SimpleNamespace(**defaults) + + @patch("roboflow.Roboflow") + def test_list_models_project_not_found(self, mock_rf_cls: MagicMock) -> None: + from roboflow.cli.handlers.model import _list_models + + mock_workspace = MagicMock() + mock_workspace.project.side_effect = RuntimeError("Project not found") + mock_rf = MagicMock() + mock_rf.workspace.return_value = mock_workspace + mock_rf_cls.return_value = mock_rf + + args = self._make_args() + buf = io.StringIO() + old_stderr = sys.stderr + sys.stderr = buf + try: + 
with self.assertRaises(SystemExit) as ctx: + _list_models(args) + self.assertEqual(ctx.exception.code, 3) + finally: + sys.stderr = old_stderr + + result = json.loads(buf.getvalue()) + self.assertIn("error", result) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_output.py b/tests/cli/test_output.py new file mode 100644 index 00000000..ca83157b --- /dev/null +++ b/tests/cli/test_output.py @@ -0,0 +1,166 @@ +"""Unit tests for roboflow.cli._output.""" + +import io +import json +import sys +import types +import unittest + + +class TestOutput(unittest.TestCase): + """Tests for the output() helper.""" + + def _make_args(self, *, json_mode: bool = False) -> types.SimpleNamespace: + return types.SimpleNamespace(json=json_mode) + + def test_json_mode_prints_json(self) -> None: + from roboflow.cli._output import output + + args = self._make_args(json_mode=True) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + output(args, data={"key": "value"}, text="human text") + finally: + sys.stdout = old_stdout + result = json.loads(buf.getvalue()) + self.assertEqual(result, {"key": "value"}) + + def test_text_mode_prints_text(self) -> None: + from roboflow.cli._output import output + + args = self._make_args(json_mode=False) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + output(args, data={"key": "value"}, text="human text") + finally: + sys.stdout = old_stdout + self.assertEqual(buf.getvalue().strip(), "human text") + + def test_text_mode_falls_back_to_json_when_no_text(self) -> None: + from roboflow.cli._output import output + + args = self._make_args(json_mode=False) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + output(args, data={"fallback": True}) + finally: + sys.stdout = old_stdout + result = json.loads(buf.getvalue()) + self.assertTrue(result["fallback"]) + + def test_output_error_json_mode(self) -> None: + from roboflow.cli._output import output_error + + args = 
self._make_args(json_mode=True) + buf = io.StringIO() + old_stderr = sys.stderr + sys.stderr = buf + try: + with self.assertRaises(SystemExit) as ctx: + output_error(args, "something broke", hint="try again", exit_code=1) + finally: + sys.stderr = old_stderr + self.assertEqual(ctx.exception.code, 1) + result = json.loads(buf.getvalue()) + self.assertEqual(result["error"]["message"], "something broke") + self.assertEqual(result["error"]["hint"], "try again") + + def test_output_error_text_mode(self) -> None: + from roboflow.cli._output import output_error + + args = self._make_args(json_mode=False) + buf = io.StringIO() + old_stderr = sys.stderr + sys.stderr = buf + try: + with self.assertRaises(SystemExit) as ctx: + output_error(args, "not found", exit_code=3) + finally: + sys.stderr = old_stderr + self.assertEqual(ctx.exception.code, 3) + self.assertIn("not found", buf.getvalue()) + + +class TestTable(unittest.TestCase): + """Tests for the format_table() helper.""" + + def test_empty_rows(self) -> None: + from roboflow.cli._table import format_table + + result = format_table([], ["a", "b"]) + self.assertEqual(result, "(no results)") + + def test_basic_table(self) -> None: + from roboflow.cli._table import format_table + + rows = [ + {"name": "proj-a", "type": "object-detection"}, + {"name": "proj-b", "type": "classification"}, + ] + result = format_table(rows, ["name", "type"]) + lines = result.split("\n") + self.assertEqual(len(lines), 4) # header + separator + 2 rows + self.assertIn("NAME", lines[0]) + self.assertIn("TYPE", lines[0]) + self.assertIn("proj-a", lines[2]) + + +class TestResolver(unittest.TestCase): + """Tests for the resource shorthand resolver.""" + + def test_single_segment(self) -> None: + from roboflow.cli._resolver import resolve_resource + + ws, proj, ver = resolve_resource("my-project", workspace_override="default-ws") + self.assertEqual(ws, "default-ws") + self.assertEqual(proj, "my-project") + self.assertIsNone(ver) + + def 
test_workspace_project(self) -> None: + from roboflow.cli._resolver import resolve_resource + + ws, proj, ver = resolve_resource("my-ws/my-project") + self.assertEqual(ws, "my-ws") + self.assertEqual(proj, "my-project") + self.assertIsNone(ver) + + def test_project_version(self) -> None: + from roboflow.cli._resolver import resolve_resource + + ws, proj, ver = resolve_resource("my-project/3", workspace_override="default-ws") + self.assertEqual(ws, "default-ws") + self.assertEqual(proj, "my-project") + self.assertEqual(ver, 3) + + def test_full_triple(self) -> None: + from roboflow.cli._resolver import resolve_resource + + ws, proj, ver = resolve_resource("my-ws/my-project/42") + self.assertEqual(ws, "my-ws") + self.assertEqual(proj, "my-project") + self.assertEqual(ver, 42) + + def test_no_workspace_raises(self) -> None: + from unittest.mock import patch + + from roboflow.cli._resolver import resolve_resource + + with patch("roboflow.cli._resolver.get_conditional_configuration_variable", return_value=None): + with self.assertRaises(ValueError): + resolve_resource("my-project") # no override, no default + + def test_too_many_segments_raises(self) -> None: + from roboflow.cli._resolver import resolve_resource + + with self.assertRaises(ValueError): + resolve_resource("a/b/c/d") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_project_handler.py b/tests/cli/test_project_handler.py new file mode 100644 index 00000000..1b220c0d --- /dev/null +++ b/tests/cli/test_project_handler.py @@ -0,0 +1,78 @@ +"""Tests for the project CLI handler.""" + +import argparse +import unittest + + +def _make_parser() -> argparse.ArgumentParser: + """Build a minimal parser with just the project handler.""" + parser = argparse.ArgumentParser() + parser.add_argument("--json", action="store_true", default=False) + parser.add_argument("--api-key", dest="api_key", default=None) + parser.add_argument("--workspace", "-w", dest="workspace", default=None) + subs = 
parser.add_subparsers(dest="command") + + from roboflow.cli.handlers.project import register + + register(subs) + return parser + + +class TestProjectHandlerRegistration(unittest.TestCase): + """Verify that the project handler registers correctly.""" + + def test_register_creates_project_subcommand(self) -> None: + parser = _make_parser() + args = parser.parse_args(["project", "list"]) + self.assertIsNotNone(args.func) + + def test_project_list_defaults(self) -> None: + parser = _make_parser() + args = parser.parse_args(["project", "list"]) + self.assertIsNone(args.type) + + def test_project_list_with_type_filter(self) -> None: + parser = _make_parser() + args = parser.parse_args(["project", "list", "--type", "single-label-classification"]) + self.assertEqual(args.type, "single-label-classification") + + def test_project_get_requires_id(self) -> None: + parser = _make_parser() + with self.assertRaises(SystemExit): + parser.parse_args(["project", "get"]) + + def test_project_get_parses_id(self) -> None: + parser = _make_parser() + args = parser.parse_args(["project", "get", "my-project"]) + self.assertEqual(args.project_id, "my-project") + + def test_project_create_requires_name_and_type(self) -> None: + parser = _make_parser() + with self.assertRaises(SystemExit): + parser.parse_args(["project", "create"]) + + def test_project_create_parses_args(self) -> None: + parser = _make_parser() + args = parser.parse_args(["project", "create", "My Project", "--type", "object-detection"]) + self.assertEqual(args.name, "My Project") + self.assertEqual(args.type, "object-detection") + + def test_project_create_rejects_invalid_type(self) -> None: + parser = _make_parser() + with self.assertRaises(SystemExit): + parser.parse_args(["project", "create", "My Project", "--type", "invalid-type"]) + + def test_project_create_default_license(self) -> None: + parser = _make_parser() + args = parser.parse_args(["project", "create", "Test", "--type", "single-label-classification"]) + 
self.assertEqual(args.license, "Private") + + def test_subcommands_have_func(self) -> None: + parser = _make_parser() + for subcmd in ["list", "get my-proj", "create Foo --type single-label-classification"]: + args = parser.parse_args(["project"] + subcmd.split()) + self.assertIsNotNone(args.func, f"project {subcmd} has no func") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_search_handler.py b/tests/cli/test_search_handler.py new file mode 100644 index 00000000..fd16a514 --- /dev/null +++ b/tests/cli/test_search_handler.py @@ -0,0 +1,56 @@ +"""Tests for the search CLI handler.""" + +import unittest + + +class TestSearchRegistration(unittest.TestCase): + """Verify search handler registers expected subcommands.""" + + def test_register_callable(self) -> None: + from roboflow.cli.handlers.search import register + + self.assertTrue(callable(register)) + + def test_search_subcommand_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["search", "tag:review"]) + self.assertIsNotNone(args.func) + + def test_search_defaults(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["search", "tag:review"]) + self.assertEqual(args.query, "tag:review") + self.assertEqual(args.limit, 50) + self.assertIsNone(args.cursor) + self.assertIsNone(args.fields) + self.assertFalse(args.export) + self.assertEqual(args.format, "coco") + self.assertIsNone(args.location) + self.assertIsNone(args.dataset) + self.assertIsNone(args.name) + self.assertFalse(args.no_extract) + + def test_search_with_export_flag(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["search", "*", "--export", "-f", "yolov8", "--no-extract"]) + self.assertTrue(args.export) + self.assertEqual(args.format, "yolov8") + self.assertTrue(args.no_extract) + + def test_search_with_pagination(self) -> None: + from 
roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["search", "class:car", "--limit", "10", "--cursor", "abc123"]) + self.assertEqual(args.limit, 10) + self.assertEqual(args.cursor, "abc123") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_train_handler.py b/tests/cli/test_train_handler.py new file mode 100644 index 00000000..44c9b079 --- /dev/null +++ b/tests/cli/test_train_handler.py @@ -0,0 +1,181 @@ +"""Unit tests for roboflow.cli.handlers.train.""" + +import io +import json +import sys +import types +import unittest +from unittest.mock import MagicMock, patch + + +class TestTrainRegister(unittest.TestCase): + """Verify train handler registers expected subcommands.""" + + def test_register_adds_train_parser(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["train", "-p", "proj", "-v", "1"]) + self.assertEqual(args.command, "train") + self.assertTrue(callable(args.func)) + + def test_train_start_subcommand(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["train", "start", "-p", "proj", "-v", "2"]) + self.assertEqual(args.project, "proj") + self.assertEqual(args.version_number, 2) + self.assertTrue(callable(args.func)) + + def test_train_without_subcommand_acts_as_start(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["train", "-p", "proj", "-v", "3", "-t", "yolov8n"]) + self.assertEqual(args.project, "proj") + self.assertEqual(args.version_number, 3) + self.assertEqual(args.model_type, "yolov8n") + self.assertTrue(callable(args.func)) + + def test_train_optional_args(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args( + [ + "train", + "-p", + "proj", + "-v", + "1", + "--checkpoint", + "abc123", + "--speed", + "fast", + "--epochs", + "50", + ] + ) + 
self.assertEqual(args.checkpoint, "abc123") + self.assertEqual(args.speed, "fast") + self.assertEqual(args.epochs, 50) + + +class TestTrainStart(unittest.TestCase): + """Test _start handler function.""" + + def _make_args(self, **kwargs: object) -> types.SimpleNamespace: + defaults = { + "json": False, + "api_key": "test-key", + "workspace": "test-ws", + "project": "my-project", + "version_number": 1, + "model_type": None, + "checkpoint": None, + "speed": None, + "epochs": None, + } + defaults.update(kwargs) + return types.SimpleNamespace(**defaults) + + @patch("roboflow.adapters.rfapi.start_version_training") + def test_start_success(self, mock_train: MagicMock) -> None: + from roboflow.cli.handlers.train import _start + + mock_train.return_value = True + + args = self._make_args(json=True) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + _start(args) + finally: + sys.stdout = old_stdout + + result = json.loads(buf.getvalue()) + self.assertEqual(result["status"], "training_started") + self.assertEqual(result["project"], "my-project") + self.assertEqual(result["version"], 1) + + @patch("roboflow.adapters.rfapi.start_version_training") + def test_start_with_all_options(self, mock_train: MagicMock) -> None: + from roboflow.cli.handlers.train import _start + + mock_train.return_value = True + + args = self._make_args( + json=True, + model_type="yolov8n", + checkpoint="abc", + speed="fast", + epochs=50, + ) + buf = io.StringIO() + old_stdout = sys.stdout + sys.stdout = buf + try: + _start(args) + finally: + sys.stdout = old_stdout + + mock_train.assert_called_once_with( + "test-key", + "test-ws", + "my-project", + "1", + speed="fast", + checkpoint="abc", + model_type="yolov8n", + epochs=50, + ) + + @patch("roboflow.adapters.rfapi.start_version_training") + def test_start_api_error(self, mock_train: MagicMock) -> None: + from roboflow.adapters.rfapi import RoboflowError + from roboflow.cli.handlers.train import _start + + 
mock_train.side_effect = RoboflowError("training failed") + + args = self._make_args() + with self.assertRaises(SystemExit) as ctx: + _start(args) + self.assertEqual(ctx.exception.code, 1) + + @patch("roboflow.config.load_roboflow_api_key", return_value=None) + def test_start_no_api_key(self, _mock_key: MagicMock) -> None: + from roboflow.cli.handlers.train import _start + + args = self._make_args(api_key=None) + with self.assertRaises(SystemExit) as ctx: + _start(args) + self.assertEqual(ctx.exception.code, 2) + + @patch("roboflow.adapters.rfapi.start_version_training") + def test_start_json_error_not_double_encoded(self, mock_train: MagicMock) -> None: + from roboflow.adapters.rfapi import RoboflowError + from roboflow.cli.handlers.train import _start + + # Simulate API returning a JSON error string + mock_train.side_effect = RoboflowError('{"error": {"message": "Unsupported request"}}') + + args = self._make_args(json=True) + buf = io.StringIO() + old_stderr = sys.stderr + sys.stderr = buf + try: + with self.assertRaises(SystemExit): + _start(args) + finally: + sys.stderr = old_stderr + + result = json.loads(buf.getvalue()) + # Should be a parsed object, not a double-encoded JSON string + self.assertIsInstance(result["error"], dict) + self.assertEqual(result["error"]["message"], "Unsupported request") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_universe_handler.py b/tests/cli/test_universe_handler.py new file mode 100644 index 00000000..16962b73 --- /dev/null +++ b/tests/cli/test_universe_handler.py @@ -0,0 +1,32 @@ +"""Tests for the universe CLI handler.""" + +import unittest + + +class TestUniverseRegistration(unittest.TestCase): + """Verify universe handler registers expected subcommands.""" + + def test_register_callable(self) -> None: + from roboflow.cli.handlers.universe import register + + self.assertTrue(callable(register)) + + def test_universe_search_exists(self) -> None: + from roboflow.cli import build_parser + + 
parser = build_parser() + args = parser.parse_args(["universe", "search", "cats"]) + self.assertIsNotNone(args.func) + self.assertEqual(args.query, "cats") + + def test_universe_search_with_flags(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["universe", "search", "dogs", "--type", "model", "--limit", "5"]) + self.assertEqual(args.type, "model") + self.assertEqual(args.limit, 5) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_version_handler.py b/tests/cli/test_version_handler.py new file mode 100644 index 00000000..ceefb869 --- /dev/null +++ b/tests/cli/test_version_handler.py @@ -0,0 +1,121 @@ +"""Tests for the version CLI handler.""" + +import argparse +import unittest + + +def _make_parser() -> argparse.ArgumentParser: + """Build a minimal parser with just the version handler.""" + parser = argparse.ArgumentParser() + parser.add_argument("--json", action="store_true", default=False) + parser.add_argument("--api-key", dest="api_key", default=None) + parser.add_argument("--workspace", "-w", dest="workspace", default=None) + subs = parser.add_subparsers(dest="command") + + from roboflow.cli.handlers.version import register + + register(subs) + return parser + + +class TestVersionHandlerRegistration(unittest.TestCase): + """Verify that the version handler registers correctly.""" + + def test_register_creates_version_subcommand(self) -> None: + parser = _make_parser() + args = parser.parse_args(["version", "list", "-p", "my-project"]) + self.assertIsNotNone(args.func) + + def test_version_list_requires_project(self) -> None: + parser = _make_parser() + with self.assertRaises(SystemExit): + parser.parse_args(["version", "list"]) + + def test_version_list_parses_project(self) -> None: + parser = _make_parser() + args = parser.parse_args(["version", "list", "-p", "my-project"]) + self.assertEqual(args.project, "my-project") + + def test_version_get_requires_version_num(self) 
-> None: + parser = _make_parser() + with self.assertRaises(SystemExit): + parser.parse_args(["version", "get"]) + + def test_version_get_parses_args(self) -> None: + parser = _make_parser() + args = parser.parse_args(["version", "get", "3", "-p", "my-project"]) + self.assertEqual(args.version_num, "3") + self.assertEqual(args.project, "my-project") + + def test_version_get_shorthand(self) -> None: + parser = _make_parser() + args = parser.parse_args(["version", "get", "my-project/3"]) + self.assertEqual(args.version_num, "my-project/3") + + def test_version_download_parses_args(self) -> None: + parser = _make_parser() + args = parser.parse_args(["version", "download", "ws/proj/1", "-f", "coco"]) + self.assertEqual(args.url_or_id, "ws/proj/1") + self.assertEqual(args.format, "coco") + + def test_version_download_default_format(self) -> None: + parser = _make_parser() + args = parser.parse_args(["version", "download", "ws/proj/1"]) + self.assertEqual(args.format, "voc") + + def test_version_export_parses_args(self) -> None: + parser = _make_parser() + args = parser.parse_args(["version", "export", "2", "-p", "my-project", "-f", "yolov8"]) + self.assertEqual(args.version_num, "2") + self.assertEqual(args.project, "my-project") + self.assertEqual(args.format, "yolov8") + + def test_version_create_is_stub(self) -> None: + parser = _make_parser() + args = parser.parse_args(["version", "create", "-p", "my-project"]) + self.assertIsNotNone(args.func) + + def test_subcommands_have_func(self) -> None: + parser = _make_parser() + subcmds = [ + "list -p proj", + "get 3 -p proj", + "download ws/proj/1", + "export 1 -p proj", + "create -p proj", + ] + for subcmd in subcmds: + args = parser.parse_args(["version"] + subcmd.split()) + self.assertIsNotNone(args.func, f"version {subcmd} has no func") + + +class TestParseUrl(unittest.TestCase): + """Test the _parse_url helper.""" + + def test_shorthand(self) -> None: + from roboflow.cli.handlers.version import _parse_url + + w, p, v 
= _parse_url("my-ws/my-project/3") + self.assertEqual(w, "my-ws") + self.assertEqual(p, "my-project") + self.assertEqual(v, "3") + + def test_full_url(self) -> None: + from roboflow.cli.handlers.version import _parse_url + + w, p, v = _parse_url("https://universe.roboflow.com/my-ws/my-project/3") + self.assertEqual(w, "my-ws") + self.assertEqual(p, "my-project") + self.assertEqual(v, "3") + + def test_no_version(self) -> None: + from roboflow.cli.handlers.version import _parse_url + + w, p, v = _parse_url("my-ws/my-project") + self.assertEqual(w, "my-ws") + self.assertEqual(p, "my-project") + self.assertIsNone(v) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_video_handler.py b/tests/cli/test_video_handler.py new file mode 100644 index 00000000..4d80d696 --- /dev/null +++ b/tests/cli/test_video_handler.py @@ -0,0 +1,42 @@ +"""Tests for the video CLI handler.""" + +import unittest + + +class TestVideoRegistration(unittest.TestCase): + """Verify video handler registers expected subcommands.""" + + def test_register_callable(self) -> None: + from roboflow.cli.handlers.video import register + + self.assertTrue(callable(register)) + + def test_video_infer_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["video", "infer", "-p", "my-project", "-v", "1", "-f", "vid.mp4"]) + self.assertIsNotNone(args.func) + self.assertEqual(args.project, "my-project") + self.assertEqual(args.version_number, 1) + self.assertEqual(args.video_file, "vid.mp4") + self.assertEqual(args.fps, 5) + + def test_video_infer_custom_fps(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["video", "infer", "-p", "proj", "-v", "2", "-f", "vid.mp4", "--fps", "10"]) + self.assertEqual(args.fps, 10) + + def test_video_status_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["video", 
"status", "job-123"]) + self.assertIsNotNone(args.func) + self.assertEqual(args.job_id, "job-123") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_workflow_handler.py b/tests/cli/test_workflow_handler.py new file mode 100644 index 00000000..070776d2 --- /dev/null +++ b/tests/cli/test_workflow_handler.py @@ -0,0 +1,83 @@ +"""Tests for the workflow CLI handler.""" + +import unittest + + +class TestWorkflowRegistration(unittest.TestCase): + """Verify workflow handler registers expected subcommands.""" + + def test_register_callable(self) -> None: + from roboflow.cli.handlers.workflow import register + + self.assertTrue(callable(register)) + + def test_workflow_list_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["workflow", "list"]) + self.assertIsNotNone(args.func) + + def test_workflow_get_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["workflow", "get", "my-workflow"]) + self.assertEqual(args.workflow_url, "my-workflow") + self.assertIsNotNone(args.func) + + def test_workflow_create_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["workflow", "create", "--name", "test-wf"]) + self.assertEqual(args.name, "test-wf") + self.assertIsNotNone(args.func) + + def test_workflow_update_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["workflow", "update", "my-wf"]) + self.assertIsNotNone(args.func) + + def test_workflow_version_list_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["workflow", "version", "list", "my-wf"]) + self.assertIsNotNone(args.func) + + def test_workflow_fork_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = 
parser.parse_args(["workflow", "fork", "my-wf"]) + self.assertIsNotNone(args.func) + + def test_workflow_build_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["workflow", "build", "detect objects in a video"]) + self.assertEqual(args.prompt, "detect objects in a video") + self.assertIsNotNone(args.func) + + def test_workflow_run_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["workflow", "run", "my-wf", "--input", "image.jpg"]) + self.assertEqual(args.input, "image.jpg") + self.assertIsNotNone(args.func) + + def test_workflow_deploy_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["workflow", "deploy", "my-wf"]) + self.assertIsNotNone(args.func) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/cli/test_workspace.py b/tests/cli/test_workspace.py new file mode 100644 index 00000000..0b7a2591 --- /dev/null +++ b/tests/cli/test_workspace.py @@ -0,0 +1,37 @@ +"""Tests for the workspace CLI handler.""" + +import unittest + + +class TestWorkspaceRegistration(unittest.TestCase): + """Verify workspace handler registers expected subcommands.""" + + def test_register_callable(self) -> None: + from roboflow.cli.handlers.workspace import register + + self.assertTrue(callable(register)) + + def test_workspace_list_exists(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["workspace", "list"]) + self.assertIsNotNone(args.func) + + def test_workspace_get_positional(self) -> None: + from roboflow.cli import build_parser + + parser = build_parser() + args = parser.parse_args(["workspace", "get", "my-ws"]) + self.assertEqual(args.workspace_id, "my-ws") + self.assertIsNotNone(args.func) + + def test_handler_functions_exist(self) -> None: + from roboflow.cli.handlers import workspace + + 
self.assertTrue(callable(workspace._list_workspaces)) + self.assertTrue(callable(workspace._get_workspace)) + + +if __name__ == "__main__": + unittest.main()