diff --git a/.vscode/settings.json b/.vscode/settings.json index e96a7121..98ff10b3 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,15 +1,4 @@ { - "yaml.validate": true, - "yaml.disableAdditionalProperties": true, - "yaml.completion": true, - "yaml.extension.recommendations": true, - "yaml.hover": true, - "yaml.format.singleQuote": false, - "yaml.format.printWidth": 120, - "yaml.format.proseWrap": "always", - "yaml.schemas": { - "schema/plugin.schema.json": ["/plugins/**", "/themes/**"] - }, "[json]": { "editor.defaultFormatter": "esbenp.prettier-vscode" }, @@ -18,5 +7,16 @@ }, "[yaml]": { "editor.defaultFormatter": "esbenp.prettier-vscode" - } + }, + "yaml.completion": true, + "yaml.disableAdditionalProperties": true, + "yaml.extension.recommendations": true, + "yaml.format.printWidth": 120, + "yaml.format.proseWrap": "always", + "yaml.format.singleQuote": false, + "yaml.hover": true, + "yaml.schemas": { + "./validator/plugin.schema.json": ["/plugins/**", "/themes/**"] + }, + "yaml.validate": true } diff --git a/plugins/AITagger/README.md b/archive/AITagger/README.md similarity index 100% rename from plugins/AITagger/README.md rename to archive/AITagger/README.md diff --git a/plugins/AITagger/ai_server.py b/archive/AITagger/ai_server.py similarity index 100% rename from plugins/AITagger/ai_server.py rename to archive/AITagger/ai_server.py diff --git a/plugins/AITagger/ai_tagger.py b/archive/AITagger/ai_tagger.py similarity index 100% rename from plugins/AITagger/ai_tagger.py rename to archive/AITagger/ai_tagger.py diff --git a/plugins/AITagger/ai_tagger.yml b/archive/AITagger/ai_tagger.yml similarity index 100% rename from plugins/AITagger/ai_tagger.yml rename to archive/AITagger/ai_tagger.yml diff --git a/plugins/AITagger/ai_video_result.py b/archive/AITagger/ai_video_result.py similarity index 100% rename from plugins/AITagger/ai_video_result.py rename to archive/AITagger/ai_video_result.py diff --git a/plugins/AITagger/config.py 
b/archive/AITagger/config.py similarity index 100% rename from plugins/AITagger/config.py rename to archive/AITagger/config.py diff --git a/plugins/AITagger/media_handler.py b/archive/AITagger/media_handler.py similarity index 100% rename from plugins/AITagger/media_handler.py rename to archive/AITagger/media_handler.py diff --git a/plugins/AITagger/requirements.txt b/archive/AITagger/requirements.txt similarity index 100% rename from plugins/AITagger/requirements.txt rename to archive/AITagger/requirements.txt diff --git a/plugins/AITagger/utility.py b/archive/AITagger/utility.py similarity index 100% rename from plugins/AITagger/utility.py rename to archive/AITagger/utility.py diff --git a/archive/README.md b/archive/README.md index 9766b431..caee1de8 100644 --- a/archive/README.md +++ b/archive/README.md @@ -2,4 +2,5 @@ 1. [renamerOnUpdate](./renamerOnUpdate) - issue [#483](https://github.com/stashapp/CommunityScripts/issues/483) 2. [visage](./visage/) - issue [#532](https://github.com/stashapp/CommunityScripts/issues/532) -3. [stashRealBooru](./stashRealbooru/) - issue [#540](https://github.com/stashapp/CommunityScripts/issues/540) \ No newline at end of file +3. [stashRealBooru](./stashRealbooru/) - issue [#540](https://github.com/stashapp/CommunityScripts/issues/540) +4. [AITagger](./AITagger/) - issue [#691](https://github.com/stashapp/CommunityScripts/issues/691) \ No newline at end of file diff --git a/plugins/AHavenVLMConnector/CHANGELOG.md b/plugins/AHavenVLMConnector/CHANGELOG.md new file mode 100644 index 00000000..5f571e98 --- /dev/null +++ b/plugins/AHavenVLMConnector/CHANGELOG.md @@ -0,0 +1,12 @@ +# Changelog + +All notable changes to the A Haven VLM Connector project will be documented in this file. 
+ +## [1.1.1] - 2026-04-05 +### Fixes +- Mitigate TorchVision v0.26.0 release https://github.com/Haven-hvn/haven-vlm-engine-package/commit/7a3a6f9dd931237c93c5205f3d31df3e285ae21d + +## [1.0.0] - 2025-06-29 + +### Added +- **Initial release** diff --git a/plugins/AHavenVLMConnector/README.md b/plugins/AHavenVLMConnector/README.md new file mode 100644 index 00000000..c1b30b4a --- /dev/null +++ b/plugins/AHavenVLMConnector/README.md @@ -0,0 +1,145 @@ +# A Haven VLM Connector + +https://discourse.stashapp.cc/t/haven-vlm-connector/5464 + +A StashApp plugin for Vision-Language Model (VLM) based content tagging and analysis. This plugin is designed with a **local-first philosophy**, empowering users to run analysis on their own hardware (using CPU or GPU) and their local network. It also supports cloud-based VLM endpoints for additional flexibility. The Haven VLM Engine provides advanced automatic content detection and tagging, delivering superior accuracy compared to traditional image classification methods. + +## Features + +- **Local Network Empowerment**: Distribute processing across home/office computers without cloud dependencies +- **Context-Aware Detection**: Leverages Vision-Language Models' understanding of visual relationships +- **Advanced Dependency Management**: Uses PythonDepManager for automatic dependency installation +- **Enjoying Funscript Haven?** Check out more tools and projects at https://github.com/Haven-hvn + +## Requirements + +- Python 3.8+ +- StashApp +- PythonDepManager plugin (automatically handles dependencies) +- OpenAI-compatible VLM endpoints (local or cloud-based) + +## Installation + +1. Clone or download this plugin to your StashApp plugins directory +2. Ensure PythonDepManager is installed in your StashApp plugins +3. Configure your VLM endpoints in `haven_vlm_config.py` (local network endpoints recommended) +4. Restart StashApp + +The plugin automatically manages all dependencies. + +## Why Local-First? 
+ +- **Complete Control**: Process sensitive content on your own hardware +- **Cost Effective**: Avoid cloud processing fees by using existing resources +- **Flexible Scaling**: Add more computers to your local network for increased capacity +- **Privacy Focused**: Keep your media completely private +- **Hybrid Options**: Combine local and cloud endpoints for optimal flexibility + +```mermaid +graph LR +A[User's Computer] --> B[Local GPU Machine] +A --> C[Local CPU Machine 1] +A --> D[Local CPU Machine 2] +A --> E[Cloud Endpoint] +``` + +## Configuration + +### Easy Setup with LM Studio + +[LM Studio](https://lmstudio.ai/) provides the easiest way to configure local endpoints: + +1. Download and install [LM Studio](https://lmstudio.ai/) +2. [Search for or download](https://huggingface.co/models) a vision-capable model; tested with (in order of high to low accuracy) zai-org/glm-4.6v-flash, huihui-mistral-small-3.2-24b-instruct-2506-abliterated-v2, qwen/qwen3-vl-8b, lfm2.5-vl +3. Load your desired Model +4. On the developer tab start the local server using the start toggle +5. Optionally click the Settings gear then toggle *Serve on local network* +6. Optionally configure `haven_vlm_config.py`: + +By default localhost is included in the config, **remove cloud endpoint if you don't want automatic failover** +```python +{ + "base_url": "http://localhost:1234/v1", # LM Studio default + "api_key": "", # API key not required + "name": "lm-studio-local", + "weight": 5, + "is_fallback": False +} +``` + +### Tag Configuration + +```python +"tag_list": [ + "Basketball point", "Foul", "Break-away", "Turnover" +] +``` + +### Processing Settings + +```python +VIDEO_FRAME_INTERVAL = 2.0 # Process every 2 seconds +CONCURRENT_TASK_LIMIT = 8 # Adjust based on local hardware +``` + +## Usage + +### Tag Videos +1. Tag scenes with `VLM_TagMe` +2. Run "Tag Videos" task +3. 
Plugin processes content using local/network resources + +### Performance Tips +- Start with 2-3 local machines for load balancing +- Assign higher weights to GPU-enabled machines +- Adjust `CONCURRENT_TASK_LIMIT` based on total system resources +- Use SSD storage for better I/O performance + +## File Structure + +``` +AHavenVLMConnector/ +├── ahavenvlmconnector.yml +├── haven_vlm_connector.py +├── haven_vlm_config.py +├── haven_vlm_engine.py +├── haven_media_handler.py +├── haven_vlm_utility.py +├── requirements.txt +└── README.md +``` + +## Troubleshooting + +### Local Network Setup +- Ensure firewalls allow communication between machines +- Verify all local endpoints are running VLM services +- Use static IPs for local machines +- Check `http://local-machine-ip:port/v1` responds correctly + +### Performance Optimization +- **Distribute Load**: Use multiple mid-range machines instead of one high-end +- **GPU Prioritization**: Assign highest weight to GPU machines +- **Network Speed**: Use wired Ethernet connections for faster transfer +- **Resource Monitoring**: Watch system resources during processing + +## Development + +### Adding Local Endpoints +1. Install VLM service on network machines +2. Add endpoint configuration with local IPs +3. Set appropriate weights based on hardware capability + +### Custom Models +Use any OpenAI-compatible models that support: +- POST requests to `/v1/chat/completions` +- Vision capabilities with image input +- Local deployment options + +### Log Messages + +Check StashApp logs for detailed processing information and error messages. + +## License + +This project is part of the StashApp Community Scripts collection. 
\ No newline at end of file diff --git a/plugins/AHavenVLMConnector/ahavenvlmconnector.yml b/plugins/AHavenVLMConnector/ahavenvlmconnector.yml new file mode 100644 index 00000000..89827cb0 --- /dev/null +++ b/plugins/AHavenVLMConnector/ahavenvlmconnector.yml @@ -0,0 +1,22 @@ +name: Haven VLM Connector +# requires: PythonDepManager +description: Tag videos with Vision-Language Models using any OpenAI-compatible VLM endpoint +version: 1.1.1 +url: https://discourse.stashapp.cc/t/haven-vlm-connector/5464 +exec: + - python + - "{pluginDir}/haven_vlm_connector.py" +interface: raw +tasks: + - name: Tag Videos + description: Run VLM analysis on videos with VLM_TagMe tag + defaultArgs: + mode: tag_videos + - name: Collect Incorrect Markers and Images + description: Collects data from markers and images that were VLM tagged but were manually marked with VLM_Incorrect due to the VLM making a mistake. This will collect the data and output as a file which can be used to improve the VLM models. + defaultArgs: + mode: collect_incorrect_markers + - name: Find Marker Settings + description: Find Optimal Marker Settings based on a video that has manually tuned markers and has been processed by the VLM previously. Only 1 video should have VLM_TagMe before running. 
+ defaultArgs: + mode: find_marker_settings diff --git a/plugins/AHavenVLMConnector/exit_tracker.py b/plugins/AHavenVLMConnector/exit_tracker.py new file mode 100644 index 00000000..74a4cea8 --- /dev/null +++ b/plugins/AHavenVLMConnector/exit_tracker.py @@ -0,0 +1,98 @@ +""" +Comprehensive sys.exit tracking module +Instruments all sys.exit() calls with full call stack and context +""" + +import sys +import traceback +from typing import Optional + +# Store original sys.exit +original_exit = sys.exit + +# Track if we've already patched +_exit_tracker_patched = False + +def install_exit_tracker(logger=None) -> None: + """ + Install the exit tracker by monkey-patching sys.exit + + Args: + logger: Optional logger instance (will use fallback print if None) + """ + global _exit_tracker_patched, original_exit + + if _exit_tracker_patched: + return + + # Store original if not already stored + if hasattr(sys, 'exit') and sys.exit is not original_exit: + original_exit = sys.exit + + def tracked_exit(code: int = 0) -> None: + """Track sys.exit() calls with full call stack""" + # Get current stack trace (not from exception, but current call stack) + stack = traceback.extract_stack() + + # Format the stack trace, excluding this tracking function + stack_lines = [] + for frame in stack: + # Skip internal Python frames and this tracker + if ('tracked_exit' not in frame.filename and + '/usr/lib' not in frame.filename and + '/System/Library' not in frame.filename and + 'exit_tracker.py' not in frame.filename): + stack_lines.append( + f" File \"{frame.filename}\", line {frame.lineno}, in {frame.name}\n {frame.line}" + ) + + # Take last 15 frames to see the full call chain + stack_str = '\n'.join(stack_lines[-15:]) + + # Get current exception info if available + exc_info = sys.exc_info() + exc_str = "" + if exc_info[0] is not None: + exc_str = f"\n Active Exception: {exc_info[0].__name__}: {exc_info[1]}" + + # Build the error message + error_msg = f"""[DEBUG_EXIT_CODE] 
========================================== +[DEBUG_EXIT_CODE] sys.exit() called with code: {code} +[DEBUG_EXIT_CODE] Call stack (last 15 frames): +{stack_str} +{exc_str} +[DEBUG_EXIT_CODE] ==========================================""" + + # Log using provided logger or fallback to print + if logger: + try: + logger.error(error_msg) + except Exception as log_error: + print(f"[EXIT_TRACKER_LOGGER_ERROR] Failed to log: {log_error}") + print(error_msg) + else: + print(error_msg) + + # Call original exit + original_exit(code) + + # Install the tracker + sys.exit = tracked_exit + _exit_tracker_patched = True + + if logger: + logger.debug("[DEBUG_EXIT_CODE] Exit tracker installed successfully") + else: + print("[DEBUG_EXIT_CODE] Exit tracker installed successfully") + +def uninstall_exit_tracker() -> None: + """Uninstall the exit tracker and restore original sys.exit""" + global _exit_tracker_patched, original_exit + + if _exit_tracker_patched: + sys.exit = original_exit + _exit_tracker_patched = False + +# Auto-install on import (can be disabled by calling uninstall_exit_tracker()) +if not _exit_tracker_patched: + install_exit_tracker() \ No newline at end of file diff --git a/plugins/AHavenVLMConnector/haven_media_handler.py b/plugins/AHavenVLMConnector/haven_media_handler.py new file mode 100644 index 00000000..163562a4 --- /dev/null +++ b/plugins/AHavenVLMConnector/haven_media_handler.py @@ -0,0 +1,333 @@ +""" +Haven Media Handler Module +Handles StashApp media operations and tag management +""" + +import os +import zipfile +import shutil +from typing import List, Dict, Any, Optional, Tuple, Set +from datetime import datetime +import json + +# Use PythonDepManager for dependency management +try: + from PythonDepManager import ensure_import + ensure_import("stashapi:stashapp-tools==0.2.58") + + from stashapi.stashapp import StashInterface, StashVersion + import stashapi.log as log +except ImportError as e: + print(f"stashapp-tools not found: {e}") + print("Please 
ensure PythonDepManager is available and stashapp-tools is accessible") + raise + +import haven_vlm_config as config + +# Global variables +tag_id_cache: Dict[str, int] = {} +vlm_tag_ids_cache: Set[int] = set() +stash_version: Optional[StashVersion] = None +end_seconds_support: bool = False + +# Tag IDs +stash: Optional[StashInterface] = None +vlm_errored_tag_id: Optional[int] = None +vlm_tagme_tag_id: Optional[int] = None +vlm_base_tag_id: Optional[int] = None +vlm_tagged_tag_id: Optional[int] = None +vr_tag_id: Optional[int] = None +vlm_incorrect_tag_id: Optional[int] = None + +def initialize(connection: Dict[str, Any]) -> None: + """Initialize the media handler with StashApp connection""" + global stash, vlm_errored_tag_id, vlm_tagme_tag_id, vlm_base_tag_id + global vlm_tagged_tag_id, vr_tag_id, end_seconds_support, stash_version + global vlm_incorrect_tag_id + + # Initialize the Stash API + stash = StashInterface(connection) + + # Initialize "metadata" tags + vlm_errored_tag_id = stash.find_tag(config.config.vlm_errored_tag_name, create=True)["id"] + vlm_tagme_tag_id = stash.find_tag(config.config.vlm_tagme_tag_name, create=True)["id"] + vlm_base_tag_id = stash.find_tag(config.config.vlm_base_tag_name, create=True)["id"] + vlm_tagged_tag_id = stash.find_tag(config.config.vlm_tagged_tag_name, create=True)["id"] + vlm_incorrect_tag_id = stash.find_tag(config.config.vlm_incorrect_tag_name, create=True)["id"] + + # Get VR tag from configuration + vr_tag_name = stash.get_configuration()["ui"].get("vrTag", None) + if not vr_tag_name: + log.warning("No VR tag found in configuration") + vr_tag_id = None + else: + vr_tag_id = stash.find_tag(vr_tag_name)["id"] + + stash_version = get_stash_version() + end_second_support_beyond = StashVersion("v0.27.2-76648") + end_seconds_support = stash_version > end_second_support_beyond + +def get_stash_version() -> StashVersion: + """Get the current StashApp version""" + if not stash: + raise RuntimeError("Stash interface not 
initialized") + return stash.stash_version() + +# ----------------- Tag Management Methods ----------------- + +def get_tag_ids(tag_names: List[str], create: bool = False) -> List[int]: + """Get tag IDs for multiple tag names""" + return [get_tag_id(tag_name, create) for tag_name in tag_names] + +def get_tag_id(tag_name: str, create: bool = False) -> Optional[int]: + """Get tag ID for a single tag name""" + if tag_name not in tag_id_cache: + stashtag = stash.find_tag(tag_name) + if stashtag: + tag_id_cache[tag_name] = stashtag["id"] + return stashtag["id"] + else: + if not create: + return None + tag = stash.create_tag({ + "name": tag_name, + "ignore_auto_tag": True, + "parent_ids": [vlm_base_tag_id] + })['id'] + tag_id_cache[tag_name] = tag + vlm_tag_ids_cache.add(tag) + return tag + return tag_id_cache.get(tag_name) + +def get_vlm_tags() -> List[int]: + """Get all VLM-generated tags""" + if len(vlm_tag_ids_cache) == 0: + vlm_tags = [ + item['id'] for item in stash.find_tags( + f={"parents": {"value": vlm_base_tag_id, "modifier": "INCLUDES"}}, + fragment="id" + ) + ] + vlm_tag_ids_cache.update(vlm_tags) + else: + vlm_tags = list(vlm_tag_ids_cache) + return vlm_tags + +def is_scene_tagged(tags: List[Dict[str, Any]]) -> bool: + """Check if a scene has been tagged by VLM""" + for tag in tags: + if tag['id'] == vlm_tagged_tag_id: + return True + return False + +def is_vr_scene(tags: List[Dict[str, Any]]) -> bool: + """Check if a scene is VR content""" + for tag in tags: + if tag['id'] == vr_tag_id: + return True + return False + +# ----------------- Scene Management Methods ----------------- + +def add_tags_to_video(video_id: int, tag_ids: List[int], add_tagged: bool = True) -> None: + """Add tags to a video scene""" + if add_tagged: + tag_ids.append(vlm_tagged_tag_id) + stash.update_scenes({ + "ids": [video_id], + "tag_ids": {"ids": tag_ids, "mode": "ADD"} + }) + +def clear_all_tags_from_video(scene: Dict[str, Any]) -> None: + """Clear all tags from a video scene 
using existing scene data""" + scene_id = scene.get('id') + if scene_id is None: + log.error("Scene missing 'id' field") + return + + current_tag_ids = [tag['id'] for tag in scene.get('tags', [])] + if current_tag_ids: + stash.update_scenes({ + "ids": [scene_id], + "tag_ids": {"ids": current_tag_ids, "mode": "REMOVE"} + }) + log.info(f"Cleared {len(current_tag_ids)} tags from scene {scene_id}") + +def clear_all_markers_from_video(video_id: int) -> None: + """Clear all markers from a video scene""" + markers = get_scene_markers(video_id) + if markers: + delete_markers(markers) + log.info(f"Cleared all {len(markers)} markers from scene {video_id}") + +def remove_vlm_tags_from_video( + video_id: int, + remove_tagme: bool = True, + remove_errored: bool = True +) -> None: + """Remove all VLM tags from a video scene""" + vlm_tags = get_vlm_tags() + if remove_tagme: + vlm_tags.append(vlm_tagme_tag_id) + if remove_errored: + vlm_tags.append(vlm_errored_tag_id) + stash.update_scenes({ + "ids": [video_id], + "tag_ids": {"ids": vlm_tags, "mode": "REMOVE"} + }) + +def get_tagme_scenes() -> List[Dict[str, Any]]: + """Get scenes tagged with VLM_TagMe""" + return stash.find_scenes( + f={"tags": {"value": vlm_tagme_tag_id, "modifier": "INCLUDES"}}, + fragment="id tags {id} files {path duration fingerprint(type: \"phash\")}" + ) + +def add_error_scene(scene_id: int) -> None: + """Add error tag to a scene""" + stash.update_scenes({ + "ids": [scene_id], + "tag_ids": {"ids": [vlm_errored_tag_id], "mode": "ADD"} + }) + +def remove_tagme_tag_from_scene(scene_id: int) -> None: + """Remove VLM_TagMe tag from a scene""" + stash.update_scenes({ + "ids": [scene_id], + "tag_ids": {"ids": [vlm_tagme_tag_id], "mode": "REMOVE"} + }) + +# ----------------- Marker Management Methods ----------------- + +def add_markers_to_video_from_dict( + video_id: int, + tag_timespans_dict: Dict[str, Dict[str, List[Any]]] +) -> None: + """Add markers to video from timespan dictionary""" + for _, 
tag_timespan_dict in tag_timespans_dict.items(): + for tag_name, time_frames in tag_timespan_dict.items(): + tag_id = get_tag_id(tag_name, create=True) + if tag_id: + add_markers_to_video(video_id, tag_id, tag_name, time_frames) + +def get_incorrect_markers() -> List[Dict[str, Any]]: + """Get markers tagged with VLM_Incorrect""" + if end_seconds_support: + return stash.find_scene_markers( + {"tags": {"value": vlm_incorrect_tag_id, "modifier": "INCLUDES"}}, + fragment="id scene {id files{path}} primary_tag {id, name} seconds end_seconds" + ) + else: + return stash.find_scene_markers( + {"tags": {"value": vlm_incorrect_tag_id, "modifier": "INCLUDES"}}, + fragment="id scene {id files{path}} primary_tag {id, name} seconds" + ) + +def add_markers_to_video( + video_id: int, + tag_id: int, + tag_name: str, + time_frames: List[Any] +) -> None: + """Add markers to video for specific time frames""" + for time_frame in time_frames: + if end_seconds_support: + stash.create_scene_marker({ + "scene_id": video_id, + "primary_tag_id": tag_id, + "tag_ids": [tag_id], + "seconds": time_frame.start, + "end_seconds": time_frame.end, + "title": tag_name + }) + else: + stash.create_scene_marker({ + "scene_id": video_id, + "primary_tag_id": tag_id, + "tag_ids": [tag_id], + "seconds": time_frame.start, + "title": tag_name + }) + +def get_scene_markers(video_id: int) -> List[Dict[str, Any]]: + """Get all markers for a scene""" + return stash.get_scene_markers(video_id) + +def write_scene_marker_to_file( + marker: Dict[str, Any], + scene_file: str, + output_folder: str +) -> None: + """Write scene marker data to file for analysis""" + try: + marker_id = marker['id'] + scene_id = marker['scene']['id'] + tag_name = marker['primary_tag']['name'] + + # Create output filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + filename = f"marker_{marker_id}_scene_{scene_id}_{tag_name}_{timestamp}.json" + output_path = os.path.join(output_folder, filename) + + # Prepare marker data + 
marker_data = { + "marker_id": marker_id, + "scene_id": scene_id, + "tag_name": tag_name, + "seconds": marker.get("seconds"), + "end_seconds": marker.get("end_seconds"), + "scene_file": scene_file, + "timestamp": timestamp + } + + # Write to file + with open(output_path, 'w') as f: + json.dump(marker_data, f, indent=2) + + except Exception as e: + log.error(f"Failed to write marker data: {e}") + +def delete_markers(markers: List[Dict[str, Any]]) -> None: + """Delete markers from StashApp""" + for marker in markers: + try: + stash.destroy_scene_marker(marker['id']) + except Exception as e: + log.error(f"Failed to delete marker {marker['id']}: {e}") + +def get_scene_markers_by_tag( + video_id: int, + error_if_no_end_seconds: bool = True +) -> List[Dict[str, Any]]: + """Get scene markers by tag with end_seconds support check""" + if end_seconds_support: + return stash.get_scene_markers(video_id) + else: + if error_if_no_end_seconds: + log.error("End seconds not supported in this StashApp version") + raise RuntimeError("End seconds not supported") + return stash.get_scene_markers(video_id) + +def remove_incorrect_tag_from_markers(markers: List[Dict[str, Any]]) -> None: + """Remove VLM_Incorrect tag from markers""" + marker_ids = [marker['id'] for marker in markers] + for marker_id in marker_ids: + try: + stash.update_scene_marker({ + "id": marker_id, + "tag_ids": {"ids": [vlm_incorrect_tag_id], "mode": "REMOVE"} + }) + except Exception as e: + log.error(f"Failed to remove incorrect tag from marker {marker_id}: {e}") + +def remove_vlm_markers_from_video(video_id: int) -> None: + """Remove all VLM markers from a video""" + markers = get_scene_markers(video_id) + vlm_tag_ids = get_vlm_tags() + + for marker in markers: + if marker['primary_tag']['id'] in vlm_tag_ids: + try: + stash.destroy_scene_marker(marker['id']) + except Exception as e: + log.error(f"Failed to delete VLM marker {marker['id']}: {e}") \ No newline at end of file diff --git 
a/plugins/AHavenVLMConnector/haven_vlm_config.py b/plugins/AHavenVLMConnector/haven_vlm_config.py new file mode 100644 index 00000000..e6ac0c3b --- /dev/null +++ b/plugins/AHavenVLMConnector/haven_vlm_config.py @@ -0,0 +1,446 @@ +""" +Configuration for A Haven VLM Connector +A StashApp plugin for Vision-Language Model based content tagging +""" + +from typing import Dict, List, Optional +from dataclasses import dataclass +import os +import yaml + +# ----------------- Core Settings ----------------- + +# VLM Engine Configuration +VLM_ENGINE_CONFIG = { + "active_ai_models": ["vlm_multiplexer_model"], + "trace_logging": True, + "pipelines": { + "video_pipeline_dynamic": { + "inputs": [ + "video_path", + "return_timestamps", + "time_interval", + "threshold", + "return_confidence", + "vr_video", + "existing_video_data", + "skipped_categories", + ], + "output": "results", + "short_name": "dynamic_video", + "version": 1.0, + "models": [ + { + "name": "dynamic_video_ai", + "inputs": [ + "video_path", "return_timestamps", "time_interval", + "threshold", "return_confidence", "vr_video", + "existing_video_data", "skipped_categories" + ], + "outputs": "results", + }, + ], + } + }, + "models": { + "binary_search_processor_dynamic": { + "type": "binary_search_processor", + "model_file_name": "binary_search_processor_dynamic" + }, + "vlm_multiplexer_model": { + "type": "vlm_model", + "model_file_name": "vlm_multiplexer_model", + "model_category": "actiondetection", + "model_id": "zai-org/glm-4.6v-flash", + "model_identifier": 93848, + "model_version": "1.0", + "use_multiplexer": True, + "max_concurrent_requests": 13, + "instance_count": 10, + "max_batch_size": 4, + "multiplexer_endpoints": [ + { + "base_url": "http://localhost:1234/v1", + "api_key": "", + "name": "lm-studio-primary", + "weight": 9, + "is_fallback": False, + "max_concurrent": 10 + }, + { + "base_url": "https://cloudagnostic.com:443/v1", + "api_key": "", + "name": "cloud-fallback", + "weight": 1, + "is_fallback": 
True, + "max_concurrent": 2 + } + ], + "tag_list": [ + "Anal Fucking", "Ass Licking", "Ass Penetration", "Ball Licking/Sucking", "Blowjob", "Cum on Person", + "Cum Swapping", "Cumshot", "Deepthroat", "Double Penetration", "Fingering", "Fisting", "Footjob", + "Gangbang", "Gloryhole", "Grabbing Ass", "Grabbing Boobs", "Grabbing Hair/Head", "Handjob", "Kissing", + "Licking Penis", "Masturbation", "Pissing", "Pussy Licking (Clearly Visible)", "Pussy Licking", + "Pussy Rubbing", "Sucking Fingers", "Sucking Toy/Dildo", "Wet (Genitals)", "Titjob", "Tribbing/Scissoring", + "Undressing", "Vaginal Penetration", "Vaginal Fucking", "Vibrating" + ] + }, + "result_coalescer": { + "type": "python", + "model_file_name": "result_coalescer" + }, + "result_finisher": { + "type": "python", + "model_file_name": "result_finisher" + }, + "batch_awaiter": { + "type": "python", + "model_file_name": "batch_awaiter" + }, + "video_result_postprocessor": { + "type": "python", + "model_file_name": "video_result_postprocessor" + }, + }, + "category_config": { + "actiondetection": { + "69": { + "RenamedTag": "69", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Anal Fucking": { + "RenamedTag": "Anal Fucking", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Ass Licking": { + "RenamedTag": "Ass Licking", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Ass Penetration": { + "RenamedTag": "Ass Penetration", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Ball Licking/Sucking": { + "RenamedTag": "Ball Licking/Sucking", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Blowjob": { + "RenamedTag": "Blowjob", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + 
"Cum on Person": { + "RenamedTag": "Cum on Person", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Cum Swapping": { + "RenamedTag": "Cum Swapping", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Cumshot": { + "RenamedTag": "Cumshot", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Deepthroat": { + "RenamedTag": "Deepthroat", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Double Penetration": { + "RenamedTag": "Double Penetration", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Fingering": { + "RenamedTag": "Fingering", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Fisting": { + "RenamedTag": "Fisting", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Footjob": { + "RenamedTag": "Footjob", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Gangbang": { + "RenamedTag": "Gangbang", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Gloryhole": { + "RenamedTag": "Gloryhole", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Grabbing Ass": { + "RenamedTag": "Grabbing Ass", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Grabbing Boobs": { + "RenamedTag": "Grabbing Boobs", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Grabbing Hair/Head": { + "RenamedTag": "Grabbing Hair/Head", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + 
}, + "Handjob": { + "RenamedTag": "Handjob", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Kissing": { + "RenamedTag": "Kissing", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Licking Penis": { + "RenamedTag": "Licking Penis", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Masturbation": { + "RenamedTag": "Masturbation", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Pissing": { + "RenamedTag": "Pissing", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Pussy Licking (Clearly Visible)": { + "RenamedTag": "Pussy Licking (Clearly Visible)", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Pussy Licking": { + "RenamedTag": "Pussy Licking", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Pussy Rubbing": { + "RenamedTag": "Pussy Rubbing", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Sucking Fingers": { + "RenamedTag": "Sucking Fingers", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Sucking Toy/Dildo": { + "RenamedTag": "Sucking Toy/Dildo", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Wet (Genitals)": { + "RenamedTag": "Wet (Genitals)", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Titjob": { + "RenamedTag": "Titjob", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Tribbing/Scissoring": { + "RenamedTag": "Tribbing/Scissoring", + "MinMarkerDuration": "1s", + 
"MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Undressing": { + "RenamedTag": "Undressing", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Vaginal Penetration": { + "RenamedTag": "Vaginal Penetration", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Vaginal Fucking": { + "RenamedTag": "Vaginal Fucking", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + }, + "Vibrating": { + "RenamedTag": "Vibrating", + "MinMarkerDuration": "1s", + "MaxGap": "30s", + "RequiredDuration": "1s", + "TagThreshold": 0.5, + } + } + } +} + +# ----------------- Processing Settings ----------------- + +# Video processing settings +VIDEO_FRAME_INTERVAL = 80 # Process every 80 seconds +VIDEO_THRESHOLD = 0.3 +VIDEO_CONFIDENCE_RETURN = True + +# Concurrency settings +CONCURRENT_TASK_LIMIT = 20 # Increased for better parallel video processing +SERVER_TIMEOUT = 3700 + +# ----------------- Tag Configuration ----------------- + +# Tag names for StashApp integration +VLM_BASE_TAG_NAME = "VLM" +VLM_TAGME_TAG_NAME = "VLM_TagMe" +VLM_UPDATEME_TAG_NAME = "VLM_UpdateMe" +VLM_TAGGED_TAG_NAME = "VLM_Tagged" +VLM_ERRORED_TAG_NAME = "VLM_Errored" +VLM_INCORRECT_TAG_NAME = "VLM_Incorrect" + +# ----------------- File System Settings ----------------- + +# Directory paths +OUTPUT_DATA_DIR = "./output_data" + +# File management +DELETE_INCORRECT_MARKERS = True +CREATE_MARKERS = True + +# Path mutations for different environments +PATH_MUTATION = {} + +# ----------------- Configuration Loading ----------------- + +@dataclass +class VLMConnectorConfig: + """Configuration class for the VLM Connector""" + vlm_engine_config: Dict + video_frame_interval: float + video_threshold: float + video_confidence_return: bool + concurrent_task_limit: int + server_timeout: int + vlm_base_tag_name: str + vlm_tagme_tag_name: str + 
vlm_updateme_tag_name: str + vlm_tagged_tag_name: str + vlm_errored_tag_name: str + vlm_incorrect_tag_name: str + output_data_dir: str + delete_incorrect_markers: bool + create_markers: bool + path_mutation: Dict + +def load_config_from_yaml(config_path: Optional[str] = None) -> VLMConnectorConfig: + """Load configuration from YAML file or use defaults""" + if config_path and os.path.exists(config_path): + with open(config_path, 'r') as f: + yaml_config = yaml.safe_load(f) + return VLMConnectorConfig(**yaml_config) + + # Return default configuration + return VLMConnectorConfig( + vlm_engine_config=VLM_ENGINE_CONFIG, + video_frame_interval=VIDEO_FRAME_INTERVAL, + video_threshold=VIDEO_THRESHOLD, + video_confidence_return=VIDEO_CONFIDENCE_RETURN, + concurrent_task_limit=CONCURRENT_TASK_LIMIT, + server_timeout=SERVER_TIMEOUT, + vlm_base_tag_name=VLM_BASE_TAG_NAME, + vlm_tagme_tag_name=VLM_TAGME_TAG_NAME, + vlm_updateme_tag_name=VLM_UPDATEME_TAG_NAME, + vlm_tagged_tag_name=VLM_TAGGED_TAG_NAME, + vlm_errored_tag_name=VLM_ERRORED_TAG_NAME, + vlm_incorrect_tag_name=VLM_INCORRECT_TAG_NAME, + output_data_dir=OUTPUT_DATA_DIR, + delete_incorrect_markers=DELETE_INCORRECT_MARKERS, + create_markers=CREATE_MARKERS, + path_mutation=PATH_MUTATION + ) + +# Global configuration instance +config = load_config_from_yaml() diff --git a/plugins/AHavenVLMConnector/haven_vlm_connector.py b/plugins/AHavenVLMConnector/haven_vlm_connector.py new file mode 100644 index 00000000..176a717b --- /dev/null +++ b/plugins/AHavenVLMConnector/haven_vlm_connector.py @@ -0,0 +1,516 @@ +""" +A Haven VLM Connector +A StashApp plugin for Vision-Language Model based content tagging +""" + +import os +import sys +import json +import shutil +import traceback +import asyncio +import logging +import time +from typing import Dict, Any, List, Optional +from datetime import datetime + +# Import and install sys.exit tracking FIRST (before any other imports that might call sys.exit) +try: + from exit_tracker import 
install_exit_tracker + import stashapi.log as log + + install_exit_tracker(log) +except ImportError as e: + print(f"Warning: exit_tracker not available: {e}") + print("sys.exit tracking will not be available") + +# ----------------- Setup and Dependencies ----------------- + +# Use PythonDepManager for dependency management +try: + from PythonDepManager import ensure_import + + # Install and ensure all required dependencies with specific versions + ensure_import( + "stashapi:stashapp-tools==0.2.58", + "aiohttp==3.12.13", + "pydantic==2.12.5", + "vlm-engine==1.0.1", + "pyyaml==6.0.2", + ) + + # Import the dependencies after ensuring they're available + import stashapi.log as log + from stashapi.stashapp import StashInterface + import aiohttp + import pydantic + import yaml + +except ImportError as e: + print(f"Failed to import PythonDepManager or required dependencies: {e}") + print("Please ensure PythonDepManager is installed and available.") + sys.exit(1) +except Exception as e: + print(f"Error during dependency management: {e}") + print(f"Stack trace: {traceback.format_exc()}") + sys.exit(1) + +# Import local modules +try: + import haven_vlm_config as config +except ModuleNotFoundError: + log.error("Please provide a haven_vlm_config.py file with the required variables.") + raise Exception( + "Please provide a haven_vlm_config.py file with the required variables." 
import haven_media_handler as media_handler
import haven_vlm_engine as vlm_engine
from haven_vlm_engine import TimeFrame

log.debug("Python instance is running at: " + sys.executable)

# ----------------- Global Variables -----------------

# Concurrency gate created in main(); bounds simultaneous scene processing.
semaphore: Optional[asyncio.Semaphore] = None
progress: float = 0.0
increment: float = 0.0
# Counters shared between tag_videos() and the progress callback.
completed_tasks: int = 0
total_tasks: int = 0
# Per-scene fractional progress (0.0-1.0), keyed by scene id.
video_progress: Dict[str, float] = {}

# ----------------- Main Execution -----------------


async def main() -> None:
    """Main entry point for the plugin.

    Creates the concurrency semaphore, reads the plugin request from stdin,
    dispatches it via run(), and prints the JSON result to stdout.
    """
    global semaphore

    # Semaphore initialization logging for hypothesis A
    log.debug(
        f"[DEBUG_HYPOTHESIS_A] Initializing semaphore with limit {config.config.concurrent_task_limit}"
    )

    semaphore = asyncio.Semaphore(config.config.concurrent_task_limit)

    # Post-semaphore creation logging
    log.debug(
        f"[DEBUG_HYPOTHESIS_A] Semaphore created successfully (limit: {config.config.concurrent_task_limit})"
    )

    json_input = read_json_input()
    output = {}
    await run(json_input, output)
    out = json.dumps(output)
    print(out + "\n")


def read_json_input() -> Dict[str, Any]:
    """Read and decode the JSON payload StashApp pipes to stdin."""
    json_input = sys.stdin.read()
    return json.loads(json_input)


async def run(json_input: Dict[str, Any], output: Dict[str, Any]) -> None:
    """Main execution logic.

    Initializes the StashApp connection from the server_connection block,
    then dispatches on args.mode. Always reports {"output": "ok"} — errors
    are surfaced through logging and per-scene error tags, not the result.
    """
    plugin_args = None
    try:
        log.debug(json_input["server_connection"])
        # Plugin runs with CWD set to its own directory so relative paths work.
        os.chdir(json_input["server_connection"]["PluginDir"])
        media_handler.initialize(json_input["server_connection"])
    except Exception as e:
        log.error(f"Failed to initialize media handler: {e}")
        raise

    try:
        plugin_args = json_input["args"]["mode"]
    except KeyError:
        # No mode supplied: fall through and answer "ok" without doing work.
        pass

    if plugin_args == "tag_videos":
        await tag_videos()
        output["output"] = "ok"
        return
    elif plugin_args == "find_marker_settings":
        await find_marker_settings()
        output["output"] = "ok"
        return
    elif plugin_args == "collect_incorrect_markers":
        collect_incorrect_markers_and_images()
        output["output"] = "ok"
        return

    output["output"] = "ok"
    return


# ----------------- High Level Processing Functions -----------------


async def tag_videos() -> None:
    """Tag every VLM_TagMe scene concurrently, bounded by the semaphore.

    Spawns one task per scene, then drains them with asyncio.as_completed so
    failures are logged as they happen while the remaining tasks keep running.
    """
    global completed_tasks, total_tasks

    scenes = media_handler.get_tagme_scenes()
    if not scenes:
        log.info(
            "No videos to tag. Have you tagged any scenes with the VLM_TagMe tag to get processed?"
        )
        return

    total_tasks = len(scenes)
    completed_tasks = 0

    # Reset shared progress state before starting a new batch.
    video_progress.clear()
    for scene in scenes:
        video_progress[scene.get("id", "unknown")] = 0.0
    log.progress(0.0)

    log.info(
        f"🚀 Starting video processing for {total_tasks} scenes with semaphore limit of {config.config.concurrent_task_limit}"
    )

    # Create tasks with proper indexing for debugging
    tasks = []
    for i, scene in enumerate(scenes):
        # Pre-task creation logging for hypothesis A (semaphore deadlock) and E (signal termination)
        scene_id = scene.get("id")
        log.debug(
            f"[DEBUG_HYPOTHESIS_A] Creating task {i + 1}/{total_tasks} for scene {scene_id}, semaphore limit: {config.config.concurrent_task_limit}"
        )

        task = asyncio.create_task(__tag_video_with_timing(scene, i))
        tasks.append(task)

    # Use asyncio.as_completed to process results as they finish (proves concurrency)
    completed_task_futures = asyncio.as_completed(tasks)

    batch_start_time = asyncio.get_event_loop().time()

    for completed_task in completed_task_futures:
        try:
            await completed_task
            completed_tasks += 1

        except Exception as e:
            # A failed scene still counts toward completion so progress advances.
            completed_tasks += 1
            # Exception logging for hypothesis E (signal termination)
            error_type = type(e).__name__
            log.debug(
                f"[DEBUG_HYPOTHESIS_E] Task failed with exception: {error_type}: {str(e)} (Task {completed_tasks}/{total_tasks})"
            )

            log.error(f"❌ Task failed: {e}")

    total_time = asyncio.get_event_loop().time() - batch_start_time

    log.info(
        f"🎉 All {total_tasks} videos completed in {total_time:.2f}s (avg: {total_time / total_tasks:.2f}s/video)"
    )
    log.progress(1.0)


async def find_marker_settings() -> None:
    """Find optimal marker settings based on a single tagged video.

    Requires exactly one VLM_TagMe scene so the optimizer has an unambiguous
    ground-truth reference.
    """
    scenes = media_handler.get_tagme_scenes()
    if len(scenes) != 1:
        log.error(
            "Please tag exactly one scene with the VLM_TagMe tag to get processed."
        )
        return
    scene = scenes[0]
    await __find_marker_settings(scene)


def collect_incorrect_markers_and_images() -> None:
    """Collect data from incorrectly tagged markers and images.

    Copies VLM_Incorrect images into OUTPUT_DATA_DIR/images and writes marker
    clips into OUTPUT_DATA_DIR/scenes/<tag>/, then clears the incorrect tag
    from the images.
    """
    incorrect_images = media_handler.get_incorrect_images()
    image_paths, image_ids, temp_files = media_handler.get_image_paths_and_ids(
        incorrect_images
    )
    incorrect_markers = media_handler.get_incorrect_markers()

    if not (len(incorrect_images) > 0 or len(incorrect_markers) > 0):
        log.info("No incorrect images or markers to collect.")
        return

    # NOTE(review): current_time is computed but never used below — confirm
    # whether it was intended to timestamp the output folders.
    current_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

    try:
        # Process images
        image_folder = os.path.join(config.config.output_data_dir, "images")
        os.makedirs(image_folder, exist_ok=True)
        for image_path in image_paths:
            try:
                shutil.copy(image_path, image_folder)
            except Exception as e:
                log.error(f"Failed to copy image {image_path} to {image_folder}: {e}")
    except Exception as e:
        log.error(f"Failed to process images: {e}")
        raise e
    finally:
        # Clean up temp files regardless of copy success.
        for temp_file in temp_files:
            try:
                if os.path.isdir(temp_file):
                    shutil.rmtree(temp_file)
                else:
                    os.remove(temp_file)
            except Exception as e:
                log.debug(f"Failed to remove temp file {temp_file}: {e}")

    # Process markers: one sub-folder per primary tag name.
    scene_folder = os.path.join(config.config.output_data_dir, "scenes")
    os.makedirs(scene_folder, exist_ok=True)
    tag_folders = {}

    for marker in incorrect_markers:
        scene_path = marker["scene"]["files"][0]["path"]
        if not scene_path:
            log.error(f"Marker {marker['id']} has no scene path")
            continue
        try:
            tag_name = marker["primary_tag"]["name"]
            if tag_name not in tag_folders:
                tag_folders[tag_name] = os.path.join(scene_folder, tag_name)
                os.makedirs(tag_folders[tag_name], exist_ok=True)
            media_handler.write_scene_marker_to_file(
                marker, scene_path, tag_folders[tag_name]
            )
        except Exception as e:
            log.error(f"Failed to collect scene: {e}")

    # Remove incorrect tags from images
    image_ids = [image["id"] for image in incorrect_images]
    media_handler.remove_incorrect_tag_from_images(image_ids)


# ----------------- Low Level Processing Functions -----------------


async def __tag_video_with_timing(scene: Dict[str, Any], scene_index: int) -> None:
    """Wrap __tag_video with wall-clock timing logs; re-raises on failure."""
    start_time = asyncio.get_event_loop().time()
    scene_id = scene.get("id", "unknown")

    log.info(f"🎬 Starting video {scene_index + 1}: Scene {scene_id}")

    try:
        await __tag_video(scene)
        end_time = asyncio.get_event_loop().time()
        duration = end_time - start_time
        log.info(
            f"✅ Completed video {scene_index + 1} (Scene {scene_id}) in {duration:.2f}s"
        )

    except Exception as e:
        end_time = asyncio.get_event_loop().time()
        duration = end_time - start_time
        log.error(
            f"❌ Failed video {scene_index + 1} (Scene {scene_id}) after {duration:.2f}s: {e}"
        )
        raise


async def __tag_video(scene: Dict[str, Any]) -> None:
    """Tag a single video scene with semaphore timing instrumentation.

    Validates the scene record, runs it through the VLM engine, then replaces
    the scene's tags/markers with the detected ones. Errors are swallowed and
    recorded via media_handler.add_error_scene so the batch keeps running.
    """
    scene_id = scene.get("id")

    # Pre-semaphore acquisition logging for hypothesis A (semaphore deadlock)
    task_start_time = asyncio.get_event_loop().time()
    acquisition_start_time = task_start_time
    log.debug(
        f"[DEBUG_HYPOTHESIS_A] Task starting for scene {scene_id} at {task_start_time:.3f}s"
    )

    async with semaphore:
        try:
            # Semaphore acquisition successful logging
            acquisition_end_time = asyncio.get_event_loop().time()
            acquisition_time = acquisition_end_time - acquisition_start_time
            log.debug(
                f"[DEBUG_HYPOTHESIS_A] Semaphore acquired for scene {scene_id} after {acquisition_time:.3f}s"
            )

            if scene_id is None:
                log.error("Scene missing 'id' field")
                return

            files = scene.get("files", [])
            if not files:
                log.error(f"Scene {scene_id} has no files")
                return

            scene_file = files[0].get("path")
            if scene_file is None:
                log.error(f"Scene {scene_id} file has no path")
                return

            # Check if scene is VR
            is_vr = media_handler.is_vr_scene(scene.get("tags", []))

            def progress_cb(p: int) -> None:
                # Fold this scene's percentage into the batch-wide progress bar.
                global video_progress, total_tasks
                video_progress[scene_id] = p / 100.0
                total_prog = sum(video_progress.values()) / total_tasks

                stats = vlm_engine.vlm_engine.get_performance_stats()
                total_frames = stats.get("total_frames_processed", 0)
                elapsed_seconds = stats.get("elapsed_time", 0.0)

                log.info(f"[Throughput] total_frames: {total_frames}")
                log.info(f"[Throughput] elapsed_seconds: {elapsed_seconds:.2f}")

                # Frames-per-minute throughput; guard against division by zero.
                if elapsed_seconds > 0:
                    fpm = (total_frames / elapsed_seconds) * 60.0
                else:
                    fpm = 0.0

                log.info(f"[Throughput] calculated_fpm: {fpm:.1f}")
                log.info(
                    f"[Throughput] Frame ~{(p / 100) * 100:.0f}: {fpm:.1f} FPM | progress: {p}%"
                )
                log.progress(total_prog)

            # Process video through VLM Engine with HTTP timing for hypothesis B
            processing_start_time = asyncio.get_event_loop().time()

            # HTTP request lifecycle tracking start
            log.debug(
                f"[DEBUG_HYPOTHESIS_B] Starting VLM processing for scene {scene_id}: {scene_file}"
            )

            video_result = await vlm_engine.process_video_async(
                scene_file,
                vr_video=is_vr,
                frame_interval=config.config.video_frame_interval,
                threshold=config.config.video_threshold,
                return_confidence=config.config.video_confidence_return,
                progress_callback=progress_cb,
            )

            # Extract detected tags across all categories.
            detected_tags = set()
            for category_tags in video_result.video_tags.values():
                detected_tags.update(category_tags)

            # Post-VLM processing logging
            processing_end_time = asyncio.get_event_loop().time()
            processing_duration = processing_end_time - processing_start_time
            log.debug(
                f"[DEBUG_HYPOTHESIS_B] VLM processing completed for scene {scene_id} in {processing_duration:.2f}s ({len(detected_tags)} detected tags)"
            )

            if detected_tags:
                # Clear all existing tags and markers before adding new ones
                media_handler.clear_all_tags_from_video(scene)
                media_handler.clear_all_markers_from_video(scene_id)

                # Add tags to scene
                tag_ids = media_handler.get_tag_ids(list(detected_tags), create=True)
                media_handler.add_tags_to_video(scene_id, tag_ids)
                log.info(f"Added tags {list(detected_tags)} to scene {scene_id}")

                # Add markers if enabled
                if config.config.create_markers:
                    media_handler.add_markers_to_video_from_dict(
                        scene_id, video_result.tag_timespans
                    )
                    log.info(f"Added markers to scene {scene_id}")

            # Remove VLM_TagMe tag from processed scene
            # NOTE(review): diff formatting is ambiguous here — this appears to
            # run whether or not tags were detected; confirm intended nesting.
            media_handler.remove_tagme_tag_from_scene(scene_id)

            # Task completion logging
            task_end_time = asyncio.get_event_loop().time()
            total_task_time = task_end_time - task_start_time
            log.debug(
                f"[DEBUG_HYPOTHESIS_A] Task completed for scene {scene_id} in {total_task_time:.2f}s"
            )

        except Exception as e:
            # Exception handling with detailed logging for hypothesis E
            exception_time = asyncio.get_event_loop().time()
            error_type = type(e).__name__
            log.debug(
                f"[DEBUG_HYPOTHESIS_E] Task exception for scene {scene_id}: {error_type}: {str(e)} at {exception_time:.3f}s"
            )

            scene_id = scene.get("id", "unknown")
            log.error(f"Error processing video scene {scene_id}: {e}")
            # Add error tag to failed scene if we have a valid ID
            if scene_id != "unknown":
                media_handler.add_error_scene(scene_id)


async def __find_marker_settings(scene: Dict[str, Any]) -> None:
    """Find optimal marker settings for a scene.

    Treats the scene's existing markers as ground truth (confidence 1.0) and
    asks the engine to optimize its timeframe parameters against them.
    """
    try:
        scene_id = scene.get("id")
        if scene_id is None:
            log.error("Scene missing 'id' field")
            return

        files = scene.get("files", [])
        if not files:
            log.error(f"Scene {scene_id} has no files")
            return

        scene_file = files[0].get("path")
        if scene_file is None:
            log.error(f"Scene {scene_id} file has no path")
            return

        # Get existing markers for the scene
        existing_markers = media_handler.get_scene_markers(scene_id)

        # Convert markers to desired timespan format
        desired_timespan_data = {}
        for marker in existing_markers:
            tag_name = marker["primary_tag"]["name"]
            # Markers without an end default to a 1-second span.
            desired_timespan_data[tag_name] = TimeFrame(
                start=marker["seconds"],
                end=marker.get("end_seconds", marker["seconds"] + 1),
                total_confidence=1.0,
            )

        # Find optimal settings
        optimal_settings = await vlm_engine.find_optimal_marker_settings_async(
            existing_json={},  # No existing JSON data
            desired_timespan_data=desired_timespan_data,
        )

        # Output results
        log.info(f"Optimal marker settings found for scene {scene_id}:")
        log.info(json.dumps(optimal_settings, indent=2))

    except Exception as e:
        scene_id = scene.get("id", "unknown")
        log.error(f"Error finding marker settings for scene {scene_id}: {e}")


# ----------------- Cleanup -----------------


async def cleanup() -> None:
    """Release resources held by the global VLM engine wrapper, if any."""
    if vlm_engine.vlm_engine:
        await vlm_engine.vlm_engine.shutdown()


# Run main function if script is executed directly
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        log.info("Plugin interrupted by user")
        sys.exit(0)
    except SystemExit as e:
        # Re-raise system exit with the exit code
        log.debug(f"[DEBUG_EXIT_CODE] Caught SystemExit with code: {e.code}")
        raise
    except Exception as e:
        log.error(f"Plugin failed: {e}")
        sys.exit(1)
    finally:
        # NOTE(review): this creates a second event loop after main()'s loop
        # has closed — confirm the engine tolerates cross-loop shutdown.
        asyncio.run(cleanup())
@dataclass
class TimeFrame:
    """A tagged interval within a video, expressed in seconds."""

    start: float
    end: float
    # Aggregate model confidence for the interval; None when unknown.
    total_confidence: Optional[float] = None

    def to_json(self) -> str:
        """Serialise this frame as a JSON object string."""
        payload = {
            "start": self.start,
            "end": self.end,
            "total_confidence": self.total_confidence,
        }
        return json.dumps(payload)

    def __str__(self) -> str:
        return f"TimeFrame(start={self.start}, end={self.end}, confidence={self.total_confidence})"
class HavenVLMEngine:
    """Main VLM Engine integration class.

    Lazily builds an EngineConfig from the plugin configuration, owns the
    VLMEngine instance, and exposes video processing / marker optimization.
    """

    def __init__(self):
        self.engine: Optional[VLMEngine] = None
        self.engine_config: Optional[EngineConfig] = None
        self._initialized = False  # guards against double initialization

    def _configure_logging(self) -> None:
        """Configure logging levels based on plugin config."""
        vlm_config = config.config.vlm_engine_config
        trace_enabled = vlm_config.get("trace_logging", False)

        if trace_enabled:
            logging.basicConfig(
                level=logging.DEBUG,
                format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
            )
            logging.getLogger("logger").setLevel(logging.DEBUG)
            logging.getLogger("multiplexer_llm").setLevel(logging.DEBUG)
            logger.debug("Trace logging enabled for vlm-engine and multiplexer-llm")
        else:
            logger.setLevel(logging.WARNING)

    async def initialize(self) -> None:
        """Initialize the VLM Engine with configuration; idempotent.

        Raises:
            Exception: re-raised from engine construction/initialization.
        """
        if self._initialized:
            return

        try:
            self._configure_logging()
            logger.info("Initializing Haven VLM Engine...")

            # Convert config dict to EngineConfig objects
            self.engine_config = self._create_engine_config()

            # Create and initialize the engine
            self.engine = VLMEngine(config=self.engine_config)
            await self.engine.initialize()

            self._initialized = True
            logger.info("Haven VLM Engine initialized successfully")

        except Exception as e:
            logger.error(f"Failed to initialize VLM Engine: {e}")
            raise

    def _create_engine_config(self) -> EngineConfig:
        """Translate the plugin's config dict into typed EngineConfig objects.

        Raises:
            ValueError: if a multiplexer endpoint omits 'max_concurrent'.
        """
        vlm_config = config.config.vlm_engine_config

        # Create pipeline configs
        pipelines = {}
        for pipeline_name, pipeline_data in vlm_config["pipelines"].items():
            stage_models = [
                PipelineModelConfig(
                    name=model["name"], inputs=model["inputs"], outputs=model["outputs"]
                )
                for model in pipeline_data["models"]
            ]

            pipelines[pipeline_name] = PipelineConfig(
                inputs=pipeline_data["inputs"],
                output=pipeline_data["output"],
                short_name=pipeline_data["short_name"],
                version=pipeline_data["version"],
                models=stage_models,
            )

        # Create model configs with new architectural changes
        models = {}
        for model_name, model_data in vlm_config["models"].items():
            if model_data["type"] == "vlm_model":
                # Process multiplexer_endpoints and validate max_concurrent
                multiplexer_endpoints = []
                for endpoint in model_data.get("multiplexer_endpoints", []):
                    # max_concurrent is mandatory: fail fast with a clear error.
                    if "max_concurrent" not in endpoint:
                        raise ValueError(
                            f"Endpoint '{endpoint.get('name', 'unnamed')}' is missing required 'max_concurrent' parameter"
                        )

                    multiplexer_endpoints.append(
                        {
                            "base_url": endpoint["base_url"],
                            "api_key": endpoint.get("api_key", ""),
                            "name": endpoint["name"],
                            "weight": endpoint.get("weight", 5),
                            "is_fallback": endpoint.get("is_fallback", False),
                            "max_concurrent": endpoint["max_concurrent"],
                        }
                    )

                models[model_name] = ModelConfig(
                    type=model_data["type"],
                    model_file_name=model_data["model_file_name"],
                    model_category=model_data["model_category"],
                    model_id=model_data["model_id"],
                    model_identifier=model_data["model_identifier"],
                    model_version=model_data["model_version"],
                    use_multiplexer=model_data.get("use_multiplexer", False),
                    max_concurrent_requests=model_data.get(
                        "max_concurrent_requests", 10
                    ),
                    instance_count=model_data.get("instance_count", 1),
                    max_batch_size=model_data.get("max_batch_size", 1),
                    multiplexer_endpoints=multiplexer_endpoints,
                    tag_list=model_data.get("tag_list", []),
                )
            else:
                models[model_name] = ModelConfig(
                    type=model_data["type"],
                    model_file_name=model_data["model_file_name"],
                )

        return EngineConfig(
            active_ai_models=vlm_config["active_ai_models"],
            pipelines=pipelines,
            models=models,
            category_config=vlm_config["category_config"],
            loglevel="DEBUG" if vlm_config.get("trace_logging", False) else "WARNING",
        )

    async def process_video(
        self,
        video_path: str,
        vr_video: bool = False,
        frame_interval: Optional[float] = None,
        threshold: Optional[float] = None,
        return_confidence: Optional[bool] = None,
        existing_json: Optional[Dict[str, Any]] = None,
        progress_callback: Optional[Callable[[int], None]] = None,
    ) -> VideoTagInfo:
        """Process a video using the VLM Engine.

        Args:
            video_path: path of the video file to analyse.
            frame_interval/threshold/return_confidence: overrides for the
                configured defaults; None means "use config value".
            progress_callback: called with an integer percentage (0-100).

        Returns:
            VideoTagInfo parsed from the engine's result payload.

        Raises:
            Exception: re-raised from the engine on processing failure.
        """
        if not self._initialized:
            await self.initialize()

        try:
            logger.info(f"Processing video: {video_path}")

            # FIX: use explicit None checks instead of `or` so an explicit
            # 0 / 0.0 override is honoured rather than silently replaced
            # by the configured default.
            if frame_interval is None:
                frame_interval = config.config.video_frame_interval
            if threshold is None:
                threshold = config.config.video_threshold
            if return_confidence is None:
                return_confidence = config.config.video_confidence_return

            # NOTE(review): threshold, return_confidence, vr_video and
            # existing_json are resolved/accepted but never forwarded to
            # engine.process_video — confirm whether the engine API accepts
            # them before wiring through.
            results = await self.engine.process_video(
                video_path,
                frame_interval=frame_interval,
                progress_callback=progress_callback,
            )

            logger.info(f"Video processing completed for: {video_path}")
            logger.debug(f"Raw results structure: {type(results)}")

            # Extract video_tag_info from the nested structure
            if isinstance(results, dict) and "video_tag_info" in results:
                video_tag_data = results["video_tag_info"]
                logger.debug(
                    f"Using video_tag_info from results: {video_tag_data.keys()}"
                )
            else:
                # Fallback: assume results is already in the correct format
                video_tag_data = results
                logger.debug(
                    f"Using results directly: {video_tag_data.keys() if isinstance(video_tag_data, dict) else type(video_tag_data)}"
                )

            return VideoTagInfo.from_json(video_tag_data)

        except Exception as e:
            logger.error(f"Error processing video {video_path}: {e}")
            raise

    def get_performance_stats(self) -> Dict[str, Any]:
        """Get performance statistics from the VLM Engine; {} before init."""
        if not self._initialized or not self.engine:
            return {}
        return self.engine.get_performance_stats()

    async def find_optimal_marker_settings(
        self, existing_json: Dict[str, Any], desired_timespan_data: Dict[str, TimeFrame]
    ) -> Dict[str, Any]:
        """Find optimal marker settings based on existing data.

        Converts TimeFrame objects into plain dicts before delegating to the
        engine's optimizer.
        """
        if not self._initialized:
            await self.initialize()

        try:
            logger.info("Finding optimal marker settings...")

            # Convert TimeFrame objects to dict format
            desired_data = {}
            for key, timeframe in desired_timespan_data.items():
                desired_data[key] = {
                    "start": timeframe.start,
                    "end": timeframe.end,
                    "total_confidence": timeframe.total_confidence,
                }

            # Call the engine's optimization method
            results = await self.engine.optimize_timeframe_settings(
                existing_json_data=existing_json, desired_timespan_data=desired_data
            )

            logger.info("Optimal marker settings found")
            return results

        except Exception as e:
            logger.error(f"Error finding optimal marker settings: {e}")
            raise

    async def shutdown(self) -> None:
        """Shutdown the VLM Engine (best-effort cleanup; never raises)."""
        if self.engine and self._initialized:
            try:
                # VLMEngine doesn't have a shutdown method, just perform basic cleanup
                logger.info("VLM Engine cleanup completed")
                self._initialized = False

            except Exception as e:
                logger.error(f"Error during VLM Engine cleanup: {e}")
                self._initialized = False


# Global VLM Engine instance
vlm_engine = HavenVLMEngine()


# Convenience functions for backward compatibility
async def process_video_async(
    video_path: str,
    vr_video: bool = False,
    frame_interval: Optional[float] = None,
    threshold: Optional[float] = None,
    return_confidence: Optional[bool] = None,
    existing_json: Optional[Dict[str, Any]] = None,
    progress_callback: Optional[Callable[[int], None]] = None,
) -> VideoTagInfo:
    """Process video asynchronously via the module-level engine instance."""
    return await vlm_engine.process_video(
        video_path,
        vr_video,
        frame_interval,
        threshold,
        return_confidence,
        existing_json,
        progress_callback=progress_callback,
    )
"""
Haven VLM Utility Module
Utility functions for the A Haven VLM Connector plugin
"""

import os
import json
import logging
from typing import Dict, Any, List, Optional, Union
from pathlib import Path

logger = logging.getLogger(__name__)

# Recognised media extensions (lower-case, including the leading dot).
_VIDEO_EXTENSIONS = {".mp4", ".avi", ".mkv", ".mov", ".wmv", ".flv", ".webm", ".m4v"}
_IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff", ".webp"}


def apply_path_mutations(path: str, mutations: Dict[str, str]) -> str:
    """Rewrite the first matching path prefix for cross-environment mapping.

    Args:
        path: original file path.
        mutations: prefix -> replacement mapping (e.g. {"E:": "F:"}).

    Returns:
        The path with at most one prefix substituted; unchanged if nothing matches.
    """
    if not mutations:
        return path

    for prefix, replacement in mutations.items():
        if path.startswith(prefix):
            # Only the first matching prefix is applied, once.
            return replacement + path[len(prefix):]
    return path


def ensure_directory_exists(directory_path: str) -> None:
    """Create *directory_path* (and parents) if it does not already exist."""
    Path(directory_path).mkdir(parents=True, exist_ok=True)


def safe_file_operation(operation_func, *args, **kwargs) -> Optional[Any]:
    """Run a file operation, logging and returning None on any failure.

    Args:
        operation_func: callable performing the operation.
        *args/**kwargs: forwarded to the callable.

    Returns:
        The callable's result, or None if it raised.
    """
    try:
        return operation_func(*args, **kwargs)
    except (OSError, IOError) as e:
        logger.error(f"File operation failed: {e}")
    except Exception as e:
        logger.error(f"Unexpected error in file operation: {e}")
    return None


def load_yaml_config(config_path: str) -> Optional[Dict[str, Any]]:
    """Load a YAML configuration file.

    Returns:
        The parsed mapping, or None when the file is missing or invalid.
    """
    import yaml  # local import so this module loads without PyYAML installed

    try:
        with open(config_path, "r", encoding="utf-8") as f:
            loaded = yaml.safe_load(f)
        logger.info(f"Configuration loaded from {config_path}")
        return loaded
    except FileNotFoundError:
        logger.warning(f"Configuration file not found: {config_path}")
        return None
    except yaml.YAMLError as e:
        logger.error(f"Error parsing YAML configuration: {e}")
        return None
    except Exception as e:
        logger.error(f"Unexpected error loading configuration: {e}")
        return None


def save_yaml_config(config: Dict[str, Any], config_path: str) -> bool:
    """Write *config* to *config_path* as YAML, creating parent directories.

    Returns:
        True on success, False on any error.
    """
    import yaml  # local import so this module loads without PyYAML installed

    try:
        ensure_directory_exists(os.path.dirname(config_path))
        with open(config_path, "w", encoding="utf-8") as f:
            yaml.dump(config, f, default_flow_style=False, indent=2)
        logger.info(f"Configuration saved to {config_path}")
        return True
    except Exception as e:
        logger.error(f"Error saving configuration: {e}")
        return False


def validate_file_path(file_path: str) -> bool:
    """Return True iff *file_path* is an existing regular file we can read."""
    try:
        readable = os.path.isfile(file_path) and os.access(file_path, os.R_OK)
    except Exception:
        return False
    return readable


def get_file_extension(file_path: str) -> str:
    """Return the lower-cased extension of *file_path*, including the dot."""
    return Path(file_path).suffix.lower()


def is_video_file(file_path: str) -> bool:
    """Return True when the file's extension is a known video extension."""
    return get_file_extension(file_path) in _VIDEO_EXTENSIONS


def is_image_file(file_path: str) -> bool:
    """Return True when the file's extension is a known image extension."""
    return get_file_extension(file_path) in _IMAGE_EXTENSIONS


def format_duration(seconds: float) -> str:
    """Format a duration in seconds as e.g. "42.5s", "2m 5.0s" or "1h 2m 5.0s"."""
    if seconds < 60:
        return f"{seconds:.1f}s"
    if seconds < 3600:
        minutes, secs = divmod(seconds, 60)
        return f"{int(minutes)}m {secs:.1f}s"
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{int(hours)}h {int(minutes)}m {secs:.1f}s"


def format_file_size(bytes_size: int) -> str:
    """Format a byte count as a human-readable size, e.g. "1.5 MB"."""
    size = float(bytes_size)
    for unit in ("B", "KB", "MB", "GB", "TB"):
        if size < 1024.0:
            return f"{size:.1f} {unit}"
        size /= 1024.0
    return f"{size:.1f} PB"
underscores + invalid_chars = '<>:"/\\|?*' + for char in invalid_chars: + filename = filename.replace(char, '_') + + # Remove leading/trailing spaces and dots + filename = filename.strip(' .') + + # Ensure filename is not empty + if not filename: + filename = "unnamed" + + return filename + +def create_backup_file(file_path: str, backup_suffix: str = ".backup") -> Optional[str]: + """ + Create a backup of a file + + Args: + file_path: Path to the file to backup + backup_suffix: Suffix for the backup file + + Returns: + Path to the backup file or None if failed + """ + try: + if not os.path.exists(file_path): + logger.warning(f"File does not exist: {file_path}") + return None + + backup_path = file_path + backup_suffix + import shutil + shutil.copy2(file_path, backup_path) + logger.info(f"Backup created: {backup_path}") + return backup_path + except Exception as e: + logger.error(f"Failed to create backup: {e}") + return None + +def merge_dictionaries(dict1: Dict[str, Any], dict2: Dict[str, Any], overwrite: bool = True) -> Dict[str, Any]: + """ + Merge two dictionaries, with option to overwrite existing keys + + Args: + dict1: First dictionary + dict2: Second dictionary + overwrite: Whether to overwrite existing keys in dict1 + + Returns: + Merged dictionary + """ + result = dict1.copy() + + for key, value in dict2.items(): + if key not in result or overwrite: + result[key] = value + elif isinstance(result[key], dict) and isinstance(value, dict): + result[key] = merge_dictionaries(result[key], value, overwrite) + + return result + +def chunk_list(lst: List[Any], chunk_size: int) -> List[List[Any]]: + """ + Split a list into chunks of specified size + + Args: + lst: List to chunk + chunk_size: Size of each chunk + + Returns: + List of chunks + """ + return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)] + +def retry_operation(operation_func, max_retries: int = 3, delay: float = 1.0, *args, **kwargs) -> Optional[Any]: + """ + Retry an operation with 
exponential backoff + + Args: + operation_func: Function to retry + max_retries: Maximum number of retries + delay: Initial delay between retries + *args: Arguments for the function + **kwargs: Keyword arguments for the function + + Returns: + Result of the operation or None if all retries failed + """ + import time + + for attempt in range(max_retries + 1): + try: + return operation_func(*args, **kwargs) + except Exception as e: + if attempt == max_retries: + logger.error(f"Operation failed after {max_retries} retries: {e}") + return None + + wait_time = delay * (2 ** attempt) + logger.warning(f"Operation failed (attempt {attempt + 1}/{max_retries + 1}), retrying in {wait_time}s: {e}") + time.sleep(wait_time) + + return None \ No newline at end of file diff --git a/plugins/AHavenVLMConnector/requirements.txt b/plugins/AHavenVLMConnector/requirements.txt new file mode 100644 index 00000000..6d704f53 --- /dev/null +++ b/plugins/AHavenVLMConnector/requirements.txt @@ -0,0 +1,8 @@ +# Core dependencies managed by PythonDepManager +# These are automatically handled by the plugin's dependency management system +# PythonDepManager will ensure the correct versions are installed + +# Development and testing dependencies +coverage>=7.0.0 +pytest>=7.0.0 +pytest-cov>=4.0.0 diff --git a/plugins/AHavenVLMConnector/run_tests.py b/plugins/AHavenVLMConnector/run_tests.py new file mode 100644 index 00000000..bc8e0500 --- /dev/null +++ b/plugins/AHavenVLMConnector/run_tests.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 +""" +Test runner for A Haven VLM Connector +Runs all unit tests with coverage reporting +""" + +import sys +import os +import subprocess +import unittest +from pathlib import Path + +def install_test_dependencies(): + """Install test dependencies if not already installed""" + test_deps = [ + 'coverage', + 'pytest', + 'pytest-cov' + ] + + for dep in test_deps: + try: + __import__(dep.replace('-', '_')) + except ImportError: + print(f"Installing {dep}...") + 
subprocess.check_call([sys.executable, "-m", "pip", "install", dep]) + +def run_tests_with_coverage(): + """Run tests with coverage reporting""" + # Install test dependencies + install_test_dependencies() + + # Get the directory containing this script + script_dir = Path(__file__).parent + + # Discover and run tests + loader = unittest.TestLoader() + start_dir = script_dir + suite = loader.discover(start_dir, pattern='test_*.py') + + # Run tests with coverage + import coverage + + # Start coverage measurement + cov = coverage.Coverage( + source=['haven_vlm_config.py', 'haven_vlm_engine.py', 'haven_media_handler.py', + 'haven_vlm_connector.py', 'haven_vlm_utility.py'], + omit=['*/test_*.py', '*/__pycache__/*', '*/venv/*', '*/env/*'] + ) + cov.start() + + # Run the tests + runner = unittest.TextTestRunner(verbosity=2) + result = runner.run(suite) + + # Stop coverage measurement + cov.stop() + cov.save() + + # Generate coverage report + print("\n" + "="*60) + print("COVERAGE REPORT") + print("="*60) + cov.report() + + # Generate HTML coverage report + cov.html_report(directory='htmlcov') + print(f"\nHTML coverage report generated in: {script_dir}/htmlcov/index.html") + + return result.wasSuccessful() + +def run_specific_test(test_file): + """Run a specific test file""" + if not test_file.endswith('.py'): + test_file += '.py' + + test_path = Path(__file__).parent / test_file + + if not test_path.exists(): + print(f"Test file not found: {test_path}") + return False + + # Run the specific test + loader = unittest.TestLoader() + suite = loader.loadTestsFromName(test_file[:-3]) + + runner = unittest.TextTestRunner(verbosity=2) + result = runner.run(suite) + + return result.wasSuccessful() + +def main(): + """Main entry point""" + if len(sys.argv) > 1: + # Run specific test file + test_file = sys.argv[1] + success = run_specific_test(test_file) + else: + # Run all tests with coverage + success = run_tests_with_coverage() + + if success: + print("\n✅ All tests passed!") + 
sys.exit(0) + else: + print("\n❌ Some tests failed!") + sys.exit(1) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/plugins/AHavenVLMConnector/test_dependency_management.py b/plugins/AHavenVLMConnector/test_dependency_management.py new file mode 100644 index 00000000..190f5dbe --- /dev/null +++ b/plugins/AHavenVLMConnector/test_dependency_management.py @@ -0,0 +1,98 @@ +""" +Unit tests for dependency management functionality using PythonDepManager +""" + +import unittest +import sys +from unittest.mock import patch, MagicMock, mock_open +import tempfile +import os + +class TestPythonDepManagerIntegration(unittest.TestCase): + """Test cases for PythonDepManager integration""" + + def setUp(self): + """Set up test fixtures""" + # Mock PythonDepManager module + self.mock_python_dep_manager = MagicMock() + sys.modules['PythonDepManager'] = self.mock_python_dep_manager + + def tearDown(self): + """Clean up after tests""" + if 'PythonDepManager' in sys.modules: + del sys.modules['PythonDepManager'] + + @patch('builtins.print') + def test_dependency_import_failure(self, mock_print): + """Test dependency import failure handling""" + # Mock ensure_import to raise ImportError + self.mock_python_dep_manager.ensure_import = MagicMock(side_effect=ImportError("Package not found")) + + # Test that the error is handled gracefully + with self.assertRaises(SystemExit): + import haven_vlm_connector + + def test_error_messages(self): + """Test that appropriate error messages are displayed""" + # Mock ensure_import to raise ImportError + self.mock_python_dep_manager.ensure_import = MagicMock(side_effect=ImportError("Package not found")) + + with patch('builtins.print') as mock_print: + with self.assertRaises(SystemExit): + import haven_vlm_connector + + # Check that appropriate error messages were printed + print_calls = [call[0][0] for call in mock_print.call_args_list] + self.assertTrue(any("Failed to import PythonDepManager" in msg for msg in 
print_calls if isinstance(msg, str))) + self.assertTrue(any("Please ensure PythonDepManager is installed" in msg for msg in print_calls if isinstance(msg, str))) + + +class TestDependencyManagementEdgeCases(unittest.TestCase): + """Test edge cases in dependency management""" + + def setUp(self): + """Set up test fixtures""" + self.mock_python_dep_manager = MagicMock() + sys.modules['PythonDepManager'] = self.mock_python_dep_manager + + def tearDown(self): + """Clean up after tests""" + if 'PythonDepManager' in sys.modules: + del sys.modules['PythonDepManager'] + + def test_missing_python_dep_manager(self): + """Test behavior when PythonDepManager is not available""" + # Remove PythonDepManager from sys.modules + if 'PythonDepManager' in sys.modules: + del sys.modules['PythonDepManager'] + + with patch('builtins.print') as mock_print: + with self.assertRaises(SystemExit): + import haven_vlm_connector + + # Check that appropriate error message was printed + print_calls = [call[0][0] for call in mock_print.call_args_list] + self.assertTrue(any("Failed to import PythonDepManager" in msg for msg in print_calls if isinstance(msg, str))) + + def test_partial_dependency_failure(self): + """Test behavior when some dependencies fail to import""" + # Mock ensure_import to succeed but some imports to fail + self.mock_python_dep_manager.ensure_import = MagicMock() + + # Mock some successful imports but not all + mock_stashapi = MagicMock() + sys.modules['stashapi.log'] = mock_stashapi + sys.modules['stashapi.stashapp'] = mock_stashapi + + # Don't mock aiohttp, so it should fail + with patch('builtins.print') as mock_print: + with self.assertRaises(SystemExit): + import haven_vlm_connector + + # Check that appropriate error message was printed + print_calls = [call[0][0] for call in mock_print.call_args_list] + self.assertTrue(any("Error during dependency management" in msg for msg in print_calls if isinstance(msg, str))) + + +if __name__ == '__main__': + unittest.main() \ No 
newline at end of file diff --git a/plugins/AHavenVLMConnector/test_haven_media_handler.py b/plugins/AHavenVLMConnector/test_haven_media_handler.py new file mode 100644 index 00000000..ed81b5fc --- /dev/null +++ b/plugins/AHavenVLMConnector/test_haven_media_handler.py @@ -0,0 +1,387 @@ +""" +Unit tests for Haven Media Handler Module +Tests StashApp media operations and tag management +""" + +import unittest +from unittest.mock import Mock, patch, MagicMock +from typing import List, Dict, Any, Optional +import sys +import os + +# Add the current directory to the path to import the module +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + +# Mock the dependencies before importing the module +sys.modules['PythonDepManager'] = Mock() +sys.modules['stashapi.stashapp'] = Mock() +sys.modules['stashapi.log'] = Mock() +sys.modules['haven_vlm_config'] = Mock() + +# Import the module after mocking dependencies +import haven_media_handler + + +class TestHavenMediaHandler(unittest.TestCase): + """Test cases for Haven Media Handler""" + + def setUp(self) -> None: + """Set up test fixtures""" + # Mock the stash interface + self.mock_stash = Mock() + self.mock_stash.find_tag.return_value = {"id": 1} + self.mock_stash.get_configuration.return_value = {"ui": {"vrTag": "VR"}} + self.mock_stash.stash_version.return_value = Mock() + + # Mock the log module + self.mock_log = Mock() + + # Patch the global variables + haven_media_handler.stash = self.mock_stash + haven_media_handler.log = self.mock_log + + # Mock tag IDs + haven_media_handler.vlm_errored_tag_id = 1 + haven_media_handler.vlm_tagme_tag_id = 2 + haven_media_handler.vlm_base_tag_id = 3 + haven_media_handler.vlm_tagged_tag_id = 4 + haven_media_handler.vr_tag_id = 5 + haven_media_handler.vlm_incorrect_tag_id = 6 + + def tearDown(self) -> None: + """Clean up after tests""" + # Clear any cached data + haven_media_handler.tag_id_cache.clear() + haven_media_handler.vlm_tag_ids_cache.clear() + + def 
test_clear_all_tags_from_video_with_tags(self) -> None: + """Test clearing all tags from a video that has tags""" + # Mock scene with tags + mock_scene = { + "id": 123, + "tags": [ + {"id": 10, "name": "Tag1"}, + {"id": 20, "name": "Tag2"}, + {"id": 30, "name": "Tag3"} + ] + } + # Call the function + haven_media_handler.clear_all_tags_from_video(mock_scene) + # Verify tags were removed + self.mock_stash.update_scenes.assert_called_once_with({ + "ids": [123], + "tag_ids": {"ids": [10, 20, 30], "mode": "REMOVE"} + }) + # Verify log message + self.mock_log.info.assert_called_once_with("Cleared 3 tags from scene 123") + + def test_clear_all_tags_from_video_no_tags(self) -> None: + """Test clearing all tags from a video that has no tags""" + # Mock scene without tags + mock_scene = {"id": 123, "tags": []} + # Call the function + haven_media_handler.clear_all_tags_from_video(mock_scene) + # Verify no update was called since there are no tags + self.mock_stash.update_scenes.assert_not_called() + # Verify no log message + self.mock_log.info.assert_not_called() + + def test_clear_all_tags_from_video_scene_without_tags_key(self) -> None: + """Test clearing all tags from a scene that doesn't have a tags key""" + # Mock scene without tags key + mock_scene = {"id": 123} + # Call the function + haven_media_handler.clear_all_tags_from_video(mock_scene) + # Verify no update was called + self.mock_stash.update_scenes.assert_not_called() + + @patch('haven_media_handler.get_scene_markers') + @patch('haven_media_handler.delete_markers') + def test_clear_all_markers_from_video_with_markers(self, mock_delete_markers: Mock, mock_get_markers: Mock) -> None: + """Test clearing all markers from a video that has markers""" + # Mock markers + mock_markers = [ + {"id": 1, "title": "Marker1"}, + {"id": 2, "title": "Marker2"} + ] + mock_get_markers.return_value = mock_markers + + # Call the function + haven_media_handler.clear_all_markers_from_video(123) + + # Verify markers were retrieved + 
mock_get_markers.assert_called_once_with(123) + + # Verify markers were deleted + mock_delete_markers.assert_called_once_with(mock_markers) + + # Verify log message + self.mock_log.info.assert_called_once_with("Cleared all 2 markers from scene 123") + + @patch('haven_media_handler.get_scene_markers') + @patch('haven_media_handler.delete_markers') + def test_clear_all_markers_from_video_no_markers(self, mock_delete_markers: Mock, mock_get_markers: Mock) -> None: + """Test clearing all markers from a video that has no markers""" + # Mock no markers + mock_get_markers.return_value = [] + + # Call the function + haven_media_handler.clear_all_markers_from_video(123) + + # Verify markers were retrieved + mock_get_markers.assert_called_once_with(123) + + # Verify no deletion was called + mock_delete_markers.assert_not_called() + + # Verify no log message + self.mock_log.info.assert_not_called() + + def test_add_tags_to_video_with_tagged(self) -> None: + """Test adding tags to video with tagged flag enabled""" + # Call the function + haven_media_handler.add_tags_to_video(123, [10, 20, 30], add_tagged=True) + + # Verify tags were added (including tagged tag) + self.mock_stash.update_scenes.assert_called_once_with({ + "ids": [123], + "tag_ids": {"ids": [10, 20, 30, 4], "mode": "ADD"} + }) + + def test_add_tags_to_video_without_tagged(self) -> None: + """Test adding tags to video with tagged flag disabled""" + # Call the function + haven_media_handler.add_tags_to_video(123, [10, 20, 30], add_tagged=False) + + # Verify tags were added (without tagged tag) + self.mock_stash.update_scenes.assert_called_once_with({ + "ids": [123], + "tag_ids": {"ids": [10, 20, 30], "mode": "ADD"} + }) + + @patch('haven_media_handler.get_vlm_tags') + def test_remove_vlm_tags_from_video(self, mock_get_vlm_tags: Mock) -> None: + """Test removing VLM tags from video""" + # Mock VLM tags + mock_get_vlm_tags.return_value = [100, 200, 300] + + # Call the function + 
haven_media_handler.remove_vlm_tags_from_video(123, remove_tagme=True, remove_errored=True) + + # Verify VLM tags were retrieved + mock_get_vlm_tags.assert_called_once() + + # Verify tags were removed (including tagme and errored tags) + self.mock_stash.update_scenes.assert_called_once_with({ + "ids": [123], + "tag_ids": {"ids": [100, 200, 300, 2, 1], "mode": "REMOVE"} + }) + + def test_get_tagme_scenes(self) -> None: + """Test getting scenes tagged with VLM_TagMe""" + # Mock scenes + mock_scenes = [{"id": 1}, {"id": 2}] + self.mock_stash.find_scenes.return_value = mock_scenes + + # Call the function + result = haven_media_handler.get_tagme_scenes() + + # Verify scenes were found + self.mock_stash.find_scenes.assert_called_once_with( + f={"tags": {"value": 2, "modifier": "INCLUDES"}}, + fragment="id tags {id} files {path duration fingerprint(type: \"phash\")}" + ) + + # Verify result + self.assertEqual(result, mock_scenes) + + def test_add_error_scene(self) -> None: + """Test adding error tag to a scene""" + # Call the function + haven_media_handler.add_error_scene(123) + + # Verify error tag was added + self.mock_stash.update_scenes.assert_called_once_with({ + "ids": [123], + "tag_ids": {"ids": [1], "mode": "ADD"} + }) + + def test_remove_tagme_tag_from_scene(self) -> None: + """Test removing VLM_TagMe tag from a scene""" + # Call the function + haven_media_handler.remove_tagme_tag_from_scene(123) + + # Verify tagme tag was removed + self.mock_stash.update_scenes.assert_called_once_with({ + "ids": [123], + "tag_ids": {"ids": [2], "mode": "REMOVE"} + }) + + def test_is_scene_tagged_true(self) -> None: + """Test checking if a scene is tagged (true case)""" + # Mock tags including tagged tag + tags = [ + {"id": 10, "name": "Tag1"}, + {"id": 4, "name": "VLM_Tagged"}, # This is the tagged tag + {"id": 20, "name": "Tag2"} + ] + + # Call the function + result = haven_media_handler.is_scene_tagged(tags) + + # Verify result + self.assertTrue(result) + + def 
test_is_scene_tagged_false(self) -> None: + """Test checking if a scene is tagged (false case)""" + # Mock tags without tagged tag + tags = [ + {"id": 10, "name": "Tag1"}, + {"id": 20, "name": "Tag2"} + ] + + # Call the function + result = haven_media_handler.is_scene_tagged(tags) + + # Verify result + self.assertFalse(result) + + def test_is_vr_scene_true(self) -> None: + """Test checking if a scene is VR (true case)""" + # Mock tags including VR tag + tags = [ + {"id": 10, "name": "Tag1"}, + {"id": 5, "name": "VR"}, # This is the VR tag + {"id": 20, "name": "Tag2"} + ] + + # Call the function + result = haven_media_handler.is_vr_scene(tags) + + # Verify result + self.assertTrue(result) + + def test_is_vr_scene_false(self) -> None: + """Test checking if a scene is VR (false case)""" + # Mock tags without VR tag + tags = [ + {"id": 10, "name": "Tag1"}, + {"id": 20, "name": "Tag2"} + ] + + # Call the function + result = haven_media_handler.is_vr_scene(tags) + + # Verify result + self.assertFalse(result) + + def test_get_tag_id_existing(self) -> None: + """Test getting tag ID for existing tag""" + # Mock existing tag + self.mock_stash.find_tag.return_value = {"id": 123, "name": "TestTag"} + + # Call the function + result = haven_media_handler.get_tag_id("TestTag", create=False) + + # Verify tag was found + self.mock_stash.find_tag.assert_called_once_with("TestTag") + + # Verify result + self.assertEqual(result, 123) + + def test_get_tag_id_not_existing_no_create(self) -> None: + """Test getting tag ID for non-existing tag without create""" + # Mock non-existing tag + self.mock_stash.find_tag.return_value = None + + # Call the function + result = haven_media_handler.get_tag_id("TestTag", create=False) + + # Verify tag was searched + self.mock_stash.find_tag.assert_called_once_with("TestTag") + + # Verify result is None + self.assertIsNone(result) + + def test_get_tag_id_create_new(self) -> None: + """Test getting tag ID for non-existing tag with create""" + # Mock 
non-existing tag + self.mock_stash.find_tag.return_value = None + + # Mock created tag + self.mock_stash.create_tag.return_value = {"id": 456, "name": "TestTag"} + + # Call the function + result = haven_media_handler.get_tag_id("TestTag", create=True) + + # Verify tag was searched + self.mock_stash.find_tag.assert_called_once_with("TestTag") + + # Verify tag was created + self.mock_stash.create_tag.assert_called_once_with({ + "name": "TestTag", + "ignore_auto_tag": True, + "parent_ids": [3] + }) + + # Verify result + self.assertEqual(result, 456) + + def test_get_tag_ids(self) -> None: + """Test getting multiple tag IDs""" + # Mock tag IDs + with patch('haven_media_handler.get_tag_id') as mock_get_tag_id: + mock_get_tag_id.side_effect = [10, 20, 30] + + # Call the function + result = haven_media_handler.get_tag_ids(["Tag1", "Tag2", "Tag3"], create=True) + + # Verify individual tag IDs were retrieved + self.assertEqual(mock_get_tag_id.call_count, 3) + mock_get_tag_id.assert_any_call("Tag1", True) + mock_get_tag_id.assert_any_call("Tag2", True) + mock_get_tag_id.assert_any_call("Tag3", True) + + # Verify result + self.assertEqual(result, [10, 20, 30]) + + @patch('haven_media_handler.vlm_tag_ids_cache') + def test_get_vlm_tags_from_cache(self, mock_cache: Mock) -> None: + """Test getting VLM tags from cache""" + # Mock cached tags + mock_cache.__len__.return_value = 3 + mock_cache.__iter__.return_value = iter([100, 200, 300]) + + # Call the function + result = haven_media_handler.get_vlm_tags() + + # Verify result from cache + self.assertEqual(result, [100, 200, 300]) + + def test_get_vlm_tags_from_stash(self) -> None: + """Test getting VLM tags from stash when cache is empty""" + # Mock empty cache + haven_media_handler.vlm_tag_ids_cache.clear() + + # Mock stash tags + mock_tags = [ + {"id": 100, "name": "VLM_Tag1"}, + {"id": 200, "name": "VLM_Tag2"} + ] + self.mock_stash.find_tags.return_value = mock_tags + + # Call the function + result = 
haven_media_handler.get_vlm_tags() + + # Verify tags were found + self.mock_stash.find_tags.assert_called_once_with( + f={"parents": {"value": 3, "modifier": "INCLUDES"}}, + fragment="id" + ) + + # Verify result + self.assertEqual(result, [100, 200]) + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/plugins/AHavenVLMConnector/test_haven_vlm_config.py b/plugins/AHavenVLMConnector/test_haven_vlm_config.py new file mode 100644 index 00000000..464e295f --- /dev/null +++ b/plugins/AHavenVLMConnector/test_haven_vlm_config.py @@ -0,0 +1,286 @@ +""" +Unit tests for haven_vlm_config module +""" + +import unittest +import tempfile +import os +import yaml +from unittest.mock import patch, mock_open +from dataclasses import dataclass + +import haven_vlm_config + + +class TestVLMConnectorConfig(unittest.TestCase): + """Test cases for VLMConnectorConfig dataclass""" + + def test_vlm_connector_config_creation(self): + """Test creating VLMConnectorConfig with all required fields""" + config = haven_vlm_config.VLMConnectorConfig( + vlm_engine_config={"test": "config"}, + video_frame_interval=2.0, + video_threshold=0.3, + video_confidence_return=True, + image_threshold=0.5, + image_batch_size=320, + image_confidence_return=False, + concurrent_task_limit=10, + server_timeout=3700, + vlm_base_tag_name="VLM", + vlm_tagme_tag_name="VLM_TagMe", + vlm_updateme_tag_name="VLM_UpdateMe", + vlm_tagged_tag_name="VLM_Tagged", + vlm_errored_tag_name="VLM_Errored", + vlm_incorrect_tag_name="VLM_Incorrect", + temp_image_dir="./temp_images", + output_data_dir="./output_data", + delete_incorrect_markers=True, + create_markers=True, + path_mutation={} + ) + + self.assertEqual(config.video_frame_interval, 2.0) + self.assertEqual(config.video_threshold, 0.3) + self.assertEqual(config.image_threshold, 0.5) + self.assertEqual(config.concurrent_task_limit, 10) + self.assertEqual(config.vlm_base_tag_name, "VLM") + self.assertEqual(config.temp_image_dir, 
"./temp_images") + + def test_vlm_connector_config_defaults(self): + """Test VLMConnectorConfig with minimal required fields""" + config = haven_vlm_config.VLMConnectorConfig( + vlm_engine_config={}, + video_frame_interval=1.0, + video_threshold=0.1, + video_confidence_return=False, + image_threshold=0.1, + image_batch_size=100, + image_confidence_return=False, + concurrent_task_limit=5, + server_timeout=1000, + vlm_base_tag_name="TEST", + vlm_tagme_tag_name="TEST_TagMe", + vlm_updateme_tag_name="TEST_UpdateMe", + vlm_tagged_tag_name="TEST_Tagged", + vlm_errored_tag_name="TEST_Errored", + vlm_incorrect_tag_name="TEST_Incorrect", + temp_image_dir="./test_temp", + output_data_dir="./test_output", + delete_incorrect_markers=False, + create_markers=False, + path_mutation={"test": "mutation"} + ) + + self.assertEqual(config.video_frame_interval, 1.0) + self.assertEqual(config.video_threshold, 0.1) + self.assertEqual(config.path_mutation, {"test": "mutation"}) + + +class TestLoadConfigFromYaml(unittest.TestCase): + """Test cases for load_config_from_yaml function""" + + def setUp(self): + """Set up test fixtures""" + self.test_config = { + "vlm_engine_config": { + "active_ai_models": ["test_model"], + "pipelines": {}, + "models": {}, + "category_config": {} + }, + "video_frame_interval": 3.0, + "video_threshold": 0.4, + "video_confidence_return": True, + "image_threshold": 0.6, + "image_batch_size": 500, + "image_confidence_return": True, + "concurrent_task_limit": 15, + "server_timeout": 5000, + "vlm_base_tag_name": "TEST_VLM", + "vlm_tagme_tag_name": "TEST_VLM_TagMe", + "vlm_updateme_tag_name": "TEST_VLM_UpdateMe", + "vlm_tagged_tag_name": "TEST_VLM_Tagged", + "vlm_errored_tag_name": "TEST_VLM_Errored", + "vlm_incorrect_tag_name": "TEST_VLM_Incorrect", + "temp_image_dir": "./test_temp_images", + "output_data_dir": "./test_output_data", + "delete_incorrect_markers": False, + "create_markers": False, + "path_mutation": {"E:": "F:"} + } + + def 
test_load_config_from_yaml_with_valid_file(self): + """Test loading configuration from a valid YAML file""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.yml', delete=False) as f: + yaml.dump(self.test_config, f) + config_path = f.name + + try: + config = haven_vlm_config.load_config_from_yaml(config_path) + + self.assertIsInstance(config, haven_vlm_config.VLMConnectorConfig) + self.assertEqual(config.video_frame_interval, 3.0) + self.assertEqual(config.video_threshold, 0.4) + self.assertEqual(config.image_threshold, 0.6) + self.assertEqual(config.concurrent_task_limit, 15) + self.assertEqual(config.vlm_base_tag_name, "TEST_VLM") + self.assertEqual(config.path_mutation, {"E:": "F:"}) + finally: + os.unlink(config_path) + + def test_load_config_from_yaml_with_nonexistent_file(self): + """Test loading configuration with nonexistent file path""" + config = haven_vlm_config.load_config_from_yaml("nonexistent_file.yml") + + # Should return default configuration + self.assertIsInstance(config, haven_vlm_config.VLMConnectorConfig) + self.assertEqual(config.video_frame_interval, haven_vlm_config.VIDEO_FRAME_INTERVAL) + self.assertEqual(config.video_threshold, haven_vlm_config.VIDEO_THRESHOLD) + + def test_load_config_from_yaml_with_none_path(self): + """Test loading configuration with None path""" + config = haven_vlm_config.load_config_from_yaml(None) + + # Should return default configuration + self.assertIsInstance(config, haven_vlm_config.VLMConnectorConfig) + self.assertEqual(config.video_frame_interval, haven_vlm_config.VIDEO_FRAME_INTERVAL) + + def test_load_config_from_yaml_with_invalid_yaml(self): + """Test loading configuration with invalid YAML content""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.yml', delete=False) as f: + f.write("invalid: yaml: content: [") + config_path = f.name + + try: + config = haven_vlm_config.load_config_from_yaml(config_path) + + # Should return default configuration on YAML error + self.assertIsInstance(config, 
haven_vlm_config.VLMConnectorConfig) + self.assertEqual(config.video_frame_interval, haven_vlm_config.VIDEO_FRAME_INTERVAL) + finally: + os.unlink(config_path) + + def test_load_config_from_yaml_with_file_permission_error(self): + """Test loading configuration with file permission error""" + with patch('builtins.open', side_effect=PermissionError("Permission denied")): + config = haven_vlm_config.load_config_from_yaml("test.yml") + + # Should return default configuration on file error + self.assertIsInstance(config, haven_vlm_config.VLMConnectorConfig) + self.assertEqual(config.video_frame_interval, haven_vlm_config.VIDEO_FRAME_INTERVAL) + + +class TestConfigurationConstants(unittest.TestCase): + """Test cases for configuration constants""" + + def test_vlm_engine_config_structure(self): + """Test that VLM_ENGINE_CONFIG has the expected structure""" + config = haven_vlm_config.VLM_ENGINE_CONFIG + + # Check required top-level keys + self.assertIn("active_ai_models", config) + self.assertIn("pipelines", config) + self.assertIn("models", config) + self.assertIn("category_config", config) + + # Check active_ai_models is a list + self.assertIsInstance(config["active_ai_models"], list) + self.assertIn("vlm_multiplexer_model", config["active_ai_models"]) + + # Check pipelines structure + self.assertIn("video_pipeline_dynamic", config["pipelines"]) + pipeline = config["pipelines"]["video_pipeline_dynamic"] + self.assertIn("inputs", pipeline) + self.assertIn("output", pipeline) + self.assertIn("models", pipeline) + + # Check models structure + self.assertIn("vlm_multiplexer_model", config["models"]) + model = config["models"]["vlm_multiplexer_model"] + self.assertIn("type", model) + self.assertIn("multiplexer_endpoints", model) + self.assertIn("tag_list", model) + + def test_processing_settings(self): + """Test that processing settings have valid values""" + self.assertGreater(haven_vlm_config.VIDEO_FRAME_INTERVAL, 0) + 
self.assertGreaterEqual(haven_vlm_config.VIDEO_THRESHOLD, 0) + self.assertLessEqual(haven_vlm_config.VIDEO_THRESHOLD, 1) + self.assertGreaterEqual(haven_vlm_config.IMAGE_THRESHOLD, 0) + self.assertLessEqual(haven_vlm_config.IMAGE_THRESHOLD, 1) + self.assertGreater(haven_vlm_config.IMAGE_BATCH_SIZE, 0) + self.assertGreater(haven_vlm_config.CONCURRENT_TASK_LIMIT, 0) + self.assertGreater(haven_vlm_config.SERVER_TIMEOUT, 0) + + def test_tag_names(self): + """Test that tag names are valid strings""" + tag_names = [ + haven_vlm_config.VLM_BASE_TAG_NAME, + haven_vlm_config.VLM_TAGME_TAG_NAME, + haven_vlm_config.VLM_UPDATEME_TAG_NAME, + haven_vlm_config.VLM_TAGGED_TAG_NAME, + haven_vlm_config.VLM_ERRORED_TAG_NAME, + haven_vlm_config.VLM_INCORRECT_TAG_NAME + ] + + for tag_name in tag_names: + self.assertIsInstance(tag_name, str) + self.assertGreater(len(tag_name), 0) + + def test_directory_paths(self): + """Test that directory paths are valid strings""" + self.assertIsInstance(haven_vlm_config.TEMP_IMAGE_DIR, str) + self.assertIsInstance(haven_vlm_config.OUTPUT_DATA_DIR, str) + self.assertGreater(len(haven_vlm_config.TEMP_IMAGE_DIR), 0) + self.assertGreater(len(haven_vlm_config.OUTPUT_DATA_DIR), 0) + + def test_boolean_settings(self): + """Test that boolean settings are valid""" + self.assertIsInstance(haven_vlm_config.DELETE_INCORRECT_MARKERS, bool) + self.assertIsInstance(haven_vlm_config.CREATE_MARKERS, bool) + + def test_path_mutation(self): + """Test that path mutation is a dictionary""" + self.assertIsInstance(haven_vlm_config.PATH_MUTATION, dict) + + +class TestGlobalConfigInstance(unittest.TestCase): + """Test cases for the global config instance""" + + def test_global_config_exists(self): + """Test that the global config instance exists and is valid""" + self.assertIsInstance(haven_vlm_config.config, haven_vlm_config.VLMConnectorConfig) + + def test_global_config_has_required_attributes(self): + """Test that the global config has all required attributes""" + config 
= haven_vlm_config.config + + # Check that all required attributes exist + required_attrs = [ + 'vlm_engine_config', 'video_frame_interval', 'video_threshold', + 'video_confidence_return', 'image_threshold', 'image_batch_size', + 'image_confidence_return', 'concurrent_task_limit', 'server_timeout', + 'vlm_base_tag_name', 'vlm_tagme_tag_name', 'vlm_updateme_tag_name', + 'vlm_tagged_tag_name', 'vlm_errored_tag_name', 'vlm_incorrect_tag_name', + 'temp_image_dir', 'output_data_dir', 'delete_incorrect_markers', + 'create_markers', 'path_mutation' + ] + + for attr in required_attrs: + self.assertTrue(hasattr(config, attr), f"Missing attribute: {attr}") + + def test_global_config_values(self): + """Test that the global config has expected default values""" + config = haven_vlm_config.config + + self.assertEqual(config.video_frame_interval, haven_vlm_config.VIDEO_FRAME_INTERVAL) + self.assertEqual(config.video_threshold, haven_vlm_config.VIDEO_THRESHOLD) + self.assertEqual(config.image_threshold, haven_vlm_config.IMAGE_THRESHOLD) + self.assertEqual(config.concurrent_task_limit, haven_vlm_config.CONCURRENT_TASK_LIMIT) + self.assertEqual(config.vlm_base_tag_name, haven_vlm_config.VLM_BASE_TAG_NAME) + self.assertEqual(config.temp_image_dir, haven_vlm_config.TEMP_IMAGE_DIR) + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/plugins/AHavenVLMConnector/test_haven_vlm_connector.py b/plugins/AHavenVLMConnector/test_haven_vlm_connector.py new file mode 100644 index 00000000..c77f9122 --- /dev/null +++ b/plugins/AHavenVLMConnector/test_haven_vlm_connector.py @@ -0,0 +1,451 @@ +""" +Unit tests for haven_vlm_connector module +""" + +import unittest +import asyncio +import json +import tempfile +import os +from unittest.mock import patch, MagicMock, AsyncMock, mock_open +import sys + +# Mock the stashapi imports +sys.modules['stashapi.log'] = MagicMock() +sys.modules['stashapi.stashapp'] = MagicMock() + +# Mock the vlm_engine imports 
+sys.modules['vlm_engine'] = MagicMock() +sys.modules['vlm_engine.config_models'] = MagicMock() + +import haven_vlm_connector + + +class TestMainExecution(unittest.TestCase): + """Test cases for main execution functions""" + + def setUp(self): + """Set up test fixtures""" + self.sample_json_input = { + "server_connection": { + "PluginDir": "/tmp/plugin" + }, + "args": { + "mode": "tag_videos" + } + } + + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.tag_videos') + @patch('haven_vlm_connector.os.chdir') + def test_run_tag_videos(self, mock_chdir, mock_tag_videos, mock_media_handler): + """Test running tag_videos mode""" + output = {} + + with patch('haven_vlm_connector.read_json_input', return_value=self.sample_json_input): + asyncio.run(haven_vlm_connector.run(self.sample_json_input, output)) + + mock_chdir.assert_called_once_with("/tmp/plugin") + mock_media_handler.initialize.assert_called_once_with(self.sample_json_input["server_connection"]) + mock_tag_videos.assert_called_once() + self.assertEqual(output["output"], "ok") + + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.tag_images') + @patch('haven_vlm_connector.os.chdir') + def test_run_tag_images(self, mock_chdir, mock_tag_images, mock_media_handler): + """Test running tag_images mode""" + json_input = self.sample_json_input.copy() + json_input["args"]["mode"] = "tag_images" + output = {} + + asyncio.run(haven_vlm_connector.run(json_input, output)) + + mock_tag_images.assert_called_once() + self.assertEqual(output["output"], "ok") + + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.find_marker_settings') + @patch('haven_vlm_connector.os.chdir') + def test_run_find_marker_settings(self, mock_chdir, mock_find_marker_settings, mock_media_handler): + """Test running find_marker_settings mode""" + json_input = self.sample_json_input.copy() + json_input["args"]["mode"] = "find_marker_settings" + output = {} + + 
asyncio.run(haven_vlm_connector.run(json_input, output)) + + mock_find_marker_settings.assert_called_once() + self.assertEqual(output["output"], "ok") + + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.collect_incorrect_markers_and_images') + @patch('haven_vlm_connector.os.chdir') + def test_run_collect_incorrect_markers(self, mock_chdir, mock_collect, mock_media_handler): + """Test running collect_incorrect_markers mode""" + json_input = self.sample_json_input.copy() + json_input["args"]["mode"] = "collect_incorrect_markers" + output = {} + + asyncio.run(haven_vlm_connector.run(json_input, output)) + + mock_collect.assert_called_once() + self.assertEqual(output["output"], "ok") + + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.os.chdir') + def test_run_no_mode(self, mock_chdir, mock_media_handler): + """Test running with no mode specified""" + json_input = self.sample_json_input.copy() + del json_input["args"]["mode"] + output = {} + + asyncio.run(haven_vlm_connector.run(json_input, output)) + + self.assertEqual(output["output"], "ok") + + @patch('haven_vlm_connector.media_handler') + def test_run_media_handler_initialization_error(self, mock_media_handler): + """Test handling media handler initialization error""" + mock_media_handler.initialize.side_effect = Exception("Initialization failed") + output = {} + + with self.assertRaises(Exception): + asyncio.run(haven_vlm_connector.run(self.sample_json_input, output)) + + def test_read_json_input(self): + """Test reading JSON input from stdin""" + test_input = '{"test": "data"}' + + with patch('sys.stdin.read', return_value=test_input): + result = haven_vlm_connector.read_json_input() + + self.assertEqual(result, {"test": "data"}) + + +class TestHighLevelProcessingFunctions(unittest.TestCase): + """Test cases for high-level processing functions""" + + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.__tag_images') + 
@patch('haven_vlm_connector.asyncio.gather') + def test_tag_images_with_images(self, mock_gather, mock_tag_images, mock_media_handler): + """Test tagging images when images are available""" + mock_images = [{"id": 1}, {"id": 2}, {"id": 3}] + mock_media_handler.get_tagme_images.return_value = mock_images + + asyncio.run(haven_vlm_connector.tag_images()) + + mock_media_handler.get_tagme_images.assert_called_once() + mock_gather.assert_called_once() + + @patch('haven_vlm_connector.media_handler') + def test_tag_images_no_images(self, mock_media_handler): + """Test tagging images when no images are available""" + mock_media_handler.get_tagme_images.return_value = [] + + asyncio.run(haven_vlm_connector.tag_images()) + + mock_media_handler.get_tagme_images.assert_called_once() + + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.__tag_video') + @patch('haven_vlm_connector.asyncio.gather') + def test_tag_videos_with_scenes(self, mock_gather, mock_tag_video, mock_media_handler): + """Test tagging videos when scenes are available""" + mock_scenes = [{"id": 1}, {"id": 2}] + mock_media_handler.get_tagme_scenes.return_value = mock_scenes + + asyncio.run(haven_vlm_connector.tag_videos()) + + mock_media_handler.get_tagme_scenes.assert_called_once() + mock_gather.assert_called_once() + + @patch('haven_vlm_connector.media_handler') + def test_tag_videos_no_scenes(self, mock_media_handler): + """Test tagging videos when no scenes are available""" + mock_media_handler.get_tagme_scenes.return_value = [] + + asyncio.run(haven_vlm_connector.tag_videos()) + + mock_media_handler.get_tagme_scenes.assert_called_once() + + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.__find_marker_settings') + def test_find_marker_settings_single_scene(self, mock_find_settings, mock_media_handler): + """Test finding marker settings with single scene""" + mock_scenes = [{"id": 1}] + mock_media_handler.get_tagme_scenes.return_value = mock_scenes + + 
asyncio.run(haven_vlm_connector.find_marker_settings()) + + mock_media_handler.get_tagme_scenes.assert_called_once() + mock_find_settings.assert_called_once_with(mock_scenes[0]) + + @patch('haven_vlm_connector.media_handler') + def test_find_marker_settings_no_scenes(self, mock_media_handler): + """Test finding marker settings with no scenes""" + mock_media_handler.get_tagme_scenes.return_value = [] + + asyncio.run(haven_vlm_connector.find_marker_settings()) + + mock_media_handler.get_tagme_scenes.assert_called_once() + + @patch('haven_vlm_connector.media_handler') + def test_find_marker_settings_multiple_scenes(self, mock_media_handler): + """Test finding marker settings with multiple scenes""" + mock_scenes = [{"id": 1}, {"id": 2}] + mock_media_handler.get_tagme_scenes.return_value = mock_scenes + + asyncio.run(haven_vlm_connector.find_marker_settings()) + + mock_media_handler.get_tagme_scenes.assert_called_once() + + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.os.makedirs') + @patch('haven_vlm_connector.shutil.copy') + def test_collect_incorrect_markers_and_images_with_data(self, mock_copy, mock_makedirs, mock_media_handler): + """Test collecting incorrect markers and images with data""" + mock_images = [{"id": 1, "files": [{"path": "/path/to/image.jpg"}]}] + mock_markers = [{"id": 1, "scene": {"files": [{"path": "/path/to/video.mp4"}]}, "primary_tag": {"name": "test"}}] + mock_media_handler.get_incorrect_images.return_value = mock_images + mock_media_handler.get_incorrect_markers.return_value = mock_markers + mock_media_handler.get_image_paths_and_ids.return_value = (["/path/to/image.jpg"], [1], []) + + haven_vlm_connector.collect_incorrect_markers_and_images() + + mock_media_handler.get_incorrect_images.assert_called_once() + mock_media_handler.get_incorrect_markers.assert_called_once() + mock_media_handler.remove_incorrect_tag_from_images.assert_called_once() + + @patch('haven_vlm_connector.media_handler') + def 
test_collect_incorrect_markers_and_images_no_data(self, mock_media_handler): + """Test collecting incorrect markers and images with no data""" + mock_media_handler.get_incorrect_images.return_value = [] + mock_media_handler.get_incorrect_markers.return_value = [] + + haven_vlm_connector.collect_incorrect_markers_and_images() + + mock_media_handler.get_incorrect_images.assert_called_once() + mock_media_handler.get_incorrect_markers.assert_called_once() + + +class TestLowLevelProcessingFunctions(unittest.TestCase): + """Test cases for low-level processing functions""" + + @patch('haven_vlm_connector.vlm_engine') + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.semaphore') + def test_tag_images_success(self, mock_semaphore, mock_media_handler, mock_vlm_engine): + """Test successful image tagging""" + mock_images = [{"id": 1}, {"id": 2}] + mock_media_handler.get_image_paths_and_ids.return_value = (["/path1.jpg", "/path2.jpg"], [1, 2], []) + mock_vlm_engine.process_images_async.return_value = MagicMock(result=[{"tags": ["tag1"]}, {"tags": ["tag2"]}]) + mock_media_handler.get_tag_ids.return_value = [100, 200] + + # Mock semaphore context manager + mock_semaphore.__aenter__ = AsyncMock() + mock_semaphore.__aexit__ = AsyncMock() + + asyncio.run(haven_vlm_connector.__tag_images(mock_images)) + + mock_media_handler.get_image_paths_and_ids.assert_called_once_with(mock_images) + mock_vlm_engine.process_images_async.assert_called_once() + mock_media_handler.remove_tagme_tags_from_images.assert_called_once() + + @patch('haven_vlm_connector.vlm_engine') + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.semaphore') + def test_tag_images_error(self, mock_semaphore, mock_media_handler, mock_vlm_engine): + """Test image tagging with error""" + mock_images = [{"id": 1}] + mock_vlm_engine.process_images_async.side_effect = Exception("Processing error") + + # Mock semaphore context manager + mock_semaphore.__aenter__ = AsyncMock() + 
mock_semaphore.__aexit__ = AsyncMock() + + asyncio.run(haven_vlm_connector.__tag_images(mock_images)) + + mock_media_handler.add_error_images.assert_called_once() + + @patch('haven_vlm_connector.vlm_engine') + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.semaphore') + def test_tag_video_success(self, mock_semaphore, mock_media_handler, mock_vlm_engine): + """Test successful video tagging""" + mock_scene = { + "id": 1, + "files": [{"path": "/path/to/video.mp4"}], + "tags": [] + } + mock_vlm_engine.process_video_async.return_value = MagicMock( + video_tags={"category": ["tag1", "tag2"]}, + tag_timespans={} + ) + mock_media_handler.is_vr_scene.return_value = False + mock_media_handler.get_tag_ids.return_value = [100, 200] + + # Mock semaphore context manager + mock_semaphore.__aenter__ = AsyncMock() + mock_semaphore.__aexit__ = AsyncMock() + + asyncio.run(haven_vlm_connector.__tag_video(mock_scene)) + + mock_vlm_engine.process_video_async.assert_called_once() + + # Verify tags and markers were cleared before adding new ones + mock_media_handler.clear_all_tags_from_video.assert_called_once_with(1) + mock_media_handler.clear_all_markers_from_video.assert_called_once_with(1) + + mock_media_handler.add_tags_to_video.assert_called_once() + mock_media_handler.remove_tagme_tag_from_scene.assert_called_once() + + @patch('haven_vlm_connector.vlm_engine') + @patch('haven_vlm_connector.media_handler') + @patch('haven_vlm_connector.semaphore') + def test_tag_video_error(self, mock_semaphore, mock_media_handler, mock_vlm_engine): + """Test video tagging with error""" + mock_scene = { + "id": 1, + "files": [{"path": "/path/to/video.mp4"}], + "tags": [] + } + mock_vlm_engine.process_video_async.side_effect = Exception("Processing error") + + # Mock semaphore context manager + mock_semaphore.__aenter__ = AsyncMock() + mock_semaphore.__aexit__ = AsyncMock() + + asyncio.run(haven_vlm_connector.__tag_video(mock_scene)) + + 
mock_media_handler.add_error_scene.assert_called_once() + + @patch('haven_vlm_connector.vlm_engine') + @patch('haven_vlm_connector.media_handler') + def test_find_marker_settings_success(self, mock_media_handler, mock_vlm_engine): + """Test successful marker settings finding""" + mock_scene = { + "id": 1, + "files": [{"path": "/path/to/video.mp4"}] + } + mock_markers = [ + { + "primary_tag": {"name": "tag1"}, + "seconds": 10.0, + "end_seconds": 15.0 + } + ] + mock_media_handler.get_scene_markers.return_value = mock_markers + mock_vlm_engine.find_optimal_marker_settings_async.return_value = {"optimal": "settings"} + + asyncio.run(haven_vlm_connector.__find_marker_settings(mock_scene)) + + mock_media_handler.get_scene_markers.assert_called_once_with(1) + mock_vlm_engine.find_optimal_marker_settings_async.assert_called_once() + + @patch('haven_vlm_connector.media_handler') + def test_find_marker_settings_error(self, mock_media_handler): + """Test marker settings finding with error""" + mock_scene = { + "id": 1, + "files": [{"path": "/path/to/video.mp4"}] + } + mock_media_handler.get_scene_markers.side_effect = Exception("Marker error") + + asyncio.run(haven_vlm_connector.__find_marker_settings(mock_scene)) + + mock_media_handler.get_scene_markers.assert_called_once() + + +class TestUtilityFunctions(unittest.TestCase): + """Test cases for utility functions""" + + def test_increment_progress(self): + """Test progress increment""" + haven_vlm_connector.progress = 0.0 + haven_vlm_connector.increment = 0.1 + + haven_vlm_connector.increment_progress() + + self.assertEqual(haven_vlm_connector.progress, 0.1) + + @patch('haven_vlm_connector.vlm_engine') + async def test_cleanup(self, mock_vlm_engine): + """Test cleanup function""" + mock_vlm_engine.vlm_engine = MagicMock() + + await haven_vlm_connector.cleanup() + + mock_vlm_engine.vlm_engine.shutdown.assert_called_once() + + +class TestMainFunction(unittest.TestCase): + """Test cases for main function""" + + 
@patch('haven_vlm_connector.run') + @patch('haven_vlm_connector.read_json_input') + @patch('haven_vlm_connector.json.dumps') + @patch('builtins.print') + def test_main_success(self, mock_print, mock_json_dumps, mock_read_input, mock_run): + """Test successful main execution""" + mock_read_input.return_value = {"test": "data"} + mock_json_dumps.return_value = '{"output": "ok"}' + + asyncio.run(haven_vlm_connector.main()) + + mock_read_input.assert_called_once() + mock_run.assert_called_once() + mock_json_dumps.assert_called_once() + mock_print.assert_called() + + +class TestErrorHandling(unittest.TestCase): + """Test cases for error handling""" + + @patch('haven_vlm_connector.media_handler') + def test_tag_images_empty_paths(self, mock_media_handler): + """Test image tagging with empty paths""" + mock_images = [{"id": 1}] + mock_media_handler.get_image_paths_and_ids.return_value = ([], [1], []) + + # Mock semaphore context manager + with patch('haven_vlm_connector.semaphore') as mock_semaphore: + mock_semaphore.__aenter__ = AsyncMock() + mock_semaphore.__aexit__ = AsyncMock() + + asyncio.run(haven_vlm_connector.__tag_images(mock_images)) + + mock_media_handler.get_image_paths_and_ids.assert_called_once() + + @patch('haven_vlm_connector.vlm_engine') + @patch('haven_vlm_connector.media_handler') + def test_tag_video_no_detected_tags(self, mock_media_handler, mock_vlm_engine): + """Test video tagging with no detected tags""" + mock_scene = { + "id": 1, + "files": [{"path": "/path/to/video.mp4"}], + "tags": [] + } + mock_vlm_engine.process_video_async.return_value = MagicMock( + video_tags={}, + tag_timespans={} + ) + mock_media_handler.is_vr_scene.return_value = False + + # Mock semaphore context manager + with patch('haven_vlm_connector.semaphore') as mock_semaphore: + mock_semaphore.__aenter__ = AsyncMock() + mock_semaphore.__aexit__ = AsyncMock() + + asyncio.run(haven_vlm_connector.__tag_video(mock_scene)) + + # Verify clearing functions are NOT called when no tags 
are detected + mock_media_handler.clear_all_tags_from_video.assert_not_called() + mock_media_handler.clear_all_markers_from_video.assert_not_called() + + mock_media_handler.remove_tagme_tag_from_scene.assert_called_once() + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/plugins/AHavenVLMConnector/test_haven_vlm_engine.py b/plugins/AHavenVLMConnector/test_haven_vlm_engine.py new file mode 100644 index 00000000..65adcc55 --- /dev/null +++ b/plugins/AHavenVLMConnector/test_haven_vlm_engine.py @@ -0,0 +1,544 @@ +""" +Unit tests for haven_vlm_engine module +""" + +import unittest +import asyncio +import json +import tempfile +import os +from unittest.mock import patch, MagicMock, AsyncMock, mock_open +import sys + +# Mock the vlm_engine imports +sys.modules['vlm_engine'] = MagicMock() +sys.modules['vlm_engine.config_models'] = MagicMock() + +import haven_vlm_engine + + +class TestTimeFrame(unittest.TestCase): + """Test cases for TimeFrame dataclass""" + + def test_timeframe_creation(self): + """Test creating TimeFrame with all parameters""" + timeframe = haven_vlm_engine.TimeFrame( + start=10.0, + end=15.0, + total_confidence=0.85 + ) + + self.assertEqual(timeframe.start, 10.0) + self.assertEqual(timeframe.end, 15.0) + self.assertEqual(timeframe.total_confidence, 0.85) + + def test_timeframe_creation_without_confidence(self): + """Test creating TimeFrame without confidence""" + timeframe = haven_vlm_engine.TimeFrame( + start=10.0, + end=15.0 + ) + + self.assertEqual(timeframe.start, 10.0) + self.assertEqual(timeframe.end, 15.0) + self.assertIsNone(timeframe.total_confidence) + + def test_timeframe_to_json(self): + """Test TimeFrame to_json method""" + timeframe = haven_vlm_engine.TimeFrame( + start=10.0, + end=15.0, + total_confidence=0.85 + ) + + json_str = timeframe.to_json() + json_data = json.loads(json_str) + + self.assertEqual(json_data["start"], 10.0) + self.assertEqual(json_data["end"], 15.0) + 
self.assertEqual(json_data["total_confidence"], 0.85) + + def test_timeframe_to_json_without_confidence(self): + """Test TimeFrame to_json method without confidence""" + timeframe = haven_vlm_engine.TimeFrame( + start=10.0, + end=15.0 + ) + + json_str = timeframe.to_json() + json_data = json.loads(json_str) + + self.assertEqual(json_data["start"], 10.0) + self.assertEqual(json_data["end"], 15.0) + self.assertIsNone(json_data["total_confidence"]) + + def test_timeframe_str(self): + """Test TimeFrame string representation""" + timeframe = haven_vlm_engine.TimeFrame( + start=10.0, + end=15.0, + total_confidence=0.85 + ) + + str_repr = str(timeframe) + self.assertIn("10.0", str_repr) + self.assertIn("15.0", str_repr) + self.assertIn("0.85", str_repr) + + +class TestVideoTagInfo(unittest.TestCase): + """Test cases for VideoTagInfo dataclass""" + + def test_videotaginfo_creation(self): + """Test creating VideoTagInfo with all parameters""" + video_tags = {"category1": {"tag1", "tag2"}} + tag_totals = {"tag1": {"total": 0.8}} + tag_timespans = {"category1": {"tag1": [haven_vlm_engine.TimeFrame(10.0, 15.0)]}} + + video_info = haven_vlm_engine.VideoTagInfo( + video_duration=120.0, + video_tags=video_tags, + tag_totals=tag_totals, + tag_timespans=tag_timespans + ) + + self.assertEqual(video_info.video_duration, 120.0) + self.assertEqual(video_info.video_tags, video_tags) + self.assertEqual(video_info.tag_totals, tag_totals) + self.assertEqual(video_info.tag_timespans, tag_timespans) + + def test_videotaginfo_from_json(self): + """Test creating VideoTagInfo from JSON data""" + json_data = { + "video_duration": 120.0, + "video_tags": {"category1": ["tag1", "tag2"]}, + "tag_totals": {"tag1": {"total": 0.8}}, + "tag_timespans": { + "category1": { + "tag1": [ + {"start": 10.0, "end": 15.0, "total_confidence": 0.85} + ] + } + } + } + + video_info = haven_vlm_engine.VideoTagInfo.from_json(json_data) + + self.assertEqual(video_info.video_duration, 120.0) + 
self.assertEqual(video_info.video_tags, {"category1": ["tag1", "tag2"]}) + self.assertEqual(video_info.tag_totals, {"tag1": {"total": 0.8}}) + + # Check that tag_timespans contains TimeFrame objects + self.assertIn("category1", video_info.tag_timespans) + self.assertIn("tag1", video_info.tag_timespans["category1"]) + self.assertIsInstance(video_info.tag_timespans["category1"]["tag1"][0], haven_vlm_engine.TimeFrame) + + def test_videotaginfo_from_json_without_confidence(self): + """Test creating VideoTagInfo from JSON data without confidence""" + json_data = { + "video_duration": 120.0, + "video_tags": {"category1": ["tag1"]}, + "tag_totals": {"tag1": {"total": 0.8}}, + "tag_timespans": { + "category1": { + "tag1": [ + {"start": 10.0, "end": 15.0} + ] + } + } + } + + video_info = haven_vlm_engine.VideoTagInfo.from_json(json_data) + + timeframe = video_info.tag_timespans["category1"]["tag1"][0] + self.assertEqual(timeframe.start, 10.0) + self.assertEqual(timeframe.end, 15.0) + self.assertIsNone(timeframe.total_confidence) + + def test_videotaginfo_from_json_empty_timespans(self): + """Test creating VideoTagInfo from JSON data with empty timespans""" + json_data = { + "video_duration": 120.0, + "video_tags": {"category1": ["tag1"]}, + "tag_totals": {"tag1": {"total": 0.8}}, + "tag_timespans": {} + } + + video_info = haven_vlm_engine.VideoTagInfo.from_json(json_data) + + self.assertEqual(video_info.video_duration, 120.0) + self.assertEqual(video_info.tag_timespans, {}) + + def test_videotaginfo_str(self): + """Test VideoTagInfo string representation""" + video_info = haven_vlm_engine.VideoTagInfo( + video_duration=120.0, + video_tags={"category1": {"tag1"}}, + tag_totals={"tag1": {"total": 0.8}}, + tag_timespans={"category1": {"tag1": []}} + ) + + str_repr = str(video_info) + self.assertIn("120.0", str_repr) + self.assertIn("1", str_repr) # number of tags + self.assertIn("1", str_repr) # number of timespans + + +class TestImageResult(unittest.TestCase): + """Test cases 
for ImageResult dataclass""" + + def test_imageresult_creation(self): + """Test creating ImageResult with valid data""" + result_data = [{"tags": ["tag1"], "confidence": 0.8}] + image_result = haven_vlm_engine.ImageResult(result=result_data) + + self.assertEqual(image_result.result, result_data) + + def test_imageresult_creation_empty_list(self): + """Test creating ImageResult with empty list""" + with self.assertRaises(ValueError): + haven_vlm_engine.ImageResult(result=[]) + + def test_imageresult_creation_none_result(self): + """Test creating ImageResult with None result""" + with self.assertRaises(ValueError): + haven_vlm_engine.ImageResult(result=None) + + +class TestHavenVLMEngine(unittest.TestCase): + """Test cases for HavenVLMEngine class""" + + def setUp(self): + """Set up test fixtures""" + self.engine = haven_vlm_engine.HavenVLMEngine() + + def test_engine_initialization(self): + """Test engine initialization""" + self.assertIsNone(self.engine.engine) + self.assertIsNone(self.engine.engine_config) + self.assertFalse(self.engine._initialized) + + @patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_initialize_success(self, mock_vlm_engine_class, mock_config): + """Test successful engine initialization""" + mock_engine_instance = MagicMock() + mock_vlm_engine_class.return_value = mock_engine_instance + mock_config.config.vlm_engine_config = {"test": "config"} + + await self.engine.initialize() + + self.assertTrue(self.engine._initialized) + mock_vlm_engine_class.assert_called_once() + mock_engine_instance.initialize.assert_called_once() + + @patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_initialize_already_initialized(self, mock_vlm_engine_class, mock_config): + """Test initialization when already initialized""" + self.engine._initialized = True + + await self.engine.initialize() + + # Should not call VLMEngine constructor again + mock_vlm_engine_class.assert_not_called() + + 
@patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_initialize_error(self, mock_vlm_engine_class, mock_config): + """Test initialization with error""" + mock_vlm_engine_class.side_effect = Exception("Initialization failed") + mock_config.config.vlm_engine_config = {"test": "config"} + + with self.assertRaises(Exception): + await self.engine.initialize() + + self.assertFalse(self.engine._initialized) + + @patch('haven_vlm_engine.config') + def test_create_engine_config(self, mock_config): + """Test creating engine configuration""" + mock_config.config.vlm_engine_config = { + "active_ai_models": ["model1"], + "pipelines": { + "pipeline1": { + "inputs": ["input1"], + "output": "output1", + "short_name": "short1", + "version": 1.0, + "models": [ + { + "name": "model1", + "inputs": ["input1"], + "outputs": "output1" + } + ] + } + }, + "models": { + "model1": { + "type": "vlm_model", + "model_file_name": "model1.py", + "model_category": "test", + "model_id": "test_model", + "model_identifier": 123, + "model_version": "1.0", + "use_multiplexer": True, + "max_concurrent_requests": 10, + "connection_pool_size": 20, + "multiplexer_endpoints": [], + "tag_list": ["tag1"] + } + }, + "category_config": {"test": {}} + } + + config = self.engine._create_engine_config() + + self.assertIsNotNone(config) + # Note: We can't easily test the exact structure without the actual VLM Engine classes + # but we can verify the method doesn't raise exceptions + + @patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_process_video_success(self, mock_vlm_engine_class, mock_config): + """Test successful video processing""" + # Setup mocks + mock_engine_instance = MagicMock() + mock_vlm_engine_class.return_value = mock_engine_instance + mock_config.config.video_frame_interval = 2.0 + mock_config.config.video_threshold = 0.3 + mock_config.config.video_confidence_return = True + + # Mock the engine's process_video method + 
mock_engine_instance.process_video.return_value = { + "video_duration": 120.0, + "video_tags": {"category1": ["tag1"]}, + "tag_totals": {"tag1": {"total": 0.8}}, + "tag_timespans": {} + } + + # Initialize engine + await self.engine.initialize() + + # Process video + result = await self.engine.process_video("/path/to/video.mp4") + + self.assertIsInstance(result, haven_vlm_engine.VideoTagInfo) + mock_engine_instance.process_video.assert_called_once() + + @patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_process_video_not_initialized(self, mock_vlm_engine_class, mock_config): + """Test video processing when not initialized""" + mock_engine_instance = MagicMock() + mock_vlm_engine_class.return_value = mock_engine_instance + mock_config.config.video_frame_interval = 2.0 + mock_config.config.video_threshold = 0.3 + mock_config.config.video_confidence_return = True + + mock_engine_instance.process_video.return_value = { + "video_duration": 120.0, + "video_tags": {"category1": ["tag1"]}, + "tag_totals": {"tag1": {"total": 0.8}}, + "tag_timespans": {} + } + + # Process video without explicit initialization + result = await self.engine.process_video("/path/to/video.mp4") + + self.assertIsInstance(result, haven_vlm_engine.VideoTagInfo) + mock_engine_instance.initialize.assert_called_once() + + @patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_process_video_error(self, mock_vlm_engine_class, mock_config): + """Test video processing with error""" + mock_engine_instance = MagicMock() + mock_vlm_engine_class.return_value = mock_engine_instance + mock_config.config.video_frame_interval = 2.0 + mock_config.config.video_threshold = 0.3 + mock_config.config.video_confidence_return = True + + mock_engine_instance.process_video.side_effect = Exception("Processing failed") + + await self.engine.initialize() + + with self.assertRaises(Exception): + await self.engine.process_video("/path/to/video.mp4") + + 
@patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_process_images_success(self, mock_vlm_engine_class, mock_config): + """Test successful image processing""" + # Setup mocks + mock_engine_instance = MagicMock() + mock_vlm_engine_class.return_value = mock_engine_instance + mock_config.config.image_threshold = 0.5 + mock_config.config.image_confidence_return = False + + # Mock the engine's process_images method + mock_engine_instance.process_images.return_value = [ + {"tags": ["tag1"], "confidence": 0.8} + ] + + # Initialize engine + await self.engine.initialize() + + # Process images + result = await self.engine.process_images(["/path/to/image1.jpg"]) + + self.assertIsInstance(result, haven_vlm_engine.ImageResult) + mock_engine_instance.process_images.assert_called_once() + + @patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_process_images_error(self, mock_vlm_engine_class, mock_config): + """Test image processing with error""" + mock_engine_instance = MagicMock() + mock_vlm_engine_class.return_value = mock_engine_instance + mock_config.config.image_threshold = 0.5 + mock_config.config.image_confidence_return = False + + mock_engine_instance.process_images.side_effect = Exception("Processing failed") + + await self.engine.initialize() + + with self.assertRaises(Exception): + await self.engine.process_images(["/path/to/image1.jpg"]) + + @patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_find_optimal_marker_settings_success(self, mock_vlm_engine_class, mock_config): + """Test successful marker settings optimization""" + # Setup mocks + mock_engine_instance = MagicMock() + mock_vlm_engine_class.return_value = mock_engine_instance + mock_engine_instance.optimize_timeframe_settings.return_value = {"optimal": "settings"} + + # Initialize engine + await self.engine.initialize() + + # Test data + existing_json = {"existing": "data"} + desired_timespan_data = 
{ + "tag1": haven_vlm_engine.TimeFrame(10.0, 15.0, 0.8) + } + + # Find optimal settings + result = await self.engine.find_optimal_marker_settings(existing_json, desired_timespan_data) + + self.assertEqual(result, {"optimal": "settings"}) + mock_engine_instance.optimize_timeframe_settings.assert_called_once() + + @patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_find_optimal_marker_settings_error(self, mock_vlm_engine_class, mock_config): + """Test marker settings optimization with error""" + mock_engine_instance = MagicMock() + mock_vlm_engine_class.return_value = mock_engine_instance + mock_engine_instance.optimize_timeframe_settings.side_effect = Exception("Optimization failed") + + await self.engine.initialize() + + existing_json = {"existing": "data"} + desired_timespan_data = { + "tag1": haven_vlm_engine.TimeFrame(10.0, 15.0, 0.8) + } + + with self.assertRaises(Exception): + await self.engine.find_optimal_marker_settings(existing_json, desired_timespan_data) + + @patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_shutdown_success(self, mock_vlm_engine_class, mock_config): + """Test successful engine shutdown""" + mock_engine_instance = MagicMock() + mock_vlm_engine_class.return_value = mock_engine_instance + + await self.engine.initialize() + await self.engine.shutdown() + + mock_engine_instance.shutdown.assert_called_once() + self.assertFalse(self.engine._initialized) + + @patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_shutdown_not_initialized(self, mock_vlm_engine_class, mock_config): + """Test shutdown when not initialized""" + await self.engine.shutdown() + + # Should not raise any exceptions + self.assertFalse(self.engine._initialized) + + @patch('haven_vlm_engine.config') + @patch('haven_vlm_engine.VLMEngine') + async def test_shutdown_error(self, mock_vlm_engine_class, mock_config): + """Test shutdown with error""" + 
mock_engine_instance = MagicMock() + mock_vlm_engine_class.return_value = mock_engine_instance + mock_engine_instance.shutdown.side_effect = Exception("Shutdown failed") + + await self.engine.initialize() + await self.engine.shutdown() + + # Should handle the error gracefully + self.assertFalse(self.engine._initialized) + + +class TestConvenienceFunctions(unittest.TestCase): + """Test cases for convenience functions""" + + @patch('haven_vlm_engine.vlm_engine') + async def test_process_video_async(self, mock_vlm_engine): + """Test process_video_async convenience function""" + mock_vlm_engine.process_video.return_value = MagicMock() + + result = await haven_vlm_engine.process_video_async("/path/to/video.mp4") + + mock_vlm_engine.process_video.assert_called_once() + self.assertEqual(result, mock_vlm_engine.process_video.return_value) + + @patch('haven_vlm_engine.vlm_engine') + async def test_process_images_async(self, mock_vlm_engine): + """Test process_images_async convenience function""" + mock_vlm_engine.process_images.return_value = MagicMock() + + result = await haven_vlm_engine.process_images_async(["/path/to/image.jpg"]) + + mock_vlm_engine.process_images.assert_called_once() + self.assertEqual(result, mock_vlm_engine.process_images.return_value) + + @patch('haven_vlm_engine.vlm_engine') + async def test_find_optimal_marker_settings_async(self, mock_vlm_engine): + """Test find_optimal_marker_settings_async convenience function""" + mock_vlm_engine.find_optimal_marker_settings.return_value = {"optimal": "settings"} + + existing_json = {"existing": "data"} + desired_timespan_data = { + "tag1": haven_vlm_engine.TimeFrame(10.0, 15.0, 0.8) + } + + result = await haven_vlm_engine.find_optimal_marker_settings_async(existing_json, desired_timespan_data) + + mock_vlm_engine.find_optimal_marker_settings.assert_called_once() + self.assertEqual(result, {"optimal": "settings"}) + + +class TestGlobalVLMEngineInstance(unittest.TestCase): + """Test cases for global VLM engine 
instance""" + + def test_global_vlm_engine_exists(self): + """Test that global VLM engine instance exists""" + self.assertIsInstance(haven_vlm_engine.vlm_engine, haven_vlm_engine.HavenVLMEngine) + + def test_global_vlm_engine_is_singleton(self): + """Test that global VLM engine is a singleton""" + engine1 = haven_vlm_engine.vlm_engine + engine2 = haven_vlm_engine.vlm_engine + + self.assertIs(engine1, engine2) + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/plugins/AHavenVLMConnector/test_haven_vlm_utility.py b/plugins/AHavenVLMConnector/test_haven_vlm_utility.py new file mode 100644 index 00000000..be3d6263 --- /dev/null +++ b/plugins/AHavenVLMConnector/test_haven_vlm_utility.py @@ -0,0 +1,604 @@ +""" +Unit tests for haven_vlm_utility module +""" + +import unittest +import tempfile +import os +import shutil +import time +from unittest.mock import patch, mock_open, MagicMock +import yaml + +import haven_vlm_utility + + +class TestPathMutations(unittest.TestCase): + """Test cases for path mutation functions""" + + def test_apply_path_mutations_with_mutations(self): + """Test applying path mutations with valid mutations""" + mutations = {"E:": "F:", "G:": "D:"} + path = "E:\\videos\\test.mp4" + + result = haven_vlm_utility.apply_path_mutations(path, mutations) + + self.assertEqual(result, "F:\\videos\\test.mp4") + + def test_apply_path_mutations_without_mutations(self): + """Test applying path mutations with empty mutations""" + mutations = {} + path = "E:\\videos\\test.mp4" + + result = haven_vlm_utility.apply_path_mutations(path, mutations) + + self.assertEqual(result, path) + + def test_apply_path_mutations_with_none_mutations(self): + """Test applying path mutations with None mutations""" + mutations = None + path = "E:\\videos\\test.mp4" + + result = haven_vlm_utility.apply_path_mutations(path, mutations) + + self.assertEqual(result, path) + + def test_apply_path_mutations_no_match(self): + """Test applying path 
mutations when no mutation matches""" + mutations = {"E:": "F:", "G:": "D:"} + path = "C:\\videos\\test.mp4" + + result = haven_vlm_utility.apply_path_mutations(path, mutations) + + self.assertEqual(result, path) + + def test_apply_path_mutations_multiple_matches(self): + """Test applying path mutations with multiple possible matches""" + mutations = {"E:": "F:", "E:\\videos": "F:\\movies"} + path = "E:\\videos\\test.mp4" + + result = haven_vlm_utility.apply_path_mutations(path, mutations) + + # Should use the first match + self.assertEqual(result, "F:\\videos\\test.mp4") + + +class TestDirectoryOperations(unittest.TestCase): + """Test cases for directory operations""" + + def test_ensure_directory_exists_new_directory(self): + """Test creating a new directory""" + with tempfile.TemporaryDirectory() as temp_dir: + new_dir = os.path.join(temp_dir, "test_subdir") + + haven_vlm_utility.ensure_directory_exists(new_dir) + + self.assertTrue(os.path.exists(new_dir)) + self.assertTrue(os.path.isdir(new_dir)) + + def test_ensure_directory_exists_existing_directory(self): + """Test ensuring directory exists when it already exists""" + with tempfile.TemporaryDirectory() as temp_dir: + haven_vlm_utility.ensure_directory_exists(temp_dir) + + self.assertTrue(os.path.exists(temp_dir)) + self.assertTrue(os.path.isdir(temp_dir)) + + def test_ensure_directory_exists_nested_directories(self): + """Test creating nested directories""" + with tempfile.TemporaryDirectory() as temp_dir: + nested_dir = os.path.join(temp_dir, "level1", "level2", "level3") + + haven_vlm_utility.ensure_directory_exists(nested_dir) + + self.assertTrue(os.path.exists(nested_dir)) + self.assertTrue(os.path.isdir(nested_dir)) + + +class TestSafeFileOperations(unittest.TestCase): + """Test cases for safe file operations""" + + def test_safe_file_operation_success(self): + """Test successful file operation""" + def test_func(a, b, c=10): + return a + b + c + + result = 
haven_vlm_utility.safe_file_operation(test_func, 1, 2, c=5) + + self.assertEqual(result, 8) + + def test_safe_file_operation_os_error(self): + """Test file operation with OSError""" + def test_func(): + raise OSError("File not found") + + result = haven_vlm_utility.safe_file_operation(test_func) + + self.assertIsNone(result) + + def test_safe_file_operation_io_error(self): + """Test file operation with IOError""" + def test_func(): + raise IOError("Permission denied") + + result = haven_vlm_utility.safe_file_operation(test_func) + + self.assertIsNone(result) + + def test_safe_file_operation_unexpected_error(self): + """Test file operation with unexpected error""" + def test_func(): + raise ValueError("Unexpected error") + + result = haven_vlm_utility.safe_file_operation(test_func) + + self.assertIsNone(result) + + +class TestYamlConfigOperations(unittest.TestCase): + """Test cases for YAML configuration operations""" + + def setUp(self): + """Set up test fixtures""" + self.test_config = { + "video_frame_interval": 2.0, + "video_threshold": 0.3, + "image_threshold": 0.5, + "endpoints": [ + {"url": "http://localhost:1234", "weight": 5}, + {"url": "https://cloud.example.com", "weight": 1} + ] + } + + def test_load_yaml_config_success(self): + """Test successfully loading YAML configuration""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.yml', delete=False) as f: + yaml.dump(self.test_config, f) + config_path = f.name + + try: + result = haven_vlm_utility.load_yaml_config(config_path) + + self.assertEqual(result, self.test_config) + finally: + os.unlink(config_path) + + def test_load_yaml_config_file_not_found(self): + """Test loading YAML configuration from nonexistent file""" + result = haven_vlm_utility.load_yaml_config("nonexistent_file.yml") + + self.assertIsNone(result) + + def test_load_yaml_config_invalid_yaml(self): + """Test loading YAML configuration with invalid YAML""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.yml', delete=False) as f: 
+ f.write("invalid: yaml: content: [") + config_path = f.name + + try: + result = haven_vlm_utility.load_yaml_config(config_path) + + self.assertIsNone(result) + finally: + os.unlink(config_path) + + def test_load_yaml_config_permission_error(self): + """Test loading YAML configuration with permission error""" + with patch('builtins.open', side_effect=PermissionError("Permission denied")): + result = haven_vlm_utility.load_yaml_config("test.yml") + + self.assertIsNone(result) + + def test_save_yaml_config_success(self): + """Test successfully saving YAML configuration""" + with tempfile.TemporaryDirectory() as temp_dir: + config_path = os.path.join(temp_dir, "test_config.yml") + + result = haven_vlm_utility.save_yaml_config(self.test_config, config_path) + + self.assertTrue(result) + self.assertTrue(os.path.exists(config_path)) + + # Verify the saved content + with open(config_path, 'r') as f: + loaded_config = yaml.safe_load(f) + + self.assertEqual(loaded_config, self.test_config) + + def test_save_yaml_config_with_nested_directories(self): + """Test saving YAML configuration to nested directory""" + with tempfile.TemporaryDirectory() as temp_dir: + config_path = os.path.join(temp_dir, "nested", "dir", "test_config.yml") + + result = haven_vlm_utility.save_yaml_config(self.test_config, config_path) + + self.assertTrue(result) + self.assertTrue(os.path.exists(config_path)) + + def test_save_yaml_config_permission_error(self): + """Test saving YAML configuration with permission error""" + with patch('builtins.open', side_effect=PermissionError("Permission denied")): + result = haven_vlm_utility.save_yaml_config(self.test_config, "test.yml") + + self.assertFalse(result) + + +class TestFileValidation(unittest.TestCase): + """Test cases for file validation functions""" + + def test_validate_file_path_existing_file(self): + """Test validating an existing file path""" + with tempfile.NamedTemporaryFile(delete=False) as f: + file_path = f.name + + try: + result = 
haven_vlm_utility.validate_file_path(file_path) + self.assertTrue(result) + finally: + os.unlink(file_path) + + def test_validate_file_path_nonexistent_file(self): + """Test validating a nonexistent file path""" + result = haven_vlm_utility.validate_file_path("nonexistent_file.txt") + self.assertFalse(result) + + def test_validate_file_path_directory(self): + """Test validating a directory path""" + with tempfile.TemporaryDirectory() as temp_dir: + result = haven_vlm_utility.validate_file_path(temp_dir) + self.assertFalse(result) + + def test_validate_file_path_permission_error(self): + """Test validating file path with permission error""" + with patch('os.path.isfile', side_effect=OSError("Permission denied")): + result = haven_vlm_utility.validate_file_path("test.txt") + self.assertFalse(result) + + +class TestFileExtensionFunctions(unittest.TestCase): + """Test cases for file extension functions""" + + def test_get_file_extension_with_extension(self): + """Test getting file extension from file with extension""" + result = haven_vlm_utility.get_file_extension("test.mp4") + self.assertEqual(result, ".mp4") + + def test_get_file_extension_without_extension(self): + """Test getting file extension from file without extension""" + result = haven_vlm_utility.get_file_extension("test") + self.assertEqual(result, "") + + def test_get_file_extension_multiple_dots(self): + """Test getting file extension from file with multiple dots""" + result = haven_vlm_utility.get_file_extension("test.backup.mp4") + self.assertEqual(result, ".mp4") + + def test_get_file_extension_uppercase(self): + """Test getting file extension from file with uppercase extension""" + result = haven_vlm_utility.get_file_extension("test.MP4") + self.assertEqual(result, ".mp4") + + def test_is_video_file_valid_extensions(self): + """Test video file detection with valid extensions""" + video_files = ["test.mp4", "test.avi", "test.mkv", "test.mov", "test.wmv", "test.flv", "test.webm", "test.m4v"] + + for 
video_file in video_files: + result = haven_vlm_utility.is_video_file(video_file) + self.assertTrue(result, f"Failed for {video_file}") + + def test_is_video_file_invalid_extensions(self): + """Test video file detection with invalid extensions""" + non_video_files = ["test.jpg", "test.txt", "test.pdf", "test.exe"] + + for non_video_file in non_video_files: + result = haven_vlm_utility.is_video_file(non_video_file) + self.assertFalse(result, f"Failed for {non_video_file}") + + def test_is_image_file_valid_extensions(self): + """Test image file detection with valid extensions""" + image_files = ["test.jpg", "test.jpeg", "test.png", "test.gif", "test.bmp", "test.tiff", "test.webp"] + + for image_file in image_files: + result = haven_vlm_utility.is_image_file(image_file) + self.assertTrue(result, f"Failed for {image_file}") + + def test_is_image_file_invalid_extensions(self): + """Test image file detection with invalid extensions""" + non_image_files = ["test.mp4", "test.txt", "test.pdf", "test.exe"] + + for non_image_file in non_image_files: + result = haven_vlm_utility.is_image_file(non_image_file) + self.assertFalse(result, f"Failed for {non_image_file}") + + +class TestFormattingFunctions(unittest.TestCase): + """Test cases for formatting functions""" + + def test_format_duration_seconds(self): + """Test formatting duration in seconds""" + result = haven_vlm_utility.format_duration(45.5) + self.assertEqual(result, "45.5s") + + def test_format_duration_minutes(self): + """Test formatting duration in minutes""" + result = haven_vlm_utility.format_duration(125.3) + self.assertEqual(result, "2m 5.3s") + + def test_format_duration_hours(self): + """Test formatting duration in hours""" + result = haven_vlm_utility.format_duration(7325.7) + self.assertEqual(result, "2h 2m 5.7s") + + def test_format_duration_zero(self): + """Test formatting zero duration""" + result = haven_vlm_utility.format_duration(0) + self.assertEqual(result, "0.0s") + + def 
test_format_file_size_bytes(self): + """Test formatting file size in bytes""" + result = haven_vlm_utility.format_file_size(512) + self.assertEqual(result, "512.0 B") + + def test_format_file_size_kilobytes(self): + """Test formatting file size in kilobytes""" + result = haven_vlm_utility.format_file_size(1536) + self.assertEqual(result, "1.5 KB") + + def test_format_file_size_megabytes(self): + """Test formatting file size in megabytes""" + result = haven_vlm_utility.format_file_size(1572864) + self.assertEqual(result, "1.5 MB") + + def test_format_file_size_gigabytes(self): + """Test formatting file size in gigabytes""" + result = haven_vlm_utility.format_file_size(1610612736) + self.assertEqual(result, "1.5 GB") + + def test_format_file_size_zero(self): + """Test formatting zero file size""" + result = haven_vlm_utility.format_file_size(0) + self.assertEqual(result, "0.0 B") + + +class TestSanitizationFunctions(unittest.TestCase): + """Test cases for sanitization functions""" + + def test_sanitize_filename_valid(self): + """Test sanitizing a valid filename""" + result = haven_vlm_utility.sanitize_filename("valid_filename.txt") + self.assertEqual(result, "valid_filename.txt") + + def test_sanitize_filename_invalid_chars(self): + """Test sanitizing filename with invalid characters""" + result = haven_vlm_utility.sanitize_filename("file:with/invalid\\chars|?*") + self.assertEqual(result, "file_name__with_invalid_chars___") + + def test_sanitize_filename_leading_trailing_spaces(self): + """Test sanitizing filename with leading/trailing spaces""" + result = haven_vlm_utility.sanitize_filename(" filename.txt ") + self.assertEqual(result, "filename.txt") + + def test_sanitize_filename_leading_trailing_dots(self): + """Test sanitizing filename with leading/trailing dots""" + result = haven_vlm_utility.sanitize_filename("...filename.txt...") + self.assertEqual(result, "filename.txt") + + def test_sanitize_filename_empty(self): + """Test sanitizing empty filename""" + 
result = haven_vlm_utility.sanitize_filename("") + self.assertEqual(result, "unnamed") + + def test_sanitize_filename_only_spaces(self): + """Test sanitizing filename with only spaces""" + result = haven_vlm_utility.sanitize_filename(" ") + self.assertEqual(result, "unnamed") + + +class TestBackupFunctions(unittest.TestCase): + """Test cases for backup functions""" + + def test_create_backup_file_success(self): + """Test successfully creating a backup file""" + with tempfile.NamedTemporaryFile(delete=False) as f: + original_file = f.name + f.write(b"test content") + + try: + result = haven_vlm_utility.create_backup_file(original_file) + + self.assertIsNotNone(result) + self.assertTrue(os.path.exists(result)) + self.assertTrue(result.endswith(".backup")) + + # Verify backup content + with open(result, 'rb') as f: + content = f.read() + self.assertEqual(content, b"test content") + + # Clean up backup + os.unlink(result) + finally: + os.unlink(original_file) + + def test_create_backup_file_custom_suffix(self): + """Test creating backup file with custom suffix""" + with tempfile.NamedTemporaryFile(delete=False) as f: + original_file = f.name + f.write(b"test content") + + try: + result = haven_vlm_utility.create_backup_file(original_file, ".custom") + + self.assertIsNotNone(result) + self.assertTrue(result.endswith(".custom")) + + # Clean up backup + os.unlink(result) + finally: + os.unlink(original_file) + + def test_create_backup_file_nonexistent(self): + """Test creating backup of nonexistent file""" + result = haven_vlm_utility.create_backup_file("nonexistent_file.txt") + self.assertIsNone(result) + + def test_create_backup_file_permission_error(self): + """Test creating backup file with permission error""" + with patch('shutil.copy2', side_effect=PermissionError("Permission denied")): + with tempfile.NamedTemporaryFile(delete=False) as f: + original_file = f.name + + try: + result = haven_vlm_utility.create_backup_file(original_file) + self.assertIsNone(result) + 
finally: + os.unlink(original_file) + + +class TestDictionaryOperations(unittest.TestCase): + """Test cases for dictionary operations""" + + def test_merge_dictionaries_simple(self): + """Test simple dictionary merging""" + dict1 = {"a": 1, "b": 2} + dict2 = {"c": 3, "d": 4} + + result = haven_vlm_utility.merge_dictionaries(dict1, dict2) + + expected = {"a": 1, "b": 2, "c": 3, "d": 4} + self.assertEqual(result, expected) + + def test_merge_dictionaries_overwrite(self): + """Test dictionary merging with overwrite""" + dict1 = {"a": 1, "b": 2} + dict2 = {"b": 3, "c": 4} + + result = haven_vlm_utility.merge_dictionaries(dict1, dict2, overwrite=True) + + expected = {"a": 1, "b": 3, "c": 4} + self.assertEqual(result, expected) + + def test_merge_dictionaries_no_overwrite(self): + """Test dictionary merging without overwrite""" + dict1 = {"a": 1, "b": 2} + dict2 = {"b": 3, "c": 4} + + result = haven_vlm_utility.merge_dictionaries(dict1, dict2, overwrite=False) + + expected = {"a": 1, "b": 2, "c": 4} + self.assertEqual(result, expected) + + def test_merge_dictionaries_nested(self): + """Test merging nested dictionaries""" + dict1 = {"a": 1, "b": {"x": 10, "y": 20}} + dict2 = {"c": 3, "b": {"y": 25, "z": 30}} + + result = haven_vlm_utility.merge_dictionaries(dict1, dict2, overwrite=True) + + expected = {"a": 1, "b": {"x": 10, "y": 25, "z": 30}, "c": 3} + self.assertEqual(result, expected) + + def test_merge_dictionaries_empty(self): + """Test merging with empty dictionaries""" + dict1 = {} + dict2 = {"a": 1, "b": 2} + + result = haven_vlm_utility.merge_dictionaries(dict1, dict2) + + self.assertEqual(result, dict2) + + +class TestListOperations(unittest.TestCase): + """Test cases for list operations""" + + def test_chunk_list_even_chunks(self): + """Test chunking list into even chunks""" + lst = [1, 2, 3, 4, 5, 6] + result = haven_vlm_utility.chunk_list(lst, 2) + + expected = [[1, 2], [3, 4], [5, 6]] + self.assertEqual(result, expected) + + def 
test_chunk_list_uneven_chunks(self): + """Test chunking list into uneven chunks""" + lst = [1, 2, 3, 4, 5] + result = haven_vlm_utility.chunk_list(lst, 2) + + expected = [[1, 2], [3, 4], [5]] + self.assertEqual(result, expected) + + def test_chunk_list_empty_list(self): + """Test chunking empty list""" + lst = [] + result = haven_vlm_utility.chunk_list(lst, 3) + + expected = [] + self.assertEqual(result, expected) + + def test_chunk_list_chunk_size_larger_than_list(self): + """Test chunking when chunk size is larger than list""" + lst = [1, 2, 3] + result = haven_vlm_utility.chunk_list(lst, 5) + + expected = [[1, 2, 3]] + self.assertEqual(result, expected) + + +class TestRetryOperations(unittest.TestCase): + """Test cases for retry operations""" + + def test_retry_operation_success_first_try(self): + """Test retry operation that succeeds on first try""" + def test_func(): + return "success" + + result = haven_vlm_utility.retry_operation(test_func) + + self.assertEqual(result, "success") + + def test_retry_operation_success_after_retries(self): + """Test retry operation that succeeds after some retries""" + call_count = 0 + + def test_func(): + nonlocal call_count + call_count += 1 + if call_count < 3: + raise ValueError("Temporary error") + return "success" + + result = haven_vlm_utility.retry_operation(test_func, max_retries=3, delay=0.1) + + self.assertEqual(result, "success") + self.assertEqual(call_count, 3) + + def test_retry_operation_all_retries_fail(self): + """Test retry operation that fails all retries""" + def test_func(): + raise ValueError("Persistent error") + + result = haven_vlm_utility.retry_operation(test_func, max_retries=2, delay=0.1) + + self.assertIsNone(result) + + def test_retry_operation_with_arguments(self): + """Test retry operation with function arguments""" + def test_func(a, b, c=10): + return a + b + c + + result = haven_vlm_utility.retry_operation(test_func, max_retries=1, delay=0.1, 1, 2, c=5) + + self.assertEqual(result, 8) + + def 
test_retry_operation_with_keyword_arguments(self): + """Test retry operation with keyword arguments""" + def test_func(**kwargs): + return kwargs.get('value', 0) + + result = haven_vlm_utility.retry_operation(test_func, max_retries=1, delay=0.1, value=42) + + self.assertEqual(result, 42) + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/plugins/AIOverhaul/AIOverhaul.yml b/plugins/AIOverhaul/AIOverhaul.yml index 8508554c..afac97cd 100644 --- a/plugins/AIOverhaul/AIOverhaul.yml +++ b/plugins/AIOverhaul/AIOverhaul.yml @@ -1,6 +1,6 @@ name: AIOverhaul description: AI Overhaul for Stash with a full plugin engine included to install and manage asynchronous stash plugins for AI or other purposes. -version: 0.9.0 +version: 0.9.3 url: https://discourse.stashapp.cc/t/aioverhaul/4847 ui: javascript: @@ -26,7 +26,17 @@ ui: - http://localhost:4153 - ws://localhost:4153 - https://localhost:4153 + - http://127.0.0.1:4153 + - ws://127.0.0.1:4153 + - https://127.0.0.1:4153 # Add additional urls here for the stash-ai-server if your browser is not on the same host + script-src: + - 'self' + - http://localhost:4153 + - https://localhost:4153 + - 'unsafe-inline' + - 'unsafe-eval' + # Allow plugin JavaScript files to be loaded from the backend server interface: raw exec: - python diff --git a/plugins/AIOverhaul/BackendBase.js b/plugins/AIOverhaul/BackendBase.js index 3794d143..2c995dce 100644 --- a/plugins/AIOverhaul/BackendBase.js +++ b/plugins/AIOverhaul/BackendBase.js @@ -7,6 +7,7 @@ defaultBackendBase; const PLUGIN_NAME = 'AIOverhaul'; // Local default to keep the UI functional before plugin config loads. 
const DEFAULT_BACKEND_BASE = 'http://localhost:4153'; +const STORAGE_KEY = 'ai_backend_base_url'; const CONFIG_QUERY = `query AIOverhaulPluginConfig($ids: [ID!]) { configuration { plugins(include: $ids) @@ -138,6 +139,15 @@ function applyPluginConfig(base, captureEvents, sharedKey) { const value = normalized || ''; try { window.AI_BACKEND_URL = value; + try { + if (value) { + sessionStorage.setItem(STORAGE_KEY, value); + } + else { + sessionStorage.removeItem(STORAGE_KEY); + } + } + catch { } window.dispatchEvent(new CustomEvent('AIBackendBaseUpdated', { detail: value })); } catch { } @@ -202,6 +212,16 @@ function defaultBackendBase() { loadPluginConfig(); } catch { } + try { + const stored = sessionStorage.getItem(STORAGE_KEY); + if (stored && typeof stored === 'string') { + const normalized = normalizeBase(stored); + if (normalized !== null && normalized !== undefined) { + return normalized; + } + } + } + catch { } if (typeof window.AI_BACKEND_URL === 'string') { const explicit = normalizeBase(window.AI_BACKEND_URL); if (explicit !== null && explicit !== undefined) { diff --git a/plugins/AIOverhaul/InteractionTracker.js b/plugins/AIOverhaul/InteractionTracker.js index ba857a07..e52a0161 100644 --- a/plugins/AIOverhaul/InteractionTracker.js +++ b/plugins/AIOverhaul/InteractionTracker.js @@ -817,8 +817,18 @@ class InteractionTracker { handlePageContext(ctx) { if (!ctx) return; - if (!ctx.isDetailView || !ctx.entityId) + // When leaving detail views, allow future entries (even same entity) to re-fire + if (!ctx.isDetailView || !ctx.entityId) { + this.lastDetailKey = null; + // Clear scene context so subsequent scene visits don't reuse stale ids + this.lastScenePageEntered = null; + if (this.currentScene) { + this.cleanupVideoElement(this.currentScene.video); + this.detachVideoJsWatcher(this.currentScene); + this.currentScene = undefined; + } return; + } const key = ctx.page + ':' + ctx.entityId; if (key === this.lastDetailKey) return; @@ -1248,12 +1258,14 @@ class 
InteractionTracker { const delay = attempt === 0 ? 0 : Math.min(600, 80 + attempt * 80); const handle = window.setTimeout(() => { this.playerReinstrumentTimers.delete(player); - const success = this.instrumentSceneWithVideoJs(sceneId, { player, attempt }); + const targetSceneId = this.resolveSceneIdFromContext() || sceneId; + // If navigation switched scenes, avoid applying the old scene id + const success = this.instrumentSceneWithVideoJs(targetSceneId, { player, attempt }); if (success) { this.pendingVideoJsPlayers.delete(player); } else if (attempt < 6) { - this.queuePlayerReinstrument(sceneId, player, attempt + 1); + this.queuePlayerReinstrument(targetSceneId, player, attempt + 1); } }, delay); this.playerReinstrumentTimers.set(player, handle); @@ -1354,10 +1366,15 @@ class InteractionTracker { } this.currentScene = state; this.cleanupVideoElement(video); - const onPlay = () => { + const beginPlayback = () => { var _a, _b, _c, _d; + if (state.lastPlayTs != null) + return; // already marked playing const snapshot = this.getPlaybackSnapshot(state); - state.lastPlayTs = Date.now(); + // If we attached mid-autoplay with no segments yet, backfill start time from current position + const now = Date.now(); + const backfill = snapshot.position !== undefined && snapshot.position > 0.5 && state.segments.length === 0; + state.lastPlayTs = backfill ? now - snapshot.position * 1000 : now; if (snapshot.position !== undefined) state.lastPosition = snapshot.position; this.trackInternal('scene_watch_start', 'scene', sceneId, { @@ -1365,6 +1382,8 @@ class InteractionTracker { duration: (_d = (_c = snapshot.duration) !== null && _c !== void 0 ? _c : state.duration) !== null && _d !== void 0 ? _d : (isFinite(video.duration) ? 
video.duration : undefined) }); }; + const onPlay = () => { beginPlayback(); }; + const onPlaying = () => { beginPlayback(); }; const onPause = () => { var _a, _b, _c, _d; const added = this.captureSegment(); @@ -1416,12 +1435,14 @@ class InteractionTracker { state.duration = video.duration; }; video.addEventListener('play', onPlay); + video.addEventListener('playing', onPlaying); video.addEventListener('pause', onPause); video.addEventListener('ended', onEnded); video.addEventListener('timeupdate', onTimeUpdate); video.addEventListener('loadedmetadata', onLoaded); video._aiInteractionCleanup = () => { video.removeEventListener('play', onPlay); + video.removeEventListener('playing', onPlaying); video.removeEventListener('pause', onPause); video.removeEventListener('ended', onEnded); video.removeEventListener('timeupdate', onTimeUpdate); @@ -1429,11 +1450,15 @@ class InteractionTracker { }; if (state.player) this.attachVideoJsWatcher(state, sceneId, state.player); + const triggerIfAlreadyPlaying = () => { + if (!video.isConnected) + return; + if (!video.paused || (isFinite(video.currentTime) && video.currentTime > 0)) + beginPlayback(); + }; + triggerIfAlreadyPlaying(); if (!video.paused) { - setTimeout(() => { - if (video.isConnected && !video.paused) - onPlay(); - }, 0); + setTimeout(() => { triggerIfAlreadyPlaying(); }, 0); } } trackImageView(imageId, opts) { diff --git a/plugins/AIOverhaul/PluginSettings.js b/plugins/AIOverhaul/PluginSettings.js index 5e29e2b2..bc05e292 100644 --- a/plugins/AIOverhaul/PluginSettings.js +++ b/plugins/AIOverhaul/PluginSettings.js @@ -1621,10 +1621,142 @@ const PluginSettings = () => { React.createElement("button", { style: smallBtn, onClick: handleConfigure }, openConfig === p.name ? 
'Close' : 'Configure')))); })))); } + // Component to handle dynamic loading of custom field renderer scripts + function CustomFieldLoader({ fieldType, pluginName, field, backendBase, savePluginSetting, loadPluginSettings, setError, renderDefaultInput }) { + var _a; + const React = ((_a = window.PluginApi) === null || _a === void 0 ? void 0 : _a.React) || window.React; + const [renderer, setRenderer] = React.useState(null); + const [loading, setLoading] = React.useState(true); + const [failed, setFailed] = React.useState(false); + React.useEffect(() => { + const pluginSpecificName = `${pluginName}_${fieldType}_Renderer`; + const genericName = `${fieldType}_Renderer`; + const legacyName = fieldType === 'tag_list_editor' ? 'SkierAITaggingTagListEditor' : null; + // Check if renderer is already available + const checkRenderer = () => { + const found = window[pluginSpecificName] || + window[genericName] || + (legacyName ? window[legacyName] : null); + if (found && typeof found === 'function') { + setRenderer(() => found); + setLoading(false); + return true; + } + return false; + }; + if (checkRenderer()) + return; + // Try to load the script from the backend server + // Normalize backendBase to ensure it doesn't end with a slash + const normalizedBackendBase = backendBase.replace(/\/+$/, ''); + const possiblePaths = [ + `${normalizedBackendBase}/plugins/${pluginName}/${fieldType}.js`, + `${normalizedBackendBase}/dist/plugins/${pluginName}/${fieldType}.js`, + ]; + // Also try camelCase version + const typeParts = fieldType.split('_'); + if (typeParts.length > 1) { + const camelCase = typeParts[0] + typeParts.slice(1).map(p => p.charAt(0).toUpperCase() + p.slice(1)).join(''); + possiblePaths.push(`${normalizedBackendBase}/plugins/${pluginName}/${camelCase}.js`); + possiblePaths.push(`${normalizedBackendBase}/dist/plugins/${pluginName}/${camelCase}.js`); + } + let attemptIndex = 0; + const tryLoad = () => { + if (attemptIndex >= possiblePaths.length) { + 
setLoading(false); + setFailed(true); + if (window.AIDebug) { + console.warn('[PluginSettings.CustomFieldLoader] Failed to load renderer for', fieldType, 'tried:', possiblePaths); + } + return; + } + const path = possiblePaths[attemptIndex]; + // Use fetch + eval instead of script tag to work around CSP script-src-elem restrictions + // This uses script-src (which has unsafe-eval) instead of script-src-elem + fetch(path) + .then(response => { + if (!response.ok) { + throw new Error(`HTTP ${response.status}`); + } + return response.text(); + }) + .then(scriptText => { + console.log('[PluginSettings.CustomFieldLoader] Fetched script:', path); + try { + // Eval the script - this uses script-src (with unsafe-eval) instead of script-src-elem + // Create a new function context to avoid polluting global scope + const scriptFunction = new Function(scriptText); + scriptFunction(); + // Wait a bit for the script to register, then check again + setTimeout(() => { + if (checkRenderer()) { + return; + } + // Script loaded but renderer not found, try next path + attemptIndex++; + tryLoad(); + }, 200); + } + catch (evalError) { + console.error('[PluginSettings.CustomFieldLoader] Error evaluating script:', path, evalError); + attemptIndex++; + tryLoad(); + } + }) + .catch(error => { + console.warn('[PluginSettings.CustomFieldLoader] Failed to fetch script:', path, error); + attemptIndex++; + tryLoad(); + }); + }; + tryLoad(); + // Also poll for renderer in case it loads asynchronously (max 10 seconds) + let pollCount = 0; + const pollInterval = setInterval(() => { + pollCount++; + if (checkRenderer() || pollCount > 20) { + clearInterval(pollInterval); + if (pollCount > 20 && !renderer) { + setLoading(false); + setFailed(true); + } + } + }, 500); + return () => clearInterval(pollInterval); + }, [fieldType, pluginName]); + if (renderer) { + return React.createElement(renderer, { + field: field, + pluginName: pluginName, + backendBase: backendBase, + savePluginSetting: 
savePluginSetting, + loadPluginSettings: loadPluginSettings, + setError: setError + }); + } + if (loading) { + return React.createElement('div', { style: { padding: 8, fontSize: 11, color: '#888', fontStyle: 'italic' } }, `Loading ${fieldType} editor...`); + } + // Failed to load - use default input if provided, otherwise show error message + if (failed && renderDefaultInput) { + return renderDefaultInput(); + } + if (failed) { + return React.createElement('div', { style: { padding: 8, fontSize: 11, color: '#f85149' } }, `Failed to load ${fieldType} editor. Using default input.`); + } + return null; + } function FieldRenderer({ f, pluginName }) { const t = f.type || 'string'; const label = f.label || f.key; const savedValue = f.value === undefined ? f.default : f.value; + // Define styles and computed values early so they're available to callbacks + const changed = savedValue !== undefined && savedValue !== null && f.default !== undefined && savedValue !== f.default; + const inputStyle = { padding: 6, background: '#111', color: '#eee', border: '1px solid #333', minWidth: 120 }; + const wrap = { position: 'relative', padding: '4px 4px 6px', border: '1px solid #2a2a2a', borderRadius: 4, background: '#101010' }; + const resetStyle = { position: 'absolute', top: 2, right: 4, fontSize: 9, padding: '1px 4px', cursor: 'pointer' }; + const labelTitle = f && f.description ? String(f.description) : undefined; + const labelEl = React.createElement('span', { title: labelTitle }, React.createElement(React.Fragment, null, label, changed ? 
React.createElement('span', { style: { color: '#ffa657', fontSize: 10 } }, ' •') : null)); if (t === 'path_map') { const containerStyle = { position: 'relative', @@ -1643,15 +1775,81 @@ const PluginSettings = () => { changedMap && React.createElement("span", { style: { color: '#ffa657', fontSize: 10 } }, "\u2022")), React.createElement(PathMapEditor, { value: savedValue, defaultValue: f.default, onChange: async (next) => { await savePluginSetting(pluginName, f.key, next); }, onReset: async () => { await savePluginSetting(pluginName, f.key, null); }, variant: "plugin" }))); } - const changed = savedValue !== undefined && savedValue !== null && f.default !== undefined && savedValue !== f.default; - const inputStyle = { padding: 6, background: '#111', color: '#eee', border: '1px solid #333', minWidth: 120 }; - const wrap = { position: 'relative', padding: '4px 4px 6px', border: '1px solid #2a2a2a', borderRadius: 4, background: '#101010' }; - const resetStyle = { position: 'absolute', top: 2, right: 4, fontSize: 9, padding: '1px 4px', cursor: 'pointer' }; - const labelTitle = f && f.description ? 
String(f.description) : undefined; - const labelEl = React.createElement("span", { title: labelTitle }, - label, - " ", - changed && React.createElement("span", { style: { color: '#ffa657', fontSize: 10 } }, "\u2022")); + // Check for custom field renderers registered by plugins + // Supports both plugin-specific (pluginName_type_Renderer) and generic (type_Renderer) naming + if (t && typeof t === 'string' && t !== 'string' && t !== 'boolean' && t !== 'number' && t !== 'select' && t !== 'path_map') { + const pluginSpecificName = `${pluginName}_${t}_Renderer`; + const genericName = `${t}_Renderer`; + const customRenderer = window[pluginSpecificName] || window[genericName]; + const renderer = customRenderer; + // Debug logging + if (window.AIDebug) { + console.log('[PluginSettings.FieldRenderer] Custom field type detected:', { + type: t, + pluginName: pluginName, + pluginSpecificName: pluginSpecificName, + genericName: genericName, + hasPluginSpecific: !!window[pluginSpecificName], + hasGeneric: !!window[genericName], + renderer: renderer ? 
typeof renderer : 'null' + }); + } + if (renderer && typeof renderer === 'function') { + if (window.AIDebug) { + console.log('[PluginSettings.FieldRenderer] Using custom renderer for', t); + } + return React.createElement(renderer, { + field: f, + pluginName: pluginName, + backendBase: backendBase, + savePluginSetting: savePluginSetting, + loadPluginSettings: loadPluginSettings, + setError: setError + }); + } + else { + // Renderer not found - use CustomFieldLoader to dynamically load it + // CustomFieldLoader will handle fallback to default input if renderer not found + return React.createElement(CustomFieldLoader, { + fieldType: t, + pluginName: pluginName, + field: f, + backendBase: backendBase, + savePluginSetting: savePluginSetting, + loadPluginSettings: loadPluginSettings, + setError: setError, + // Pass the default input rendering logic as fallback + renderDefaultInput: () => { + // This will be called if renderer not found - render default text input + const display = savedValue === undefined || savedValue === null ? '' : String(savedValue); + const inputKey = `${pluginName}:${f.key}:${display}`; + const handleBlur = async (event) => { + var _a; + const next = (_a = event.target.value) !== null && _a !== void 0 ? _a : ''; + if (next === display) + return; + await savePluginSetting(pluginName, f.key, next); + }; + const handleKeyDown = (event) => { + if (event.key === 'Enter') { + event.preventDefault(); + event.target.blur(); + } + }; + const handleReset = async () => { + await savePluginSetting(pluginName, f.key, null); + }; + return React.createElement('div', { style: wrap }, React.createElement('label', { style: { fontSize: 12 } }, React.createElement(React.Fragment, null, labelEl, React.createElement('br'), React.createElement('input', { + key: inputKey, + style: inputStyle, + defaultValue: display, + onBlur: handleBlur, + onKeyDown: handleKeyDown + }))), changed ? 
React.createElement('button', { style: resetStyle, onClick: handleReset }, 'Reset') : null); + } + }); + } + } if (t === 'boolean') { return (React.createElement("div", { style: wrap }, React.createElement("label", { style: { fontSize: 12, display: 'flex', alignItems: 'center', gap: 8 } }, diff --git a/plugins/AIOverhaul/README.md b/plugins/AIOverhaul/README.md index 50a522ea..55c45ee7 100644 --- a/plugins/AIOverhaul/README.md +++ b/plugins/AIOverhaul/README.md @@ -3,4 +3,4 @@ https://discourse.stashapp.cc/t/aioverhaul/4847 # For details around this plugin and using and configuring it, see the official documentation here: -https://github.com/skier233/Stash-AIServer/wiki/AI-Overhaul-Installation-Instructions \ No newline at end of file +https://github.com/skier233/Stash-AIServer/wiki/AI-Overhaul-Installation-Instructions diff --git a/plugins/AIOverhaul/SimilarScenes.js b/plugins/AIOverhaul/SimilarScenes.js index d688dde2..a3a3c42f 100644 --- a/plugins/AIOverhaul/SimilarScenes.js +++ b/plugins/AIOverhaul/SimilarScenes.js @@ -619,11 +619,13 @@ }, [onSceneClicked]); // Render scene in queue list format (matching the Queue tab exactly) const renderQueueScene = useCallback((scene, index) => { - var _a, _b, _c; - const title = scene.title || `Scene ${scene.id}`; - const studio = ((_a = scene.studio) === null || _a === void 0 ? void 0 : _a.name) || ''; - const performers = ((_b = scene.performers) === null || _b === void 0 ? void 0 : _b.map(p => p.name).join(', ')) || ''; - const screenshot = (_c = scene.paths) === null || _c === void 0 ? void 0 : _c.screenshot; + var _a, _b, _c, _d, _e; + const filepath = ((_b = (_a = scene.files) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.path) || ''; + const filename = filepath ? filepath.replace(/\\/g, '/').split('/').pop() || '' : ''; + const title = scene.title || filename || `Scene ${scene.id}`; + const studio = ((_c = scene.studio) === null || _c === void 0 ? 
void 0 : _c.name) || ''; + const performers = ((_d = scene.performers) === null || _d === void 0 ? void 0 : _d.map(p => p.name).join(', ')) || ''; + const screenshot = (_e = scene.paths) === null || _e === void 0 ? void 0 : _e.screenshot; const date = scene.date || scene.created_at || ''; return React.createElement('li', { key: scene.id, @@ -647,10 +649,11 @@ className: 'queue-scene-details' }, [ React.createElement('span', { key: 'title', className: 'queue-scene-title' }, title), + filepath ? React.createElement('span', { key: 'filepath', className: 'queue-scene-filepath', title: filepath, style: { fontSize: '0.75em', color: '#888', overflow: 'hidden', textOverflow: 'ellipsis', whiteSpace: 'nowrap', maxWidth: '300px', display: 'block' } }, filepath) : null, React.createElement('span', { key: 'studio', className: 'queue-scene-studio' }, studio), React.createElement('span', { key: 'performers', className: 'queue-scene-performers' }, performers), React.createElement('span', { key: 'date', className: 'queue-scene-date' }, date) - ]) + ].filter(Boolean)) ]))); }, [handleSceneClick]); // Render recommender selector when recommenders are available diff --git a/plugins/AdulttimeInteractiveDL/AdulttimeInteractiveDL.py b/plugins/AdulttimeInteractiveDL/AdulttimeInteractiveDL.py index 131d108e..83975270 100644 --- a/plugins/AdulttimeInteractiveDL/AdulttimeInteractiveDL.py +++ b/plugins/AdulttimeInteractiveDL/AdulttimeInteractiveDL.py @@ -86,7 +86,7 @@ def get_download(): log.info(f"Plugin Cachepath {cachepath} ") # adulttime.com - # jerkbuddies.com + # jerk-buddies.com # adulttime.studio # oopsie.tube # adulttimepilots.com @@ -103,7 +103,7 @@ def get_download(): f={ "url": { "modifier": "MATCHES_REGEX", - "value": "howwomenorgasm\\.com|switch\\.com|getupclose\\.com|milfoverload\\.net|dareweshare\\.net|jerkbuddies\\.com|adulttime\\.studio|adulttime\\.com|oopsie\\.tube|adulttimepilots\\.com|kissmefuckme\\.net|youngerloverofmine\\.com", + "value": 
"howwomenorgasm\\.com|switch\\.com|getupclose\\.com|milfoverload\\.net|dareweshare\\.net|jerk-buddies\\.com|adulttime\\.studio|adulttime\\.com|oopsie\\.tube|adulttimepilots\\.com|kissmefuckme\\.net|youngerloverofmine\\.com", } }, fragment=SLIM_SCENE_FRAGMENT, diff --git a/plugins/AdulttimeInteractiveDL/AdulttimeInteractiveDL.yml b/plugins/AdulttimeInteractiveDL/AdulttimeInteractiveDL.yml index aacf27ba..f6c6b1e4 100644 --- a/plugins/AdulttimeInteractiveDL/AdulttimeInteractiveDL.yml +++ b/plugins/AdulttimeInteractiveDL/AdulttimeInteractiveDL.yml @@ -1,6 +1,6 @@ name: "Adulttime Interactive Downloader" description: Download Interactive Files for Adulttime Scenes -version: 0.1.2 +version: 0.1.3 url: https://discourse.stashapp.cc/t/adulttime-interactive-downloader/1327 exec: - python diff --git a/plugins/ExtraPerformerInfo/extraPerformerInfo.py b/plugins/ExtraPerformerInfo/extraPerformerInfo.py index 381f6e0e..26bff173 100644 --- a/plugins/ExtraPerformerInfo/extraPerformerInfo.py +++ b/plugins/ExtraPerformerInfo/extraPerformerInfo.py @@ -13,8 +13,8 @@ "wikidatExtraUrls":True, "awards": True, "otherInfo": True, - "createTag": True - + "createTag": True, + "overwrite": False } @@ -56,12 +56,14 @@ 'P69':'Educated At', 'P102':'Member of political party', 'P551':'Residence', - 'P3373': 'Sibling' + 'P3373': 'Sibling', + 'P26':'Spouse' } request_wd = requests.Session() +request_wd.headers.update({'User-Agent': 'stash extra performer info (https://github.com/stashapp/CommunityScripts/; ) generic-library/0.0'}) wd_properties={} tags_cache={} @@ -70,9 +72,11 @@ def getWDPPropertyLabel(propertyId): if propertyId not in wd_properties: property_url = 'https://www.wikidata.org/wiki/Special:EntityData/%s.json' % (propertyId,) wd2 = request_wd.get(property_url) +# log.debug(wd2.status_code) +# log.debug(wd2.content) if wd2.status_code == 200: - log.debug(wd2.json()) +# log.debug(wd2.json()) # data2 = wd2.json()['entities'] for k,data2 in wd2.json()['entities'].items(): if 'en' in 
data2['labels']: @@ -101,8 +105,9 @@ def processWikidata(performer,performer_update,url): api_url='https://www.wikidata.org/wiki/Special:EntityData/%s.json' % (wikidata_id,) log.debug('about to fetch wikidata url: %s for performer %s' % (api_url,performer['name'],)) wd=request_wd.get(api_url) + log.debug(wd.status_code) if wd.status_code==200: -# log.debug(wd.json().keys()) + log.debug(wd.json().keys()) data2=wd.json()['entities'] data=data2[next(iter(data2))] @@ -154,33 +159,12 @@ def processWikidata(performer,performer_update,url): elif prop == 'P1411': award['type']='nominated' - + # Conferred by if 'P1027' in award['wd']['claims'].keys(): award['conferred_wd']=getWDPPropertyLabel(award['wd']['claims']['P1027'][0]['mainsnak']['datavalue']['value']['id']) award['conferred'] =award['conferred_wd']['label'] - if award['type']=='award received': - if award['conferred'] not in award_totals: - award_totals[award['conferred']]=0 - award_totals[award['conferred']] =award_totals[award['conferred']] +1 - if settings['createTag']: - performer_update['tag_names'].append( - '[%s Award Winner]' % (award['conferred'],)) - - else: - if award['conferred'] not in nominated_totals: - nominated_totals[award['conferred']]=0 - nominated_totals[award['conferred']] =nominated_totals[award['conferred']] +1 - if settings['createTag']: - performer_update['tag_names'].append('[%s Award Nominated]' % (award['conferred'],)) else: - if award['type']=='award received': - if 'unknown' not in nominated_totals: - award_totals['unknown'] = 0 - award_totals['unknown'] = award_totals['unknown'] + 1 - else: - if 'unknown' not in nominated_totals: - nominated_totals['unknown']=0 - nominated_totals['unknown'] =nominated_totals['unknown'] +1 + award['conferred'] = 'unknown' # sublcass of, can be award for best scene @@ -202,26 +186,51 @@ def processWikidata(performer,performer_update,url): if q=='P805': award['venue_wd'] = getWDPPropertyLabel(qv[0]['datavalue']['value']['id']) award['venue'] = 
award['venue_wd']['label'] - # Award Rationale - if q=='P6208': - award['name']=qv[0]['datavalue']['value']['text'] + # P6208: Award Rationale + # P1932: object named as + if q in ('P6208','P6208'): + award['name']= qv[0]['datavalue']['value']['text'] + if award.get('conferred','unknown')=='unknown': + award['conferred']=award['wd']['label'] + if 'conferred' in award: + if award['type']=='award received': + if award['conferred'] not in award_totals: + award_totals[award['conferred']]=0 + award_totals[award['conferred']] =award_totals[award['conferred']] +1 + if settings['createTag']: + performer_update['tag_names'].append( + '[%s Award Winner]' % (award['conferred'],)) + else: + if award['conferred'] not in nominated_totals: + nominated_totals[award['conferred']]=0 + nominated_totals[award['conferred']] =nominated_totals[award['conferred']] +1 + if settings['createTag']: + performer_update['tag_names'].append('[%s Award Nominated]' % (award['conferred'],)) if award: # log.info('award: %s' % (award,)) if 'custom_fields' not in performer_update: - performer_update['custom_fields']={'full':performer['custom_fields'].copy()} + if settings["overwrite"]: + performer_update['custom_fields'] = {'full':{}} + else: + performer_update['custom_fields']={'full':performer['custom_fields'].copy()} award_name=award['name'] award['award_value']=award['name'] if 'venue' in award and 'time' in award: award['award_value']='%s - %s: %s' % (award['time'], award['venue'],award['name'],) - elif 'time' in award: - award['award_value']='%s: %s' % (award['time'],award['name'],) elif 'venue' in award: award['award_value']='%s: %s' % (award['venue'],award['name'],) - + elif 'time' in award: + if 'conferred' in award: + if award['conferred'] !='unknown': + award['award_value'] = '%s - %s: %s' % (award['time'],award['conferred'], award['name'],) + else: + award['award_value'] = '%s: %s' % (award['time'], award['name'],) + else: + award['award_value'] = '%s: %s' % (award['time'], award['name'],) 
if award['type']=='award received': won_award.append(award) @@ -229,7 +238,6 @@ def processWikidata(performer,performer_update,url): award['award_value']='%s - Nominated' % award['award_value'] nominated_award.append(award) if award_name not in performer_update['custom_fields']['full']: - performer_update['custom_fields']['full'][award_name]= award['award_value'] performer_update['update'] = True else: @@ -251,40 +259,17 @@ def processWikidata(performer,performer_update,url): ["%s: %s" % (k, v,) for k, v in nominated_totals.items()]) performer_update['update'] = True if won_award: -# performer_update['custom_fields']['full']['json_awards'] = json.dumps([x[for x in won_award]) performer_update['custom_fields']['full']['Awards Won'] = ', '.join( [x['award_value'] for x in won_award]) if settings['createTag']: performer_update['tag_names'].append('[Award Winner]') performer_update['update'] = True if nominated_award: -# performer_update['custom_fields']['full']['json_nominated'] = json.dumps(nominated_award) performer_update['custom_fields']['full']['Awards Nominated'] = ', '.join( [x['award_value'] for x in nominated_award]) if settings['createTag']: performer_update['tag_names'].append('[Award Nominated]') performer_update['update'] = True - # if settings['createTag']: - # if 'P31' in award['wd']['claims']: - # for c in award['wd']['claims']['P31']: - # log.debug('c %s' % (c,)) - # # avn Award Q824540 - # if c['mainsnak']['datavalue']['value']['id']=='Q824540': - # log.debug('---------------') - # if prop=='P166': - # performer_update['tag_names'].append('[AVN Award Winner]') - # performer_update['update'] = True - # elif prop=='P1411': - # performer_update['tag_names'].append('[AVN Award Nominated]') - # performer_update['update'] = True - # - # if settings['createTag']: - # if prop=='P166': - # performer_update['tag_names'].append('[Award Winner]') - # performer_update['update'] = True - # elif prop=='P1411': - # performer_update['tag_names'].append('[Award 
Nominated]') - # performer_update['update'] = True if settings['otherInfo']: for claim, label in wikidata_field_properties.items(): if claim in data['claims']: @@ -373,7 +358,7 @@ def processPerformers(): if "extraPerformerInfo" in config: settings.update(config["extraPerformerInfo"]) -log.info("config: %s " % (settings,)) +log.debug("config: %s " % (settings,)) if "mode" in json_input["args"]: @@ -385,6 +370,9 @@ def processPerformers(): processPerformer(performer) else: processPerformers() + elif "processAllOverwrite" == PLUGIN_ARGS: + settings["overwrite"]=True + processPerformers() elif "hookContext" in json_input["args"]: id = json_input["args"]["hookContext"]["id"] diff --git a/plugins/ExtraPerformerInfo/extraPerformerInfo.yml b/plugins/ExtraPerformerInfo/extraPerformerInfo.yml index ce140540..96ef2e21 100644 --- a/plugins/ExtraPerformerInfo/extraPerformerInfo.yml +++ b/plugins/ExtraPerformerInfo/extraPerformerInfo.yml @@ -1,6 +1,6 @@ name: Extra Performer Info description: add award info from wikidata -version: 0.2 +version: 0.3 url: https://discourse.stashapp.cc/t/extra-performer-info/1332 exec: - python diff --git a/plugins/FunscriptHaven/README.md b/plugins/FunscriptHaven/README.md new file mode 100644 index 00000000..387f739c --- /dev/null +++ b/plugins/FunscriptHaven/README.md @@ -0,0 +1,182 @@ +# Funscript Haven + +https://discourse.stashapp.cc/t/funscript-haven/5124 + +A StashApp plugin that automatically generates funscript files from video scenes using optical flow analysis. + +## Overview + +Funscript Haven analyzes video content using computer vision techniques to detect motion patterns and automatically generate funscript files compatible with interactive devices. The plugin integrates seamlessly with StashApp, allowing you to queue scenes for processing by simply adding a tag. 
+ +## Features + +- **Automatic Funscript Generation** - Analyzes video motion using optical flow algorithms to generate accurate funscript files +- **Tag-Based Workflow** - Simply tag scenes with a trigger tag to queue them for processing +- **VR Support** - Automatically detects VR content and adjusts processing accordingly +- **Multi-Axis Output** - Optional generation of secondary axis funscripts (Roll, Pitch, Twist, Surge, Sway) +- **POV Mode** - Specialized processing mode for POV content +- **Keyframe Reduction** - Intelligent compression to reduce file size while maintaining quality +- **Batch Processing** - Process multiple scenes in sequence with progress tracking +- **Configurable Settings** - Extensive options available through StashApp UI or config file +- **Enjoying Funscript Haven?** Check out more tools and projects at https://github.com/Haven-hvn + +## Requirements + +- **StashApp** - This plugin requires a running StashApp instance +- **Python 3.8+** - Python interpreter with pip +- **Dependencies** (automatically installed): + - `stashapp-tools` (>=0.2.58) + - `numpy` (v1.26.4) + - `opencv-python` (v4.10.0.84) + - `decord` (v0.6.0) + +## Installation + +1. Copy the plugin files to your StashApp plugins directory: + ``` + /plugins/funscript_haven/ + ├── funscript_haven.py + ├── funscript_haven.yml + └── funscript_haven_config.py + ``` + +2. Reload plugins in StashApp (Settings → Plugins → Reload Plugins) + +3. Configure plugin settings as needed (Settings → Plugins → Funscript Haven) + +## Usage + +### Basic Usage + +1. **Tag a Scene**: Add the tag `FunscriptHaven_Process` to any scene you want to process +2. **Run the Plugin**: Go to Settings → Tasks → Run Plugin Task → Funscript Haven → "Process Tagged Scenes" +3. **Wait for Processing**: The plugin will process each tagged scene and generate funscript files +4. 
**Check Results**: Funscript files are saved alongside the video files with `.funscript` extension + +### Tag Workflow + +| Tag | Purpose | +|-----|---------| +| `FunscriptHaven_Process` | Add to scenes to queue them for processing | +| `FunscriptHaven_Complete` | Automatically added when processing succeeds | +| `FunscriptHaven_Error` | Automatically added if an error occurs | + +## Configuration + +Settings can be configured in two ways: +1. **StashApp UI** (Settings → Plugins → Funscript Haven) - Takes priority +2. **Config File** (`funscript_haven_config.py`) - Fallback defaults + +### Processing Settings + +| Setting | Default | Description | +|---------|---------|-------------| +| `threads` | CPU count | Number of threads for optical flow computation | +| `detrend_window` | 2 | Detrend window in seconds - controls drift removal (integer 1-10) | +| `norm_window` | 4 | Normalization window in seconds - calibrates motion range (integer 1-10) | +| `batch_size` | 3000 | Frames per batch - higher is faster but uses more RAM | +| `overwrite` | false | Whether to overwrite existing funscript files | +| `keyframe_reduction` | true | Enable intelligent keyframe reduction | + +**Note:** StashApp UI only accepts integer values 0-10 for NUMBER type settings. Decimal values are converted internally. 
+ +### Mode Settings + +| Setting | Default | Description | +|---------|---------|-------------| +| `pov_mode` | false | Improves stability for POV videos | +| `balance_global` | true | Attempts to cancel out camera motion | + +### Multi-Axis Settings + +| Setting | Default | Description | +|---------|---------|-------------| +| `multi_axis` | false | Generate secondary axis funscripts | +| `multi_axis_intensity` | 5 | Intensity of secondary axis motion (0-10, where 10 = maximum) | +| `random_speed` | 3 | Speed of random motion variation (0-10, where 10 = fastest) | +| `auto_home_delay` | 1 | Seconds of inactivity before returning to center (integer 0-10) | +| `auto_home_duration` | 1 | Time to smoothly return to center position in seconds (integer 0-10) | +| `smart_limit` | true | Scale secondary axis with primary stroke activity | + +**Note:** Settings like `multi_axis_intensity` and `random_speed` use 0-10 integer scale in the UI but are converted to 0.0-1.0 decimal values internally. + +### VR Detection + +The plugin automatically detects VR content by checking for these tags (case-insensitive): +- VR +- Virtual Reality +- 180° +- 360° + +You can customize VR tag detection in `funscript_haven_config.py`. + +## Multi-Axis Output + +When `multi_axis` is enabled, the plugin generates additional funscript files for secondary axes: + +| Axis | File Suffix | Description | +|------|-------------|-------------| +| L1 (Surge) | `.surge.funscript` | Forward/Backward motion | +| L2 (Sway) | `.sway.funscript` | Left/Right motion | +| R0 (Twist) | `.twist.funscript` | Rotational twist | +| R1 (Roll) | `.roll.funscript` | Roll rotation | +| R2 (Pitch) | `.pitch.funscript` | Pitch rotation | + +Secondary axes use OpenSimplex noise generation for natural, organic motion patterns that correlate with the primary stroke activity. + +## Technical Details + +### Algorithm Overview + +1. **Frame Extraction** - Video frames are extracted and downsampled using decord +2. 
**Optical Flow** - Farneback optical flow algorithm detects motion between frames +3. **Divergence Analysis** - Maximum divergence points identify primary motion centers +4. **Radial Motion** - Weighted radial motion calculation extracts stroke direction +5. **Integration** - Piecewise integration of motion values +6. **Detrending** - Rolling window detrending removes drift artifacts +7. **Normalization** - Local normalization scales output to 0-100 range +8. **Keyframe Reduction** - Direction changes are used to reduce keyframe count + +### Performance Tips + +- **RAM Usage**: Lower `batch_size` if running out of memory +- **Speed**: Increase `threads` to match available CPU cores +- **Quality**: Adjust `detrend_window` and `norm_window` based on video content +- **File Size**: Keep `keyframe_reduction` enabled for smaller files + +## Troubleshooting + +### Common Issues + +**"No scenes found with tag"** +- Ensure the trigger tag exists and is applied to scenes +- Check tag name matches exactly (case-sensitive) + +**"Video file not found"** +- Verify the scene has a valid file path in StashApp +- Check file permissions + +**Processing is slow** +- Reduce `batch_size` to lower memory usage +- Ensure sufficient CPU threads are allocated +- VR content takes longer due to higher resolution processing + +**Poor funscript quality** +- Try adjusting `detrend_window` (higher for stable cameras) +- Enable `pov_mode` for POV content +- Disable `balance_global` if camera doesn't move + +### Log Messages + +Check StashApp logs for detailed processing information and error messages. + +## License + +This project is part of the StashApp Community Scripts collection. 
+ +## Credits + +- Uses OpenCV for optical flow computation +- Uses decord for efficient video frame extraction +- OpenSimplex noise algorithm for multi-axis generation +- Built for integration with StashApp diff --git a/plugins/FunscriptHaven/funscript_haven.py b/plugins/FunscriptHaven/funscript_haven.py new file mode 100644 index 00000000..8794afe7 --- /dev/null +++ b/plugins/FunscriptHaven/funscript_haven.py @@ -0,0 +1,1737 @@ +""" +Funscript Haven - StashApp Plugin +Generates funscript files from video scenes using optical flow analysis +""" + +import gc +import os +import sys +import json +import math +import threading +import concurrent.futures +import random +import subprocess +from multiprocessing import Pool +from typing import Dict, Any, List, Optional, Callable, Tuple + +# Hardware acceleration will be tried first, then fallback to software decoding if needed +# We don't set these initially to allow hardware acceleration to be attempted + +# ----------------- Setup and Dependencies ----------------- + +# Use PythonDepManager for dependency management +try: + from PythonDepManager import ensure_import + + # Install and ensure all required dependencies with specific versions + ensure_import( + "stashapi:stashapp-tools==0.2.58", + "numpy==1.26.4", + "opencv-python==4.10.0.84", + "decord==0.6.0" + ) + + # Import the dependencies after ensuring they're available + import stashapi.log as log + from stashapi.stashapp import StashInterface + import numpy as np + import cv2 + from decord import VideoReader, cpu + +except ImportError as e: + print(f"Failed to import PythonDepManager or required dependencies: {e}") + print("Please ensure PythonDepManager is installed and available.") + sys.exit(1) +except Exception as e: + print(f"Error during dependency management: {e}") + import traceback + print(f"Stack trace: {traceback.format_exc()}") + sys.exit(1) + +# Import local config +try: + import funscript_haven_config as config +except ModuleNotFoundError: + 
log.error("Please provide a funscript_haven_config.py file with the required variables.") + raise Exception("Please provide a funscript_haven_config.py file with the required variables.") + +# ----------------- Global Variables ----------------- + +stash: Optional[StashInterface] = None +progress: float = 0.0 +total_tasks: int = 0 +completed_tasks: int = 0 + +# ----------------- Optical Flow Functions ----------------- + +def max_divergence(flow): + """ + Computes the divergence of the optical flow over the whole image and returns + the pixel (x, y) with the highest absolute divergence along with its value. + """ + div = np.gradient(flow[..., 0], axis=0) + np.gradient(flow[..., 1], axis=1) + y, x = np.unravel_index(np.argmax(np.abs(div)), div.shape) + return x, y, div[y, x] + + +def radial_motion_weighted(flow, center, is_cut, pov_mode=False, balance_global=True): + """ + Computes signed radial motion: positive for outward motion, negative for inward motion. + Closer pixels have higher weight. + """ + if is_cut: + return 0.0 + h, w, _ = flow.shape + y, x = np.indices((h, w)) + dx = x - center[0] + dy = y - center[1] + + dot = flow[..., 0] * dx + flow[..., 1] * dy + + if pov_mode or not balance_global: + return np.mean(dot) + + weighted_dot = np.where(x > center[0], dot * (w - x) / w, dot * x / w) + weighted_dot = np.where(y > center[1], weighted_dot * (h - y) / h, weighted_dot * y / h) + + return np.mean(weighted_dot) + + +def precompute_flow_info(p0, p1, params): + """ + Compute optical flow and extract relevant information for funscript generation. 
+ """ + cut_threshold = params.get("cut_threshold", 7) + + flow = cv2.calcOpticalFlowFarneback(p0, p1, None, 0.5, 3, 15, 3, 5, 1.2, 0) + + if params.get("pov_mode"): + max_val = (p0.shape[1] // 2, p0.shape[0] - 1, 0) + else: + max_val = max_divergence(flow) + + pos_center = max_val[0:2] + val_pos = max_val[2] + + mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1]) + mean_mag = np.mean(mag) + is_cut = mean_mag > cut_threshold + + return { + "flow": flow, + "pos_center": pos_center, + "neg_center": pos_center, + "val_pos": val_pos, + "val_neg": val_pos, + "cut": is_cut, + "cut_center": pos_center[0], + "mean_mag": mean_mag + } + + +def precompute_wrapper(p, params): + return precompute_flow_info(p[0], p[1], params) + + +def find_intel_arc_render_device() -> Optional[str]: + """ + Find the render device path (/dev/dri/renderD*) for the Intel Arc GPU. + Returns the device path or None if not found. + """ + try: + # Check all render devices + render_devices = [] + for item in os.listdir("/dev/dri/"): + if item.startswith("renderD"): + render_devices.append(f"/dev/dri/{item}") + + # Try each render device to find the Intel Arc one + for render_dev in sorted(render_devices): + try: + result = subprocess.run( + ["vainfo", "--display", "drm", "--device", render_dev], + capture_output=True, + text=True, + timeout=5 + ) + if result.returncode == 0 and "Intel" in result.stdout: + # Check if it supports AV1 (Arc GPUs support AV1) + if "AV1" in result.stdout or "av1" in result.stdout.lower(): + return render_dev + except (FileNotFoundError, subprocess.TimeoutExpired, subprocess.SubprocessError): + continue + + # Fallback: if we found Intel but no AV1, still return the first Intel device + for render_dev in sorted(render_devices): + try: + result = subprocess.run( + ["vainfo", "--display", "drm", "--device", render_dev], + capture_output=True, + text=True, + timeout=5 + ) + if result.returncode == 0 and "Intel" in result.stdout: + return render_dev + except (FileNotFoundError, 
subprocess.TimeoutExpired, subprocess.SubprocessError): + continue + + return None + except Exception: + return None + + +def detect_intel_arc_gpu() -> Tuple[bool, Optional[str], Optional[str]]: + """ + Detect if an Intel Arc GPU is available. + Returns (is_available, device_name_or_error, render_device_path). + """ + render_device: Optional[str] = None + try: + # Method 1: Check /sys/class/drm for Intel graphics devices + drm_path = "/sys/class/drm" + if os.path.exists(drm_path): + for item in os.listdir(drm_path): + if item.startswith("card") and os.path.isdir(os.path.join(drm_path, item)): + device_path = os.path.join(drm_path, item, "device", "vendor") + if os.path.exists(device_path): + with open(device_path, "r") as f: + vendor_id = f.read().strip() + # Intel vendor ID is 0x8086 + if vendor_id == "0x8086" or vendor_id == "8086": + # Check device name + uevent_path = os.path.join(drm_path, item, "device", "uevent") + if os.path.exists(uevent_path): + with open(uevent_path, "r") as uf: + uevent_data = uf.read() + # Check for Arc-specific device IDs or names + # Intel Arc GPU device ID ranges: + # - 569x series: Arc A310 (e.g., 0x5690-0x569F) + # - 56Ax series: Arc A380 (e.g., 0x56A0-0x56AF) + # - 56Bx series: Arc A750, A770 (e.g., 0x56B0-0x56BF) + # Format in uevent: PCI_ID=8086:56A5 or PCI_ID=0000:0000:8086:56A5 + device_id_line = [line for line in uevent_data.split("\n") if "PCI_ID" in line] + if device_id_line: + device_id = device_id_line[0].split("=")[-1] if "=" in device_id_line[0] else "" + # Extract device ID part (after vendor ID 8086) + # Handle formats like "8086:56A5" or "0000:0000:8086:56A5" + arc_detected = False + if ":" in device_id: + parts = device_id.split(":") + # Find the part after 8086 (vendor ID) + for i, part in enumerate(parts): + if part.upper() == "8086" and i + 1 < len(parts): + device_part = parts[i + 1].upper() + # Check if it's an Arc GPU device ID + if any(arc_id in device_part for arc_id in ["569", "56A", "56B"]): + 
arc_detected = True + break + # Fallback: check if any Arc ID is in the full device_id string + if not arc_detected: + arc_detected = any(arc_id in device_id.upper() for arc_id in ["569", "56A", "56B"]) + if arc_detected: + # Find the corresponding render device + render_device = find_intel_arc_render_device() + return True, f"Intel Arc GPU (device: {device_id})", render_device + + # Method 2: Try using lspci if available + try: + result = subprocess.run( + ["lspci"], + capture_output=True, + text=True, + timeout=5 + ) + if result.returncode == 0: + for line in result.stdout.split("\n"): + if "VGA" in line or "Display" in line: + if "Intel" in line and ("Arc" in line or "A" in line.split("Intel")[-1].split()[0] if len(line.split("Intel")) > 1 else False): + # Find the corresponding render device + render_device = find_intel_arc_render_device() + return True, f"Intel Arc GPU detected via lspci: {line.strip()}", render_device + except (FileNotFoundError, subprocess.TimeoutExpired): + pass + + # Method 3: Check vaapi devices (try all render devices) + # This is a fallback method that checks VAAPI directly + render_device = find_intel_arc_render_device() + if render_device: + # Verify it supports AV1 + try: + result = subprocess.run( + ["vainfo", "--display", "drm", "--device", render_device], + capture_output=True, + text=True, + timeout=5 + ) + if result.returncode == 0 and "Intel" in result.stdout: + # Check if it supports AV1 + if "AV1" in result.stdout or "av1" in result.stdout.lower(): + return True, "Intel Arc GPU (VAAPI with AV1 support)", render_device + except (FileNotFoundError, subprocess.TimeoutExpired): + pass + + return False, "No Intel Arc GPU detected", None + except Exception as e: + return False, f"Error detecting GPU: {e}", None + + +def enable_intel_arc_hardware_acceleration(render_device: Optional[str] = None) -> None: + """ + Enable Intel Arc GPU hardware acceleration via VAAPI. 
+ + This function sets environment variables that force FFmpeg/decord to use the Intel Arc GPU + for hardware-accelerated video decoding. The iHD driver will automatically select the + Intel Arc GPU when LIBVA_DRIVER_NAME is set to "iHD". + + Args: + render_device: Path to the render device (e.g., /dev/dri/renderD128). + If None, will try to detect it automatically. + Note: libva typically auto-selects the correct device, but specifying + it explicitly ensures the right GPU is used. + """ + # Remove software-only restrictions + os.environ.pop("DECORD_CPU_ONLY", None) + os.environ.pop("FFMPEG_HWACCEL", None) + os.environ.pop("AVCODEC_FORCE_SOFTWARE", None) + + # Enable VAAPI hardware acceleration for Intel Arc + # Setting LIBVA_DRIVER_NAME to "iHD" forces libva to use the Intel HD Graphics driver + # which supports Intel Arc GPUs and AV1 hardware acceleration + os.environ["LIBVA_DRIVER_NAME"] = "iHD" + # Don't set LIBVA_DRIVERS_PATH to allow system to find the driver automatically + os.environ.pop("LIBVA_DRIVERS_PATH", None) + + # Enable hardware acceleration in FFmpeg (used by decord) + os.environ["FFMPEG_HWACCEL"] = "vaapi" + + # Specify the render device explicitly to ensure we use the Intel Arc GPU + # libva will use this device when initializing the iHD driver + if render_device: + # Some systems respect these environment variables for device selection + os.environ["LIBVA_DRIVER_DEVICE"] = render_device + # Alternative variable name that some tools use + os.environ["VAAPI_DEVICE"] = render_device + else: + # Try to find the device automatically + detected_device = find_intel_arc_render_device() + if detected_device: + os.environ["LIBVA_DRIVER_DEVICE"] = detected_device + os.environ["VAAPI_DEVICE"] = detected_device + + # Suppress FFmpeg report messages + os.environ["FFREPORT"] = "file=/dev/null:level=0" + + +def enable_software_decoding() -> None: + """ + Enable software-only decoding by setting environment variables. 
+ + These environment variables are set at the Python process level and will override + any system-wide or user-level settings. They take effect for: + - The current Python process + - All child processes (including FFmpeg subprocesses) + - OpenCV's FFmpeg backend + - Decord's FFmpeg backend + + Note: Process-level environment variables (set via os.environ) have the highest + precedence and will override system/user settings. + + This function aggressively disables ALL hardware acceleration, including Intel Arc GPU. + """ + # Force software decoding - these override any system/user settings + os.environ["DECORD_CPU_ONLY"] = "1" + os.environ["FFMPEG_HWACCEL"] = "none" + + # Aggressively disable VAAPI (Video Acceleration API) completely + # Clear any Intel Arc GPU settings that might have been set + os.environ["LIBVA_DRIVERS_PATH"] = "/dev/null" # Invalid path disables VAAPI + os.environ["LIBVA_DRIVER_NAME"] = "" # Clear driver name (removes iHD setting) + os.environ.pop("LIBVA_DRIVER_DEVICE", None) # Remove device setting + os.environ.pop("VAAPI_DEVICE", None) # Remove alternative device setting + + # Force software decoding in libavcodec (FFmpeg's codec library) + os.environ["AVCODEC_FORCE_SOFTWARE"] = "1" + + # Suppress FFmpeg logging (warnings about hardware acceleration failures) + os.environ["FFREPORT"] = "file=/dev/null:level=0" + os.environ["FFMPEG_LOGLEVEL"] = "error" # Only show errors, suppress warnings + + # Additional FFmpeg options to prevent hardware acceleration attempts + os.environ["FFMPEG_HWACCEL_DEVICE"] = "" + + # Explicitly disable all hardware acceleration methods + os.environ["FFMPEG_HWACCEL_OUTPUT_FORMAT"] = "" + + +def disable_software_decoding() -> None: + """Disable software-only decoding by removing environment variables.""" + os.environ.pop("DECORD_CPU_ONLY", None) + os.environ.pop("FFMPEG_HWACCEL", None) + os.environ.pop("LIBVA_DRIVERS_PATH", None) + os.environ.pop("LIBVA_DRIVER_NAME", None) + os.environ.pop("AVCODEC_FORCE_SOFTWARE", 
None) + os.environ.pop("FFREPORT", None) + + +def is_av1_hardware_error(error_msg: str) -> bool: + """Check if error is related to AV1 hardware acceleration failure.""" + error_lower = error_msg.lower() + return ( + "av1" in error_lower and + ("failed to get pixel format" in error_lower or + "doesn't suppport hardware accelerated" in error_lower or + "hardware accelerated" in error_lower) + ) + + +def probe_video_streams(video_path: str) -> Tuple[bool, Optional[int], Optional[str]]: + """ + Probe video file to find the correct video stream index. + Returns (success, video_stream_index, error_message). + """ + try: + cmd = [ + "ffprobe", + "-v", "error", + "-select_streams", "v", + "-show_entries", "stream=index,codec_type", + "-of", "json", + video_path + ] + result = subprocess.run(cmd, capture_output=True, text=True, timeout=10) + + if result.returncode != 0: + return False, None, f"ffprobe failed: {result.stderr}" + + data = json.loads(result.stdout) + streams = data.get("streams", []) + + # Find the first video stream + for stream in streams: + if stream.get("codec_type") == "video": + stream_index = stream.get("index") + if stream_index is not None: + return True, int(stream_index), None + + return False, None, "No video stream found in file" + except subprocess.TimeoutExpired: + return False, None, "ffprobe timed out" + except json.JSONDecodeError as e: + return False, None, f"Failed to parse ffprobe output: {e}" + except Exception as e: + return False, None, f"Error probing video: {e}" + + +def validate_video_file(video_path: str) -> Tuple[bool, Optional[str]]: + """ + Validate that a video file can be opened with OpenCV. + Returns (is_valid, error_message). 
+ """ + if not os.path.exists(video_path): + return False, f"Video file does not exist: {video_path}" + + if not os.path.isfile(video_path): + return False, f"Path is not a file: {video_path}" + + cap = cv2.VideoCapture(video_path) + if not cap.isOpened(): + cap.release() + return False, f"OpenCV cannot open video file: {video_path}" + + frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + fps = cap.get(cv2.CAP_PROP_FPS) + cap.release() + + if frame_count <= 0: + return False, f"Video file has no frames: {video_path}" + + if fps <= 0: + return False, f"Video file has invalid FPS: {video_path}" + + return True, None + + +def fetch_frames_opencv(video_path: str, chunk: List[int], params: Dict[str, Any]) -> List[np.ndarray]: + """ + Fetch frames using OpenCV as fallback when decord fails. + Software decoding is enforced via environment variables set before calling this function. + FFmpeg warnings about AV1 hardware acceleration are suppressed (FFmpeg will fall back to software). + + IMPORTANT: FFmpeg has BUILT-IN automatic fallback to software decoding. + Even if environment variables are ignored, FFmpeg will: + 1. Try hardware acceleration first (if available) + 2. If hardware fails, automatically fall back to software decoding + 3. Continue processing successfully with software decoding + + The AV1 hardware warnings you see are just warnings - FFmpeg continues with software decoding. 
+ """ + frames_gray = [] + target_width = 512 if params.get("vr_mode") else 256 + target_height = 512 if params.get("vr_mode") else 256 + + # Ensure software decoding is enforced (in case it wasn't set properly) + # This is a safety check - environment variables should already be set + enable_software_decoding() + + # Suppress FFmpeg stderr output (these AV1 hardware errors are harmless - FFmpeg falls back to software) + import sys + import io + old_stderr = sys.stderr + suppressed_stderr = io.StringIO() + + try: + # Temporarily redirect stderr to suppress FFmpeg AV1 hardware warnings + # FFmpeg will try hardware first, fail, then AUTOMATICALLY fall back to software - these are just warnings + sys.stderr = suppressed_stderr + + # Try opening with OpenCV - use CAP_FFMPEG backend explicitly and disable hardware acceleration + # OpenCV's VideoCapture may need explicit backend selection to respect our environment variables + cap = cv2.VideoCapture(video_path, cv2.CAP_FFMPEG) + if not cap.isOpened(): + # Try without explicit backend + cap = cv2.VideoCapture(video_path) + if not cap.isOpened(): + # Restore stderr before returning + sys.stderr = old_stderr + suppressed_stderr.close() + return frames_gray + + try: + frames_read = 0 + frames_failed = 0 + + # For AV1 videos, seeking can be unreliable. 
Try sequential reading if seeking fails + # Sort chunk indices to read sequentially when possible + sorted_chunk = sorted(chunk) + current_pos = -1 + + for frame_idx in sorted_chunk: + # Try seeking first + if current_pos != frame_idx - 1: + # Need to seek + cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx) + current_pos = frame_idx + else: + # Sequential read - more reliable for AV1 + current_pos += 1 + + # Try reading frame - FFmpeg will automatically use software if hardware fails + ret, frame = cap.read() + if not ret or frame is None: + frames_failed += 1 + # If seeking failed, try sequential reading from start + if current_pos != frame_idx: + # Reset and read sequentially + cap.set(cv2.CAP_PROP_POS_FRAMES, 0) + for i in range(frame_idx + 1): + ret, frame = cap.read() + if not ret or frame is None: + break + if not ret or frame is None: + continue + else: + # Already sequential, just try one more read + ret, frame = cap.read() + if not ret or frame is None: + continue + + frames_read += 1 + current_pos = frame_idx + + # Resize frame + frame_resized = cv2.resize(frame, (target_width, target_height), interpolation=cv2.INTER_LINEAR) + + # Convert to grayscale + if params.get("vr_mode"): + h, w = frame_resized.shape[:2] + gray = cv2.cvtColor(frame_resized[h // 2:, :w // 2], cv2.COLOR_BGR2GRAY) + else: + gray = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY) + + frames_gray.append(gray) + + # If we read frames successfully, FFmpeg's automatic fallback worked + # (The AV1 warnings are harmless - FFmpeg fell back to software automatically) + # Log diagnostic info if we failed to read frames + if frames_read == 0 and len(chunk) > 0: + # This would indicate an actual problem, not just warnings + # The AV1 warnings are harmless, but if we can't read frames, there's a real issue + pass # Will return empty list, which is handled by caller + finally: + cap.release() + finally: + # Restore stderr + sys.stderr = old_stderr + # Discard suppressed output (FFmpeg AV1 hardware 
warnings) + suppressed_stderr.close() + + return frames_gray + + +def fetch_frames(video_path, chunk, params): + """Fetch and preprocess frames from video.""" + frames_gray = [] + vr: Optional[VideoReader] = None + target_width = 512 if params.get("vr_mode") else 256 + target_height = 512 if params.get("vr_mode") else 256 + + # Try multiple strategies for VideoReader initialization + initialization_strategies = [ + # Strategy 1: With width/height (preferred for performance) + {"width": target_width, "height": target_height, "num_threads": params["threads"]}, + # Strategy 2: Without width/height (will resize frames manually) + {"num_threads": params["threads"]}, + # Strategy 3: Lower resolution + {"width": target_width // 2, "height": target_height // 2, "num_threads": params["threads"]}, + # Strategy 4: Single thread + {"width": target_width, "height": target_height, "num_threads": 1}, + # Strategy 5: Minimal parameters + {}, + ] + + batch_frames = None + needs_resize = False + av1_hardware_error_detected = False + + # First attempt: Try with current settings (hardware acceleration if enabled) + for strategy in initialization_strategies: + vr = None + try: + vr = VideoReader(video_path, ctx=cpu(0), **strategy) + batch_frames = vr.get_batch(chunk).asnumpy() + # Check if we got frames without the desired size + if batch_frames.size > 0 and "width" not in strategy: + needs_resize = True + # Success - break out of loop, vr will be cleaned up after processing + break + except Exception as e: + error_msg = str(e) + # Check if this is an AV1 hardware acceleration error + if is_av1_hardware_error(error_msg): + av1_hardware_error_detected = True + break # Exit to try software decoding + # Failed with this strategy, try next one + if vr is not None: + vr = None + continue + + # If AV1 hardware acceleration failed, retry with software decoding + if batch_frames is None and av1_hardware_error_detected: + enable_software_decoding() + # Retry all strategies with software 
decoding + for strategy in initialization_strategies: + vr = None + try: + vr = VideoReader(video_path, ctx=cpu(0), **strategy) + batch_frames = vr.get_batch(chunk).asnumpy() + # Check if we got frames without the desired size + if batch_frames.size > 0 and "width" not in strategy: + needs_resize = True + # Success - break out of loop + break + except Exception: + # Failed with this strategy, try next one + if vr is not None: + vr = None + continue + + # Clean up VideoReader after getting frames + if vr is not None: + vr = None + gc.collect() + + if batch_frames is None or batch_frames.size == 0: + return frames_gray + + for f in batch_frames: + # Resize if needed (when VideoReader was initialized without width/height) + if needs_resize: + f = cv2.resize(f, (target_width, target_height), interpolation=cv2.INTER_LINEAR) + + if params.get("vr_mode"): + h, w, _ = f.shape + gray = cv2.cvtColor(f[h // 2:, :w // 2], cv2.COLOR_RGB2GRAY) + else: + gray = cv2.cvtColor(f, cv2.COLOR_RGB2GRAY) + frames_gray.append(gray) + + return frames_gray + + +# ----------------- OpenSimplex Noise Generator ----------------- + +class OpenSimplex: + """OpenSimplex noise generator for smooth, natural random motion.""" + PSIZE = 2048 + PMASK = PSIZE - 1 + + def __init__(self, seed=None): + if seed is None: + seed = random.randint(0, 2**63 - 1) + + self._perm = [0] * self.PSIZE + self._grad = [(0.0, 0.0)] * self.PSIZE + + grad_base = [ + (0.130526192220052, 0.991444861373810), + (0.382683432365090, 0.923879532511287), + (0.608761429008721, 0.793353340291235), + (0.793353340291235, 0.608761429008721), + (0.923879532511287, 0.382683432365090), + (0.991444861373810, 0.130526192220051), + (0.991444861373810, -0.130526192220051), + (0.923879532511287, -0.382683432365090), + (0.793353340291235, -0.608761429008720), + (0.608761429008721, -0.793353340291235), + (0.382683432365090, -0.923879532511287), + (0.130526192220052, -0.991444861373810), + (-0.130526192220052, -0.991444861373810), + 
(-0.382683432365090, -0.923879532511287), + (-0.608761429008721, -0.793353340291235), + (-0.793353340291235, -0.608761429008721), + (-0.923879532511287, -0.382683432365090), + (-0.991444861373810, -0.130526192220052), + (-0.991444861373810, 0.130526192220051), + (-0.923879532511287, 0.382683432365090), + (-0.793353340291235, 0.608761429008721), + (-0.608761429008721, 0.793353340291235), + (-0.382683432365090, 0.923879532511287), + (-0.130526192220052, 0.991444861373810) + ] + + n = 0.05481866495625118 + self._grad_lookup = [(dx / n, dy / n) for dx, dy in grad_base] + + source = list(range(self.PSIZE)) + for i in range(self.PSIZE - 1, -1, -1): + seed = (seed * 6364136223846793005 + 1442695040888963407) & 0xFFFFFFFFFFFFFFFF + r = int((seed + 31) % (i + 1)) + if r < 0: + r += i + 1 + self._perm[i] = source[r] + self._grad[i] = self._grad_lookup[self._perm[i] % len(self._grad_lookup)] + source[r] = source[i] + + def calculate_2d(self, x, y): + s = 0.366025403784439 * (x + y) + return self._calculate_2d_impl(x + s, y + s) + + def calculate_2d_octaves(self, x, y, octaves=1, persistence=1.0, lacunarity=1.0): + frequency = 1.0 + amplitude = 1.0 + total_value = 0.0 + total_amplitude = 0.0 + + for _ in range(octaves): + total_value += self.calculate_2d(x * frequency, y * frequency) * amplitude + total_amplitude += amplitude + amplitude *= persistence + frequency *= lacunarity + + return total_value / total_amplitude if total_amplitude > 0 else 0 + + def _calculate_2d_impl(self, xs, ys): + xsb = int(math.floor(xs)) + ysb = int(math.floor(ys)) + xsi = xs - xsb + ysi = ys - ysb + + a = int(xsi + ysi) + + ssi = (xsi + ysi) * -0.211324865405187 + xi = xsi + ssi + yi = ysi + ssi + + value = 0.0 + + value += self._contribute(xsb, ysb, xi, yi) + value += self._contribute(xsb + 1, ysb + 1, xi - 1 + 2 * 0.211324865405187, yi - 1 + 2 * 0.211324865405187) + + if a == 0: + value += self._contribute(xsb + 1, ysb, xi - 1 + 0.211324865405187, yi + 0.211324865405187) + value += 
self._contribute(xsb, ysb + 1, xi + 0.211324865405187, yi - 1 + 0.211324865405187) + else: + value += self._contribute(xsb + 2, ysb + 1, xi - 2 + 3 * 0.211324865405187, yi - 1 + 3 * 0.211324865405187) + value += self._contribute(xsb + 1, ysb + 2, xi - 1 + 3 * 0.211324865405187, yi - 2 + 3 * 0.211324865405187) + + return value + + def _contribute(self, xsb, ysb, dx, dy): + attn = 2.0 / 3.0 - dx * dx - dy * dy + if attn <= 0: + return 0 + + pxm = xsb & self.PMASK + pym = ysb & self.PMASK + grad = self._grad[self._perm[pxm] ^ pym] + extrapolation = grad[0] * dx + grad[1] * dy + + attn *= attn + return attn * attn * extrapolation + + +# ----------------- Multi-Axis Generation ----------------- + +MULTI_AXIS_CONFIG = { + "surge": { + "name": "L1", + "friendly_name": "Forward/Backward", + "file_suffix": "surge", + "default_value": 50, + "phase_offset": 0.25, + }, + "sway": { + "name": "L2", + "friendly_name": "Left/Right", + "file_suffix": "sway", + "default_value": 50, + "phase_offset": 0.5, + }, + "twist": { + "name": "R0", + "friendly_name": "Twist", + "file_suffix": "twist", + "default_value": 50, + "phase_offset": 0.0, + }, + "roll": { + "name": "R1", + "friendly_name": "Roll", + "file_suffix": "roll", + "default_value": 50, + "phase_offset": 0.33, + }, + "pitch": { + "name": "R2", + "friendly_name": "Pitch", + "file_suffix": "pitch", + "default_value": 50, + "phase_offset": 0.66, + }, +} + + +class MultiAxisGenerator: + """Generates secondary axis funscripts from primary L0 (stroke) data.""" + + def __init__(self, settings): + self.settings = settings + self.intensity = settings.get("multi_axis_intensity", 0.5) + self.random_speed = settings.get("random_speed", 0.3) + self.smart_limit = settings.get("smart_limit", True) + self.auto_home_delay = settings.get("auto_home_delay", 1.0) + self.auto_home_duration = settings.get("auto_home_duration", 0.5) + + self.noise_generators = { + axis_name: OpenSimplex(seed=hash(axis_name) & 0xFFFFFFFF) + for axis_name in 
MULTI_AXIS_CONFIG.keys() + } + + def generate_all_axes(self, l0_actions, fps, log_func=None): + if not l0_actions or len(l0_actions) < 2: + return {} + + activity_data = self._analyze_activity(l0_actions) + + results = {} + for axis_name, axis_config in MULTI_AXIS_CONFIG.items(): + if log_func: + log_func(f"Generating {axis_config['friendly_name']} ({axis_config['name']}) axis...") + + axis_actions = self._generate_axis( + axis_name, + axis_config, + l0_actions, + activity_data, + fps + ) + + axis_actions = self._apply_auto_home(axis_actions, activity_data, axis_config) + results[axis_name] = axis_actions + + if log_func: + log_func(f" Generated {len(axis_actions)} actions for {axis_config['file_suffix']}") + + return results + + def _analyze_activity(self, l0_actions): + velocities = [] + activity_levels = [] + + for i in range(len(l0_actions)): + if i == 0: + velocities.append(0) + else: + dt = (l0_actions[i]["at"] - l0_actions[i-1]["at"]) / 1000.0 + if dt > 0: + dp = abs(l0_actions[i]["pos"] - l0_actions[i-1]["pos"]) + velocities.append(dp / dt) + else: + velocities.append(0) + + max_vel = max(velocities) if velocities else 1 + if max_vel > 0: + activity_levels = [min(1.0, v / max_vel) for v in velocities] + else: + activity_levels = [0] * len(velocities) + + window_size = min(5, len(activity_levels)) + smoothed_activity = [] + for i in range(len(activity_levels)): + start = max(0, i - window_size // 2) + end = min(len(activity_levels), i + window_size // 2 + 1) + smoothed_activity.append(sum(activity_levels[start:end]) / (end - start)) + + idle_periods = [] + idle_threshold = 0.1 + min_idle_duration_ms = self.auto_home_delay * 1000 + + idle_start = None + for i, (action, activity) in enumerate(zip(l0_actions, smoothed_activity)): + if activity < idle_threshold: + if idle_start is None: + idle_start = action["at"] + else: + if idle_start is not None: + idle_duration = action["at"] - idle_start + if idle_duration >= min_idle_duration_ms: + 
idle_periods.append((idle_start, action["at"])) + idle_start = None + + if idle_start is not None and l0_actions: + idle_duration = l0_actions[-1]["at"] - idle_start + if idle_duration >= min_idle_duration_ms: + idle_periods.append((idle_start, l0_actions[-1]["at"])) + + return { + "velocities": velocities, + "activity_levels": smoothed_activity, + "idle_periods": idle_periods + } + + def _generate_axis(self, axis_name, axis_config, l0_actions, activity_data, fps): + noise = self.noise_generators[axis_name] + phase_offset = axis_config["phase_offset"] + default_value = axis_config["default_value"] + + actions = [] + + for i, l0_action in enumerate(l0_actions): + timestamp_ms = l0_action["at"] + time_sec = timestamp_ms / 1000.0 + + noise_x = time_sec * self.random_speed + phase_offset * 10 + noise_y = time_sec * self.random_speed * 0.7 + phase_offset * 5 + + noise_value = noise.calculate_2d_octaves( + noise_x, noise_y, + octaves=2, + persistence=0.5, + lacunarity=2.0 + ) + + raw_pos = default_value + noise_value * 50 * self.intensity + + if self.smart_limit and i < len(activity_data["activity_levels"]): + activity = activity_data["activity_levels"][i] + deviation = raw_pos - default_value + raw_pos = default_value + deviation * activity + + pos = int(round(max(0, min(100, raw_pos)))) + actions.append({"at": timestamp_ms, "pos": pos}) + + return actions + + def _apply_auto_home(self, actions, activity_data, axis_config): + if not actions or not activity_data["idle_periods"]: + return actions + + default_value = axis_config["default_value"] + home_duration_ms = self.auto_home_duration * 1000 + + result_actions = [] + idle_periods = activity_data["idle_periods"] + + for action in actions: + timestamp_ms = action["at"] + + in_idle = False + for idle_start, idle_end in idle_periods: + if idle_start <= timestamp_ms <= idle_end: + in_idle = True + idle_progress = (timestamp_ms - idle_start) / home_duration_ms + idle_progress = min(1.0, idle_progress) + + ease = 1 - (1 - 
idle_progress) ** 2 + current_pos = action["pos"] + homed_pos = int(round(current_pos + (default_value - current_pos) * ease)) + + result_actions.append({"at": timestamp_ms, "pos": homed_pos}) + break + + if not in_idle: + result_actions.append(action) + + return result_actions + + def save_axis_funscript(self, base_path, axis_name, actions, log_func=None): + axis_config = MULTI_AXIS_CONFIG.get(axis_name) + if not axis_config: + return False + + output_path = f"{base_path}.{axis_config['file_suffix']}.funscript" + + funscript = { + "version": "1.0", + "inverted": False, + "range": 100, + "actions": actions + } + + try: + with open(output_path, "w") as f: + json.dump(funscript, f, indent=2) + if log_func: + log_func(f"Multi-axis funscript saved: {output_path}") + return True + except Exception as e: + if log_func: + log_func(f"ERROR: Could not save {output_path}: {e}") + return False + + +# ----------------- Main Processing Function ----------------- + +def process_video(video_path: str, params: Dict[str, Any], log_func: Callable, + progress_callback: Optional[Callable] = None, + cancel_flag: Optional[Callable] = None) -> bool: + """ + Process a video file and generate funscript. + Returns True if an error occurred, False otherwise. 
+ """ + error_occurred = False + base, _ = os.path.splitext(video_path) + output_path = base + ".funscript" + + if os.path.exists(output_path) and not params.get("overwrite", False): + log_func(f"Skipping: output file exists ({output_path})") + return error_occurred + + # Validate video file before attempting to process + is_valid, validation_error = validate_video_file(video_path) + if not is_valid: + log_func(f"ERROR: Video validation failed: {validation_error}") + return True + + log_func(f"Processing video: {video_path}") + + # Probe video to get stream information + probe_success, video_stream_index, probe_error = probe_video_streams(video_path) + if probe_success and video_stream_index is not None: + log_func(f"Found video stream at index {video_stream_index}") + + # Detect Intel Arc GPU and configure hardware acceleration + intel_arc_detected, arc_info, render_device = detect_intel_arc_gpu() + if intel_arc_detected: + log_func(f"Intel Arc GPU detected: {arc_info}") + if render_device: + log_func(f"Using render device: {render_device}") + log_func("Configuring hardware acceleration for Intel Arc AV1 decoding...") + log_func("Note: If hardware acceleration fails, FFmpeg will AUTOMATICALLY fall back to software decoding") + enable_intel_arc_hardware_acceleration(render_device) + else: + log_func(f"No Intel Arc GPU detected ({arc_info}), using software decoding") + enable_software_decoding() + + # Try multiple initialization strategies for decord VideoReader + vr: Optional[VideoReader] = None + initialization_strategies = [ + # Strategy 1: With width/height (original approach) + {"width": 1024, "height": 1024, "num_threads": params["threads"]}, + # Strategy 2: Without width/height (native resolution, resize later) + {"num_threads": params["threads"]}, + # Strategy 3: Lower resolution + {"width": 512, "height": 512, "num_threads": params["threads"]}, + # Strategy 4: Single thread + {"width": 1024, "height": 1024, "num_threads": 1}, + # Strategy 5: No parameters 
(minimal) + {}, + ] + + last_error: Optional[str] = None + av1_hardware_error_detected = False + + # First attempt: Try with detected configuration (Intel Arc if detected, otherwise software) + if intel_arc_detected: + log_func("Attempting to open video with Intel Arc hardware acceleration...") + else: + log_func("Attempting to open video with software decoding...") + + for i, strategy in enumerate(initialization_strategies): + try: + log_func(f"Trying VideoReader initialization strategy {i+1}/{len(initialization_strategies)}...") + vr = VideoReader(video_path, ctx=cpu(0), **strategy) + # Test that we can actually read properties + _ = len(vr) + _ = vr.get_avg_fps() + if intel_arc_detected: + log_func(f"Successfully opened video with Intel Arc hardware acceleration using strategy {i+1}") + else: + log_func(f"Successfully opened video with software decoding using strategy {i+1}") + break + except Exception as e: + error_msg = str(e) + last_error = error_msg + + # Check if this is an AV1 hardware acceleration error + if is_av1_hardware_error(error_msg): + av1_hardware_error_detected = True + log_func(f"AV1 hardware acceleration error detected, will fallback to software decoding") + break # Exit loop to try software decoding + + if "cannot find video stream" in error_msg or "st_nb" in error_msg: + # This is the specific error we're trying to fix, try next strategy + continue + else: + # Other errors, try next strategy + continue + + # If AV1 hardware acceleration failed, retry with software decoding + if vr is None and av1_hardware_error_detected: + log_func("Falling back to software decoding due to AV1 hardware acceleration issues...") + enable_software_decoding() + + # Retry all strategies with software decoding + for i, strategy in enumerate(initialization_strategies): + try: + log_func(f"Trying VideoReader initialization strategy {i+1}/{len(initialization_strategies)} (software decoding)...") + vr = VideoReader(video_path, ctx=cpu(0), **strategy) + # Test that we 
can actually read properties + _ = len(vr) + _ = vr.get_avg_fps() + log_func(f"Successfully opened video with software decoding using strategy {i+1}") + break + except Exception as e: + error_msg = str(e) + last_error = error_msg + if "cannot find video stream" in error_msg or "st_nb" in error_msg: + continue + else: + continue + + # If decord completely failed, try OpenCV as a fallback + use_opencv_fallback = False + if vr is None: + error_msg = last_error or "Unknown error" + log_func(f"WARNING: Decord failed to open video with all strategies.") + log_func(f"WARNING: Last decord error: {error_msg}") + if probe_success: + log_func(f"WARNING: Video has valid stream at index {video_stream_index}, but decord cannot access it.") + log_func("Attempting to use OpenCV as fallback (slower but more compatible)...") + # CRITICAL: Force software decoding when using OpenCV fallback + # This MUST override any Intel Arc GPU hardware acceleration settings that were enabled earlier + log_func("Disabling all hardware acceleration for OpenCV fallback (including Intel Arc GPU)...") + enable_software_decoding() + # Double-check that hardware acceleration is completely disabled + # Clear any Intel Arc GPU settings that might persist + if os.environ.get("LIBVA_DRIVER_NAME"): + log_func(f"Clearing LIBVA_DRIVER_NAME (was: '{os.environ.get('LIBVA_DRIVER_NAME')}')") + os.environ["LIBVA_DRIVER_NAME"] = "" + if os.environ.get("FFMPEG_HWACCEL") != "none": + log_func(f"Setting FFMPEG_HWACCEL to 'none' (was: '{os.environ.get('FFMPEG_HWACCEL')}')") + os.environ["FFMPEG_HWACCEL"] = "none" + # Remove any device-specific settings + os.environ.pop("LIBVA_DRIVER_DEVICE", None) + os.environ.pop("VAAPI_DEVICE", None) + use_opencv_fallback = True + + # Get video properties + if use_opencv_fallback: + # Use OpenCV to get video properties + # Ensure software decoding is enforced (safety check) + enable_software_decoding() + + # Suppress FFmpeg stderr warnings during property reading + import sys + import 
io + old_stderr = sys.stderr + suppressed_stderr = io.StringIO() + + try: + sys.stderr = suppressed_stderr + cap = cv2.VideoCapture(video_path) + if not cap.isOpened(): + log_func(f"ERROR: OpenCV fallback also failed to open video: {video_path}") + return True + + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + fps = cap.get(cv2.CAP_PROP_FPS) + cap.release() + finally: + sys.stderr = old_stderr + suppressed_stderr.close() + + if total_frames <= 0: + log_func(f"ERROR: Video has no frames: {video_path}") + return True + + if fps <= 0: + log_func(f"ERROR: Video has invalid FPS: {video_path}") + return True + + log_func(f"Using OpenCV fallback for video reading") + else: + # Use decord video properties + try: + total_frames = len(vr) + fps = vr.get_avg_fps() + + if total_frames <= 0: + log_func(f"ERROR: Video has no frames: {video_path}") + return True + + if fps <= 0: + log_func(f"ERROR: Video has invalid FPS: {video_path}") + return True + except Exception as e: + log_func(f"ERROR: Unable to read video properties: {e}") + return True + + step = max(1, int(math.ceil(fps / 15.0))) + effective_fps = fps / step + indices = list(range(0, total_frames, step)) + log_func(f"FPS: {fps:.2f}; downsampled to ~{effective_fps:.2f} fps; {len(indices)} frames selected.") + + step = max(1, int(math.ceil(fps / 30.0))) + indices = list(range(0, total_frames, step)) + bracket_size = int(params.get("batch_size", 3000)) + + # Store whether to use OpenCV fallback in params for fetch_frames + params["use_opencv_fallback"] = use_opencv_fallback + + final_flow_list = [] + next_batch = None + fetch_thread = None + + for chunk_start in range(0, len(indices), bracket_size): + if cancel_flag and cancel_flag(): + log_func("Processing cancelled by user.") + return error_occurred + + chunk = indices[chunk_start:chunk_start + bracket_size] + frame_indices = chunk[:-1] + if len(chunk) < 2: + continue + + if fetch_thread: + fetch_thread.join() + if params.get("use_opencv_fallback"): + 
frames_gray = next_batch if next_batch is not None else fetch_frames_opencv(video_path, chunk, params) + else: + frames_gray = next_batch if next_batch is not None else fetch_frames(video_path, chunk, params) + next_batch = None + else: + if params.get("use_opencv_fallback"): + frames_gray = fetch_frames_opencv(video_path, chunk, params) + else: + frames_gray = fetch_frames(video_path, chunk, params) + + if not frames_gray: + log_func(f"ERROR: Unable to fetch frames for chunk {chunk_start} - skipping.") + # If this is a critical chunk and we have no data, we might need to abort + if len(final_flow_list) == 0 and chunk_start == 0: + log_func("ERROR: Failed to fetch initial frames - cannot continue processing") + # Add diagnostic information + if params.get("use_opencv_fallback"): + log_func("DEBUG: Using OpenCV fallback - checking if video can be opened...") + import sys + import io + old_stderr = sys.stderr + suppressed_stderr = io.StringIO() + try: + sys.stderr = suppressed_stderr + test_cap = cv2.VideoCapture(video_path) + if test_cap.isOpened(): + test_frame_count = int(test_cap.get(cv2.CAP_PROP_FRAME_COUNT)) + test_fps = test_cap.get(cv2.CAP_PROP_FPS) + log_func(f"DEBUG: OpenCV can open video - frame_count={test_frame_count}, fps={test_fps}") + # Try reading a single frame + test_cap.set(cv2.CAP_PROP_POS_FRAMES, chunk[0] if chunk else 0) + ret, test_frame = test_cap.read() + if ret and test_frame is not None: + log_func(f"DEBUG: OpenCV can read frames - frame shape: {test_frame.shape}") + else: + log_func("DEBUG: OpenCV opened video but cannot read frames") + else: + log_func("DEBUG: OpenCV cannot open video file") + test_cap.release() + finally: + sys.stderr = old_stderr + suppressed_stderr.close() + return True + continue + + # Ensure we have at least 2 frames to create pairs + if len(frames_gray) < 2: + log_func(f"WARNING: Chunk {chunk_start} has insufficient frames ({len(frames_gray)}) - skipping.") + continue + + if chunk_start + bracket_size < 
len(indices): + next_chunk = indices[chunk_start + bracket_size:chunk_start + 2 * bracket_size] + def fetch_and_store(): + global next_batch + if params.get("use_opencv_fallback"): + next_batch = fetch_frames_opencv(video_path, next_chunk, params) + else: + next_batch = fetch_frames(video_path, next_chunk, params) + + fetch_thread = threading.Thread(target=fetch_and_store) + fetch_thread.start() + + pairs = list(zip(frames_gray[:-1], frames_gray[1:])) + + with Pool(processes=params["threads"]) as pool: + precomputed = pool.starmap(precompute_wrapper, [(p, params) for p in pairs]) + + final_centers = [] + for j, info in enumerate(precomputed): + center_list = [info["pos_center"]] + for i in range(1, 7): + if j - i >= 0: + center_list.append(precomputed[j - i]["pos_center"]) + if j + i < len(precomputed): + center_list.append(precomputed[j + i]["pos_center"]) + center_list = np.array(center_list) + center = np.mean(center_list, axis=0) + final_centers.append(center) + + with concurrent.futures.ProcessPoolExecutor(max_workers=params["threads"]) as ex: + dot_futures = [] + for j, info in enumerate(precomputed): + dot_futures.append(ex.submit( + radial_motion_weighted, + info["flow"], + final_centers[j], + info["cut"], + params.get("pov_mode", False), + params.get("balance_global", True) + )) + dot_vals = [f.result() for f in dot_futures] + + for j, dot_val in enumerate(dot_vals): + is_cut = precomputed[j]["cut"] + final_flow_list.append((dot_val, is_cut, frame_indices[j])) + + if progress_callback: + prog = min(100, int(100 * (chunk_start + len(chunk)) / len(indices))) + progress_callback(prog) + + # Piecewise Integration + if not final_flow_list: + log_func("ERROR: No flow data computed - video processing failed completely") + return True + + cum_flow = [0] + time_stamps = [final_flow_list[0][2]] + + for i in range(1, len(final_flow_list)): + flow_prev, cut_prev, t_prev = final_flow_list[i - 1] + flow_curr, cut_curr, t_curr = final_flow_list[i] + + if cut_curr: + 
cum_flow.append(0) + else: + mid_flow = (flow_prev + flow_curr) / 2 + cum_flow.append(cum_flow[-1] + mid_flow) + + time_stamps.append(t_curr) + + cum_flow = [(cum_flow[i] + cum_flow[i-1]) / 2 if i > 0 else cum_flow[i] for i in range(len(cum_flow))] + + # Detrending & Normalization + detrend_win = int(params["detrend_window"] * effective_fps) + disc_threshold = 1000 + + detrended_data = np.zeros_like(cum_flow) + weight_sum = np.zeros_like(cum_flow) + + disc_indices = np.where(np.abs(np.diff(cum_flow)) > disc_threshold)[0] + 1 + segment_boundaries = [0] + list(disc_indices) + [len(cum_flow)] + + overlap = detrend_win // 2 + + for i in range(len(segment_boundaries) - 1): + seg_start = segment_boundaries[i] + seg_end = segment_boundaries[i + 1] + seg_length = seg_end - seg_start + + if seg_length < 5: + detrended_data[seg_start:seg_end] = cum_flow[seg_start:seg_end] - np.mean(cum_flow[seg_start:seg_end]) + continue + if seg_length <= detrend_win: + segment = cum_flow[seg_start:seg_end] + x = np.arange(len(segment)) + trend = np.polyfit(x, segment, 1) + detrended_segment = segment - np.polyval(trend, x) + weights = np.hanning(len(segment)) + detrended_data[seg_start:seg_end] += detrended_segment * weights + weight_sum[seg_start:seg_end] += weights + else: + for start in range(seg_start, seg_end - overlap, overlap): + end = min(start + detrend_win, seg_end) + segment = cum_flow[start:end] + x = np.arange(len(segment)) + trend = np.polyfit(x, segment, 1) + detrended_segment = segment - np.polyval(trend, x) + weights = np.hanning(len(segment)) + detrended_data[start:end] += detrended_segment * weights + weight_sum[start:end] += weights + + detrended_data /= np.maximum(weight_sum, 1e-6) + + smoothed_data = np.convolve(detrended_data, [1/16, 1/4, 3/8, 1/4, 1/16], mode='same') + + norm_win = int(params["norm_window"] * effective_fps) + if norm_win % 2 == 0: + norm_win += 1 + half_norm = norm_win // 2 + norm_rolling = np.empty_like(smoothed_data) + for i in 
range(len(smoothed_data)): + start_idx = max(0, i - half_norm) + end_idx = min(len(smoothed_data), i + half_norm + 1) + local_window = smoothed_data[start_idx:end_idx] + local_min = local_window.min() + local_max = local_window.max() + if local_max - local_min == 0: + norm_rolling[i] = 50 + else: + norm_rolling[i] = (smoothed_data[i] - local_min) / (local_max - local_min) * 100 + + # Keyframe Reduction + if params.get("keyframe_reduction", True): + key_indices = [0] + for i in range(1, len(norm_rolling) - 1): + d1 = norm_rolling[i] - norm_rolling[i - 1] + d2 = norm_rolling[i + 1] - norm_rolling[i] + + if (d1 < 0) != (d2 < 0): + key_indices.append(i) + key_indices.append(len(norm_rolling) - 1) + else: + key_indices = range(len(norm_rolling)) + + actions = [] + for ki in key_indices: + try: + timestamp_ms = int(((time_stamps[ki]) / fps) * 1000) + pos = int(round(norm_rolling[ki])) + actions.append({"at": timestamp_ms, "pos": 100 - pos}) + except Exception as e: + log_func(f"Error computing action at segment index {ki}: {e}") + error_occurred = True + + log_func(f"Keyframe reduction: {len(actions)} actions computed.") + + funscript = {"version": "1.0", "actions": actions} + try: + with open(output_path, "w") as f: + json.dump(funscript, f, indent=2) + log_func(f"Funscript saved: {output_path}") + except Exception as e: + log_func(f"ERROR: Could not write output: {e}") + error_occurred = True + + # Generate multi-axis funscripts if enabled + if params.get("multi_axis", False) and actions: + log_func("Generating multi-axis funscripts...") + multi_gen = MultiAxisGenerator(params) + secondary_axes = multi_gen.generate_all_axes(actions, fps, log_func) + + for axis_name, axis_actions in secondary_axes.items(): + multi_gen.save_axis_funscript(base, axis_name, axis_actions, log_func) + + log_func(f"Multi-axis generation complete: {len(secondary_axes)} additional axes created.") + + return error_occurred + + +# ----------------- StashApp Integration ----------------- + +def 
initialize_stash(connection: Dict[str, Any]) -> None: + """Initialize the StashApp interface.""" + global stash + stash = StashInterface(connection) + + +def get_scenes_with_tag(tag_name: str) -> List[Dict[str, Any]]: + """Get all scenes that have a specific tag.""" + tag = stash.find_tag(tag_name, create=False) + if not tag: + log.warning(f"Tag '{tag_name}' not found") + return [] + + # Use fragment to limit fields and avoid fingerprint errors + # Only fetch id, files.path, and tags.id/name - avoiding problematic fragments + scenes = stash.find_scenes( + f={"tags": {"value": [tag["id"]], "modifier": "INCLUDES"}}, + filter={"per_page": -1}, + fragment="id files { path } tags { id name }" + ) + return scenes or [] + + +def remove_tag_from_scene(scene_id: str, tag_name: str) -> None: + """Remove a tag from a scene.""" + tag = stash.find_tag(tag_name, create=False) + if not tag: + return + + # Use fragment to limit fields and avoid fingerprint errors + scene = stash.find_scene(scene_id, fragment="id tags { id }") + if not scene: + return + + current_tags = [t["id"] for t in scene.get("tags", [])] + if tag["id"] in current_tags: + current_tags.remove(tag["id"]) + stash.update_scene({"id": scene_id, "tag_ids": current_tags}) + + +def add_tag_to_scene(scene_id: str, tag_name: str) -> None: + """Add a tag to a scene.""" + tag = stash.find_tag(tag_name, create=True) + if not tag: + return + + # Use fragment to limit fields and avoid fingerprint errors + scene = stash.find_scene(scene_id, fragment="id tags { id }") + if not scene: + return + + current_tags = [t["id"] for t in scene.get("tags", [])] + if tag["id"] not in current_tags: + current_tags.append(tag["id"]) + stash.update_scene({"id": scene_id, "tag_ids": current_tags}) + + +def is_vr_scene(scene: Dict[str, Any]) -> bool: + """Check if a scene is tagged as VR.""" + tags = scene.get("tags", []) + vr_tag_names = config.vr_tag_names if hasattr(config, 'vr_tag_names') else ["VR", "Virtual Reality"] + for tag in tags: + 
if tag.get("name", "").lower() in [t.lower() for t in vr_tag_names]: + return True + return False + + +def add_scene_marker(scene_id: str, title: str, seconds: float, tag_name: Optional[str] = None) -> None: + """Add a marker to a scene.""" + marker_data = { + "scene_id": scene_id, + "title": title, + "seconds": seconds, + } + + if tag_name: + tag = stash.find_tag(tag_name, create=True) + if tag: + marker_data["primary_tag_id"] = tag["id"] + + stash.create_scene_marker(marker_data) + + +def get_scene_file_path(scene: Dict[str, Any]) -> Optional[str]: + """Get the file path for a scene.""" + files = scene.get("files", []) + if files: + return files[0].get("path") + return None + + +# ----------------- Settings Helper ----------------- + +def get_plugin_setting(key: str, default: Any = None) -> Any: + """Get a plugin setting from StashApp, falling back to config file.""" + try: + settings = stash.get_configuration().get("plugins", {}).get("funscript_haven", {}) + if key in settings and settings[key] is not None: + return settings[key] + except Exception: + pass + + # Fall back to config file + return getattr(config, key, default) + + +def get_trigger_tag() -> str: + """Get the trigger tag name.""" + return get_plugin_setting("trigger_tag", "FunscriptHaven_Process") + + +def get_complete_tag() -> Optional[str]: + """Get the completion tag name.""" + tag = get_plugin_setting("complete_tag", "FunscriptHaven_Complete") + return tag if tag else None + + +def get_error_tag() -> str: + """Get the error tag name.""" + return get_plugin_setting("error_tag", "FunscriptHaven_Error") + + +# ----------------- Task Functions ----------------- + +def process_tagged_scenes() -> None: + """Process all scenes tagged with the trigger tag.""" + global total_tasks, completed_tasks + + trigger_tag = get_trigger_tag() + scenes = get_scenes_with_tag(trigger_tag) + if not scenes: + log.info(f"No scenes found with tag '{trigger_tag}'") + return + + total_tasks = len(scenes) + completed_tasks 
= 0 + log.info(f"Found {total_tasks} scenes to process") + log.progress(0.0) + + for scene in scenes: + scene_id = scene["id"] + video_path = get_scene_file_path(scene) + + if not video_path: + log.error(f"No file path for scene {scene_id}") + completed_tasks += 1 + continue + + if not os.path.exists(video_path): + log.error(f"Video file not found: {video_path}") + completed_tasks += 1 + continue + + # Build processing parameters from plugin settings (with config file fallback) + # Convert 0-10 integer settings to their actual decimal values + detrend_window_raw = get_plugin_setting('detrend_window', 2) # Default: 2 (was 1.5) + norm_window_raw = get_plugin_setting('norm_window', 4) # Default: 4 (was 4.0) + multi_axis_intensity_raw = get_plugin_setting('multi_axis_intensity', 5) # Default: 5 (was 0.5, scale 0-10) + random_speed_raw = get_plugin_setting('random_speed', 3) # Default: 3 (was 0.3, scale 0-10) + auto_home_delay_raw = get_plugin_setting('auto_home_delay', 1) # Default: 1 (was 1.0) + auto_home_duration_raw = get_plugin_setting('auto_home_duration', 1) # Default: 1 (was 0.5, rounded) + + params = { + "threads": int(get_plugin_setting('threads', os.cpu_count() or 4)), + "detrend_window": float(max(1, min(10, int(detrend_window_raw)))), # Clamp to 1-10 seconds + "norm_window": float(max(1, min(10, int(norm_window_raw)))), # Clamp to 1-10 seconds + "batch_size": int(get_plugin_setting('batch_size', 3000)), + "overwrite": bool(get_plugin_setting('overwrite', False)), + "keyframe_reduction": bool(get_plugin_setting('keyframe_reduction', True)), + "vr_mode": is_vr_scene(scene), + "pov_mode": bool(get_plugin_setting('pov_mode', False)), + "balance_global": bool(get_plugin_setting('balance_global', True)), + "multi_axis": bool(get_plugin_setting('multi_axis', False)), + "multi_axis_intensity": float(max(0, min(10, int(multi_axis_intensity_raw))) / 10.0), # Convert 0-10 to 0.0-1.0 + "random_speed": float(max(0, min(10, int(random_speed_raw))) / 10.0), # Convert 0-10 
to 0.0-1.0 + "auto_home_delay": float(max(0, min(10, int(auto_home_delay_raw)))), # Clamp to 0-10 seconds + "auto_home_duration": float(max(0, min(10, int(auto_home_duration_raw)))), # Clamp to 0-10 seconds + "smart_limit": bool(get_plugin_setting('smart_limit', True)), + } + + log.info(f"Processing scene {scene_id}: {video_path}") + + def progress_cb(prog: int) -> None: + scene_progress = prog / 100.0 + overall_progress = (completed_tasks + scene_progress) / total_tasks + log.progress(overall_progress) + + try: + error = process_video( + video_path, + params, + log.info, + progress_callback=progress_cb + ) + + if error: + log.error(f"Error processing scene {scene_id}") + add_tag_to_scene(scene_id, get_error_tag()) + else: + log.info(f"Successfully processed scene {scene_id}") + + # Add completion tag if configured + complete_tag = get_complete_tag() + if complete_tag: + add_tag_to_scene(scene_id, complete_tag) + + # Add scene marker if configured + if get_plugin_setting('add_marker', True): + add_scene_marker(scene_id, "Funscript Generated", 0, "Funscript") + + # Remove trigger tag + remove_tag_from_scene(scene_id, trigger_tag) + + except Exception as e: + log.error(f"Exception processing scene {scene_id}: {e}") + add_tag_to_scene(scene_id, get_error_tag()) + + completed_tasks += 1 + log.progress(completed_tasks / total_tasks) + + log.info(f"Completed processing {total_tasks} scenes") + log.progress(1.0) + + +# ----------------- Main Execution ----------------- + +def main() -> None: + """Main entry point for the plugin.""" + json_input = read_json_input() + output = {} + run(json_input, output) + out = json.dumps(output) + print(out + "\n") + + +def read_json_input() -> Dict[str, Any]: + """Read JSON input from stdin.""" + json_input = sys.stdin.read() + return json.loads(json_input) + + +def run(json_input: Dict[str, Any], output: Dict[str, Any]) -> None: + """Main execution logic.""" + plugin_args = None + try: + log.debug(json_input["server_connection"]) + 
os.chdir(json_input["server_connection"]["PluginDir"]) + initialize_stash(json_input["server_connection"]) + except Exception as e: + log.error(f"Failed to initialize: {e}") + output["output"] = "error" + return + + try: + plugin_args = json_input['args'].get("mode") + except (KeyError, TypeError): + pass + + if plugin_args == "process_scenes": + process_tagged_scenes() + output["output"] = "ok" + return + + # Default action: process tagged scenes + process_tagged_scenes() + output["output"] = "ok" + return + + +if __name__ == "__main__": + try: + main() + except KeyboardInterrupt: + log.info("Plugin interrupted by user") + except Exception as e: + log.error(f"Plugin failed: {e}") + sys.exit(1) diff --git a/plugins/FunscriptHaven/funscript_haven.yml b/plugins/FunscriptHaven/funscript_haven.yml new file mode 100644 index 00000000..e57bd799 --- /dev/null +++ b/plugins/FunscriptHaven/funscript_haven.yml @@ -0,0 +1,82 @@ +name: Funscript Haven +description: Generates funscript files from video scenes using optical flow analysis. Tag scenes with 'FunscriptHaven_Process' to queue them for processing. 
+version: 1.0.0 +url: https://discourse.stashapp.cc/t/funscript-haven/5124 +exec: + - python + - "{pluginDir}/funscript_haven.py" +interface: raw +tasks: + - name: Process Tagged Scenes + description: Process all scenes tagged with 'FunscriptHaven_Process' and generate funscript files + defaultArgs: + mode: process_scenes +settings: + trigger_tag: + displayName: Trigger Tag + description: Tag name that triggers processing (add this tag to scenes you want to process) + type: STRING + complete_tag: + displayName: Completion Tag + description: Tag name added when processing completes successfully (leave empty to disable) + type: STRING + error_tag: + displayName: Error Tag + description: Tag name added when an error occurs during processing + type: STRING + threads: + displayName: Threads + description: Number of threads for optical flow computation + type: NUMBER + detrend_window: + displayName: Detrend Window (1-10) + description: Controls drift removal aggressiveness. Higher values work better for stable cameras (integer 1-10) + type: NUMBER + norm_window: + displayName: Normalization Window (1-10) + description: Time window to calibrate motion range in seconds. Shorter values amplify motion (integer 1-10) + type: NUMBER + batch_size: + displayName: Batch Size (frames) + description: Number of frames to process per batch. Higher values are faster but use more RAM + type: NUMBER + overwrite: + displayName: Overwrite Existing + description: Overwrite existing funscript files + type: BOOLEAN + keyframe_reduction: + displayName: Keyframe Reduction + description: Enable keyframe reduction to reduce file size while maintaining quality + type: BOOLEAN + pov_mode: + displayName: POV Mode + description: Improves stability for POV videos + type: BOOLEAN + balance_global: + displayName: Balance Global Motion + description: Try to cancel out camera motion. 
Disable for scenes with no camera movement + type: BOOLEAN + multi_axis: + displayName: Multi-Axis Output + description: Generate additional funscript files for secondary axes (Roll, Pitch, Twist, Surge, Sway) + type: BOOLEAN + multi_axis_intensity: + displayName: Multi-Axis Intensity (0-10) + description: Intensity of secondary axis motion (0-10, where 10 = maximum intensity) + type: NUMBER + random_speed: + displayName: Random Speed (0-10) + description: Speed of random motion variation (0-10, where 10 = fastest) + type: NUMBER + auto_home_delay: + displayName: Auto Home Delay (0-10) + description: Seconds of inactivity before returning to center position (integer 0-10) + type: NUMBER + auto_home_duration: + displayName: Auto Home Duration (0-10) + description: Time to smoothly return to center position in seconds (integer 0-10) + type: NUMBER + add_marker: + displayName: Add Marker on Complete + description: Add a scene marker when funscript generation completes + type: BOOLEAN diff --git a/plugins/FunscriptHaven/funscript_haven_config.py b/plugins/FunscriptHaven/funscript_haven_config.py new file mode 100644 index 00000000..b00de874 --- /dev/null +++ b/plugins/FunscriptHaven/funscript_haven_config.py @@ -0,0 +1,88 @@ +""" +Funscript Haven - Configuration File +Edit these settings to customize the plugin behavior +""" + +# ----------------- Tag Configuration ----------------- + +# Tag name that triggers processing (add this tag to scenes you want to process) +trigger_tag = "FunscriptHaven_Process" + +# Tag name added when processing completes successfully (set to None to disable) +complete_tag = "FunscriptHaven_Complete" + +# Tag name added when an error occurs during processing +error_tag = "FunscriptHaven_Error" + +# Tag names that indicate a VR scene (case-insensitive) +vr_tag_names = ["VR", "Virtual Reality", "180°", "360°"] + + +# ----------------- Processing Settings ----------------- + +# Number of threads for optical flow computation (default: CPU count) 
+import os +threads = os.cpu_count() or 4 + +# Detrend window - controls drift removal aggressiveness (integer 1-10) +# Higher values work better for stable cameras (recommended: 1-10) +# Note: StashApp UI only accepts integers 0-10 +detrend_window = 2 + +# Normalization window in seconds - time window to calibrate motion range (integer 1-10) +# Shorter values amplify motion but may cause artifacts in long thrusts +# Note: StashApp UI only accepts integers 0-10 +norm_window = 4 + +# Batch size in frames - higher values are faster but use more RAM +batch_size = 3000 + +# Overwrite existing funscript files +overwrite = False + +# Enable keyframe reduction (reduces file size while maintaining quality) +keyframe_reduction = True + + +# ----------------- Mode Settings ----------------- + +# POV Mode - improves stability for POV videos +pov_mode = False + +# Balance Global Motion - tries to cancel out camera motion +# Disable for scenes with no camera movement +balance_global = True + + +# ----------------- Multi-Axis Settings ----------------- + +# Generate additional funscript files for secondary axes +# (Roll, Pitch, Twist, Surge, Sway) +multi_axis = False + +# Intensity of secondary axis motion (0-10, where 10 = maximum) +# Higher values = more movement +# Note: StashApp UI only accepts integers 0-10, converted to 0.0-1.0 internally +multi_axis_intensity = 5 + +# Speed of random motion variation (0-10, where 10 = fastest) +# Higher values = faster changes +# Note: StashApp UI only accepts integers 0-10, converted to 0.0-1.0 internally +random_speed = 3 + +# Seconds of inactivity before returning to center position (integer 0-10) +# Note: StashApp UI only accepts integers 0-10 +auto_home_delay = 1 + +# Time to smoothly return to center position in seconds (integer 0-10) +# Note: StashApp UI only accepts integers 0-10 +auto_home_duration = 1 + +# Scale secondary axis movement with primary stroke activity +smart_limit = True + + +# ----------------- Marker Settings 
----------------- + +# Add a scene marker when funscript generation completes +add_marker = True diff --git a/plugins/GroupAutoScraper/GroupAutoScraper.yml b/plugins/GroupAutoScraper/GroupAutoScraper.yml new file mode 100644 index 00000000..62da5d76 --- /dev/null +++ b/plugins/GroupAutoScraper/GroupAutoScraper.yml @@ -0,0 +1,13 @@ +name: GroupAutoScraper +description: Automatically re-scrape groups that have an Adult Empire URL to pick up tags and studio for the group. +version: 1.1.0 +url: https://discourse.stashapp.cc/t/groupautoscraper/6196 +exec: + - python + - "{pluginDir}/autoScraper.py" +interface: raw +hooks: + - name: hook_group_auto_scraper + description: Re-scrape group on create when it has a URL. + triggeredBy: + - Group.Create.Post \ No newline at end of file diff --git a/plugins/GroupAutoScraper/README.md b/plugins/GroupAutoScraper/README.md new file mode 100644 index 00000000..bccd68ea --- /dev/null +++ b/plugins/GroupAutoScraper/README.md @@ -0,0 +1,75 @@ +# GroupAutoScraper + +https://discourse.stashapp.cc/t/groupautoscraper/6196 + +Automatically re-scrape groups that have a supported URL and merge the scraped data back into the group. + +## What it does + +- **Trigger** + - Listens to the **`Group.Create.Post`** hook only. +- **URL filter** + - If the group has no URLs, the plugin exits quietly (no changes). + - If the first URL does **not** contain `adultdvdempire.com/`, the plugin logs: + - `AutoGroup only uses AdultDVDEmpire URLS. Exiting.` + and exits without making any changes. +- **Scrape + merge** + - When the first URL *does* contain `adultdvdempire.com/`: + - Calls `scrapeGroupURL(url)` for that URL. + - Merges scraped data into the group and performs a `GroupUpdate`: + - Uses scraped values when present, otherwise keeps existing values. + - Uses `scraped.studio.stored_id` as `studio_id` only when it is not `null`.
+ - Builds `tag_ids` from: + - existing group tag IDs, plus + - scraped tag entries where `stored_id` is not `null`, + - then de-duplicates. + - Only sends `front_image` / `back_image` when present in the scrape result so existing images are not overwritten with `null`. +- **Summary logging** + - On a successful update, the plugin logs a concise summary, e.g.: + - `Group 9681 'Women Seeking Women Vol. 101' updated. Added 4 tag(s), set studio.` + - If a studio name is scraped but cannot be resolved (no `stored_id`), the message instead reads: + - `Group 9681 'Some Title' updated. Added 3 tag(s), could not set studio 'Some Studio', not found in studios.` + +Groups without any URL, or with non-AdultDVD Empire URLs, are ignored without error. + +## Installation + +1. Copy this folder to your Stash plugins directory, typically: + + - `plugins/CommunityScripts/plugins/GroupAutoScraper/` + +2. Install Python dependencies. From this plugin's directory run: + + ```bash + pip install -r requirements.txt + ``` + + This installs: + + - `requests` + - `stashapp-tools` (which provides the `stashapi` package used by the plugin) + +3. Ensure the following files exist in this directory: + + - `manifest` + - `GroupAutoScraper.yml` + - `autoScraper.py` + - `README.md` + - `requirements.txt` + +4. In Stash, open **Settings → Plugins** and reload or restart Stash so the plugin is detected. + +You should then see **GroupAutoScraper** listed with a hook that triggers on `Group.Create.Post`. + +## Configuration + +This plugin intentionally uses the **server connection information provided by Stash**: + +- GraphQL URL, scheme, host and port come from the plugin input. +- Authentication uses the Stash session cookie provided in `server_connection`. + +As a result: + +- **No API keys or URLs need to be hard-coded or edited in the script.** +- The plugin should work across environments as long as it is installed in the correct plugins directory and the Python dependencies are installed. 
+ diff --git a/plugins/GroupAutoScraper/autoScraper.py b/plugins/GroupAutoScraper/autoScraper.py new file mode 100644 index 00000000..5dae956d --- /dev/null +++ b/plugins/GroupAutoScraper/autoScraper.py @@ -0,0 +1,387 @@ +#!/usr/bin/env python3 +""" +autoScraper.py + +External raw plugin for Stash that: +- Triggers on group hooks (e.g. Group.Create.Post). +- If the group has at least one URL, calls ScrapeGroupURL on the first URL. +- Merges scraped data back into the group via GroupUpdate: + * Uses scraped values when present, otherwise keeps existing ones. + * For studio/tags, only uses scraped entries where stored_id is not null. + * Tag ids from scraped data are merged with existing tag ids (unique). + +This script is designed to be run by Stash as a raw external plugin and +expects its input JSON on stdin (the standard Stash plugin FRAGMENT format). + +Requires: + - Python 3.7+ + - requests and stashapp-tools (pip install -r requirements.txt) +""" + +import sys +import json +import time +from typing import Any, Dict, List, Optional + +import requests +import stashapi.log as log +from stashapi.stashapp import StashInterface + + +START_TIME = time.time() + + +def exit_plugin(msg: Optional[str] = None, err: Optional[str] = None) -> None: + if msg is None and err is None: + msg = "plugin ended" + log.debug(f"Execution time: {round(time.time() - START_TIME, 5)}s") + output_json = {"output": msg, "error": err} + print(json.dumps(output_json)) + sys.exit(0 if err is None else 1) + + +def load_fragment() -> Dict[str, Any]: + try: + raw = sys.stdin.read() + fragment = json.loads(raw) + except Exception as exc: + log.error(f"Failed to read/parse plugin input: {exc}") + exit_plugin(err="invalid plugin input") + return fragment + + +def build_graphql_client(server: Dict[str, Any]) -> Dict[str, Any]: + scheme = server.get("Scheme", "http") + host = server.get("Host", "localhost") + port = str(server.get("Port", "9999")) + if host == "0.0.0.0": + host = "localhost" + + url =
f"{scheme}://{host}:{port}/graphql" + cookies = {} + session = server.get("SessionCookie") or {} + if session.get("Value"): + cookies["session"] = session["Value"] + + headers = { + "Accept-Encoding": "gzip, deflate, br", + "Content-Type": "application/json", + "Accept": "application/json", + "Connection": "keep-alive", + "DNT": "1", + } + + return {"url": url, "headers": headers, "cookies": cookies} + + +def graphql_request( + client: Dict[str, Any], query: str, variables: Dict[str, Any] +) -> Dict[str, Any]: + payload = {"query": query, "variables": variables} + try: + resp = requests.post( + client["url"], + json=payload, + headers=client["headers"], + cookies=client["cookies"], + timeout=20, + ) + except Exception as exc: + log.error(f"Error calling GraphQL: {exc}") + exit_plugin(err="graphql request failed") + + if resp.status_code != 200: + log.error( + f"GraphQL HTTP {resp.status_code}: {resp.content!r}" + ) + exit_plugin(err="graphql http error") + + data = resp.json() + if "errors" in data and data["errors"]: + log.error(f"GraphQL errors: {data['errors']}") + exit_plugin(err="graphql errors") + return data.get("data", {}) + + +def seconds_from_duration(duration: Optional[str]) -> Optional[int]: + """ + Convert a duration string like "3:16:00" or "16:00" into seconds. + Returns None if duration is falsy or cannot be parsed. 
+ """ + if not duration: + return None + parts = duration.split(":") + if not all(p.isdigit() for p in parts): + return None + try: + if len(parts) == 3: + h, m, s = map(int, parts) + elif len(parts) == 2: + h = 0 + m, s = map(int, parts) + elif len(parts) == 1: + h = 0 + m = 0 + s = int(parts[0]) + else: + return None + except ValueError: + return None + return h * 3600 + m * 60 + s + + +def coalesce(new_val: Any, old_val: Any) -> Any: + """Return new_val if it is not None, otherwise old_val.""" + return new_val if new_val is not None else old_val + + +def build_group_update_input( + group_id: int, + existing: Dict[str, Any], + scraped: Dict[str, Any], +) -> Dict[str, Any]: + """ + Build the GroupUpdateInput payload, merging scraped data with existing. + """ + input_obj: Dict[str, Any] = {"id": str(group_id)} + + # Basic scalar fields + input_obj["name"] = coalesce(scraped.get("name"), existing.get("name")) + + # aliases: scraped may be list or string; convert list -> comma separated string + scraped_aliases = scraped.get("aliases") + if isinstance(scraped_aliases, list): + aliases_str = ", ".join(a for a in scraped_aliases if a) + else: + aliases_str = scraped_aliases + input_obj["aliases"] = coalesce(aliases_str, existing.get("aliases") or "") + + # duration: convert scraped duration string to seconds; keep existing if scrape missing + scraped_duration_seconds = seconds_from_duration(scraped.get("duration")) + if scraped_duration_seconds is not None: + input_obj["duration"] = scraped_duration_seconds + elif existing.get("duration") is not None: + input_obj["duration"] = existing.get("duration") + + input_obj["date"] = coalesce(scraped.get("date"), existing.get("date")) + + # Director + input_obj["director"] = coalesce(scraped.get("director"), existing.get("director")) + + # URLs: prefer scraped urls when non-empty + scraped_urls = scraped.get("urls") or [] + existing_urls = existing.get("urls") or [] + if scraped_urls: + input_obj["urls"] = scraped_urls + elif 
existing_urls: + input_obj["urls"] = existing_urls + + # Synopsis + input_obj["synopsis"] = coalesce(scraped.get("synopsis"), existing.get("synopsis")) + + # Studio: use scraped.studio.stored_id when present, else existing studio.id + existing_studio = existing.get("studio") or {} + existing_studio_id = existing_studio.get("id") + scraped_studio = scraped.get("studio") or {} + scraped_studio_id = scraped_studio.get("stored_id") + studio_id = coalesce(scraped_studio_id, existing_studio_id) + if studio_id is not None: + input_obj["studio_id"] = str(studio_id) + + # Tags: union of existing tag ids and scraped tags with stored_id, filtering nulls + existing_tags = existing.get("tags") or [] + existing_tag_ids: List[str] = [str(t.get("id")) for t in existing_tags if t.get("id") is not None] + + scraped_tags = scraped.get("tags") or [] + scraped_tag_ids: List[str] = [ + str(t.get("stored_id")) + for t in scraped_tags + if t.get("stored_id") is not None + ] + + if existing_tag_ids or scraped_tag_ids: + merged_ids: List[str] = [] + for tid in existing_tag_ids + scraped_tag_ids: + if tid not in merged_ids: + merged_ids.append(tid) + input_obj["tag_ids"] = merged_ids + + # Images: only send when we actually have scraped data URIs; otherwise omit so we + # don't overwrite existing images with null. + front_image = scraped.get("front_image") + if front_image: + input_obj["front_image"] = front_image + back_image = scraped.get("back_image") + if back_image: + input_obj["back_image"] = back_image + + return input_obj + + +def main() -> None: + fragment = load_fragment() + server = fragment.get("server_connection") or {} + client = build_graphql_client(server) + # Create StashInterface instance for consistency with other plugins, + # even though this plugin currently uses direct GraphQL requests. 
+ _stash = StashInterface(server) + + args = fragment.get("args") or {} + + # When triggered by a hook, we get hookContext with type/id + hook_ctx = args.get("hookContext") or {} + hook_type = hook_ctx.get("type") + hook_id = hook_ctx.get("id") + + if not hook_type or not hook_id: + # Not a hook invocation – nothing to do. + exit_plugin("No hook context; skipping.") + + if hook_type not in ("Group.Create.Post", "Group.Update.Post"): + # Only act on group create/update + exit_plugin(f"Ignoring hook type {hook_type}") + + try: + group_id = int(hook_id) + except (TypeError, ValueError): + log.error(f"Invalid group id in hookContext: {hook_id!r}") + exit_plugin(err="invalid group id") + + log.debug(f"Running GroupAutoScraper for group id {group_id} ({hook_type})") + + # 1. Fetch existing group + find_group_query = """ + query FindGroup($id: ID!) { + findGroup(id: $id) { + id + name + aliases + duration + date + director + urls + synopsis + front_image_path + back_image_path + studio { + id + } + tags { + id + } + containing_groups { + group { + id + } + description + } + } + } + """ + + data = graphql_request(client, find_group_query, {"id": str(group_id)}) + group = data.get("findGroup") + if not group: + log.error(f"No group found with id {group_id}") + exit_plugin(err="group not found") + + urls = group.get("urls") or [] + if not urls: + # Nothing to scrape, but not an error + log.info(f"Group {group_id} has no URLs; nothing to do.") + exit_plugin("group has no URLs; skipped") + + target_url = urls[0] + + # Only handle AdultDVD Empire URLs + if "adultdvdempire.com/" not in target_url: + log.info("AutoGroup only uses AdultDVDEmpire URLS. Exiting.") + exit_plugin("non-AdultDVDEmpire URL; skipped") + + # 2. Scrape group URL + scrape_query = """ + query ScrapeGroupURL($url: String!) 
{ + scrapeGroupURL(url: $url) { + name + aliases + duration + date + rating + director + urls + synopsis + front_image + back_image + studio { + stored_id + name + urls + } + tags { + stored_id + name + remote_site_id + } + } + } + """ + + scrape_data = graphql_request(client, scrape_query, {"url": target_url}) + scraped = scrape_data.get("scrapeGroupURL") + if not scraped: + log.error(f"ScrapeGroupURL returned no data for URL {target_url}") + exit_plugin(err="scrapeGroupURL returned no data") + + # 3. Build GroupUpdate input + # Compute tag additions and studio status for logging. + existing_tags = group.get("tags") or [] + existing_tag_ids = {str(t.get("id")) for t in existing_tags if t.get("id") is not None} + + scraped_tags = scraped.get("tags") or [] + scraped_tag_ids = [ + str(t.get("stored_id")) + for t in scraped_tags + if t.get("stored_id") is not None + ] + tags_added_count = sum(1 for tid in scraped_tag_ids if tid not in existing_tag_ids) + + scraped_studio = scraped.get("studio") or {} + scraped_studio_name = scraped_studio.get("name") + scraped_studio_id = scraped_studio.get("stored_id") + if scraped_studio_id is not None: + studio_msg = "set studio" + elif scraped_studio_name: + studio_msg = f"could not set studio '{scraped_studio_name}', not found in studios" + else: + studio_msg = "no studio in scrape" + + update_input = build_group_update_input(group_id, group, scraped) + + # 4. Perform GroupUpdate + update_query = """ + mutation GroupUpdate($input: GroupUpdateInput!) { + groupUpdate(input: $input) { + id + name + } + } + """ + + result = graphql_request(client, update_query, {"input": update_input}) + updated = result.get("groupUpdate") + if not updated: + log.error("GroupUpdate did not return a group") + exit_plugin(err="groupUpdate failed") + + log.info( + f"Group {updated.get('id')} '{updated.get('name')}' updated. " + f"Added {tags_added_count} tag(s), {studio_msg}." 
+ ) + exit_plugin( + msg=f"Updated group {updated.get('id')} '{updated.get('name')}' from {target_url}" + ) + + +if __name__ == "__main__": + main() + diff --git a/plugins/GroupAutoScraper/requirements.txt b/plugins/GroupAutoScraper/requirements.txt new file mode 100644 index 00000000..bba58e16 --- /dev/null +++ b/plugins/GroupAutoScraper/requirements.txt @@ -0,0 +1,3 @@ +requests +stashapp-tools + diff --git a/plugins/GroupDetails/GroupDetails.css b/plugins/GroupDetails/GroupDetails.css new file mode 100644 index 00000000..f1f338b6 --- /dev/null +++ b/plugins/GroupDetails/GroupDetails.css @@ -0,0 +1,191 @@ +.group-card .card-popovers { + flex-wrap: wrap; +} + +/* Stacked layout: line1 = duration + scene count; line2 = resolution (so extra chips stay on their own row). */ +.group-card .card-popovers .gd-metrics-row { + display: flex; + flex-direction: column; + align-items: stretch; + width: 100%; + flex: 1 1 100%; + box-sizing: border-box; + gap: 0.12rem; + padding: 0.15rem 2px; +} + +.group-card .card-popovers .gd-metrics-line1 { + display: grid; + grid-template-columns: 1fr; + align-items: center; + width: 100%; +} + +.group-card .card-popovers .gd-metrics-line1 .scene-count { + justify-self: center; +} + +.group-card .card-popovers .gd-metrics-line2 { + display: flex; + justify-content: flex-end; + align-items: center; + width: 100%; +} + +.group-card .card-popovers .gd-res-bucket { + display: inline-flex; + align-items: center; + justify-content: center; + min-height: 1.15em; + line-height: 1; + /* Let hover hit the wrapper so the native `title` tooltip on .gd-res-bucket shows (SVG children do not inherit it). 
*/ + cursor: help; +} + +.group-card .card-popovers .gd-res-bucket > * { + pointer-events: none; +} + +.group-card .card-popovers .gd-resolution-png { + height: 1em; + width: auto; + max-width: 2.4em; + display: block; + object-fit: contain; +} + +.group-card .card-popovers .gd-res-bucket-fallback { + font-size: 0.68rem; + font-weight: 600; + letter-spacing: 0.04em; + opacity: 0.95; +} + +.group-card .card-popovers .gd-stat { + display: inline-flex; + align-items: center; + justify-content: center; + min-width: 0; + padding: 0.2rem 0.15rem; + font-size: 0.9rem; + line-height: 1.1; + color: var(--text, #d5dbe3); + opacity: 0.95; + white-space: nowrap; +} + +.group-card .gd-date-line { + display: flex; + justify-content: space-between; + align-items: center; + width: 100%; +} + +.group-card .gd-date-text { + min-width: 0; +} + +.group-card .gd-date-duration { + margin-left: 0.5rem; + margin-right: 0; + margin-inline-start: 0.5rem; + margin-inline-end: 0; + padding: 0; + justify-content: flex-end; + font-size: inherit; + line-height: inherit; + font-weight: inherit; + color: inherit; + opacity: inherit; + cursor: help; +} + +.group-card .card-popovers .gd-performer-count { + position: relative; +} + +.group-card, +.group-card .card-popovers { + overflow: visible; +} + +.group-card .card-popovers .gd-performer-count .btn { + pointer-events: none; +} + +.group-card .card-popovers .gd-performer-popover { + position: absolute; + left: 50%; + top: calc(100% + 0.35rem); + transform: translate(-50%, 0.2rem); + z-index: 40; + display: block; + width: max-content; + max-width: min(32rem, calc(100vw - 1.5rem)); + max-height: 360px; + overflow: visible; + opacity: 0; + visibility: hidden; + pointer-events: none; + transition: opacity 180ms ease, transform 180ms ease, visibility 0s linear 180ms; +} + +.group-card .card-popovers .gd-performer-count.gd-open .gd-performer-popover { + opacity: 1; + visibility: visible; + pointer-events: auto; + transform: translate(-50%, 0); + 
transition-delay: 0ms; +} + +.group-card .card-popovers .gd-performer-popover .popover-body { + display: flex; + flex-wrap: wrap; + justify-content: center; + align-content: flex-start; + gap: 0.45rem; + overflow: auto; + max-height: 360px; + padding: 0.55rem 0.6rem 0.55rem; +} + +.group-card .card-popovers .gd-performer-image { + width: 96px; + height: 120px; + border-radius: 0.25rem; + object-fit: cover; + object-position: top; + background: #324252; +} + +.group-card .card-popovers .gd-performer-item { + display: inline-flex; + flex-direction: column; + align-items: center; + text-align: center; + width: 96px; + margin: 0; +} + +.group-card .card-popovers .gd-performer-item .gd-performer-thumb { + display: block; + width: 96px; + text-decoration: none; +} + +.group-card .card-popovers .gd-performer-name { + display: inline-block; + max-width: 100%; + margin: 0.2rem auto 0; + padding: 0.06rem 0.42rem; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + font-size: 0.76rem; + line-height: 1.2; + text-align: center; + border-radius: 0.25rem; + background: #d7e4f1; + color: #1a2330; + text-decoration: none; +} diff --git a/plugins/GroupDetails/GroupDetails.js b/plugins/GroupDetails/GroupDetails.js new file mode 100644 index 00000000..0deab901 --- /dev/null +++ b/plugins/GroupDetails/GroupDetails.js @@ -0,0 +1,759 @@ +"use strict"; + +(function () { + var ROOT_ID = "root"; + var ROUTE_PREFIX = "/groups"; + var PLUGIN_ID = "GroupDetails"; + var GROUP_METRICS_QUERY = + "query GroupDetailsMetrics($id: ID!) 
{" + + " findGroup(id: $id) {" + + " id " + + " scenes { " + + " id " + + " title " + + " files { duration height } " + + " performers { id name image_path } " + + " groups { group { id } scene_index } " + + " } " + + " }" + + "}"; + + var state = { + observer: null, + attachedRoot: null, + retryTimer: null, + applyingDomEnhancements: false, + cacheByGroupId: new Map(), + inFlightByGroupId: new Map(), + includeAllScenes: false, + includePerformers: false, + tooltipDelegateBound: false, + }; + + async function gql(query, variables) { + var res = await fetch("/graphql", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ query: query, variables: variables || {} }), + }); + var j = await res.json(); + if (j.errors && j.errors.length) { + throw new Error( + j.errors.map(function (e) { + return e.message; + }).join("; ") + ); + } + return j.data; + } + + function routeMatches() { + var p = window.location.pathname || ""; + return p === ROUTE_PREFIX || p.indexOf(ROUTE_PREFIX + "/") === 0; + } + + function parseGroupIdFromHref(href) { + if (!href) return null; + var match = String(href).match(/\/groups\/(\d+)/); + return match ? 
String(match[1]) : null; + } + + function parseGroupIdFromCard(card) { + if (!card) return null; + var header = card.querySelector("a.group-card-header"); + if (header && header.getAttribute("href")) { + return parseGroupIdFromHref(header.getAttribute("href")); + } + var firstLink = card.querySelector("a[href*='/groups/']"); + if (firstLink && firstLink.getAttribute("href")) { + return parseGroupIdFromHref(firstLink.getAttribute("href")); + } + return null; + } + + function sceneIndexForGroup(scene, groupId) { + var groups = (scene && scene.groups) || []; + var gid = String(groupId); + for (var i = 0; i < groups.length; i++) { + var g = groups[i]; + if (g && g.group && String(g.group.id) === gid) return g.scene_index; + } + return undefined; + } + + function isEligibleSceneIndex(idx) { + if (idx == null) return true; + var n = Number(idx); + return Number.isFinite(n) && n >= 0 && n <= 89; + } + + function readBoolSetting(raw, fallback) { + if (raw === true || raw === "true") return true; + if (raw === false || raw === "false") return false; + return fallback; + } + + async function loadPluginSettings() { + try { + var data = await gql("query GdCfg { configuration { plugins } }"); + var plug = data.configuration && data.configuration.plugins; + var cfg = null; + if (plug && typeof plug === "object") { + cfg = plug[PLUGIN_ID] || null; + if (!cfg) { + var k = Object.keys(plug).find(function (key) { + return String(key).toLowerCase() === String(PLUGIN_ID).toLowerCase(); + }); + if (k) cfg = plug[k]; + } + } + var next = false; + var nextPerformers = false; + if (cfg && typeof cfg === "object") { + next = readBoolSetting(cfg.includeAllScenes, false); + nextPerformers = readBoolSetting(cfg.includePerformers, false); + } + if ( + next !== state.includeAllScenes || + nextPerformers !== state.includePerformers + ) { + state.includeAllScenes = next; + state.includePerformers = nextPerformers; + state.cacheByGroupId.clear(); + state.inFlightByGroupId.clear(); + } + } catch 
(e) { + state.includeAllScenes = false; + state.includePerformers = false; + } + } + + function metricsCacheKey(groupId) { + return ( + String(groupId) + + ":" + + (state.includeAllScenes ? "1" : "0") + + ":" + + (state.includePerformers ? "1" : "0") + ); + } + + function getSceneDurationSeconds(scene) { + var files = (scene && scene.files) || []; + var maxDur = 0; + for (var i = 0; i < files.length; i++) { + var dur = Number(files[i] && files[i].duration); + if (Number.isFinite(dur) && dur > maxDur) maxDur = dur; + } + return maxDur; + } + + function getSceneVerticalPixels(scene) { + var files = (scene && scene.files) || []; + var maxHeight = 0; + for (var i = 0; i < files.length; i++) { + var h = Number(files[i] && files[i].height); + if (Number.isFinite(h) && h > maxHeight) maxHeight = h; + } + return maxHeight; + } + + function formatDuration(totalSeconds) { + var s = Math.max(0, Math.round(Number(totalSeconds) || 0)); + var hrs = Math.floor(s / 3600); + var mins = Math.floor((s % 3600) / 60); + var secs = s % 60; + return hrs + ":" + String(mins).padStart(2, "0") + ":" + String(secs).padStart(2, "0"); + } + + function formatSceneTooltipLine(sceneIndex, title, durationSec) { + var t = String(title || "").replace(/\s+/g, " ").trim(); + if (!t) t = "(no title)"; + var hasIndex = !(sceneIndex == null || sceneIndex === ""); + if (hasIndex) return String(sceneIndex) + ". " + t + " " + formatDuration(durationSec); + return t + " " + formatDuration(durationSec); + } + + /** For ordering only: missing index sorts like 90. */ + function sceneIndexSortKey(idx) { + if (idx == null || idx === "") return 90; + var n = Number(idx); + return Number.isFinite(n) ? 
n : 90; + } + + function computeMetrics(groupId, scenes, includeAllScenes) { + var totalDurationSec = 0; + var verticalSum = 0; + var verticalCount = 0; + var totalFileCount = 0; + var list = scenes || []; + var applySceneIndexFilter = + !includeAllScenes && list.length !== 1; + var rows = []; + + for (var i = 0; i < list.length; i++) { + var scene = list[i]; + var idx = sceneIndexForGroup(scene, groupId); + if (applySceneIndexFilter && !isEligibleSceneIndex(idx)) continue; + var duration = getSceneDurationSeconds(scene); + rows.push({ scene: scene, idx: idx, duration: duration }); + } + + rows.sort(function (a, b) { + var ka = sceneIndexSortKey(a.idx); + var kb = sceneIndexSortKey(b.idx); + if (ka !== kb) return ka - kb; + var da = Number(a.duration) || 0; + var db = Number(b.duration) || 0; + if (db !== da) return db - da; + var ida = String((a.scene && a.scene.id) || ""); + var idb = String((b.scene && b.scene.id) || ""); + return ida < idb ? -1 : ida > idb ? 1 : 0; + }); + + for (var fc = 0; fc < rows.length; fc++) { + var filesForCount = (rows[fc].scene && rows[fc].scene.files) || []; + totalFileCount += filesForCount.length; + } + var bypassDurationFilterForResolution = totalFileCount === 1; + + var durationLines = []; + var performerById = new Map(); + for (var j = 0; j < rows.length; j++) { + var row = rows[j]; + var scene = row.scene; + var idx = row.idx; + var duration = row.duration; + totalDurationSec += duration; + durationLines.push( + formatSceneTooltipLine(idx, scene && scene.title, duration) + ); + + if (bypassDurationFilterForResolution || duration > 360) { + var height = getSceneVerticalPixels(scene); + if (height > 0) { + verticalSum += height; + verticalCount += 1; + } + } + + var perfs = (scene && scene.performers) || []; + for (var p = 0; p < perfs.length; p++) { + var perf = perfs[p]; + if (!perf) continue; + var pid = String(perf.id || perf.name || ""); + if (!pid) continue; + if (!performerById.has(pid)) { + performerById.set(pid, { + id: 
String(perf.id || ""), + name: String(perf.name || "").trim() || "(unknown performer)", + imagePath: String(perf.image_path || ""), + }); + } + } + } + + var durationHeader = + "Scenes in total duration:\n"; + var durationTooltip = + durationLines.length > 0 + ? durationHeader + durationLines.join("\n") + : "No eligible scenes for total duration."; + var avgPx = + verticalCount > 0 ? Math.round(verticalSum / verticalCount) : null; + var resolutionTooltip = + avgPx == null || verticalCount < 1 + ? "Resolution Average: \u2014" + : "Resolution Average: " + avgPx + "p"; + + return { + totalDurationSec: Math.round(totalDurationSec), + averageVerticalPixels: avgPx, + verticalSampleCount: verticalCount, + totalFileCount: totalFileCount, + durationTooltip: durationTooltip, + resolutionTooltip: resolutionTooltip, + performers: Array.from(performerById.values()).sort(function (a, b) { + return a.name.localeCompare(b.name); + }), + }; + } + + var RESOLUTION_PNG_LADDER = [ + { value: 4320, label: "8k", file: "8k.png" }, + { value: 3160, label: "6k", file: "6k.png" }, + { value: 2880, label: "5k", file: "5k.png" }, + { value: 2160, label: "4k", file: "4k.png" }, + { value: 1440, label: "2k", file: "2k.png" }, + { value: 1080, label: "1080p", file: "1080p.png" }, + { value: 720, label: "720p", file: "720p.png" }, + { value: 480, label: "480p", file: "480p.png" }, + { value: 360, label: "360p", file: "360p.png" }, + { value: 240, label: "240p", file: "240p.png" }, + ]; + var LOWEST_RESOLUTION_PNG = { value: 144, label: "144p", file: "144p.png" }; + var RESOLUTION_MATCH_RATIO = 0.98; // 2% tolerance + var LOWEST_RESOLUTION_CUTOFF = 234; + + function pickResolutionPngSpec(avgHeightPx) { + var h = Math.round(Number(avgHeightPx) || 0); + if (!Number.isFinite(h) || h <= 0) return null; + if (h < LOWEST_RESOLUTION_CUTOFF) return LOWEST_RESOLUTION_PNG; + for (var i = 0; i < RESOLUTION_PNG_LADDER.length; i++) { + var spec = RESOLUTION_PNG_LADDER[i]; + if (h >= Math.round(spec.value * 
RESOLUTION_MATCH_RATIO)) return spec; + } + return LOWEST_RESOLUTION_PNG; + } + + function getEmbeddedResolutionImage(fileName) { + var map = + typeof window !== "undefined" && window.GroupDetailsImages + ? window.GroupDetailsImages + : null; + if (!map) return ""; + var key = String(fileName || ""); + var uri = map[key]; + return typeof uri === "string" ? uri : ""; + } + + function createResolutionPng(spec) { + if (!spec || !spec.file) return null; + var src = getEmbeddedResolutionImage(spec.file); + if (!src) return null; + var img = document.createElement("img"); + img.className = "gd-resolution-png"; + img.alt = spec.label; + img.setAttribute("aria-hidden", "true"); + img.src = src; + return img; + } + + function buildResolutionBucket(id, avgPixels, resolutionTooltip, totalFileCount) { + var wrap = document.createElement("span"); + wrap.id = id; + wrap.className = "gd-stat gd-res-bucket"; + wrap.setAttribute("role", "img"); + var tip = resolutionTooltip || ""; + var h = avgPixels == null ? NaN : Math.round(Number(avgPixels)); + if (!Number.isFinite(h) || h <= 0) { + if ((Number(totalFileCount) || 0) > 1) { + wrap.textContent = "\u2014"; + applySceneListTooltip(wrap, tip); + } else { + // No files (or only one file with no usable height): render nothing. 
+ wrap.textContent = ""; + applySceneListTooltip(wrap, ""); + } + return wrap; + } + var spec = pickResolutionPngSpec(h); + if (spec) { + wrap.setAttribute("data-gd-resolution-tier", spec.label); + } + var img = createResolutionPng(spec); + if (img) wrap.appendChild(img); + else { + var fb = document.createElement("span"); + fb.className = "gd-res-bucket-fallback"; + fb.textContent = h + "p"; + wrap.appendChild(fb); + } + applySceneListTooltip(wrap, tip); + return wrap; + } + + function normalizePerformerImageUrl(path) { + var p = String(path || ""); + if (!p) return ""; + if (/^https?:\/\//i.test(p)) return p; + if (p.indexOf("/") === 0) return p; + return "/" + p; + } + + function wirePerformerPopoverHover(trigger, popover) { + if (!trigger || !popover) return; + var enterDelayMs = 200; + var leaveDelayMs = 200; + var enterTimer = null; + var leaveTimer = null; + + function openSoon() { + if (leaveTimer) { + clearTimeout(leaveTimer); + leaveTimer = null; + } + if (trigger.classList.contains("gd-open")) return; + if (enterTimer) clearTimeout(enterTimer); + enterTimer = setTimeout(function () { + trigger.classList.add("gd-open"); + enterTimer = null; + }, enterDelayMs); + } + + function closeSoon() { + if (enterTimer) { + clearTimeout(enterTimer); + enterTimer = null; + } + if (leaveTimer) clearTimeout(leaveTimer); + leaveTimer = setTimeout(function () { + trigger.classList.remove("gd-open"); + leaveTimer = null; + }, leaveDelayMs); + } + + function onEnter() { + openSoon(); + } + + function onLeave() { + closeSoon(); + } + + trigger.addEventListener("mouseenter", onEnter); + trigger.addEventListener("mouseleave", onLeave); + popover.addEventListener("mouseenter", onEnter); + popover.addEventListener("mouseleave", onLeave); + trigger.addEventListener("focusin", onEnter); + trigger.addEventListener("focusout", onLeave); + } + + function buildPerformerChip(id, performers) { + var list = Array.isArray(performers) ? 
performers : []; + if (list.length === 0) return null; + + var wrap = document.createElement("span"); + wrap.id = id; + wrap.className = "gd-performer-count performer-count"; + wrap.setAttribute("role", "img"); + wrap.setAttribute("aria-label", "Performers: " + list.length); + + var btn = document.createElement("button"); + btn.type = "button"; + btn.className = "minimal btn btn-primary"; + btn.tabIndex = -1; + btn.setAttribute("aria-hidden", "true"); + + var icon = document.createElementNS("http://www.w3.org/2000/svg", "svg"); + icon.setAttribute("aria-hidden", "true"); + icon.setAttribute("focusable", "false"); + icon.setAttribute("data-prefix", "fas"); + icon.setAttribute("data-icon", "user"); + icon.setAttribute("class", "svg-inline--fa fa-user fa-icon"); + icon.setAttribute("role", "img"); + icon.setAttribute("viewBox", "0 0 448 512"); + var path = document.createElementNS("http://www.w3.org/2000/svg", "path"); + path.setAttribute( + "d", + "M224 256A128 128 0 1 0 224 0a128 128 0 1 0 0 256zm-45.7 48C79.8 304 0 383.8 0 482.3C0 498.7 13.3 512 29.7 512l388.6 0c16.4 0 29.7-13.3 29.7-29.7C448 383.8 368.2 304 269.7 304l-91.4 0z" + ); + path.setAttribute("fill", "currentColor"); + icon.appendChild(path); + + var count = document.createElement("span"); + count.textContent = String(list.length); + + btn.appendChild(icon); + btn.appendChild(count); + wrap.appendChild(btn); + + var pop = document.createElement("div"); + pop.className = "gd-performer-popover popover bs-popover-bottom hover-popover-content"; + var arrow = document.createElement("div"); + arrow.className = "arrow"; + pop.appendChild(arrow); + var body = document.createElement("div"); + body.className = "popover-body"; + for (var i = 0; i < list.length; i++) { + var perf = list[i]; + var item = document.createElement("div"); + item.className = "gd-performer-item"; + var thumbLink = document.createElement(perf.id ? 
"a" : "div"); + thumbLink.className = "gd-performer-thumb"; + if (perf.id) thumbLink.href = "/performers/" + encodeURIComponent(String(perf.id)); + var img = document.createElement("img"); + img.className = "gd-performer-image image-thumbnail"; + img.alt = perf.name; + var src = normalizePerformerImageUrl(perf.imagePath); + if (src) img.src = src; + else img.src = "/images/wall-item/performer"; + thumbLink.appendChild(img); + + var name = document.createElement(perf.id ? "a" : "span"); + name.className = "gd-performer-name d-block"; + name.textContent = perf.name; + if (perf.id) name.href = "/performers/" + encodeURIComponent(String(perf.id)); + item.appendChild(thumbLink); + item.appendChild(name); + body.appendChild(item); + } + pop.appendChild(body); + wrap.appendChild(pop); + wirePerformerPopoverHover(wrap, pop); + return wrap; + } + + async function fetchMetricsForGroup(groupId) { + var data = await gql(GROUP_METRICS_QUERY, { id: String(groupId) }); + var group = data && data.findGroup; + return computeMetrics( + groupId, + (group && group.scenes) || [], + state.includeAllScenes + ); + } + + async function getMetricsForGroup(groupId) { + if (!groupId) return null; + var gid = String(groupId); + var key = metricsCacheKey(gid); + if (state.cacheByGroupId.has(key)) return state.cacheByGroupId.get(key); + if (state.inFlightByGroupId.has(key)) return state.inFlightByGroupId.get(key); + + var p = fetchMetricsForGroup(gid) + .then(function (metrics) { + state.cacheByGroupId.set(key, metrics); + state.inFlightByGroupId.delete(key); + return metrics; + }) + .catch(function (e) { + state.inFlightByGroupId.delete(key); + throw e; + }); + state.inFlightByGroupId.set(key, p); + return p; + } + + function applySceneListTooltip(el, tip) { + var s = tip == null ? 
"" : String(tip); + if (s.length > 0) { + el.setAttribute("data-gd-full-title", s); + el.setAttribute("title", s); + } else { + el.removeAttribute("data-gd-full-title"); + el.removeAttribute("title"); + } + } + + function ensureTooltipRefreshDelegate() { + if (state.tooltipDelegateBound) return; + state.tooltipDelegateBound = true; + document.addEventListener( + "pointerenter", + function (ev) { + var raw = ev.target; + var el = raw && raw.nodeType === 1 ? raw : raw && raw.parentElement; + if (!el || !el.closest) return; + var host = el.closest("[data-gd-full-title]"); + if (!host) return; + var full = host.getAttribute("data-gd-full-title"); + if (full != null) host.setAttribute("title", full); + }, + true + ); + } + + function buildStatNode(id, text, title) { + var el = document.createElement("span"); + el.id = id; + el.className = "gd-stat"; + el.textContent = text; + applySceneListTooltip(el, title); + return el; + } + + function findDateLineInCard(card) { + if (!card || !card.querySelectorAll) return null; + var re = /^\d{4}-\d{2}-\d{2}$/; + var nodes = card.querySelectorAll("time, small, span, div, p"); + for (var i = 0; i < nodes.length; i++) { + var el = nodes[i]; + if (!el || !el.textContent) continue; + if (el.closest && el.closest(".card-popovers")) continue; + if (el.querySelector && el.querySelector(".gd-date-duration")) continue; + var raw = String(el.textContent || "").trim(); + if (re.test(raw)) return el; + } + return null; + } + + function mountDurationOnDateLine(card, durationNode) { + if (!card || !durationNode) return false; + var old = card.querySelectorAll(".gd-date-duration"); + for (var i = 0; i < old.length; i++) { + if (old[i] && old[i].parentNode) old[i].parentNode.removeChild(old[i]); + } + var line = findDateLineInCard(card); + if (!line) return false; + + line.classList.add("gd-date-line"); + var textSpan = line.querySelector(".gd-date-text"); + if (!textSpan) { + var original = String(line.textContent || "").trim(); + 
line.textContent = ""; + textSpan = document.createElement("span"); + textSpan.className = "gd-date-text"; + textSpan.textContent = original; + line.appendChild(textSpan); + } + durationNode.classList.add("gd-date-duration"); + line.appendChild(durationNode); + return true; + } + + function injectMetricsIntoCard(card, metrics) { + if (!card || !metrics) return; + var popovers = card.querySelector(".card-popovers"); + if (!popovers) return; + var sceneCount = popovers.querySelector(".scene-count"); + if (!sceneCount) return; + + var oldRow = popovers.querySelector(".gd-metrics-row"); + if (oldRow && oldRow.parentNode) oldRow.parentNode.removeChild(oldRow); + var oldRight = popovers.querySelector(".gd-stat-right"); + if (oldRight && oldRight.parentNode) oldRight.parentNode.removeChild(oldRight); + var oldPerf = popovers.querySelector(".gd-performer-count"); + if (oldPerf && oldPerf.parentNode) oldPerf.parentNode.removeChild(oldPerf); + + var durationNode = buildStatNode( + "gd-stat-duration-" + Date.now(), + formatDuration(metrics.totalDurationSec), + metrics.durationTooltip || "" + ); + durationNode.classList.add("gd-duration"); + + var resolutionNode = buildResolutionBucket( + "gd-stat-right-" + Date.now(), + metrics.averageVerticalPixels, + metrics.resolutionTooltip || "", + metrics.totalFileCount + ); + resolutionNode.classList.add("gd-stat-right"); + resolutionNode.classList.add("chip"); + if (state.includePerformers) { + var performerNode = buildPerformerChip( + "gd-performer-" + Date.now(), + metrics.performers + ); + if (performerNode) popovers.appendChild(performerNode); + } + popovers.appendChild(resolutionNode); + + mountDurationOnDateLine(card, durationNode); + } + + async function decorateGroupCard(card) { + var groupId = parseGroupIdFromCard(card); + if (!groupId) return; + var metrics = await getMetricsForGroup(groupId); + injectMetricsIntoCard(card, metrics); + } + + function applyDomEnhancements() { + if (state.applyingDomEnhancements) return; + 
state.applyingDomEnhancements = true; + + var cards = Array.prototype.slice.call( + document.querySelectorAll("div.group-card") + ); + Promise.all( + cards.map(function (card) { + return decorateGroupCard(card).catch(function () { + // Ignore per-card failures so one bad response does not block others. + }); + }) + ).finally(function () { + state.applyingDomEnhancements = false; + }); + } + + function detachObserver() { + if (state.observer) { + state.observer.disconnect(); + state.observer = null; + } + state.attachedRoot = null; + } + + function clearRetryTimer() { + if (state.retryTimer) { + clearInterval(state.retryTimer); + state.retryTimer = null; + } + } + + function attach() { + if (!routeMatches()) { + detachObserver(); + return false; + } + ensureTooltipRefreshDelegate(); + var root = document.getElementById(ROOT_ID); + if (!root) return false; + + if (state.attachedRoot === root && state.observer) { + loadPluginSettings() + .then(function () {}) + .catch(function () {}) + .finally(function () { + applyDomEnhancements(); + }); + return true; + } + + detachObserver(); + state.cacheByGroupId.clear(); + state.inFlightByGroupId.clear(); + + var obs = new MutationObserver(function () { + applyDomEnhancements(); + }); + obs.observe(root, { childList: true, subtree: true }); + state.observer = obs; + state.attachedRoot = root; + + loadPluginSettings() + .then(function () {}) + .catch(function () {}) + .finally(function () { + applyDomEnhancements(); + }); + return true; + } + + function scheduleAttachRetries() { + clearRetryTimer(); + state.retryTimer = setInterval(function () { + try { + if (!routeMatches()) { + detachObserver(); + return; + } + if (attach()) clearRetryTimer(); + } catch (e) { + // Ignore transient route/render timing errors. 
+ } + }, 500); + setTimeout(clearRetryTimer, 60000); + } + + if (document.readyState === "loading") { + document.addEventListener("DOMContentLoaded", function () { + attach(); + scheduleAttachRetries(); + }); + } else { + attach(); + scheduleAttachRetries(); + } + + window.addEventListener("popstate", function () { + attach(); + scheduleAttachRetries(); + }); + window.addEventListener("hashchange", function () { + attach(); + scheduleAttachRetries(); + }); +})(); diff --git a/plugins/GroupDetails/GroupDetails.yml b/plugins/GroupDetails/GroupDetails.yml new file mode 100644 index 00000000..fe7ee0c2 --- /dev/null +++ b/plugins/GroupDetails/GroupDetails.yml @@ -0,0 +1,25 @@ +name: Group Details +description: Adds group-card metrics for filtered duration and average vertical resolution. +version: 0.2.3 +url: https://discourse.stashapp.cc/t/group-details/6819 +ui: + javascript: + - images.js + - GroupDetails.js + css: + - GroupDetails.css + +settings: + includeAllScenes: + displayName: Include all scenes + description: >- + When enabled, duration and average resolution include every scene linked to the group. + When disabled (default), scenes are limited to those with scene_index null or 0..89 for this group, + except when the group has only one scene (that scene is always included). + type: BOOLEAN + includePerformers: + displayName: Include performers + description: >- + When enabled, appends a performer-count chip to group cards and shows a performer image/name popover + built from the union of performers across included scenes in the group. + type: BOOLEAN diff --git a/plugins/GroupDetails/README.md b/plugins/GroupDetails/README.md new file mode 100644 index 00000000..8473de6f --- /dev/null +++ b/plugins/GroupDetails/README.md @@ -0,0 +1,127 @@ +# Group Details + +https://discourse.stashapp.cc/t/group-details/6819 + +`Group Details` is a UI plugin for Stash group card. 
+ +## Screenshot + +![Group Details screenshot](./screenshot.png) + +## What It Adds + +- **Date line:** appends total duration (`H:MM:SS`) to the right side of the date row. +- **Chip list:** appends a resolution chip (PNG badge) to the end of `.card-popovers`. +- **Performer chip (optional):** when enabled, appends a performer-count chip with a hover popover. +- **Tooltips/popovers:** duration and resolution expose native `title` tooltips; performer chip opens a delayed hover popover. + +## Data Source + +Metrics are computed in-browser from GraphQL `findGroup` scene data (`id`, `title`, `files { duration height }`, `performers { id name image_path }`, `groups { group { id } scene_index }`). + +## Scene Filtering + +When **Include all scenes** is disabled (default), scenes are included only if `scene_index` is: + +- `null`, or +- an integer in `0..89` + +Exception: if the group has exactly **one scene**, scene-index filtering is bypassed for that group. + +When **Include all scenes** is enabled, all returned scenes are included regardless of `scene_index`. + +![Group Details Settings screenshot](./details.png) + +## Performer Metric (Optional) + +When **Include performers** is enabled: + +- The plugin builds a union of performers across all scenes included by the same filtering rules used for duration/resolution. +- A performer-count chip (`user` icon + count) is appended to `.card-popovers`. +- Hovering the chip opens a performer drawer styled to match Stash scene-card behavior: + - centered popover aligned to the chip + - fixed-size performer tiles with image + name badge + - centered wrapping rows (including centered final row) +- Hover behavior uses delayed open/close timing (`~200ms` enter and leave) and fade transitions to mimic native feel. + +## Sorting + +Duration tooltip scene lines are sorted by: + +1. `scene_index` ascending (`null` sorts as `90`) +2. duration descending +3. 
scene `id` ascending (stable tie-break) + +## Duration Metric + +- Uses each included scene's **max file duration**. +- Card value is total duration displayed as `H:MM:SS`. +- Tooltip lists every included scene as: + - `N. Title H:MM:SS` when `scene_index` is present + - `Title H:MM:SS` when `scene_index` is null + +## Resolution Metric + +Average resolution uses vertical pixels (`height`) from each included scene's tallest file: + +- For groups with **exactly one total file**, the duration gate is bypassed. +- Otherwise, only scenes with `duration > 360` are eligible. +- Resolution average is `round(sum(height) / count)`. +- Tooltip format is: + - `Resolution Average: p` + - or `Resolution Average: —` when no eligible average exists. + +Resolution chip empty/dash behavior: + +- If there are no files (or single-file case with unusable height): render nothing. +- If `totalFileCount > 1` and no eligible average: render `—`. + +## Resolution Badge Mapping + +The plugin picks a PNG badge using a 2% tolerance (`>= 98%` of target resolution): + +- `< 234` -> `144p.png` +- then highest match from: + - `240`, `360`, `480`, `720`, `1080`, `1440 (2k)`, `2160 (4k)`, `2880 (5k)`, `3160 (6k)`, `4320 (8k)` + +## Assets And Build + +Badges are authored as PNG files in `assets/` and embedded into `images.js` as base64 data URIs. The image.js provides base64 data back to the plugin and users can ultimately swap a logo on their plugin for further customiation if desired. +`images.js` is generated output and should not be hand-edited. 
+ +- Source files: `plugins/GroupDetails/assets/*.png` +- Generated file: `plugins/GroupDetails/images.js` +- Build script: `plugins/GroupDetails/build.sh` + +### Regenerating `images.js` + +From `plugins/GroupDetails/`: + +```bash +bash build.sh +``` + +What `build.sh` does: + +- validates that `assets/` exists and contains at least one `*.png` +- reads `assets/*.png` +- sorts filenames deterministically (stable diffs) +- rewrites `images.js` with `window.GroupDetailsImages = { ... }` + +Commit both: + +- the changed PNG files in `assets/` +- the regenerated `images.js` + +## Updates Not Showing? + +After editing plugin files, perform a **full page reload** (F5 / Ctrl+Shift+R). In-app navigation can keep an older script in memory. + +## Gen AI Assisted Plugin Authorship +This plugin was generated with the help of Generative AI (Cursor). + +Per the draft guidelines of [#678] +- ✅ LLM use is openly disclosed. +- ✅ Code is reviewed by a human. +- ✅ Human testing and validation was performed. +- ✅ You take full responsibility for the code (including license compliance). 
\ No newline at end of file diff --git a/plugins/GroupDetails/assets/.gitkeep b/plugins/GroupDetails/assets/.gitkeep new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/plugins/GroupDetails/assets/.gitkeep @@ -0,0 +1 @@ + diff --git a/plugins/GroupDetails/assets/1080p.png b/plugins/GroupDetails/assets/1080p.png new file mode 100644 index 00000000..cd5f49b1 Binary files /dev/null and b/plugins/GroupDetails/assets/1080p.png differ diff --git a/plugins/GroupDetails/assets/144p.png b/plugins/GroupDetails/assets/144p.png new file mode 100644 index 00000000..25b98c49 Binary files /dev/null and b/plugins/GroupDetails/assets/144p.png differ diff --git a/plugins/GroupDetails/assets/240p.png b/plugins/GroupDetails/assets/240p.png new file mode 100644 index 00000000..23582435 Binary files /dev/null and b/plugins/GroupDetails/assets/240p.png differ diff --git a/plugins/GroupDetails/assets/2k.png b/plugins/GroupDetails/assets/2k.png new file mode 100644 index 00000000..9a413062 Binary files /dev/null and b/plugins/GroupDetails/assets/2k.png differ diff --git a/plugins/GroupDetails/assets/360p.png b/plugins/GroupDetails/assets/360p.png new file mode 100644 index 00000000..98fcb7a3 Binary files /dev/null and b/plugins/GroupDetails/assets/360p.png differ diff --git a/plugins/GroupDetails/assets/480p.png b/plugins/GroupDetails/assets/480p.png new file mode 100644 index 00000000..e752e90d Binary files /dev/null and b/plugins/GroupDetails/assets/480p.png differ diff --git a/plugins/GroupDetails/assets/4k.png b/plugins/GroupDetails/assets/4k.png new file mode 100644 index 00000000..fda73dcd Binary files /dev/null and b/plugins/GroupDetails/assets/4k.png differ diff --git a/plugins/GroupDetails/assets/5k.png b/plugins/GroupDetails/assets/5k.png new file mode 100644 index 00000000..9e566bcc Binary files /dev/null and b/plugins/GroupDetails/assets/5k.png differ diff --git a/plugins/GroupDetails/assets/6k.png b/plugins/GroupDetails/assets/6k.png new file mode 100644 index 
00000000..bf8dbb64 Binary files /dev/null and b/plugins/GroupDetails/assets/6k.png differ diff --git a/plugins/GroupDetails/assets/720p.png b/plugins/GroupDetails/assets/720p.png new file mode 100644 index 00000000..6576a7d5 Binary files /dev/null and b/plugins/GroupDetails/assets/720p.png differ diff --git a/plugins/GroupDetails/assets/8k.png b/plugins/GroupDetails/assets/8k.png new file mode 100644 index 00000000..e803c4b9 Binary files /dev/null and b/plugins/GroupDetails/assets/8k.png differ diff --git a/plugins/GroupDetails/build.sh b/plugins/GroupDetails/build.sh new file mode 100644 index 00000000..d35a0dea --- /dev/null +++ b/plugins/GroupDetails/build.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ASSET_DIR="${SCRIPT_DIR}/assets" +OUT_FILE="${SCRIPT_DIR}/images.js" + +if [[ ! -d "${ASSET_DIR}" ]]; then + echo "error: assets directory not found: ${ASSET_DIR}" >&2 + echo "create it and add PNG files, then run this script again." >&2 + exit 1 +fi + +shopt -s nullglob +pngs=("${ASSET_DIR}"/*.png) +shopt -u nullglob + +if [[ ${#pngs[@]} -eq 0 ]]; then + echo "error: no PNG files found in ${ASSET_DIR}" >&2 + exit 1 +fi + +python3 - "${ASSET_DIR}" "${OUT_FILE}" <<'PY' +import base64 +import pathlib +import sys + +asset_dir = pathlib.Path(sys.argv[1]) +out_file = pathlib.Path(sys.argv[2]) + +# Deterministic ordering for clean diffs. 
+files = sorted(asset_dir.glob("*.png"), key=lambda p: p.name.lower()) + +lines = [ + '"use strict";', + "(function () {", + " var MAP = {", +] + +for p in files: + b64 = base64.b64encode(p.read_bytes()).decode("ascii") + lines.append(f' "{p.name}": "data:image/png;base64,{b64}",') + +lines.extend([ + " };", + " window.GroupDetailsImages = MAP;", + "})();", + "", +]) + +out_file.write_text("\n".join(lines), encoding="utf-8") +print(f"wrote {out_file} with {len(files)} image(s)") +PY diff --git a/plugins/GroupDetails/details.png b/plugins/GroupDetails/details.png new file mode 100644 index 00000000..3f474d6c Binary files /dev/null and b/plugins/GroupDetails/details.png differ diff --git a/plugins/GroupDetails/images.js b/plugins/GroupDetails/images.js new file mode 100644 index 00000000..e0716eca --- /dev/null +++ b/plugins/GroupDetails/images.js @@ -0,0 +1,17 @@ +"use strict"; +(function () { + var MAP = { + "1080p.png": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAABACAYAAADS1n9/AAAIfElEQVR42u1de1BTVx7+EiIRNDjloUIVQtgiL3koojJFqVa0rNQ3VbHWcRxWULbLstq6totV8dVaS6WAaLXjjO2qSO0qEkDqqms71bauIuKDEBJoWUlgizwSIDH7z+og3HNJeIWbnO+/nO/87j2/8/vO+2QuQGHV4PWUYYz7BAOtJu7ikfI+r1cCoIG3DiHwjAl+5YMyWoMchMTbv0cR8NiCTwNveULoKgIeU/Bp4C1bCJ1FwKdjvvWhc6z5XUna+i0XTLHl09Zv3b0An7Z+6+4F+LRKrBtUAFQAFFQAFFQAFFQAFFYIARcLrVKpUFhcgosl30KhUKJOpYZWq4WzkxNcXJwxbWoY5ka9itDJk8Dj8Xr9HmV1DaSFRbj571t4WCGDWqVGc0sLdLoO2NnZw0EkgqenGD4TvDHrlUiET58GgcBmSPvUFbzOmwKm7gNoNBpUyqtQIZNBJpNDJqtEhUyGSnkVNBoNo80fNyZi81+Se1VYjUaDTzOykJ1zBB0dHT3mDwkOwu607Qjw9zNtrSyXI23XPhQWXzTJztV1LDalJCN26eIh51NndD4T6JMA4lavxeUrV02y6a0A6upUiF2xChWySpPsbGxssGfXDqx4Y5lR+a999z3WxSeiqbm51xX8xrIl+HDvLvD5/CHhE5sAODEHUNfXY9nyOJMrCgD0ej02v7sVp8/kGSWy+ISkPgUfAE6ePoNDh48OCZ8sYhK49f1tkFXKe21vMBiwZWsqlMpq1nyZ2TlobGzslzKnf5qBtrY2s/vEeQFc+udl5F+QMnK2trZIff+vuHnjOzwsv428019hUkgwY16tVost76WyvquwqJjIicUeOJR58Nm7iqXnsHTJImL+5pYW/Ova92b3aUBXAfb2dvDz9YGXlwS/85LASyKBl5cEDQ0NiFu9tl8EkJGZTeQO7N+LBTHzn/0OmxKKk18eR1T065DLq7rlv3zlKsrulsPfz5exW/3l11rG9wiFQn
x5/Bjc3cc/S/P18cEn+/ehuakZUoJwqmtqzOrTgPcAh7M/Q1HBOWRlpCMl+W0sXBCDiQH+sLOz65fgK5XVuH7jJ0bOx2fCcxX1FHZ2dkjakEB85qncM4zpHR0dePLkCSMX4O/3XPA7Y97cOcR3abVas/rE+SHgfH4BDAbmqwqvsVT8vKhXiWvlc+cvMKYPHz4cjo4vEMdb4lgMMveim5tZfeK8AG6VlhK5gAB/Iufg4EBssXV1Kjx6VMfIzZwRwZhedrccCoWSkSuQFhHH8vDwaWb3idMCKL1D3pfw9PBgtRWz8HfK7jKmJyWux7Bhw7qlt7W1Ie6ttbhQUAiVWg2NRoPye/eQnPIOiopLGJ+1ZvUqODk6mt2nAZ0EDiR0Oj2qq2uIvIuLC6u9i4szkZNVVmL2rMhu6d7eL+HA/n340583QafTPcdVVSkQn7DRqLLPnhWJdzenDAmfONsDNDU1sY69Dg4iVns2/vHjx0Ru4evz8Y+vT2NqWKjJZXZ0fAHb/rYVx44cgq2t7ZDxibMCIBaaz4eNDfuhi5AhAM+e3cNOX+DEAGQeTMeSxQuNLq/7+HHISD+AdWvXELeAzekT5wRAOkwC0GNFAYBAMIzItba0Ern29nbs3LUX0yNewZm8s8YvWatrsPLNNViwOBYPK2RDyidOCoBtL6Hr+Exa1xOfbc/87NZWDZbHvYXsnCNob2/vVbl/+vkmomMW4YfrN4aET5wVwMiRI8lrb4MBOp2e1b6NJYAOIuaxNHX7Tly/8SMjN3lSCE4cP4ry0p9RVVGOq5eK8XZSIuP5v0ajQfz6jWho+K/ZfeKsAHqaEDU+Zj+0YZsUiRgqS6msxslTuYz5Q4KDkHvyBGbOiIBIJIJAIICnpxibUpKxd3cao019QwM+P/aFWX3itAAEAgHcx48j8qo6Fau9WqUmcl4SSbc0aVExcSs4aUMC4/4AAMQuXYyxY8cYtUk02D5xfiOIbWdMXqVgta2Uk49amW7T3L//gKUc5Ns3PB4Pvj4+jNzDChn0er3ZfOK8AIIDA4nc7dI7RO633xpRXfMLcbOFqcU2t7T0upykPXqDwYDWLjP/wfSJ8wL4ffQ8IictLCJyBSxczPzXGNNHjRpFtCkrK2edvJXfu0cYxmwwcsQIs/nEeQF4eLgjbEoosXvN+/obxqXcwc+yiM9ctoT5wua4F92INhmZ2cRl2qncPNTW/oeRc3N17dY7DKZPnBcAACSujydyKZu3IOfIUahUKmg0Gvxw/UfErlhFvCYV8XI4JhLG4EjCSeDTtX3sijdx5eq1/18L16GqSoGPPv4E72x5j2g3I+Jls/pk1PAF9P5W8I60PTh0+PN+CbS390v4toj5XHvdHzawdo/GQCgUoqQwH2KxB7Erj45ZxHpaZ+q84PzZXAQFBZrNJxI4dyt4d9oH8PQU9ykYO7enslYUj8dD2o5tEAj654B01crlxOAPlk8WMQQAgIuzM3L/fgIST0/THeTzsSdtu1F36CeFBCMnKwNCobBP5Y2ZH40dH6QOCZ8sQgAAMGbMaEjzv0FiQjxxU6bbMjIoEOfP5iJu5XKj3xM1ZzZKCvMxe1akyX/BcnN1RfrHHyIrI92ov4gNlk+cnwN0RV2dCtKiYlwsuQSFQgGVuh5arRZOTo4YPXo0pk4JxdyoOQibMrlP/6P7tbYW+RekuHXrNsrvP0C9uh5Nzc3Q63Wwtx8BB5EIEokYfr6+iJwZgfDp03r8N5C5feo6B+iTACi4Cc5NAikGDlQAVAAUVAAUVAAUVAAUVAAUVAAUVAAUViqArt+YobA8dI0xH+j502IUloenMef3pBAKy239zwmA9gLW1/oB+tk4q2v5rJ+NYxIBFYLldPlGfTiSJAIKy+n2jRIAFYJlB56CAgDwP0LgLaInypq+AAAAAElFTkSuQmCC", + "144p.png": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAX0AAAF9CAYAAADoebhRAAAZ+0lEQVR4nO3d+3NU5R3H8e/msgm5QgQiMeGSSmntbexUZKbtTIu1aKszYv9OmRbLpVVLx/YHqoOjqOOIGBISIISQCwkx920+Dzxxs9kkm72ec573a2ZnN2tuSPjsJ9/znOc0WIVdvXo184tf/MKam5sNAPBEJpPZ9Nzo6Kh98cUX9sc//jFlFVLWT/yvf/0r8/vf/37T88vLy/bo0aNNzy8uLhoAhChf6D/77LN537eurq5sWV2WT5TJ+u5v3769/nxra6t1dHRYY2Nj3o9bWVkxAAjRWpBv+d/U+L3cF4JSXwBK+mAf9j7oDx8+bACAneVr+lvxLwLZLwDFhn9RH0TYA0BpdhP62bJfAL7++mv78Y9/vKsc33XoK/AJewAoTbGh7ynwT5w44R7vpvUX/I7Z7Z6wB4DSlBr6ng//QoO/rpB3IvABIJoU+Ar+1dXVzLvvvrvjK8mOrwwK/AcPHrjHBw4cMABA7eX+puAb/4ULF+zNN9/cMttTO3xSAh8AIijfeKiQUU/dNp9w/TMS+AAQfdmjnq3eZ9uZvlo+gQ8A8bFT8Of9FcAvy+SgLQBEU6Grf3JHPXV5PhGBDwAxNzw8nPf5gpZsAgDipa+vzwV/7pinIfsNWj4AJFuDAQBiJ5Xa+QTcgwcP2p07d1yhTz39gPXQ90s0afkAkAzpdHrTcxtm+tl74QMA4ks9Xnv279mzx12w6r///a8r9hzIBYAE0jRn7SCuPfPMM3b37l07deqUe96Nd6I42hkZGcn7fG9vryX96/utL7777rsNz+/fv99aWlrcX6Rsd+UdAPBzf13F0OeGeyZKq3Z82G4XrnqfSoXvTp+7kO+vWDvtc6S/NP097du3z9ra2qy+vt4AYCv+BK75+Xk35tHB3Mis3skN09QWh6aHh4cz/n3KGf7+c+mmx319fVuerezff+/evS58yyF7y4ut/uxffvll5oUXXnCP7927Z4cOHTIA2Iof8Sjw5eOPP85Eoulnh3eqkHVI9n34DwwMWH9/v5XCf/3twj6XD/9S9ycqJOxzffDBB5nTp0/b4OCgHT161FCYqakpdwOK0d7ebh0dHdbY2GhxoqhStIyPj7sR8XroT09PW2dnp1Xb2NiYW0t648YNbRRU1OUbS2n8/mMLDdzcr637cmxMV+zXp/HvLN9FpYFizM7O2tzcnHV1dVlDQzxOc/Khr38H+jfQcOnSpfJcs6sIPvC1I9yPfvSj4q7svqbY4C8l8LO/tgJ/t8GvHx4/Gir16xP8W/M/6HLu3Dn7y1/+UtT/a0CWl5cz+ndbyeOKlbL2vbv7mi//0KtQsYHv/e1vf1sfzxSq1MD3iv34ycnJkj4+++sr8BX82EjL1BT4H330kfv/ROCjVGvtXj9KKZVV/XzFycrKiruvWegPDQ25lr+bq7hv5a233qrpP2b9EPi2XwhtgqTNkEoNfGxNxzs0v3z//fft5Zdf5v8zyqqpqSnV09MTq+D3SzZrMpTS8qHu7m4rp92MecrV8qOCMc9m/gD3q6++SuCjIvy/OwW/XgDioiZN/9GjR9bc3Jyoplto29csXy3/r3/9q6Ey1PKF36SAzWoS+rlnmlZbLQ/A+Fn+2bNnyxpItHyg+lQs4jbmSdx5/IUG+ocffmhJcv36dXfPAd0nox3f9gFslKjQ1wFSrQbyt3x0PoJMTExYJWgN73b8wRRUhv8tstCD6kBoEnURld2MbeK2xrZQjHieeOmllwzAZokb7+y0Vl9nHavt/+pXv7IkOXPmjMHcHiOMdlAt//nPf9xIgdU7O/Cb/9RKJUY777//vvvLP3LkyLbvp72tZS2Yyn4mNPP8J7RHClANv/nNb2J3klZNQl8nZenkrMxWg/ci+NAtZGyjfTPK7ZVXXnF/pp3oFG69304vDiieXljV9hcXF2u2xQiSz+dXn
Fq+1Gy8U+62r10nC92GQSOeW7dule1Fx7/gFMq3fe2WaWXAiVmbaQWPGlg5iwXgffHFF+7nKm4tX2p2IFdt/+lqm0ypJ9Gsrq4W3PI93/b1saVuBeFbfqHt3bd9vVCVau2Hzv3ZCfzN9Pehxl+OnzHA80UibmfiejVdvaMzU7WPfyn/KH3g73bXO7V9LevT/iz/+9//MsXuz6LvvZhxjd7fj7iK/bNfvnw54zdbI/Tz82v2afwo1cLCgqXTaZcbuo9j4EvNl2zqwi3FNv7s/ez1ArJb2jZBr9YnT54s+uuXMp8vJfgnJyczunIXgb8zBf/Dhw9tZmbGHeT14zWgUGvHh+z+/fvucZSuJV6MSKzTV2D74NfbOwXguXPnMmfPnnWPS72AiT+FWveFhq9+u9C7leOAbHbw6+1Cvv7S0lJG18cl8AunoPdhX+ttQBANu73GdNzD3ovMyVk++HXvA1CXtrt27Zr7y9Flyl588cX1PaF1mUQFdalXrBL/a1rugb/PP//cfU/6Ve748eMuoPX19Wue9tAp1wocH/zaedR/fb2tP+NawLvnT5w44S7Ttnb8wY0rNMoi8ItT6yXDQC2ldOWs1157zWp1ucR8FLTy3HPPbXhe3+Pjx4/dr+iV+l6zj8bnzux03EAhrLBVAO+2KexEW077XyH1QqAr3eiSbHqh0Z9dO3S2tra6g9AclwRQCH+5RK1YPHbsWDS3YfDz+dx9bLTqRWGvtlsp2UGfOwbQiiMFfbnD3tN20/63B70AiL/EWUtLS0XOLwAQlkjvvaMQzFbJsC/k6/t27SdAlWzbuV8bAMoh0qG/VchXI3Tzff7c3TsZsQCIm0iHvj9o61W76bMNMoCkiWXTrxaaPICkSdR++gCA7RH6ABAQQh8AAkLoA0BACH0ACAihDwABIfQBICCRDn3WyQNAedH0ASAghD4ABITQB4CAEPoAEBBCHwACEunQz750IQDESe7lVqMikqE/Pj7u7mv9Py37gin5sKQUwFZUWqMY/JELfQX+/v373eMUqQoghi5fvpw5c+ZMJIO/tlcpyTE2NuYC/8aNGwQ+gNh67bXXFGEpBX7UxtSRCX0F/sGDB939iRMnCHwAsRfF4I9U09cMvbu7m8AHkBj/+Mc/LErBH4nQn5ubs66uLvvoo48MAJJEox6LkEiE/uPHj62hocFOnTpFyweACorE6h01fQBA5dU89DXHb2lpMQBA5UVivDM/P28AkGSNjY22srJitcbeOwAQEEIfAAJC6ANAQAh9AAgIoQ8AASH0ASAghD4ABITQB4CAEPoAEBBCHwACQugDQEAIfQAICKEPAAEh9AEgIIQ+AASE0AeAgBD6ABAQQh8AAkLoA0BACH0ACAihDwABIfQBICCEPgAEhNAHgIAQ+gAQEEIfAAJC6ANAQAh9AAgIoQ8AASH0ASAghD4ABITQB4CAEPoAEBBCHwACQugDQEAIfQAICKEPAAEh9AEgIIQ+AASE0AeAgBD6ABAQQh8AAkLoA0BACH0ACAihj4JdvHgxk06nbXZ21t56662UAYgdQh95LS4uZurq6ky3VOr7fM9kMu5+ZWUls7S0ZE1NTba6umqffPKJvfTSS7wQABFH6GOdgj777fHxcT1navfNzc3rz/vgn5mZUfi7F4Vf/vKXej6jtxsaGgh/IKIIfdjy8nKmvr7eJiYmbG5uzlpaWqy9vd26u7u3/bi9e/dueHtkZMTdZ56+KqSyf0UAEAmEfuAU0Ldv33aPDx8+bF1dXVas3t5ed0/4A9FF6AfKB7ICX2FfTvnCn+AHooHQD1AlAz9bdvjT+oFoIPQDo/B98OCBe1zJwM+m8Kf1A9FA6AckO/APHDhg1UTrB6KhzhCEa9eurS/HrHbgZ/OtXzet9f/ss88yBqBqaPqBePHFF00tv5aB7/nWf/36dfvpT3/qwr++vp7WD1QBTT8AGqcMDw9HIvCz/fznP3dn/PqRjx/7AKgcQh81pwPK/lwBg
h+oLMY7CedPvqrWSp1i+e9P3ysHeoHKoeknnDZDixNaP1BZNP0E++c//+l2yox6y89F6wcqh6afYK+++qqNjo5aXPnW78P/woULNH+gRDT9BNN+97rgSZxlt/4//elPnNELlIimn2ANDQ3W2tpqSUDrB8qDpp9gKsTaGz8paP1A6Qj9hLp06VJim7DCf3Jy0l25S8H/+PFja2trI/yBAjDeQSzt27dvfeSjERbLO4HC0PQRawp+Nf2HDx+yvBMoAKGP2FPT102t/5lnnmHWD2yD8Q4SI/ekLkY+wGY0fSRKdusXWj+wEaGPRGIrByA/xjtItNyTut577z1GPggaTR+Jl936//CHPzDyQdBo+ghGbusfGBig9SM4NH0EJbv1HzlyhOvzIjg0fQQpO/xXV1czuhkQAEIfwdJYX21fwa/HrOtHCBjvIHgKfhkeHmZ5JxKP0Aee6uvrc8HvWz/BjyQi9BGsfJmu4BdO6kJSMdMH8tCBXu3c6cP/8uXLzPuRCDR9YAvasVM3Bf+ZM2cY+SARaPrADnJb/9DQEK0fsUXTBwqQ3fr1IkDrR1wR+sAu+Navq3X5E7rq6uoIf8QG4x1gl9T4/T4+wtm8iBOaPlCk3K0c9JjWj6gj9IEi+ZG+38qhvb2dWT8ij9AHyoArdSEumOkDZZS9Z//y8nLm4sWLzPsRKTR9oMx8679165a98sorbt7PrB9RQdMHKuTYsWM2Pz/vWr8u1nLlyhVaP2qOpg9UUGtrq6XTabt79679+te/5kpdqDmaPlBBawFvzc3N1tvbawsLCzY6OuoO9N6/f5/Wj5qg6QNVsmfPHmtra3Pjnp6eHnegt6GhgdaPqiL0gSpR69dqzqeBb2NjY+4grx6vjYAIf1QF4x2girR0f63du5GPVvmsjXkU/G7Wb0AV0PSBGnr22Wfd/dDQECd1oSpo+kAEaCsHrfCZmppyI59z587R/FERNH0gIrq7u939nTt37OzZs+zjg4qg6QMRoemODvYeOnTIJiYm1vfx+eCDD2j9KBuaPhAROsArCv6uri53U/CfPn2a1o+yIfSBCMu+WIuCX4/X5v+EP4pG6AMRl71tM9fnRakIfSAm2LMf5cCBXCBmckc+BuwCTR+IIVo/ikXTB2Is+0pdCv933nmH5o9t0fSBmMtu/W+//TYHerEtmj6QELR+FIKmDyQIrR87oekDCaTwf/TokQ0MDLjg//bbb2n9cGj6QEJ1dHS4m4K/v7+f1g+H0AcSToGv1j8+Ps7yTjDeAUKgxq/wV+sXTuoKF6EPBMS3fj/rf/fddwn/wBD6QGCyW/8bb7xhHOQNC6EPBMoHv+5p/OHgQC4QsOzGjzDQ9IHA+eDn4G4YCH0ABH9AGO8AQEBo+gAc2n4YaPoAEBCaPoB1avvC2v3koukD2MCv3UcyEfoAEBBCH8AGbW1thuRipg9gg4MHD7r7q1evMtdPIJo+gLx+9rOfGZKHpg9gk7GxMWtpaTEkD00fAAJC6ANAQAh9AAgIoQ9gE7+CB8nDgVwA67TX2vDwsHv88ccfG5KHpg8gr5MnT6YMiUPoA0BACH0ACAihD8DRPH9mZsb6+vrsypUrhmTiQC4AJ5VK2eTkpHt8+vRp5vkJRdMH4MzNzVk6nbbV1VVDctH0ATgPHz60Q4cOWX19PS0/wWj6QKByr3/e29trN27cMCQbTR+AffPNN+7+xIkTtPyEo+kDgVPgHz9+3Orq6gj8ABD6QMAI/PAQ+kCgCPwwEfpAQJaWlmx2dtbm5+cJ/EBxIBcIhFbrDAwMWFtbm1uaSeCHiaYPBELr8NXuWYcfNpo+kFA6s1a3wcFBd9/V1UXgg9AHkur27dvu/vDhwy7sCXwI4x0gYaanp+3BgwfW39/v3ibskY3QB2LKb6Og0c1asNvKyoqNjo7ac889Z52dndo1k7DHJox3gJhSyIsCX+1eK3MU+Ldu3SLwsSWaPhBTDQ1P/vmOjIy4JZitra2EPXZE0wdiS
nN7HazV7pgffvihNTY2EvjYEU0fiKGbN2/a888/786wpd1jNwh9IEbGxsbcDF8rcwh7FIPQB2JgamrKBf4Pf/hDdwCXZZgoFjN9IMI0vtGBWpX6Y8eOuXa/dgCXwEfRaPpARM3MzLj9co4ePerCP51OE/YoGaEPRIhOtFpcXLTx8XG3Kqe9vZ3ZPcqK8Q4QEZrV6ySr+/fvu8CfnJwk8FF2NH0gIoaGhtwZtbR7VBJNH4iAu3fvumWYn332GSdZoaJo+kCNLC8v271796yvr896enpo96gKmj5QA9pCQStztGfO+fPnCXxUDU0fqCK1e+2GqcsWKucJe1QboQ9UiUY52hlTgc9FyVErhD5QQVp3r3av3TC1QZreJvBRS8z0gQrRla20KiedTrvA1yiHPXNQa4Q+UCHffvutW5UjzO4RFYQ+UGaDg4PuXu1ezZ7AR5QQ+kAZaFav7RM0u9cGadeuXaPdI5I4kAsUSUEvawdm3cXIn259TNgj0mj6QJGU7bqwyejo6Pp1almZg6ij6QNF8teppd0jTgh9YJc0yhG1e5o94obQB3bBz+6Fdo84YqYPFMC3e3+dWgIfcUXTB3ZAu0eS0PSBLdDukUSEPpCHb/e6J+yRJIx3gCw+7H27NyBhCH3ANo5y9Li/v5/ARyIR+ggeB2oREkIfwco9UGtAAAh9BGd+ft5dupB2jxCxegdBYRkmQkfTRxBmZ2fdfvdrB2htZWWFdo9g0fSReLqSVVNTk/3gBz9wG6Q1NjYS+AgWTR+JNT09bQ8fPnSjnJmZGV2gnLBH8Ah9JJJfhtnZ2ckoB8jCeAeJorD3ga8rWRH4wEY0fSQGJ1kBO6PpI/ay2/358+cJfGAbNH3EGu0e2B2afkK9/vrrLgCXl5ctiYaGhmxhYcF6eno4yQrYBUI/4ZIY+gr8I0eOuMfNzc2EPbALhH7CLS4uWlIo7EWBr2ZP4AO7x0w/4ZLS9LPbPaMcoHg0/QRTy9fJSaurqxZXue2ewAdKQ+gn2MWLF13oT01NWdwo7H271z1hD5QH450EO3v2bCqzZmlpyeKEUQ5QOTT9hNNMP51OWxxkt/sLFy4Q+EAF0PQTTtsIq+1nn8QUFWvf1vpj2j1QHTR91FT2gVrtda+bAagYmn4A1Jyj1vYfPXpkExMT6+2esAeqg6YfCAW/At9fI7aWbt68aR0dHbR7oAYI/cDUMvjV7hX4zz//vHubsAeqj9APiD9AWovg9+1egc9JVkDtEPqBqXbwZ7d73RP2QG1xIDdACt6BgYGMD/59+/bZ3r17rdx82KvhE/ZANBD6gerv73chnHnC1l4ErL293W3bUOrJXCMjI+5egX/lyhU7ffo0gQ9EBKEfODXwf//735nf/va3boO28fFx97zaeVtbW8Gfxwd9b2+vu/nPbQAihdCH/e53v3Ph/Pe//z3z5z//ef35O3fubPtx/oza7KAXwh6ILkIf69544431sL569Wrm5ZdfLvhjCXogHgh95HXq1ClCHEggQh8AAkLoA0BACH0ACAihDwABIfQBICCEPgAEhNAHgIAQ+gAQEEIfAAJC6ANAQAh9AAgIoQ8AASH0ASAghD4ABITQB4CAEPoAEBBCHwACQugDQEAIfQAICKEPAAEh9AEgIIQ+AASE0AeAgBD6ABAQQh8AAkLoA0BACH0ACAihDwABIfQBICCEPgAEhNAHgIAQ+gAQEEIfAAJC6ANAQAh9AAgIoQ8AASH0ASAghD4ABITQB4CAEPoAEBBCHwACQugDQEAIfQAICKEPAAEh9AEgIIQ+AASE0AeAgEQi9Ds7O215edkAIKm+++47i4I6i4CGhgZ3+/TTTzMGAAmTyWRsdXXVoiASTT+dTtutW7fsJz/5iQFAknzyySeZVCplra2tFgWRafo9PT32+PFjm56epu0DSIwXXnjBRkZGbP/+/RYFNQ99vQJKU1OT1dXVWUdHh34VIvgBxJ6y7N69e9bb2+uyrr6+3motEk3fa
29vt5mZGXfT/6x33nmH8AcQO5mnBgcH7ejRoxYlkVuyqeAXzfjffvttWj+AWIpi4Etk1+kfO3bMNf7x8XEDgKjxo+l8FPZRDHyJ9MlZav2++QMASscZuQAQEEIfAAJC6ANAQAh9AAgIoQ8AASH0ASAghD4ABKTu9ddfd2cYaLMzAEAyNTc329LSEk0fAJJuZWXFhb67dokBABJNDb+trc2++uqraO2yCQAoP4V+Y2OjLlSVWg/97u7uyFzDEQBQHtoYbmJiYv3tuqdPprS5/9zcnAEAkmvDeEfD/qhcvBcAUB5HjhyxoaEh93h9Q+i1sM9o7qNbVC7gCwAojcJeoZ96egGA9aZfV1fnnpuamjIAQDJtGO9orq/gn5+fNwBAvOW2fNmwTn8t9FO6Ju3Y2Jg1NTVtezkwAED85F2nr6a/vLxsAIB4ytfyZVPo6x0OHz7sruQurOYBgOTI2/QvXbpkx48ft/HxcR3gNQBAfGzV8mXLof3i4mJmenra9uzZwxJOAIiJ7QJftqzx6XQ6tX//ftf2AQDR5wP/vffe2/J9dlyes7CwkLl7967bm0etHwAQPT7wdX/06NEts72gNZkKfjV+7cfc2dnp1vNrdY/2ZgYA1FahgS8FHaVtampK9fT02L59++zmzZtPrr5C4ANAzWWPdHYKfNnV2VcDAwMZffLh4WFraWmxAwcOGACg+nzYS2oXZ9IWdcqtztr1X1T8FwYAVJaOsYqmL5La5dYJRe+zcP78+cybb77pHhP+AFA5Puil2LD3yrK5jm/+4l8ARKt9Dh48aACAjRYXF93Jr36jS8WoFsjomKnuZ2dn19/XB70UG/brH29lNDg4mMnX9rWBGwDge36LG4W9clyLY7TRpVZJ6nq22UoN+g2fyyrs/v37Gdo+AOSnKxaq8SvXFxYW7Pr163by5MmKZfP/AUrlBtzSfTfeAAAAAElFTkSuQmCC", + "240p.png": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAABACAYAAADS1n9/AAAIsklEQVR42u1da1RU1xX+5kUywEAD4aUJqFFAwFYwPtpEE6MhqFRxWKiA76VFlnGpiCCKChIQeRQfbWzUiFqbAEGBWi1C8C1RG8AoIFCXgICAKMhzZBigP7p0gcOce4EZwsyc7+fd++67z93f2Xfvc+7MBSi0GhwmBTNLm256m9QXtY+LOQMiAA28dhCBwyb4j0oK6B1UQ4yxtmckAYcUfBp4zSPCmyTg9BV8GnjNJkJPEnDpM1/70DPW3DeFdPZrLvqKLZfOfu3OAlw6+7U7C3DpLdFuUAJQAlBQAlBoLfhDcZGKyirk5OQiv6AQZWXlKC0vR319A1pbW9DeLoWenh4MDUSwsLDAhAn2cHKcCOfZsyAUCjXiJufd/QUZmVnIyc1DaVkZGhpeoKOjA7pCIczMTGFtPQ6/nzYVrnNdYGJiMqS+cXq2BMrqAqRSKW7czMbFzCxkXbqMmpraftsQ6evDXeyGLZs34p13fqOSwUfHxuHAoa8Z9coeFoHP5/Xb/pWr17B3XwwKCh+w0ufxeBC7LUDgVj+Ym5upLOg9VwRVQoBjx08gZE+4UmwZGxkhNjoSs2fNVOpNuHfvPuaLPSCTdSqdADKZDDt2heIf3yUMyDcDAwPsj42C8+ezVE6AYV8DPK+vxxofX/w7PUNpNqVSKTZtCWAV/P5CJuvE6rW+Aw4+ADQ1NWHtuvVISf0nLQJf3dQNm7agorJKKfaiYuJQ8t+HKvE19KsIXLp8ZdB2Ojs7sdk/EHl3f6EEAICXL18iLHzvoO38nJOLI8eOq8THnNw8nDx1WonEl2Hrth0qyVRDTgAnx4nYvPFL/JBwGrezr+JRSQEe5Och7WwSVq1cDj6fuSHJyMxCU1PTgH2QSCTY7B+Irq4upY+vq6sLAUHBRNvGRkaIjgxH7p1sPCy6j6yL57FimTfRblFRMY5+G6+eb
aBIJIK352J4LlmED8aMlpPr6OhgkpMjJjk5Quw2H15LV6K5pYU4I65dvwnXeXMG5E9EZDRKS8vkjgsEAnR0dAxqrNdvZqO4uEShXE9PF6lnEjF69KjXx2xsrBEeFoKRI0YgYl+0wnOPnzgJn7WrweVy1SMD6OnpYcN6X9y6cRnB2wP7DP6bcJz4O4SHhTDqPa6oHJBP2T/dwok+0vMcF2eMt7UZ9JgTE5OJ8vW+Pr2C3xPrfNZgzGjF96i6ugbXb9xUn0eA52IPBG71g6GhYb/OWzDflfGc58+f99ufltZW+PlvQ3d3713vd42NERkRNujxSiQSpGdkEnU83MWKg8DlQrxwAfH8syrqCIZVEcjj8TD2gzFEHYFA0P/KPCwClVXyHUTk3jAYGxkN2u/8gkJIpVKFckvL92FhYU60MWXyJKI8N++udnQBEomEKH//vZH9snfl6jV8n5DU54x0cf5cKT7n55MX0OztxjPaYNIpKytHS2urZhOgrU2C0rJyos7kyR/2a0HFP3C73HELC3PsCQlWmt/5BYVEOdPsBwBDQ0Pi3kd3dzcKCh5oNgHOpKQSM4CDvR2sx41lbS949x65fQgOh4PYqEiIRCKl+V1T+5QoN2W5wWNi8i5RXvu0VnMJUF1dg8ioWKLOpo1fsraXnpGJsylpcsdXLPPGjOkfKdX3FkLr+qorYtU96ZL1WppbNJMAjY2NWL12HRobGxXqzJvrwvqZXV/fgG1BO+WOjxplhR1BAUr3v7m5mSh/S0eHlR2dt8h6TQzXUUsC1Nc3wHPpStwnFFLW1uMQE8V+GTgoeBeevdEu8ng8HPhztEreMWhrIxeuPD679TYBg15bW5vSfef/msGvqKyC97JVeFRaqlBnhIUFTsUfhUhfn5XNtHP/wvkL6fKLLX9ag0lOjioZh1D4NlEuY7nKSGol/38dXc3JAIUPirBA7EEMvrm5GX5IOI33RrJr/erq6hC8K1TuuK2tDfz9NqpsLEwFJVNgX6GdQc9ApK8ZBPjp1m24L/LC06d1xMWTM4nfwcrKkrXdgKBgNDS86J3i+HwcjIsZ0AKSsgjQ3MKueGttIff5IgOR+hPg/IV0eC9fTSycbG1tkJqc2K/gy2SdyPzxktxxv00bYDfeVqVjMjM1YchMz9hlsGfPGK5jqt41QPzJv2N36FfELdNpU6cg/tjflNanR8XEISomblA2Ro3tTaClXkt67SE42NsjKfmswvOfPKlmVQy3t7crlHM4HNixWFEctgSIionDwb+QX8B0nTcHB+NioMOybRoucHCwI8rZvBTKtJpoZWXJuhAeVo8AmawT/gFBjMFftXI5vj60X+2C/yoDkPyurKrqczOqJ27fuUOUOzlOVInvKiWARCLBGh9fJCQlE1NbUKA/wkJ2quSFh6GArq4QXzjPJuokn0klTpKUtHPE88Vu89WLAC9eNGKJ9wr8mHVZ8fOHz8f+2Cis9/WBumOxhztRfvibIwpb3kN/PYzHjyuI7fD0jz9SHwJUPXkCN/dFyMnNU6ijp6eLk8ePwF3sBk3AjOkfEzeqWlvbsNB9Cb5PSEJdXR3a29tRXFyCwO07ERt3gPx4XLEMPB5PJX6r5IchbAq+geK3Exxw4VyKUm3O/eNC3Lufr1DO9ochd/7zM9wXecm9eTQY2NhY4+L5NFYvzbKFWv0wRJ0wZfKHWL7US3ktGp+H6MhwpQb/V18I0nSE7g7GJzOmDz4wXC6i90WorPqnBFAR+Hw+Tnz7DTwXewzYhkgkwpHDh4gvklICDGMIBAJE74vAqfij/VqG5vF4cBe74VLGBbh84Tw0hKXhUh0+m/kpPpv5KXJy817/P0BZefnr/wcQCt+GmakZrK3H4Q/TpsJ13hyYmmrA/wNQDG/QLoCC1gAUlAAUlAAUlACUABSUABSUAD37QwrNXwPoRQCmT4tRaB5exZzLxBAKzZ39vQhAs4D2zX6AfjZO62Y+8bNxfZGAEkFzUj6rD0cqI
gGF5qR9VgSgRNDswFNQAAD+BzLuJpmRkU/7AAAAAElFTkSuQmCC", + "2k.png": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAABACAYAAADS1n9/AAAGcElEQVR42u2dW1ATVxjH/xuCGrmpNSDQqdhWnI4koKKo3FHQ1trp1DoqNwGlVmvt2FFEEapFKaCOChq5hnBLAAFp+1AvqHRaEZX7TFvttEJ9wLEF+xCCOmDSB8cOUtjdIgm7m/N/zceeL/v/fd+es3vIAkRmLYopwOG1OQZymvirB/fuUKMCgBhvHiBQbMy/++tP5AzyUK+7zmWEgKIznxgvPBCGQkANZz4xXtggDIZARK755qfBXouGfkiqX7gazlsRqX7z7gIiUv3m3QVE5JSYtwgABAAic5aYawlpe3vR2tqOtvZ2dHR0ovOPe+jq6kKvTgedrg8GgwF2trawsbWBy8yZ8HCXYaGnJ3y8l0AkIjzzDgC9Xo/mllbUXr6Ky1fqcPvOHRgM9IuS7p4edPf0oKOjE1frvgcAODk6IjxsPbbEbsLEiRP/Vw6r3/8QLa1tI37+qrMzGq7VsT5eS2sbQsOjoO3tpY2Tuc1FWWkh7OzszBeAa/UN2BC+8aWP03X/PtKPHkdVdQ1OHj8KD3f5uHyf5pZWhEVE88J8Qc4Bfr/bgXWhEWhsajb52E3NLazMl8vcOGG+YCeBOl0fIqI248GDP002ZmNTM2vzNSUqTpgv6FWAVqvFoa/STDLWrcYmhEfGoFenozdfLuNM5XN2FQAADg72CPT3w5LFXpDJ3CCdPh02Ntbo7u7BzcZGKAuKWLX4mq+/ReK+eNjbS41r/sYY6HR9zOaXqGBra0tWAcPJwsICb68Mwbq1a+Dn6wMLC4v/xMyY4YD33l2F1avewZFjJ5BxSkF7TIPBgEu1lxEWut4oOd+42YjI6E2M5ru7y6EpLuCc+Zy4BFhairF2zQeoqz2PrNMZCAzwH9b8waIoCnG7dmLlihDG47e2tY+7+VysfM50gMVei7DYa9Go/vbTbVtw/sJF2pi/uruNYP4tRERtQl/fI9o4D3c5NCUq2NjYcHauxOtJoFwug1gsZpwMjqUabtwUjPm8B4CiKEgmTaKNGUsDrjfcQETUZkbz53m488J83gOg1WoZ192z33xjTMaqv96AyOhYPHrEbL6aJ+bzHoALF2sZY7yXLnnpcR7+/RAbYz5iNH/+PI9n5ltb8+Ycivlq/sDAU+TkKRnvJ/h4e7/0WEwt/7n5pcUFvDKf1x3gREYmfv7lNm3Mju3bIBZbGD2XBfPn8dJ83naAsopKnMykvwm00HMBIsI2mMx8aysrXhYS7zpAZdU5xMUn0O4ZsLeXQpF5wiQbRFxcZmKyRMLbeRSvAChRl2Hnrj3Q6/UjxlhbWUGVnwNHxxkmyamquga74vbS5kQAGAPl5hcgfl8ibeVLJBIUFuRBLnMzaW4VldXYvWcf404mMgcYpU5mKnDk2HHaGCuryVDl58Brkee45Fh+tgoikQjpqYdBURQBYKyUmn4MpxRZtDFTptihpFBptG1gEokEer0eT548oY3TlJ8FJRIhLSWZNxBw9hJgMBiQdDCZ0XypVIqqcrVR9wC+Mm0alHlZrDabqjXliE9I4s3lgJMA6PV6xO3dD2VBEW2cs5MTqivUmDPH1eg5+fv6sIagVF2GvTyBgHMADAw8xWef74amrIJx+VVdqcGsWS4my83f1wfK3DOYMGECqxVLQuIBzkPAKQD6+/uxdfsOnKv5hjbO1XU2zp3VwNnJyeQ5+vv5soagqESN/UkHCQBs9PjxY8TEbsV35+k3eMhlbqgqV0MqlY5brgH+fsjPUbCCoLC4lNMQcAIAna4PkdGx//6Xz0ha6LkA5ZpiTJ06ZdxzDgzwR172aVYQqIpKkHggmQAwnLRaLUIjolB/vYE2ztdnKdQce+ASFBiA3KzTsLS0ZIwtUBXhiy8
PkfsAQ3Wp9gqamlsY4374sR6z3xrdUq/zt9tGeyq4LOgZBLEff4L+/n7a2HxlIShQOJCUQDqAkLR8WSByzpxi1QnylCocTE4hAAhNwcuDkK3IZNykCjx7rpF8OJUAIDSFBC9D9hl2EGTn5uNQShoBQGhaEbwcWYoMVhBk5eQhJTWdACA0rQwJZg2BIisXKWlHxi1XCiA/EWtuGvyTsaQDmLkIAAQAIgIAEQGAiABAZO4ADH3HDJFwl4AvAMD0ajEi4em55yImQoiEW/0vAEC6gPlVP0BeG2d2lU/72rjhICAgCKfls3px5EgQEAmn7bMCgIAgbOOJiAAA/wCDa2LN1B14pAAAAABJRU5ErkJggg==", + "360p.png": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAABACAYAAADS1n9/AAALQklEQVR42u1daVRT1xb+kiAhwYQxBAdQRFBwBgUtIKBVa2srBSsVq9apirPWgRarpbVUq/X1PVupUKmoVO2kOFVxqCKogEwtKOAUZFBABUkgCgn0x1t0IXDPvUDiMuF+a/GDnH32Pbn7O/vsvc+5uQCLTg0OnYDUtl8De5t0F6X38jjtIgBr+M5BBA4T49/Jz2HvoA6ij+MAWhJwSMZnDa9/RGhOAk5rxmcNr99EaEoCLrvmdz40tTW3eSM7+/UXrdmWy87+zu0FuOzs79xegMveks4NlgAsAViwBGDRaWHwIi5SUVGJK1eTkZaegZu3buOuTIbKykrI5QpwuVwIhQIIBUJ0794N9n3s4OTUH74+3rDvY6eV8aRnZOLCxQSkZ2RBVlCARw8foUapBJ9vCJFIBFsbG/Tv5wh3txHw9RkNExOTDl0vIzML8WfOIS09A3dlMlRUVKKurg5CgQBSqRUcHR0waqQ7Jr3+GiQSyQslAKdpSqDJLODR48c4euwEDh85iozMLDQ0tD3TdHR0wLIli/DWpNfB5XbMWanVahz6+Vfs3BUFmayA+Qwx4MHXxxuzZ82El+cr4HA4jPteuJiAL7dsQ871G4zkeTwe/P0mY92aVbC2lmrN6E0rglojwNz5wTh95qxGdHm8MgqRETvaPRPzb97C8pWr8Xd2x75f4oWz6N27F62cSqVC6IYwxP50sF3XEYvF+ObrrzB+3FitE0AnYoCky1cQGDQTdXV1be57MeES3vKb0mHjM4VKpcac+cHtNj4AVFVVYf7CxTh85CgbBDYiO+c6/vftzjb1uZqcgtnzFkJRXf3Cxhm2KRzn/7zQYT1qtRorV69DRmYWS4BGxB44hPr6ekaylZVPsGT5KtTW1r6w8aWlZyBm734NehMV1oSEQqVS624WwOFwMGTwIIzx9caggQPRz9EBZmamEAqFqFEqUVRUhCtXUrBn737cuXuXqKusrBy5eflwdupPe921H4XiwYNSokyP7t0RNC0QI93dYG9vB1MTEyifPsWjh4/wd3YOUlKv4eSp0ygtLaO9Xn19PdZ+tJ5IUAtzc4Ss/RBjx/hCLBahoOAe9u7/CTH7Yin75ObmIWr3jwheME+3soCP12+ERCLB9GmBsLKiT21qa2uxaMkKnIo/Q5SLiY7C2DE+RJnklFQETA0iknLZkmCsXL4MBgY8oq6GhgYkJCYhMioaX24Kg62tTeuxxqVETJ8xm1KPsbEQp47Hwc6ud4u2nRGRCN+ylbJvt27WSE662OFM6IUGgeGbwrBy+RJGxgcAQ0NDfPlFGG2a9fTpU1pdP0TvIbZ/EhqCNR+upDV+I1m8vTwRuzea0vgAcOjQr0Q9i4MXtGp8AFi4YB762FHXPO7ff4BLiUn6HwNIJBKIRCKiDB2hiktKEH/mHGW7u9sIfDBvjkbHrVQqaT3XOwH+1EbgcuH/9mRi/9+1lBG8VAQoKyuHXC6nbDcyMsKggQOIOs6dvwC1mjpoWrZ0kVYyFFKwaWtrg27drIk63Ea4EtvTMzJ1Mwhkirq6OoR
u+JRYMZzi7wcjIyOinmtp6ZRtJiYm8Bg1CvX19Th77k/EnzmLa2npKCt/CKVSCVNTU0gsLeDqMgxenh4YP24sDAzob1E2TY1hgLMTrQ46GZmsAIrqanQ1NtYPAqhUKsjlChQWFuJqSir2xx4kZgE9e/RAyNrVtHpJBHB26o+U1FR8FLoBt++0vFZ5eTnKy8tx/UYu9sUeQI/u3bFi2WJMe3cqrQcggW72N5JTIBBAqVRSBqM5OTfg7jZcNwmw8bNN2B0d066+A5yd8ENkBExNTWgDxHv3Cinb8/NvIjBoJuN9ieKSEqwJCUVCYhK+/mozhEJBq3IPaNJEK4YbPBKJJXH8pWWl+rsEUM3YWTOmI3DqFEauuLLyCbH90ePH7RrHseMnIZcrEBMdCR6vZeagUCiI/Y0Zum1jIVlOIVfodxDYFObmZvDy9IC7uxsj4wPAkydPtDaeCxcT8NW2/7TaRgpcAYBvaMgsFeaT5aporqNXBHj8uAK7onZjzLiJWLPuY0b1/MonVVodU9TuH1FcUtLi85oaJbEfjyGBu9DI1dTUdB4CNKK+vh4HDv0Cv4BAVFRUkr8Ml/le/XtB7yL+j2O4nZeN/Ot/4chvh2i3X2tra1uNYwQCcmaiYriLSbdvIRAIOx8BGpGbm4dFS5cTZURduzLStWrFUmwO/xzOTv3B5/MhFAow3NUF0VHfY/Kbk4h9W9vpoyteMd2QekYjJxZ11V0ChG1YjyLZzX//budlIzPtKo7H/YawDevh6jKMVselxMvEQyYisYhWh0gkwuLgBZTtIetWE8vRt27faRFr0BFArmAWvFUryMsck++nMx6Az+fD0sICQ4cMxtw5sxD3+8/49r/baevzBw7+TNlmamJKe93hri7g8/mU7TY9e6BXL1uijrLyh8/9L6UpT5c3k6eUe0iWk1pZ6fcS4Df5TSyYT972TElNI6RbQuKGDQBGZ+2spdI2ZRsDB5DL0yUl9xkFvc+ePaNs53A4cGZQUdT5GGDC+FfJqVBVFTEjGDJ4ELF/Ww51Uq7FYvHzBBjoTJRnciiUrprYq5ct4xhHpwnAxEC1z2rbTYD79x/Q6n9QSq64WVpYtPAAhoRcv6i4GEXFxUSdySkpxHaXYUO1cr9fOgKQtnIBwMDAAGZm1Gv9G69PJJLoWlo60dUWFhWjoOAecQkxNzd77jOhUEDruX797Qh1mqhS43DcMWJ/f7+3dIMAcrkcb0wOQMy+WNq8vTmOHj+BiF1RRBkbm55EA9v07AFPj1HE8X0XsYuyffOWbcS9Al8f71Y/D3wngDjuiF2RlJtdO76LIO4BWFtL4eXpoRUCaHwvoKGhAVlZfyEr6y98svEzDB40EF6eHnBy6gfHvn1hKbGEWCQCl8tDTU01CouKkZ6RiSNxx5Cckkqrf+KEcbQy78+agUuJlynbt3+zA2Vl5Zg5Yzrs+9hBra7H9Rs3sPP7SFoPFEBxcGO0lyccHfoi/+at1lO86hq8HfAuQtZ+iFfH+kIsFkMmK0B0zD7aI+SzZ81odQ9CI0suoNkzgVVVVXAe7KodthrwcObUCTj0taeVnTN/Ia0x2wpfH2/s2/MDIUO5hoCpQe16CooK/fo54vSJOMb7IUygcw+GNGL50iWMjA8A4Z+HtYjWOwIzM1N8/uknRBm3EcMx870gjRJ+6+YvNGr8lz4IpMLUKf5YujiYsby1tRTRURG0J4iYFq2io75n9FhY2Mb18B7t1XHDcLnYuiVca9G/zhDA2FiI9R+vw/ZtWxid4m2Kke5u+OXgfkYncqhgZ9cbRw//ghHDXRnOWgPs2b0L0wLfafc1RSIRIiN2EA+SvrQEEIvFOHn0d8ydMwtSaftLl1KpFRbMn4ukhPNY+EH7H4oYNnQIzsefxLw577fJG1iYm2PVimU4dTyO0Zm+pujSpQu2bgnH3h+jGD3E0ggej4cAfz+cjz+J1yaMfzF1F00Hgc1x89ZtXLmajOyc67hz5y4KC4sgVyh
QU1MNLpcHY2MhjIXGkEgs4dDXHg4ODhjpPgJDhwzWSNWueYB64o/TSLp8BTdy81BSXPLv7wKYmZpBIrGEi8tQjHJ3h6/PaI0sH8D/Hxlr/H0AWUHBv78PIBAYQWolhaOjA14Z6Y5Jb0xk/ByFpoJArROAxcsHnc0CWHTiLIAFSwAWLAFYsARgwRKABUsAFtogQPN3zLDQ3xrAcwSge7UYC/1Do825dAxhob+z/zkCsF6g881+gH1tXKeb+cTXxrVGApYI+uPyGb04kooELPTH7TMiAEsE/TY8CxYAgH8AuU0t46l3asEAAAAASUVORK5CYII=", + "480p.png": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAABACAYAAADS1n9/AAAJ70lEQVR42u1daViTxxZ+Q4KaQMRqE6BaEBdA8C5dFDfs4xVwoxbhIkpFr16seqsVlEWUVnFBECilLlS9WuVRFBeEKl4WsVogICqogIqiRCsqYkuBsMUk3B8+9VHzbUDCEr7355yZOd83552Zc87MlwAsejQ4dBWMzaxa2GHqvqh8WMppEwFYw/cMInCYGP/+nRJ2BLshhlja0pKAQ2V81vC6R4S3ScAhMj5reN0mwusk0GP3/J6H122t97aQnf26CyLb6rGzv2evAnrs7O/Zq4AeOyQ9GywBWAKwYAnAoseC1xNeUi6XI0eSC0luHu6W3cP9+1LU1NSgvqEBL168gIDPh8BAAGOxGEOHDsEIa2s4TJ4Ey+HDNKK/8Np1pGdk4mpBIcqlUlRX//FKr7GxGJaWwzF2jB2cp0+FSCTq0LHhvB4SdEYUEBEVjZjtu2jrSctug8fjtqrvmpoa7PxhD+LiDkNWX9/qZ7OyssSqlSswfdoUcDicVre/cPEXbA2PRMnNW4zqc7lcuLp8hkD/VTAxMdbamL+eEexUAty4UYSZru5QKJQaJ0BB4TX8e/EyVD1/3u7ndHKcjB0x0RAI+IzqKxQKrPsmBIfjj7ZJX9++ffFd1DY4OU7WOgE6zQeQy+XwWR3AyPitRWnpHcye66UR4wNAekYmFi1eipaWFgbGV2LR4mVtNj4A1NbWYvHSL3Eq6SfddQK3RUbjzt0yrfTtszoATU1NGu0zO0eC+CMJtPVCNofi/M8X2q1PqVTC1y8Qhdeu6x4BrlwtwJ7/7tdK39ev30BRsXa2ssM0BLhaUIiDcYc0pk+hUMB/zTqtrJKdRoDGxkb4+gVCpVJppf+c3Eu0dRwmT0JCfBwKL0twr7QYORczEbo5BKamJtQ+S1ExqTOpUqkQEBRM+V4D+vdHRNgWFORLUHa7CJlpKVjg9Tmlztu3S7F334+6Q4DQsAiUl0vVyvX19TXSf+WzSkq5y0xnHNi3B+PHjYVIJELv3r1hbm6G+fM8kZx4jNbRe/qUuP+sHAlKS++QtjMwECDpZALmzpkNsViEPn36wMrKEls2bcDaQH9KnfsPHNTahOlQAkhy83CAYImcNtUJI6ytNKJDpaJ21Nz/6Uoqe8/UFOPGjmmT3oSEE5TyL5ctgYXFYELZ0iXeGGJhQdr2yZOnyMrO6d4EkNXXY5XfGjVP+t0BAxAWukljegabmbWrPZWnz+Nx8Z6pKeG2lpqeQU08N3Li6enpwXXWZ5TtE7UUEXQYAUI2heJRRYVaedjWTRjQv7/G9EyaNJEyaXP8RCKprOLxY+TmkfsQY8fYEW4RxSU3IZfLSduZmb1P61+MHvURbV6j2xLgwsVfcOToMcJZMdXJUbNJDgsLzHSeQSpP+ukMFnovQY4kF1XPn0Mul+Phw18RdygeLm4eaGhoJJ2lPl+tIJQV00QdtjYjaJ+bro5U+qBN2Uw6aP0soLa2Fn6Ba9XKTU1NsHFDsHYczc0bUHbvHmkKNuPceWScO9+qPteu8Yfd6I+JCVByk7It3ewHACMjI/D5fDQ2NpJuTSUlt0ifocuuAMHrN6p5zhwOB1HbwiA
UCrWi08jICCeOxcPD3a1NOfy3fZQfdn6PpV94k0cGlc8o+xAzPOARid5tV4TT5QiQmp6BxFPJauULvD7HRPvxWiWe0NAQURFhyExLwcIFXujVq1er2g8fNhSbN65HbvbPcJ4xjdrBlcko5QYGBox0Ggio68nqZN2HAL//Xo01QV+re+mDzbEuKKBDHM86mQxnU9ORce48pZNGhHv3y5GWnoH8K1fp9dTVUcp7MyRfr97U9Wpp9HQpHyAo+Bs8/+23N8q4XC5ivo0An8/XuvEvZmXDx9e/zQdCKpUKWdkSZGVLMMXRAd9GhsHIyIiwLpnj+Oq9ecyGWZ+mXkNDQ/dYAZJPn0HK2VT1hMcX3vjoww+0bvz/paZj/r+8NXYamJZxDh6e80lnOp/fh7K94sULRnroVik+X9D1CVBVVYXgb0LUyq2treC3aqXWjf+oogI+q/2hVJIfoHzqPB0nEg7jVlEBpGW3cDk3C9FR4TA3J08iFZfcxDqC9wJA68wy3X6aaer1FRp2fQIEBAWjuvqPN/cZHg/fR0dqLN9PhZjtu1BfT75U+q/2ReyOGIyxGw2hUAgejwdTUxO4u7ki9UwShg8bSto28VQyYb6fjgB1MmbOW72MOs4X9hV2bQIoFErC+HqVzwrYjLDWuvGVSiVOn04hlZuZvY/l/1lCPsBCIYJoDmZOJqlHNcZiEc2qyGwrotuyjMXi7pcIAl5e/tgWGd2uPgYPe5NA8zznqJ0h3L1bRpkts58wHlwu9bUy+wnjKOUFBdfUykba2uIYRYr58eMnjKKm5uZmUjmHw4ENg4xil0sEdSQqq6oo5UzOHPh8PmWU8uyZuo6RI20o+2RyKZQum2hubgahoSFLAErQ3Nmrrq6m7aKpqYnyOlkLWghXAKpE06OKCsKDsNdxKT+fUv7hB3/XypDpFAH608zwrBwJ7cWKrGwJ5ZEw0SoiEPAxxcmBst8TJ5MofadTyacp27u6zGQJQIdBAwdS5v6l0geI3b2X3Fuvq8PW8AhqHYMGEpZ7uLtRtovdvQf3y8sJZdt3xuLhw19J25qYGMN+wniWAHR4551++Ntf/0JZZ2t4JJZ/5YtL+ZdRJ5NBoVDi6dNKHD+ZiKnOLrQ3lSd9MpGwfKL9BMovierrGzDLbQ6OHD2GqqoqNDc3o7T0DgLXfo2o6BhKnQsXeNE6r21Fp38Z9CemfzoLN4qKyWcvww9D9u0/iPUbN2vlGQ0NDJCXcxH9+hGnhPMvX4HbbE9G3w8whZWVJdJSksHjaS5g6xIfhmgL8708Ke/XtQc+K5eTGh8ARo/6GPPneWouRudxERG2RaPG1+0oAC9vF+/a8R3pwU1bMcXRAd6LFtLWC1kfjE8m2rffMHp6iAgP1Zr3r7MEeBmW2SAhPo70Fm5r4eHuht2x2xltQTweDwf27cZcD/c26xMKhdgTu53yIilLAAYkyExLQYCfL6MrWWrOEYcDu9GjcPzoIURFhLVqGdbX10dEeCjiftzbqhQ4l8uFm6sLzqefxdQpTh0yTjrnBBJBpVIh71I+8i5dxs1bt1FWVoaa2jrIZDLI5XIIBAIYGhpAJBLBxtoKtrY2cHT4BwYNHKiRd7taUPjq9wGkDx68+n0APr8PjMXGsLQcjnFj7OA8YxrEYu3/PkCX+TycRedAp6MAFqwPwIIlAAuWACxYArBgCcCCJQALxgR4+z9mWOhuDuANAtD9tRgL3cOfNtejYwgL3Z39bxCAXQV63uwH2L+N63Ezn/Jv44hIwBJBd5Z8Rn8cSUYCFrqz7DMiAEsE3TY8CxYAgP8Dbz/IHbAxXVkAAAAASUVORK5CYII=", + "4k.png": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAABACAYAAADS1n9/AAAFEUlEQVR42u2da0xbZRjH/6cBFijQZY4MLJnLPrjEjbszMeF+26bR6Jx+UO7bzBK/KZeuXaGs0IG6mLkBhUIpUAqILDNq5u3DvGzcyk0zFzHZkmlcnLGLlUsIo/XDXAK0cA7sQ99z+vw+vufl5Mn7/70PPSenPQDh13B8E3bs3OOmZRIvf97+hduUABS8f4jACQn/5vR1WkERsvvJvbwScOuFT8FLT4TVEnDewqfgpS3Ccglk9D/f/1ietWz1Qdr90sVbtjLa/f7dBWS0+/27C8hoSfwbEoAEIEgAwm8JEFOx9+8v4fkXX8b1n2/wzh0d/B5RUZGCzvvCS0cwMTm15vFopRJDV68IrnNicgqv5xXh35mZdefF7NuL3u4OKBQK6gBCuNBoFBS+LxmfmBRN+KISYHr6V5w738B0jWPjE3gjv5g3/NiYfUyELxoBlpaW8HaZCouLi8zWaB8bFxx+j9XCRPiiEaDZZMbk1I/M1jdqH0NeQQlmZmfXDz82hpmdL5oPgTdv3cLZD86xHX5hCWZn5/jDt1oQHh5Ol4FCcblcKC1XY2Fhgcn6hkfsgsKPi4tlMnzmBWi3dGFk1O4x/nhUFBPhFxQfFXX4TAtw+7ffUf/+WY9xjuNgqKn2cfijyC/i3/nx/4cfFhbG7CZjUgC3242yCjXm5uY9jhUV5OGZ/Uk+q21oeAT5RUe91rY6/B7Gw2dWAKutF1evDXqMRyuVUJWX+qyuwaFh5Bcd4w0/IT5OFOEzKcAfd+6g1lDv9Vj9GT3k8hCf1HVtcAgFxccxP88fvk0k4TMpQLnqlNfr6VdfOYy01BSf1OS450BhyZu84ScmxD8IPzQUYoGp+wB9/QO48u13HuMR27ejSqv2WV18Lf9h+N1d7aIKn6kOcPfuX6jWG7weq9XrsHWrgtlFTEpMEGX4THWAk5pKOJ1Oj/FDB3Px3KEDzIcfKpdDjDDRAS598im+/Pobj3GFQgGDXsf0Au7a9QRCgoMhVnwuwN8OByqr9V6PVWnViIiIYHoBBy5eQmn5SbhcLhJgM2i0Ojgc9zzG01KS8dqRw6JYxI8+voiyCjXcbjcJsBEuf/EVPvv8sse4XB6C+roaUS1kX/8AylUa0UngUwHW+tSvKnsH0UolM4sUHByMLVu28M7r6etHhVorKgl8KsA/Xj71P52UiMKCPKYW6bFt22BuNQqSwNbTB5WmUjQSMPdAiH1sHDt373nk8+x/duVdw8jIHbAP/bDp86WlJMPcakTJsRO8zyd023rBAThTexocx1EHkAppKckwm5oQFBTEO9dq64VGq2O+E5AAG5UgNUWwBJ1WG05VVpMAUiM9LRVtLY2CJOjo6mZaAhJgk2Skp6G1uUGQBJZOK7Q6PQkgNTIz0mEyNiAwMJB3brulE1Wn2bu34dOrgBs/jW/q75xOJ56KXf+xsI18N/BRyMp8IMHxE2/xfnGlzdwBDhx0lRrqAFIiOysDLU0XBHWCVrNlzRtgJICIycnORHPjeQQE8DdVU1s79LV1JIDUyM3JQnOTMAmaTW2oWePZRxJAxBzIyYax8UNBEhhbWmGoe5cEkBoHc3MES9BoNMFQ/57PauUA+olYf2P5T8ZSB/BzSAASgCABCBKAIAEIfxdg9TtmCOleAq4QgO/VYoT0eJi5jM8QQrq7f4UA1AX8b/cD9No4v9v56742zpsEJIJ0Wr6gF0euJQEhnbYvSAASQdrBEwQA4D8iUuD1D944jQAAAABJRU5ErkJggg==", + "5k.png": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAABACAYAAADS1n9/AAAGHUlEQVR42u2daUxUVxiG33vZMggzI9A0FlPRtoKgwAxWsQIDsrRGmzRWTVNFBZe62xKtC4uiVYRS09ACg2wDDIt1qWlN0x82Kq3KDtpoghWaiK3BtqBxFtdMf9gmanTuBWbg3jvf+3e+e843533Oueece5IDkBxaDFfAy6/6W6iZxKvea53MoAAg4x0DBIaP+d1XLlELilATJgZxQsBYM5+Mlx4Iz0LAPM98Ml7aIDwJAUvvfMfTk16zz/5IvV+6ep63LPV+xx4FWOr9jj0KsNQkji0CgAAgEQAkh5WzEJLouf4HZkRE27zcA7nZWDh/Hmfcu+/NR3vHhRf+PtbXFw1nT/Out73jAj5cvAx3DAarcVMmB6GuugIKhYJGAKmorb1DNOYTADZWa1s7FiUmcZofPGWyIMwnAGyoltY23ubX6nWCMJ8AsJGaW1qxeEkyDEajdfODpwim5wtqEih685cmw2g0cZuv10Eul9MqYCB6c2oYvj1SJ8jcGptasCRpOaf5ISHBqK0qF5z5NAIMk/l1eh08PT1pH0A65jcjcdlymExmq3GhIcGoFbD5NAkchBoamyRjPgEwQJ1vaETishWc5qtCQ0RhviheAb29vUjfuRvtHRfw540b6O+/BRcXZygUCigVSgQGBiBMrUJUxEz4+Y2zWx7nzjdgafIqmM3c5tfodfD08BAF1Aww8odBbfEtgGEYaKIisW7NKswInz6gZ7m+Bbi7y2CxgNN8tSoU1VXlgjf/ycOhknkFWCwWnD5TjwUfLEb6zt24d++ezco2mcySMd8h5gDlFVVYtCQZ9+/fH5b6wtQqUZov6UlgQ2MTNqVsIfMdeRXw/YkfcPKnU3atw89vHNxlMtG2keBWAc7OTghTqxAxcyYmBwXCf+IbGD1aCZnMHQaDAV3d3aj/+Sz0NbXo7b3JWV5WTi7iYmPslu/RY8fhxLLIzckCy7IEwOBm8I/3/BfOn4e5c2a/cP2sVCoQplYhTK3CyhVJ2LY9Dce/O2G17M7OK+jsvAJ//4l2y/+bI8fAMAxyc7LAMAwBMFCN9fUd8Acfj1GjkPflF7j51984d77BauypM/V2BQAADh0+CpZlkbN/r6ggEPUcgGVZfLr5E864zs7fhlSPTCaDm5sbZ1ztocPYuiMdFouFABguhalVcHe3Pgn7p69vSHV4e3mhrETLC4Ka2kPYlpohGghEDwDDMFAqlFZjbt++PeR6NJERvCGorqnDdpFAIHoALBYL+m/dshqjVNrmCJYmMgJlxYVwdXXljNXX1CE1fZfgIRA9AM0trZzbtC/5+NisPk1UJG8IKvU1SMvIJAC4hsus7Fz09fUP+NlHjx4h+/MDnHFBgZNsmnO0JgqlBwt4QVBRVS1oCEYcgDsGA/ILixAeoUFaRiYuXvyV13MGoxHrN6WgsamZMzY+PtbmecdEa1BSlM8LAl2lHum79tA+gDWZTGboKvXQVerx+msTEBUVgWlTpyIgwB8+3t7w8PCA0WjA1a5unKn/BdW1dbx2At+aEY6xvr52yXlWTDSKtflY8dFaPHjwwGpsua4SLMsgMyONAODS1a5uXO3qRll55ZDKcXJyQubOVLvmGjvrMQQrV6/jhKC0rAIMGOzKSBVMW0v6Y9DmlE2YFBBg93riYmNwsPBruLi4cMaWlOmQuWcfAWBvbVy/FhvWrRm2+uLjZqGo4Cs4O3MPqsWl5dizdz8BYA/5eHtDm5/Ha4vY1kqIj0VRIT8IiopL8dm+bAIgaWkiyoq1WPD+PHh5jR50Oa+MGYOUjzfg1MkfMXfO7BH7P2/Hx0FbkMcLAu3BEuzbn+PYk0A3NzckxMci4b+lWlf372hpbcOly5fR03Md13quo6+vH2aTCea7d+Hq6gq53BMKuRwTxo9HaGgw1KpQhE+fJpjv8e8kxENbkIfVa
zfi4cOHVmMLtMUAw2DH1i0jkqsgTgWThleSPBVMokkgiQAgEQAkAoBEAJAGA8Czd8yQpLsEfAoArqvFSNLT/56zXISQpNv7nwKARgHH6/0AXRvncD3f6rVxz4OAQJDOkM/r4sgXQUCSzrDPCwACQdrGk0gAgH8BlyY53hxDwyoAAAAASUVORK5CYII=", + "6k.png": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAABACAYAAADS1n9/AAAHoklEQVR42u2de1hTdRjHv2cMBOammz1hASr4pJWAwpCLCQMVsotpmVkpgqBlZfGUeeUiSpH62E2T20Bu4xZCPF0elDLxyh28pKgVWuJjBoiRIIO59Vc9SOyco2zsbPt9/9z7nrP3nO/nd93ZcwAisxbFlGA3brKG3Cbj1fXfL1D3BQAx3jxAoNiY33zxLLmDRijnSVMYIaDozCfGmx4IAyGgBjOfGG/aIPSHgEfGfPNTf695A4Ok9ZuuBvOWR1q/efcCPNL6zbsX4JFbYt4iABAAiMxZfGMoUqlUouLIUVRWVuPMT2dx5UoLbv51E0plL2ysrSEUCTHO0RGTJz0CT6kHAmT+kEjExF0WovrPCLk2CWxtbUVishwFhUX4+9Yt9t0ajwcfby+ELHkFT80NBp9Pz/m8BS+i8eQprXEHe3tUHa9g/f2NJ0/h1aVhjDW7ukxBQW4WRo0aNaz3tf+GEGeHAEVeAfwDgyFPz7gn8wFArVbjRGUV3lgdiQPlPwxr3Q2NJzltPueHAJXqDtau34Si4hKj607rGxqxdFk4o/luri7IV2Qa3HzOAaBWq7E68l18+12Z0ZlfV9+ApcvCcaury2jM59wqYOcnnxul+bV19ezMd3PlRLfPyR6gtq4eu/ckMeZNdHbCy4tfgq+PFxwdHTBKJEJ3dzfa2tvx09lzqKtvQHn5QbRcvTp85oeGo6urm9l8RSZEIhFZBg6URqPBpug4aDTaf5KwtLREbPRGhIYsAY93d8clEokgEong7OSE5559Bls3x6CuvgGZWTmwtLTUW93VNXVYtjyC0fypU92Qn5PBOfM5A0DZ/nI0nT9Pu6xLTfoCQXNmsT6np9QDnlIPTphfoMiEUCgkG0HalJWTSxtfGbH8nszXt6prahESFoHu7tu0edOmuiGfw+ZzYhJ4/fqfqKyq1hoXCGyx+s1VnLlhVdU1JmM+JwA4fPQY1Gq11nhggAxi8WhO3KzKqmqEhK1gNN992lSjMJ8TQ0BNTS1tPEDmDwBov3EDxSWlOFFZhaamC+i42YE7d9SQSMSwe/BBeE33hN/MGZD5+/1vkqgLnaisQmj4a7h9m9n8PEUmhCNHGsUS1uAAnDt/gTbuYP8wtsQnIFuRB6VS+b/4tWt/4Nq1P3Dy1Gmkpu3FRGcnvBv5NhbMn6ezGm903GBlvof7NOTmZBiN+ZwA4PLl32jjb0euQWtbG+vz/dp8Casj38Ohw0ewPSEe1tbWQ66Rqcs3VvMNPgfo6elBZ2cnbc69mN9fxSWlCIt4HSrVHb1fh9TD3SjNNzgAnZ1/6/X8x46fwOYt8cR8rgKg7O3V+3dkK/Jw9lyT3s4/YcJ42NrYwFhlUAD4Fhasc4VCIWKjN6LyWAUu/9KEM421kCfvweTJk2iP02g0+OTTXXq7huKSUry/biPtUpZMArWZKmK3Tubz+SjMzYKbm+t/n4nFo/HU3GD4+T2B5xa8iIs//6L1+IojR9HT06OTCeFg+nJfCSiKws4dH4GiKNIDsJXA1pbVmn3RwufvMr+/RgoEWL9uDf1Qo1SiurZOr9dSWFSMdRuiaH/QIgAMEEVRrHbLZs8OpI0HyvwZn/u7cqXlvuu0sbHBiBEjGPPyC4uwflOMUUFg8K3gic5OjDnOTvQ5VlZWcHCwp81pa2u/7xrHSCTYm5bMCoK8/EJsiIo1GggMDoCbqwtjjq0t8yybaSY+1LFZ5jeTNQS5eQX
YaCQQGBwAF5cpjDlsduK6GbZpHxgjGXKtMr+Z2CtPgpWVFWOuIq8AUTFxnIfA4AA8McOXsXU2X7pEG+/r60NLC/0jYHZ2djqpV+bvxxqCbEUeomO3EADo5OhgD18fb9qcgwcP0cZ/rDgMlUpFs4y0gNd0qc5qDpD5Iz01kRUEWTm5nIaAE08FL160kDZeVPwVTp8+M2jsVlcXtu/4mPZ4qYe7zp/HCwyQIS1lDysIMrMViImLJwBo07xnn6ad6atUKixeEgp5egZarl6FSqVCR8dNlO0vx/wXXqLdBAKAFeHL9VL3rMAAyJP3sHrwNCMzG5u3fsA5ADjz38Cq6hosenmpzidNHu7T8PVXRfQADvG/gT8cPISVq95CX18fYz0rwsMQFxtlUNM5+d9AH28vhIYs0ek5JRIxdn/2sd5rnzM7EKlJX7DqCdL2ZmJLfAIZAgZTXGw05gYH6eRcIwUCZKSlYPz4ccNSe9CcWUhJ3M24IwkA8vQMxH+4jQAw2Gw9JWk3Xl8ZMaSNm8cfexRl35ZC6uE+rPUHB81GShI7CFLk6fggYTsBYKAsLCwQE7UBJUX58PbyvKdjHeztsXVzDL4p3QcnpwkGqf/JoDlITtzFCoLk1DQkbNth2EbH1fXpdE8pir/Mx8WLP6PswPeoratHc3MzWtva0dvbC4FAALF4NB4aOxbTPaXw9fHGDF8f8PkWBq99bnAQkhN3YdWb79DuTwBAYrIcoChsWr/WvFcBRGa+CiAicwAiAgARAYCIAEBEACAyBAAD3zFDZLpLwLsAYHq1GJHp6V/PeUyEEJlu678LANILmF/rB8hr48yu5dO+Nm4wCAgIptPls3pxpDYIiEyn22cFAAHBtI0nIgIA/AOfr9sHu3uhowAAAABJRU5ErkJggg==", + "720p.png": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAABACAYAAADS1n9/AAAJBklEQVR42u1da1BURxb+mBkIwzCwwQgiCmpkMIAPMKJJfKJBVEqRKXwgID5YpDRlTDSsrtaacvFtmY27MQEVJGh4iialERCJQY2igEZRIa7yFBXlOYCDM8z+2MJSM7fvhZkBufT3c87pPn3v+frcPqd77gUoejWM2BRs7J009Db1XDwqKzLqFAGo43sHEYy4OP9ecSG9gz0QQ2QurCQwIjmfOp5/RHidBEbanE8dz28ivEwCAX3m9z687GvB60I6+/kLbb4V0Nnfu6OAgM7+3h0FBPSW9G5QAlACUFACUPRaiPh+geUVlcjLy8fNwlsoKSnF/dJS1NTUoqlJAaWyFRKJBJYWUtja2mL4cBe4u42C17SpEIvFehtDwbXryMjMQl5+Ae6XlKC2tg7Pnz+HmVgMGxtryGSO+GDcWPjM9Ebfvn279P4YvZwS6CMLaGhogPOI0QYd9JXfcmBr20+rrLW1FecvXER6Zhayzmbj4cNHHe5fam4OuZ8vPl+zGm+//ZdOj/OXc79i247dKLx1m5O+UCiEn+8cRKz7DP362Rjs/r1cEeTdIyAu/iiCl4TiyNGETjkfABoVCsTGxWPy1Ok4k5Xd4fYqlQoRGzYhcPEyzs4HALVajeTUY/D0momMzCy6BuhuPK2pwfKwcPx8OqMDzldjaWg4jhxN0CmKhq5YibTjP1ICdDdUKjU++fRzlFdUctL/8p9bcTb7F53tqtVqrFkbgYJr1ykBXofJWyZdau/Zs2fYErmNVS8vvwCH4+L1SD4V1v3t71Cp1JQA7Zg0cQL6WFl1qI272yisWb0KyQnxuHzxHO4VF+L2zQKcOJaEJSHBEInYk6GMzCw0NDQwytva2vDF+o1oa2tj1OljZYVd2yORn3sRd+/cQFb6SSwOWkS0e+dOEaIPxvScNNDCwgIVJX90qq1Go8FUr5ko/uMuo0542HJuK3mpFIsWzsfCBfPw7pDBf44iJiYY7e6G0e5u8POdjYDAEDQqFMT
Z+GvOBfjMmqFVnnPhIoqKihnbSyRmOJ6aiMGDB734zclJhsgtm2HXvz+27tjF2PZQ7GGEhS6FQCDgdwQ4m32O6HxXF2eM/+hDYh8SiQSfrAzHpfPZ2LghQqvzX4fbqJGI3LKZVa+svIJRlpiYQmy7MjzsFee/jBVhyzFkMPM4q6oeIuf8Bf4/AvZ/F02UrwgLZe1j4Xx/RKz7DJaWlh2yPWe2D2ubp0+fav29paUFpzMyiW395X7MThAI4Dd3DrH9MQNlBG8MAX7//QYuXc5llA+ws4PPzBkGsy8UCjH03SFEHWNjY62/3yy8hdbWVsZ29vYDGQtX7fAYQy6e5Rdc4zcB9kcdIMpDly2BSCQ06BhaWlqI8oED7LQT4Ca5guri/B6rbTadkpJSKJqa+EmAsvIKnPo5nVFuaWmJBfP9DTqG5uYW3C8pJeqMGfM+YwQggW32t18jaf9Bo9GgsPA2PwkQfTAGajVzrhscGACJxMygY0hNO06MAK4uzpA5DtUqe/joMbFva44bPH37vkOUP3r8iH8EqKurR2JSMnPRx8QES0OCDDqGqqqH2L5zD1Hn09WrGGUKQvrYnplwgcSMrKdoVPCPAHHxR9DczDzz5H6+Bt0ira+vx9LQFaivr2fUmTXTG95eHzPKGxsbiTbeMuFWuWSrcDaw2OlxBGhtbUVM7PeMciMjI4SFLjOY/ZqaWiwMDMENwiJOJnPE7p3bWNcPxAxDxK3eZsyi19zcrPd70K0HQlJS01D95Amj/ONpnqypWWdRXlGJRUFLcO/+fUad/ra2iIuJhtTcnNiXWGxKlKueP+c8Ich2zPgTATQaDaIOHCIXfv663CC2b92+gzl+/kTn9+tng+SEeAyws2PtTyqV6uTYdihZ9Cyk5vwhQOaZs7j733uMcne3UfBgSLt0wW+XLkM+LwCPH1cTCzepiUfh4GDPqU82AjQquC3emhTkPF9qIeUPAb5lKfyEcyj7dhQnT53GouClxEXbsGFOOJ6SyNn5AGBjTV6kVlc/4dQP6XH4fzvW/CBAwbXryL1ylVE+aJADpntN06vNmMPfI3zVamI4HjfWA2nJP8DaumNZh6uLC1H+4EEVpwWpUqkkLoidOVQUe8QikG3TJyx0mV63Pnfu3ouv//0NUcdn1gx8vXc3TEw6ftjE1dWZKOdyLpCtmujgYM+6GO0REaC0tAyn05l3zvpYWcFfPlcvtlQqNdZ+sZ7V+UtCgvHNvq865fz2CEBqW1FZiYpK8pGyy7m5RLm72yiD+KPLCRB14BDx1EzI4iCYmprqbKelpQXLw8KRkJRCDKvrI9Ziy+ZNOkUcMzMx6yMrJfU4kahpJ34itvfznd3zCVBbW4eklGOEPFeMkOBAne3U1dVjwaLFxCPdIpEIX+3ZiZXhYXq5tvn+cpbHXhRj2rnvP/tRVlZOTEknjP+o5xMgNi6euOEy31+u0x8xAKDywQP4yuchL7+AUUciMcPhQ1GQ+/nq7domThjPuFkEAE1NzZgrX4AfEpJQXV0NpVKJoqJiRGzYhD17/0V+RC0OglBomK1wvf8ziLHIoVTC44OJeFpTo1UuFAqRk50Je/uBBl/wdRYjhrvi1E9pjPLcK1chnxcAjUZ/79twcpIh/eQJTgdXuaJb/hmUnJrG6HwAmOHtpbPzuxseY95HcGCA/lI0kRC7tkfq1fnd8ghoa2tDVDS57BtuoLJvV+PLf2zEpIkTdHeMQIBdO7YabPXfpQTIyDxDrLuPG+uBkSNH8IIAIpEIsQe/w0IdTjBJpVJE7d9HPEjaowjwbdRB8uw3QNm3O2FsbIxdO7YiLiYazu8N49xOKBRC7ueLsxmn4D3dq2sIa2gDefkFuJqXzyiXOQ6F55RJ4CM8p0yG55TJyMsvePF+gJLS0hfvBxCLTWFjbQOZzBEfjhsLn1kzOlyG7jFZAMWbA16/H4DiDVwDUFACUFACUFACUFACUFACULypBHj9GzMU/K0BvEIAtk+LUfA
P7T4XsDGEgr+z/xUC0CjQ+2Y/QD8b1+tmPvGzcdpIQInAn5DP6cORTCSg4E/Y50QASgR+O56CAgDwP5RjPmzaHh9CAAAAAElFTkSuQmCC", + "8k.png": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAABACAYAAADS1n9/AAAH0UlEQVR42u1da1BUZRh+zsbKcNXULESTSpZcbl6m20yGwC5eG0vTMQME1LyUhCiSEiShjtnkBQUXl5vcDSVnMhEVEa3kKhoXDbZCHCk0oYFdHAbc7U+XTdlzDuzt7O73/H3f857v+57ne7/rmQMQWDQoJoenn3VTkWYyXXS0/UQNSwCEeMsQAsWG/F+aG0kLmiCeF7gzioCiI58Qb35CeFQE1GDkE+LNWwjqIuCRMd/yoM4171Ej6f3mi8G45ZHeb9lZgEd6v2VnAR5pEssGEQARAIElw4qrBfv99w6cK72AhsYmyGQy3LnzG+QKORSKXgCAnZ0t7Gzt4Ow8Hq6uk+HhLoTY3w/PPPM0YXUIoNRnhFyYBP5wpQJf7juAqupaqFRDW5xQFIWXX5qBzZEReO3VV1g/9+Zb76Du2nWN9gnOzqj4/iLreHXXrmN5YAh65HJaP08PdxTkHsXIkSMN2sbqG0KcGQL6+/vxUWQUlr4bhMqqmiGTDwAqlQqVVTVYsiwQGzdFo7+/3+D1uFp3jdPkc3IIUCqVCAl7H+WXv9NZzMITRfjj/n1kZUhBUZRB6lF7tQ6BwWGM5Ht5eiA/J9Po5HNmEpiemaVT8v9B2cVyZB7NNkgdamqv4r2gUJMinxMCUCqVOJwi1Vt8yZG0YQ0nQ0F1TS0Cg8MgVyjoyffy5ETa55QA6q5dR0fHXVofPp+PkOBAFJ86icbrNbhRfxXFp04iOHA5rKzoR7E77e2or2/QL/krWJKfkwlHR0eyDFTHrVttjD6HDuzF/HlzHptEee6Ix4zp0/BRZBT9O9puw8vLU+dlr6yqQXDoyn+Xpprg7e2F/OwMzpHPiQzQ1fUnrd3FZdJj5Ktj8aK3MG7cU7QxOru6jEo+F3s+ZwTg4OhAa3ceP54xxngnJ1q7o4ODjsmvRlBIGCP5U/8m30HH7zcrAbgLpzCO4Uxr/zvtv9G/w12os/JWVFYhKGQlensfMJKfz3HyOSEA4ZQX8cLzz2m0t7beQvGZsxrtx4u+xr179zTa3dwEELhO1klZr1RUIihkFSP506Z6mwT5nBAARVHY+jH9JO6D8I2Ii09AQ2MTuru70dPTg4bGJsTEbkdUdAzts59s3aKTcv5wpQLBoavx4AEz+XkmQj7AobOAvfsTsXf/QZ3G3BYdhfXr3mf0YzoLsLW1gUoFRvKnT5uK3OwMONjbc5p0Tp4FREaEI+ngPowdM0brWOPGPYXUlCRW5LNBb+8DsyGfc/sA6lj45gIEiPzx1fEiZB7NRovs5yE97+YmQNiKILyz+G1YW1sbrNwzpk9DTla6yZHPiTnAYEusMyVn8cuvrUN+Vib7GafPnEVVda1ByTfFns+5DPDH/fvYuCkaZRfLhx3j4cOHKL90GeWXLmO2WIQvv9iNUaP0u+/u4jIJtjY2MFVwIgPcvXsPCxct1Yr8R1Fy7jwWLVmGzs4uvZb9RNFJbN6yFUqlkghgOFCpVFj3YTjtmYCtrQ0iIzagtORbtNz4EbKb9bhw9jQiI8JhQ9P7mltk2BCxSe91+Op4EaKit+n91NEsh4BT3xajsqpGo93OzhZFhQWP7RgKBK6IFLhC5O+LxUuXa5yll1+6jNILF+HvN0uv9ThWeAI8Hg97du802AUUs8gAeQXHaO3r166h3S728vTAmtVhtDFy8wu0KqONjQ2rVUX+sUJEb4s1qUxgVAEMDDxETW0drc+C+XMZ48ybO4fWXlFZrRUpY0aPRnqqhJUI8vKP4eOYOJMRgVEF0HG3g3GDZdKzExnjTJw4gdbe3d2t9ZGwz8zXWYsgN
68AW01EBEYVgEKuYOHFPJ6yGXPlPXKty+sz83WkSw9jxIgRjL45eQWIid3OeREYVQBsDkzabjPfGGpru83oo6sLGT5vzGQtgqycPHwSF08EoAljxzLv+58uLmH0KT5D72Nl9YRON4Rm+byBtCPJrERwNDuX0yIwqgD4fD4EAldan0NJEty8+ZNGe2PTDaRI02hjCIVCnS/NfGf5IDUliZUIMrNyELs9gQhgMIj8fenHboUCCxcvxb4Dh9Dc3IK+vj709fWhuUWG/YlJWLRkGeMFDZGe9gD8fGdBKkkCn89n9M3IzMKnn+3gnACMfh+gtfUWfMVz9fYZl7W1NcovlGCCs7NGH22/DTxfWobVaz9gVYdVYSHYHhdjVNI5dR/AxWUSVq8M0Vv8dWtW0ZKvqyx25PAhVpkgNT0T8Qm7yBCgji2bIzFbLNJ53Pnz5iAifINB6iAW+SEl+SDjhyoAIE3LQMLO3UQA/83SrSBJTkToiiDweNoXicfjISw0GEmJ+2Bl9YTB6hEg9kfKYXYiSJGmYceuz4kA1FcECfFx+ObrQgSI/Vk14mBCChD749TJ4/js09hhxdAWs8UiSJITWb1bciQVu3bvMW7n49qs1NvbC+lSCTo7u1BaVob6hkY0Nd1Ee3s7euRyyP/ePbS3t4ODvT2cnJwgFE6Bp4c7RH6+GD36SaPXYU6AGJLkRKxdH46BgQFa32SJFKAobIuOssxVAIGFrwIIyByAgAiAgAiAgAiAgAiAwJgCePQfMwTmuwT8nwCYfi1GYH74h3Mek0IIzLf3/08AJAtYXu8HyG/jLK7n0/42bjARECGYT8pn9eNITSIgMJ+0z0oARAjmTTwBAQDgL086DChbL3X4AAAAAElFTkSuQmCC", + }; + window.GroupDetailsImages = MAP; +})(); diff --git a/plugins/GroupDetails/screenshot.png b/plugins/GroupDetails/screenshot.png new file mode 100644 index 00000000..600941a7 Binary files /dev/null and b/plugins/GroupDetails/screenshot.png differ diff --git a/plugins/ImageBlackout/README.md b/plugins/ImageBlackout/README.md new file mode 100644 index 00000000..edb8d9a7 --- /dev/null +++ b/plugins/ImageBlackout/README.md @@ -0,0 +1,7 @@ +# Image Blackout (100% Work Safe) + +https://discourse.stashapp.cc/t/image-blackout-100-work-safe/6557 + +Simple toggle button in the navbar that completely hides all images, videos, hover previews, scrubber sprites, and AI tagger images. + +Click 🖼️ to turn black-out on/off. 
diff --git a/plugins/ImageBlackout/image-blackout.js b/plugins/ImageBlackout/image-blackout.js new file mode 100644 index 00000000..a5d02d53 --- /dev/null +++ b/plugins/ImageBlackout/image-blackout.js @@ -0,0 +1,45 @@ +(function() { + let enabled = false; + const cssId = 'image-blackout-style'; + + const blackoutCSS = `img, video, .card-image, .card-image-container, .scene-card-preview-video, .scene-card-preview-image, .scene-scrubber, .scrubber, .scrubber-sprites, .scrubber-viewport, .preview-strip, .tag-card img, .tag-image, .tag-thumbnail, .sprite, .sprites, .sprite-image, .scrubber-sprite, .scrubber-item, [class*="svelte-1d03wug"], .ai-tagger, .ai-tag, .tagger-preview, [class*="sprite"], [class*="preview"], .performer-card img, .scene-card img, .gallery-card img, .hover-card, [class*="hover"] { display: none !important; visibility: hidden !important; background-image: none !important; }`; + + function createButton() { + const nav = document.querySelector('.navbar-nav'); + if (!nav || document.getElementById('blackout-btn')) return; + + const btn = document.createElement('button'); + btn.id = 'blackout-btn'; + btn.textContent = '🖼️'; + btn.style.margin = '0 4px'; + btn.style.padding = '2px 8px'; + btn.style.border = 'none'; + btn.style.borderRadius = '4px'; + btn.style.cursor = 'pointer'; + btn.style.backgroundColor = '#6c757d'; + btn.style.color = 'white'; + btn.style.fontSize = '16px'; + + btn.onclick = function() { + enabled = !enabled; + if (enabled) { + if (!document.getElementById(cssId)) { + const style = document.createElement('style'); + style.id = cssId; + style.innerHTML = blackoutCSS; + document.head.appendChild(style); + } + btn.style.backgroundColor = '#28a745'; + } else { + document.getElementById(cssId)?.remove(); + btn.style.backgroundColor = '#6c757d'; + } + }; + + nav.appendChild(btn); + } + + createButton(); + const observer = new MutationObserver(createButton); + observer.observe(document.body, { childList: true, subtree: true }); +})(); 
diff --git a/plugins/ImageBlackout/image-blackout.yml b/plugins/ImageBlackout/image-blackout.yml new file mode 100644 index 00000000..1ef2efe7 --- /dev/null +++ b/plugins/ImageBlackout/image-blackout.yml @@ -0,0 +1,7 @@ +name: Image Blackout (100% Work Safe) +description: Toggle button to completely hide all images, videos, hovers, scrubber sprites, tags, and AI previews +version: 1.0 +url: https://discourse.stashapp.cc/t/image-blackout-100-work-safe/6557 +ui: + javascript: + - image-blackout.js diff --git a/plugins/PythonToolsInstaller/PythonToolsInstaller.py b/plugins/PythonToolsInstaller/PythonToolsInstaller.py index da1f99f8..b2ea75e2 100644 --- a/plugins/PythonToolsInstaller/PythonToolsInstaller.py +++ b/plugins/PythonToolsInstaller/PythonToolsInstaller.py @@ -51,8 +51,8 @@ def get_download_py_stashapp_tools(PLUGIN_DIR): # venv/lib/python3.11/site-packages/stashapp_tools- - src = f"{used_dir}/venv/lib/python3.12/site-packages" - destination = shutil.copytree(src, org_packagedir,ignore_func,None,shutil.copy2,False,True) + src = f"{used_dir}/venv/lib/python3.13/site-packages" + destination = shutil.copytree(src, org_packagedir,ignore_func,None,shutil.copy2,False,True) fp = open(f'{used_dir}/copydo.txt', 'w+') fp.write("%s\n" % print(destination)) diff --git a/plugins/PythonToolsInstaller/PythonToolsInstaller.yml b/plugins/PythonToolsInstaller/PythonToolsInstaller.yml index f8e750ae..6378fe7d 100644 --- a/plugins/PythonToolsInstaller/PythonToolsInstaller.yml +++ b/plugins/PythonToolsInstaller/PythonToolsInstaller.yml @@ -1,6 +1,6 @@ name: "Python Tools Installer" description: Download stashapp-tools for DockerEnv -version: 0.1.2 +version: 0.1.3 url: https://github.com/stashapp/CommunityScripts/tree/main/plugins/PythonToolsInstaller exec: - python diff --git a/plugins/SFWSwitch/README.md b/plugins/SFWSwitch/README.md index 874e2f78..02cb3507 100644 --- a/plugins/SFWSwitch/README.md +++ b/plugins/SFWSwitch/README.md @@ -9,6 +9,8 @@ 
https://discourse.stashapp.cc/t/sfw-switch/4658 - Gray = Blur disabled - Toggling the button blurs cover images and other content. - Hovering over an image temporarily removes the blur. +- Extends the blurring functionality to some community plugins. + - Custom selectors should should be added to `additional_plugins.css` file. ## Screenshots @@ -17,4 +19,4 @@ https://discourse.stashapp.cc/t/sfw-switch/4658 ## Credit Original plugin by Belleyy [here](https://github.com/Belleyy/CommunityScripts/tree/pluginUI_SFWSwitch/plugins/SFW%20Switch). -The CSS code used is provided by fl0w#9497 [here](https://discourse.stashapp.cc/t/custom-css-snippets/4043#p-8143-blur-nsfw-images-and-unblur-on-mouse-over-41). \ No newline at end of file +The CSS code used is provided by fl0w#9497 [here](https://discourse.stashapp.cc/t/custom-css-snippets/4043#p-8143-blur-nsfw-images-and-unblur-on-mouse-over-41). diff --git a/plugins/SFWSwitch/additional_plugins.css b/plugins/SFWSwitch/additional_plugins.css new file mode 100644 index 00000000..da05c382 --- /dev/null +++ b/plugins/SFWSwitch/additional_plugins.css @@ -0,0 +1,94 @@ +/* [Global changes] Blur NSFW images and unblur on mouse over */ + +/*Credit: fl0w#9497 */ + +/* === MORE BLUR === */ + +/* Stash Battle */ +.pwr-scene-image-container, +.pwr-scene-image-container, +.pwr-hover-preview, +.pwr-scene-image-container .pwr-scene-image, + +/* HotOrNot */ +.hon-performer-image, +.hon-performer-card, +.hon-scene-image, +.hon-selection-image, +.hon-image-image-container, +.hon-image-image, + +/* Deck Viewer */ +.swiper-zoom-container, +.gallery-cover-link, + +/* O Stats */ +.custom-stats-row .stats-element img, +#on-this-day-section [style*="position: relative; height: 400px"], + +/* Sprite Tab */ +.sprite-cell +{ + filter: blur(30px); + transition: filter 0.25s ease; +} + +/* === LESS BLUR === */ + +/* StashBattle */ +.pwr-scene-info, + +/* Deck Viewer */ +.gallery-cover-title, +.gallery-performers, + +/* HotOrNot */ +.hon-selection-name, 
+.hon-performer-info.hon-scene-info, + +/* O Stats */ +.custom-stats-row .stats-element, +#on-this-day-section [style*="display: flex"][style*="cursor: pointer"] img, +#on-this-day-section [style*="display: flex"][style*="cursor: pointer"] img + div, +#on-this-day-section > div:last-child +{ + filter: blur(2px); + transition: filter 0.25s ease; +} + +/* Deck Viewer */ +.swiper-zoom-container:hover, +.gallery-cover-link:hover, +.gallery-cover-title:hover, +.gallery-performers:hover, + +/* StashBattle */ +.pwr-scene-image-container:hover, +.pwr-scene-image-container:hover .pwr-hover-preview, +.pwr-scene-image-container:hover .pwr-scene-image, +.pwr-scene-info:hover, + +/* HotOrNot */ +.hon-performer-image:hover, +.hon-performer-card:hover, +.hon-scene-image:hover, +.hon-image-image-container:hover, +.hon-image-image:hover, +.hon-performer-info.hon-scene-info:hover, +.hon-selection-card:hover, +.hon-selection-name:hover, +.hon-selection-image:hover, + +/* Sprite Tab */ +.sprite-cell:hover, + +/* O Stats */ +.custom-stats-row .stats-element:hover, +.custom-stats-row .stats-element:hover img, +#on-this-day-section [style*="display: flex"][style*="cursor: pointer"]:hover img, +#on-this-day-section [style*="display: flex"][style*="cursor: pointer"]:hover img + div, +#on-this-day-section > div:last-child:hover, +#on-this-day-section [style*="position: relative; height: 400px"]:hover +{ +filter: blur(0px); +} diff --git a/plugins/SFWSwitch/sfw.css b/plugins/SFWSwitch/sfw.css index a8cc23e3..3f6719fb 100644 --- a/plugins/SFWSwitch/sfw.css +++ b/plugins/SFWSwitch/sfw.css @@ -1,116 +1,175 @@ -/* [Global changes] Blur NSFW images and unblur on mouse over */ - -/*Credit: fl0w#9497 */ /* === MORE BLUR === */ +/* common */ +.thumbnail-container img, +.detail-header-image, +.wall-item-gallery, + /* scene */ -.scene-card-preview, -.vjs-poster, -video, +.scene-player-container, .scene-cover, +.scene-card-preview, .scrubber-item, .scene-image, +.scene-card img, +.wall-item-media, 
+.wall-item.show-title, /* image */ -.image-card-preview, +.image-card img, +.image-thumbnail, +.Lightbox-carousel, .image-image, -.gallery-image, +.react-photo-gallery--gallery img, /* group */ .group-card-image, -.group-images, /* gallery */ +.gallery-image, .gallery-card-image, -table > tbody > tr > td > a > img.w-100, +.gallery-card img, +.gallery-cover img, +.GalleryWallCard.GalleryWallCard-portrait, +.GalleryWallCard.GalleryWallCard-landscape, /* performer */ -.performer-card-image, -img.performer, +.performer-card img, /* studio */ .studio-card-image, +.studio-card img, /* tag */ -.tag-card-image - +.tag-card img { -filter: blur(30px); + + filter: blur(30px); + transition: filter 0.25s ease; } /* === LESS BLUR === */ + /* common */ .card-section-title, +.detail-item-value.description, +.detail-item-value, +.TruncatedText, /* scene */ .scene-studio-overlay, .scene-header > h3, h3.scene-header, -.studio-logo, -.image-thumbnail, - -/* image */ -h3.image-header, +.TruncatedText.scene-card__description, +.queue-scene-details, +.marker-wall, -/* group */ -.group-details > div > h2, +/* performer */ +.performer-name, +.card-section, +.name-data, +.aliases-data, /* gallery */ -h3.gallery-header, +.gallery-header.no-studio, +.TruncatedText.gallery-card__description, + /* studio */ -.studio-details .logo, -.studio-details > div > h2, +.studio-name, +.studio-overlay a, +.studio-logo, +.studio-parent-studios, + +/* group */ +.group-details > div > h2, + +/* image */ +h3.image-header, +.Lightbox-carousel:hover, +.TruncatedText.image-card__description, /* tag */ -.logo-container > .logo, -.logo-container > h2 +.TruncatedText.tag-description, +.tag-item.tag-link.badge.badge-secondary, +.tag-name { filter: blur(2px); } + /* === UNBLUR ON HOVER === */ + /* common */ -.thumbnail-section:hover *, -.card:hover .card-section-title, +.detail-item-value:hover, +.scene-cover:hover, + +.card-section-title:hover, +.TruncatedText.tag-description:hover, 
+.detail-item-value.description:hover, +.TruncatedText:hover, /* scene */ -.card:hover .scene-studio-overlay, -.video-js:hover .vjs-poster, -video:hover, +.scene-player-container:hover, +.scene-card-preview:hover, +.queue-scene-details:hover, +.scene-card:hover img, +.TruncatedText.scene-card__description:hover, +.scene-player-container:hover, .scene-header:hover > h3, div:hover > .scene-header, -.studio-logo:hover, -.scene-cover:hover, -.image-thumbnail:hover, -.scene-card-preview:hover, -.scrubber-item:hover, -.scene-image:hover, +.wall-item-media:hover, +.marker-wall:hover, +.wall-item.show-title:hover, /* image */ -.image-image:hover, +.detail-header-image:hover, div:hover > .image-header, -.gallery-image:hover, +.image-card:hover img, +.react-photo-gallery--gallery img:hover, +.image-thumbnail:hover, +.TruncatedText.image-card__description:hover, +.wall-item:hover img, +.image-image:hover, + /* group */ -.group-images:hover, -.group-details > div > h2:hover, +.group-card:hover img, /* gallery */ -div:hover > .gallery-header, -table > tbody > tr > td:hover > a > img.w-100, +.gallery-header.no-studio, +.gallery-card:hover img, +.gallery-cover:hover img, +.gallery-image:hover, +.gallery-card-image:hover, +.TruncatedText.gallery-card__description:hover, +.GalleryWallCard.GalleryWallCard-portrait:hover, +.GalleryWallCard.GalleryWallCard-landscape:hover, /* performer */ -img.performer:hover, +.performer-card-image:hover, +.performer-name:hover, +.card-section:hover, +.name-data:hover, +.aliases-data:hover, +.performer-card img:hover, /* studio */ -.studio-details .logo:hover, -.studio-details:hover > div > h2, +.studio-name:hover, +.studio-overlay:hover a, +.studio-card:hover img, +.studio-parent-studios:hover, +.studio-logo:hover, + /* tag */ -.logo-container > .logo:hover, -.logo-container:hover > h2 +.tag-card:hover img, +.tag-item.tag-link.badge.badge-secondary:hover, +.tag-name:hover { -filter: blur(0px); + filter: blur(0); + transition: filter 0.25s ease; } + 
+/*Credit: fl0w#9497 */ diff --git a/plugins/SFWSwitch/sfw.js b/plugins/SFWSwitch/sfw.js index 2fe3f08c..8e3c62ae 100644 --- a/plugins/SFWSwitch/sfw.js +++ b/plugins/SFWSwitch/sfw.js @@ -1,29 +1,59 @@ -function sfw_mode() { +let sfw_mediaObserver = null; +let sfw_playListener = null; +let sfw_extraListeners = null; + +async function getSfwConfig() { + try { + const response = await fetch('/graphql', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + query: `{ + configuration { + plugins + } + }` + }), + }); + const result = await response.json(); + const pluginSettings = result.data.configuration.plugins.sfwswitch; + return pluginSettings?.audio_setting === true; + } catch (e) { + console.error("SFW Switch: Could not fetch config", e); + return false; + } +} +async function sfw_mode() { const stash_css = sfwswitch_findstashcss(); const button = document.getElementById("plugin_sfw"); - if (stash_css && stash_css.disabled) { - // SFW mode is disabled - button.style.color = "#f5f8fa"; // Default color + if (!stash_css) return; + + const sfwState = localStorage.getItem("sfw_mode") === "true"; + const audioMuteEnabled = await getSfwConfig(); + + stash_css.disabled = !sfwState; + + if (sfwState && audioMuteEnabled) { + sfw_mute_all_media(); } else { - // SFW mode is enabled - button.style.color = "#5cff00"; // Active color + sfw_unmute_all_media(); + } + + if (button) { + button.style.color = sfwState ? 
"#5cff00" : "#f5f8fa"; } } function sfwswitch_createbutton() { const buttonId = "plugin_sfw"; - // Check if the button already exists - if (document.getElementById(buttonId)) { - return; - } + if (document.getElementById(buttonId)) return; - // Create the button element const buttonContainer = document.createElement("a"); buttonContainer.className = "mr-2"; buttonContainer.innerHTML = ` - `; - // Poll for the navbar-buttons container const intervalId = setInterval(() => { const navbarButtons = document.querySelector(".navbar-buttons"); if (navbarButtons) { - clearInterval(intervalId); // Stop polling + clearInterval(intervalId); navbarButtons.insertBefore(buttonContainer, navbarButtons.childNodes[0]); - // Add click event listener document.getElementById(buttonId).addEventListener("click", sfwswitch_switcher); - // Initialize the button state + // Initialize the button based on saved state sfw_mode(); } - }, 100); // Check every 100ms + }, 100); - // Stop polling after a timeout to avoid infinite loops - setTimeout(() => clearInterval(intervalId), 10000); // 10 seconds max + setTimeout(() => clearInterval(intervalId), 10000); } -function sfwswitch_switcher() { - const stash_css = sfwswitch_findstashcss(); - if (!stash_css) { - console.error("SFW stylesheet not found."); - return; +// Function to strictly handle the muted state +function sfw_forceMute(media) { + if (!media) return; + media.muted = true; +} + +function sfw_mute_all_media() { + // Initial sweep + document.querySelectorAll("audio, video").forEach(sfw_forceMute); + + // Global event listener for play, seek, and volume changes + if (!sfw_playListener) { + sfw_playListener = function(e) { + if (e.target.tagName === "VIDEO" || e.target.tagName === "AUDIO") { + sfw_forceMute(e.target); + } + }; + + document.addEventListener("play", sfw_playListener, true); + document.addEventListener("volumechange", sfw_playListener, true); + document.addEventListener("loadeddata", sfw_playListener, true); + 
document.addEventListener("seeking", sfw_playListener, true); + } + + // MutationObserver for content loaded via AJAX/Dynamic updates + if (!sfw_mediaObserver) { + sfw_mediaObserver = new MutationObserver(mutations => { + for (const mutation of mutations) { + mutation.addedNodes.forEach(node => { + if (node.tagName === "VIDEO" || node.tagName === "AUDIO") { + sfw_forceMute(node); + } else if (node.querySelectorAll) { + node.querySelectorAll("video, audio").forEach(sfw_forceMute); + } + }); + } + }); + sfw_mediaObserver.observe(document.body, { childList: true, subtree: true }); + } +} + +function sfw_unmute_all_media() { + // 1. Remove listeners FIRST to prevent them from firing during the unmute loop + if (sfw_playListener) { + document.removeEventListener("play", sfw_playListener, true); + document.removeEventListener("volumechange", sfw_playListener, true); + document.removeEventListener("loadeddata", sfw_playListener, true); + document.removeEventListener("seeking", sfw_playListener, true); + sfw_playListener = null; } + if (sfw_mediaObserver) { + sfw_mediaObserver.disconnect(); + sfw_mediaObserver = null; + } + + // 2. 
Unmute existing media + document.querySelectorAll("audio, video").forEach(media => { + media.muted = false; + // Optional: media.volume = 1.0; // Use if volume was also forced to 0 + }); +} + +async function sfwswitch_switcher() { + const stash_css = sfwswitch_findstashcss(); + if (!stash_css) return; + + // Toggle the CSS stash_css.disabled = !stash_css.disabled; + const enabled = !stash_css.disabled; - const button = document.getElementById("plugin_sfw"); - if (stash_css.disabled) { - console.log("SFW mode disabled"); - button.style.color = "#f5f8fa"; // Default color + localStorage.setItem("sfw_mode", enabled); + + const audioMuteEnabled = await getSfwConfig(); + + // Logic Check: If we just disabled SFW, we MUST run unmute immediately + if (enabled && audioMuteEnabled) { + sfw_mute_all_media(); } else { - console.log("SFW mode enabled"); - button.style.color = "#5cff00"; // Active color + // This clears observers and sets muted = false + sfw_unmute_all_media(); + + // CRITICAL: Force a pause/reset on any media that might be stuck in a background buffer + document.querySelectorAll("audio, video").forEach(media => { + if (media.paused && media.muted) { + // If it was supposed to be stopped, make sure it stays stopped + media.muted = false; + } + }); + } + + const button = document.getElementById("plugin_sfw"); + if (button) { + button.style.color = enabled ? "#5cff00" : "#f5f8fa"; } } @@ -76,19 +181,8 @@ function sfwswitch_findstashcss() { return stylesheet; } } - return null; // Return null if no matching stylesheet is found -} - -function waitForElementClass(elementId, callBack, time) { - time = (typeof time !== 'undefined') ? 
time : 100; - window.setTimeout(function () { - var element = document.getElementsByClassName(elementId); - if (element.length > 0) { - callBack(elementId, element); - } else { - waitForElementClass(elementId, callBack); - } - }, time); + return null; } +// Initialize button on page load sfwswitch_createbutton(); diff --git a/plugins/SFWSwitch/sfwswitch.yml b/plugins/SFWSwitch/sfwswitch.yml index 4067e28e..c3dce90a 100644 --- a/plugins/SFWSwitch/sfwswitch.yml +++ b/plugins/SFWSwitch/sfwswitch.yml @@ -1,9 +1,15 @@ name: SFW Switch description: Add a button to blur covers and images. -version: 1.2 +version: 1.7 url: https://discourse.stashapp.cc/t/sfw-switch/4658 ui: javascript: - sfw.js css: - - sfw.css \ No newline at end of file + - sfw.css + - additional_plugins.css +settings: + audio_setting: + displayName: Enable Sound Mute + description: By default the plugin does not mute sound. Enabling this feature will have sound sources included when the SFW button is enabled. + type: BOOLEAN \ No newline at end of file diff --git a/plugins/SmartResolve/README.md b/plugins/SmartResolve/README.md new file mode 100644 index 00000000..7cf9dad5 --- /dev/null +++ b/plugins/SmartResolve/README.md @@ -0,0 +1,109 @@ +# Smart Resolve + +https://discourse.stashapp.cc/t/smart-resolver/6680 + +UI plugin for Stash’s **Scene Duplicate Checker** (`Settings → Tools → Scene Duplicate Checker`). + +## What it does + +1. **Smart Resolve analysis/tagging** — Adds `Select Smart Resolve` to Stash’s native **Select Options** menu and annotates duplicate rows with reason text plus sync recommendations. +2. **Row tools** — Adds per-row **Sync data** / **Sync rec.** buttons and stash ID badges in the scene details icon row. +3. **Safe selection** — Auto-selects delete candidates only when rules say it is safe; unresolved/sync-required sets are left unchecked. + +## Smart Resolve rule flow + +*Goal:* Identify a single candidate keeper regardless of initial candidate scene count. 
+ +Rules run per each duplicate group on the page. + +1. **Determine a primary keep candidate** + - Process each rule in order. + - Any scene which is deficient of the identified criteria level is eliminated from being the potential keeper and any remaining tied survivors are evaluated as the next sub-step. + - When a sub-step leaves only one file as the potential keeper, move to step 2. + - For any step where the value is not known or null, assume 0 for this phase + - Store step result as selection reason. + - Rules 1-13 are toggleable via plugin settings; rule 14 remains always-on, deterministic fallback. + 1) Prefer the scene with the greatest total pixel resolution (product of x and y, 1920x1080=2073600) + - Apply a 1% pixel-area tolerance: candidates within 1% of the top area are treated as tied for this step. + 2) Prefer the scene with the greatest framerate + 3) Prefer the scene with the better codec (AV1 > H265 > H264 > Others) + 3b) Prefer candidates whose primary file path includes `upgrade` (toggleable) + 4) Prefer the scene with greater duration + 5) Prefer the scene with smaller size (unless file name includes `upgrade`) + - Files with the word `upgrade` cannot be eliminated from candidacy as per having a larger file size. + - Apply file-size tolerance when eliminating larger files: allow `max(1MB, 1% of min file size)` above the smallest file size. + - In a multi-file scenario, files larger than `min + tolerance` may be eliminated (unless `upgrade` token applies). 
+ 6) Prefer the scene with an older scene date (2 day tollerance) + 7) Prefer the scene with more groups + 8) Prefer the scene with stashID + 9) Prefer the scene with more performers + 10) Prefer the scene with markers + 11) Prefer the scene with more tags + 12) Prefer the scene with LESS associated files + 13) Prefer the scene with more non-null metadata elements (title, studio_code, urls, date, director, galleries, studio, performers, groups, tags, details) + 14) Final deterministic tiebreaker, the scene with a lower scene_id +2. **Evaluate each non-keeper iteratively to prevent data loss** + Process each scene and each rule to determine status: + ``` + markForDeletion: (boolean) + markParentForSync: (boolean) + exceptions: ([array](string)) + ``` + Exception rules (any of these will trigger markForDeletion=false) + - Protection rules a-f are toggleable via plugin settings. + a) Protect O-count: O-count scenes should never be marked for deletion + b) Protect Group associations: Only mark for deletion if the same or more group information is attached to the primary candidate (i.e. k.groups{id,index} contains all (n.groups{id,index}) + - null allows a match with only other null + - null does not match a non-null + - Scenes may be members of multiple groups. 
A primary source (k) must replicate all non-keeper (n) sources to not have an exception + - Miss-matched scenes should be flagged according to reason message and marked for manual resolution + c) Protect performer mismatch by ID (markParentForSync=true) + - If non-keeper has any performer ID not present on keeper, trigger exception + - Only identical performer ID sets avoid this exception + d) Protect tag loss >1 for non-stash'd scenes (markParentForSync=true) + e) Protect older dates + if K.date > n.date (markParentForSync=true) + if K.date == null && n.date != null (markParentForSync=true) + if K.date != null && n.date == null (no action) + f) Protect scenes tagged with "Ignore:Smart Resolve" (case-insensitive); never auto-delete these scenes + +3. **Generate decision reason (`reasonAgainst`)** + - Generate message from decision code + - If exception code array is not empty, expand message. Block marking, recommend sync. + - row button becomes **Sync rec.** + - unresolved count increments + - smart auto-selection skips that set + +Notes: +A primary file is determined, then we determine if non-primary needs to be protected from loss. The primary file is never changed mid-analysis. Exceptions do not change the primary file, only protect loss. + +## Usage + +1. Install the plugin folder under your Stash plugins directory and enable it in **Settings → Plugins**. +2. Open the **Scene Duplicate Checker**. +3. Open **Select Options** and click **Select Smart Resolve**. +4. Review unresolved/sync-rec rows (`Sync rec.` buttons and unresolved counter). +5. Use row **Sync data** where recommended, then re-run Smart Resolve. +6. Use Stash’s native Delete/Merge actions on remaining selected rows. + +Optional setting: **After Sync, mark source scenes for deletion** — default for the “check sources after sync” checkbox in the Sync modal. 
+ +## Settings UI + +![Smart Resolve settings](about.png) + +## Limits + +- Rules evaluate **only visible duplicate groups on the current page**. Pagination and page size can change outcomes seen in a single run. +- Any missing/unknown criterion values are normalized to `0` during candidate selection (line 20 behavior). This preserves determinism but can favor records with more populated metadata fields. +- Step 1 is a strict elimination pipeline. Once a scene is eliminated by an earlier criterion, later criteria do not reintroduce it. +- The `upgrade` filename exception is a string heuristic. It is not case-sensitive. This step is not expected to be a major factor, but creates an easy way to work around the plugin. +- Group protection depends on `{group.id, scene_index}` containment semantics; mismatches are expected to force manual/sync resolution. +- Date protection assumes parseable comparable date values. Stash provides some date parse semantics. However all null dates should be assumed to be the last date of any incomplete window. (i.e. 2020 -> 2020-12-31, 2020-06 -> 2020-06-30) Null, Invalid, or unparseable values should be treated as 2999-12-31 by implementation. +- `markParentForSync` and `exceptions` are structured outputs in this spec, but UI sync-rec indicators must be wired to those flags in implementation. +- This flow is designed for deterministic outcomes, not probabilistic ranking; tie-break behavior is intentionally resolved by lower `scene_id`. +- Sync actions are `sceneUpdate`-based (metadata transfer), not full `sceneMerge`; scene IDs remain separate after sync. + +## Repository + +Maintained in [Stash-KennyG/CommunityScripts](https://github.com/Stash-KennyG/CommunityScripts). 
diff --git a/plugins/SmartResolve/SmartResolve.css b/plugins/SmartResolve/SmartResolve.css new file mode 100644 index 00000000..06e7e782 --- /dev/null +++ b/plugins/SmartResolve/SmartResolve.css @@ -0,0 +1,387 @@ +#duplicate-resolver-toolbar { + margin: 0.75rem 0 1rem; + padding: 0.75rem 1rem; + background: var(--bs-secondary-bg, rgba(0, 0, 0, 0.15)); + border-radius: 0.25rem; + border: 1px solid var(--bs-border-color, rgba(255, 255, 255, 0.12)); +} + +#duplicate-resolver-toolbar .dr-toolbar-title { + font-weight: 600; + margin-bottom: 0.5rem; +} + +#duplicate-resolver-toolbar .dr-btn-row { + display: flex; + flex-wrap: wrap; + gap: 0.5rem; + align-items: center; + margin-bottom: 0.35rem; +} + +#duplicate-resolver-toolbar .dr-btn-row:empty { + display: none; + margin: 0; +} + +#scene-duplicate-checker .dr-core-actions { + display: inline-flex; + gap: 0.35rem; + align-items: center; + margin-left: 0.4rem; +} + +#scene-duplicate-checker .dr-processing-indicator { + display: inline-flex; + align-items: center; + gap: 0.35rem; + color: var(--bs-body-color, #d7dbe0); + font-size: 0.75rem; + line-height: 1; +} + +#scene-duplicate-checker .dr-processing-spinner { + width: 0.85rem; + height: 0.85rem; + border: 2px solid rgba(255, 255, 255, 0.25); + border-top-color: var(--bs-info, #7cc7ff); + border-radius: 50%; + animation: dr-spin 0.8s linear infinite; +} + +#scene-duplicate-checker .dr-processing-bar { + width: 80px; + height: 8px; + overflow: hidden; + border-radius: 999px; + background: rgba(255, 255, 255, 0.18); +} + +#scene-duplicate-checker .dr-processing-bar-fill { + display: block; + width: 40px; + height: 100%; + border-radius: 999px; + background: linear-gradient( + 90deg, + rgba(124, 199, 255, 0.2) 0%, + rgba(124, 199, 255, 0.95) 50%, + rgba(124, 199, 255, 0.2) 100% + ); + animation: dr-progress-slide 1s ease-in-out infinite; +} + +@keyframes dr-spin { + from { + transform: rotate(0deg); + } + to { + transform: rotate(360deg); + } +} + +@keyframes 
dr-progress-slide { + 0% { + transform: translateX(-40px); + } + 100% { + transform: translateX(80px); + } +} + +/* Keep duplicate rows compact and prevent action/icon wrapping. */ +#scene-duplicate-checker table.duplicate-checker-table td.scene-details .btn-group { + flex-wrap: nowrap !important; +} + +#scene-duplicate-checker table.duplicate-checker-table td.scene-details, +#scene-duplicate-checker table.duplicate-checker-table td:last-child { + white-space: nowrap; +} + +#scene-duplicate-checker .dr-inline-reason { + font-size: calc(1em + 2pt); + color: var(--bs-warning, #ffd54a); +} + +#scene-duplicate-checker table.duplicate-checker-table tbody tr.dr-unresolved-highlight { + background: rgba(255, 0, 0, 0.08) !important; +} + +#scene-duplicate-checker .dr-stashid-btn { + display: inline-flex !important; + align-items: center; + gap: 0.3rem; +} + +#scene-duplicate-checker .dr-stashid-btn.dr-stashid-btn-link { + cursor: pointer; +} + +#scene-duplicate-checker .dr-stashid-box-icon { + width: 0.95em; + height: 0.95em; + display: inline-block; + vertical-align: middle; +} + +#duplicate-resolver-toolbar .dr-drawer { + margin-top: 0.15rem; +} + +#duplicate-resolver-toolbar .dr-drawer-toggle { + display: inline-flex; + align-items: center; + gap: 0.35rem; + padding: 0.15rem 0 !important; + font-size: 0.875rem; + text-decoration: none; + border: 0; + box-shadow: none; +} + +#duplicate-resolver-toolbar .dr-drawer-toggle:hover, +#duplicate-resolver-toolbar .dr-drawer-toggle:focus { + text-decoration: underline; +} + +#duplicate-resolver-toolbar .dr-drawer-panel { + margin-top: 0.5rem; + padding-top: 0.5rem; + border-top: 1px solid var(--bs-border-color, rgba(255, 255, 255, 0.12)); +} + +#duplicate-resolver-toolbar .dr-drawer-toolbar { + margin-bottom: 0.5rem; +} + +#duplicate-resolver-toolbar .dr-preview { + font-size: 0.875rem; + max-height: 14rem; + overflow: auto; + white-space: pre-wrap; + margin: 0; + padding: 0.5rem; + background: var(--bs-body-bg, #1a1a1a); + 
border-radius: 0.25rem; +} + +#duplicate-resolver-toolbar .dr-preview .dr-match-link { + color: var(--bs-info, #7cc7ff); + text-decoration: underline; + cursor: pointer; +} + +.duplicate-resolver-sync-btn { + margin-left: 0.35rem !important; +} + +#duplicate-resolver-modal-overlay { + position: fixed; + inset: 0; + background: rgba(0, 0, 0, 0.6); + z-index: 1050; + display: flex; + align-items: center; + justify-content: center; + padding: 1rem; +} + +#duplicate-resolver-modal-overlay .dr-modal { + background: var(--bs-secondary-bg, #30404d); + color: var(--bs-body-color, #eee); + border: 1px solid var(--bs-border-color, rgba(255, 255, 255, 0.18)); + border-radius: 0.35rem; + max-width: 60rem; + width: 100%; + max-height: 90vh; + overflow: auto; + padding: 1rem 1.25rem; +} + +#duplicate-resolver-modal-overlay h3 { + margin-top: 0; + font-size: 1.1rem; +} + +#duplicate-resolver-modal-overlay .dr-modal-header { + align-items: center; + display: flex; + gap: 0.45rem; + margin: -1rem -1.25rem 0.75rem; + padding: 0.7rem 1rem; +} + +#duplicate-resolver-modal-overlay .dr-modal-options { + margin: 0.75rem 0; +} + +#duplicate-resolver-modal-overlay .dr-modal-options label { + display: block; + margin: 0.25rem 0; + cursor: pointer; +} + +#duplicate-resolver-modal-overlay .dr-field-title { + display: block; + margin: 0.1rem 0 0.35rem; + font-size: 0.88rem; + font-weight: 600; + line-height: 1.15; +} + +#duplicate-resolver-modal-overlay .dr-opt-hint { + font-size: 0.82rem; + opacity: 0.9; + font-weight: 400; +} + +#duplicate-resolver-modal-overlay .dr-sync-compare { + display: grid; + grid-template-columns: 1fr 1fr; + gap: 0.75rem; + margin: 0.75rem 0 1rem; +} + +#duplicate-resolver-modal-overlay .dr-sync-compare .dr-col { + background: var(--bs-secondary-bg, rgba(255, 255, 255, 0.03)); + border: 1px solid var(--bs-border-color, rgba(255, 255, 255, 0.08)); + border-radius: 0.25rem; + padding: 0.5rem 0.6rem; +} + +#duplicate-resolver-modal-overlay .dr-sync-compare h4 { + 
margin: 0 0 0.4rem; + font-size: 1rem; + font-weight: 700; + letter-spacing: 0.01em; +} + +#duplicate-resolver-modal-overlay .dr-sync-compare p { + margin: 0; + font-size: 0.82rem; + opacity: 0.9; +} + +#duplicate-resolver-modal-overlay .dr-field-row { + border-top: 1px solid var(--bs-border-color, rgba(255, 255, 255, 0.08)); + padding-top: 0.55rem; + margin-top: 0.55rem; +} + +#duplicate-resolver-modal-overlay .dr-field-desc { + font-size: 0.78rem; + opacity: 0.85; + margin: 0.15rem 0 0.35rem; +} + +#duplicate-resolver-modal-overlay .dr-field-grid { + display: grid; + grid-template-columns: 1fr 1fr; + gap: 0.6rem; +} + +#duplicate-resolver-modal-overlay .dr-field-col { + background: transparent; + border-radius: 0.18rem; + padding: 0.35rem 0.45rem; + min-height: 3rem; + height: auto; +} + +#duplicate-resolver-modal-overlay .dr-list-control { + width: 100%; +} + +#duplicate-resolver-modal-overlay .dr-list-control .input-group + .input-group { + margin-top: 0.3rem; +} + +#duplicate-resolver-modal-overlay .dr-chip-list { + display: flex; + flex-wrap: wrap; + gap: 0.28rem; +} + +#duplicate-resolver-modal-overlay .dr-chip { + max-width: 100%; + padding: 0.10rem 0.35rem; + border-radius: 0.14rem; + line-height: 1.15; + font-size: 0.84rem; + font-weight: 400; + border: 0; + background-color: #b9c4cd; + color: #21313f; + white-space: nowrap; + text-overflow: ellipsis; + overflow: hidden; +} + +#duplicate-resolver-modal-overlay .dr-field-col .input-group { + align-items: stretch; +} + +#duplicate-resolver-modal-overlay .dr-field-col .input-group .dr-list-control { + flex: 1 1 auto; +} + +#duplicate-resolver-modal-overlay .dr-field-col .form-control, +#duplicate-resolver-modal-overlay .dr-field-col .scene-description { + min-height: calc(1.5em + 0.75rem + 2px); +} + +#duplicate-resolver-modal-overlay .dr-empty { + font-size: 0.74rem; + opacity: 0.75; +} + +#duplicate-resolver-modal-overlay .dr-modal-actions { + display: flex; + gap: 0.5rem; + justify-content: flex-end; + 
margin-top: 1rem; +} + +#duplicate-resolver-modal-overlay .dr-field-col .input-group-prepend .btn { + min-width: 2.2rem; +} + +#duplicate-resolver-modal-overlay .dr-cover-value { + min-width: 0; +} + +#duplicate-resolver-modal-overlay .dr-cover-frame { + background: rgba(0, 0, 0, 0.2); + border-radius: 0.18rem; + padding: 0.35rem; + text-align: center; +} + +#duplicate-resolver-modal-overlay .dr-cover-thumb { + display: block; + max-height: 180px; + max-width: 100%; + width: auto; + height: auto; + margin: 0 auto; + object-fit: contain; + border-radius: 0.12rem; +} + +#duplicate-resolver-modal-overlay .dr-cover-caption { + font-size: 0.72rem; + opacity: 0.8; + margin-top: 0.3rem; + text-align: center; +} + +#duplicate-resolver-modal-overlay .dr-cover-placeholder { + font-size: 0.78rem; + opacity: 0.75; + padding: 0.35rem 0.25rem; + margin-bottom: 0.25rem; + border: 1px dashed rgba(255, 255, 255, 0.2); + border-radius: 0.12rem; + text-align: center; +} diff --git a/plugins/SmartResolve/SmartResolve.js b/plugins/SmartResolve/SmartResolve.js new file mode 100644 index 00000000..09eb5abe --- /dev/null +++ b/plugins/SmartResolve/SmartResolve.js @@ -0,0 +1,2874 @@ +/** + * SmartResolve — Scene Duplicate Checker helper. + * Smart Select: rule-based checks on Stash’s native row checkboxes. + * Sync Data: mergeless sceneUpdate merge from sibling duplicates. 
+ */ +(function () { + "use strict"; + + var ROUTE = "/sceneDuplicateChecker"; + var ROOT_ID = "scene-duplicate-checker"; + var PLUGIN_ID = "SmartResolve"; + + function defaultRuleToggles() { + return { + step_01_total_pixels: true, + step_02_framerate: true, + step_03_codec: true, + step_upgrade_token: true, + step_04_duration: true, + step_05_smaller_size: true, + step_06_older_date: true, + step_07_more_groups: true, + step_08_has_stashid: true, + step_09_more_performers: true, + step_10_more_markers: true, + step_11_more_tags: true, + step_12_less_associated_files: true, + step_13_more_metadata_cardinality: true, + }; + } + + function defaultProtectionToggles() { + return { + protect_o_count: true, + protect_group_association: true, + protect_performer_mismatch: true, + protect_tag_loss_gt_1_non_stashed: true, + protect_older_date: true, + protect_ignore_smart_resolve_tag: true, + }; + } + + var state = { + groups: null, + lastPlan: null, + loading: false, + autoCheckDefault: true, + applyingDomEnhancements: false, + lastBadgePageKey: "", + ruleToggles: defaultRuleToggles(), + protectionToggles: defaultProtectionToggles(), + /** True after user runs Select Smart Resolve — sync/other refreshes preserve UI when set. */ + smartResolveUiActive: false, + observer: null, + attachedRoot: null, + retryTimer: null, + }; + + function parseParams() { + var q = new URLSearchParams(window.location.search); + return { + page: Math.max(1, parseInt(q.get("page") || "1", 10) || 1), + size: Math.max(1, parseInt(q.get("size") || "20", 10) || 20), + distance: parseInt(q.get("distance") || "0", 10) || 0, + durationDiff: parseFloat(q.get("durationDiff") || "1"), + }; + } + + /** Same green/red banners Stash uses for “Updated scene” etc. (see hooks/Toast.tsx). */ + var stashInlineNotifyRef = null; + var stashInlineNotifyBridgeInstalled = false; + + /** Must be a stable function identity — defining inside patch.after() remounts every App render and breaks the UI. 
*/ + function DuplicateResolverStashNotifyMount() { + var P = window.PluginApi; + var R = P.React; + var t = P.hooks.useToast(); + R.useEffect( + function () { + stashInlineNotifyRef = t; + return function () { + stashInlineNotifyRef = null; + }; + }, + [t] + ); + return null; + } + + function installStashInlineNotifyBridge() { + if (stashInlineNotifyBridgeInstalled || typeof window.PluginApi === "undefined") return; + var P = window.PluginApi; + if (!P.patch || !P.patch.after || !P.React || !P.hooks || !P.hooks.useToast) return; + stashInlineNotifyBridgeInstalled = true; + P.patch.after("App", function () { + var R = P.React; + /** Patch passes afterFn(...originalArgs, renderedTree). Last arg is always App output; arity can be 1 if a before() cleared args. */ + var prevTree = arguments[arguments.length - 1]; + return R.createElement( + R.Fragment, + null, + R.createElement(DuplicateResolverStashNotifyMount, null), + prevTree + ); + }); + } + + function notifyStashSuccess(message) { + if (stashInlineNotifyRef) stashInlineNotifyRef.success(message); + else window.alert(message); + } + + function notifyStashError(err) { + if (stashInlineNotifyRef) stashInlineNotifyRef.error(err); + else + window.alert( + err && err.message ? err.message : typeof err === "string" ? 
err : String(err) + ); + } + + function notifyStashWarning(message) { + if (stashInlineNotifyRef && stashInlineNotifyRef.toast) + stashInlineNotifyRef.toast({ content: message, variant: "warning" }); + else window.alert(message); + } + + async function gql(query, variables) { + var res = await fetch("/graphql", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ query: query, variables: variables || {} }), + }); + var j = await res.json(); + if (j.errors && j.errors.length) + throw new Error(j.errors.map(function (e) { return e.message; }).join("; ")); + return j.data; + } + + var DUPLICATE_QUERY = + "query FindDuplicateScenesDr($distance: Int, $duration_diff: Float) {" + + " findDuplicateScenes(distance: $distance, duration_diff: $duration_diff) {" + + " id title code details director urls date rating100" + + " o_counter" + + " paths { screenshot }" + + " files { id path size width height video_codec bit_rate duration }" + + " scene_markers { id }" + + " studio { id name }" + + " tags { id name }" + + " performers { id name }" + + " groups { group { id name } scene_index }" + + " galleries { id title }" + + " stash_ids { endpoint stash_id }" + + " }" + + "}"; + + function groupTotalSize(group) { + return group.reduce(function (acc, s) { + return ( + acc + + (s.files || []).reduce(function (a, f) { + return a + (f.size || 0); + }, 0) + ); + }, 0); + } + + function sortGroupsLikeStash(groups) { + return groups.slice().sort(function (a, b) { + return groupTotalSize(b) - groupTotalSize(a); + }); + } + + function codecRank(codec) { + var c = (codec || "").toLowerCase(); + if (c.indexOf("av01") !== -1 || c.indexOf("av1") !== -1) return 5; + if (c.indexOf("hevc") !== -1 || c.indexOf("h265") !== -1) return 4; + if (c.indexOf("vp9") !== -1) return 3; + if (c.indexOf("h264") !== -1 || c.indexOf("avc") !== -1) return 2; + return 1; + } + + function primaryFile(scene) { + return (scene.files && scene.files[0]) || {}; + } + + 
function countSignals(scene) { + return { + tags: (scene.tags || []).length, + performers: (scene.performers || []).length, + groups: (scene.groups || []).length, + markers: (scene.scene_markers || []).length, + oCount: scene.o_counter || 0, + }; + } + + function isNearlySameDuration(a, b) { + var da = Math.max(0, a || 0); + var db = Math.max(0, b || 0); + if (!da || !db) return false; + var diff = Math.abs(da - db); + var max = Math.max(da, db); + return diff <= 2 || diff / max <= 0.02; + } + + function efficiencyWinner(a, b) { + var fa = primaryFile(a); + var fb = primaryFile(b); + var aPixels = (fa.width || 0) * (fa.height || 0); + var bPixels = (fb.width || 0) * (fb.height || 0); + if (!aPixels || !bPixels || aPixels !== bPixels) return null; + if (!isNearlySameDuration(fa.duration || 0, fb.duration || 0)) return null; + + var aCodec = codecRank(fa.video_codec); + var bCodec = codecRank(fb.video_codec); + if (aCodec === bCodec) return null; + + var aSize = fa.size || 0; + var bSize = fb.size || 0; + var aRate = fa.bit_rate || 0; + var bRate = fb.bit_rate || 0; + + var aMuchSmaller = !!(aSize && bSize && aSize <= bSize * 0.75); + var bMuchSmaller = !!(aSize && bSize && bSize <= aSize * 0.75); + var aLowerRate = !!(aRate && bRate && aRate <= bRate * 0.8); + var bLowerRate = !!(aRate && bRate && bRate <= aRate * 0.8); + + if (aCodec > bCodec && (aMuchSmaller || aLowerRate)) return "a"; + if (bCodec > aCodec && (bMuchSmaller || bLowerRate)) return "b"; + return null; + } + + // Returns "a" or "b" when one side is >= across all categories and > in at least one. + function unanimousCategoryWinner(a, b) { + var ka = countSignals(a); + var kb = countSignals(b); + // Tags are noisy/drift-prone; keep them out of decisive unanimity. 
+ var keys = ["performers", "groups", "markers", "oCount"]; + var aGeAll = true; + var bGeAll = true; + var aGtAny = false; + var bGtAny = false; + keys.forEach(function (k) { + if (ka[k] < kb[k]) aGeAll = false; + if (kb[k] < ka[k]) bGeAll = false; + if (ka[k] > kb[k]) aGtAny = true; + if (kb[k] > ka[k]) bGtAny = true; + }); + if (aGeAll && aGtAny) return "a"; + if (bGeAll && bGtAny) return "b"; + return null; + } + + function compareKeeper(a, b) { + // O-count is a hard guard: never prefer deleting an O-count scene. + var oa = a.o_counter || 0; + var ob = b.o_counter || 0; + if (!!oa !== !!ob) return ob - oa; + + // Prefer the best source file first; metadata can be synced. + var af = primaryFile(a); + var bf = primaryFile(b); + var aPixels = (af.width || 0) * (af.height || 0); + var bPixels = (bf.width || 0) * (bf.height || 0); + if (aPixels !== bPixels) return bPixels - aPixels; + + // If one side clearly wins category-by-category, keep it. + var unanimous = unanimousCategoryWinner(a, b); + if (unanimous === "a") return -1; + if (unanimous === "b") return 1; + + var ga = (a.groups && a.groups.length) || 0; + var gb = (b.groups && b.groups.length) || 0; + if (ga !== gb) return gb - ga; + + var sa = (a.stash_ids && a.stash_ids.length) || 0; + var sb = (b.stash_ids && b.stash_ids.length) || 0; + if (sa !== sb) return sb - sa; + + // Prefer clearly better encoding efficiency at equivalent visual profile. 
+ var eff = efficiencyWinner(a, b); + if (eff === "a") return -1; + if (eff === "b") return 1; + + var aCounts = countSignals(a); + var bCounts = countSignals(b); + if (aCounts.groups !== bCounts.groups) return bCounts.groups - aCounts.groups; + if (aCounts.performers !== bCounts.performers) return bCounts.performers - aCounts.performers; + if (aCounts.markers !== bCounts.markers) return bCounts.markers - aCounts.markers; + if (aCounts.tags !== bCounts.tags) return bCounts.tags - aCounts.tags; + + var fa = primaryFile(a); + var fb = primaryFile(b); + var aCodec = codecRank(fa.video_codec); + var bCodec = codecRank(fb.video_codec); + if (aCodec !== bCodec) return bCodec - aCodec; + + var pa = (fa.width || 0) * (fa.height || 0); + var pb = (fb.width || 0) * (fb.height || 0); + if (pa !== pb) return pb - pa; + + var za = fa.size || 0; + var zb = fb.size || 0; + if (za !== zb) return zb - za; + + return String(a.id).localeCompare(String(b.id)); + } + + function pickKeeper(group) { + return group.slice().sort(compareKeeper)[0]; + } + + function reasonAgainst(keeper, other) { + if (!keeper || !other) return "deterministic fallback"; + + function dataSignals(scene) { + return { + hasTitle: !!(scene.title && String(scene.title).trim()), + hasCode: !!(scene.code && String(scene.code).trim()), + hasDetails: !!(scene.details && String(scene.details).trim()), + hasDirector: !!(scene.director && String(scene.director).trim()), + hasDate: !!(scene.date && String(scene.date).trim()), + tagCount: (scene.tags || []).length, + performerCount: (scene.performers || []).length, + groupCount: (scene.groups || []).length, + stashIdCount: (scene.stash_ids || []).length, + urlCount: (scene.urls || []).length, + galleryCount: (scene.galleries || []).length, + hasStudio: !!(scene.studio && scene.studio.id), + }; + } + + function isSparse(sig) { + return ( + !sig.hasTitle && + !sig.hasCode && + !sig.hasDetails && + !sig.hasDirector && + !sig.hasDate && + sig.tagCount === 0 && + 
sig.performerCount === 0 && + sig.groupCount === 0 && + sig.stashIdCount === 0 && + sig.urlCount === 0 && + sig.galleryCount === 0 && + !sig.hasStudio + ); + } + + function groupIdSafe(g) { + return g && g.group && g.group.id != null ? String(g.group.id) : null; + } + + function groupSummary(scene) { + var groups = scene.groups || []; + var ids = groups + .map(function (g) { return groupIdSafe(g); }) + .filter(function (id) { return !!id; }); + var idxMap = new Map(); + groups.forEach(function (g) { + var id = groupIdSafe(g); + if (id) idxMap.set(id, g.scene_index); + }); + return { ids: ids, idxMap: idxMap }; + } + + function haveSameGroupSet(aSummary, bSummary) { + if (aSummary.ids.length !== bSummary.ids.length) return false; + var setB = new Set(bSummary.ids); + for (var i = 0; i < aSummary.ids.length; i++) { + if (!setB.has(aSummary.ids[i])) return false; + } + return true; + } + + function groupReasonPrefix() { + var kg = groupSummary(keeper); + var og = groupSummary(other); + if (!kg.ids.length && !og.ids.length) return ""; + if (!!kg.ids.length !== !!og.ids.length) { + return "Group presence differs"; + } + var overlap = kg.ids.filter(function (id) { return og.ids.indexOf(id) !== -1; }); + if (!overlap.length) return "Different groups"; + var details = overlap.map(function (id) { + var kIdx = kg.idxMap.get(id); + var oIdx = og.idxMap.get(id); + return "(" + id + "," + String(kIdx) + "/" + String(oIdx) + ")"; + }); + return "Same group(s) " + details.join(", "); + } + + function withGroupContext(msg) { + var ctx = groupReasonPrefix(); + return ctx ? 
ctx + ", " + msg : msg; + } + + var kSig = dataSignals(keeper); + var oSig = dataSignals(other); + var kCounts = countSignals(keeper); + var oCounts = countSignals(other); + var kg = groupSummary(keeper); + var og = groupSummary(other); + var kf = primaryFile(keeper); + var of = primaryFile(other); + var kPixels = (kf.width || 0) * (kf.height || 0); + var oPixels = (of.width || 0) * (of.height || 0); + + // Never auto-resolve by deleting a better source file. + if (oPixels > kPixels) { + return withGroupContext( + "duplicate has higher resolution (" + + (of.width || 0) + + "x" + + (of.height || 0) + + " vs " + + (kf.width || 0) + + "x" + + (kf.height || 0) + + "). Recommend synch data from duplicate." + ); + } + + // Keeper has substantive metadata while duplicate is sparse -> keep keeper. + if (!isSparse(kSig) && isSparse(oSig)) { + return ( + "Keep scene with data." + ); + } + + // Different group sets are a hard sync case to avoid losing group associations. + if (!haveSameGroupSet(kg, og)) { + return withGroupContext( + "different group associations. Recommend synch data from duplicate." + ); + } + + var kGroups = (keeper.groups || []).length; + var oGroups = (other.groups || []).length; + // Group cardinality differs -> keep group-richer scene, but sync to avoid loss. + if (kGroups !== oGroups) { + if (kGroups > oGroups) { + return "Keep Scene with Group Association."; + } + return "Duplicate has additional Group Association. Recommend synch data from duplicate."; + } + + var kStash = (keeper.stash_ids && keeper.stash_ids.length) || 0; + var oStash = (other.stash_ids && other.stash_ids.length) || 0; + // External IDs differ -> prefer scene with more confirmed IDs. + if (kStash !== oStash) { + return withGroupContext( + "keep scene with confirmed IDs while duplicate has fewer/none." + ); + } + + // One side has O-count and the other does not -> keep O-count anchor, sync remaining deltas. 
+ if (!!kCounts.oCount !== !!oCounts.oCount) { + return withGroupContext( + "keep scene with O-count signal. Recommend synch data from duplicate." + ); + } + + // Both scenes are stashed. + // No discernable difference in metadata beyond tags + // Chosing scene with more tags. + var stashLinked = ((keeper.stash_ids || []).length + (other.stash_ids || []).length) > 0; + var highValueEqual = + kCounts.performers === oCounts.performers && + kCounts.groups === oCounts.groups && + kCounts.markers === oCounts.markers && + kCounts.oCount === oCounts.oCount; + // Both scenes are stashed and only tags differ -> de-prioritize tags noise, keep tag-richer scene. + if (stashLinked && highValueEqual && kCounts.tags !== oCounts.tags) { + return withGroupContext( + "All scenes stashed, tag-only difference; keep scene with more tags." + ); + } + + // Non-stashed variant of tag-only delta -> keep tag-richer scene. + if (highValueEqual && kCounts.tags !== oCounts.tags) { + return withGroupContext( + "tag-only difference; keep scene with more tags." + ); + } + + // No metadata signal separates them -> keeper came from deterministic ordering. + if ( + highValueEqual && + kCounts.tags === oCounts.tags + ) { + return withGroupContext( + "no meaningful metadata delta; deterministic keeper tie-break." + ); + } + + var effWinner = efficiencyWinner(keeper, other); + // Same visual profile but keeper is clearly more efficient codec/bitrate/size -> sync then delete duplicate. + if (effWinner === "a") { + return withGroupContext( + "codec/efficiency winner at equivalent resolution/duration. Recommend synch data from duplicate." + ); + } + // Duplicate is more efficient while current keeper remained selected by ordering -> sync recommended. + if (effWinner === "b") { + return withGroupContext( + "duplicate is codec/efficiency winner at equivalent resolution/duration. Recommend synch data from duplicate." 
+ ); + } + + var unanimous = unanimousCategoryWinner(keeper, other); + // Keeper is >= duplicate across decisive categories -> safe keep decision. + if (unanimous === "a") { + return withGroupContext( + "unanimous category winner (tags/performers/groups/markers/o-count)." + ); + } + // Duplicate is unanimous winner, but keeper was chosen by upstream ordering -> sync before cleanup. + if (unanimous === "b") { + return withGroupContext( + "duplicate is unanimous category winner; keeper chosen by deterministic fallback. Recommend synch data from duplicate." + ); + } + + // Category split means potential data loss either way -> force sync recommendation. + return withGroupContext( + "category split (tags/performers/groups/markers/o-count). Recommend synch data from duplicate." + ); + } + + async function loadDuplicateGroups() { + var p = parseParams(); + var data = await gql(DUPLICATE_QUERY, { + distance: p.distance, + duration_diff: p.durationDiff, + }); + state.groups = sortGroupsLikeStash(data.findDuplicateScenes || []); + return state.groups; + } + + async function refreshPlanAndDecorations() { + var scrollY = typeof window !== "undefined" ? 
window.scrollY : 0; + await loadDuplicateGroups(); + state.lastPlan = buildPlan(); + renderInlineReasons(state.lastPlan); + renderSyncRecommendations(state.lastPlan); + if ( + state.smartResolveUiActive && + state.lastPlan && + state.lastPlan.entries && + state.lastPlan.entries.length + ) { + ensureMatchSetAnchors(); + renderPlanDetailsIntoDrawer(state.lastPlan); + updateUnresolvedButton(state.lastPlan, true); + setSmartResolveDetailsVisible(true, false); + } + if (typeof window !== "undefined") { + requestAnimationFrame(function () { + requestAnimationFrame(function () { + window.scrollTo(0, scrollY); + }); + }); + } + } + + function shouldRefreshAfterSync() { + var p = parseParams(); + var distance = Number(p && p.distance) || 0; + var durationDiff = Number(p && p.durationDiff); + if (!Number.isFinite(durationDiff)) durationDiff = 1; + // Near-dupe mode can make duplicate query expensive; let user refresh manually. + return !(distance > 0 && durationDiff > 1); + } + + function visibleGroups(groups) { + var p = parseParams(); + var start = (p.page - 1) * p.size; + return groups.slice(start, start + p.size); + } + + function parseDateForComparison(v) { + if (v == null) return new Date("2999-12-31T00:00:00Z").getTime(); + var s = String(v).trim(); + if (!s) return new Date("2999-12-31T00:00:00Z").getTime(); + if (/^\d{4}$/.test(s)) s = s + "-12-31"; + else if (/^\d{4}-\d{2}$/.test(s)) { + var y = parseInt(s.slice(0, 4), 10); + var m = parseInt(s.slice(5, 7), 10); + var lastDay = new Date(Date.UTC(y, m, 0)).getUTCDate(); + s = s + "-" + String(lastDay).padStart(2, "0"); + } + var t = Date.parse(s); + if (Number.isNaN(t)) return new Date("2999-12-31T00:00:00Z").getTime(); + return t; + } + + var EARLIER_DATE_BUFFER_MS = 36 * 60 * 60 * 1000; // 1.5 days + + function roundedDurationSeconds(v) { + var n = Number(v || 0) || 0; + return Math.round(n); + } + + function fileHasUpgradeToken(scene) { + var f = primaryFile(scene); + var p = String(f.path || "").toUpperCase(); + 
return p.indexOf("UPGRADE") !== -1; + } + + function metadataCardinality(scene) { + var score = 0; + function hasText(v) { + return !!(v != null && String(v).trim()); + } + if (hasText(scene.title)) score += 1; + if (hasText(scene.code)) score += 1; + if ((scene.urls || []).length) score += (scene.urls || []).length; + if (hasText(scene.date)) score += 1; + if (hasText(scene.director)) score += 1; + if ((scene.galleries || []).length) score += (scene.galleries || []).length; + if (scene.studio && scene.studio.id != null) score += 1; + if ((scene.performers || []).length) score += (scene.performers || []).length; + if ((scene.groups || []).length) score += (scene.groups || []).length; + if ((scene.tags || []).length) score += (scene.tags || []).length; + if (hasText(scene.details)) score += 1; + return score; + } + + function eliminateByMetric(candidates, metricFn, mode) { + if (!candidates.length) return candidates; + var vals = candidates.map(metricFn); + var target = mode === "min" ? Math.min.apply(null, vals) : Math.max.apply(null, vals); + return candidates.filter(function (s) { + return metricFn(s) === target; + }); + } + + function eliminateByMaxWithinPercent(candidates, metricFn, tolerancePercent) { + if (!candidates.length) return candidates; + var vals = candidates.map(metricFn); + var maxVal = Math.max.apply(null, vals); + if (maxVal <= 0) return candidates.slice(); + var tolerance = Math.max(0, Number(tolerancePercent || 0)) / 100; + var minAllowed = maxVal * (1 - tolerance); + return candidates.filter(function (s) { + return metricFn(s) >= minAllowed; + }); + } + + function eliminateByEarliestDateWithBuffer(candidates) { + if (!candidates.length) return candidates; + var vals = candidates.map(function (s) { + return parseDateForComparison(s.date); + }); + var minVal = Math.min.apply(null, vals); + return candidates.filter(function (s) { + return parseDateForComparison(s.date) <= minVal + EARLIER_DATE_BUFFER_MS; + }); + } + + function 
chooseKeeperBySpec(group) { + var candidates = group.slice(); + var decision = "step_14_scene_id"; + var toggles = state.ruleToggles || defaultRuleToggles(); + function enabled(key) { + return toggles[key] !== false; + } + function step(code, reducer) { + if (candidates.length <= 1) return; + var next = reducer(candidates); + if (next.length < candidates.length) { + decision = code; + } + candidates = next; + } + + if (enabled("step_01_total_pixels")) step("step_01_total_pixels", function (arr) { + return eliminateByMaxWithinPercent( + arr, + function (s) { + var f = primaryFile(s); + return (f.width || 0) * (f.height || 0); + }, + 1 + ); + }); + if (enabled("step_02_framerate")) step("step_02_framerate", function (arr) { + return eliminateByMetric( + arr, + function (s) { + var f = primaryFile(s); + return Number(f.frame_rate || f.framerate || 0) || 0; + }, + "max" + ); + }); + if (enabled("step_03_codec")) step("step_03_codec", function (arr) { + return eliminateByMetric( + arr, + function (s) { + return codecRank(primaryFile(s).video_codec); + }, + "max" + ); + }); + if (enabled("step_upgrade_token")) step("step_upgrade_token", function (arr) { + return eliminateByMetric( + arr, + function (s) { return fileHasUpgradeToken(s) ? 
1 : 0; }, + "max" + ); + }); + if (enabled("step_04_duration")) step("step_04_duration", function (arr) { + return eliminateByMetric( + arr, + function (s) { + return roundedDurationSeconds(primaryFile(s).duration || 0); + }, + "max" + ); + }); + if (enabled("step_05_smaller_size")) step("step_05_smaller_size", function (arr) { + var minSize = Math.min.apply( + null, + arr.map(function (s) { return Number(primaryFile(s).size || 0) || 0; }) + ); + var sizeTolerance = Math.max(1024 * 1024, minSize * 0.01); + return arr.filter(function (s) { + var size = Number(primaryFile(s).size || 0) || 0; + return size <= minSize + sizeTolerance || fileHasUpgradeToken(s); + }); + }); + if (enabled("step_06_older_date")) step("step_06_older_date", function (arr) { + return eliminateByEarliestDateWithBuffer(arr); + }); + if (enabled("step_07_more_groups")) step("step_07_more_groups", function (arr) { + return eliminateByMetric(arr, function (s) { return (s.groups || []).length; }, "max"); + }); + if (enabled("step_08_has_stashid")) step("step_08_has_stashid", function (arr) { + return eliminateByMetric( + arr, + function (s) { return (s.stash_ids || []).length > 0 ? 
1 : 0; }, + "max" + ); + }); + if (enabled("step_09_more_performers")) step("step_09_more_performers", function (arr) { + return eliminateByMetric(arr, function (s) { return (s.performers || []).length; }, "max"); + }); + if (enabled("step_10_more_markers")) step("step_10_more_markers", function (arr) { + return eliminateByMetric(arr, function (s) { return (s.scene_markers || []).length; }, "max"); + }); + if (enabled("step_11_more_tags")) step("step_11_more_tags", function (arr) { + return eliminateByMetric(arr, function (s) { return (s.tags || []).length; }, "max"); + }); + if (enabled("step_12_less_associated_files")) step("step_12_less_associated_files", function (arr) { + return eliminateByMetric(arr, function (s) { return (s.files || []).length; }, "min"); + }); + if (enabled("step_13_more_metadata_cardinality")) step("step_13_more_metadata_cardinality", function (arr) { + return eliminateByMetric(arr, metadataCardinality, "max"); + }); + // Step 14 is intentionally always on as deterministic fallback. + step("step_14_scene_id", function (arr) { + return eliminateByMetric( + arr, + function (s) { return parseInt(String(s.id), 10) || 0; }, + "min" + ); + }); + + var keeper = candidates[0] || group[0]; + return { keeper: keeper, decisionCode: decision }; + } + + function groupEntries(scene) { + return (scene.groups || []) + .map(function (g) { + if (!g || !g.group || g.group.id == null) return null; + return { + id: String(g.group.id), + index: g.scene_index == null ? 
null : Number(g.scene_index), + }; + }) + .filter(function (x) { return !!x; }); + } + + function containsAllGroupEntries(keeper, other) { + var k = groupEntries(keeper); + var n = groupEntries(other); + return n.every(function (ne) { + return k.some(function (ke) { + return ke.id === ne.id && ke.index === ne.index; + }); + }); + } + + function missingGroupEntries(keeper, other) { + var k = groupEntries(keeper); + var n = groupEntries(other); + return n.filter(function (ne) { + return !k.some(function (ke) { + return ke.id === ne.id && ke.index === ne.index; + }); + }); + } + + function performerIds(scene) { + var seen = {}; + return (scene.performers || []) + .map(function (p) { + if (!p || p.id == null) return null; + return String(p.id); + }) + .filter(function (id) { + if (!id || seen[id]) return false; + seen[id] = true; + return true; + }); + } + + function missingPerformerIds(keeper, other) { + var kIds = performerIds(keeper); + var nIds = performerIds(other); + return nIds.filter(function (id) { + return kIds.indexOf(id) === -1; + }); + } + + function decisionReasonFromCode(code) { + var map = { + step_01_total_pixels: "keeper selected by highest total pixel resolution.", + step_02_framerate: "keeper selected by highest framerate.", + step_03_codec: "keeper selected by best codec tier.", + step_upgrade_token: "keeper selected by upgrade token preference.", + step_04_duration: "keeper selected by greater duration.", + step_05_smaller_size: "keeper selected by smaller file size.", + step_06_older_date: "keeper selected by older scene date.", + step_07_more_groups: "keeper selected by greater group count.", + step_08_has_stashid: "keeper selected by stash ID presence.", + step_09_more_performers: "keeper selected by greater performer count.", + step_10_more_markers: "keeper selected by greater marker count.", + step_11_more_tags: "keeper selected by greater tag count.", + step_12_less_associated_files: "keeper selected by fewer associated files.", + 
step_13_more_metadata_cardinality: "keeper selected by richer metadata cardinality.", + step_14_scene_id: "keeper selected by deterministic scene ID tie-break.", + }; + return map[code] || "keeper selected by deterministic rule ordering."; + } + + function evaluateNonKeeperProtection(keeper, nonKeeper) { + var res = { + markForDeletion: true, + markParentForSync: false, + exceptions: [], + }; + var toggles = state.protectionToggles || defaultProtectionToggles(); + function enabled(key) { + return toggles[key] !== false; + } + + if (enabled("protect_o_count") && (nonKeeper.o_counter || 0) > 0) { + res.markForDeletion = false; + res.exceptions.push("protect_o_count"); + } + var hasIgnoreSmartResolveTag = (nonKeeper.tags || []).some(function (t) { + return ( + t && + t.name != null && + String(t.name).trim().toLowerCase() === "ignore:smart resolve" + ); + }); + if (enabled("protect_ignore_smart_resolve_tag") && hasIgnoreSmartResolveTag) { + res.markForDeletion = false; + res.exceptions.push("protect_ignore_smart_resolve_tag"); + } + if (enabled("protect_group_association") && !containsAllGroupEntries(keeper, nonKeeper)) { + res.markForDeletion = false; + res.markParentForSync = true; + res.exceptions.push("protect_group_association"); + } + + var missingPerfs = missingPerformerIds(keeper, nonKeeper); + if (enabled("protect_performer_mismatch") && missingPerfs.length > 0) { + res.markForDeletion = false; + res.markParentForSync = true; + res.exceptions.push("protect_performer_mismatch"); + } + + var nStashed = (nonKeeper.stash_ids || []).length > 0; + var kTags = (keeper.tags || []).length; + var nTags = (nonKeeper.tags || []).length; + if (enabled("protect_tag_loss_gt_1_non_stashed") && !nStashed && nTags - kTags > 1) { + res.markForDeletion = false; + res.markParentForSync = true; + res.exceptions.push("protect_tag_loss_gt_1_non_stashed"); + } + + var kd = parseDateForComparison(keeper.date); + var nd = parseDateForComparison(nonKeeper.date); + var keeperRaw = 
keeper.date; + var nonRaw = nonKeeper.date; + if ( + enabled("protect_older_date") && + ((keeperRaw == null && nonRaw != null) || kd - nd > EARLIER_DATE_BUFFER_MS) + ) { + res.markForDeletion = false; + res.markParentForSync = true; + res.exceptions.push("protect_older_date"); + } + return res; + } + + function formatExceptionMessages(keeper, nonKeeper, exceptions) { + if (!exceptions || !exceptions.length) return []; + return exceptions.map(function (code) { + if (code === "protect_o_count") { + return "Non-keeper has O-count and is protected from deletion."; + } + if (code === "protect_ignore_smart_resolve_tag") { + return 'Target is tagged "Ignore:Smart Resolve" and is protected from deletion.'; + } + if (code === "protect_group_association") { + var missing = missingGroupEntries(keeper, nonKeeper); + var details = missing.length + ? missing + .map(function (m) { + return m.id + ":" + (m.index == null ? "null" : String(m.index)); + }) + .join(", ") + : "unknown"; + return "Target has unmatched group associations (" + details + ")."; + } + if (code === "protect_performer_mismatch") { + var missingPerfIds = missingPerformerIds(keeper, nonKeeper); + return missingPerfIds.length + ? "Target has unmatched performer IDs (" + missingPerfIds.join(", ") + ")." + : "Target has unmatched performer IDs."; + } + if (code === "protect_tag_loss_gt_1_non_stashed") { + var kTags = (keeper.tags || []).length; + var nTags = (nonKeeper.tags || []).length; + return ( + "Target has more than 1 additional tag than keeper (" + + nTags + + " vs " + + kTags + + ")." 
+ ); + } + if (code === "protect_older_date") { + return "Target has an older date than keeper."; + } + return code; + }); + } + + function buildPlan() { + var groups = state.groups; + if (!groups || !groups.length) + return { entries: [], checks: {}, reasonsBySceneId: {}, syncRecommendedTargets: {} }; + var vis = visibleGroups(groups); + var entries = []; + var checks = {}; + var reasonsBySceneId = {}; + var syncRecommendedTargets = {}; + var unresolvedHighlightSceneIds = {}; + vis.forEach(function (group, gi) { + if (!group || group.length < 2) return; + var keeperDecision = chooseKeeperBySpec(group); + var keeper = keeperDecision.keeper; + var baseReason = decisionReasonFromCode(keeperDecision.decisionCode); + var nonKeepers = group.filter(function (s) { return s.id !== keeper.id; }); + var deleteIds = []; + var keeperNeedsSync = false; + nonKeepers.forEach(function (loser) { + var pr = evaluateNonKeeperProtection(keeper, loser); + if (pr.markForDeletion) deleteIds.push(loser.id); + else checks[loser.id] = false; + if (pr.markParentForSync || pr.exceptions.length) keeperNeedsSync = true; + var loserReason = baseReason; + if (pr.exceptions.length) { + var pretty = formatExceptionMessages(keeper, loser, pr.exceptions); + loserReason += + " Exceptions: " + + pretty.join(" ") + + ". Recommend synch data from duplicate."; + } + reasonsBySceneId[String(loser.id)] = loserReason; + }); + if (keeperNeedsSync) { + syncRecommendedTargets[String(keeper.id)] = true; + group.forEach(function (s) { + unresolvedHighlightSceneIds[String(s.id)] = true; + }); + } + entries.push({ + setNumber: gi + 1, + keeperId: keeper.id, + deleteIds: deleteIds, + reason: + baseReason + + (keeperNeedsSync ? " Recommend synch data from duplicate." 
: ""), + }); + group.forEach(function (s) { + if (s.id === keeper.id) { + checks[s.id] = false; + return; + } + if (!Object.prototype.hasOwnProperty.call(checks, s.id)) { + checks[s.id] = deleteIds.indexOf(s.id) !== -1; + } + }); + }); + return { + entries: entries, + checks: checks, + reasonsBySceneId: reasonsBySceneId, + syncRecommendedTargets: syncRecommendedTargets, + unresolvedHighlightSceneIds: unresolvedHighlightSceneIds, + }; + } + + function ensureMatchSetAnchors() { + var root = document.getElementById(ROOT_ID); + if (!root) return; + var rows = root.querySelectorAll("table.duplicate-checker-table tbody tr"); + var setNum = 0; + Array.prototype.forEach.call(rows, function (tr) { + if (tr.classList.contains("duplicate-group")) { + setNum += 1; + tr.id = "dr-match-set-" + setNum; + } + }); + } + + function escapeHtml(text) { + return String(text) + .replace(/&/g, "&") + .replace(//g, ">") + .replace(/\"/g, """) + .replace(/'/g, "'"); + } + + function renderPreviewHtml(plan) { + if (!plan || !plan.entries || !plan.entries.length) { + return "No duplicate pairs on this results page."; + } + return plan.entries + .map(function (e) { + return ( + '' + + "Match Set " + + e.setNumber + + "" + + ": KEEP " + + escapeHtml(e.keeperId) + + " | Select for DELETE " + + escapeHtml(e.deleteIds.join(", ")) + + " | Reason: " + + escapeHtml(e.reason) + ); + }) + .join("
"); + } + + function bindPreviewLinks(previewEl) { + var links = previewEl.querySelectorAll(".dr-match-link"); + Array.prototype.forEach.call(links, function (a) { + a.addEventListener("click", function (ev) { + ev.preventDefault(); + var id = a.getAttribute("data-target"); + if (!id) return; + var target = document.getElementById(id); + if (!target) return; + target.scrollIntoView({ behavior: "smooth", block: "center" }); + }); + }); + } + + function sceneIdFromRow(tr) { + var a = tr.querySelector('td a[href^="/scenes/"]'); + if (!a || !a.getAttribute("href")) return null; + var m = a.getAttribute("href").match(/\/scenes\/(\d+)/); + return m ? m[1] : null; + } + + function setCheckboxForRow(tr, wantChecked) { + var inp = tr.querySelector("input[type=checkbox]"); + if (!inp) return; + var cur = !!inp.checked; + if (cur !== wantChecked) { + inp.click(); + } + } + + function applyChecks(checkMap) { + var root = document.getElementById(ROOT_ID); + if (!root) return; + var rows = root.querySelectorAll("table.duplicate-checker-table tbody tr"); + rows.forEach(function (tr) { + if (tr.classList.contains("separator")) return; + var sid = sceneIdFromRow(tr); + if (!sid || !Object.prototype.hasOwnProperty.call(checkMap, sid)) return; + setCheckboxForRow(tr, checkMap[sid]); + }); + } + + function clearInlineReasons() { + var root = document.getElementById(ROOT_ID); + if (!root) return; + root.querySelectorAll(".dr-inline-reason").forEach(function (el) { + el.remove(); + }); + root + .querySelectorAll("table.duplicate-checker-table tbody tr.dr-unresolved-highlight") + .forEach(function (tr) { + tr.classList.remove("dr-unresolved-highlight"); + }); + } + + function renderInlineReasons(plan) { + var root = document.getElementById(ROOT_ID); + if (!root) return; + clearInlineReasons(); + if (!plan || !plan.reasonsBySceneId) return; + var highlightMap = (plan && plan.unresolvedHighlightSceneIds) || {}; + + var rows = root.querySelectorAll("table.duplicate-checker-table tbody tr"); 
+ rows.forEach(function (tr) { + if (tr.classList.contains("separator")) return; + var sid = sceneIdFromRow(tr); + if (!sid) return; + if (highlightMap[String(sid)]) { + tr.classList.add("dr-unresolved-highlight"); + } + var reason = plan.reasonsBySceneId[String(sid)]; + if (!reason) return; + + var titleCell = tr.querySelector("td.text-left"); + if (!titleCell) return; + var p = document.createElement("p"); + p.className = "scene-path dr-inline-reason"; + p.textContent = "Smart Resolve: " + reason; + titleCell.appendChild(p); + }); + } + + function renderSyncRecommendations(plan) { + var root = document.getElementById(ROOT_ID); + if (!root) return; + var targets = (plan && plan.syncRecommendedTargets) || {}; + root.querySelectorAll(".duplicate-resolver-sync-btn").forEach(function (btn) { + var sid = String(btn.getAttribute("data-scene-id") || ""); + var recommend = !!targets[sid]; + var desiredLabel = recommend ? "Sync rec." : "Sync data"; + var desiredTitle = recommend + ? "Recommended: sync data from duplicate into this scene." + : ""; + var hasWarning = btn.classList.contains("btn-warning"); + var hasSecondary = btn.classList.contains("btn-secondary"); + var classMismatch = recommend + ? !hasWarning || hasSecondary + : hasWarning || !hasSecondary; + + // Only mutate DOM if state actually changed (prevents observer churn loops). + if (classMismatch) { + btn.classList.remove("btn-secondary", "btn-warning"); + btn.classList.add(recommend ? 
"btn-warning" : "btn-secondary"); + } + if (btn.textContent !== desiredLabel) btn.textContent = desiredLabel; + if ((btn.getAttribute("title") || "") !== desiredTitle) btn.setAttribute("title", desiredTitle); + }); + } + + function buildSmartResolveChecks(plan) { + var checks = {}; + if (!plan || !plan.entries || !plan.entries.length) return checks; + var syncTargets = (plan && plan.syncRecommendedTargets) || {}; + plan.entries.forEach(function (entry) { + var keeperId = String(entry.keeperId); + if (syncTargets[keeperId]) return; + (entry.deleteIds || []).forEach(function (id) { + checks[String(id)] = true; + }); + }); + return checks; + } + + function unresolvedInfo(plan) { + var info = { count: 0, firstSetNumber: null }; + if (!plan || !plan.entries || !plan.entries.length) return info; + var syncTargets = (plan && plan.syncRecommendedTargets) || {}; + plan.entries.forEach(function (entry) { + if (!syncTargets[String(entry.keeperId)]) return; + info.count += 1; + if (info.firstSetNumber == null) info.firstSetNumber = entry.setNumber; + }); + return info; + } + + function updateUnresolvedButton(plan, show) { + var btn = document.getElementById("dr-btn-unresolved"); + if (!btn) return; + if (!show) { + btn.hidden = true; + btn.disabled = true; + btn.removeAttribute("data-target-set"); + return; + } + + var info = unresolvedInfo(plan); + btn.hidden = false; + btn.textContent = info.count + " Unresolved"; + btn.disabled = info.count === 0; + if (info.firstSetNumber == null) { + btn.removeAttribute("data-target-set"); + btn.setAttribute("title", "No unresolved sync recommendations on this page."); + } else { + btn.setAttribute("data-target-set", String(info.firstSetNumber)); + btn.setAttribute("title", "Jump to first unresolved match set."); + } + } + + function setProcessingIndicator(mode) { + var el = document.getElementById("dr-processing-indicator"); + if (!el) return; + var spinner = el.querySelector(".dr-processing-spinner"); + var bar = 
el.querySelector(".dr-processing-bar"); + var label = el.querySelector(".dr-processing-label"); + var normalized = mode === "bar" || mode === "spinner" ? mode : "none"; + if (normalized === "none") { + el.hidden = true; + return; + } + el.hidden = false; + if (spinner) spinner.hidden = normalized !== "spinner"; + if (bar) bar.hidden = normalized !== "bar"; + if (label) label.textContent = "Processing…"; + } + + function goToFirstUnresolved(plan) { + ensureMatchSetAnchors(); + var info = unresolvedInfo(plan); + if (info.firstSetNumber == null) return; + var target = document.getElementById("dr-match-set-" + info.firstSetNumber); + if (target) target.scrollIntoView({ behavior: "smooth", block: "center" }); + } + + function renderPlanDetailsIntoDrawer(plan) { + var prev = document.getElementById("dr-preview-out"); + if (!prev) return; + prev.innerHTML = renderPreviewHtml(plan); + bindPreviewLinks(prev); + } + + function ensureCoreSelectSmartResolveOption() { + var root = document.getElementById(ROOT_ID); + if (!root) return; + var menuItems = root.querySelectorAll(".dropdown-menu .dropdown-item"); + if (!menuItems || !menuItems.length) return; + + var anchor = null; + menuItems.forEach(function (item) { + if ((item.textContent || "").trim() === "Select None") anchor = item; + }); + if (!anchor) return; + + var menu = anchor.closest(".dropdown-menu"); + if (!menu || menu.querySelector("#dr-smart-resolve-option")) return; + + var btn = document.createElement("button"); + btn.type = "button"; + btn.id = "dr-smart-resolve-option"; + btn.className = "dropdown-item"; + btn.textContent = "Select Smart Resolve"; + btn.onclick = async function () { + setProcessingIndicator("spinner"); + try { + // Always refresh to avoid stale state after SPA table changes (pagination/deletes). + await loadDuplicateGroups(); + // Use URL page-size for indicator mode. If absent/unparseable, assume 20. 
+ var pageSize = parseParams().size || 20; + if (pageSize > 20) { + setProcessingIndicator("bar"); + } + state.smartResolveUiActive = true; + // Let the processing indicator paint before running heavier rule evaluation. + await new Promise(function (resolve) { + requestAnimationFrame(resolve); + }); + state.lastPlan = buildPlan(); + ensureMatchSetAnchors(); + renderPlanDetailsIntoDrawer(state.lastPlan); + renderInlineReasons(state.lastPlan); + renderSyncRecommendations(state.lastPlan); + applyChecks(buildSmartResolveChecks(state.lastPlan)); + updateUnresolvedButton(state.lastPlan, true); + setSmartResolveDetailsVisible(true, false); + } catch (e) { + notifyStashError(e); + } finally { + setProcessingIndicator("none"); + } + }; + anchor.parentNode.insertBefore(btn, anchor.nextSibling); + } + + function placeToolbarButtonsInCoreRow() { + var root = document.getElementById(ROOT_ID); + if (!root) return; + var bar = document.getElementById("duplicate-resolver-toolbar"); + if (!bar) return; + var unresolvedBtn = bar.querySelector("#dr-btn-unresolved"); + var processingIndicator = bar.querySelector("#dr-processing-indicator"); + var autoBtn = bar.querySelector("#dr-btn-apply"); + var resetBtn = bar.querySelector("#dr-btn-reset"); + if (!unresolvedBtn || !processingIndicator || !autoBtn || !resetBtn) return; + var toggle = root.querySelector(".dropdown .dropdown-toggle"); + if (!toggle || !toggle.parentNode) return; + + var host = document.getElementById("dr-core-actions"); + if (!host) { + host = document.createElement("span"); + host.id = "dr-core-actions"; + host.className = "dr-core-actions"; + toggle.parentNode.insertBefore(host, toggle.nextSibling); + } + + host.appendChild(unresolvedBtn); + host.appendChild(processingIndicator); + host.appendChild(resetBtn); + host.appendChild(autoBtn); + } + + function setSmartResolveDetailsVisible(show, expandDrawer) { + var bar = document.getElementById("duplicate-resolver-toolbar"); + if (!bar) return; + var drawerToggle = 
bar.querySelector("#dr-drawer-toggle"); + var drawerPanel = bar.querySelector("#dr-drawer-panel"); + var resetBtn = bar.querySelector("#dr-btn-reset"); + var unresolvedBtn = bar.querySelector("#dr-btn-unresolved"); + var processingIndicator = bar.querySelector("#dr-processing-indicator"); + if (!drawerToggle || !drawerPanel) return; + + bar.hidden = !show; + drawerToggle.hidden = !show; + if (resetBtn) resetBtn.hidden = !show; + if (unresolvedBtn) unresolvedBtn.hidden = !show; + if (processingIndicator) processingIndicator.hidden = !show; + if (!show) { + state.smartResolveUiActive = false; + drawerPanel.hidden = true; + drawerToggle.setAttribute("aria-expanded", "false"); + drawerToggle.textContent = "Match Details: \u25b6"; + updateUnresolvedButton(null, false); + return; + } + + if (expandDrawer) { + drawerPanel.hidden = false; + drawerToggle.setAttribute("aria-expanded", "true"); + drawerToggle.textContent = "Match Details: \u25bc"; + } + } + + async function loadPluginSetting() { + try { + var data = await gql( + "query DrCfg { configuration { plugins } }" + ); + var plug = data.configuration && data.configuration.plugins; + var cfg = null; + if (plug && typeof plug === "object") { + cfg = plug[PLUGIN_ID] || null; + if (!cfg) { + var k = Object.keys(plug).find(function (key) { + return String(key).toLowerCase() === String(PLUGIN_ID).toLowerCase(); + }); + if (k) cfg = plug[k]; + } + } + if (cfg && typeof cfg === "object") { + var v = cfg.autoCheckAfterSync; + if (v === true || v === "true") state.autoCheckDefault = true; + else if (v === false || v === "false") state.autoCheckDefault = false; + function boolOrDefault(key, fallback) { + var raw = cfg[key]; + if (raw === true || raw === "true") return true; + if (raw === false || raw === "false") return false; + return fallback; + } + state.ruleToggles = { + step_01_total_pixels: !boolOrDefault("ignoreRule01TotalPixels", false), + step_02_framerate: !boolOrDefault("ignoreRule02Framerate", false), + 
step_03_codec: !boolOrDefault("ignoreRule03Codec", false), + step_upgrade_token: !boolOrDefault("ignoreRule05bUpgradeToken", false), + step_04_duration: !boolOrDefault("ignoreRule04Duration", false), + step_05_smaller_size: !boolOrDefault("ignoreRule05SmallerSize", false), + step_06_older_date: !boolOrDefault("ignoreRule06OlderDate", false), + step_07_more_groups: !boolOrDefault("ignoreRule07MoreGroups", false), + step_08_has_stashid: !boolOrDefault("ignoreRule08HasStashId", false), + step_09_more_performers: !boolOrDefault("ignoreRule09MorePerformers", false), + step_10_more_markers: !boolOrDefault("ignoreRule10MoreMarkers", false), + step_11_more_tags: !boolOrDefault("ignoreRule11MoreTags", false), + step_12_less_associated_files: !boolOrDefault("ignoreRule12LessAssociatedFiles", false), + step_13_more_metadata_cardinality: !boolOrDefault( + "ignoreRule13MoreMetadataCardinality", + false + ), + }; + state.protectionToggles = { + protect_o_count: !boolOrDefault("unprotectAOCount", false), + protect_group_association: !boolOrDefault("unprotectBGroupAssociation", false), + protect_performer_mismatch: !boolOrDefault("unprotectCPerformerMismatch", false), + protect_tag_loss_gt_1_non_stashed: !boolOrDefault("unprotectDTagLossGt1NonStashed", false), + protect_older_date: !boolOrDefault("unprotectEOlderDate", false), + protect_ignore_smart_resolve_tag: !boolOrDefault("unprotectFIgnoreSmartResolveTag", false), + }; + } + } catch (e) { + state.autoCheckDefault = true; + state.ruleToggles = defaultRuleToggles(); + state.protectionToggles = defaultProtectionToggles(); + } + } + + function mergeIds(target, additions) { + var set = new Set(); + (target || []).forEach(function (x) { set.add(String(x)); }); + (additions || []).forEach(function (x) { set.add(String(x)); }); + return Array.from(set); + } + + function mergeStashIds(target, additions) { + var map = new Map(); + (target || []).forEach(function (s) { + if (!s || !s.endpoint || !s.stash_id) return; + // Stash enforces 
UNIQUE(scene_id, endpoint): keep one stash_id per endpoint. + // Prefer existing target value when endpoint already exists. + if (!map.has(s.endpoint)) { + map.set(s.endpoint, { endpoint: s.endpoint, stash_id: s.stash_id }); + } + }); + (additions || []).forEach(function (s) { + if (!s || !s.endpoint || !s.stash_id) return; + if (!map.has(s.endpoint)) { + map.set(s.endpoint, { endpoint: s.endpoint, stash_id: s.stash_id }); + } + }); + return Array.from(map.values()).map(function (s) { + return { endpoint: s.endpoint, stash_id: s.stash_id }; + }); + } + + /** + * Image URL/base64 for the scene *cover* (UI + sceneUpdate `cover_image`). + * Stash `/scene/{id}/webp` is an animated *stream preview*, not cover — do not use. + * `/scene/{id}/screenshot` is served from the cover store first (see Stash SceneServer.ServeScreenshot). + */ + function sceneCoverDataUrl(scene) { + if (!scene) return ""; + var c = scene.cover_image; + if (c && String(c).trim()) return String(c).trim(); + var p = scene.paths || {}; + var shot = p.screenshot; + return shot && String(shot).trim() ? String(shot).trim() : ""; + } + + function sceneResolution(scene) { + var f = primaryFile(scene); + var w = Number(f.width || 0) || 0; + var h = Number(f.height || 0) || 0; + return { width: w, height: h, totalPixels: w * h }; + } + + function sceneResolutionLabel(scene) { + var r = sceneResolution(scene); + if (r.width > 0 && r.height > 0) { + return r.width + "px x " + r.height + "px"; + } + return "resolution unknown"; + } + + function mergeGroups(target, additions) { + var map = new Map(); + (target || []).forEach(function (g) { + map.set(String(g.group.id), { + group_id: g.group.id, + scene_index: g.scene_index != null ? g.scene_index : null, + }); + }); + (additions || []).forEach(function (g) { + var id = String(g.group.id); + if (!map.has(id)) + map.set(id, { + group_id: g.group.id, + scene_index: g.scene_index != null ? 
g.scene_index : null, + }); + }); + return Array.from(map.values()); + } + + /** Union groups from target + sources (scene `groups` shape). */ + function collectMergedGroups(target, sources, enabled) { + if (!enabled) return mergeGroups(target.groups, []); + var map = new Map(); + function addAll(arr) { + (arr || []).forEach(function (g) { + var id = String(g.group.id); + if (!map.has(id)) + map.set(id, { + group_id: g.group.id, + scene_index: g.scene_index != null ? g.scene_index : null, + }); + }); + } + addAll(target.groups); + sources.forEach(function (s) { + addAll(s.groups); + }); + return Array.from(map.values()); + } + + function buildSceneUpdateInput(target, sources, opt) { + var tag_ids = (target.tags || []).map(function (t) { return t.id; }); + var performer_ids = (target.performers || []).map(function (t) { return t.id; }); + var gallery_ids = (target.galleries || []).map(function (t) { return t.id; }); + var urls = (target.urls || []).slice(); + var stash_ids = target.stash_ids || []; + + sources.forEach(function (src) { + if (opt.tags) + tag_ids = mergeIds( + tag_ids, + (src.tags || []).map(function (t) { return t.id; }) + ); + if (opt.performers) + performer_ids = mergeIds( + performer_ids, + (src.performers || []).map(function (t) { return t.id; }) + ); + if (opt.galleries) + gallery_ids = mergeIds( + gallery_ids, + (src.galleries || []).map(function (t) { return t.id; }) + ); + if (opt.urls) { + (src.urls || []).forEach(function (u) { + if (urls.indexOf(u) === -1) urls.push(u); + }); + } + if (opt.stash_ids) + stash_ids = mergeStashIds(stash_ids, src.stash_ids || []); + }); + + var groups = collectMergedGroups(target, sources, opt.groups); + + var input = { + id: target.id, + tag_ids: tag_ids, + performer_ids: performer_ids, + gallery_ids: gallery_ids, + groups: groups, + urls: urls, + stash_ids: stash_ids, + }; + + function hasText(v) { + return !!String(v || "").trim(); + } + function sceneHasStashId(s) { + return !!((s && s.stash_ids && 
s.stash_ids.length) || 0); + } + function dateUpperBoundParts(raw) { + if (!raw || !String(raw).trim()) return null; + var m = String(raw).trim().match(/^(\d{4})(?:-(\d{2})(?:-(\d{2}))?)?$/); + if (!m) return null; + var y = parseInt(m[1], 10); + var mo = m[2] ? parseInt(m[2], 10) : 12; + var d; + if (m[3]) { + d = parseInt(m[3], 10); + } else { + d = new Date(y, mo, 0).getDate(); + } + return [y, mo, d]; + } + function isDateBefore(a, b) { + var pa = dateUpperBoundParts(a); + var pb = dateUpperBoundParts(b); + if (!pa && !pb) return false; + if (!pa) return false; + if (!pb) return true; + if (pa[0] !== pb[0]) return pa[0] < pb[0]; + if (pa[1] !== pb[1]) return pa[1] < pb[1]; + return pa[2] < pb[2]; + } + function pickSourceValue(field) { + var candidates = sources.filter(function (s) { + if (field === "studio") return !!(s.studio && s.studio.id); + if (field === "cover_image") return !!sceneCoverDataUrl(s); + return hasText(s[field]); + }); + if (!candidates.length) return null; + if (field === "date") { + return candidates.reduce(function (best, cur) { + return isDateBefore(cur.date, best.date) ? cur : best; + }).date; + } + if (field === "cover_image") { + var bestCover = candidates.reduce(function (best, cur) { + var b = sceneResolution(best).totalPixels; + var c = sceneResolution(cur).totalPixels; + if (c !== b) return c > b ? cur : best; + var bestStash = sceneHasStashId(best) ? 1 : 0; + var curStash = sceneHasStashId(cur) ? 1 : 0; + if (curStash !== bestStash) return curStash > bestStash ? 
cur : best; + return best; + }); + return sceneCoverDataUrl(bestCover); + } + var stashPreferred = candidates.find(sceneHasStashId); + var chosen = stashPreferred || candidates[0]; + if (field === "studio") return chosen.studio.id; + if (field === "cover_image") return sceneCoverDataUrl(chosen); + return chosen[field]; + } + + var scalarWins = opt.scalarWins || {}; + if (scalarWins.title === "source") { + var srcTitle = pickSourceValue("title"); + if (hasText(srcTitle)) input.title = srcTitle; + } + if (scalarWins.code === "source") { + var srcCode = pickSourceValue("code"); + if (hasText(srcCode)) input.code = srcCode; + } + if (scalarWins.director === "source") { + var srcDirector = pickSourceValue("director"); + if (hasText(srcDirector)) input.director = srcDirector; + } + if (scalarWins.details === "source") { + var srcDetails = pickSourceValue("details"); + if (hasText(srcDetails)) input.details = srcDetails; + } + if (scalarWins.date === "source") { + var srcDate = pickSourceValue("date"); + if (hasText(srcDate)) input.date = srcDate; + } + if (scalarWins.studio === "source") { + var srcStudio = pickSourceValue("studio"); + if (srcStudio) input.studio_id = srcStudio; + } + if (scalarWins.cover_image === "source") { + var srcCover = pickSourceValue("cover_image"); + if (hasText(srcCover)) input.cover_image = srcCover; + } + + return input; + } + + /** + * Stash resolves `cover_image` URLs on the *server*. If the server cannot + * reach its public hostname (split DNS / hairpin), fetch here in the browser + * and send base64 data instead. 
+ */ + function absolutizeMediaUrl(u) { + var s = String(u || "").trim(); + if (!s) return s; + if (s.indexOf("/") === 0) return window.location.origin + s; + return s; + } + + function fetchUrlAsDataUrl(url) { + var abs = absolutizeMediaUrl(url); + return fetch(abs, { credentials: "include" }).then(function (res) { + if (!res.ok) + throw new Error("Could not load cover image (" + res.status + ")"); + return res.blob(); + }).then(function (blob) { + return new Promise(function (resolve, reject) { + var r = new FileReader(); + r.onload = function () { + resolve(r.result); + }; + r.onerror = function () { + reject(new Error("Could not read cover image data")); + }; + r.readAsDataURL(blob); + }); + }); + } + + async function inlineRemoteCoverImages(input) { + var c = input && input.cover_image; + if (!c || typeof c !== "string") return; + var t = c.trim(); + if (!t) return; + if (t.toLowerCase().indexOf("data:image") === 0) return; + if ( + t.indexOf("http://") === 0 || + t.indexOf("https://") === 0 || + t.indexOf("/") === 0 + ) { + input.cover_image = await fetchUrlAsDataUrl(t); + } + } + + async function runSceneUpdate(input) { + var mut = + "mutation DrSceneUpdate($input: SceneUpdateInput!) 
{ sceneUpdate(input: $input) { id } }"; + await gql(mut, { input: input }); + } + + function showModal(target, group) { + var sources = group.filter(function (s) { return s.id !== target.id; }); + var overlay = document.createElement("div"); + overlay.id = "duplicate-resolver-modal-overlay"; + + var autoId = "dr-auto-check"; + var opt = { + tags: true, + performers: true, + groups: true, + galleries: true, + urls: true, + stash_ids: true, + scalarWins: {}, + }; + + function hasText(v) { + return !!String(v || "").trim(); + } + function sceneHasStashId(s) { + return !!((s && s.stash_ids && s.stash_ids.length) || 0); + } + function dateUpperBoundParts(raw) { + if (!raw || !String(raw).trim()) return null; + var m = String(raw).trim().match(/^(\d{4})(?:-(\d{2})(?:-(\d{2}))?)?$/); + if (!m) return null; + var y = parseInt(m[1], 10); + var mo = m[2] ? parseInt(m[2], 10) : 12; + var d; + if (m[3]) { + d = parseInt(m[3], 10); + } else { + d = new Date(y, mo, 0).getDate(); + } + return [y, mo, d]; + } + function isDateBefore(a, b) { + var pa = dateUpperBoundParts(a); + var pb = dateUpperBoundParts(b); + if (!pa && !pb) return false; + if (!pa) return false; + if (!pb) return true; + if (pa[0] !== pb[0]) return pa[0] < pb[0]; + if (pa[1] !== pb[1]) return pa[1] < pb[1]; + return pa[2] < pb[2]; + } + function pickSourceScene(field) { + var candidates = sources.filter(function (s) { + if (field === "studio") return !!(s.studio && s.studio.id); + if (field === "cover_image") return !!sceneCoverDataUrl(s); + return hasText(s[field]); + }); + if (!candidates.length) return null; + if (field === "date") { + return candidates.reduce(function (best, cur) { + return isDateBefore(cur.date, best.date) ? cur : best; + }); + } + if (field === "cover_image") { + return candidates.reduce(function (best, cur) { + var b = sceneResolution(best).totalPixels; + var c = sceneResolution(cur).totalPixels; + if (c !== b) return c > b ? cur : best; + var bestStash = sceneHasStashId(best) ? 
1 : 0; + var curStash = sceneHasStashId(cur) ? 1 : 0; + if (curStash !== bestStash) return curStash > bestStash ? cur : best; + return best; + }); + } + var stashPreferred = candidates.find(sceneHasStashId); + return stashPreferred || candidates[0]; + } + function sourceValueForField(field) { + var s = pickSourceScene(field); + if (!s) return ""; + if (field === "studio") { + if (s.studio && s.studio.name) return s.studio.name; + if (s.studio && s.studio.id) return "Studio " + s.studio.id; + return ""; + } + if (field === "cover_image") return sceneCoverDataUrl(s); + return String(s[field] || "").trim(); + } + function defaultScalarWinner(field) { + var targetHas = sceneHasStashId(target); + var sourceHasAny = sources.some(sceneHasStashId); + if (field === "date") { + var targetDate = String(target.date || "").trim(); + var sourceDate = sourceValueForField("date"); + if (!targetDate && sourceDate) return "source"; + if (targetDate && sourceDate && isDateBefore(sourceDate, targetDate)) return "source"; + return "dest"; + } + if (field === "cover_image") { + var tc = sceneCoverDataUrl(target); + var sc = sourceValueForField("cover_image"); + var sourceScene = pickSourceScene("cover_image"); + var sourcePixels = sourceScene ? sceneResolution(sourceScene).totalPixels : 0; + var targetPixels = sceneResolution(target).totalPixels; + if (sourcePixels > targetPixels) return "source"; + if (targetPixels > sourcePixels) return "dest"; + if (!tc && sc) return "source"; + if (!targetHas && sourceHasAny && sc) return "source"; + return "dest"; + } + // For text/scalar fields (including title/details), prefer source when destination is blank. + var targetFieldHasValue = + field === "studio" + ? 
!!(target.studio && target.studio.id) + : hasText(target[field]); + if (!targetFieldHasValue && hasText(sourceValueForField(field))) return "source"; + if (!targetHas && sourceHasAny && hasText(sourceValueForField(field))) return "source"; + return "dest"; + } + + function row(name, key) { + var lab = document.createElement("label"); + lab.className = "dr-field-title"; + var toggle = choicePrepend(!!opt[key], "Toggle " + name); + toggle.root.style.marginRight = "0.45rem"; + toggle.button.onclick = function () { + opt[key] = !opt[key]; + toggle.button.innerHTML = opt[key] + ? '' + : ''; + if (hint) hint.hidden = !opt[key]; + }; + lab.appendChild(toggle.root); + lab.appendChild(document.createTextNode(name)); + var unionKeys = { + tags: true, + performers: true, + groups: true, + galleries: true, + urls: true, + stash_ids: true, + }; + var hint = null; + if (unionKeys[key]) { + hint = document.createElement("span"); + hint.className = "dr-opt-hint"; + hint.textContent = " union all"; + hint.hidden = !opt[key]; + lab.appendChild(hint); + } + return lab; + } + + var modal = document.createElement("div"); + modal.className = "dr-modal"; + modal.innerHTML = + '"; + function textOrFallback(v, fallback) { + return v && String(v).trim() ? String(v).trim() : fallback; + } + + function groupLabel(g) { + var gid = g && g.group && g.group.id != null ? String(g.group.id) : null; + var gname = g && g.group && g.group.name ? 
String(g.group.name).trim() : ""; + if (gname) return gname; + if (gid) return "Group " + gid; + return "Group (unknown)"; + } + + function destListFor(key) { + if (key === "tags") return (target.tags || []).map(function (t) { return t.name; }); + if (key === "performers") + return (target.performers || []).map(function (x) { return x.name; }); + if (key === "groups") + return (target.groups || []).map(function (g) { + return groupLabel(g); + }); + if (key === "galleries") + return (target.galleries || []).map(function (g) { + return textOrFallback(g.title, "Gallery " + g.id); + }); + if (key === "urls") return (target.urls || []).slice(); + if (key === "stash_ids") + return (target.stash_ids || []).map(function (s) { + return s.endpoint + ":" + s.stash_id; + }); + return []; + } + + function sourceUnionFor(key) { + var set = new Set(); + sources.forEach(function (s) { + if (key === "tags") (s.tags || []).forEach(function (t) { set.add(t.name); }); + if (key === "performers") + (s.performers || []).forEach(function (x) { set.add(x.name); }); + if (key === "groups") + (s.groups || []).forEach(function (g) { + set.add(groupLabel(g)); + }); + if (key === "galleries") + (s.galleries || []).forEach(function (g) { + set.add(textOrFallback(g.title, "Gallery " + g.id)); + }); + if (key === "urls") (s.urls || []).forEach(function (u) { set.add(u); }); + if (key === "stash_ids") + (s.stash_ids || []).forEach(function (x) { set.add(x.endpoint + ":" + x.stash_id); }); + }); + return Array.from(set); + } + + function destDisplayFor(key) { + if (key === "stash_ids") { + return (target.stash_ids || []).map(function (s) { + return { + text: s.stash_id || "", + title: s.endpoint || "", + }; + }); + } + if (key !== "groups") return destListFor(key); + return (target.groups || []).map(function (g) { + var label = groupLabel(g); + var idx = + g && g.scene_index != null && String(g.scene_index).trim() + ? 
String(g.scene_index) + : "?"; + return { text: label, title: "Scene number: " + idx }; + }); + } + + function sourceUnionDisplayFor(key) { + if (key === "stash_ids") { + var map = new Map(); + sources.forEach(function (s) { + (s.stash_ids || []).forEach(function (x) { + var endpoint = x.endpoint || ""; + var stashId = x.stash_id || ""; + var k = endpoint + "\0" + stashId; + if (!map.has(k)) { + map.set(k, { + text: stashId, + title: endpoint, + }); + } + }); + }); + return Array.from(map.values()); + } + if (key !== "groups") return sourceUnionFor(key); + var byLabel = new Map(); + sources.forEach(function (s) { + (s.groups || []).forEach(function (g) { + var label = groupLabel(g); + if (!byLabel.has(label)) byLabel.set(label, new Set()); + var idx = + g && g.scene_index != null && String(g.scene_index).trim() + ? String(g.scene_index) + : "?"; + byLabel.get(label).add(idx); + }); + }); + return Array.from(byLabel.entries()).map(function (entry) { + var label = entry[0]; + var indices = Array.from(entry[1]).sort(); + return { + text: label, + title: "Scene number(s): " + indices.join(", "), + }; + }); + } + + function choicePrepend(selected, title) { + var pre = document.createElement("div"); + pre.className = "input-group-prepend"; + var btn = document.createElement("button"); + btn.type = "button"; + btn.className = "btn btn-secondary"; + btn.title = title || ""; + btn.innerHTML = selected + ? 
'' + : ''; + pre.appendChild(btn); + return { root: pre, button: btn }; + } + + function renderListControl(values, placeholder, isReadOnly) { + var wrap = document.createElement("div"); + wrap.className = "dr-list-control"; + var list = values || []; + if (!list.length) list = [""]; + list.slice(0, 20).forEach(function (v) { + var inputGroup = document.createElement("div"); + inputGroup.className = "input-group"; + var input = document.createElement("input"); + input.className = "text-input form-control"; + input.placeholder = placeholder; + if (v && typeof v === "object") { + input.value = v.text || ""; + if (v.title) input.title = v.title; + } else { + input.value = String(v || ""); + } + input.readOnly = !!isReadOnly; + inputGroup.appendChild(input); + wrap.appendChild(inputGroup); + }); + if ((values || []).length > 20) { + var more = document.createElement("div"); + more.className = "dr-empty"; + more.textContent = "+" + (values.length - 20) + " more"; + wrap.appendChild(more); + } + return wrap; + } + + function renderChipList(values, emptyText) { + var wrap = document.createElement("div"); + wrap.className = "dr-chip-list"; + if (!values || !values.length) { + var em = document.createElement("span"); + em.className = "dr-empty"; + em.textContent = emptyText; + wrap.appendChild(em); + return wrap; + } + values.forEach(function (v) { + var chip = document.createElement("span"); + chip.className = "tag-item badge badge-secondary dr-chip"; + if (v && typeof v === "object") { + chip.textContent = v.text || ""; + if (v.title) chip.title = v.title; + } else { + chip.textContent = String(v || ""); + } + wrap.appendChild(chip); + }); + return wrap; + } + + function renderScalarInput(value, placeholder, isReadOnly, multiline) { + if (multiline) { + var ta = document.createElement("textarea"); + ta.className = "bg-secondary text-white border-secondary scene-description form-control"; + ta.placeholder = placeholder; + ta.value = String(value || ""); + ta.readOnly = 
!!isReadOnly; + ta.rows = 4; + return ta; + } + var input = document.createElement("input"); + input.className = "bg-secondary text-white border-secondary form-control"; + input.placeholder = placeholder; + input.value = String(value || ""); + input.readOnly = !!isReadOnly; + return input; + } + + function appendCoverValueToGroup(group, value, placeholder) { + var outer = document.createElement("div"); + outer.className = "dr-cover-value flex-grow-1"; + var v = String(value || "").trim(); + var caption = placeholder || "Cover"; + if (!v) { + var emptyPh = document.createElement("div"); + emptyPh.className = "dr-cover-placeholder"; + emptyPh.textContent = "No cover"; + outer.appendChild(emptyPh); + var cap0 = document.createElement("div"); + cap0.className = "dr-cover-caption"; + cap0.textContent = caption; + outer.appendChild(cap0); + } else { + var wrap = document.createElement("div"); + wrap.className = "dr-cover-frame"; + var img = document.createElement("img"); + img.className = "dr-cover-thumb"; + img.alt = caption; + img.loading = "lazy"; + img.src = v; + img.onerror = function () { + wrap.style.display = "none"; + var capEl = outer.querySelector(".dr-cover-caption"); + if (capEl) capEl.style.display = "none"; + if (outer.querySelector("[data-dr-cover-fail]")) return; + var fail = document.createElement("div"); + fail.className = "dr-cover-placeholder"; + fail.setAttribute("data-dr-cover-fail", "1"); + fail.textContent = "Preview unavailable"; + outer.appendChild(fail); + }; + wrap.appendChild(img); + outer.appendChild(wrap); + var cap = document.createElement("div"); + cap.className = "dr-cover-caption"; + cap.textContent = caption; + outer.appendChild(cap); + } + group.appendChild(outer); + } + + function uniqueSorted(values) { + return Array.from(new Set((values || []).map(function (v) { return String(v); }))).sort(); + } + + function setsEqual(a, b) { + var aa = uniqueSorted(a); + var bb = uniqueSorted(b); + if (aa.length !== bb.length) return false; + 
for (var i = 0; i < aa.length; i++) { + if (aa[i] !== bb[i]) return false; + } + return true; + } + + var compare = document.createElement("div"); + compare.className = "dr-sync-compare"; + compare.innerHTML = + '
' + + '

Sources (data to union)

' + + "

" + + sources.length + + " scene(s): " + + sources.map(function (s) { return s.id; }).join(", ") + + "

" + + "
" + + '
' + + '

Destination (target scene)

' + + "

ID " + + target.id + + " - " + + textOrFallback(target.title, "(no title)") + + "

" + + "
"; + modal.appendChild(compare); + + var opts = document.createElement("div"); + opts.className = "dr-modal-options"; + function addFieldRow(label, key, desc) { + var sourceVals = []; + var destVals = []; + var sourceDisplayVals = []; + var destDisplayVals = []; + var isComparable = + key === "tags" || + key === "performers" || + key === "groups" || + key === "galleries" || + key === "urls" || + key === "stash_ids"; + + if (isComparable) { + sourceVals = sourceUnionFor(key); + destVals = destListFor(key); + sourceDisplayVals = sourceUnionDisplayFor(key); + destDisplayVals = destDisplayFor(key); + // Suppress rows where both sides are effectively the same set. + if (setsEqual(sourceVals, destVals)) return; + } + + var wrapper = document.createElement("div"); + wrapper.className = "dr-field-row"; + var top = row(label, key); + wrapper.appendChild(top); + if (desc) { + var d = document.createElement("div"); + d.className = "dr-field-desc"; + d.textContent = desc; + wrapper.appendChild(d); + } + if ( + isComparable + ) { + var useChips = key === "tags" || key === "performers" || key === "groups"; + var grid = document.createElement("div"); + grid.className = "dr-field-grid"; + var left = document.createElement("div"); + left.className = "dr-field-col"; + var right = document.createElement("div"); + right.className = "dr-field-col"; + if (useChips) { + left.appendChild(renderChipList(sourceDisplayVals, "none")); + right.appendChild(renderChipList(destDisplayVals, "none")); + } else { + left.appendChild(renderListControl(sourceDisplayVals, label, true)); + right.appendChild(renderListControl(destDisplayVals, label, true)); + } + grid.appendChild(left); + grid.appendChild(right); + wrapper.appendChild(grid); + } + opts.appendChild(wrapper); + } + // Scalar field preview (shown only when different/non-empty on at least one side). 
+ function scalarFieldRow(label, fieldKey, destValue, sourceValues) { + var srcJoined = uniqueSorted( + (sourceValues || []).filter(function (v) { return !!String(v || "").trim(); }) + ); + var destText = String(destValue || "").trim(); + var sourceText = srcJoined.join(" | "); + if (!destText && !sourceText) return; + if (sourceText && sourceText.split(" | ").indexOf(destText) !== -1 && srcJoined.length === 1) { + return; + } + + var wrapper = document.createElement("div"); + wrapper.className = "dr-field-row"; + var title = document.createElement("div"); + title.className = "dr-field-title"; + title.textContent = label; + wrapper.appendChild(title); + + var grid = document.createElement("div"); + grid.className = "dr-field-grid"; + var left = document.createElement("div"); + left.className = "dr-field-col"; + var right = document.createElement("div"); + right.className = "dr-field-col"; + var leftGroup = document.createElement("div"); + leftGroup.className = "input-group"; + var rightGroup = document.createElement("div"); + rightGroup.className = "input-group"; + var leftPre = choicePrepend(false, "Use source"); + var rightPre = choicePrepend(false, "Keep destination"); + leftGroup.appendChild(leftPre.root); + rightGroup.appendChild(rightPre.root); + var srcValue = srcJoined.length ? srcJoined[0] : ""; + leftGroup.appendChild( + renderScalarInput(srcValue, label, true, fieldKey === "details") + ); + rightGroup.appendChild( + renderScalarInput(destText, label, true, fieldKey === "details") + ); + left.appendChild(leftGroup); + right.appendChild(rightGroup); + grid.appendChild(left); + grid.appendChild(right); + wrapper.appendChild(grid); + var winner = defaultScalarWinner(fieldKey); + opt.scalarWins[fieldKey] = winner; + function setChoiceHeads() { + var destOn = opt.scalarWins[fieldKey] === "dest"; + leftPre.button.innerHTML = !destOn + ? '' + : ''; + rightPre.button.innerHTML = destOn + ? 
'' + : ''; + } + rightPre.button.onclick = function () { + opt.scalarWins[fieldKey] = "dest"; + setChoiceHeads(); + }; + leftPre.button.onclick = function () { + opt.scalarWins[fieldKey] = "source"; + setChoiceHeads(); + }; + setChoiceHeads(); + opts.appendChild(wrapper); + } + + function coverImageFieldRow() { + var srcJoined = uniqueSorted( + sources + .map(function (s) { return sceneCoverDataUrl(s); }) + .filter(function (v) { return hasText(v); }) + ); + var destText = sceneCoverDataUrl(target); + var sourceText = srcJoined.join(" | "); + if (!destText && !sourceText) return; + if ( + sourceText && + sourceText.split(" | ").indexOf(destText) !== -1 && + srcJoined.length === 1 + ) { + return; + } + + var wrapper = document.createElement("div"); + wrapper.className = "dr-field-row"; + var titleEl = document.createElement("div"); + titleEl.className = "dr-field-title"; + titleEl.textContent = "Cover"; + wrapper.appendChild(titleEl); + + var grid = document.createElement("div"); + grid.className = "dr-field-grid"; + var left = document.createElement("div"); + left.className = "dr-field-col"; + var right = document.createElement("div"); + right.className = "dr-field-col"; + var leftGroup = document.createElement("div"); + leftGroup.className = "input-group"; + var rightGroup = document.createElement("div"); + rightGroup.className = "input-group"; + var leftPre = choicePrepend(false, "Use source cover"); + var rightPre = choicePrepend(false, "Keep destination cover"); + leftGroup.appendChild(leftPre.root); + rightGroup.appendChild(rightPre.root); + var bestSourceCoverScene = pickSourceScene("cover_image"); + appendCoverValueToGroup( + leftGroup, + bestSourceCoverScene ? sceneCoverDataUrl(bestSourceCoverScene) : (srcJoined.length ? 
srcJoined[0] : ""), + sceneResolutionLabel(bestSourceCoverScene) + ); + appendCoverValueToGroup( + rightGroup, + destText, + sceneResolutionLabel(target) + ); + left.appendChild(leftGroup); + right.appendChild(rightGroup); + grid.appendChild(left); + grid.appendChild(right); + wrapper.appendChild(grid); + + var fieldKey = "cover_image"; + var winner = defaultScalarWinner(fieldKey); + opt.scalarWins[fieldKey] = winner; + function setChoiceHeads() { + var destOn = opt.scalarWins[fieldKey] === "dest"; + leftPre.button.innerHTML = !destOn + ? '' + : ''; + rightPre.button.innerHTML = destOn + ? '' + : ''; + } + rightPre.button.onclick = function () { + opt.scalarWins[fieldKey] = "dest"; + setChoiceHeads(); + }; + leftPre.button.onclick = function () { + opt.scalarWins[fieldKey] = "source"; + setChoiceHeads(); + }; + setChoiceHeads(); + opts.appendChild(wrapper); + } + + scalarFieldRow( + "Title", + "title", + target.title, + sources.map(function (s) { return s.title; }) + ); + scalarFieldRow( + "Studio Code", + "code", + target.code, + sources.map(function (s) { return s.code; }) + ); + addFieldRow("URLs", "urls"); + scalarFieldRow( + "Date", + "date", + target.date, + sources.map(function (s) { return s.date; }) + ); + scalarFieldRow( + "Director", + "director", + target.director, + sources.map(function (s) { return s.director; }) + ); + scalarFieldRow( + "Studio", + "studio", + target.studio && (target.studio.name || ("Studio " + target.studio.id)), + sources.map(function (s) { + return s.studio && (s.studio.name || ("Studio " + s.studio.id)); + }) + ); + addFieldRow("Performers", "performers"); + addFieldRow("Groups", "groups"); + addFieldRow("Tags", "tags"); + scalarFieldRow( + "Details", + "details", + target.details, + sources.map(function (s) { return s.details; }) + ); + coverImageFieldRow(); + addFieldRow("Stash IDs", "stash_ids"); + addFieldRow("Galleries", "galleries"); + + modal.appendChild(opts); + + var autoLab = document.createElement("label"); + var 
autoCb = document.createElement("input"); + autoCb.type = "checkbox"; + autoCb.id = autoId; + autoCb.checked = state.autoCheckDefault; + autoLab.appendChild(autoCb); + autoLab.appendChild( + document.createTextNode( + " On Sync, mark source scene as duplicate." + ) + ); + modal.appendChild(autoLab); + + var actions = document.createElement("div"); + actions.className = "dr-modal-actions"; + + function close() { + if (overlay.parentNode) overlay.parentNode.removeChild(overlay); + } + + var btnCancel = document.createElement("button"); + btnCancel.className = "btn btn-secondary"; + btnCancel.textContent = "Cancel"; + btnCancel.onclick = close; + + var btnOk = document.createElement("button"); + btnOk.className = "btn btn-primary"; + btnOk.textContent = "Sync"; + btnOk.onclick = async function () { + btnOk.disabled = true; + try { + var input = buildSceneUpdateInput(target, sources, opt); + await inlineRemoteCoverImages(input); + await runSceneUpdate(input); + if (shouldRefreshAfterSync()) { + await refreshPlanAndDecorations(); + } + ensureStashIdBadges(); + if (autoCb.checked) { + var m = {}; + sources.forEach(function (s) { + m[s.id] = true; + }); + applyChecks(m); + } + close(); + notifyStashSuccess("Sync completed for scene " + target.id); + } catch (e) { + notifyStashError(e); + btnOk.disabled = false; + } + }; + + actions.appendChild(btnCancel); + actions.appendChild(btnOk); + modal.appendChild(actions); + overlay.appendChild(modal); + overlay.addEventListener("click", function (ev) { + if (ev.target === overlay) close(); + }); + document.body.appendChild(overlay); + } + + function sceneByIdInVisible(sid) { + var vis = visibleGroups(state.groups || []); + for (var i = 0; i < vis.length; i++) { + var g = vis[i]; + for (var j = 0; j < g.length; j++) { + if (String(g[j].id) === String(sid)) return { group: g, scene: g[j] }; + } + } + return null; + } + + function stashIdCountForScene(sceneId) { + return stashIdsForScene(sceneId).length; + } + + function 
stashIdsForScene(sceneId) { + if (!state.groups || !sceneId) return []; + var sid = String(sceneId); + for (var i = 0; i < state.groups.length; i++) { + var g = state.groups[i] || []; + for (var j = 0; j < g.length; j++) { + var s = g[j]; + if (String(s.id) === sid) return (s.stash_ids || []).slice(); + } + } + return []; + } + + function stashEndpointToSceneBase(endpoint) { + var e = String(endpoint || "").trim(); + if (!e) return ""; + if (/\/graphql\/?$/i.test(e)) return e.replace(/\/graphql\/?$/i, "/scenes"); + return e.replace(/\/+$/, "") + "/scenes"; + } + + function createStashIdBoxIcon() { + // Prefer the exact icon Stash already renders (faBox) so style matches 1:1. + var existingFaBox = document.querySelector( + "#scene-duplicate-checker td.scene-details svg[data-icon='box']" + ); + if (existingFaBox) { + var cloned = existingFaBox.cloneNode(true); + cloned.removeAttribute("width"); + cloned.removeAttribute("height"); + cloned.setAttribute("class", "dr-stashid-box-icon"); + return cloned; + } + // Fallback: original custom Stash-style box icon from prior script. 
+ var svg = document.createElementNS("http://www.w3.org/2000/svg", "svg"); + svg.setAttribute("aria-hidden", "true"); + svg.setAttribute("focusable", "false"); + svg.setAttribute("viewBox", "0 0 444.185 444.184"); + svg.setAttribute("class", "dr-stashid-box-icon"); + [ + "M404.198,205.738c-0.917-0.656-2.096-0.83-3.165-0.467c0,0-119.009,40.477-122.261,41.598 c-2.725,0.938-4.487-1.42-4.487-1.42l-37.448-46.254c-0.935-1.154-2.492-1.592-3.89-1.098c-1.396,0.494-2.332,1.816-2.332,3.299 v167.891c0,1.168,0.583,2.26,1.556,2.91c0.584,0.391,1.263,0.59,1.945,0.59c0.451,0,0.906-0.088,1.336-0.267l168.045-69.438 c1.31-0.541,2.163-1.818,2.163-3.234v-91.266C405.66,207.456,405.116,206.397,404.198,205.738z", + "M443.487,168.221l-32.07-42.859c-0.46-0.615-1.111-1.061-1.852-1.27L223.141,71.456c-0.622-0.176-1.465-0.125-2.096,0.049 L34.62,124.141c-0.739,0.209-1.391,0.654-1.851,1.27L0.698,168.271c-0.672,0.898-0.872,2.063-0.541,3.133 c0.332,1.07,1.157,1.918,2.219,2.279l157.639,53.502c0.369,0.125,0.749,0.187,1.125,0.187c1.035,0,2.041-0.462,2.718-1.296 l44.128-54.391l13.082,3.6c0.607,0.168,1.249,0.168,1.857,0v-0.008c0.064-0.016,0.13-0.023,0.192-0.041l13.082-3.6l44.129,54.391 c0.677,0.834,1.683,1.295,2.718,1.295c0.376,0,0.756-0.061,1.125-0.186l157.639-53.502c1.062-0.361,1.887-1.209,2.219-2.279 C444.359,170.283,444.159,169.119,443.487,168.221z M222.192,160.381L88.501,123.856l133.691-37.527l133.494,37.479 L222.192,160.381z", + "M211.238,198.147c-1.396-0.494-2.955-0.057-3.889,1.098L169.901,245.5c0,0-1.764,2.356-4.488,1.42 c-3.252-1.121-122.26-41.598-122.26-41.598c-1.07-0.363-2.248-0.189-3.165,0.467c-0.918,0.658-1.462,1.717-1.462,2.846v91.267 c0,1.416,0.854,2.692,2.163,3.233l168.044,69.438c0.43,0.178,0.885,0.266,1.336,0.266c0.684,0,1.362-0.199,1.946-0.59 c0.972-0.65,1.555-1.742,1.555-2.91V201.445C213.57,199.963,212.635,198.641,211.238,198.147z" + ].forEach(function (d) { + var p = document.createElementNS("http://www.w3.org/2000/svg", "path"); + p.setAttribute("d", d); + p.setAttribute("fill", 
"currentColor"); + svg.appendChild(p); + }); + return svg; + } + + function ensureStashIdBadges() { + try { + var root = document.getElementById(ROOT_ID); + if (!root || !state.groups) return; + var rows = root.querySelectorAll("table.duplicate-checker-table tbody tr"); + rows.forEach(function (tr) { + if (tr.classList.contains("separator")) return; + var sid = sceneIdFromRow(tr); + if (!sid) return; + var stashIds = stashIdsForScene(sid).filter(function (s) { + return s && s.endpoint && s.stash_id; + }); + var count = stashIds.length; + var detailsTd = tr.querySelector("td.scene-details"); + if (!detailsTd) return; + var btnGroup = detailsTd.querySelector(".btn-group"); + if (!btnGroup) return; + + var existing = detailsTd.querySelector(".dr-stashid-btn"); + if (count <= 0) { + if (existing && existing.parentNode) existing.parentNode.removeChild(existing); + return; + } + if (!existing) { + var btn = document.createElement("button"); + btn.type = "button"; + btn.className = "minimal dr-stashid-btn"; + btn.setAttribute("title", "Stash IDs"); + btn.appendChild(createStashIdBoxIcon()); + var c = document.createElement("span"); + c.className = "dr-stashid-count"; + btn.appendChild(c); + btnGroup.insertBefore(btn, btnGroup.firstChild); + existing = btn; + } + var endpointList = Array.from( + new Set( + stashIds.map(function (s) { + return String(s.endpoint || "").trim(); + }) + ) + ).filter(function (x) { return !!x; }); + var title = endpointList.length + ? 
"Stash IDs:\n" + endpointList.join("\n") + : "Stash IDs"; + existing.setAttribute("title", title); + existing.classList.remove("dr-stashid-btn-link"); + existing.onclick = null; + existing.removeAttribute("aria-label"); + if (stashIds.length === 1) { + var single = stashIds[0]; + var sceneUrl = + stashEndpointToSceneBase(single.endpoint) + "/" + String(single.stash_id).trim(); + existing.classList.add("dr-stashid-btn-link"); + existing.setAttribute("aria-label", "Open stash scene"); + existing.onclick = function (ev) { + ev.preventDefault(); + window.open(sceneUrl, "_blank", "noopener,noreferrer"); + }; + } + + var countEl = existing.querySelector(".dr-stashid-count"); + if (countEl) countEl.textContent = String(count); + }); + } catch (_e) { + // Do not let badge rendering break duplicate checker page. + } + } + + function ensureToolbar() { + var root = document.getElementById(ROOT_ID); + if (!root) return; + var table = root.querySelector("table.duplicate-checker-table"); + if (!table || document.getElementById("duplicate-resolver-toolbar")) return; + + var bar = document.createElement("div"); + bar.id = "duplicate-resolver-toolbar"; + bar.hidden = true; + bar.innerHTML = + '
Smart Resolve
' + + '
' + + '' + + '" + + '' + + '' + + "
" + + '
' + + '" + + '" + + "
"; + + table.parentNode.insertBefore(bar, table); + + var drawerPanel = bar.querySelector("#dr-drawer-panel"); + var drawerToggle = bar.querySelector("#dr-drawer-toggle"); + drawerToggle.onclick = function () { + var open = drawerPanel.hidden; + drawerPanel.hidden = !open; + drawerToggle.setAttribute("aria-expanded", open ? "true" : "false"); + drawerToggle.textContent = open ? "Match Details: \u25bc" : "Match Details: \u25b6"; + }; + function setAutoSelectVisible(show) { + var b = bar.querySelector("#dr-btn-apply"); + if (!b) return; + b.hidden = !show; + } + + function setResetVisible(show) { + var b = bar.querySelector("#dr-btn-reset"); + if (!b) return; + b.hidden = !show; + } + + bar.querySelector("#dr-btn-reset").onclick = async function () { + var prev = bar.querySelector("#dr-preview-out"); + prev.textContent = "Loading…"; + state.loading = true; + setAutoSelectVisible(false); + updateUnresolvedButton(null, false); + setResetVisible(true); + try { + await loadDuplicateGroups(); + prev.textContent = + "Loaded " + (state.groups || []).length + " duplicate group(s)."; + state.lastPlan = buildPlan(); + } catch (e) { + prev.textContent = "Error: " + (e.message || e); + } + state.loading = false; + }; + + bar.querySelector("#dr-btn-apply").onclick = async function () { + if (!state.lastPlan || !state.lastPlan.checks) return; + applyChecks(state.lastPlan.checks); + renderInlineReasons(state.lastPlan); + renderSyncRecommendations(state.lastPlan); + }; + + bar.querySelector("#dr-btn-unresolved").onclick = function () { + goToFirstUnresolved(state.lastPlan); + }; + } + + function ensureRowButtons() { + var root = document.getElementById(ROOT_ID); + if (!root || !state.groups) return; + var table = root.querySelector("table.duplicate-checker-table"); + if (!table) return; + + var rows = table.querySelectorAll("tbody tr"); + rows.forEach(function (tr) { + if (tr.classList.contains("separator")) return; + var sid = sceneIdFromRow(tr); + if (!sid) return; + if 
(tr.querySelector(".duplicate-resolver-sync-btn")) return; + + var td = tr.querySelector("td:last-child"); + if (!td) return; + + var btn = document.createElement("button"); + btn.type = "button"; + btn.className = "btn btn-sm btn-secondary duplicate-resolver-sync-btn"; + btn.textContent = "Sync data"; + btn.setAttribute("data-scene-id", sid); + btn.onclick = function () { + if (!state.groups) { + loadDuplicateGroups() + .then(function () { + var info = sceneByIdInVisible(sid); + if (!info) { + notifyStashWarning( + "Scene not in current page groups — use Reset in the log drawer or change page." + ); + return; + } + showModal(info.scene, info.group); + }) + .catch(function (e) { + notifyStashError(e); + }); + return; + } + var info = sceneByIdInVisible(sid); + if (!info) { + notifyStashWarning( + "Scene not in current page groups — use Reset in the log drawer or change page." + ); + return; + } + showModal(info.scene, info.group); + }; + td.appendChild(btn); + }); + } + + function routeMatches() { + var p = window.location.pathname || ""; + return p === ROUTE || p.endsWith(ROUTE); + } + + function detachObserver() { + if (state.observer) { + state.observer.disconnect(); + state.observer = null; + } + state.attachedRoot = null; + } + + function clearRetryTimer() { + if (state.retryTimer) { + clearInterval(state.retryTimer); + state.retryTimer = null; + } + } + + function currentPageKey() { + var p = parseParams(); + return [p.page, p.size, p.distance, p.durationDiff].join("|"); + } + + function maybeRenderStashIdBadgesForPageChange() { + var key = currentPageKey(); + if (state.lastBadgePageKey === key) return; + state.lastBadgePageKey = key; + // New page/filter context: clear stale smart-resolve UI/plan and reload groups. 
+ state.smartResolveUiActive = false; + state.lastPlan = null; + state.groups = null; + clearInlineReasons(); + renderSyncRecommendations(null); + setSmartResolveDetailsVisible(false, false); + loadDuplicateGroups() + .then(function () { + ensureStashIdBadges(); + }) + .catch(function () { + // Keep UI responsive even if data refresh fails transiently. + }); + } + + function applyDomEnhancements() { + if (state.applyingDomEnhancements) return; + state.applyingDomEnhancements = true; + try { + ensureToolbar(); + placeToolbarButtonsInCoreRow(); + ensureCoreSelectSmartResolveOption(); + ensureRowButtons(); + } finally { + state.applyingDomEnhancements = false; + } + } + + function attach() { + if (!routeMatches()) { + detachObserver(); + return; + } + var root = document.getElementById(ROOT_ID); + if (!root) return false; + if (state.attachedRoot === root && state.observer) { + applyDomEnhancements(); + maybeRenderStashIdBadgesForPageChange(); + return true; + } + + loadPluginSetting(); + + detachObserver(); + var obs = new MutationObserver(function () { + if (state.applyingDomEnhancements) return; + applyDomEnhancements(); + maybeRenderStashIdBadgesForPageChange(); + }); + obs.observe(root, { childList: true, subtree: true }); + state.observer = obs; + state.attachedRoot = root; + + applyDomEnhancements(); + loadDuplicateGroups() + .then(function () { + applyDomEnhancements(); + state.lastBadgePageKey = ""; + maybeRenderStashIdBadgesForPageChange(); + }) + .catch(function () { + /* table may still load */ + }); + return true; + } + + function scheduleAttachRetries() { + clearRetryTimer(); + // Stash is a SPA; route content can render after plugin script executes. + state.retryTimer = setInterval(function () { + try { + if (!routeMatches()) { + detachObserver(); + return; + } + if (attach()) { + clearRetryTimer(); + } + } catch (e) { + // Keep trying; do not permanently fail on transient render timing. 
+ } + }, 500); + // Stop background retries after a minute if route never appears. + setTimeout(clearRetryTimer, 60000); + } + + installStashInlineNotifyBridge(); + + if (document.readyState === "loading") + document.addEventListener("DOMContentLoaded", function () { + attach(); + scheduleAttachRetries(); + }); + else { + attach(); + scheduleAttachRetries(); + } + + // Stash UI is a SPA; route changes do not reload plugin scripts. + window.addEventListener("stash:location", function () { + // Route changed: attempt immediate attach and keep retrying briefly. + setTimeout(function () { + attach(); + scheduleAttachRetries(); + }, 0); + }); +})(); diff --git a/plugins/SmartResolve/SmartResolve.yml b/plugins/SmartResolve/SmartResolve.yml new file mode 100644 index 00000000..fd0b14e3 --- /dev/null +++ b/plugins/SmartResolve/SmartResolve.yml @@ -0,0 +1,96 @@ +name: Smart Resolver +description: Scene Duplicate Checker helper with Smart Select and mergeless Sync Data. Rules are processed in order to determine a primary keep candidate. Protection rules are then processed to determine if the non-primary scene should be marked for deletion. +version: 1.0.02 +url: https://discourse.stashapp.cc/t/smart-resolver/6680 +ui: + javascript: + - SmartResolve.js + css: + - SmartResolve.css +settings: + autoCheckAfterSync: + displayName: After Sync, mark source scenes for deletion + description: >- + Successfully synced source scenes are marked by default after sync. + type: BOOLEAN + ignoreRule01TotalPixels: + displayName: Ignore 01 - Most Total pixels + description: When enabled, will not eliminate candidate with lower total pixels than the highest width*height (1% tolerance). + type: BOOLEAN + ignoreRule02Framerate: + displayName: Ignore 02 - Highest Framerate + description: When enabled, will not eliminate candidate with lower framerate than the highest file framerate. 
+ type: BOOLEAN + ignoreRule03Codec: + displayName: Ignore 03 - Codec tier + description: When enabled, will not eliminate candidate with lower codec quality tier (AV1 > H265 > H264 > others). + type: BOOLEAN + ignoreRule04Duration: + displayName: Ignore 04 - Longest Duration + description: When enabled, will not eliminate candidate with shorter duration than the longest duration (rounded to nearest second). + type: BOOLEAN + ignoreRule05SmallerSize: + displayName: Ignore 05 - Smaller file size + description: When enabled, will not eliminate candidate with larger file size than the smallest (tolerance max(1MB or 1%)). + type: BOOLEAN + ignoreRule05bUpgradeToken: + displayName: Ignore 05b - Upgrade token preference + description: When enabled, will not eliminate candidate with primary file path containing "upgrade". + type: BOOLEAN + ignoreRule06OlderDate: + displayName: Ignore 06 - Older date + description: When enabled, will not eliminate candidate with later scene date than the oldest scene date (null is latest). + type: BOOLEAN + ignoreRule07MoreGroups: + displayName: Ignore 07 - More groups + description: When enabled, will not eliminate candidate with fewer group associations than the most groups. + type: BOOLEAN + ignoreRule08HasStashId: + displayName: Ignore 08 - Has stash ID + description: When enabled, will not eliminate candidate with fewer stash IDs than the most stash IDs. + type: BOOLEAN + ignoreRule09MorePerformers: + displayName: Ignore 09 - More performers + description: When enabled, will not eliminate candidate with fewer performer associations than the most performer associations. + type: BOOLEAN + ignoreRule10MoreMarkers: + displayName: Ignore 10 - More markers + description: When enabled, will not eliminate candidate with fewer scene markers than the most scene markers. + type: BOOLEAN + ignoreRule11MoreTags: + displayName: Ignore 11 - More tags + description: When enabled, will not eliminate candidate with fewer tags than the most tags. 
+ type: BOOLEAN + ignoreRule12LessAssociatedFiles: + displayName: Ignore 12 - Less associated files + description: When enabled, will not eliminate candidate with more associated file entries than the least associated file entries. + type: BOOLEAN + ignoreRule13MoreMetadataCardinality: + displayName: Ignore 13 - Metadata cardinality + description: When enabled, will not eliminate candidate with fewer total populated metadata elements than the most. + type: BOOLEAN + unprotectAOCount: + displayName: Unprotect O-count + description: When enabled, will permit marking for deletion scenes with O-count > 0. + type: BOOLEAN + unprotectBGroupAssociation: + + displayName: Unprotect Group association containment + description: When enabled, will permit marking for deletion scenes with group associations not present on the primary candidate. + type: BOOLEAN + unprotectCPerformerMismatch: + displayName: Unprotect Performer mismatch + description: When enabled, will permit marking for deletion scenes with performer associations not present on the primary candidate. + type: BOOLEAN + unprotectDTagLossGt1NonStashed: + displayName: Unprotect Tag loss >1 (non-stashed) + description: When enabled, will permit marking unstashed scenes for deletion with more than 1 less tags than the primary candidate. + type: BOOLEAN + unprotectEOlderDate: + displayName: Unprotect Older date + description: When enabled, will permit marking for deletion scenes with an older date than the primary candidate. + type: BOOLEAN + unprotectFIgnoreSmartResolveTag: + displayName: Unprotect Ignore:Smart Resolve tag + description: When enabled, will permit marking for deletion scenes tagged "Ignore:Smart Resolve". 
+ type: BOOLEAN diff --git a/plugins/SmartResolve/about.png b/plugins/SmartResolve/about.png new file mode 100644 index 00000000..93504778 Binary files /dev/null and b/plugins/SmartResolve/about.png differ diff --git a/plugins/StashRandomButton/random_button.js b/plugins/StashRandomButton/random_button.js index 5920e0de..c82fd998 100644 --- a/plugins/StashRandomButton/random_button.js +++ b/plugins/StashRandomButton/random_button.js @@ -19,7 +19,7 @@ async function randomGlobal(entity, idField, redirectPrefix, internalFilter) { const realEntityPlural = getPlural(entity); - let filter = { per_page: 1 }; + let filter = { per_page: 1, sort: "random" }; let variables = { filter }; let filterArg = ""; let filterVar = ""; @@ -45,7 +45,7 @@ if (!totalCount) { alert("No results found."); return; } const randomIndex = Math.floor(Math.random() * totalCount); - let itemVars = { filter: { per_page: 1, page: randomIndex + 1 } }; + let itemVars = { filter: { per_page: 1, page: randomIndex + 1, sort: "random" } }; if (internalFilter) itemVars.internal_filter = internalFilter; const itemQuery = ` query Find${realEntityPlural}($filter: FindFilterType${filterArg}) { diff --git a/plugins/StashRandomButton/random_button.yml b/plugins/StashRandomButton/random_button.yml index 230763ba..abc79885 100644 --- a/plugins/StashRandomButton/random_button.yml +++ b/plugins/StashRandomButton/random_button.yml @@ -1,6 +1,6 @@ name: RandomButton description: Adds a button to quickly jump to a random scene, image, performer, studio, group, tag, or gallery, both on overview and internal entity pages. 
-version: 2.0.1 +version: 2.0.2 url: https://discourse.stashapp.cc/t/randombutton/1809 ui: requires: [] diff --git a/plugins/TPDBMarkers/TPDBMarkers.yml b/plugins/TPDBMarkers/TPDBMarkers.yml index f6fcf52d..e0e51d13 100644 --- a/plugins/TPDBMarkers/TPDBMarkers.yml +++ b/plugins/TPDBMarkers/TPDBMarkers.yml @@ -1,6 +1,6 @@ name: The Porn DB Markers description: Sync Markers from The Porn DB aka theporndb.net -version: 0.4.3 +version: 0.4.4 url: https://discourse.stashapp.cc/t/the-porn-db-markers/1335 exec: - python diff --git a/plugins/TPDBMarkers/tpdbMarkers.py b/plugins/TPDBMarkers/tpdbMarkers.py index d9e688d5..3ca347ed 100644 --- a/plugins/TPDBMarkers/tpdbMarkers.py +++ b/plugins/TPDBMarkers/tpdbMarkers.py @@ -5,6 +5,7 @@ import requests import json import time +import math per_page = 100 @@ -99,7 +100,7 @@ def processAll(): )[0] log.info(str(count) + " scenes to submit.") i = 0 - for r in range(1, int(count / per_page) + 1): + for r in range(1, math.ceil(count / per_page) + 1): log.info( "fetching data: %s - %s %0.1f%%" % ( diff --git a/plugins/cjCardTweaks/README.md b/plugins/cjCardTweaks/README.md index 8de12288..f68c5235 100644 --- a/plugins/cjCardTweaks/README.md +++ b/plugins/cjCardTweaks/README.md @@ -20,3 +20,8 @@ Adds an additional dimension to the rating banners. ![unnamed_2](https://github.com/user-attachments/assets/f505417d-ed0c-40c4-9c78-647081a41307) Modify the performer cards to use a traditional profile design + +### Stash ID icon +![unnamed_3](https://github.com/user-attachments/assets/181fe3cd-b3e5-437d-8ded-7e48f2c0e446) + +Adds a box icon to performer cards that have one or more Stash IDs (GUIDs) attached. The icon appears in the top-left corner of the performer card thumbnail and displays a tooltip showing the count of Stash IDs when hovered. This helps quickly identify performers that are linked to external Stash databases. 
diff --git a/plugins/cjCardTweaks/cjCardTweaks.js b/plugins/cjCardTweaks/cjCardTweaks.js index 923e5640..beae48f6 100644 --- a/plugins/cjCardTweaks/cjCardTweaks.js +++ b/plugins/cjCardTweaks/cjCardTweaks.js @@ -26,7 +26,8 @@ if ( key === "fileCount" || key === "addBannerDimension" || - key === "performerProfileCards" + key === "performerProfileCards" || + key === "stashIDIcon" ) { acc[key] = settings[key]; } else { @@ -42,6 +43,8 @@ ".performer-card:hover img.performer-card-image{box-shadow: 0 0 0 rgb(0 0 0 / 20%), 0 0 6px rgb(0 0 0 / 90%);transition: box-shadow .5s .5s}@media (min-width: 1691px){.performer-recommendations .card .performer-card-image{height: unset}}button.btn.favorite-button.not-favorite,button.btn.favorite-button.favorite{transition: filter .5s .5s}.performer-card:hover .thumbnail-section button.btn.favorite-button.not-favorite, .performer-card:hover .thumbnail-section button.btn.favorite-button.favorite{filter: drop-shadow(0 0 2px rgba(0, 0, 0, .9))}.performer-card .thumbnail-section button.btn.favorite-button.not-favorite, .performer-card .thumbnail-section button.btn.favorite-button.favorite{top: 10px;filter: drop-shadow(0 2px 2px rgba(0, 0, 0, .9))}.item-list-container .performer-card__age,.recommendation-row .performer-card__age,.item-list-container .performer-card .card-section-title,.recommendation-row .performer-card .card-section-title,.item-list-container .performer-card .thumbnail-section,.recommendation-row .performer-card .thumbnail-section{display: flex;align-content: center;justify-content: center}.item-list-container .performer-card .thumbnail-section a,.recommendation-row .performer-card .thumbnail-section a{display: contents}.item-list-container .performer-card-image,.recommendation-row .performer-card-image{aspect-ratio: 1 / 1;display: flex;object-fit: cover;border: 3px solid var(--plex-yelow);border-radius: 50%;min-width: unset;position: relative;width: 58%;margin: auto;z-index: 1;margin-top: 1.5rem;box-shadow:0 13px 26px rgb(0 
0 0 / 20%),0 3px 6px rgb(0 0 0 / 90%);object-position: center;transition: box-shadow .5s .5s}.item-list-container .performer-card hr,.recommendation-row .performer-card hr{width: 90%}.item-list-container .performer-card .fi,.recommendation-row .performer-card .fi{position: absolute;top: 81.5%;left: 69%;border-radius: 50% !important;background-size: cover;margin-left: -1px;height: 1.5rem;width: 1.5rem;z-index: 10;border: solid 2px #252525;box-shadow: unset}.item-list-container .performer-card .card-popovers .btn,.recommendation-row .performer-card .card-popovers .btn{font-size: 0.9rem}"; const RATING_BANNER_3D_STYLE = ".grid-card{overflow:unset}.detail-group .rating-banner-3d,.rating-banner{display:none}.grid-card:hover .rating-banner-3d{opacity:0;transition:opacity .5s}.rating-banner-3d{height:110px;left:-6px;overflow:hidden;position:absolute;top:-6px;width:110px}.rating-banner-3d span{box-shadow:0 5px 4px rgb(0 0 0 / 50%);position:absolute;display:block;width:170px;padding:10px 5px 10px 0;background-color:#ff6a07;color:#fff;font:700 1rem/1 Lato,sans-serif;text-shadow:0 1px 1px rgba(0,0,0,.2);text-transform:uppercase;text-align:center;letter-spacing:1px;right:-20px;top:24px;transform:rotate(-45deg)}.rating-banner-3d::before{top:0;right:0;position:absolute;z-index:-1;content:'';display:block;border:5px solid #a34405;border-top-color:transparent;border-left-color:transparent}.rating-banner-3d::after{bottom:0;left:0;position:absolute;z-index:-1;content:'';display:block;border:5px solid #963e04}"; + const STASH_ID_ICON_STYLE = + ".stash-id-count{display:inline-flex;align-items:center;flex-direction:row}.stash-id-count-number{display:inline-block;margin-right:0.25rem}.stash-id-icon{display:inline-flex;align-items:center}.stash-id-icon svg{width:0.875rem;height:0.875rem;fill:currentColor;color:#fff}"; /** * Element to inject custom CSS styles. 
@@ -54,6 +57,8 @@ styleElement.innerHTML += RATING_BANNER_3D_STYLE; if (SETTINGS.performerProfileCards) styleElement.innerHTML += PERFORMER_PROFILE_CARD_STYLE; + if (SETTINGS.stashIDIcon) + styleElement.innerHTML += STASH_ID_ICON_STYLE; function createElementFromHTML(htmlString) { const div = document.createElement("div"); @@ -93,7 +98,7 @@ } /** - * Handles gallery cards to specific paths in Stash. + * Handles gallery cards to specific paths in Stash. * * The supported paths are: * - /galleries @@ -207,6 +212,36 @@ cards.forEach((card) => { maybeAddFileCount(card, stashData, isContentCard); maybeAddDimensionToBanner(card); + if (cardClass === "performer-card") { + maybeAddStashIDIcon(card, stashData); + + // Also set up a MutationObserver to watch for card-popovers being added + if (SETTINGS.stashIDIcon && !card.querySelector(".stash-id-count")) { + const observer = new MutationObserver((mutations) => { + const cardPopovers = card.querySelector(".card-popovers.btn-group") || + card.querySelector(".card-popovers") || + card.querySelector('[role="group"].btn-group'); + if (cardPopovers && !cardPopovers.querySelector(".stash-id-count")) { + const link = card.querySelector(".thumbnail-section > a"); + if (link) { + const id = new URL(link.href).pathname.split("/").pop(); + const idNum = parseInt(id, 10); + // Query GraphQL for stash IDs + queryStashIDs(card, id, idNum); + observer.disconnect(); + } + } + }); + + observer.observe(card, { + childList: true, + subtree: true + }); + + // Disconnect after 5 seconds to avoid memory leaks + setTimeout(() => observer.disconnect(), 5000); + } + } }); } @@ -269,4 +304,113 @@ link.parentElement.appendChild(el); oldBanner.remove(); } + + /** + * Add Stash ID count and icon to performer cards in the card-popovers btn-group + * + * @param {Element} card - Card element from cards list. + * @param {Object} stashData - Data fetched from the GraphQL interceptor. e.g. stash.performers. 
+ */ + function maybeAddStashIDIcon(card, stashData) { + if (!SETTINGS.stashIDIcon) return; + + // Verify this function was not run twice on the same card + const existingCount = card.querySelector(".stash-id-count"); + if (existingCount) return; + + const link = card.querySelector(".thumbnail-section > a"); + if (!link) return; + + const id = new URL(link.href).pathname.split("/").pop(); + const idNum = parseInt(id, 10); + + // Query GraphQL for stash IDs + queryStashIDs(card, id, idNum); + } + + /** + * Query GraphQL for performer stash IDs + * @param {Element} card - Card element + * @param {string} id - Performer ID as string + * @param {number} idNum - Performer ID as number + */ + async function queryStashIDs(card, id, idNum) { + const query = ` + query FindPerformer($id: ID!) { + findPerformer(id: $id) { + id + stash_ids { + endpoint + stash_id + } + } + } + `; + + const variables = { + id: idNum + }; + + try { + const response = await fetch('/graphql', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + query: query, + variables: variables + }) + }); + + const result = await response.json(); + + if (result.errors) return; + + const performer = result.data?.findPerformer; + if (!performer) return; + + const stashIDs = performer.stash_ids || []; + const stashIDCount = Array.isArray(stashIDs) ? 
stashIDs.length : 0; + + // Only show if count is greater than 0 + if (stashIDCount > 0) { + // Find card-popovers and add button + const cardPopovers = card.querySelector(".card-popovers.btn-group") || + card.querySelector(".card-popovers") || + card.querySelector('[role="group"].btn-group'); + + if (cardPopovers && !cardPopovers.querySelector(".stash-id-count")) { + addStashIDButton(cardPopovers, stashIDCount); + } + } + } catch (error) { + // On error, don't show anything (silent fail) + } + } + + /** + * Helper function to add the stash ID button to the card-popovers + */ + function addStashIDButton(cardPopovers, stashIDCount) { + // Check if already added + if (cardPopovers.querySelector(".stash-id-count")) return; + + // Box-open icon SVG (StashApp logo style - open box) + const boxIconSVG = ``; + + // Create a wrapper div similar to the tag-count structure + const wrapper = document.createElement("div"); + + // Create button with count FIRST, then icon (as requested) + const button = createElementFromHTML( + `` + ); + + wrapper.appendChild(button); + cardPopovers.appendChild(wrapper); + } })(); diff --git a/plugins/cjCardTweaks/cjCardTweaks.yml b/plugins/cjCardTweaks/cjCardTweaks.yml index 95b09393..a5affb60 100644 --- a/plugins/cjCardTweaks/cjCardTweaks.yml +++ b/plugins/cjCardTweaks/cjCardTweaks.yml @@ -1,7 +1,7 @@ name: CJ's Card Tweaks. # requires: CommunityScriptsUILibrary -description: Provides various tweaks for the Stash Cards. -version: 1.1 +description: Provides various tweaks for the Stash cards. +version: 1.2 url: https://discourse.stashapp.cc/t/cjs-card-tweaks/1342 ui: requires: @@ -25,3 +25,7 @@ settings: displayName: Performer profile cards description: "Tweaks performer cards to use a traditional profile design." type: BOOLEAN + stashIDIcon: + displayName: Stash ID icon + description: "Adds a Stash ID icon to the performer cards." 
+ type: BOOLEAN diff --git a/plugins/imageGalleryNavigation/imageGalleryNavigation.js b/plugins/imageGalleryNavigation/imageGalleryNavigation.js index 7de67bef..301a1a41 100644 --- a/plugins/imageGalleryNavigation/imageGalleryNavigation.js +++ b/plugins/imageGalleryNavigation/imageGalleryNavigation.js @@ -275,6 +275,63 @@ // Init filter type field. imageFilter[cObj.type] = {}; + // Parse boolean type fields. + if (cObj["value"] == "true" || cObj["value"] == "false") { + imageFilter[cObj.type] = (cObj["value"] == "true"); + return; + } + + if (cObj.type == "orientation") { + imageFilter[cObj.type]["value"] = cObj["value"].map(value => value.toUpperCase()); + return; + } + + if (cObj.type == "resolution") { + imageFilter[cObj.type]["modifier"] = cObj["modifier"]; + switch (cObj["value"]) { + case "144p": + imageFilter[cObj.type]["value"] = "VERY_LOW"; + break; + case "240p": + imageFilter[cObj.type]["value"] = "LOW"; + break; + case "360p": + imageFilter[cObj.type]["value"] = "R360P"; + break; + case "480p": + imageFilter[cObj.type]["value"] = "STANDARD"; + break; + case "540p": + imageFilter[cObj.type]["value"] = "WEB_HD"; + break; + case "720p": + imageFilter[cObj.type]["value"] = "STANDARD_HD"; + break; + case "1080p": + imageFilter[cObj.type]["value"] = "FULL_HD"; + break; + case "1440p": + imageFilter[cObj.type]["value"] = "QUAD_HD"; + break; + case "4k": + imageFilter[cObj.type]["value"] = "FOUR_K"; + break; + case "5k": + imageFilter[cObj.type]["value"] = "FIVE_K"; + break; + case "6k": + imageFilter[cObj.type]["value"] = "SIX_K"; + break; + case "7k": + imageFilter[cObj.type]["value"] = "SEVEN_K"; + break; + case "8k": + imageFilter[cObj.type]["value"] = "EIGHT_K"; + break; + } + return; + } + // Get all keys (except for "type").
var keys = Object.keys(cObj); keys.splice(keys.indexOf("type"), 1); @@ -285,7 +342,21 @@ // Special parsing for object type "value" fields (used where there's possibly a value and value2) var keys2 = Object.keys(cObj[keyName]); keys2.forEach((keyName2) => { - imageFilter[cObj.type][keyName2] = cObj[keyName][keyName2]; + if (keyName2 == "items") { + // Parse tag values. + imageFilter[cObj.type]["value"] = [] + cObj[keyName][keyName2].forEach((keyValue) => { + imageFilter[cObj.type]["value"].push(keyValue.id); + }); + } else if (keyName2 == "excluded") { + // Parse excluded tags. + imageFilter[cObj.type]["excludes"] = [] + cObj[keyName][keyName2].forEach((keyValue) => { + imageFilter[cObj.type]["excludes"].push(keyValue.id); + }); + } else { + imageFilter[cObj.type][keyName2] = cObj[keyName][keyName2]; + } }); } else { imageFilter[cObj.type][keyName] = cObj[keyName]; diff --git a/plugins/imageGalleryNavigation/imageGalleryNavigation.yml b/plugins/imageGalleryNavigation/imageGalleryNavigation.yml index 876a2a3f..7091b600 100644 --- a/plugins/imageGalleryNavigation/imageGalleryNavigation.yml +++ b/plugins/imageGalleryNavigation/imageGalleryNavigation.yml @@ -1,7 +1,7 @@ name: imageGalleryNavigation # requires: CommunityScriptsUILibrary description: This plugin adds features for navigating between images within a Gallery from the Image details page. -version: 0.3 +version: 0.4 url: https://discourse.stashapp.cc/t/imagegallerynavigation/1857 settings: enableTransform: diff --git a/plugins/mobileWallLayout/README.md b/plugins/mobileWallLayout/README.md new file mode 100644 index 00000000..9c59cd8c --- /dev/null +++ b/plugins/mobileWallLayout/README.md @@ -0,0 +1,29 @@ +# Mobile Wall Layout + +https://discourse.stashapp.cc/t/mobile-wall-layout/6160 + +Makes the wall-mode gallery render as a single full-width column on mobile +devices, on the **Markers** (`/scenes/markers`) and **Images** (`/images`) pages. 
+ +By default, Stash's wall mode uses `react-photo-gallery`, which calculates +`position: absolute` offsets for a multi-column brick layout. On small screens +this produces items that are too small to comfortably tap and browse. This +plugin overrides those offsets so each item spans the full width of the screen, +making marker previews and images easy to scroll through on a phone. + +## Behaviour + +- Applies only on **touch-screen devices** (`pointer: coarse`) — correctly + targets phones and tablets without triggering on narrow desktop browser windows. +- Activates and deactivates automatically as you navigate between pages. +- Has no effect on desktop or mouse-driven viewports. + +## Implementation note + +The fix injects a `