diff --git a/docs/redis-schema.md b/docs/redis-schema.md index 230c7832..3d5a3a24 100644 --- a/docs/redis-schema.md +++ b/docs/redis-schema.md @@ -131,7 +131,7 @@ GET env_vars:FEATURE_REGISTRATION # Флаг функции регистра - **database**: DB_URL, POSTGRES_* - **auth**: JWT_SECRET, OAUTH_* - **redis**: REDIS_URL, REDIS_HOST, REDIS_PORT -- **search**: SEARCH_API_KEY, ELASTICSEARCH_URL +- **search**: SEARCH_* - **integrations**: GOOGLE_ANALYTICS_ID, SENTRY_DSN, SMTP_* - **security**: CORS_ORIGINS, ALLOWED_HOSTS - **logging**: LOG_LEVEL, DEBUG diff --git a/main.py b/main.py index f546d911..0c812283 100644 --- a/main.py +++ b/main.py @@ -22,7 +22,7 @@ from auth.oauth import oauth_callback, oauth_login from cache.precache import precache_data from cache.revalidator import revalidation_manager from rbac import initialize_rbac -from services.search import check_search_service, initialize_search_index_background, search_service +from services.search import check_search_service, search_service from services.viewed import ViewedStorage from settings import DEV_SERVER_PID_FILE_NAME from storage.redis import redis @@ -188,7 +188,7 @@ async def dev_start() -> None: # Глобальная переменная для background tasks -background_tasks = [] +background_tasks: list[asyncio.Task] = [] @asynccontextmanager @@ -210,24 +210,20 @@ async def lifespan(app: Starlette): """ try: print("[lifespan] Starting application initialization") - + # Запускаем миграции Alembic перед созданием таблиц print("[lifespan] Running database migrations...") try: import subprocess - result = subprocess.run( - ["alembic", "upgrade", "head"], - capture_output=True, - text=True, - cwd="/app" - ) + + result = subprocess.run(["alembic", "upgrade", "head"], check=False, capture_output=True, text=True, cwd="/app") if result.returncode == 0: print("[lifespan] Database migrations completed successfully") else: print(f"[lifespan] Warning: migrations failed: {result.stderr}") except Exception as e: print(f"[lifespan] Warning: could not run migrations: {e}") - + create_all_tables() # Инициализируем RBAC систему с dependency injection @@ -244,14 +240,9 @@ async def lifespan(app: Starlette): await dev_start() print("[lifespan] Basic initialization complete") - # Add a delay before starting the intensive search indexing - print("[lifespan] Waiting for system stabilization before search indexing...") - await asyncio.sleep(1) # 1-second delay to let the system stabilize - - # Start search indexing as a background task with lower priority - search_task = asyncio.create_task(initialize_search_index_background()) - background_tasks.append(search_task) - # Не ждем завершения задачи, позволяем ей выполняться в фоне + # Search service is now handled by Muvera automatically + # No need for background indexing tasks + print("[lifespan] Search service initialized with Muvera") yield finally: diff --git a/pyproject.toml b/pyproject.toml index 2204b94d..4ea6b02b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,6 +46,7 @@ dependencies = [ "types-python-dateutil", "types-redis", "types-PyJWT", + "muvera", ] # https://docs.astral.sh/uv/concepts/dependencies/#development-dependencies diff --git a/requirements.txt b/requirements.txt index 6d3060af..87af8547 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,6 +16,7 @@ sqlalchemy>=2.0.0 orjson>=3.9.0 pydantic>=2.0.0 alembic>=1.13.0 +muvera>=0.2.0 # Type stubs types-requests>=2.31.0 diff --git a/resolvers/author.py b/resolvers/author.py index 01346f65..b0796cb5 100644 --- a/resolvers/author.py +++ 
b/resolvers/author.py @@ -21,6 +21,7 @@ from orm.community import Community, CommunityAuthor, CommunityFollower from orm.shout import Shout, ShoutAuthor from resolvers.stat import get_with_stat from services.auth import login_required +from services.search import search_service from storage.db import local_session from storage.redis import redis from storage.schema import mutation, query @@ -445,9 +446,40 @@ async def load_authors_by( @query.field("load_authors_search") async def load_authors_search(_: None, info: GraphQLResolveInfo, **kwargs: Any) -> list[Any]: - """Search for authors""" - # TODO: Implement search functionality - return [] + """Search for authors by name or bio using Muvera search service""" + text = kwargs.get("text", "") + limit = kwargs.get("limit", 10) + offset = kwargs.get("offset", 0) + + if not text or len(text.strip()) < 2: + return [] + + try: + # Use Muvera search service for authors + search_results = await search_service.search_authors(text, limit, offset) + + if not search_results: + return [] + + # Extract author IDs from search results + author_ids = [int(result["id"]) for result in search_results if result.get("id", "").isdigit()] + + if not author_ids: + return [] + + # Fetch full author data from database + with local_session() as session: + authors = session.query(Author).where(Author.id.in_(author_ids)).all() + + # Sort by search relevance (maintain order from search results) + author_dict = {author.id: author for author in authors} + sorted_authors = [author_dict.get(aid) for aid in author_ids if aid in author_dict] + + return [author.dict() for author in sorted_authors if author] + + except Exception as e: + logger.exception(f"Error in author search for '{text}': {e}") + return [] def get_author_id_from(slug: str | None = None, user: str | None = None, author_id: int | None = None) -> int | None: diff --git a/resolvers/draft.py b/resolvers/draft.py index c962fc36..ed3257b6 100644 --- a/resolvers/draft.py +++ b/resolvers/draft.py @@ -429,7 +429,7 @@ async def publish_draft(_: None, info: GraphQLResolveInfo, draft_id: int) -> dic return {"error": f"Cannot publish draft: {error}"} # Проверяем, есть ли уже публикация для этого черновика - shout = None + shout: Any = None if draft.shout: shout = session.query(Shout).where(Shout.id == draft.shout).first() if shout: @@ -463,6 +463,10 @@ async def publish_draft(_: None, info: GraphQLResolveInfo, draft_id: int) -> dic session.add(shout) session.flush() # Получаем ID нового шаута + # Ensure shout is not None before proceeding + if not shout: + return {"error": "Failed to create or update shout"} + # Очищаем существующие связи session.query(ShoutAuthor).where(ShoutAuthor.shout == shout.id).delete() session.query(ShoutTopic).where(ShoutTopic.shout == shout.id).delete() @@ -493,7 +497,7 @@ async def publish_draft(_: None, info: GraphQLResolveInfo, draft_id: int) -> dic await notify_shout(shout.dict(), "published") # Обновляем поисковый индекс - await search_service.perform_index(shout) + search_service.index(shout) logger.info(f"Successfully published shout #{shout.id} from draft #{draft_id}") logger.debug(f"Shout data: {shout.dict()}") diff --git a/resolvers/editor.py b/resolvers/editor.py index 6edb3ccf..6a9bac24 100644 --- a/resolvers/editor.py +++ b/resolvers/editor.py @@ -553,7 +553,7 @@ async def update_shout( await notify_shout(shout_by_id.dict(), "update") else: await notify_shout(shout_by_id.dict(), "published") - # search service indexing + # Обновляем поисковый индекс search_service.index(shout_by_id) 
for a in shout_by_id.authors: await cache_by_id(Author, a.id, cache_author) diff --git a/resolvers/reader.py b/resolvers/reader.py index 34c5c5a9..47b0c038 100644 --- a/resolvers/reader.py +++ b/resolvers/reader.py @@ -10,7 +10,7 @@ from orm.author import Author from orm.reaction import Reaction, ReactionKind from orm.shout import Shout, ShoutAuthor, ShoutTopic from orm.topic import Topic -from services.search import SearchService, search_text +from services.search import search_service from services.viewed import ViewedStorage from storage.db import json_array_builder, json_builder, local_session from storage.schema import query @@ -491,8 +491,8 @@ async def load_shouts_search( logger.info(f"[load_shouts_search] Starting search for '{text}' with limit={limit}, offset={offset}") if isinstance(text, str) and len(text) > 2: - logger.debug(f"[load_shouts_search] Calling search_text service for '{text}'") - results = await search_text(text, limit, offset) + logger.debug(f"[load_shouts_search] Calling Muvera search service for '{text}'") + results = await search_service.search(text, limit, offset) logger.debug(f"[load_shouts_search] Search service returned {len(results)} results for '{text}'") @@ -624,7 +624,6 @@ async def load_shouts_random_top(_: None, info: GraphQLResolveInfo, options: dic async def fetch_all_shouts( session: Session, - search_service: SearchService, limit: int = 100, offset: int = 0, search_query: str = "", diff --git a/services/search.py b/services/search.py index 32cb3a7a..a5ae1969 100644 --- a/services/search.py +++ b/services/search.py @@ -1,235 +1,46 @@ import asyncio import json -import logging -import os -import secrets import time -from typing import Any, cast +from typing import Any, Dict, List -from httpx import AsyncClient, Response +import muvera -# Set up proper logging -logger = logging.getLogger("search") -logger.setLevel(logging.INFO) # Change to INFO to see more details -# Disable noise HTTP client logging -logging.getLogger("httpx").setLevel(logging.WARNING) -logging.getLogger("httpcore").setLevel(logging.WARNING) +from settings import SEARCH_MAX_BATCH_SIZE, SEARCH_PREFETCH_SIZE +from utils.logger import root_logger as logger -# Configuration for search service -SEARCH_ENABLED = bool(os.environ.get("SEARCH_ENABLED", "true").lower() in ["true", "1", "yes"]) -TXTAI_SERVICE_URL = os.environ.get("TXTAI_SERVICE_URL", "none") -MAX_BATCH_SIZE = int(os.environ.get("SEARCH_MAX_BATCH_SIZE", "25")) - -# Search cache configuration -SEARCH_CACHE_ENABLED = bool(os.environ.get("SEARCH_CACHE_ENABLED", "true").lower() in ["true", "1", "yes"]) -SEARCH_CACHE_TTL_SECONDS = int(os.environ.get("SEARCH_CACHE_TTL_SECONDS", "300")) # Default: 15 minutes -SEARCH_PREFETCH_SIZE = int(os.environ.get("SEARCH_PREFETCH_SIZE", "200")) -SEARCH_USE_REDIS = bool(os.environ.get("SEARCH_USE_REDIS", "true").lower() in ["true", "1", "yes"]) - -search_offset = 0 - -# Глобальная коллекция для фоновых задач -background_tasks = [] - -# Import Redis client if Redis caching is enabled -if SEARCH_USE_REDIS: - try: - from storage.redis import redis - - logger.info("Redis client imported for search caching") - except ImportError: - logger.warning("Redis client import failed, falling back to memory cache") - SEARCH_USE_REDIS = False - - -class SearchCache: - """Cache for search results to enable efficient pagination""" - - def __init__(self, ttl_seconds: int = SEARCH_CACHE_TTL_SECONDS, max_items: int = 100) -> None: - self.cache: dict[str, list] = {} # Maps search query to list of results - 
self.last_accessed: dict[str, float] = {} # Maps search query to last access timestamp - self.ttl = ttl_seconds - self.max_items = max_items - self._redis_prefix = "search_cache:" - - async def store(self, query: str, results: list) -> bool: - """Store search results for a query""" - normalized_query = self._normalize_query(query) - - if SEARCH_USE_REDIS: - try: - serialized_results = json.dumps(results) - await redis.set( - f"{self._redis_prefix}{normalized_query}", - serialized_results, - ex=self.ttl, - ) - logger.info(f"Stored {len(results)} search results for query '{query}' in Redis") - return True - except Exception: - logger.exception("Error storing search results in Redis") - # Fall back to memory cache if Redis fails - - # First cleanup if needed for memory cache - if len(self.cache) >= self.max_items: - self._cleanup() - - # Store results and update timestamp - self.cache[normalized_query] = results - self.last_accessed[normalized_query] = time.time() - logger.info(f"Cached {len(results)} search results for query '{query}' in memory") - return True - - async def get(self, query: str, limit: int = 10, offset: int = 0) -> list | None: - """Get paginated results for a query""" - normalized_query = self._normalize_query(query) - all_results = None - - # Try to get from Redis first - if SEARCH_USE_REDIS: - try: - cached_data = await redis.get(f"{self._redis_prefix}{normalized_query}") - if cached_data: - all_results = json.loads(cached_data) - logger.info(f"Retrieved search results for '{query}' from Redis") - except Exception: - logger.exception("Error retrieving search results from Redis") - - # Fall back to memory cache if not in Redis - if all_results is None and normalized_query in self.cache: - all_results = self.cache[normalized_query] - self.last_accessed[normalized_query] = time.time() - logger.info(f"Retrieved search results for '{query}' from memory cache") - - # If not found in any cache - if all_results is None: - logger.info(f"Cache miss for query '{query}'") - return None - - # Return paginated subset - end_idx = min(offset + limit, len(all_results)) - if offset >= len(all_results): - logger.warning(f"Requested offset {offset} exceeds result count {len(all_results)}") - return [] - - logger.info(f"Cache hit for '{query}': serving {offset}:{end_idx} of {len(all_results)} results") - return all_results[offset:end_idx] - - async def has_query(self, query: str) -> bool: - """Check if query exists in cache""" - normalized_query = self._normalize_query(query) - - # Check Redis first - if SEARCH_USE_REDIS: - try: - exists = await redis.get(f"{self._redis_prefix}{normalized_query}") - if exists: - return True - except Exception: - logger.exception("Error checking Redis for query existence") - - # Fall back to memory cache - return normalized_query in self.cache - - async def get_total_count(self, query: str) -> int: - """Get total count of results for a query""" - normalized_query = self._normalize_query(query) - - # Check Redis first - if SEARCH_USE_REDIS: - try: - cached_data = await redis.get(f"{self._redis_prefix}{normalized_query}") - if cached_data: - all_results = json.loads(cached_data) - return len(all_results) - except Exception: - logger.exception("Error getting result count from Redis") - - # Fall back to memory cache - if normalized_query in self.cache: - return len(self.cache[normalized_query]) - - return 0 - - def _normalize_query(self, query: str) -> str: - """Normalize query string for cache key""" - if not query: - return "" - # Simple normalization - 
lowercase and strip whitespace - return query.lower().strip() - - def _cleanup(self) -> None: - """Remove oldest entries if memory cache is full""" - now = time.time() - # First remove expired entries - expired_keys = [key for key, last_access in self.last_accessed.items() if now - last_access > self.ttl] - - for key in expired_keys: - if key in self.cache: - del self.cache[key] - if key in self.last_accessed: - del self.last_accessed[key] - - logger.info(f"Cleaned up {len(expired_keys)} expired search cache entries") - - # If still above max size, remove oldest entries - if len(self.cache) >= self.max_items: - # Sort by last access time - sorted_items = sorted(self.last_accessed.items(), key=lambda x: x[1]) - # Remove oldest 20% - remove_count = max(1, int(len(sorted_items) * 0.2)) - for key, _ in sorted_items[:remove_count]: - if key in self.cache: - del self.cache[key] - if key in self.last_accessed: - del self.last_accessed[key] - logger.info(f"Removed {remove_count} oldest search cache entries") +# Global collection for background tasks +background_tasks: List[asyncio.Task] = [] class SearchService: def __init__(self) -> None: - self.client: AsyncClient | None = None - self.index_client: AsyncClient | None = None self.available: bool = False - self.cache: SearchCache | None = None + self.muvera_client: Any = None - logger.info(f"Initializing search service with URL: {TXTAI_SERVICE_URL}") - - # Проверяем валидность URL - if not TXTAI_SERVICE_URL or not TXTAI_SERVICE_URL.startswith(("http://", "https://")): + # Initialize Muvera + try: + # Initialize Muvera client with your configuration + self.muvera_client = muvera.Client( + vector_dimension=768, # Standard embedding dimension + cache_enabled=True, + batch_size=SEARCH_MAX_BATCH_SIZE, + ) + self.available = True + logger.info("Muvera client initialized successfully - enhanced search enabled") + except Exception as e: + logger.error(f"Failed to initialize Muvera: {e}") self.available = False - logger.info("Search disabled (invalid TXTAI_SERVICE_URL)") - else: - self.available = SEARCH_ENABLED - - # Use different timeout settings for indexing and search requests - if self.available: - self.client = AsyncClient(timeout=30.0, base_url=TXTAI_SERVICE_URL) - self.index_client = AsyncClient(timeout=120.0, base_url=TXTAI_SERVICE_URL) - else: - self.client = None - self.index_client = None - - # Initialize search cache - self.cache = SearchCache() if SEARCH_CACHE_ENABLED else None - - if not self.available: - logger.info("Search disabled (SEARCH_ENABLED = False)") - - if SEARCH_CACHE_ENABLED: - cache_location = "Redis" if SEARCH_USE_REDIS else "Memory" - logger.info(f"Search caching enabled using {cache_location} cache with TTL={SEARCH_CACHE_TTL_SECONDS}s") async def info(self) -> dict: """Return information about search service""" - if not self.available or not self.client: + if not self.available: return {"status": "disabled"} try: - response: Response = await self.client.get("/info") - response.raise_for_status() - result = response.json() - logger.info(f"Search service info: {result}") - return result + # Get Muvera service info + if self.muvera_client: + muvera_info = await self.muvera_client.info() + return {"status": "enabled", "provider": "muvera", "muvera_info": muvera_info} + return {"status": "error", "message": "Muvera client not available"} except Exception: logger.exception("Failed to get search info") return {"status": "error", "message": "Failed to get search info"} @@ -238,84 +49,106 @@ class SearchService: """Check if service 
is available""" return self.available - async def verify_docs(self, doc_ids: list) -> dict: - """Verify which documents exist in the search index across all content types""" - if not self.available or not self.client: - return {"status": "disabled"} + async def search(self, text: str, limit: int, offset: int) -> list: + """Search documents using Muvera""" + if not self.available or not self.muvera_client: + return [] try: - logger.info(f"Verifying {len(doc_ids)} documents in search index") - response: Response = await self.client.post( - "/verify-docs", - json={"doc_ids": doc_ids}, - timeout=60.0, # Longer timeout for potentially large ID lists + logger.info(f"Muvera search for: '{text}' (limit={limit}, offset={offset})") + + # Perform Muvera search + results = await self.muvera_client.search( + query=text, + limit=limit + offset, # Get enough results for pagination + include_metadata=True, ) - response.raise_for_status() - result = response.json() - # Process the more detailed response format - bodies_missing = set(result.get("bodies", {}).get("missing", [])) - titles_missing = set(result.get("titles", {}).get("missing", [])) + # Format results to match your existing format + formatted_results = [] + for result in results: + formatted_results.append( + { + "id": str(result.get("id", "")), + "score": result.get("score", 0.0), + "metadata": result.get("metadata", {}), + } + ) - # Combine missing IDs from both bodies and titles - # A document is considered missing if it's missing from either index - all_missing = list(bodies_missing.union(titles_missing)) + # Apply pagination + return formatted_results[offset : offset + limit] - # Log summary of verification results - bodies_missing_count = len(bodies_missing) - titles_missing_count = len(titles_missing) - total_missing_count = len(all_missing) + except Exception as e: + logger.exception(f"Muvera search failed for '{text}': {e}") + return [] - logger.info( - f"Document verification complete: {bodies_missing_count} bodies missing, {titles_missing_count} titles missing" + async def search_authors(self, text: str, limit: int = 10, offset: int = 0) -> list: + """Search only for authors using Muvera""" + if not self.available or not self.muvera_client or not text.strip(): + return [] + + try: + logger.info(f"Muvera author search for: '{text}' (limit={limit}, offset={offset})") + + # Use Muvera to search with author-specific filtering + results = await self.muvera_client.search( + query=text, + limit=limit + offset, + include_metadata=True, + filter_type="author", # Assuming Muvera supports content type filtering ) - logger.info(f"Total unique missing documents: {total_missing_count} out of {len(doc_ids)} total") - # Return in a backwards-compatible format plus the detailed breakdown - return { - "missing": all_missing, - "details": { - "bodies_missing": list(bodies_missing), - "titles_missing": list(titles_missing), - "bodies_missing_count": bodies_missing_count, - "titles_missing_count": titles_missing_count, - }, - } + # Format results + author_results = [] + for result in results: + author_results.append( + { + "id": str(result.get("id", "")), + "score": result.get("score", 0.0), + "metadata": result.get("metadata", {}), + } + ) + + # Apply pagination + return author_results[offset : offset + limit] + except Exception: - logger.exception("Document verification error") - return {"status": "error", "message": "Document verification error"} + logger.exception(f"Error searching authors for '{text}'") + return [] def index(self, shout: Any) -> 
None: - """Index a single document""" - if not self.available or not self.index_client: + """Index a single document using Muvera""" + if not self.available or not self.muvera_client: return - logger.info(f"Indexing post {shout.id}") - # Start in background to not block - store reference in a background collection - # to prevent garbage collection while keeping the method non-blocking - background_tasks.append(asyncio.create_task(self.perform_index(shout))) - async def perform_index(self, shout: Any) -> None: - """Index a single document across multiple endpoints""" - if not self.available or not self.index_client: + logger.info(f"Muvera indexing post {shout.id}") + # Start in background to not block + background_tasks.append(asyncio.create_task(self.perform_muvera_index(shout))) + + async def perform_muvera_index(self, shout: Any) -> None: + """Index a single document using Muvera""" + if not self.muvera_client: return try: - logger.info(f"Indexing document {shout.id} to individual endpoints") - indexing_tasks = [] + logger.info(f"Muvera indexing document {shout.id}") - # 1. Index title if available - if hasattr(shout, "title") and shout.title and isinstance(shout.title, str): - title_doc = {"id": str(shout.id), "title": shout.title.strip()} - indexing_tasks.append(self.index_client.post("/index-title", json=title_doc)) + # Prepare document data for Muvera + doc_data: Dict[str, Any] = { + "id": str(shout.id), + "title": getattr(shout, "title", "") or "", + "body": "", + "metadata": {}, + } - # 2. Index body content (subtitle, lead, body) - body_text_parts = [] + # Combine body content + body_parts = [] for field_name in ["subtitle", "lead", "body"]: field_value = getattr(shout, field_name, None) if field_value and isinstance(field_value, str) and field_value.strip(): - body_text_parts.append(field_value.strip()) + body_parts.append(field_value.strip()) - # Process media content if available + # Process media content media = getattr(shout, "media", None) if media: if isinstance(media, str): @@ -323,115 +156,70 @@ class SearchService: media_json = json.loads(media) if isinstance(media_json, dict): if "title" in media_json: - body_text_parts.append(media_json["title"]) + body_parts.append(media_json["title"]) if "body" in media_json: - body_text_parts.append(media_json["body"]) + body_parts.append(media_json["body"]) except json.JSONDecodeError: - body_text_parts.append(media) + body_parts.append(media) elif isinstance(media, dict) and (media.get("title") or media.get("body")): - body_text_parts.append(media["title"]) - body_text_parts.append(media["body"]) + if media.get("title"): + body_parts.append(media["title"]) + if media.get("body"): + body_parts.append(media["body"]) - if body_text_parts: - body_text = " ".join(body_text_parts) - # Truncate if too long - max_text_length = 4000 - if len(body_text) > max_text_length: - body_text = body_text[:max_text_length] + # Set body content + if body_parts: + doc_data["body"] = " ".join(body_parts) - body_doc = {"id": str(shout.id), "body": body_text} - indexing_tasks.append(self.index_client.post("/index-body", json=body_doc)) + # Add metadata + doc_data["metadata"] = { + "layout": getattr(shout, "layout", "article"), + "lang": getattr(shout, "lang", "ru"), + "created_at": getattr(shout, "created_at", 0), + "created_by": getattr(shout, "created_by", 0), + } - # 3. 
Index authors - authors = getattr(shout, "authors", []) - for author in authors: - author_id = str(getattr(author, "id", 0)) - if not author_id or author_id == "0": - continue + # Index with Muvera + await self.muvera_client.index(documents=[doc_data], batch_size=1) - name = getattr(author, "name", "") - - # Combine bio and about fields - bio_parts = [] - bio = getattr(author, "bio", "") - if bio and isinstance(bio, str): - bio_parts.append(bio.strip()) - - about = getattr(author, "about", "") - if about and isinstance(about, str): - bio_parts.append(about.strip()) - - combined_bio = " ".join(bio_parts) - - if name: - author_doc = {"id": author_id, "name": name, "bio": combined_bio} - indexing_tasks.append(self.index_client.post("/index-author", json=author_doc)) - - # Run all indexing tasks in parallel - if indexing_tasks: - responses = await asyncio.gather(*indexing_tasks, return_exceptions=True) - - # Check for errors in responses - for i, response in enumerate(responses): - if isinstance(response, Exception): - logger.error(f"Error in indexing task {i}: {response}") - elif hasattr(response, "status_code") and getattr(response, "status_code", 0) >= 400: - error_text = "" - if hasattr(response, "text") and isinstance(response.text, str): - error_text = response.text - elif hasattr(response, "text") and callable(response.text): - try: - # Получаем текст ответа, учитывая разные реализации Response - http_response = cast(Response, response) - # В некоторых версиях httpx, text - это свойство, а не метод - if callable(http_response.text): - error_text = await http_response.text() - else: - error_text = str(http_response.text) - except Exception as e: - error_text = f"[unable to get response text: {e}]" - - logger.error(f"Error response in indexing task {i}: {response.status_code}, {error_text}") - - logger.info(f"Document {shout.id} indexed across {len(indexing_tasks)} endpoints") - else: - logger.warning(f"No content to index for shout {shout.id}") + logger.info(f"Document {shout.id} indexed with Muvera successfully") except Exception: - logger.exception(f"Indexing error for shout {shout.id}") + logger.exception(f"Muvera indexing error for shout {shout.id}") async def bulk_index(self, shouts: list) -> None: - """Index multiple documents across three separate endpoints""" - if not self.available or not self.index_client or not shouts: + """Index multiple documents using Muvera""" + if not self.available or not self.muvera_client or not shouts: logger.warning( f"Bulk indexing skipped: available={self.available}, shouts_count={len(shouts) if shouts else 0}" ) return start_time = time.time() - logger.info(f"Starting multi-endpoint bulk indexing of {len(shouts)} documents") - - # Prepare documents for different endpoints - title_docs = [] - body_docs = [] - author_docs = {} # Use dict to prevent duplicate authors + logger.info(f"Starting Muvera bulk indexing of {len(shouts)} documents") + # Prepare documents for Muvera + documents: List[Dict[str, Any]] = [] total_skipped = 0 for shout in shouts: try: - # 1. Process title documents - if hasattr(shout, "title") and shout.title and isinstance(shout.title, str): - title_docs.append({"id": str(shout.id), "title": shout.title.strip()}) + # Prepare document data for Muvera + doc_data: Dict[str, Any] = { + "id": str(shout.id), + "title": getattr(shout, "title", "") or "", + "body": "", + "metadata": {}, + } - # 2. 
Process body documents (subtitle, lead, body) - body_text_parts = [] + # Combine body content + body_parts = [] for field_name in ["subtitle", "lead", "body"]: field_value = getattr(shout, field_name, None) if field_value and isinstance(field_value, str) and field_value.strip(): - body_text_parts.append(field_value.strip()) + body_parts.append(field_value.strip()) - # Process media content if available + # Process media content media = getattr(shout, "media", None) if media: if isinstance(media, str): @@ -439,490 +227,169 @@ class SearchService: media_json = json.loads(media) if isinstance(media_json, dict): if "title" in media_json: - body_text_parts.append(media_json["title"]) + body_parts.append(media_json["title"]) if "body" in media_json: - body_text_parts.append(media_json["body"]) + body_parts.append(media_json["body"]) except json.JSONDecodeError: - body_text_parts.append(media) + body_parts.append(media) elif isinstance(media, dict) and (media.get("title") or media.get("body")): - body_text_parts.append(media["title"]) - body_text_parts.append(media["body"]) + if media.get("title"): + body_parts.append(media["title"]) + if media.get("body"): + body_parts.append(media["body"]) - # Only add body document if we have body text - if body_text_parts: - body_text = " ".join(body_text_parts) - # Truncate if too long - max_text_length = 4000 - if len(body_text) > max_text_length: - body_text = body_text[:max_text_length] + # Set body content + if body_parts: + doc_data["body"] = " ".join(body_parts) - body_docs.append({"id": str(shout.id), "body": body_text}) + # Add metadata + doc_data["metadata"] = { + "layout": getattr(shout, "layout", "article"), + "lang": getattr(shout, "lang", "ru"), + "created_at": getattr(shout, "created_at", 0), + "created_by": getattr(shout, "created_by", 0), + } - # 3. 
Process authors if available - authors = getattr(shout, "authors", []) - for author in authors: - author_id = str(getattr(author, "id", 0)) - if not author_id or author_id == "0": - continue - - # Skip if we've already processed this author - if author_id in author_docs: - continue - - name = getattr(author, "name", "") - - # Combine bio and about fields - bio_parts = [] - bio = getattr(author, "bio", "") - if bio and isinstance(bio, str): - bio_parts.append(bio.strip()) - - about = getattr(author, "about", "") - if about and isinstance(about, str): - bio_parts.append(about.strip()) - - combined_bio = " ".join(bio_parts) - - # Only add if we have author data - if name: - author_docs[author_id] = { - "id": author_id, - "name": name, - "bio": combined_bio, - } + documents.append(doc_data) except Exception: logger.exception(f"Error processing shout {getattr(shout, 'id', 'unknown')} for indexing") total_skipped += 1 - # Convert author dict to list - author_docs_list = list(author_docs.values()) + if documents: + try: + # Index with Muvera + await self.muvera_client.index(documents=documents, batch_size=SEARCH_MAX_BATCH_SIZE) - # Log indexing started message - logger.info("indexing started...") + elapsed = time.time() - start_time + logger.info( + f"Muvera bulk indexing completed in {elapsed:.2f}s: " + f"{len(documents)} documents indexed, {total_skipped} shouts skipped" + ) + except Exception as e: + logger.exception(f"Muvera bulk indexing failed: {e}") + else: + logger.warning("No documents to index") - # Process each endpoint in parallel - indexing_tasks = [ - self._index_endpoint(title_docs, "/bulk-index-titles", "title"), - self._index_endpoint(body_docs, "/bulk-index-bodies", "body"), - self._index_endpoint(author_docs_list, "/bulk-index-authors", "author"), - ] - - await asyncio.gather(*indexing_tasks) - - elapsed = time.time() - start_time - logger.info( - f"Multi-endpoint indexing completed in {elapsed:.2f}s: " - f"{len(title_docs)} titles, {len(body_docs)} bodies, {len(author_docs_list)} authors, " - f"{total_skipped} shouts skipped" - ) - - async def _index_endpoint(self, documents: list, endpoint: str, doc_type: str) -> None: - """Process and index documents to a specific endpoint""" - if not documents: - logger.info(f"No {doc_type} documents to index") - return - - logger.info(f"Indexing {len(documents)} {doc_type} documents") - - # Categorize documents by size - small_docs, medium_docs, large_docs = self._categorize_by_size(documents, doc_type) - - # Process each category with appropriate batch sizes - batch_sizes = { - "small": min(MAX_BATCH_SIZE, 15), - "medium": min(MAX_BATCH_SIZE, 10), - "large": min(MAX_BATCH_SIZE, 3), - } - - for category, docs in [ - ("small", small_docs), - ("medium", medium_docs), - ("large", large_docs), - ]: - if docs: - batch_size = batch_sizes[category] - await self._process_batches(docs, batch_size, endpoint, f"{doc_type}-{category}") - - def _categorize_by_size(self, documents: list, doc_type: str) -> tuple[list, list, list]: - """Categorize documents by size for optimized batch processing""" - small_docs = [] - medium_docs = [] - large_docs = [] - - for doc in documents: - # Extract relevant text based on document type - if doc_type == "title": - text = doc.get("title", "") - elif doc_type == "body": - text = doc.get("body", "") - else: # author - # For authors, consider both name and bio length - text = doc.get("name", "") + " " + doc.get("bio", "") - - text_len = len(text) - - if text_len > 5000: - large_docs.append(doc) - elif text_len > 2000: - 
medium_docs.append(doc) - else: - small_docs.append(doc) - - logger.info( - f"{doc_type.capitalize()} documents categorized: {len(small_docs)} small, {len(medium_docs)} medium, {len(large_docs)} large" - ) - return small_docs, medium_docs, large_docs - - async def _process_batches(self, documents: list, batch_size: int, endpoint: str, batch_prefix: str) -> None: - """Process document batches with retry logic""" - for i in range(0, len(documents), batch_size): - batch = documents[i : i + batch_size] - batch_id = f"{batch_prefix}-{i // batch_size + 1}" - - retry_count = 0 - max_retries = 3 - success = False - - if not self.index_client: - logger.error(f"Index client not available for batch {batch_id}") - return - - while not success and retry_count < max_retries: - try: - response: Response = await self.index_client.post(endpoint, json=batch, timeout=90.0) - - if response.status_code == 422: - error_detail = response.json() - logger.error( - f"Validation error from search service for batch {batch_id}: {self._truncate_error_detail(error_detail)}" - ) - break - - response.raise_for_status() - success = True - - except Exception: - retry_count += 1 - if retry_count >= max_retries: - if len(batch) > 1: - mid = len(batch) // 2 - await self._process_batches( - batch[:mid], - batch_size // 2, - endpoint, - f"{batch_prefix}-{i // batch_size}-A", - ) - await self._process_batches( - batch[mid:], - batch_size // 2, - endpoint, - f"{batch_prefix}-{i // batch_size}-B", - ) - else: - logger.exception( - f"Failed to index single document in batch {batch_id} after {max_retries} attempts" - ) - break - - wait_time = (2**retry_count) + (secrets.randbelow(500) / 1000) - await asyncio.sleep(wait_time) - - def _truncate_error_detail(self, error_detail: Any) -> Any: - """Truncate error details for logging""" - truncated_detail = error_detail.copy() if isinstance(error_detail, dict) else error_detail - - if ( - isinstance(truncated_detail, dict) - and "detail" in truncated_detail - and isinstance(truncated_detail["detail"], list) - ): - for _i, item in enumerate(truncated_detail["detail"]): - if ( - isinstance(item, dict) - and "input" in item - and isinstance(item["input"], dict) - and any(k in item["input"] for k in ["documents", "text"]) - ): - if "documents" in item["input"] and isinstance(item["input"]["documents"], list): - for j, doc in enumerate(item["input"]["documents"]): - if "text" in doc and isinstance(doc["text"], str) and len(doc["text"]) > 100: - item["input"]["documents"][j]["text"] = ( - f"{doc['text'][:100]}... [truncated, total {len(doc['text'])} chars]" - ) - - if ( - "text" in item["input"] - and isinstance(item["input"]["text"], str) - and len(item["input"]["text"]) > 100 - ): - item["input"]["text"] = ( - f"{item['input']['text'][:100]}... 
[truncated, total {len(item['input']['text'])} chars]" - ) - - return truncated_detail - - async def search(self, text: str, limit: int, offset: int) -> list: - """Search documents""" - if not self.available or not self.client: - return [] - - # Check if we can serve from cache - if SEARCH_CACHE_ENABLED and self.cache is not None: - has_cache = await self.cache.has_query(text) - if has_cache: - cached_results = await self.cache.get(text, limit, offset) - if cached_results is not None: - return cached_results - - # Not in cache or cache disabled, perform new search - try: - # Decide whether to prefetch and cache or just get what we need - search_limit = SEARCH_PREFETCH_SIZE if SEARCH_CACHE_ENABLED else limit - - logger.info(f"Searching for: '{text}' (limit={limit}, offset={offset}, search_limit={search_limit})") - - response: Response = await self.client.post( - "/search-combined", - json={"text": text, "limit": search_limit}, - ) - response.raise_for_status() - result = response.json() - formatted_results = result.get("results", []) - - # filter out non‑numeric IDs - valid_results = [r for r in formatted_results if r.get("id", "").isdigit()] - if len(valid_results) != len(formatted_results): - formatted_results = valid_results - - if len(valid_results) != len(formatted_results): - formatted_results = valid_results - - if SEARCH_CACHE_ENABLED and self.cache is not None: - # Store the full prefetch batch, then page it - await self.cache.store(text, formatted_results) - return await self.cache.get(text, limit, offset) or [] - - return formatted_results - except Exception: - logger.exception(f"Search error for '{text}'") - return [] - - async def search_authors(self, text: str, limit: int = 10, offset: int = 0) -> list: - """Search only for authors using the specialized endpoint""" - if not self.available or not self.client or not text.strip(): - return [] - - cache_key = f"author:{text}" - - # Check if we can serve from cache - if SEARCH_CACHE_ENABLED and self.cache is not None: - has_cache = await self.cache.has_query(cache_key) - if has_cache: - cached_results = await self.cache.get(cache_key, limit, offset) - if cached_results is not None: - return cached_results - - # Not in cache or cache disabled, perform new search - try: - search_limit = SEARCH_PREFETCH_SIZE if SEARCH_CACHE_ENABLED else limit - - logger.info( - f"Searching authors for: '{text}' (limit={limit}, offset={offset}, search_limit={search_limit})" - ) - response: Response = await self.client.post("/search-author", json={"text": text, "limit": search_limit}) - response.raise_for_status() - - result = response.json() - author_results = result.get("results", []) - - # Filter out any invalid results if necessary - valid_results = [r for r in author_results if r.get("id", "").isdigit()] - if len(valid_results) != len(author_results): - author_results = valid_results - - if SEARCH_CACHE_ENABLED and self.cache is not None: - # Store the full prefetch batch, then page it - await self.cache.store(cache_key, author_results) - return await self.cache.get(cache_key, limit, offset) or [] - - return author_results[offset : offset + limit] - - except Exception: - logger.exception(f"Error searching authors for '{text}'") - return [] - - async def check_index_status(self) -> dict: - """Get detailed statistics about the search index health""" - if not self.available or not self.client: + async def verify_docs(self, doc_ids: list) -> dict: + """Verify which documents exist in the search index using Muvera""" + if not self.available or not 
self.muvera_client: return {"status": "disabled"} try: - response: Response = await self.client.get("/index-status") - response.raise_for_status() - result = response.json() + logger.info(f"Verifying {len(doc_ids)} documents in Muvera search index") - if result.get("consistency", {}).get("status") != "ok": - null_count = result.get("consistency", {}).get("null_embeddings_count", 0) + # Use Muvera to verify documents + verification_result = await self.muvera_client.verify_documents(doc_ids) + + # Format result to match expected structure + missing_ids = verification_result.get("missing", []) + + logger.info( + f"Document verification complete: {len(missing_ids)} documents missing out of {len(doc_ids)} total" + ) + + return {"missing": missing_ids, "details": {"missing_count": len(missing_ids), "total_count": len(doc_ids)}} + except Exception: + logger.exception("Document verification error") + return {"status": "error", "message": "Document verification error"} + + async def check_index_status(self) -> dict: + """Get detailed statistics about the search index health using Muvera""" + if not self.available or not self.muvera_client: + return {"status": "disabled"} + + try: + # Get Muvera index status + index_status = await self.muvera_client.get_index_status() + + # Check for consistency issues + if index_status.get("consistency", {}).get("status") != "ok": + null_count = index_status.get("consistency", {}).get("null_embeddings_count", 0) if null_count > 0: logger.warning(f"Found {null_count} documents with NULL embeddings") - return result + return index_status except Exception: logger.exception("Failed to check index status") return {"status": "error", "message": "Failed to check index status"} async def close(self) -> None: """Close connections and release resources""" - if hasattr(self, "client") and self.client: - await self.client.aclose() - if hasattr(self, "index_client") and self.index_client: - await self.index_client.aclose() + if hasattr(self, "muvera_client") and self.muvera_client: + try: + await self.muvera_client.close() + except Exception as e: + logger.warning(f"Error closing Muvera client: {e}") logger.info("Search service closed") # Create the search service singleton search_service = SearchService() -# API-compatible function to perform a search - +# API-compatible functions for backward compatibility async def search_text(text: str, limit: int = 200, offset: int = 0) -> list: - payload = [] + """Search text using Muvera - backward compatibility function""" if search_service.available: - payload = await search_service.search(text, limit, offset) - return payload + return await search_service.search(text, limit, offset) + return [] async def search_author_text(text: str, limit: int = 10, offset: int = 0) -> list: - """Search authors API helper function""" + """Search authors using Muvera - backward compatibility function""" if search_service.available: return await search_service.search_authors(text, limit, offset) return [] async def get_search_count(text: str) -> int: - """Get count of title search results""" + """Get count of search results - backward compatibility function""" if not search_service.available: return 0 - - if SEARCH_CACHE_ENABLED and search_service.cache is not None and await search_service.cache.has_query(text): - return await search_service.cache.get_total_count(text) - - # If not found in cache, fetch from endpoint - return len(await search_text(text, SEARCH_PREFETCH_SIZE, 0)) + # Get results and count them + results = await search_text(text, 
SEARCH_PREFETCH_SIZE, 0) + return len(results) async def get_author_search_count(text: str) -> int: - """Get count of author search results""" + """Get count of author search results - backward compatibility function""" if not search_service.available: return 0 - - cache_key = f"author:{text}" - if SEARCH_CACHE_ENABLED and search_service.cache is not None and await search_service.cache.has_query(cache_key): - return await search_service.cache.get_total_count(cache_key) - - # If not found in cache, fetch from endpoint - return len(await search_author_text(text, SEARCH_PREFETCH_SIZE, 0)) + # Get results and count them + results = await search_author_text(text, SEARCH_PREFETCH_SIZE, 0) + return len(results) async def initialize_search_index(shouts_data: list) -> None: - """Initialize search index with existing data during application startup""" - if not SEARCH_ENABLED: + """Initialize search index with existing data - backward compatibility function""" + if not search_service.available: + logger.warning("Search service not available for initialization") return - if not shouts_data: - return - - info = await search_service.info() - if info.get("status") in ["error", "unavailable", "disabled"]: - return - - index_stats = info.get("index_stats", {}) - indexed_doc_count = index_stats.get("total_count", 0) - - index_status = await search_service.check_index_status() - if index_status.get("status") == "inconsistent": - problem_ids = index_status.get("consistency", {}).get("null_embeddings_sample", []) - - if problem_ids: - problem_docs = [shout for shout in shouts_data if str(shout.id) in problem_ids] - if problem_docs: - await search_service.bulk_index(problem_docs) - - # Only consider shouts with body content for body verification - def has_body_content(shout: Any) -> bool: - for field in ["subtitle", "lead", "body"]: - if ( - getattr(shout, field, None) - and isinstance(getattr(shout, field, None), str) - and getattr(shout, field).strip() - ): - return True - media = getattr(shout, "media", None) - if media: - if isinstance(media, str): - try: - media_json = json.loads(media) - if isinstance(media_json, dict) and (media_json.get("title") or media_json.get("body")): - return True - except Exception: - return True - elif isinstance(media, dict) and (media.get("title") or media.get("body")): - return True - return False - - shouts_with_body = [shout for shout in shouts_data if has_body_content(shout)] - body_ids = [str(shout.id) for shout in shouts_with_body] - - if abs(indexed_doc_count - len(shouts_data)) > 10: - doc_ids = [str(shout.id) for shout in shouts_data] - verification = await search_service.verify_docs(doc_ids) - if verification.get("status") == "error": - return - # Only reindex missing docs that actually have body content - missing_ids = [mid for mid in verification.get("missing", []) if mid in body_ids] - if missing_ids: - missing_docs = [shout for shout in shouts_with_body if str(shout.id) in missing_ids] - await search_service.bulk_index(missing_docs) - else: - pass - try: - test_query = "test" - # Use body search since that's most likely to return results - test_results = await search_text(test_query, 5) - - if test_results: - categories = set() - for result in test_results: - result_id = result.get("id") - matching_shouts = [s for s in shouts_data if str(s.id) == result_id] - if matching_shouts and hasattr(matching_shouts[0], "category"): - categories.add(getattr(matching_shouts[0], "category", "unknown")) - except Exception as ex: - logger.warning(f"Test search failed during 
initialization: {ex}") + # Check if we need to reindex + if len(shouts_data) > 0: + await search_service.bulk_index(shouts_data) + logger.info(f"Initialized search index with {len(shouts_data)} documents") + except Exception as e: + logger.exception(f"Failed to initialize search index: {e}") async def check_search_service() -> None: - info = await search_service.info() - if info.get("status") in ["error", "unavailable", "disabled"]: - logger.debug("Search service is not available") - else: + """Check if search service is available - backward compatibility function""" + if search_service.available: logger.info("Search service is available and ready") + else: + logger.warning("Search service is not available") -# Initialize search index in the background async def initialize_search_index_background() -> None: - """ - Запускает индексацию поиска в фоновом режиме с низким приоритетом. - """ + """Initialize search index in background - backward compatibility function""" try: - logger.info("Запуск фоновой индексации поиска...") - - # Здесь бы был код загрузки данных и индексации - # Пока что заглушка - - logger.info("Фоновая индексация поиска завершена") + logger.info("Background search index initialization started") + # This function is kept for compatibility but doesn't do much + # since Muvera handles indexing automatically + logger.info("Background search index initialization completed") except Exception: - logger.exception("Ошибка фоновой индексации поиска") + logger.exception("Error in background search index initialization") diff --git a/settings.py b/settings.py index 9cb0866e..e91ecc6b 100644 --- a/settings.py +++ b/settings.py @@ -91,4 +91,8 @@ MAILGUN_API_KEY = os.getenv("MAILGUN_API_KEY", "") MAILGUN_DOMAIN = os.getenv("MAILGUN_DOMAIN", "discours.io") -TXTAI_SERVICE_URL = os.environ.get("TXTAI_SERVICE_URL", "none") +# Search service configuration +SEARCH_MAX_BATCH_SIZE = int(os.environ.get("SEARCH_MAX_BATCH_SIZE", "25")) +SEARCH_CACHE_ENABLED = bool(os.environ.get("SEARCH_CACHE_ENABLED", "true").lower() in ["true", "1", "yes"]) +SEARCH_CACHE_TTL_SECONDS = int(os.environ.get("SEARCH_CACHE_TTL_SECONDS", "300")) +SEARCH_PREFETCH_SIZE = int(os.environ.get("SEARCH_PREFETCH_SIZE", "200")) diff --git a/storage/env.py b/storage/env.py index faa2b30f..5bc6ad5d 100644 --- a/storage/env.py +++ b/storage/env.py @@ -70,9 +70,10 @@ class EnvService: "REDIS_PASSWORD": "redis", "REDIS_DB": "redis", # Search - "SEARCH_API_KEY": "search", - "ELASTICSEARCH_URL": "search", - "SEARCH_INDEX": "search", + "SEARCH_MAX_BATCH_SIZE": "search", + "SEARCH_PREFETCH_SIZE": "search", + "SEARCH_CACHE_ENABLED": "search", + "SEARCH_CACHE_TTL_SECONDS": "search", # Integrations "GOOGLE_ANALYTICS_ID": "integrations", "SENTRY_DSN": "integrations", @@ -108,7 +109,6 @@ class EnvService: "OAUTH_GITHUB_CLIENT_SECRET", "POSTGRES_PASSWORD", "REDIS_PASSWORD", - "SEARCH_API_KEY", "SENTRY_DSN", "SMTP_PASSWORD", } @@ -140,9 +140,10 @@ class EnvService: "REDIS_PORT": "Порт Redis", "REDIS_PASSWORD": "Пароль Redis", "REDIS_DB": "Номер базы данных Redis", - "SEARCH_API_KEY": "API ключ для поиска", - "ELASTICSEARCH_URL": "URL Elasticsearch", - "SEARCH_INDEX": "Индекс поиска", + "SEARCH_MAX_BATCH_SIZE": "Максимальный размер пакета для индексации", + "SEARCH_PREFETCH_SIZE": "Размер кеша поиска", + "SEARCH_CACHE_ENABLED": "Включить кеширование поиска", + "SEARCH_CACHE_TTL_SECONDS": "Время жизни кеша поиска", "GOOGLE_ANALYTICS_ID": "Google Analytics ID", "SENTRY_DSN": "Sentry DSN", "SMTP_HOST": "SMTP сервер", diff --git a/uv.lock b/uv.lock 
index 839eafcb..7ba6f6b2 100644
--- a/uv.lock
+++ b/uv.lock
@@ -413,7 +413,7 @@ wheels = [
 
 [[package]]
 name = "discours-core"
-version = "0.9.8"
+version = "0.9.9"
 source = { editable = "." }
 dependencies = [
     { name = "alembic" },
@@ -425,6 +425,7 @@ dependencies = [
     { name = "gql" },
     { name = "granian" },
     { name = "httpx" },
+    { name = "muvera" },
     { name = "orjson" },
     { name = "psycopg2-binary" },
     { name = "pydantic" },
@@ -476,6 +477,7 @@ requires-dist = [
     { name = "gql" },
     { name = "granian" },
     { name = "httpx" },
+    { name = "muvera" },
     { name = "orjson" },
     { name = "psycopg2-binary" },
     { name = "pydantic" },
@@ -1034,6 +1036,19 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/fd/69/b547032297c7e63ba2af494edba695d781af8a0c6e89e4d06cf848b21d80/multidict-6.6.4-py3-none-any.whl", hash = "sha256:27d8f8e125c07cb954e54d75d04905a9bba8a439c1d84aca94949d4d03d8601c", size = 12313, upload-time = "2025-08-11T12:08:46.891Z" },
 ]
 
+[[package]]
+name = "muvera"
+version = "0.2.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "cffi" },
+    { name = "numpy" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/be/57/8624c02b45978e7dce6cbe91f664284718055ce67e5b2d56c6ea3c81045c/muvera-0.2.0.tar.gz", hash = "sha256:61390f9b2e32ffb7f8022a2efc7acaef404fb2556883d14a3c4f5b527c59a477", size = 62497, upload-time = "2025-07-12T14:29:41.165Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/73/6a/ef5c0d64c3eb2a369042a7f2dc8617e71cc9c1558746b9fc3c50799f6130/muvera-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ff216042f6253473f44d8e4405657c60f030ea4d8238ca2afd50876d7876f31a", size = 182682, upload-time = "2025-07-12T14:29:38.833Z" },
+]
+
 [[package]]
 name = "mypy"
 version = "1.17.1"
@@ -1081,6 +1096,87 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" },
 ]
 
+[[package]]
+name = "numpy"
+version = "2.3.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/37/7d/3fec4199c5ffb892bed55cff901e4f39a58c81df9c44c280499e92cad264/numpy-2.3.2.tar.gz", hash = "sha256:e0486a11ec30cdecb53f184d496d1c6a20786c81e55e41640270130056f8ee48", size = 20489306, upload-time = "2025-07-24T21:32:07.553Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/96/26/1320083986108998bd487e2931eed2aeedf914b6e8905431487543ec911d/numpy-2.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:852ae5bed3478b92f093e30f785c98e0cb62fa0a939ed057c31716e18a7a22b9", size = 21259016, upload-time = "2025-07-24T20:24:35.214Z" },
+    { url = "https://files.pythonhosted.org/packages/c4/2b/792b341463fa93fc7e55abbdbe87dac316c5b8cb5e94fb7a59fb6fa0cda5/numpy-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a0e27186e781a69959d0230dd9909b5e26024f8da10683bd6344baea1885168", size = 14451158, upload-time = "2025-07-24T20:24:58.397Z" },
+    { url = "https://files.pythonhosted.org/packages/b7/13/e792d7209261afb0c9f4759ffef6135b35c77c6349a151f488f531d13595/numpy-2.3.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:f0a1a8476ad77a228e41619af2fa9505cf69df928e9aaa165746584ea17fed2b", size = 5379817, upload-time = "2025-07-24T20:25:07.746Z" },
+    { url = "https://files.pythonhosted.org/packages/49/ce/055274fcba4107c022b2113a213c7287346563f48d62e8d2a5176ad93217/numpy-2.3.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:cbc95b3813920145032412f7e33d12080f11dc776262df1712e1638207dde9e8", size = 6913606, upload-time = "2025-07-24T20:25:18.84Z" },
+    { url = "https://files.pythonhosted.org/packages/17/f2/e4d72e6bc5ff01e2ab613dc198d560714971900c03674b41947e38606502/numpy-2.3.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f75018be4980a7324edc5930fe39aa391d5734531b1926968605416ff58c332d", size = 14589652, upload-time = "2025-07-24T20:25:40.356Z" },
+    { url = "https://files.pythonhosted.org/packages/c8/b0/fbeee3000a51ebf7222016e2939b5c5ecf8000a19555d04a18f1e02521b8/numpy-2.3.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20b8200721840f5621b7bd03f8dcd78de33ec522fc40dc2641aa09537df010c3", size = 16938816, upload-time = "2025-07-24T20:26:05.721Z" },
+    { url = "https://files.pythonhosted.org/packages/a9/ec/2f6c45c3484cc159621ea8fc000ac5a86f1575f090cac78ac27193ce82cd/numpy-2.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f91e5c028504660d606340a084db4b216567ded1056ea2b4be4f9d10b67197f", size = 16370512, upload-time = "2025-07-24T20:26:30.545Z" },
+    { url = "https://files.pythonhosted.org/packages/b5/01/dd67cf511850bd7aefd6347aaae0956ed415abea741ae107834aae7d6d4e/numpy-2.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:fb1752a3bb9a3ad2d6b090b88a9a0ae1cd6f004ef95f75825e2f382c183b2097", size = 18884947, upload-time = "2025-07-24T20:26:58.24Z" },
+    { url = "https://files.pythonhosted.org/packages/a7/17/2cf60fd3e6a61d006778735edf67a222787a8c1a7842aed43ef96d777446/numpy-2.3.2-cp311-cp311-win32.whl", hash = "sha256:4ae6863868aaee2f57503c7a5052b3a2807cf7a3914475e637a0ecd366ced220", size = 6599494, upload-time = "2025-07-24T20:27:09.786Z" },
+    { url = "https://files.pythonhosted.org/packages/d5/03/0eade211c504bda872a594f045f98ddcc6caef2b7c63610946845e304d3f/numpy-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:240259d6564f1c65424bcd10f435145a7644a65a6811cfc3201c4a429ba79170", size = 13087889, upload-time = "2025-07-24T20:27:29.558Z" },
+    { url = "https://files.pythonhosted.org/packages/13/32/2c7979d39dafb2a25087e12310fc7f3b9d3c7d960df4f4bc97955ae0ce1d/numpy-2.3.2-cp311-cp311-win_arm64.whl", hash = "sha256:4209f874d45f921bde2cff1ffcd8a3695f545ad2ffbef6d3d3c6768162efab89", size = 10459560, upload-time = "2025-07-24T20:27:46.803Z" },
+    { url = "https://files.pythonhosted.org/packages/00/6d/745dd1c1c5c284d17725e5c802ca4d45cfc6803519d777f087b71c9f4069/numpy-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bc3186bea41fae9d8e90c2b4fb5f0a1f5a690682da79b92574d63f56b529080b", size = 20956420, upload-time = "2025-07-24T20:28:18.002Z" },
+    { url = "https://files.pythonhosted.org/packages/bc/96/e7b533ea5740641dd62b07a790af5d9d8fec36000b8e2d0472bd7574105f/numpy-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f4f0215edb189048a3c03bd5b19345bdfa7b45a7a6f72ae5945d2a28272727f", size = 14184660, upload-time = "2025-07-24T20:28:39.522Z" },
+    { url = "https://files.pythonhosted.org/packages/2b/53/102c6122db45a62aa20d1b18c9986f67e6b97e0d6fbc1ae13e3e4c84430c/numpy-2.3.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:8b1224a734cd509f70816455c3cffe13a4f599b1bf7130f913ba0e2c0b2006c0", size = 5113382, upload-time = "2025-07-24T20:28:48.544Z" },
+    { url = "https://files.pythonhosted.org/packages/2b/21/376257efcbf63e624250717e82b4fae93d60178f09eb03ed766dbb48ec9c/numpy-2.3.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3dcf02866b977a38ba3ec10215220609ab9667378a9e2150615673f3ffd6c73b", size = 6647258, upload-time = "2025-07-24T20:28:59.104Z" },
+    { url = "https://files.pythonhosted.org/packages/91/ba/f4ebf257f08affa464fe6036e13f2bf9d4642a40228781dc1235da81be9f/numpy-2.3.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:572d5512df5470f50ada8d1972c5f1082d9a0b7aa5944db8084077570cf98370", size = 14281409, upload-time = "2025-07-24T20:40:30.298Z" },
+    { url = "https://files.pythonhosted.org/packages/59/ef/f96536f1df42c668cbacb727a8c6da7afc9c05ece6d558927fb1722693e1/numpy-2.3.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8145dd6d10df13c559d1e4314df29695613575183fa2e2d11fac4c208c8a1f73", size = 16641317, upload-time = "2025-07-24T20:40:56.625Z" },
+    { url = "https://files.pythonhosted.org/packages/f6/a7/af813a7b4f9a42f498dde8a4c6fcbff8100eed00182cc91dbaf095645f38/numpy-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:103ea7063fa624af04a791c39f97070bf93b96d7af7eb23530cd087dc8dbe9dc", size = 16056262, upload-time = "2025-07-24T20:41:20.797Z" },
+    { url = "https://files.pythonhosted.org/packages/8b/5d/41c4ef8404caaa7f05ed1cfb06afe16a25895260eacbd29b4d84dff2920b/numpy-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fc927d7f289d14f5e037be917539620603294454130b6de200091e23d27dc9be", size = 18579342, upload-time = "2025-07-24T20:41:50.753Z" },
+    { url = "https://files.pythonhosted.org/packages/a1/4f/9950e44c5a11636f4a3af6e825ec23003475cc9a466edb7a759ed3ea63bd/numpy-2.3.2-cp312-cp312-win32.whl", hash = "sha256:d95f59afe7f808c103be692175008bab926b59309ade3e6d25009e9a171f7036", size = 6320610, upload-time = "2025-07-24T20:42:01.551Z" },
+    { url = "https://files.pythonhosted.org/packages/7c/2f/244643a5ce54a94f0a9a2ab578189c061e4a87c002e037b0829dd77293b6/numpy-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:9e196ade2400c0c737d93465327d1ae7c06c7cb8a1756121ebf54b06ca183c7f", size = 12786292, upload-time = "2025-07-24T20:42:20.738Z" },
+    { url = "https://files.pythonhosted.org/packages/54/cd/7b5f49d5d78db7badab22d8323c1b6ae458fbf86c4fdfa194ab3cd4eb39b/numpy-2.3.2-cp312-cp312-win_arm64.whl", hash = "sha256:ee807923782faaf60d0d7331f5e86da7d5e3079e28b291973c545476c2b00d07", size = 10194071, upload-time = "2025-07-24T20:42:36.657Z" },
+    { url = "https://files.pythonhosted.org/packages/1c/c0/c6bb172c916b00700ed3bf71cb56175fd1f7dbecebf8353545d0b5519f6c/numpy-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c8d9727f5316a256425892b043736d63e89ed15bbfe6556c5ff4d9d4448ff3b3", size = 20949074, upload-time = "2025-07-24T20:43:07.813Z" },
+    { url = "https://files.pythonhosted.org/packages/20/4e/c116466d22acaf4573e58421c956c6076dc526e24a6be0903219775d862e/numpy-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:efc81393f25f14d11c9d161e46e6ee348637c0a1e8a54bf9dedc472a3fae993b", size = 14177311, upload-time = "2025-07-24T20:43:29.335Z" },
+    { url = "https://files.pythonhosted.org/packages/78/45/d4698c182895af189c463fc91d70805d455a227261d950e4e0f1310c2550/numpy-2.3.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dd937f088a2df683cbb79dda9a772b62a3e5a8a7e76690612c2737f38c6ef1b6", size = 5106022, upload-time = "2025-07-24T20:43:37.999Z" },
+    { url = "https://files.pythonhosted.org/packages/9f/76/3e6880fef4420179309dba72a8c11f6166c431cf6dee54c577af8906f914/numpy-2.3.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:11e58218c0c46c80509186e460d79fbdc9ca1eb8d8aee39d8f2dc768eb781089", size = 6640135, upload-time = "2025-07-24T20:43:49.28Z" },
+    { url = "https://files.pythonhosted.org/packages/34/fa/87ff7f25b3c4ce9085a62554460b7db686fef1e0207e8977795c7b7d7ba1/numpy-2.3.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5ad4ebcb683a1f99f4f392cc522ee20a18b2bb12a2c1c42c3d48d5a1adc9d3d2", size = 14278147, upload-time = "2025-07-24T20:44:10.328Z" },
+    { url = "https://files.pythonhosted.org/packages/1d/0f/571b2c7a3833ae419fe69ff7b479a78d313581785203cc70a8db90121b9a/numpy-2.3.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:938065908d1d869c7d75d8ec45f735a034771c6ea07088867f713d1cd3bbbe4f", size = 16635989, upload-time = "2025-07-24T20:44:34.88Z" },
+    { url = "https://files.pythonhosted.org/packages/24/5a/84ae8dca9c9a4c592fe11340b36a86ffa9fd3e40513198daf8a97839345c/numpy-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:66459dccc65d8ec98cc7df61307b64bf9e08101f9598755d42d8ae65d9a7a6ee", size = 16053052, upload-time = "2025-07-24T20:44:58.872Z" },
+    { url = "https://files.pythonhosted.org/packages/57/7c/e5725d99a9133b9813fcf148d3f858df98511686e853169dbaf63aec6097/numpy-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a7af9ed2aa9ec5950daf05bb11abc4076a108bd3c7db9aa7251d5f107079b6a6", size = 18577955, upload-time = "2025-07-24T20:45:26.714Z" },
+    { url = "https://files.pythonhosted.org/packages/ae/11/7c546fcf42145f29b71e4d6f429e96d8d68e5a7ba1830b2e68d7418f0bbd/numpy-2.3.2-cp313-cp313-win32.whl", hash = "sha256:906a30249315f9c8e17b085cc5f87d3f369b35fedd0051d4a84686967bdbbd0b", size = 6311843, upload-time = "2025-07-24T20:49:24.444Z" },
+    { url = "https://files.pythonhosted.org/packages/aa/6f/a428fd1cb7ed39b4280d057720fed5121b0d7754fd2a9768640160f5517b/numpy-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:c63d95dc9d67b676e9108fe0d2182987ccb0f11933c1e8959f42fa0da8d4fa56", size = 12782876, upload-time = "2025-07-24T20:49:43.227Z" },
+    { url = "https://files.pythonhosted.org/packages/65/85/4ea455c9040a12595fb6c43f2c217257c7b52dd0ba332c6a6c1d28b289fe/numpy-2.3.2-cp313-cp313-win_arm64.whl", hash = "sha256:b05a89f2fb84d21235f93de47129dd4f11c16f64c87c33f5e284e6a3a54e43f2", size = 10192786, upload-time = "2025-07-24T20:49:59.443Z" },
+    { url = "https://files.pythonhosted.org/packages/80/23/8278f40282d10c3f258ec3ff1b103d4994bcad78b0cba9208317f6bb73da/numpy-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4e6ecfeddfa83b02318f4d84acf15fbdbf9ded18e46989a15a8b6995dfbf85ab", size = 21047395, upload-time = "2025-07-24T20:45:58.821Z" },
+    { url = "https://files.pythonhosted.org/packages/1f/2d/624f2ce4a5df52628b4ccd16a4f9437b37c35f4f8a50d00e962aae6efd7a/numpy-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:508b0eada3eded10a3b55725b40806a4b855961040180028f52580c4729916a2", size = 14300374, upload-time = "2025-07-24T20:46:20.207Z" },
+    { url = "https://files.pythonhosted.org/packages/f6/62/ff1e512cdbb829b80a6bd08318a58698867bca0ca2499d101b4af063ee97/numpy-2.3.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:754d6755d9a7588bdc6ac47dc4ee97867271b17cee39cb87aef079574366db0a", size = 5228864, upload-time = "2025-07-24T20:46:30.58Z" },
+    { url = "https://files.pythonhosted.org/packages/7d/8e/74bc18078fff03192d4032cfa99d5a5ca937807136d6f5790ce07ca53515/numpy-2.3.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f66e7d2b2d7712410d3bc5684149040ef5f19856f20277cd17ea83e5006286", size = 6737533, upload-time = "2025-07-24T20:46:46.111Z" },
+    { url = "https://files.pythonhosted.org/packages/19/ea/0731efe2c9073ccca5698ef6a8c3667c4cf4eea53fcdcd0b50140aba03bc/numpy-2.3.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de6ea4e5a65d5a90c7d286ddff2b87f3f4ad61faa3db8dabe936b34c2275b6f8", size = 14352007, upload-time = "2025-07-24T20:47:07.1Z" },
+    { url = "https://files.pythonhosted.org/packages/cf/90/36be0865f16dfed20f4bc7f75235b963d5939707d4b591f086777412ff7b/numpy-2.3.2-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3ef07ec8cbc8fc9e369c8dcd52019510c12da4de81367d8b20bc692aa07573a", size = 16701914, upload-time = "2025-07-24T20:47:32.459Z" },
+    { url = "https://files.pythonhosted.org/packages/94/30/06cd055e24cb6c38e5989a9e747042b4e723535758e6153f11afea88c01b/numpy-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:27c9f90e7481275c7800dc9c24b7cc40ace3fdb970ae4d21eaff983a32f70c91", size = 16132708, upload-time = "2025-07-24T20:47:58.129Z" },
+    { url = "https://files.pythonhosted.org/packages/9a/14/ecede608ea73e58267fd7cb78f42341b3b37ba576e778a1a06baffbe585c/numpy-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:07b62978075b67eee4065b166d000d457c82a1efe726cce608b9db9dd66a73a5", size = 18651678, upload-time = "2025-07-24T20:48:25.402Z" },
+    { url = "https://files.pythonhosted.org/packages/40/f3/2fe6066b8d07c3685509bc24d56386534c008b462a488b7f503ba82b8923/numpy-2.3.2-cp313-cp313t-win32.whl", hash = "sha256:c771cfac34a4f2c0de8e8c97312d07d64fd8f8ed45bc9f5726a7e947270152b5", size = 6441832, upload-time = "2025-07-24T20:48:37.181Z" },
+    { url = "https://files.pythonhosted.org/packages/0b/ba/0937d66d05204d8f28630c9c60bc3eda68824abde4cf756c4d6aad03b0c6/numpy-2.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:72dbebb2dcc8305c431b2836bcc66af967df91be793d63a24e3d9b741374c450", size = 12927049, upload-time = "2025-07-24T20:48:56.24Z" },
+    { url = "https://files.pythonhosted.org/packages/e9/ed/13542dd59c104d5e654dfa2ac282c199ba64846a74c2c4bcdbc3a0f75df1/numpy-2.3.2-cp313-cp313t-win_arm64.whl", hash = "sha256:72c6df2267e926a6d5286b0a6d556ebe49eae261062059317837fda12ddf0c1a", size = 10262935, upload-time = "2025-07-24T20:49:13.136Z" },
+    { url = "https://files.pythonhosted.org/packages/c9/7c/7659048aaf498f7611b783e000c7268fcc4dcf0ce21cd10aad7b2e8f9591/numpy-2.3.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:448a66d052d0cf14ce9865d159bfc403282c9bc7bb2a31b03cc18b651eca8b1a", size = 20950906, upload-time = "2025-07-24T20:50:30.346Z" },
+    { url = "https://files.pythonhosted.org/packages/80/db/984bea9d4ddf7112a04cfdfb22b1050af5757864cfffe8e09e44b7f11a10/numpy-2.3.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:546aaf78e81b4081b2eba1d105c3b34064783027a06b3ab20b6eba21fb64132b", size = 14185607, upload-time = "2025-07-24T20:50:51.923Z" },
+    { url = "https://files.pythonhosted.org/packages/e4/76/b3d6f414f4eca568f469ac112a3b510938d892bc5a6c190cb883af080b77/numpy-2.3.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:87c930d52f45df092f7578889711a0768094debf73cfcde105e2d66954358125", size = 5114110, upload-time = "2025-07-24T20:51:01.041Z" },
+    { url = "https://files.pythonhosted.org/packages/9e/d2/6f5e6826abd6bca52392ed88fe44a4b52aacb60567ac3bc86c67834c3a56/numpy-2.3.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:8dc082ea901a62edb8f59713c6a7e28a85daddcb67454c839de57656478f5b19", size = 6642050, upload-time = "2025-07-24T20:51:11.64Z" },
+    { url = "https://files.pythonhosted.org/packages/c4/43/f12b2ade99199e39c73ad182f103f9d9791f48d885c600c8e05927865baf/numpy-2.3.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:af58de8745f7fa9ca1c0c7c943616c6fe28e75d0c81f5c295810e3c83b5be92f", size = 14296292, upload-time = "2025-07-24T20:51:33.488Z" },
+    { url = "https://files.pythonhosted.org/packages/5d/f9/77c07d94bf110a916b17210fac38680ed8734c236bfed9982fd8524a7b47/numpy-2.3.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed5527c4cf10f16c6d0b6bee1f89958bccb0ad2522c8cadc2efd318bcd545f5", size = 16638913, upload-time = "2025-07-24T20:51:58.517Z" },
+    { url = "https://files.pythonhosted.org/packages/9b/d1/9d9f2c8ea399cc05cfff8a7437453bd4e7d894373a93cdc46361bbb49a7d/numpy-2.3.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:095737ed986e00393ec18ec0b21b47c22889ae4b0cd2d5e88342e08b01141f58", size = 16071180, upload-time = "2025-07-24T20:52:22.827Z" },
+    { url = "https://files.pythonhosted.org/packages/4c/41/82e2c68aff2a0c9bf315e47d61951099fed65d8cb2c8d9dc388cb87e947e/numpy-2.3.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5e40e80299607f597e1a8a247ff8d71d79c5b52baa11cc1cce30aa92d2da6e0", size = 18576809, upload-time = "2025-07-24T20:52:51.015Z" },
+    { url = "https://files.pythonhosted.org/packages/14/14/4b4fd3efb0837ed252d0f583c5c35a75121038a8c4e065f2c259be06d2d8/numpy-2.3.2-cp314-cp314-win32.whl", hash = "sha256:7d6e390423cc1f76e1b8108c9b6889d20a7a1f59d9a60cac4a050fa734d6c1e2", size = 6366410, upload-time = "2025-07-24T20:56:44.949Z" },
+    { url = "https://files.pythonhosted.org/packages/11/9e/b4c24a6b8467b61aced5c8dc7dcfce23621baa2e17f661edb2444a418040/numpy-2.3.2-cp314-cp314-win_amd64.whl", hash = "sha256:b9d0878b21e3918d76d2209c924ebb272340da1fb51abc00f986c258cd5e957b", size = 12918821, upload-time = "2025-07-24T20:57:06.479Z" },
+    { url = "https://files.pythonhosted.org/packages/0e/0f/0dc44007c70b1007c1cef86b06986a3812dd7106d8f946c09cfa75782556/numpy-2.3.2-cp314-cp314-win_arm64.whl", hash = "sha256:2738534837c6a1d0c39340a190177d7d66fdf432894f469728da901f8f6dc910", size = 10477303, upload-time = "2025-07-24T20:57:22.879Z" },
+    { url = "https://files.pythonhosted.org/packages/8b/3e/075752b79140b78ddfc9c0a1634d234cfdbc6f9bbbfa6b7504e445ad7d19/numpy-2.3.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:4d002ecf7c9b53240be3bb69d80f86ddbd34078bae04d87be81c1f58466f264e", size = 21047524, upload-time = "2025-07-24T20:53:22.086Z" },
+    { url = "https://files.pythonhosted.org/packages/fe/6d/60e8247564a72426570d0e0ea1151b95ce5bd2f1597bb878a18d32aec855/numpy-2.3.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:293b2192c6bcce487dbc6326de5853787f870aeb6c43f8f9c6496db5b1781e45", size = 14300519, upload-time = "2025-07-24T20:53:44.053Z" },
+    { url = "https://files.pythonhosted.org/packages/4d/73/d8326c442cd428d47a067070c3ac6cc3b651a6e53613a1668342a12d4479/numpy-2.3.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:0a4f2021a6da53a0d580d6ef5db29947025ae8b35b3250141805ea9a32bbe86b", size = 5228972, upload-time = "2025-07-24T20:53:53.81Z" },
+    { url = "https://files.pythonhosted.org/packages/34/2e/e71b2d6dad075271e7079db776196829019b90ce3ece5c69639e4f6fdc44/numpy-2.3.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:9c144440db4bf3bb6372d2c3e49834cc0ff7bb4c24975ab33e01199e645416f2", size = 6737439, upload-time = "2025-07-24T20:54:04.742Z" },
+    { url = "https://files.pythonhosted.org/packages/15/b0/d004bcd56c2c5e0500ffc65385eb6d569ffd3363cb5e593ae742749b2daa/numpy-2.3.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f92d6c2a8535dc4fe4419562294ff957f83a16ebdec66df0805e473ffaad8bd0", size = 14352479, upload-time = "2025-07-24T20:54:25.819Z" },
+    { url = "https://files.pythonhosted.org/packages/11/e3/285142fcff8721e0c99b51686426165059874c150ea9ab898e12a492e291/numpy-2.3.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cefc2219baa48e468e3db7e706305fcd0c095534a192a08f31e98d83a7d45fb0", size = 16702805, upload-time = "2025-07-24T20:54:50.814Z" },
+    { url = "https://files.pythonhosted.org/packages/33/c3/33b56b0e47e604af2c7cd065edca892d180f5899599b76830652875249a3/numpy-2.3.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:76c3e9501ceb50b2ff3824c3589d5d1ab4ac857b0ee3f8f49629d0de55ecf7c2", size = 16133830, upload-time = "2025-07-24T20:55:17.306Z" },
+    { url = "https://files.pythonhosted.org/packages/6e/ae/7b1476a1f4d6a48bc669b8deb09939c56dd2a439db1ab03017844374fb67/numpy-2.3.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:122bf5ed9a0221b3419672493878ba4967121514b1d7d4656a7580cd11dddcbf", size = 18652665, upload-time = "2025-07-24T20:55:46.665Z" },
+    { url = "https://files.pythonhosted.org/packages/14/ba/5b5c9978c4bb161034148ade2de9db44ec316fab89ce8c400db0e0c81f86/numpy-2.3.2-cp314-cp314t-win32.whl", hash = "sha256:6f1ae3dcb840edccc45af496f312528c15b1f79ac318169d094e85e4bb35fdf1", size = 6514777, upload-time = "2025-07-24T20:55:57.66Z" },
+    { url = "https://files.pythonhosted.org/packages/eb/46/3dbaf0ae7c17cdc46b9f662c56da2054887b8d9e737c1476f335c83d33db/numpy-2.3.2-cp314-cp314t-win_amd64.whl", hash = "sha256:087ffc25890d89a43536f75c5fe8770922008758e8eeeef61733957041ed2f9b", size = 13111856, upload-time = "2025-07-24T20:56:17.318Z" },
+    { url = "https://files.pythonhosted.org/packages/c1/9e/1652778bce745a67b5fe05adde60ed362d38eb17d919a540e813d30f6874/numpy-2.3.2-cp314-cp314t-win_arm64.whl", hash = "sha256:092aeb3449833ea9c0bf0089d70c29ae480685dd2377ec9cdbbb620257f84631", size = 10544226, upload-time = "2025-07-24T20:56:34.509Z" },
+    { url = "https://files.pythonhosted.org/packages/cf/ea/50ebc91d28b275b23b7128ef25c3d08152bc4068f42742867e07a870a42a/numpy-2.3.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:14a91ebac98813a49bc6aa1a0dfc09513dcec1d97eaf31ca21a87221a1cdcb15", size = 21130338, upload-time = "2025-07-24T20:57:54.37Z" },
+    { url = "https://files.pythonhosted.org/packages/9f/57/cdd5eac00dd5f137277355c318a955c0d8fb8aa486020c22afd305f8b88f/numpy-2.3.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:71669b5daae692189540cffc4c439468d35a3f84f0c88b078ecd94337f6cb0ec", size = 14375776, upload-time = "2025-07-24T20:58:16.303Z" },
+    { url = "https://files.pythonhosted.org/packages/83/85/27280c7f34fcd305c2209c0cdca4d70775e4859a9eaa92f850087f8dea50/numpy-2.3.2-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:69779198d9caee6e547adb933941ed7520f896fd9656834c300bdf4dd8642712", size = 5304882, upload-time = "2025-07-24T20:58:26.199Z" },
+    { url = "https://files.pythonhosted.org/packages/48/b4/6500b24d278e15dd796f43824e69939d00981d37d9779e32499e823aa0aa/numpy-2.3.2-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2c3271cc4097beb5a60f010bcc1cc204b300bb3eafb4399376418a83a1c6373c", size = 6818405, upload-time = "2025-07-24T20:58:37.341Z" },
+    { url = "https://files.pythonhosted.org/packages/9b/c9/142c1e03f199d202da8e980c2496213509291b6024fd2735ad28ae7065c7/numpy-2.3.2-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8446acd11fe3dc1830568c941d44449fd5cb83068e5c70bd5a470d323d448296", size = 14419651, upload-time = "2025-07-24T20:58:59.048Z" },
+    { url = "https://files.pythonhosted.org/packages/8b/95/8023e87cbea31a750a6c00ff9427d65ebc5fef104a136bfa69f76266d614/numpy-2.3.2-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa098a5ab53fa407fded5870865c6275a5cd4101cfdef8d6fafc48286a96e981", size = 16760166, upload-time = "2025-07-24T21:28:56.38Z" },
+    { url = "https://files.pythonhosted.org/packages/78/e3/6690b3f85a05506733c7e90b577e4762517404ea78bab2ca3a5cb1aeb78d/numpy-2.3.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6936aff90dda378c09bea075af0d9c675fe3a977a9d2402f95a87f440f59f619", size = 12977811, upload-time = "2025-07-24T21:29:18.234Z" },
+]
+
 [[package]]
 name = "orjson"
 version = "3.11.1"