Compare commits

40 Commits
feature/ca ... 9a02ca74ad

SHA1:
9a02ca74ad, 9ebb81cbd3, abbc074474, 0bc55977ac, ff3a4debce, ae85b32f69, 34a354e9e3, 4f599e097f,
a5eaf4bb65, e405fb527b, 7f36f93d92, f089a32394, 1fd623a660, 88012f1b8c, 6e284640c0, 077cb46482,
60a13a9097, 3c56fdfaea, 81a8bf3c58, fe9984e2d8, 369ff757b0, 316375bf18, fb820f67fd, f1d9f4e036,
ebb67eb311, 50a8c24ead, eb4b9363ab, 19c5028a0c, 57e1e8e6bd, 385057ffcd, 90699768ff, ad0ca75aa9,
39242d5e6c, 24cca7f2cb, a9c7ac49d6, f249752db5, c0b2116da2, 59e71c8144, e6a416383d, d55448398d
@@ -29,7 +29,16 @@ jobs:
        if: github.ref == 'refs/heads/dev'
        uses: dokku/github-action@master
        with:
          branch: 'dev'
          branch: 'main'
          force: true
          git_remote_url: 'ssh://dokku@v2.discours.io:22/core'
          ssh_private_key: ${{ secrets.SSH_PRIVATE_KEY }}

      - name: Push to dokku for staging branch
        if: github.ref == 'refs/heads/staging'
        uses: dokku/github-action@master
        with:
          branch: 'dev'
          git_remote_url: 'ssh://dokku@staging.discours.io:22/core'
          ssh_private_key: ${{ secrets.SSH_PRIVATE_KEY }}
          git_push_flags: '--force'
3 .gitignore (vendored)

@@ -161,4 +161,5 @@ views.json
*.key
*.crt
*cache.json
.cursor
.cursor
.devcontainer/
24 CHANGELOG.md

@@ -1,3 +1,21 @@
#### [0.4.17] - 2025-03-26
- Fixed `'Reaction' object is not subscriptable` error in hierarchical comments:
  - Modified `get_reactions_with_stat()` to convert Reaction objects to dictionaries
  - Added default values for limit/offset parameters
  - Fixed `load_first_replies()` implementation with proper parameter passing
  - Added doctest with example usage
  - Limited child comments to 100 per parent for performance

#### [0.4.16] - 2025-03-22
- Added hierarchical comments pagination:
  - Created new GraphQL query `load_comments_branch` for efficient loading of hierarchical comments
  - Ability to load root comments with their first N replies
  - Added pagination for both root and child comments
  - Using existing `comments_count` field in `Stat` type to display number of replies
  - Added special `first_replies` field to store first replies to a comment
  - Optimized SQL queries for efficient loading of comment hierarchies
  - Implemented flexible comment sorting system (by time, rating)

#### [0.4.15] - 2025-03-22
- Upgraded caching system described in `docs/caching.md`
- Module `cache/memorycache.py` removed

@@ -31,8 +49,7 @@
- Implemented persistent Redis caching for author queries without TTL (invalidated only on changes)
- Optimized author retrieval with separate endpoints:
  - `get_authors_all` - returns all non-deleted authors without statistics
  - `get_authors_paginated` - returns authors with statistics and pagination support
  - `load_authors_by` - optimized to use caching and efficient sorting
  - `load_authors_by` - optimized to use caching and efficient sorting and pagination
- Improved SQL queries with optimized JOIN conditions and efficient filtering
- Added pre-aggregation of statistics (shouts count, followers count) in single efficient queries
- Implemented robust cache invalidation on author updates

@@ -44,7 +61,6 @@
- Implemented persistent Redis caching for topic queries (no TTL, invalidated only on changes)
- Optimized topic retrieval with separate endpoints for different use cases:
  - `get_topics_all` - returns all topics without statistics for lightweight listing
  - `get_topics_paginated` - returns topics with statistics and pagination support
  - `get_topics_by_community` - adds pagination and optimized filtering by community
- Added SQLAlchemy-managed indexes directly in ORM models for automatic schema maintenance
- Created `sync_indexes()` function for automatic index synchronization during app startup

@@ -142,7 +158,7 @@
#### [0.4.4]
- `followers_stat` removed for shout
- sqlite3 support added
- `rating_stat` and `commented_stat` fixes
- `rating_stat` and `comments_count` fixes

#### [0.4.3]
- cache reimplemented
165 docs/comments-pagination.md Normal file

@@ -0,0 +1,165 @@
# Comments pagination

## Overview

A branch-based comments pagination system has been implemented that allows nested discussion threads to be loaded and displayed efficiently. Key benefits:

1. Only the required comments are loaded, not the whole tree
2. Reduced load on both server and client
3. Efficient navigation through large discussions
4. Preloading of the first N replies for better UX

## API for hierarchical comment loading

### GraphQL query `load_comments_branch`

```graphql
query LoadCommentsBranch(
  $shout: Int!,
  $parentId: Int,
  $limit: Int,
  $offset: Int,
  $sort: ReactionSort,
  $childrenLimit: Int,
  $childrenOffset: Int
) {
  load_comments_branch(
    shout: $shout,
    parent_id: $parentId,
    limit: $limit,
    offset: $offset,
    sort: $sort,
    children_limit: $childrenLimit,
    children_offset: $childrenOffset
  ) {
    id
    body
    created_at
    created_by {
      id
      name
      slug
      pic
    }
    kind
    reply_to
    stat {
      rating
      comments_count
    }
    first_replies {
      id
      body
      created_at
      created_by {
        id
        name
        slug
        pic
      }
      kind
      reply_to
      stat {
        rating
        comments_count
      }
    }
  }
}
```

### Query parameters

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| shout | Int! | - | ID of the article the comments belong to |
| parent_id | Int | null | ID of the parent comment. If null, root comments are loaded |
| limit | Int | 10 | Maximum number of comments to load |
| offset | Int | 0 | Pagination offset |
| sort | ReactionSort | newest | Sort order: newest, oldest, like |
| children_limit | Int | 3 | Maximum number of child comments per parent comment |
| children_offset | Int | 0 | Pagination offset for child comments |

### Response fields

Each comment contains the following main fields:

- `id`: Comment ID
- `body`: Comment text
- `created_at`: Creation time
- `created_by`: Author information
- `kind`: Reaction type (COMMENT)
- `reply_to`: Parent comment ID (null for root comments)
- `first_replies`: First N child comments
- `stat`: Comment statistics, including:
  - `comments_count`: Number of replies to the comment
  - `rating`: Comment rating

## Usage examples

### Loading root comments with their first replies

```javascript
const { data } = await client.query({
  query: LOAD_COMMENTS_BRANCH,
  variables: {
    shout: 222,
    limit: 10,
    offset: 0,
    sort: "newest",
    childrenLimit: 3
  }
});
```

### Loading replies to a specific comment

```javascript
const { data } = await client.query({
  query: LOAD_COMMENTS_BRANCH,
  variables: {
    shout: 222,
    parentId: 123, // ID of the comment whose replies we are loading
    limit: 10,
    offset: 0,
    sort: "oldest" // Sort replies from oldest to newest
  }
});
```

### Paginating child comments

To load additional replies to a comment:

```javascript
const { data } = await client.query({
  query: LOAD_COMMENTS_BRANCH,
  variables: {
    shout: 222,
    parentId: 123,
    limit: 10,
    offset: 0,
    childrenLimit: 5,
    childrenOffset: 3 // Skip the first 3 comments (already loaded)
  }
});
```

## Client implementation recommendations

1. For working efficiently with complex discussion threads:

   - First load only the root comments with their first N replies
   - When more replies exist (when `stat.comments_count > first_replies.length`),
     show a "Show all replies" button
   - On click, load the additional replies with a query that passes the `parentId`

2. For sorting:
   - Use `newest` by default to surface fresh discussions
   - Provide a sort toggle for the whole comment tree
   - Reload the data with the new `sort` parameter when the sort order changes

3. For better performance:
   - Cache query results on the client
   - Use optimistic updates when adding or editing comments
   - Load comments in chunks when needed (lazy loading)
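The usage examples above are written for a JavaScript Apollo-style client. As a supplementary sketch (not part of this change set), the same `load_comments_branch` query can also be exercised from Python with the `gql` package that appears in this PR's requirements; the endpoint URL and the synchronous requests transport below are illustrative assumptions.

```python
# Illustrative sketch only: calling load_comments_branch from Python via gql.
# The endpoint URL is hypothetical; swap in the real GraphQL endpoint.
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport

transport = RequestsHTTPTransport(url="https://example.discours.local/graphql")  # assumed URL
client = Client(transport=transport, fetch_schema_from_transport=False)

LOAD_COMMENTS_BRANCH = gql(
    """
    query($shout: Int!, $parentId: Int, $limit: Int, $childrenLimit: Int) {
      load_comments_branch(
        shout: $shout, parent_id: $parentId,
        limit: $limit, children_limit: $childrenLimit
      ) {
        id
        body
        stat { rating comments_count }
        first_replies { id body }
      }
    }
    """
)

# Load the first page of root comments, each with up to 3 preloaded replies.
result = client.execute(
    LOAD_COMMENTS_BRANCH,
    variable_values={"shout": 222, "parentId": None, "limit": 10, "childrenLimit": 3},
)
for comment in result["load_comments_branch"]:
    hidden = comment["stat"]["comments_count"] - len(comment["first_replies"])
    if hidden > 0:
        print(f"comment {comment['id']}: {hidden} more replies can be fetched with parentId")
```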
@@ -34,4 +34,15 @@
- Supported methods: GET, POST, OPTIONS
- Credentials support configured
- Allowed headers: Authorization, Content-Type, X-Requested-With, DNT, Cache-Control
- Preflight responses cached for 20 days (1728000 seconds)
- Preflight responses cached for 20 days (1728000 seconds)

## Branch-based comments pagination

- Efficient loading of comments that respects their hierarchical structure
- Dedicated `load_comments_branch` query for optimized loading of a comment branch
- Ability to load an article's root comments together with their first replies
- Flexible pagination for both root and child comments
- Use of the `stat.comments_count` field to display the number of replies to a comment
- A dedicated `first_replies` field for storing the first replies to a comment
- Support for different sort orders (newest, oldest, most liked)
- Optimized SQL queries to minimize database load
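For orientation before the resolver changes further down in this diff: child-comment pagination reduces to grouping candidate replies by their parent and slicing each group with the children offset and limit. Below is a simplified, self-contained sketch of that idea using plain dictionaries in place of ORM rows; it is not the actual resolver code (see `load_first_replies` in the reaction resolver diff), and the count here only covers the replies that were fetched, whereas the real code counts replies with a separate query.

```python
# Simplified sketch of the grouping-and-slicing idea behind first_replies.
# Plain dicts stand in for ORM rows; the real logic lives in resolvers/reaction.py.
from collections import defaultdict


def attach_first_replies(comments, replies, children_limit=3, children_offset=0):
    """Group replies by parent id and attach a paginated slice to each comment."""
    replies_by_parent = defaultdict(list)
    for reply in replies:
        replies_by_parent[reply["reply_to"]].append(reply)

    for comment in comments:
        parent_replies = replies_by_parent.get(comment["id"], [])
        comment["first_replies"] = parent_replies[
            children_offset : children_offset + children_limit
        ]
        # Simplification: counts only fetched replies, not a separate COUNT query.
        comment.setdefault("stat", {})["comments_count"] = len(parent_replies)
    return comments


roots = [{"id": 1}, {"id": 2}]
replies = [
    {"id": 10, "reply_to": 1},
    {"id": 11, "reply_to": 1},
    {"id": 12, "reply_to": 1},
    {"id": 13, "reply_to": 1},
    {"id": 20, "reply_to": 2},
]
attach_first_replies(roots, replies)
# roots[0]["first_replies"] -> replies 10, 11, 12; roots[0]["stat"]["comments_count"] -> 4
```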
48 main.py
@@ -17,7 +17,8 @@ from cache.revalidator import revalidation_manager
|
||||
from services.exception import ExceptionHandlerMiddleware
|
||||
from services.redis import redis
|
||||
from services.schema import create_all_tables, resolvers
|
||||
from services.search import search_service
|
||||
#from services.search import search_service
|
||||
from services.search import search_service, initialize_search_index
|
||||
from services.viewed import ViewedStorage
|
||||
from services.webhook import WebhookEndpoint, create_webhook_endpoint
|
||||
from settings import DEV_SERVER_PID_FILE_NAME, MODE
|
||||
@@ -34,24 +35,67 @@ async def start():
|
||||
f.write(str(os.getpid()))
|
||||
print(f"[main] process started in {MODE} mode")
|
||||
|
||||
async def check_search_service():
|
||||
"""Check if search service is available and log result"""
|
||||
info = await search_service.info()
|
||||
if info.get("status") in ["error", "unavailable"]:
|
||||
print(f"[WARNING] Search service unavailable: {info.get('message', 'unknown reason')}")
|
||||
else:
|
||||
print(f"[INFO] Search service is available: {info}")
|
||||
|
||||
|
||||
# indexing DB data
|
||||
# async def indexing():
|
||||
# from services.db import fetch_all_shouts
|
||||
# all_shouts = await fetch_all_shouts()
|
||||
# await initialize_search_index(all_shouts)
|
||||
async def lifespan(_app):
|
||||
try:
|
||||
print("[lifespan] Starting application initialization")
|
||||
create_all_tables()
|
||||
await asyncio.gather(
|
||||
redis.connect(),
|
||||
precache_data(),
|
||||
ViewedStorage.init(),
|
||||
create_webhook_endpoint(),
|
||||
search_service.info(),
|
||||
check_search_service(),
|
||||
start(),
|
||||
revalidation_manager.start(),
|
||||
)
|
||||
print("[lifespan] Basic initialization complete")
|
||||
|
||||
# Add a delay before starting the intensive search indexing
|
||||
print("[lifespan] Waiting for system stabilization before search indexing...")
|
||||
await asyncio.sleep(10) # 10-second delay to let the system stabilize
|
||||
|
||||
# Start search indexing as a background task with lower priority
|
||||
asyncio.create_task(initialize_search_index_background())
|
||||
|
||||
yield
|
||||
finally:
|
||||
print("[lifespan] Shutting down application services")
|
||||
tasks = [redis.disconnect(), ViewedStorage.stop(), revalidation_manager.stop()]
|
||||
await asyncio.gather(*tasks, return_exceptions=True)
|
||||
print("[lifespan] Shutdown complete")
|
||||
|
||||
# Initialize search index in the background
|
||||
async def initialize_search_index_background():
|
||||
"""Run search indexing as a background task with low priority"""
|
||||
try:
|
||||
print("[search] Starting background search indexing process")
|
||||
from services.db import fetch_all_shouts
|
||||
|
||||
# Get total count first (optional)
|
||||
all_shouts = await fetch_all_shouts()
|
||||
total_count = len(all_shouts) if all_shouts else 0
|
||||
print(f"[search] Fetched {total_count} shouts for background indexing")
|
||||
|
||||
# Start the indexing process with the fetched shouts
|
||||
print("[search] Beginning background search index initialization...")
|
||||
await initialize_search_index(all_shouts)
|
||||
print("[search] Background search index initialization complete")
|
||||
except Exception as e:
|
||||
print(f"[search] Error in background search indexing: {str(e)}")
|
||||
|
||||
# Создаем экземпляр GraphQL
|
||||
graphql_app = GraphQL(schema, debug=True)
|
||||
|
@@ -13,5 +13,9 @@ starlette
|
||||
gql
|
||||
ariadne
|
||||
granian
|
||||
|
||||
# NLP and search
|
||||
httpx
|
||||
|
||||
orjson
|
||||
pydantic
|
@@ -37,6 +37,7 @@ from resolvers.reaction import (
|
||||
create_reaction,
|
||||
delete_reaction,
|
||||
load_comment_ratings,
|
||||
load_comments_branch,
|
||||
load_reactions_by,
|
||||
load_shout_comments,
|
||||
load_shout_ratings,
|
||||
@@ -107,6 +108,7 @@ __all__ = [
|
||||
"load_shout_comments",
|
||||
"load_shout_ratings",
|
||||
"load_comment_ratings",
|
||||
"load_comments_branch",
|
||||
# notifier
|
||||
"load_notifications",
|
||||
"notifications_seen_thread",
|
||||
|
@@ -232,22 +232,6 @@ async def get_authors_all(_, _info):
|
||||
return await get_all_authors()
|
||||
|
||||
|
||||
@query.field("get_authors_paginated")
|
||||
async def get_authors_paginated(_, _info, limit=50, offset=0, by=None):
|
||||
"""
|
||||
Получает список авторов с пагинацией и статистикой.
|
||||
|
||||
Args:
|
||||
limit: Максимальное количество возвращаемых авторов
|
||||
offset: Смещение для пагинации
|
||||
by: Параметр сортировки (new/active)
|
||||
|
||||
Returns:
|
||||
list: Список авторов с их статистикой
|
||||
"""
|
||||
return await get_authors_with_stats(limit, offset, by)
|
||||
|
||||
|
||||
@query.field("get_author")
|
||||
async def get_author(_, _info, slug="", author_id=0):
|
||||
author_dict = None
|
||||
|
@@ -148,7 +148,11 @@ async def update_draft(_, info, draft_id: int, draft_input):
|
||||
return {"error": "Draft not found"}
|
||||
|
||||
Draft.update(draft, draft_input)
|
||||
draft.updated_at = int(time.time())
|
||||
# Set updated_at and updated_by from the authenticated user
|
||||
current_time = int(time.time())
|
||||
draft.updated_at = current_time
|
||||
draft.updated_by = author_id
|
||||
|
||||
session.commit()
|
||||
return {"draft": draft}
|
||||
|
||||
|
@@ -67,30 +67,35 @@ def add_reaction_stat_columns(q):
|
||||
return q
|
||||
|
||||
|
||||
def get_reactions_with_stat(q, limit, offset):
|
||||
def get_reactions_with_stat(q, limit=10, offset=0):
|
||||
"""
|
||||
Execute the reaction query and retrieve reactions with statistics.
|
||||
|
||||
:param q: Query with reactions and statistics.
|
||||
:param limit: Number of reactions to load.
|
||||
:param offset: Pagination offset.
|
||||
:return: List of reactions.
|
||||
:return: List of reactions as dictionaries.
|
||||
|
||||
>>> get_reactions_with_stat(q, 10, 0) # doctest: +SKIP
|
||||
[{'id': 1, 'body': 'Текст комментария', 'stat': {'rating': 5, 'comments_count': 3}, ...}]
|
||||
"""
|
||||
q = q.limit(limit).offset(offset)
|
||||
reactions = []
|
||||
|
||||
with local_session() as session:
|
||||
result_rows = session.execute(q)
|
||||
for reaction, author, shout, commented_stat, rating_stat in result_rows:
|
||||
for reaction, author, shout, comments_count, rating_stat in result_rows:
|
||||
# Пропускаем реакции с отсутствующими shout или author
|
||||
if not shout or not author:
|
||||
logger.error(f"Пропущена реакция из-за отсутствия shout или author: {reaction.dict()}")
|
||||
continue
|
||||
|
||||
reaction.created_by = author.dict()
|
||||
reaction.shout = shout.dict()
|
||||
reaction.stat = {"rating": rating_stat, "comments": commented_stat}
|
||||
reactions.append(reaction)
|
||||
# Преобразуем Reaction в словарь для доступа по ключу
|
||||
reaction_dict = reaction.dict()
|
||||
reaction_dict["created_by"] = author.dict()
|
||||
reaction_dict["shout"] = shout.dict()
|
||||
reaction_dict["stat"] = {"rating": rating_stat, "comments_count": comments_count}
|
||||
reactions.append(reaction_dict)
|
||||
|
||||
return reactions
|
||||
|
||||
@@ -393,7 +398,7 @@ async def update_reaction(_, info, reaction):
|
||||
|
||||
result = session.execute(reaction_query).unique().first()
|
||||
if result:
|
||||
r, author, _shout, commented_stat, rating_stat = result
|
||||
r, author, _shout, comments_count, rating_stat = result
|
||||
if not r or not author:
|
||||
return {"error": "Invalid reaction ID or unauthorized"}
|
||||
|
||||
@@ -408,7 +413,7 @@ async def update_reaction(_, info, reaction):
|
||||
session.commit()
|
||||
|
||||
r.stat = {
|
||||
"commented": commented_stat,
|
||||
"comments_count": comments_count,
|
||||
"rating": rating_stat,
|
||||
}
|
||||
|
||||
@@ -612,24 +617,22 @@ async def load_shout_comments(_, info, shout: int, limit=50, offset=0):
|
||||
@query.field("load_comment_ratings")
|
||||
async def load_comment_ratings(_, info, comment: int, limit=50, offset=0):
|
||||
"""
|
||||
Load ratings for a specified comment with pagination and statistics.
|
||||
Load ratings for a specified comment with pagination.
|
||||
|
||||
:param info: GraphQL context info.
|
||||
:param comment: Comment ID.
|
||||
:param limit: Number of ratings to load.
|
||||
:param offset: Pagination offset.
|
||||
:return: List of reactions.
|
||||
:return: List of ratings.
|
||||
"""
|
||||
q = query_reactions()
|
||||
|
||||
q = add_reaction_stat_columns(q)
|
||||
|
||||
# Filter, group, sort, limit, offset
|
||||
q = q.filter(
|
||||
and_(
|
||||
Reaction.deleted_at.is_(None),
|
||||
Reaction.reply_to == comment,
|
||||
Reaction.kind == ReactionKind.COMMENT.value,
|
||||
Reaction.kind.in_(RATING_REACTIONS),
|
||||
)
|
||||
)
|
||||
q = q.group_by(Reaction.id, Author.id, Shout.id)
|
||||
@@ -637,3 +640,187 @@ async def load_comment_ratings(_, info, comment: int, limit=50, offset=0):
|
||||
|
||||
# Retrieve and return reactions
|
||||
return get_reactions_with_stat(q, limit, offset)
|
||||
|
||||
|
||||
@query.field("load_comments_branch")
|
||||
async def load_comments_branch(
|
||||
_,
|
||||
_info,
|
||||
shout: int,
|
||||
parent_id: int | None = None,
|
||||
limit=10,
|
||||
offset=0,
|
||||
sort="newest",
|
||||
children_limit=3,
|
||||
children_offset=0,
|
||||
):
|
||||
"""
|
||||
Загружает иерархические комментарии с возможностью пагинации корневых и дочерних.
|
||||
|
||||
:param info: GraphQL context info.
|
||||
:param shout: ID статьи.
|
||||
:param parent_id: ID родительского комментария (None для корневых).
|
||||
:param limit: Количество комментариев для загрузки.
|
||||
:param offset: Смещение для пагинации.
|
||||
:param sort: Порядок сортировки ('newest', 'oldest', 'like').
|
||||
:param children_limit: Максимальное количество дочерних комментариев.
|
||||
:param children_offset: Смещение для дочерних комментариев.
|
||||
:return: Список комментариев с дочерними.
|
||||
"""
|
||||
# Создаем базовый запрос
|
||||
q = query_reactions()
|
||||
q = add_reaction_stat_columns(q)
|
||||
|
||||
# Фильтруем по статье и типу (комментарии)
|
||||
q = q.filter(
|
||||
and_(
|
||||
Reaction.deleted_at.is_(None),
|
||||
Reaction.shout == shout,
|
||||
Reaction.kind == ReactionKind.COMMENT.value,
|
||||
)
|
||||
)
|
||||
|
||||
# Фильтруем по родительскому ID
|
||||
if parent_id is None:
|
||||
# Загружаем только корневые комментарии
|
||||
q = q.filter(Reaction.reply_to.is_(None))
|
||||
else:
|
||||
# Загружаем только прямые ответы на указанный комментарий
|
||||
q = q.filter(Reaction.reply_to == parent_id)
|
||||
|
||||
# Сортировка и группировка
|
||||
q = q.group_by(Reaction.id, Author.id, Shout.id)
|
||||
|
||||
# Определяем сортировку
|
||||
order_by_stmt = None
|
||||
if sort.lower() == "oldest":
|
||||
order_by_stmt = asc(Reaction.created_at)
|
||||
elif sort.lower() == "like":
|
||||
order_by_stmt = desc("rating_stat")
|
||||
else: # "newest" по умолчанию
|
||||
order_by_stmt = desc(Reaction.created_at)
|
||||
|
||||
q = q.order_by(order_by_stmt)
|
||||
|
||||
# Выполняем запрос для получения комментариев
|
||||
comments = get_reactions_with_stat(q, limit, offset)
|
||||
|
||||
# Если комментарии найдены, загружаем дочерние и количество ответов
|
||||
if comments:
|
||||
# Загружаем количество ответов для каждого комментария
|
||||
await load_replies_count(comments)
|
||||
|
||||
# Загружаем дочерние комментарии
|
||||
await load_first_replies(comments, children_limit, children_offset, sort)
|
||||
|
||||
return comments
|
||||
|
||||
|
||||
async def load_replies_count(comments):
|
||||
"""
|
||||
Загружает количество ответов для списка комментариев и обновляет поле stat.comments_count.
|
||||
|
||||
:param comments: Список комментариев, для которых нужно загрузить количество ответов.
|
||||
"""
|
||||
if not comments:
|
||||
return
|
||||
|
||||
comment_ids = [comment["id"] for comment in comments]
|
||||
|
||||
# Запрос для подсчета количества ответов
|
||||
q = (
|
||||
select(Reaction.reply_to.label("parent_id"), func.count().label("count"))
|
||||
.where(
|
||||
and_(
|
||||
Reaction.reply_to.in_(comment_ids),
|
||||
Reaction.deleted_at.is_(None),
|
||||
Reaction.kind == ReactionKind.COMMENT.value,
|
||||
)
|
||||
)
|
||||
.group_by(Reaction.reply_to)
|
||||
)
|
||||
|
||||
# Выполняем запрос
|
||||
with local_session() as session:
|
||||
result = session.execute(q).fetchall()
|
||||
|
||||
# Создаем словарь {parent_id: count}
|
||||
replies_count = {row[0]: row[1] for row in result}
|
||||
|
||||
# Добавляем значения в комментарии
|
||||
for comment in comments:
|
||||
if "stat" not in comment:
|
||||
comment["stat"] = {}
|
||||
|
||||
# Обновляем счетчик комментариев в stat
|
||||
comment["stat"]["comments_count"] = replies_count.get(comment["id"], 0)
|
||||
|
||||
|
||||
async def load_first_replies(comments, limit, offset, sort="newest"):
|
||||
"""
|
||||
Загружает первые N ответов для каждого комментария.
|
||||
|
||||
:param comments: Список комментариев, для которых нужно загрузить ответы.
|
||||
:param limit: Максимальное количество ответов для каждого комментария.
|
||||
:param offset: Смещение для пагинации дочерних комментариев.
|
||||
:param sort: Порядок сортировки ответов.
|
||||
"""
|
||||
if not comments or limit <= 0:
|
||||
return
|
||||
|
||||
# Собираем ID комментариев
|
||||
comment_ids = [comment["id"] for comment in comments]
|
||||
|
||||
# Базовый запрос для загрузки ответов
|
||||
q = query_reactions()
|
||||
q = add_reaction_stat_columns(q)
|
||||
|
||||
# Фильтрация: только ответы на указанные комментарии
|
||||
q = q.filter(
|
||||
and_(
|
||||
Reaction.reply_to.in_(comment_ids),
|
||||
Reaction.deleted_at.is_(None),
|
||||
Reaction.kind == ReactionKind.COMMENT.value,
|
||||
)
|
||||
)
|
||||
|
||||
# Группировка
|
||||
q = q.group_by(Reaction.id, Author.id, Shout.id)
|
||||
|
||||
# Определяем сортировку
|
||||
order_by_stmt = None
|
||||
if sort.lower() == "oldest":
|
||||
order_by_stmt = asc(Reaction.created_at)
|
||||
elif sort.lower() == "like":
|
||||
order_by_stmt = desc("rating_stat")
|
||||
else: # "newest" по умолчанию
|
||||
order_by_stmt = desc(Reaction.created_at)
|
||||
|
||||
q = q.order_by(order_by_stmt, Reaction.reply_to)
|
||||
|
||||
# Выполняем запрос - указываем limit для неограниченного количества ответов
|
||||
# но не более 100 на родительский комментарий
|
||||
replies = get_reactions_with_stat(q, limit=100, offset=0)
|
||||
|
||||
# Группируем ответы по родительским ID
|
||||
replies_by_parent = {}
|
||||
for reply in replies:
|
||||
parent_id = reply.get("reply_to")
|
||||
if parent_id not in replies_by_parent:
|
||||
replies_by_parent[parent_id] = []
|
||||
replies_by_parent[parent_id].append(reply)
|
||||
|
||||
# Добавляем ответы к соответствующим комментариям с учетом смещения и лимита
|
||||
for comment in comments:
|
||||
comment_id = comment["id"]
|
||||
if comment_id in replies_by_parent:
|
||||
parent_replies = replies_by_parent[comment_id]
|
||||
# Применяем смещение и лимит
|
||||
comment["first_replies"] = parent_replies[offset : offset + limit]
|
||||
else:
|
||||
comment["first_replies"] = []
|
||||
|
||||
# Загружаем количество ответов для дочерних комментариев
|
||||
all_replies = [reply for replies in replies_by_parent.values() for reply in replies]
|
||||
if all_replies:
|
||||
await load_replies_count(all_replies)
|
||||
|
@@ -225,7 +225,7 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
|
||||
elif isinstance(row.stat, dict):
|
||||
stat = row.stat
|
||||
viewed = ViewedStorage.get_shout(shout_id=shout_id) or 0
|
||||
shout_dict["stat"] = {**stat, "viewed": viewed, "commented": stat.get("comments_count", 0)}
|
||||
shout_dict["stat"] = {**stat, "viewed": viewed}
|
||||
|
||||
# Обработка main_topic и topics
|
||||
topics = None
|
||||
@@ -252,10 +252,10 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
|
||||
"is_main": True,
|
||||
}
|
||||
elif not main_topic:
|
||||
logger.warning(f"No main_topic and no topics found for shout#{shout_id}")
|
||||
logger.debug(f"No main_topic and no topics found for shout#{shout_id}")
|
||||
main_topic = {"id": 0, "title": "no topic", "slug": "notopic", "is_main": True}
|
||||
shout_dict["main_topic"] = main_topic
|
||||
# logger.debug(f"Final main_topic for shout#{shout_id}: {main_topic}")
|
||||
logger.debug(f"Final main_topic for shout#{shout_id}: {main_topic}")
|
||||
|
||||
if has_field(info, "authors") and hasattr(row, "authors"):
|
||||
shout_dict["authors"] = (
|
||||
@@ -412,18 +412,21 @@ async def load_shouts_search(_, info, text, options):
|
||||
scores[shout_id] = sr.get("score")
|
||||
hits_ids.append(shout_id)
|
||||
|
||||
q = (
|
||||
query_with_stat(info)
|
||||
if has_field(info, "stat")
|
||||
else select(Shout).filter(and_(Shout.published_at.is_not(None), Shout.deleted_at.is_(None)))
|
||||
)
|
||||
q = query_with_stat(info)
|
||||
|
||||
q = q.filter(Shout.id.in_(hits_ids))
|
||||
q = apply_filters(q, options)
|
||||
q = apply_sorting(q, options)
|
||||
|
||||
# added this to join topics
|
||||
topic_join = aliased(ShoutTopic)
|
||||
topic = aliased(Topic)
|
||||
q = q.outerjoin(topic_join, topic_join.shout == Shout.id)
|
||||
q = q.outerjoin(topic, topic.id == topic_join.topic)
|
||||
|
||||
shouts = get_shouts_with_links(info, q, limit, offset)
|
||||
for shout in shouts:
|
||||
shout.score = scores[f"{shout.id}"]
|
||||
shouts.sort(key=lambda x: x.score, reverse=True)
|
||||
shout["score"] = scores[f"{shout['id']}"]
|
||||
shouts.sort(key=lambda x: x["score"], reverse=True)
|
||||
return shouts
|
||||
return []
|
||||
|
||||
|
@@ -202,23 +202,6 @@ async def get_topics_all(_, _info):
|
||||
return await get_all_topics()
|
||||
|
||||
|
||||
# Запрос на получение тем с пагинацией и статистикой
|
||||
@query.field("get_topics_paginated")
|
||||
async def get_topics_paginated(_, _info, limit=100, offset=0, by=None):
|
||||
"""
|
||||
Получает список тем с пагинацией и статистикой.
|
||||
|
||||
Args:
|
||||
limit: Максимальное количество возвращаемых тем
|
||||
offset: Смещение для пагинации
|
||||
by: Опциональные параметры сортировки
|
||||
|
||||
Returns:
|
||||
list: Список тем с их статистикой
|
||||
"""
|
||||
return await get_topics_with_stats(limit, offset, None, by)
|
||||
|
||||
|
||||
# Запрос на получение тем по сообществу
|
||||
@query.field("get_topics_by_community")
|
||||
async def get_topics_by_community(_, _info, community_id: int, limit=100, offset=0, by=None):
|
||||
|
@@ -26,6 +26,9 @@ type Query {
|
||||
load_shout_ratings(shout: Int!, limit: Int, offset: Int): [Reaction]
|
||||
load_comment_ratings(comment: Int!, limit: Int, offset: Int): [Reaction]
|
||||
|
||||
# branched comments pagination
|
||||
load_comments_branch(shout: Int!, parent_id: Int, limit: Int, offset: Int, sort: ReactionSort, children_limit: Int, children_offset: Int): [Reaction]
|
||||
|
||||
# reader
|
||||
get_shout(slug: String, shout_id: Int): Shout
|
||||
load_shouts_by(options: LoadShoutsOptions): [Shout]
|
||||
@@ -57,7 +60,7 @@ type Query {
|
||||
get_topic(slug: String!): Topic
|
||||
get_topics_all: [Topic]
|
||||
get_topics_by_author(slug: String, user: String, author_id: Int): [Topic]
|
||||
get_topics_by_community(slug: String, community_id: Int): [Topic]
|
||||
get_topics_by_community(community_id: Int!, limit: Int, offset: Int): [Topic]
|
||||
|
||||
# notifier
|
||||
load_notifications(after: Int!, limit: Int, offset: Int): NotificationsResult!
|
||||
|
@@ -55,6 +55,7 @@ type Reaction {
|
||||
stat: Stat
|
||||
oid: String
|
||||
# old_thread: String
|
||||
first_replies: [Reaction]
|
||||
}
|
||||
|
||||
type MediaItem {
|
||||
@@ -136,7 +137,7 @@ type Draft {
|
||||
|
||||
type Stat {
|
||||
rating: Int
|
||||
commented: Int
|
||||
comments_count: Int
|
||||
viewed: Int
|
||||
last_commented_at: Int
|
||||
}
|
||||
@@ -207,6 +208,7 @@ type CommonResult {
|
||||
}
|
||||
|
||||
type SearchResult {
|
||||
id: Int!
|
||||
slug: String!
|
||||
title: String!
|
||||
cover: String
|
||||
|
34 server.py Normal file
@@ -0,0 +1,34 @@
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from granian.constants import Interfaces
|
||||
from granian.log import LogLevels
|
||||
from granian.server import Server
|
||||
|
||||
from settings import PORT
|
||||
from utils.logger import root_logger as logger
|
||||
|
||||
if __name__ == "__main__":
|
||||
logger.info("started")
|
||||
try:
|
||||
|
||||
granian_instance = Server(
|
||||
"main:app",
|
||||
address="0.0.0.0",
|
||||
port=PORT,
|
||||
interface=Interfaces.ASGI,
|
||||
workers=1,
|
||||
websockets=False,
|
||||
log_level=LogLevels.debug,
|
||||
backlog=2048,
|
||||
)
|
||||
|
||||
if "dev" in sys.argv:
|
||||
logger.info("dev mode, building ssl context")
|
||||
granian_instance.build_ssl_context(cert=Path("localhost.pem"), key=Path("localhost-key.pem"), password=None)
|
||||
granian_instance.serve()
|
||||
except Exception as error:
|
||||
logger.error(error, exc_info=True)
|
||||
raise
|
||||
finally:
|
||||
logger.info("stopped")
|
@@ -259,3 +259,27 @@ def get_json_builder():
|
||||
|
||||
# Используем их в коде
|
||||
json_builder, json_array_builder, json_cast = get_json_builder()
|
||||
|
||||
async def fetch_all_shouts(session=None):
|
||||
"""Fetch all published shouts for search indexing"""
|
||||
from orm.shout import Shout
|
||||
|
||||
close_session = False
|
||||
if session is None:
|
||||
session = local_session()
|
||||
close_session = True
|
||||
|
||||
try:
|
||||
# Fetch only published and non-deleted shouts
|
||||
query = session.query(Shout).filter(
|
||||
Shout.published_at.is_not(None),
|
||||
Shout.deleted_at.is_(None)
|
||||
)
|
||||
shouts = query.all()
|
||||
return shouts
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching shouts for search indexing: {e}")
|
||||
return []
|
||||
finally:
|
||||
if close_session:
|
||||
session.close()
|
@@ -2,231 +2,571 @@ import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import httpx
|
||||
import time
|
||||
import random
|
||||
|
||||
import orjson
|
||||
from opensearchpy import OpenSearch
|
||||
|
||||
from services.redis import redis
|
||||
from utils.encoders import CustomJSONEncoder
|
||||
|
||||
# Set redis logging level to suppress DEBUG messages
|
||||
# Set up proper logging
|
||||
logger = logging.getLogger("search")
|
||||
logger.setLevel(logging.WARNING)
|
||||
logger.setLevel(logging.INFO) # Change to INFO to see more details
|
||||
|
||||
ELASTIC_HOST = os.environ.get("ELASTIC_HOST", "").replace("https://", "")
|
||||
ELASTIC_USER = os.environ.get("ELASTIC_USER", "")
|
||||
ELASTIC_PASSWORD = os.environ.get("ELASTIC_PASSWORD", "")
|
||||
ELASTIC_PORT = os.environ.get("ELASTIC_PORT", 9200)
|
||||
ELASTIC_URL = os.environ.get(
|
||||
"ELASTIC_URL",
|
||||
f"https://{ELASTIC_USER}:{ELASTIC_PASSWORD}@{ELASTIC_HOST}:{ELASTIC_PORT}",
|
||||
)
|
||||
REDIS_TTL = 86400 # 1 день в секундах
|
||||
|
||||
index_settings = {
|
||||
"settings": {
|
||||
"index": {"number_of_shards": 1, "auto_expand_replicas": "0-all"},
|
||||
"analysis": {
|
||||
"analyzer": {
|
||||
"ru": {
|
||||
"tokenizer": "standard",
|
||||
"filter": ["lowercase", "ru_stop", "ru_stemmer"],
|
||||
}
|
||||
},
|
||||
"filter": {
|
||||
"ru_stemmer": {"type": "stemmer", "language": "russian"},
|
||||
"ru_stop": {"type": "stop", "stopwords": "_russian_"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"body": {"type": "text", "analyzer": "ru"},
|
||||
"title": {"type": "text", "analyzer": "ru"},
|
||||
"subtitle": {"type": "text", "analyzer": "ru"},
|
||||
"lead": {"type": "text", "analyzer": "ru"},
|
||||
"media": {"type": "text", "analyzer": "ru"},
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
expected_mapping = index_settings["mappings"]
|
||||
|
||||
# Создание цикла событий
|
||||
search_loop = asyncio.get_event_loop()
|
||||
|
||||
# В начале файла добавим флаг
|
||||
SEARCH_ENABLED = bool(os.environ.get("ELASTIC_HOST", ""))
|
||||
|
||||
|
||||
def get_indices_stats():
|
||||
indices_stats = search_service.client.cat.indices(format="json")
|
||||
for index_info in indices_stats:
|
||||
index_name = index_info["index"]
|
||||
if not index_name.startswith("."):
|
||||
index_health = index_info["health"]
|
||||
index_status = index_info["status"]
|
||||
pri_shards = index_info["pri"]
|
||||
rep_shards = index_info["rep"]
|
||||
docs_count = index_info["docs.count"]
|
||||
docs_deleted = index_info["docs.deleted"]
|
||||
store_size = index_info["store.size"]
|
||||
pri_store_size = index_info["pri.store.size"]
|
||||
|
||||
logger.info(f"Index: {index_name}")
|
||||
logger.info(f"Health: {index_health}")
|
||||
logger.info(f"Status: {index_status}")
|
||||
logger.info(f"Primary Shards: {pri_shards}")
|
||||
logger.info(f"Replica Shards: {rep_shards}")
|
||||
logger.info(f"Documents Count: {docs_count}")
|
||||
logger.info(f"Deleted Documents: {docs_deleted}")
|
||||
logger.info(f"Store Size: {store_size}")
|
||||
logger.info(f"Primary Store Size: {pri_store_size}")
|
||||
# Configuration for search service
|
||||
SEARCH_ENABLED = bool(os.environ.get("SEARCH_ENABLED", "true").lower() in ["true", "1", "yes"])
|
||||
TXTAI_SERVICE_URL = os.environ.get("TXTAI_SERVICE_URL", "none")
|
||||
MAX_BATCH_SIZE = int(os.environ.get("SEARCH_MAX_BATCH_SIZE", "25"))
|
||||
|
||||
|
||||
class SearchService:
|
||||
def __init__(self, index_name="search_index"):
|
||||
logger.info("Инициализируем поиск...")
|
||||
self.index_name = index_name
|
||||
self.client = None
|
||||
self.lock = asyncio.Lock()
|
||||
|
||||
# Инициализация клиента OpenSearch только если поиск включен
|
||||
if SEARCH_ENABLED:
|
||||
try:
|
||||
self.client = OpenSearch(
|
||||
hosts=[{"host": ELASTIC_HOST, "port": ELASTIC_PORT}],
|
||||
http_compress=True,
|
||||
http_auth=(ELASTIC_USER, ELASTIC_PASSWORD),
|
||||
use_ssl=True,
|
||||
verify_certs=False,
|
||||
ssl_assert_hostname=False,
|
||||
ssl_show_warn=False,
|
||||
)
|
||||
logger.info("Клиент OpenSearch.org подключен")
|
||||
search_loop.create_task(self.check_index())
|
||||
except Exception as exc:
|
||||
logger.warning(f"Поиск отключен из-за ошибки подключения: {exc}")
|
||||
self.client = None
|
||||
else:
|
||||
logger.info("Поиск отключен (ELASTIC_HOST не установлен)")
|
||||
def __init__(self):
|
||||
logger.info(f"Initializing search service with URL: {TXTAI_SERVICE_URL}")
|
||||
self.available = SEARCH_ENABLED
|
||||
# Use different timeout settings for indexing and search requests
|
||||
self.client = httpx.AsyncClient(timeout=30.0, base_url=TXTAI_SERVICE_URL)
|
||||
self.index_client = httpx.AsyncClient(timeout=120.0, base_url=TXTAI_SERVICE_URL)
|
||||
|
||||
if not self.available:
|
||||
logger.info("Search disabled (SEARCH_ENABLED = False)")
|
||||
|
||||
async def info(self):
|
||||
if not SEARCH_ENABLED:
|
||||
"""Return information about search service"""
|
||||
if not self.available:
|
||||
return {"status": "disabled"}
|
||||
|
||||
try:
|
||||
return get_indices_stats()
|
||||
response = await self.client.get("/info")
|
||||
response.raise_for_status()
|
||||
result = response.json()
|
||||
logger.info(f"Search service info: {result}")
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get search info: {e}")
|
||||
return {"status": "error", "message": str(e)}
|
||||
|
||||
def delete_index(self):
|
||||
if self.client:
|
||||
logger.warning(f"[!!!] Удаляем индекс {self.index_name}")
|
||||
self.client.indices.delete(index=self.index_name, ignore_unavailable=True)
|
||||
|
||||
def create_index(self):
|
||||
if self.client:
|
||||
logger.info(f"Создается индекс: {self.index_name}")
|
||||
self.client.indices.create(index=self.index_name, body=index_settings)
|
||||
logger.info(f"Индекс {self.index_name} создан")
|
||||
|
||||
async def check_index(self):
|
||||
if self.client:
|
||||
logger.info(f"Проверяем индекс {self.index_name}...")
|
||||
if not self.client.indices.exists(index=self.index_name):
|
||||
self.create_index()
|
||||
self.client.indices.put_mapping(index=self.index_name, body=expected_mapping)
|
||||
else:
|
||||
logger.info(f"Найден существующий индекс {self.index_name}")
|
||||
# Проверка и обновление структуры индекса, если необходимо
|
||||
result = self.client.indices.get_mapping(index=self.index_name)
|
||||
if isinstance(result, str):
|
||||
result = orjson.loads(result)
|
||||
if isinstance(result, dict):
|
||||
mapping = result.get(self.index_name, {}).get("mappings")
|
||||
logger.info(f"Найдена структура индексации: {mapping['properties'].keys()}")
|
||||
expected_keys = expected_mapping["properties"].keys()
|
||||
if mapping and mapping["properties"].keys() != expected_keys:
|
||||
logger.info(f"Ожидаемая структура индексации: {expected_mapping}")
|
||||
logger.warning("[!!!] Требуется переиндексация всех данных")
|
||||
self.delete_index()
|
||||
self.client = None
|
||||
else:
|
||||
logger.error("клиент не инициализован, невозможно проверить индекс")
|
||||
def is_ready(self):
|
||||
"""Check if service is available"""
|
||||
return self.available
|
||||
|
||||
async def verify_docs(self, doc_ids):
|
||||
"""Verify which documents exist in the search index"""
|
||||
if not self.available:
|
||||
return {"status": "disabled"}
|
||||
|
||||
try:
|
||||
logger.info(f"Verifying {len(doc_ids)} documents in search index")
|
||||
response = await self.client.post(
|
||||
"/verify-docs",
|
||||
json={"doc_ids": doc_ids},
|
||||
timeout=60.0 # Longer timeout for potentially large ID lists
|
||||
)
|
||||
response.raise_for_status()
|
||||
result = response.json()
|
||||
|
||||
# Log summary of verification results
|
||||
missing_count = len(result.get("missing", []))
|
||||
logger.info(f"Document verification complete: {missing_count} missing out of {len(doc_ids)} total")
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.error(f"Document verification error: {e}")
|
||||
return {"status": "error", "message": str(e)}
|
||||
|
||||
def index(self, shout):
|
||||
if not SEARCH_ENABLED:
|
||||
"""Index a single document"""
|
||||
if not self.available:
|
||||
return
|
||||
|
||||
if self.client:
|
||||
logger.info(f"Индексируем пост {shout.id}")
|
||||
index_body = {
|
||||
"body": shout.body,
|
||||
"title": shout.title,
|
||||
"subtitle": shout.subtitle,
|
||||
"lead": shout.lead,
|
||||
"media": shout.media,
|
||||
}
|
||||
asyncio.create_task(self.perform_index(shout, index_body))
|
||||
logger.info(f"Indexing post {shout.id}")
|
||||
# Start in background to not block
|
||||
asyncio.create_task(self.perform_index(shout))
|
||||
|
||||
async def perform_index(self, shout, index_body):
|
||||
if self.client:
|
||||
async def perform_index(self, shout):
|
||||
"""Actually perform the indexing operation"""
|
||||
if not self.available:
|
||||
return
|
||||
|
||||
try:
|
||||
# Combine all text fields
|
||||
text = " ".join(filter(None, [
|
||||
shout.title or "",
|
||||
shout.subtitle or "",
|
||||
shout.lead or "",
|
||||
shout.body or "",
|
||||
shout.media or ""
|
||||
]))
|
||||
|
||||
if not text.strip():
|
||||
logger.warning(f"No text content to index for shout {shout.id}")
|
||||
return
|
||||
|
||||
logger.info(f"Indexing document: ID={shout.id}, Text length={len(text)}")
|
||||
|
||||
# Send to txtai service
|
||||
response = await self.client.post(
|
||||
"/index",
|
||||
json={"id": str(shout.id), "text": text}
|
||||
)
|
||||
response.raise_for_status()
|
||||
result = response.json()
|
||||
logger.info(f"Post {shout.id} successfully indexed: {result}")
|
||||
except Exception as e:
|
||||
logger.error(f"Indexing error for shout {shout.id}: {e}")
|
||||
|
||||
async def bulk_index(self, shouts):
|
||||
"""Index multiple documents at once with adaptive batch sizing"""
|
||||
if not self.available or not shouts:
|
||||
logger.warning(f"Bulk indexing skipped: available={self.available}, shouts_count={len(shouts) if shouts else 0}")
|
||||
return
|
||||
|
||||
start_time = time.time()
|
||||
logger.info(f"Starting bulk indexing of {len(shouts)} documents")
|
||||
|
||||
MAX_TEXT_LENGTH = 4000 # Maximum text length to send in a single request
|
||||
max_batch_size = MAX_BATCH_SIZE
|
||||
total_indexed = 0
|
||||
total_skipped = 0
|
||||
total_truncated = 0
|
||||
total_retries = 0
|
||||
|
||||
# Group documents by size to process smaller documents in larger batches
|
||||
small_docs = []
|
||||
medium_docs = []
|
||||
large_docs = []
|
||||
|
||||
# First pass: prepare all documents and categorize by size
|
||||
for shout in shouts:
|
||||
try:
|
||||
await asyncio.wait_for(
|
||||
self.client.index(index=self.index_name, id=str(shout.id), body=index_body), timeout=40.0
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
logger.error(f"Indexing timeout for shout {shout.id}")
|
||||
text_fields = []
|
||||
for field_name in ['title', 'subtitle', 'lead', 'body']:
|
||||
field_value = getattr(shout, field_name, None)
|
||||
if field_value and isinstance(field_value, str) and field_value.strip():
|
||||
text_fields.append(field_value.strip())
|
||||
|
||||
# Media field processing remains the same
|
||||
media = getattr(shout, 'media', None)
|
||||
if media:
|
||||
# Your existing media processing logic
|
||||
if isinstance(media, str):
|
||||
try:
|
||||
media_json = json.loads(media)
|
||||
if isinstance(media_json, dict):
|
||||
if 'title' in media_json:
|
||||
text_fields.append(media_json['title'])
|
||||
if 'body' in media_json:
|
||||
text_fields.append(media_json['body'])
|
||||
except json.JSONDecodeError:
|
||||
text_fields.append(media)
|
||||
elif isinstance(media, dict):
|
||||
if 'title' in media:
|
||||
text_fields.append(media['title'])
|
||||
if 'body' in media:
|
||||
text_fields.append(media['body'])
|
||||
|
||||
text = " ".join(text_fields)
|
||||
|
||||
if not text.strip():
|
||||
logger.debug(f"Skipping shout {shout.id}: no text content")
|
||||
total_skipped += 1
|
||||
continue
|
||||
|
||||
# Truncate text if it exceeds the maximum length
|
||||
original_length = len(text)
|
||||
if original_length > MAX_TEXT_LENGTH:
|
||||
text = text[:MAX_TEXT_LENGTH]
|
||||
logger.info(f"Truncated document {shout.id} from {original_length} to {MAX_TEXT_LENGTH} chars")
|
||||
total_truncated += 1
|
||||
|
||||
document = {
|
||||
"id": str(shout.id),
|
||||
"text": text
|
||||
}
|
||||
|
||||
# Categorize by size
|
||||
text_len = len(text)
|
||||
if text_len > 5000:
|
||||
large_docs.append(document)
|
||||
elif text_len > 2000:
|
||||
medium_docs.append(document)
|
||||
else:
|
||||
small_docs.append(document)
|
||||
|
||||
total_indexed += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Indexing error for shout {shout.id}: {e}")
|
||||
logger.error(f"Error processing shout {getattr(shout, 'id', 'unknown')} for indexing: {e}")
|
||||
total_skipped += 1
|
||||
|
||||
# Process each category with appropriate batch sizes
|
||||
logger.info(f"Documents categorized: {len(small_docs)} small, {len(medium_docs)} medium, {len(large_docs)} large")
|
||||
|
||||
# Process small documents (larger batches)
|
||||
if small_docs:
|
||||
batch_size = min(max_batch_size, 15)
|
||||
await self._process_document_batches(small_docs, batch_size, "small")
|
||||
|
||||
# Process medium documents (medium batches)
|
||||
if medium_docs:
|
||||
batch_size = min(max_batch_size, 10)
|
||||
await self._process_document_batches(medium_docs, batch_size, "medium")
|
||||
|
||||
# Process large documents (small batches)
|
||||
if large_docs:
|
||||
batch_size = min(max_batch_size, 3)
|
||||
await self._process_document_batches(large_docs, batch_size, "large")
|
||||
|
||||
elapsed = time.time() - start_time
|
||||
logger.info(f"Bulk indexing completed in {elapsed:.2f}s: {total_indexed} indexed, {total_skipped} skipped, {total_truncated} truncated, {total_retries} retries")
|
||||
|
||||
async def _process_document_batches(self, documents, batch_size, size_category):
|
||||
"""Process document batches with retry logic"""
|
||||
# Check for possible database corruption before starting
|
||||
db_error_count = 0
|
||||
|
||||
for i in range(0, len(documents), batch_size):
|
||||
batch = documents[i:i+batch_size]
|
||||
batch_id = f"{size_category}-{i//batch_size + 1}"
|
||||
logger.info(f"Processing {size_category} batch {batch_id} of {len(batch)} documents")
|
||||
|
||||
retry_count = 0
|
||||
max_retries = 3
|
||||
success = False
|
||||
|
||||
# Process with retries
|
||||
while not success and retry_count < max_retries:
|
||||
try:
|
||||
if batch:
|
||||
sample = batch[0]
|
||||
logger.info(f"Sample document in batch {batch_id}: id={sample['id']}, text_length={len(sample['text'])}")
|
||||
|
||||
logger.info(f"Sending batch {batch_id} of {len(batch)} documents to search service (attempt {retry_count+1})")
|
||||
response = await self.index_client.post(
|
||||
"/bulk-index",
|
||||
json=batch,
|
||||
timeout=120.0 # Explicit longer timeout for large batches
|
||||
)
|
||||
|
||||
# Handle 422 validation errors - these won't be fixed by retrying
|
||||
if response.status_code == 422:
|
||||
error_detail = response.json()
|
||||
truncated_error = self._truncate_error_detail(error_detail)
|
||||
logger.error(f"Validation error from search service for batch {batch_id}: {truncated_error}")
|
||||
break
|
||||
|
||||
# Handle 500 server errors - these might be fixed by retrying with smaller batches
|
||||
elif response.status_code == 500:
|
||||
db_error_count += 1
|
||||
|
||||
# If we've seen multiple 500s, log a critical error
|
||||
if db_error_count >= 3:
|
||||
logger.critical(f"Multiple server errors detected (500). The search service may need manual intervention. Stopping batch {batch_id} processing.")
|
||||
break
|
||||
|
||||
# Try again with exponential backoff
|
||||
if retry_count < max_retries - 1:
|
||||
retry_count += 1
|
||||
wait_time = (2 ** retry_count) + (random.random() * 0.5) # Exponential backoff with jitter
|
||||
logger.warning(f"Server error for batch {batch_id}, retrying in {wait_time:.1f}s (attempt {retry_count+1}/{max_retries})")
|
||||
await asyncio.sleep(wait_time)
|
||||
continue
|
||||
|
||||
# Final retry, split the batch
|
||||
elif len(batch) > 1:
|
||||
logger.warning(f"Splitting batch {batch_id} after repeated failures")
|
||||
mid = len(batch) // 2
|
||||
await self._process_single_batch(batch[:mid], f"{batch_id}-A")
|
||||
await self._process_single_batch(batch[mid:], f"{batch_id}-B")
|
||||
break
|
||||
else:
|
||||
# Can't split a single document
|
||||
logger.error(f"Failed to index document {batch[0]['id']} after {max_retries} attempts")
|
||||
break
|
||||
|
||||
# Normal success case
|
||||
response.raise_for_status()
|
||||
result = response.json()
|
||||
logger.info(f"Batch {batch_id} indexed successfully: {result}")
|
||||
success = True
|
||||
db_error_count = 0 # Reset error counter on success
|
||||
|
||||
except Exception as e:
|
||||
# Check if it looks like a database corruption error
|
||||
error_str = str(e).lower()
|
||||
if "duplicate key" in error_str or "unique constraint" in error_str or "nonetype" in error_str:
|
||||
db_error_count += 1
|
||||
if db_error_count >= 2:
|
||||
logger.critical(f"Potential database corruption detected: {error_str}. The search service may need manual intervention. Stopping batch {batch_id} processing.")
|
||||
break
|
||||
|
||||
if retry_count < max_retries - 1:
|
||||
retry_count += 1
|
||||
wait_time = (2 ** retry_count) + (random.random() * 0.5)
|
||||
logger.warning(f"Error for batch {batch_id}, retrying in {wait_time:.1f}s: {str(e)[:200]}")
|
||||
await asyncio.sleep(wait_time)
|
||||
else:
|
||||
# Last resort - try to split the batch
|
||||
if len(batch) > 1:
|
||||
logger.warning(f"Splitting batch {batch_id} after exception: {str(e)[:200]}")
|
||||
mid = len(batch) // 2
|
||||
await self._process_single_batch(batch[:mid], f"{batch_id}-A")
|
||||
await self._process_single_batch(batch[mid:], f"{batch_id}-B")
|
||||
else:
|
||||
logger.error(f"Failed to index document {batch[0]['id']} after {max_retries} attempts: {e}")
|
||||
break
|
||||
|
||||
async def _process_single_batch(self, documents, batch_id):
|
||||
"""Process a single batch with maximum reliability"""
|
||||
max_retries = 3
|
||||
retry_count = 0
|
||||
|
||||
while retry_count < max_retries:
|
||||
try:
|
||||
if not documents:
|
||||
return
|
||||
|
||||
logger.info(f"Processing sub-batch {batch_id} with {len(documents)} documents")
|
||||
response = await self.index_client.post(
|
||||
"/bulk-index",
|
||||
json=documents,
|
||||
timeout=90.0
|
||||
)
|
||||
response.raise_for_status()
|
||||
result = response.json()
|
||||
logger.info(f"Sub-batch {batch_id} indexed successfully: {result}")
|
||||
return # Success, exit the retry loop
|
||||
|
||||
except Exception as e:
|
||||
error_str = str(e).lower()
|
||||
retry_count += 1
|
||||
|
||||
# Check if it's a transient error that txtai might recover from internally
|
||||
if "dictionary changed size" in error_str or "transaction error" in error_str:
|
||||
wait_time = (2 ** retry_count) + (random.random() * 0.5)
|
||||
logger.warning(f"Transient txtai error in sub-batch {batch_id}, waiting {wait_time:.1f}s for recovery: {str(e)[:200]}")
|
||||
await asyncio.sleep(wait_time) # Wait for txtai to recover
|
||||
continue # Try again
|
||||
|
||||
# For other errors or final retry failure
|
||||
logger.error(f"Error indexing sub-batch {batch_id} (attempt {retry_count}/{max_retries}): {str(e)[:200]}")
|
||||
|
||||
# Only try one-by-one on the final retry
|
||||
if retry_count >= max_retries and len(documents) > 1:
|
||||
logger.info(f"Processing documents in sub-batch {batch_id} individually")
|
||||
for i, doc in enumerate(documents):
|
||||
try:
|
||||
resp = await self.index_client.post("/index", json=doc, timeout=30.0)
|
||||
resp.raise_for_status()
|
||||
logger.info(f"Indexed document {doc['id']} individually")
|
||||
except Exception as e2:
|
||||
logger.error(f"Failed to index document {doc['id']} individually: {str(e2)[:100]}")
|
||||
return # Exit after individual processing attempt
|
||||
|
||||
def _truncate_error_detail(self, error_detail):
|
||||
"""Truncate error details for logging"""
|
||||
truncated_detail = error_detail.copy() if isinstance(error_detail, dict) else error_detail
|
||||
|
||||
if isinstance(truncated_detail, dict) and 'detail' in truncated_detail and isinstance(truncated_detail['detail'], list):
|
||||
for i, item in enumerate(truncated_detail['detail']):
|
||||
if isinstance(item, dict) and 'input' in item:
|
||||
if isinstance(item['input'], dict) and any(k in item['input'] for k in ['documents', 'text']):
|
||||
# Check for documents list
|
||||
if 'documents' in item['input'] and isinstance(item['input']['documents'], list):
|
||||
for j, doc in enumerate(item['input']['documents']):
|
||||
if 'text' in doc and isinstance(doc['text'], str) and len(doc['text']) > 100:
|
||||
item['input']['documents'][j]['text'] = f"{doc['text'][:100]}... [truncated, total {len(doc['text'])} chars]"
|
||||
|
||||
# Check for direct text field
|
||||
if 'text' in item['input'] and isinstance(item['input']['text'], str) and len(item['input']['text']) > 100:
|
||||
item['input']['text'] = f"{item['input']['text'][:100]}... [truncated, total {len(item['input']['text'])} chars]"
|
||||
|
||||
return truncated_detail
|
||||
|
||||
async def search(self, text, limit, offset):
|
||||
if not SEARCH_ENABLED:
|
||||
"""Search documents"""
|
||||
if not self.available:
|
||||
logger.warning("Search not available")
|
||||
return []
|
||||
|
||||
logger.info(f"Ищем: {text} {offset}+{limit}")
|
||||
search_body = {
|
||||
"query": {"multi_match": {"query": text, "fields": ["title", "lead", "subtitle", "body", "media"]}}
|
||||
}
|
||||
|
||||
if self.client:
|
||||
search_response = self.client.search(
|
||||
index=self.index_name,
|
||||
body=search_body,
|
||||
size=limit,
|
||||
from_=offset,
|
||||
_source=False,
|
||||
_source_excludes=["title", "body", "subtitle", "media", "lead", "_index"],
|
||||
|
||||
if not isinstance(text, str) or not text.strip():
|
||||
logger.warning(f"Invalid search text: {text}")
|
||||
return []
|
||||
|
||||
logger.info(f"Searching for: '{text}' (limit={limit}, offset={offset})")
|
||||
|
||||
try:
|
||||
logger.info(f"Sending search request: text='{text}', limit={limit}, offset={offset}")
|
||||
response = await self.client.post(
|
||||
"/search",
|
||||
json={"text": text, "limit": limit, "offset": offset}
|
||||
)
|
||||
hits = search_response["hits"]["hits"]
|
||||
results = [{"id": hit["_id"], "score": hit["_score"]} for hit in hits]
|
||||
|
||||
# если результаты не пустые
|
||||
if results:
|
||||
# Кэширование в Redis с TTL
|
||||
redis_key = f"search:{text}:{offset}+{limit}"
|
||||
await redis.execute(
|
||||
"SETEX",
|
||||
redis_key,
|
||||
REDIS_TTL,
|
||||
json.dumps(results, cls=CustomJSONEncoder),
|
||||
)
|
||||
return results
|
||||
return []
|
||||
response.raise_for_status()
|
||||
|
||||
logger.info(f"Raw search response: {response.text}")
|
||||
result = response.json()
|
||||
logger.info(f"Parsed search response: {result}")
|
||||
|
||||
formatted_results = result.get("results", [])
|
||||
logger.info(f"Search for '{text}' returned {len(formatted_results)} results")
|
||||
|
||||
if formatted_results:
|
||||
logger.info(f"Sample result: {formatted_results[0]}")
|
||||
else:
|
||||
logger.warning(f"No results found for '{text}'")
|
||||
|
||||
return formatted_results
|
||||
except Exception as e:
|
||||
logger.error(f"Search error for '{text}': {e}", exc_info=True)
|
||||
return []
|
||||
|
||||
async def check_index_status(self):
|
||||
"""Get detailed statistics about the search index health"""
|
||||
if not self.available:
|
||||
return {"status": "disabled"}
|
||||
|
||||
try:
|
||||
response = await self.client.get("/index-status")
|
||||
response.raise_for_status()
|
||||
result = response.json()
|
||||
logger.info(f"Index status check: {result['status']}, {result['documents_count']} documents")
|
||||
|
||||
# Log warnings for any inconsistencies
|
||||
if result.get("consistency", {}).get("status") != "ok":
|
||||
null_count = result.get("consistency", {}).get("null_embeddings_count", 0)
|
||||
if null_count > 0:
|
||||
logger.warning(f"Found {null_count} documents with NULL embeddings")
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to check index status: {e}")
|
||||
return {"status": "error", "message": str(e)}
|
||||
|
||||
|
||||
# Create the search service singleton
|
||||
search_service = SearchService()
|
||||
|
||||
|
||||
# API-compatible function to perform a search
|
||||
async def search_text(text: str, limit: int = 50, offset: int = 0):
|
||||
payload = []
|
||||
if search_service.client:
|
||||
# Использование метода search_post из OpenSearchService
|
||||
if search_service.available:
|
||||
payload = await search_service.search(text, limit, offset)
|
||||
return payload
|
||||
|
||||
|
||||
# Проверить что URL корректный
|
||||
OPENSEARCH_URL = os.getenv("OPENSEARCH_URL", "rc1a-3n5pi3bhuj9gieel.mdb.yandexcloud.net")
|
||||
async def initialize_search_index(shouts_data):
|
||||
"""Initialize search index with existing data during application startup"""
|
||||
if not SEARCH_ENABLED:
|
||||
logger.info("Search indexing skipped (SEARCH_ENABLED=False)")
|
||||
return
|
||||
|
||||
if not shouts_data:
|
||||
logger.warning("No shouts data provided for search indexing")
|
||||
return
|
||||
|
||||
logger.info(f"Checking search index status for {len(shouts_data)} documents")
|
||||
|
||||
# Get the current index info
|
||||
info = await search_service.info()
|
||||
if info.get("status") in ["error", "unavailable", "disabled"]:
|
||||
logger.error(f"Cannot initialize search index: {info}")
|
||||
return
|
||||
|
||||
# Check if index has approximately right number of documents
|
||||
index_stats = info.get("index_stats", {})
|
||||
indexed_doc_count = index_stats.get("document_count", 0)
|
||||
|
||||
# Add a more detailed status check
|
||||
index_status = await search_service.check_index_status()
|
||||
if index_status.get("status") == "healthy":
|
||||
logger.info("Index status check passed")
|
||||
elif index_status.get("status") == "inconsistent":
|
||||
logger.warning("Index status check found inconsistencies")
|
||||
|
||||
# Get documents with null embeddings
|
||||
problem_ids = index_status.get("consistency", {}).get("null_embeddings_sample", [])
|
||||
|
||||
if problem_ids:
|
||||
logger.info(f"Repairing {len(problem_ids)} documents with NULL embeddings")
|
||||
problem_docs = [shout for shout in shouts_data if str(shout.id) in problem_ids]
|
||||
if problem_docs:
|
||||
await search_service.bulk_index(problem_docs)
|
||||
|
||||
# Log database document summary
|
||||
db_ids = [str(shout.id) for shout in shouts_data]
|
||||
logger.info(f"Database contains {len(shouts_data)} documents. Sample IDs: {', '.join(db_ids[:5])}...")
|
||||
|
||||
# Calculate summary by ID range to understand the coverage
|
||||
try:
|
||||
# Parse numeric IDs where possible to analyze coverage
|
||||
numeric_ids = [int(sid) for sid in db_ids if sid.isdigit()]
|
||||
if numeric_ids:
|
||||
min_id = min(numeric_ids)
|
||||
max_id = max(numeric_ids)
|
||||
id_range = max_id - min_id + 1
|
||||
coverage_pct = (len(numeric_ids) / id_range) * 100 if id_range > 0 else 0
|
||||
logger.info(f"ID range analysis: min_id={min_id}, max_id={max_id}, range={id_range}, "
|
||||
f"coverage={coverage_pct:.1f}% ({len(numeric_ids)}/{id_range})")
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not analyze ID ranges: {e}")
|
||||
|
||||
# If counts are significantly different, do verification
|
||||
if abs(indexed_doc_count - len(shouts_data)) > 10:
|
||||
logger.info(f"Document count mismatch: {indexed_doc_count} in index vs {len(shouts_data)} in database. Verifying...")
|
||||
|
||||
# Get all document IDs from your database
|
||||
doc_ids = [str(shout.id) for shout in shouts_data]
|
||||
|
||||
# Verify which ones are missing from the index
|
||||
verification = await search_service.verify_docs(doc_ids)
|
||||
|
||||
if verification.get("status") == "error":
|
||||
logger.error(f"Document verification failed: {verification.get('message')}")
|
||||
return
|
||||
|
||||
# Index only missing documents
|
||||
missing_ids = verification.get("missing", [])
|
||||
if missing_ids:
|
||||
logger.info(f"Found {len(missing_ids)} documents missing from index. Indexing them...")
|
||||
logger.info(f"Sample missing IDs: {', '.join(missing_ids[:10])}...")
|
||||
missing_docs = [shout for shout in shouts_data if str(shout.id) in missing_ids]
|
||||
await search_service.bulk_index(missing_docs)
|
||||
else:
|
||||
logger.info("All documents are already indexed.")
|
||||
else:
|
||||
logger.info(f"Search index appears to be in sync ({indexed_doc_count} documents indexed).")
|
||||
|
||||
# Optional sample verification (can be slow with large document sets)
|
||||
# Uncomment if you want to periodically check a random sample even when counts match
|
||||
"""
|
||||
sample_size = 10
|
||||
if len(db_ids) > sample_size:
|
||||
sample_ids = random.sample(db_ids, sample_size)
|
||||
logger.info(f"Performing random sample verification on {sample_size} documents...")
|
||||
verification = await search_service.verify_docs(sample_ids)
|
||||
if verification.get("missing"):
|
||||
missing_count = len(verification.get("missing", []))
|
||||
logger.warning(f"Random verification found {missing_count}/{sample_size} missing docs "
|
||||
f"despite count match. Consider full verification.")
|
||||
else:
|
||||
logger.info("Random document sample verification passed.")
|
||||
"""
|
||||
|
||||
# Verify with test query
|
||||
try:
|
||||
test_query = "test"
|
||||
logger.info(f"Verifying search index with query: '{test_query}'")
|
||||
test_results = await search_text(test_query, 5)
|
||||
|
||||
if test_results:
|
||||
logger.info(f"Search verification successful: found {len(test_results)} results")
|
||||
# Log categories covered by search results
|
||||
categories = set()
|
||||
for result in test_results:
|
||||
result_id = result.get("id")
|
||||
matching_shouts = [s for s in shouts_data if str(s.id) == result_id]
|
||||
if matching_shouts and hasattr(matching_shouts[0], 'category'):
|
||||
categories.add(getattr(matching_shouts[0], 'category', 'unknown'))
|
||||
if categories:
|
||||
logger.info(f"Search results cover categories: {', '.join(categories)}")
|
||||
else:
|
||||
logger.warning("Search verification returned no results. Index may be empty or not working.")
|
||||
except Exception as e:
|
||||
logger.error(f"Error verifying search index: {e}")
|
||||
|