Compare commits
78 Commits
feature/br ... a1a61a6731

Commits (SHA1):
a1a61a6731
8d6ad2c84f
beba1992e9
b0296d7747
98e3dff35e
3782a9dffb
93c00b3dd1
fac43e5997
e7facf8d87
3062a2b7de
c0406dbbf2
ab4610575f
5425dbf832
a10db2d38a
83e70856cd
11654dba68
ec9465ad40
4d965fb27b
aaa6022a53
d6ada44c7f
243f836f0a
536c094e72
e382cc1ea5
6920351b82
eb216a5f36
bd129efde6
b9f6033e66
710f522c8f
0de4404cb1
83d61ca76d
1c61e889d6
fdedb75a2c
f20000f1f6
7d50638b3a
106222b0e0
c533241d1e
78326047bf
bc4ec79240
a0db5707c4
ecc443c3ad
9a02ca74ad
9ebb81cbd3
abbc074474
0bc55977ac
ff3a4debce
ae85b32f69
34a354e9e3
4f599e097f
a5eaf4bb65
e405fb527b
7f36f93d92
f089a32394
1fd623a660
88012f1b8c
6e284640c0
077cb46482
60a13a9097
3c56fdfaea
81a8bf3c58
316375bf18
fb820f67fd
f1d9f4e036
ebb67eb311
50a8c24ead
eb4b9363ab
19c5028a0c
57e1e8e6bd
385057ffcd
90699768ff
ad0ca75aa9
39242d5e6c
24cca7f2cb
a9c7ac49d6
f249752db5
c0b2116da2
59e71c8144
e6a416383d
d55448398d
@@ -29,7 +29,16 @@ jobs:
if: github.ref == 'refs/heads/dev'
uses: dokku/github-action@master
with:
branch: 'dev'
branch: 'main'
force: true
git_remote_url: 'ssh://dokku@v2.discours.io:22/core'
ssh_private_key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: Push to dokku for staging branch
if: github.ref == 'refs/heads/staging'
uses: dokku/github-action@master
with:
branch: 'dev'
git_remote_url: 'ssh://dokku@staging.discours.io:22/core'
ssh_private_key: ${{ secrets.SSH_PRIVATE_KEY }}
git_push_flags: '--force'
6 .gitignore vendored
@@ -128,6 +128,9 @@ dmypy.json
.idea
temp.*
# Debug
DEBUG.log
discours.key
discours.crt
discours.pem

@@ -161,4 +164,5 @@ views.json
*.key
*.crt
*cache.json
.cursor
.cursor
.devcontainer/
28 CHANGELOG.md
@@ -1,9 +1,29 @@
#### [0.4.19] - 2025-04-14
- dropped `Shout.description` and `Draft.description` to be UX-generated
- use redis to init views counters after migrator

#### [0.4.18] - 2025-04-10
- Fixed `Topic.stat.authors` and `Topic.stat.comments`
- Fixed unique constraint violation for empty slug values:
  - Modified `update_draft` resolver to handle empty slug values
  - Modified `create_draft` resolver to prevent empty slug values
  - Added validation to prevent inserting or updating drafts with empty slug
  - Fixed database error "duplicate key value violates unique constraint draft_slug_key"

#### [0.4.17] - 2025-03-26
- Fixed `'Reaction' object is not subscriptable` error in hierarchical comments:
  - Modified `get_reactions_with_stat()` to convert Reaction objects to dictionaries
  - Added default values for limit/offset parameters
  - Fixed `load_first_replies()` implementation with proper parameter passing
  - Added doctest with example usage
  - Limited child comments to 100 per parent for performance

#### [0.4.16] - 2025-03-22
- Added hierarchical comments pagination:
  - Created new GraphQL query `load_comments_branch` for efficient loading of hierarchical comments
  - Ability to load root comments with their first N replies
  - Added pagination for both root and child comments
  - Using existing `commented` field in `Stat` type to display number of replies
  - Using existing `comments_count` field in `Stat` type to display number of replies
  - Added special `first_replies` field to store first replies to a comment
  - Optimized SQL queries for efficient loading of comment hierarchies
  - Implemented flexible comment sorting system (by time, rating)

@@ -41,8 +61,7 @@
- Implemented persistent Redis caching for author queries without TTL (invalidated only on changes)
- Optimized author retrieval with separate endpoints:
  - `get_authors_all` - returns all non-deleted authors without statistics
  - `get_authors_paginated` - returns authors with statistics and pagination support
  - `load_authors_by` - optimized to use caching and efficient sorting
  - `load_authors_by` - optimized to use caching and efficient sorting and pagination
- Improved SQL queries with optimized JOIN conditions and efficient filtering
- Added pre-aggregation of statistics (shouts count, followers count) in single efficient queries
- Implemented robust cache invalidation on author updates

@@ -54,7 +73,6 @@
- Implemented persistent Redis caching for topic queries (no TTL, invalidated only on changes)
- Optimized topic retrieval with separate endpoints for different use cases:
  - `get_topics_all` - returns all topics without statistics for lightweight listing
  - `get_topics_paginated` - returns topics with statistics and pagination support
  - `get_topics_by_community` - adds pagination and optimized filtering by community
- Added SQLAlchemy-managed indexes directly in ORM models for automatic schema maintenance
- Created `sync_indexes()` function for automatic index synchronization during app startup

@@ -152,7 +170,7 @@
#### [0.4.4]
- `followers_stat` removed for shout
- sqlite3 support added
- `rating_stat` and `commented_stat` fixes
- `rating_stat` and `comments_count` fixes

#### [0.4.3]
- cache reimplemented
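For orientation, a minimal client-side sketch of the hierarchical-comments query described in 0.4.16. It uses httpx (already in requirements); the endpoint URL and the argument names (`shout`, `limit`, `offset`) are assumptions for illustration, only the field names come from this changeset:

import httpx

COMMENTS_QUERY = """
query LoadCommentsBranch($shout: Int!, $limit: Int, $offset: Int) {
  load_comments_branch(shout: $shout, limit: $limit, offset: $offset) {
    id
    body
    reply_to
    stat { rating comments_count }
    first_replies { id body stat { rating comments_count } }
  }
}
"""

def load_root_comments(endpoint: str, shout_id: int, limit: int = 10, offset: int = 0):
    # POST the query and return root comments together with their first replies
    payload = {"query": COMMENTS_QUERY, "variables": {"shout": shout_id, "limit": limit, "offset": offset}}
    resp = httpx.post(endpoint, json=payload)
    resp.raise_for_status()
    return resp.json()["data"]["load_comments_branch"]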
1 app/resolvers/draft.py Normal file

@@ -0,0 +1 @@
5 cache/cache.py vendored
@@ -545,8 +545,9 @@ async def get_cached_data(key: str) -> Optional[Any]:
try:
cached_data = await redis.execute("GET", key)
if cached_data:
logger.debug(f"Данные получены из кеша по ключу {key}")
return orjson.loads(cached_data)
loaded = orjson.loads(cached_data)
logger.debug(f"Данные получены из кеша по ключу {key}: {len(loaded)}")
return loaded
return None
except Exception as e:
logger.error(f"Ошибка при получении данных из кеша: {e}")
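To show how this helper pairs with a write path, a minimal sketch assuming the same `redis.execute(...)` interface used above (the `save_cached_data` name is hypothetical, not part of this changeset):

import orjson

async def save_cached_data(key: str, data) -> None:
    # Serialize with orjson so that get_cached_data() above can orjson.loads() it back
    await redis.execute("SET", key, orjson.dumps(data))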
@@ -45,7 +45,7 @@ query LoadCommentsBranch(
reply_to
stat {
rating
commented
comments_count
}
first_replies {
id

@@ -61,7 +61,7 @@ query LoadCommentsBranch(
reply_to
stat {
rating
commented
comments_count
}
}
}

@@ -92,7 +92,7 @@ query LoadCommentsBranch(
- `reply_to`: ID родительского комментария (null для корневых)
- `first_replies`: Первые N дочерних комментариев
- `stat`: Статистика комментария, включающая:
- `commented`: Количество ответов на комментарий
- `comments_count`: Количество ответов на комментарий
- `rating`: Рейтинг комментария

## Примеры использования

@@ -150,7 +150,7 @@ const { data } = await client.query({
1. Для эффективной работы со сложными ветками обсуждений рекомендуется:
- Сначала загружать только корневые комментарии с первыми N ответами
- При наличии дополнительных ответов (когда `stat.commented > first_replies.length`)
- При наличии дополнительных ответов (когда `stat.comments_count > first_replies.length`)
добавить кнопку "Показать все ответы"
- При нажатии на кнопку загружать дополнительные ответы с помощью запроса с указанным `parentId`
@@ -42,7 +42,7 @@
- Отдельный запрос `load_comments_branch` для оптимизированной загрузки ветки комментариев
- Возможность загрузки корневых комментариев статьи с первыми ответами на них
- Гибкая пагинация как для корневых, так и для дочерних комментариев
- Использование поля `stat.commented` для отображения количества ответов на комментарий
- Использование поля `stat.comments_count` для отображения количества ответов на комментарий
- Добавление специального поля `first_replies` для хранения первых ответов на комментарий
- Поддержка различных методов сортировки (новые, старые, популярные)
- Оптимизированные SQL запросы для минимизации нагрузки на базу данных
48 main.py
@@ -17,7 +17,8 @@ from cache.revalidator import revalidation_manager
from services.exception import ExceptionHandlerMiddleware
from services.redis import redis
from services.schema import create_all_tables, resolvers
from services.search import search_service
#from services.search import search_service
from services.search import search_service, initialize_search_index
from services.viewed import ViewedStorage
from services.webhook import WebhookEndpoint, create_webhook_endpoint
from settings import DEV_SERVER_PID_FILE_NAME, MODE

@@ -34,24 +35,67 @@ async def start():
f.write(str(os.getpid()))
print(f"[main] process started in {MODE} mode")
async def check_search_service():
"""Check if search service is available and log result"""
info = await search_service.info()
if info.get("status") in ["error", "unavailable"]:
print(f"[WARNING] Search service unavailable: {info.get('message', 'unknown reason')}")
else:
print(f"[INFO] Search service is available: {info}")
# indexing DB data
# async def indexing():
# from services.db import fetch_all_shouts
# all_shouts = await fetch_all_shouts()
# await initialize_search_index(all_shouts)
async def lifespan(_app):
try:
print("[lifespan] Starting application initialization")
create_all_tables()
await asyncio.gather(
redis.connect(),
precache_data(),
ViewedStorage.init(),
create_webhook_endpoint(),
search_service.info(),
check_search_service(),
start(),
revalidation_manager.start(),
)
print("[lifespan] Basic initialization complete")
# Add a delay before starting the intensive search indexing
print("[lifespan] Waiting for system stabilization before search indexing...")
await asyncio.sleep(10) # 10-second delay to let the system stabilize
# Start search indexing as a background task with lower priority
asyncio.create_task(initialize_search_index_background())
yield
finally:
print("[lifespan] Shutting down application services")
tasks = [redis.disconnect(), ViewedStorage.stop(), revalidation_manager.stop()]
await asyncio.gather(*tasks, return_exceptions=True)
print("[lifespan] Shutdown complete")
# Initialize search index in the background
async def initialize_search_index_background():
"""Run search indexing as a background task with low priority"""
try:
print("[search] Starting background search indexing process")
from services.db import fetch_all_shouts
# Get total count first (optional)
all_shouts = await fetch_all_shouts()
total_count = len(all_shouts) if all_shouts else 0
print(f"[search] Fetched {total_count} shouts for background indexing")
# Start the indexing process with the fetched shouts
print("[search] Beginning background search index initialization...")
await initialize_search_index(all_shouts)
print("[search] Background search index initialization complete")
except Exception as e:
print(f"[search] Error in background search indexing: {str(e)}")
# Создаем экземпляр GraphQL
graphql_app = GraphQL(schema, debug=True)
@@ -31,6 +31,7 @@ class Draft(Base):
# required
created_at: int = Column(Integer, nullable=False, default=lambda: int(time.time()))
created_by: int = Column(ForeignKey("author.id"), nullable=False)
community: int = Column(ForeignKey("community.id"), nullable=False, default=1)
# optional
layout: str = Column(String, nullable=True, default="article")

@@ -38,7 +39,6 @@ class Draft(Base):
title: str = Column(String, nullable=True)
subtitle: str | None = Column(String, nullable=True)
lead: str | None = Column(String, nullable=True)
description: str | None = Column(String, nullable=True)
body: str = Column(String, nullable=False, comment="Body")
media: dict | None = Column(JSON, nullable=True)
cover: str | None = Column(String, nullable=True, comment="Cover image url")
29 orm/shout.py
@@ -71,6 +71,34 @@ class ShoutAuthor(Base):
class Shout(Base):
"""
Публикация в системе.
Attributes:
body (str)
slug (str)
cover (str) : "Cover image url"
cover_caption (str) : "Cover image alt caption"
lead (str)
title (str)
subtitle (str)
layout (str)
media (dict)
authors (list[Author])
topics (list[Topic])
reactions (list[Reaction])
lang (str)
version_of (int)
oid (str)
seo (str) : JSON
draft (int)
created_at (int)
updated_at (int)
published_at (int)
featured_at (int)
deleted_at (int)
created_by (int)
updated_by (int)
deleted_by (int)
community (int)
"""
__tablename__ = "shout"

@@ -91,7 +119,6 @@ class Shout(Base):
cover: str | None = Column(String, nullable=True, comment="Cover image url")
cover_caption: str | None = Column(String, nullable=True, comment="Cover image alt caption")
lead: str | None = Column(String, nullable=True)
description: str | None = Column(String, nullable=True)
title: str = Column(String, nullable=False)
subtitle: str | None = Column(String, nullable=True)
layout: str = Column(String, nullable=False, default="article")
@@ -13,5 +13,10 @@ starlette
gql
ariadne
granian
# NLP and search
httpx
orjson
pydantic
pydantic
trafilatura
@@ -8,6 +8,7 @@ from resolvers.author import ( # search_authors,
get_author_id,
get_authors_all,
load_authors_by,
load_authors_search,
update_author,
)
from resolvers.community import get_communities_all, get_community

@@ -71,6 +72,7 @@ __all__ = [
"get_author_follows_authors",
"get_authors_all",
"load_authors_by",
"load_authors_search",
"update_author",
## "search_authors",
# community
@@ -20,6 +20,7 @@ from services.auth import login_required
from services.db import local_session
from services.redis import redis
from services.schema import mutation, query
from services.search import search_service
from utils.logger import root_logger as logger
DEFAULT_COMMUNITIES = [1]

@@ -232,22 +233,6 @@ async def get_authors_all(_, _info):
return await get_all_authors()
@query.field("get_authors_paginated")
async def get_authors_paginated(_, _info, limit=50, offset=0, by=None):
"""
Получает список авторов с пагинацией и статистикой.
Args:
limit: Максимальное количество возвращаемых авторов
offset: Смещение для пагинации
by: Параметр сортировки (new/active)
Returns:
list: Список авторов с их статистикой
"""
return await get_authors_with_stats(limit, offset, by)
@query.field("get_author")
async def get_author(_, _info, slug="", author_id=0):
author_dict = None

@@ -317,6 +302,46 @@ async def load_authors_by(_, _info, by, limit, offset):
return await get_authors_with_stats(limit, offset, by)
@query.field("load_authors_search")
async def load_authors_search(_, info, text: str, limit: int = 10, offset: int = 0):
"""
Resolver for searching authors by text. Works with txt-ai search endpoint.
Args:
text: Search text
limit: Maximum number of authors to return
offset: Offset for pagination
Returns:
list: List of authors matching the search criteria
"""
# Get author IDs from search engine (already sorted by relevance)
search_results = await search_service.search_authors(text, limit, offset)
if not search_results:
return []
author_ids = [result.get("id") for result in search_results if result.get("id")]
if not author_ids:
return []
# Fetch full author objects from DB
with local_session() as session:
# Simple query to get authors by IDs - no need for stats here
authors_query = select(Author).filter(Author.id.in_(author_ids))
db_authors = session.execute(authors_query).scalars().all()
if not db_authors:
return []
# Create a dictionary for quick lookup
authors_dict = {str(author.id): author for author in db_authors}
# Keep the order from search results (maintains the relevance sorting)
ordered_authors = [authors_dict[author_id] for author_id in author_ids if author_id in authors_dict]
return ordered_authors
def get_author_id_from(slug="", user=None, author_id=None):
try:
author_id = None
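The reordering step at the end of `load_authors_search` can be illustrated in isolation; the data below is made up for the example:

search_results = [{"id": 7, "score": 0.9}, {"id": 3, "score": 0.5}, {"id": 42, "score": 0.1}]
author_ids = [result.get("id") for result in search_results if result.get("id")]
authors_dict = {"3": "Author#3", "7": "Author#7"}  # stand-ins for ORM objects keyed by str(id); id 42 is not in the DB
ordered = [authors_dict[str(author_id)] for author_id in author_ids if str(author_id) in authors_dict]
assert ordered == ["Author#7", "Author#3"]  # relevance order preserved, missing rows skipped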
@@ -1,6 +1,7 @@
import time
from operator import or_
import trafilatura
from sqlalchemy.sql import and_
from cache.cache import (

@@ -30,7 +31,6 @@ def create_shout_from_draft(session, draft, author_id):
cover=draft.cover,
cover_caption=draft.cover_caption,
lead=draft.lead,
description=draft.description,
title=draft.title,
subtitle=draft.subtitle,
layout=draft.layout,

@@ -105,12 +105,17 @@ async def create_draft(_, info, draft_input):
if "title" not in draft_input or not draft_input["title"]:
draft_input["title"] = "" # Пустая строка вместо NULL
# Проверяем slug - он должен быть или не пустым, или не передаваться вообще
if "slug" in draft_input and (draft_input["slug"] is None or draft_input["slug"] == ""):
# При создании черновика удаляем пустой slug из входных данных
del draft_input["slug"]
try:
with local_session() as session:
# Remove id from input if present since it's auto-generated
if "id" in draft_input:
del draft_input["id"]
# Добавляем текущее время создания
draft_input["created_at"] = int(time.time())

@@ -122,6 +127,11 @@ async def create_draft(_, info, draft_input):
logger.error(f"Failed to create draft: {e}", exc_info=True)
return {"error": f"Failed to create draft: {str(e)}"}
def generate_teaser(body, limit=300):
body_text = trafilatura.extract(body, include_comments=False, include_tables=False)
body_teaser = ". ".join(body_text[:limit].split(". ")[:-1])
return body_teaser
@mutation.field("update_draft")
@login_required

@@ -142,14 +152,65 @@ async def update_draft(_, info, draft_id: int, draft_input):
if not user_id or not author_id:
return {"error": "Author ID are required"}
# Проверяем slug - он должен быть или не пустым, или не передаваться вообще
if "slug" in draft_input and (draft_input["slug"] is None or draft_input["slug"] == ""):
# Если slug пустой, либо удаляем его из входных данных, либо генерируем временный уникальный
# Вариант 1: просто удаляем ключ из входных данных, чтобы оставить старое значение
del draft_input["slug"]
# Вариант 2 (если нужно обновить): генерируем временный уникальный slug
# import uuid
# draft_input["slug"] = f"draft-{uuid.uuid4().hex[:8]}"
with local_session() as session:
draft = session.query(Draft).filter(Draft.id == draft_id).first()
if not draft:
return {"error": "Draft not found"}
# Generate SEO description if not provided and not already set
if "seo" not in draft_input and not draft.seo:
body_src = draft_input.get("body") if "body" in draft_input else draft.body
lead_src = draft_input.get("lead") if "lead" in draft_input else draft.lead
body_text = None
if body_src:
try:
# Extract text, excluding comments and tables
body_text = trafilatura.extract(body_src, include_comments=False, include_tables=False)
except Exception as e:
logger.warning(f"Trafilatura failed to extract body text for draft {draft_id}: {e}")
lead_text = None
if lead_src:
try:
# Extract text from lead
lead_text = trafilatura.extract(lead_src, include_comments=False, include_tables=False)
except Exception as e:
logger.warning(f"Trafilatura failed to extract lead text for draft {draft_id}: {e}")
# Generate body teaser only if body_text was successfully extracted
body_teaser = generate_teaser(body_text, 300) if body_text else ""
# Prioritize lead_text for SEO, fallback to body_teaser. Ensure it's a string.
generated_seo = lead_text if lead_text else body_teaser
draft_input["seo"] = generated_seo if generated_seo else ""
# Update the draft object with new data from draft_input
# Assuming Draft.update is a helper that iterates keys or similar.
# A more standard SQLAlchemy approach would be:
# for key, value in draft_input.items():
# if hasattr(draft, key):
# setattr(draft, key, value)
# But we stick to the existing pattern for now.
Draft.update(draft, draft_input)
draft.updated_at = int(time.time())
# Set updated timestamp and author
current_time = int(time.time())
draft.updated_at = current_time
draft.updated_by = author_id # Assuming author_id is correctly fetched context
session.commit()
# Invalidate cache related to this draft if necessary (consider adding)
# await invalidate_draft_cache(draft_id)
return {"draft": draft}

@@ -249,7 +310,6 @@ async def publish_shout(_, info, shout_id: int):
shout.cover = draft.cover
shout.cover_caption = draft.cover_caption
shout.lead = draft.lead
shout.description = draft.description
shout.layout = draft.layout
shout.media = draft.media
shout.lang = draft.lang
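As a small illustration of the sentence-trimming step inside `generate_teaser` (the trafilatura extraction is skipped here; the input string is made up):

body_text = "First sentence. Second sentence. Third one runs long without a terminator"
body_teaser = ". ".join(body_text[:40].split(". ")[:-1])
assert body_teaser == "First sentence. Second sentence"  # only complete sentences survive the cut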
@@ -1,6 +1,7 @@
import time
import orjson
import trafilatura
from sqlalchemy import and_, desc, select
from sqlalchemy.orm import joinedload
from sqlalchemy.sql.functions import coalesce

@@ -176,9 +177,16 @@ async def create_shout(_, info, inp):
logger.info(f"Creating shout with input: {inp}")
# Создаем публикацию без topics
body = inp.get("body", "")
lead = inp.get("lead", "")
body_text = trafilatura.extract(body)
lead_text = trafilatura.extract(lead)
seo = inp.get("seo", lead_text or body_text[:300].split(". ")[:-1].join(". "))
new_shout = Shout(
slug=slug,
body=inp.get("body", ""),
body=body,
seo=seo,
lead=lead,
layout=inp.get("layout", "article"),
title=inp.get("title", ""),
created_by=author_id,

@@ -380,7 +388,7 @@ def patch_topics(session, shout, topics_input):
# @login_required
async def update_shout(_, info, shout_id: int, shout_input=None, publish=False):
logger.info(f"Starting update_shout with id={shout_id}, publish={publish}")
logger.debug(f"Full shout_input: {shout_input}")
logger.debug(f"Full shout_input: {shout_input}") # DraftInput
user_id = info.context.get("user_id")
roles = info.context.get("roles", [])
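One note on the `create_shout` hunk above: in Python `join` is a `str` method, not a `list` method, so `body_text[:300].split(". ")[:-1].join(". ")` raises AttributeError whenever that default is evaluated, and `trafilatura.extract` can also return None. A working form of the apparent intent, sketched here as an assumption rather than as the committed code:

body_text = trafilatura.extract(body) or ""
lead_text = trafilatura.extract(lead) or ""
fallback_teaser = ". ".join(body_text[:300].split(". ")[:-1])  # same trimming as generate_teaser()
seo = inp.get("seo") or lead_text or fallback_teaser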
@@ -67,30 +67,35 @@ def add_reaction_stat_columns(q):
return q
def get_reactions_with_stat(q, limit, offset):
def get_reactions_with_stat(q, limit=10, offset=0):
"""
Execute the reaction query and retrieve reactions with statistics.
:param q: Query with reactions and statistics.
:param limit: Number of reactions to load.
:param offset: Pagination offset.
:return: List of reactions.
:return: List of reactions as dictionaries.
>>> get_reactions_with_stat(q, 10, 0) # doctest: +SKIP
[{'id': 1, 'body': 'Текст комментария', 'stat': {'rating': 5, 'comments_count': 3}, ...}]
"""
q = q.limit(limit).offset(offset)
reactions = []
with local_session() as session:
result_rows = session.execute(q)
for reaction, author, shout, commented_stat, rating_stat in result_rows:
for reaction, author, shout, comments_count, rating_stat in result_rows:
# Пропускаем реакции с отсутствующими shout или author
if not shout or not author:
logger.error(f"Пропущена реакция из-за отсутствия shout или author: {reaction.dict()}")
continue
reaction.created_by = author.dict()
reaction.shout = shout.dict()
reaction.stat = {"rating": rating_stat, "comments": commented_stat}
reactions.append(reaction)
# Преобразуем Reaction в словарь для доступа по ключу
reaction_dict = reaction.dict()
reaction_dict["created_by"] = author.dict()
reaction_dict["shout"] = shout.dict()
reaction_dict["stat"] = {"rating": rating_stat, "comments_count": comments_count}
reactions.append(reaction_dict)
return reactions

@@ -393,7 +398,7 @@ async def update_reaction(_, info, reaction):
result = session.execute(reaction_query).unique().first()
if result:
r, author, _shout, commented_stat, rating_stat = result
r, author, _shout, comments_count, rating_stat = result
if not r or not author:
return {"error": "Invalid reaction ID or unauthorized"}

@@ -408,7 +413,7 @@ async def update_reaction(_, info, reaction):
session.commit()
r.stat = {
"commented": commented_stat,
"comments_count": comments_count,
"rating": rating_stat,
}

@@ -713,7 +718,7 @@ async def load_comments_branch(
async def load_replies_count(comments):
"""
Загружает количество ответов для списка комментариев и обновляет поле stat.commented.
Загружает количество ответов для списка комментариев и обновляет поле stat.comments_count.
:param comments: Список комментариев, для которых нужно загрузить количество ответов.
"""

@@ -748,7 +753,7 @@ async def load_replies_count(comments):
comment["stat"] = {}
# Обновляем счетчик комментариев в stat
comment["stat"]["commented"] = replies_count.get(comment["id"], 0)
comment["stat"]["comments_count"] = replies_count.get(comment["id"], 0)
async def load_first_replies(comments, limit, offset, sort="newest"):

@@ -793,8 +798,9 @@ async def load_first_replies(comments, limit, offset, sort="newest"):
q = q.order_by(order_by_stmt, Reaction.reply_to)
# Выполняем запрос
replies = get_reactions_with_stat(q)
# Выполняем запрос - указываем limit для неограниченного количества ответов
# но не более 100 на родительский комментарий
replies = get_reactions_with_stat(q, limit=100, offset=0)
# Группируем ответы по родительским ID
replies_by_parent = {}
@@ -10,7 +10,7 @@ from orm.shout import Shout, ShoutAuthor, ShoutTopic
from orm.topic import Topic
from services.db import json_array_builder, json_builder, local_session
from services.schema import query
from services.search import search_text
from services.search import search_text, get_search_count
from services.viewed import ViewedStorage
from utils.logger import root_logger as logger

@@ -187,12 +187,10 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
"""
shouts = []
try:
# logger.info(f"Starting get_shouts_with_links with limit={limit}, offset={offset}")
q = q.limit(limit).offset(offset)
with local_session() as session:
shouts_result = session.execute(q).all()
# logger.info(f"Got {len(shouts_result) if shouts_result else 0} shouts from query")
if not shouts_result:
logger.warning("No shouts found in query result")

@@ -203,7 +201,6 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
shout = None
if hasattr(row, "Shout"):
shout = row.Shout
# logger.debug(f"Processing shout#{shout.id} at index {idx}")
if shout:
shout_id = int(f"{shout.id}")
shout_dict = shout.dict()

@@ -225,26 +222,22 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
elif isinstance(row.stat, dict):
stat = row.stat
viewed = ViewedStorage.get_shout(shout_id=shout_id) or 0
shout_dict["stat"] = {**stat, "viewed": viewed, "commented": stat.get("comments_count", 0)}
shout_dict["stat"] = {**stat, "viewed": viewed}
# Обработка main_topic и topics
topics = None
if has_field(info, "topics") and hasattr(row, "topics"):
topics = orjson.loads(row.topics) if isinstance(row.topics, str) else row.topics
# logger.debug(f"Shout#{shout_id} topics: {topics}")
shout_dict["topics"] = topics
if has_field(info, "main_topic"):
main_topic = None
if hasattr(row, "main_topic"):
# logger.debug(f"Raw main_topic for shout#{shout_id}: {row.main_topic}")
main_topic = (
orjson.loads(row.main_topic) if isinstance(row.main_topic, str) else row.main_topic
)
# logger.debug(f"Parsed main_topic for shout#{shout_id}: {main_topic}")
if not main_topic and topics and len(topics) > 0:
# logger.info(f"No main_topic found for shout#{shout_id}, using first topic from list")
main_topic = {
"id": topics[0]["id"],
"title": topics[0]["title"],

@@ -252,10 +245,8 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
"is_main": True,
}
elif not main_topic:
logger.warning(f"No main_topic and no topics found for shout#{shout_id}")
main_topic = {"id": 0, "title": "no topic", "slug": "notopic", "is_main": True}
shout_dict["main_topic"] = main_topic
# logger.debug(f"Final main_topic for shout#{shout_id}: {main_topic}")
if has_field(info, "authors") and hasattr(row, "authors"):
shout_dict["authors"] = (

@@ -282,7 +273,6 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
logger.error(f"Fatal error in get_shouts_with_links: {e}", exc_info=True)
raise
finally:
logger.info(f"Returning {len(shouts)} shouts from get_shouts_with_links")
return shouts

@@ -401,33 +391,49 @@ async def load_shouts_search(_, info, text, options):
"""
limit = options.get("limit", 10)
offset = options.get("offset", 0)
if isinstance(text, str) and len(text) > 2:
# Get search results with pagination
results = await search_text(text, limit, offset)
scores = {}
hits_ids = []
for sr in results:
shout_id = sr.get("id")
if shout_id:
shout_id = str(shout_id)
scores[shout_id] = sr.get("score")
hits_ids.append(shout_id)
if not results:
logger.info(f"No search results found for '{text}'")
return []
# Extract IDs in the order from the search engine
hits_ids = [str(sr.get("id")) for sr in results if sr.get("id")]
q = (
query_with_stat(info)
if has_field(info, "stat")
else select(Shout).filter(and_(Shout.published_at.is_not(None), Shout.deleted_at.is_(None)))
)
# Query DB for only the IDs in the current page
q = query_with_stat(info)
q = q.filter(Shout.id.in_(hits_ids))
q = apply_filters(q, options)
q = apply_sorting(q, options)
shouts = get_shouts_with_links(info, q, limit, offset)
for shout in shouts:
shout.score = scores[f"{shout.id}"]
shouts.sort(key=lambda x: x.score, reverse=True)
return shouts
q = apply_filters(q, options.get("filters", {}))
shouts = get_shouts_with_links(info, q, len(hits_ids), 0)
# Reorder shouts to match the order from hits_ids
shouts_dict = {str(shout['id']): shout for shout in shouts}
ordered_shouts = [shouts_dict[shout_id] for shout_id in hits_ids if shout_id in shouts_dict]
return ordered_shouts
return []
@query.field("get_search_results_count")
async def get_search_results_count(_, info, text):
"""
Returns the total count of search results for a search query.
:param _: Root query object (unused)
:param info: GraphQL context information
:param text: Search query text
:return: Total count of results
"""
if isinstance(text, str) and len(text) > 2:
count = await get_search_count(text)
return {"count": count}
return {"count": 0}
@query.field("load_shouts_unrated")
async def load_shouts_unrated(_, info, options):
"""
@@ -127,6 +127,28 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
"""
followers_stats = {row[0]: row[1] for row in session.execute(text(followers_stats_query))}
# Запрос на получение статистики авторов для выбранных тем
authors_stats_query = f"""
SELECT st.topic, COUNT(DISTINCT sa.author) as authors_count
FROM shout_topic st
JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL AND s.published_at IS NOT NULL
JOIN shout_author sa ON sa.shout = s.id
WHERE st.topic IN ({",".join(map(str, topic_ids))})
GROUP BY st.topic
"""
authors_stats = {row[0]: row[1] for row in session.execute(text(authors_stats_query))}
# Запрос на получение статистики комментариев для выбранных тем
comments_stats_query = f"""
SELECT st.topic, COUNT(DISTINCT r.id) as comments_count
FROM shout_topic st
JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL AND s.published_at IS NOT NULL
JOIN reaction r ON r.shout = s.id
WHERE st.topic IN ({",".join(map(str, topic_ids))})
GROUP BY st.topic
"""
comments_stats = {row[0]: row[1] for row in session.execute(text(comments_stats_query))}
# Формируем результат с добавлением статистики
result = []
for topic in topics:

@@ -134,6 +156,8 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
topic_dict["stat"] = {
"shouts": shouts_stats.get(topic.id, 0),
"followers": followers_stats.get(topic.id, 0),
"authors": authors_stats.get(topic.id, 0),
"comments": comments_stats.get(topic.id, 0),
}
result.append(topic_dict)

@@ -202,23 +226,6 @@ async def get_topics_all(_, _info):
return await get_all_topics()
# Запрос на получение тем с пагинацией и статистикой
@query.field("get_topics_paginated")
async def get_topics_paginated(_, _info, limit=100, offset=0, by=None):
"""
Получает список тем с пагинацией и статистикой.
Args:
limit: Максимальное количество возвращаемых тем
offset: Смещение для пагинации
by: Опциональные параметры сортировки
Returns:
list: Список тем с их статистикой
"""
return await get_topics_with_stats(limit, offset, None, by)
# Запрос на получение тем по сообществу
@query.field("get_topics_by_community")
async def get_topics_by_community(_, _info, community_id: int, limit=100, offset=0, by=None):
@@ -33,7 +33,6 @@ input DraftInput {
main_topic_id: Int # Changed from main_topic: Topic
media: [MediaItemInput] # Changed to use MediaItemInput
lead: String
description: String
subtitle: String
lang: String
seo: String
@@ -4,7 +4,7 @@ type Query {
get_author_id(user: String!): Author
get_authors_all: [Author]
load_authors_by(by: AuthorsBy!, limit: Int, offset: Int): [Author]
# search_authors(what: String!): [Author]
load_authors_search(text: String!, limit: Int, offset: Int): [Author!] # Search for authors by name or bio
# community
get_community: Community

@@ -33,6 +33,7 @@ type Query {
get_shout(slug: String, shout_id: Int): Shout
load_shouts_by(options: LoadShoutsOptions): [Shout]
load_shouts_search(text: String!, options: LoadShoutsOptions): [SearchResult]
get_search_results_count(text: String!): CountResult!
load_shouts_bookmarked(options: LoadShoutsOptions): [Shout]
# rating

@@ -60,7 +61,7 @@ type Query {
get_topic(slug: String!): Topic
get_topics_all: [Topic]
get_topics_by_author(slug: String, user: String, author_id: Int): [Topic]
get_topics_by_community(slug: String, community_id: Int): [Topic]
get_topics_by_community(community_id: Int!, limit: Int, offset: Int): [Topic]
# notifier
load_notifications(after: Int!, limit: Int, offset: Int): NotificationsResult!
@@ -80,7 +80,6 @@ type Shout {
layout: String!
lead: String
description: String
subtitle: String
lang: String
cover: String

@@ -100,6 +99,7 @@ type Shout {
featured_at: Int
deleted_at: Int
seo: String # generated if not set
version_of: Shout # TODO: use version_of somewhere
draft: Draft
media: [MediaItem]

@@ -111,13 +111,12 @@ type Draft {
id: Int!
created_at: Int!
created_by: Author!
community: Community!
layout: String
slug: String
title: String
subtitle: String
lead: String
description: String
body: String
media: [MediaItem]
cover: String

@@ -137,7 +136,7 @@ type Draft {
type Stat {
rating: Int
commented: Int
comments_count: Int
viewed: Int
last_commented_at: Int
}

@@ -208,6 +207,7 @@ type CommonResult {
}
type SearchResult {
id: Int!
slug: String!
title: String!
cover: String

@@ -275,3 +275,7 @@ type MyRateComment {
my_rate: ReactionKind
}
type CountResult {
count: Int!
}
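A minimal client sketch for the `get_search_results_count` query and `CountResult` type added above; the endpoint URL is an assumption, the field names follow the schema:

import httpx

COUNT_QUERY = """
query GetSearchResultsCount($text: String!) {
  get_search_results_count(text: $text) { count }
}
"""

def search_results_count(endpoint: str, text: str) -> int:
    # Returns the total number of hits so the UI can size its pagination
    resp = httpx.post(endpoint, json={"query": COUNT_QUERY, "variables": {"text": text}})
    resp.raise_for_status()
    return resp.json()["data"]["get_search_results_count"]["count"]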
@@ -19,7 +19,7 @@ from sqlalchemy import (
inspect,
text,
)
from sqlalchemy.orm import Session, configure_mappers, declarative_base
from sqlalchemy.orm import Session, configure_mappers, declarative_base, joinedload
from sqlalchemy.sql.schema import Table
from settings import DB_URL

@@ -259,3 +259,32 @@ def get_json_builder():
# Используем их в коде
json_builder, json_array_builder, json_cast = get_json_builder()
# Fetch all shouts, with authors preloaded
# This function is used for search indexing
async def fetch_all_shouts(session=None):
"""Fetch all published shouts for search indexing with authors preloaded"""
from orm.shout import Shout
close_session = False
if session is None:
session = local_session()
close_session = True
try:
# Fetch only published and non-deleted shouts with authors preloaded
query = session.query(Shout).options(
joinedload(Shout.authors)
).filter(
Shout.published_at.is_not(None),
Shout.deleted_at.is_(None)
)
shouts = query.all()
return shouts
except Exception as e:
logger.error(f"Error fetching shouts for search indexing: {e}")
return []
finally:
if close_session:
session.close()
1080 services/search.py (file diff suppressed because it is too large)
@@ -2,9 +2,7 @@ import asyncio
import os
import time
from datetime import datetime, timedelta, timezone
from typing import Dict
import orjson
from typing import Dict, Optional
# ga
from google.analytics.data_v1beta import BetaAnalyticsDataClient

@@ -20,33 +18,39 @@ from orm.author import Author
from orm.shout import Shout, ShoutAuthor, ShoutTopic
from orm.topic import Topic
from services.db import local_session
from services.redis import redis
from utils.logger import root_logger as logger
GOOGLE_KEYFILE_PATH = os.environ.get("GOOGLE_KEYFILE_PATH", "/dump/google-service.json")
GOOGLE_PROPERTY_ID = os.environ.get("GOOGLE_PROPERTY_ID", "")
VIEWS_FILEPATH = "/dump/views.json"
class ViewedStorage:
"""
Класс для хранения и доступа к данным о просмотрах.
Использует Redis в качестве основного хранилища и Google Analytics для сбора новых данных.
"""
lock = asyncio.Lock()
precounted_by_slug = {}
views_by_shout = {}
shouts_by_topic = {}
shouts_by_author = {}
views = None
period = 60 * 60 # каждый час
analytics_client: BetaAnalyticsDataClient | None = None
analytics_client: Optional[BetaAnalyticsDataClient] = None
auth_result = None
running = False
redis_views_key = None
last_update_timestamp = 0
start_date = datetime.now().strftime("%Y-%m-%d")
@staticmethod
async def init():
"""Подключение к клиенту Google Analytics с использованием аутентификации"""
"""Подключение к клиенту Google Analytics и загрузка данных о просмотрах из Redis"""
self = ViewedStorage
async with self.lock:
# Загрузка предварительно подсчитанных просмотров из файла JSON
self.load_precounted_views()
# Загрузка предварительно подсчитанных просмотров из Redis
await self.load_views_from_redis()
os.environ.setdefault("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_KEYFILE_PATH)
if GOOGLE_KEYFILE_PATH and os.path.isfile(GOOGLE_KEYFILE_PATH):

@@ -62,40 +66,54 @@ class ViewedStorage:
self.running = False
@staticmethod
def load_precounted_views():
"""Загрузка предварительно подсчитанных просмотров из файла JSON"""
async def load_views_from_redis():
"""Загрузка предварительно подсчитанных просмотров из Redis"""
self = ViewedStorage
viewfile_path = VIEWS_FILEPATH
if not os.path.exists(viewfile_path):
viewfile_path = os.path.join(os.path.curdir, "views.json")
if not os.path.exists(viewfile_path):
logger.warning(" * views.json not found")
return
logger.info(f" * loading views from {viewfile_path}")
try:
start_date_int = os.path.getmtime(viewfile_path)
start_date_str = datetime.fromtimestamp(start_date_int).strftime("%Y-%m-%d")
self.start_date = start_date_str
# Подключаемся к Redis если соединение не установлено
if not redis._client:
await redis.connect()
# Получаем список всех ключей migrated_views_* и находим самый последний
keys = await redis.execute("KEYS", "migrated_views_*")
if not keys:
logger.warning(" * No migrated_views keys found in Redis")
return
# Фильтруем только ключи timestamp формата (исключаем migrated_views_slugs)
timestamp_keys = [k for k in keys if k != "migrated_views_slugs"]
if not timestamp_keys:
logger.warning(" * No migrated_views timestamp keys found in Redis")
return
# Сортируем по времени создания (в названии ключа) и берем последний
timestamp_keys.sort()
latest_key = timestamp_keys[-1]
self.redis_views_key = latest_key
# Получаем метку времени создания для установки start_date
timestamp = await redis.execute("HGET", latest_key, "_timestamp")
if timestamp:
self.last_update_timestamp = int(timestamp)
timestamp_dt = datetime.fromtimestamp(int(timestamp))
self.start_date = timestamp_dt.strftime("%Y-%m-%d")
# Если данные сегодняшние, считаем их актуальными
now_date = datetime.now().strftime("%Y-%m-%d")
if now_date == self.start_date:
logger.info(" * views data is up to date!")
logger.info(" * Views data is up to date!")
else:
logger.warn(f" * {viewfile_path} is too old: {self.start_date}")
logger.warning(f" * Views data is from {self.start_date}, may need update")
with open(viewfile_path, "r") as file:
precounted_views = orjson.loads(file.read())
self.precounted_by_slug.update(precounted_views)
logger.info(f" * {len(precounted_views)} shouts with views was loaded.")
except Exception as e:
logger.error(f"precounted views loading error: {e}")
# Выводим информацию о количестве загруженных записей
total_entries = await redis.execute("HGET", latest_key, "_total")
if total_entries:
logger.info(f" * {total_entries} shouts with views loaded from Redis key: {latest_key}")
# noinspection PyTypeChecker
@staticmethod
async def update_pages():
"""Запрос всех страниц от Google Analytics, отсортрованных по количеству просмотров"""
"""Запрос всех страниц от Google Analytics, отсортированных по количеству просмотров"""
self = ViewedStorage
logger.info(" ⎧ views update from Google Analytics ---")
if self.running:

@@ -140,15 +158,40 @@ class ViewedStorage:
self.running = False
@staticmethod
def get_shout(shout_slug="", shout_id=0) -> int:
"""Получение метрики просмотров shout по slug или id."""
async def get_shout(shout_slug="", shout_id=0) -> int:
"""
Получение метрики просмотров shout по slug или id.
Args:
shout_slug: Slug публикации
shout_id: ID публикации
Returns:
int: Количество просмотров
"""
self = ViewedStorage
# Получаем данные из Redis для новой схемы хранения
if not redis._client:
await redis.connect()
fresh_views = self.views_by_shout.get(shout_slug, 0)
precounted_views = self.precounted_by_slug.get(shout_slug, 0)
return fresh_views + precounted_views
# Если есть id, пытаемся получить данные из Redis по ключу migrated_views_<timestamp>
if shout_id and self.redis_views_key:
precounted_views = await redis.execute("HGET", self.redis_views_key, str(shout_id))
if precounted_views:
return fresh_views + int(precounted_views)
# Если нет id или данных, пытаемся получить по slug из отдельного хеша
precounted_views = await redis.execute("HGET", "migrated_views_slugs", shout_slug)
if precounted_views:
return fresh_views + int(precounted_views)
return fresh_views
@staticmethod
def get_shout_media(shout_slug) -> Dict[str, int]:
async def get_shout_media(shout_slug) -> Dict[str, int]:
"""Получение метрики воспроизведения shout по slug."""
self = ViewedStorage

@@ -157,23 +200,29 @@ class ViewedStorage:
return self.views_by_shout.get(shout_slug, 0)
@staticmethod
def get_topic(topic_slug) -> int:
async def get_topic(topic_slug) -> int:
"""Получение суммарного значения просмотров темы."""
self = ViewedStorage
return sum(self.views_by_shout.get(shout_slug, 0) for shout_slug in self.shouts_by_topic.get(topic_slug, []))
views_count = 0
for shout_slug in self.shouts_by_topic.get(topic_slug, []):
views_count += await self.get_shout(shout_slug=shout_slug)
return views_count
@staticmethod
def get_author(author_slug) -> int:
async def get_author(author_slug) -> int:
"""Получение суммарного значения просмотров автора."""
self = ViewedStorage
return sum(self.views_by_shout.get(shout_slug, 0) for shout_slug in self.shouts_by_author.get(author_slug, []))
views_count = 0
for shout_slug in self.shouts_by_author.get(author_slug, []):
views_count += await self.get_shout(shout_slug=shout_slug)
return views_count
@staticmethod
def update_topics(shout_slug):
"""Обновление счетчиков темы по slug shout"""
self = ViewedStorage
with local_session() as session:
# Определение вспомогательной функции для избежа��ия повторения кода
# Определение вспомогательной функции для избежания повторения кода
def update_groups(dictionary, key, value):
dictionary[key] = list(set(dictionary.get(key, []) + [value]))
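Since `get_shout`, `get_topic`, and `get_author` become coroutines in this file, call sites now need an await; a minimal sketch, with the import path as used elsewhere in this branch:

from services.viewed import ViewedStorage

async def shout_views(shout_id: int, slug: str = "") -> int:
    # get_shout() is async now (it may read precounted views from Redis), so it must be awaited
    return await ViewedStorage.get_shout(shout_slug=slug, shout_id=shout_id)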